code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""Linear model of tree-based decision rules based on the rulefit algorithm from <NAME> Popescu.
The algorithm can be used for predicting an output vector y given an input matrix X. In the first step a tree ensemble
is generated with gradient boosting. The trees are then used to form rules, where the paths to each node in each tree
form one rule. A rule is a binary decision if an observation is in a given node, which is dependent on the input features
that were used in the splits. The ensemble of rules together with the original input features are then being input in a
L1-regularized linear model, also called Lasso, which estimates the effects of each rule on the output target but at the
same time estimating many of those effects to zero.
"""
from typing import List, Tuple
import numpy as np
import pandas as pd
from scipy.special import softmax
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.base import TransformerMixin
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from imodels.rule_set.rule_set import RuleSet
from imodels.util.extract import extract_rulefit
from imodels.util.rule import get_feature_dict, replace_feature_name, Rule
from imodels.util.score import score_linear
from imodels.util.transforms import Winsorizer, FriedScale
class RuleFit(BaseEstimator, TransformerMixin, RuleSet):
    """Rulefit class. Rather than using this class directly, should use RuleFitRegressor or RuleFitClassifier

    Parameters
    ----------
    tree_size: Number of terminal nodes in generated trees. If exp_rand_tree_size=True,
        this will be the mean number of terminal nodes.
    sample_fract: fraction of randomly chosen training observations used to produce each tree.
        FP 2004 (Sec. 2)
    max_rules: total number of terms included in the final model (both linear and rules)
        approximate total number of candidate rules generated for fitting also is based on this
        Note that actual number of candidate rules will usually be lower than this due to duplicates.
    memory_par: scale multiplier (shrinkage factor) applied to each new tree when
        sequentially induced. FP 2004 (Sec. 2)
    lin_standardise: If True, the linear terms will be standardised as per Friedman Sec 3.2
        by multiplying the winsorised variable by 0.4/stdev.
    lin_trim_quantile: If lin_standardise is True, this quantile will be used to trim linear
        terms before standardisation.
    exp_rand_tree_size: If True, each boosted tree will have a different maximum number of
        terminal nodes based on an exponential distribution about tree_size.
        (Friedman Sec 3.3)
    include_linear: Include linear terms as opposed to only rules
    alpha: Regularization strength, will override max_rules parameter
    cv: Whether to use cross-validation scores to select the regularization strength
        the final regularization value out of all that satisfy max_rules. If False, the
        least regularization possible is used.
    random_state: Integer to initialise random objects and provide repeatability.
    tree_generator: Optional: this object will be used as provided to generate the rules.
        This will override almost all the other properties above.
        Must be GradientBoostingRegressor(), GradientBoostingClassifier(), or RandomForestRegressor()

    Attributes
    ----------
    rule_ensemble: RuleEnsemble
        The rule ensemble
    feature_names: list of strings, optional (default=None)
        The names of the features (columns)
    """

    def __init__(self,
                 n_estimators=100,
                 tree_size=4,
                 sample_fract='default',
                 max_rules=30,
                 memory_par=0.01,
                 tree_generator=None,
                 lin_trim_quantile=0.025,
                 lin_standardise=True,
                 exp_rand_tree_size=True,
                 include_linear=True,
                 alpha=None,
                 cv=True,
                 random_state=None):
        self.n_estimators = n_estimators
        self.tree_size = tree_size
        self.sample_fract = sample_fract
        self.max_rules = max_rules
        self.memory_par = memory_par
        self.tree_generator = tree_generator
        self.lin_trim_quantile = lin_trim_quantile
        self.lin_standardise = lin_standardise
        self.exp_rand_tree_size = exp_rand_tree_size
        self.include_linear = include_linear
        self.alpha = alpha
        self.cv = cv
        self.random_state = random_state
        self.winsorizer = Winsorizer(trim_quantile=self.lin_trim_quantile)
        self.friedscale = FriedScale(self.winsorizer)
        self.stddev = None
        self.mean = None
        self._init_prediction_task()  # decides between regressor and classifier

    def _init_prediction_task(self):
        """
        RuleFitRegressor and RuleFitClassifier override this method
        to alter the prediction task. When using this class directly,
        it is equivalent to RuleFitRegressor
        """
        self.prediction_task = 'regression'

    def fit(self, X, y=None, feature_names=None):
        """Fit and estimate linear combination of rule ensemble
        """
        if feature_names is None and isinstance(X, pd.DataFrame):
            feature_names = X.columns
        X, y = check_X_y(X, y)
        if self.prediction_task == 'classification':
            self.classes_ = unique_labels(y)
        self.n_features_in_ = X.shape[1]
        self.n_features_ = X.shape[1]
        self.feature_dict_ = get_feature_dict(X.shape[1], feature_names)
        self.feature_placeholders = np.array(list(self.feature_dict_.keys()))
        self.feature_names = np.array(list(self.feature_dict_.values()))
        extracted_rules = self._extract_rules(X, y)
        self.rules_without_feature_names_, self.coef, self.intercept = self._score_rules(X, y, extracted_rules)
        self.rules_ = [
            replace_feature_name(rule, self.feature_dict_) for rule in self.rules_without_feature_names_
        ]
        # count total rule terms, plus nonzero linear terms
        self.complexity_ = self._get_complexity()
        if self.include_linear:
            self.complexity_ += np.sum(
                np.array(self.coef[:X.shape[1]]) != 0)
        return self

    def predict_continuous_output(self, X):
        """Predict outcome of linear model for X
        """
        # isinstance instead of type == so DataFrame subclasses are handled too
        if isinstance(X, pd.DataFrame):
            X = X.values.astype(np.float32)
        y_pred = np.zeros(X.shape[0])
        y_pred += self.eval_weighted_rule_sum(X)
        if self.include_linear:
            if self.lin_standardise:
                X = self.friedscale.scale(X)
            y_pred += X @ self.coef[:X.shape[1]]
        return y_pred + self.intercept

    def predict(self, X):
        '''Predict. For regression returns continuous output.
        For classification, returns discrete output.
        '''
        check_is_fitted(self)
        X = check_array(X)
        if self.prediction_task == 'regression':
            return self.predict_continuous_output(X)
        else:
            return np.argmax(self.predict_proba(X), axis=1)

    def predict_proba(self, X):
        check_is_fitted(self)
        X = check_array(X)
        continuous_output = self.predict_continuous_output(X)
        logits = np.vstack((1 - continuous_output, continuous_output)).transpose()
        return softmax(logits, axis=1)

    def transform(self, X=None, rules=None):
        """Transform dataset.

        Parameters
        ----------
        X : array-like matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency.

        Returns
        -------
        X_transformed: matrix, shape=(n_samples, n_out)
            Transformed data set
        """
        df = pd.DataFrame(X, columns=self.feature_placeholders)
        X_transformed = np.zeros((X.shape[0], len(rules)))
        for i, r in enumerate(rules):
            features_r_uses = [term.split(' ')[0] for term in r.split(' and ')]
            X_transformed[df[features_r_uses].query(r).index.values, i] = 1
        return X_transformed

    def get_rules(self, exclude_zero_coef=False, subregion=None):
        """Return the estimated rules

        Parameters
        ----------
        exclude_zero_coef: If True, returns only the rules with an estimated
            coefficient not equal to zero (default False).
        subregion: If None (default) returns global importances (FP 2004 eq. 28/29), else returns importance over
            subregion of inputs (FP 2004 eq. 30/31/32).

        Returns
        -------
        rules: pandas.DataFrame with the rules. Column 'rule' describes the rule, 'coef' holds
            the coefficients and 'support' the support of the rule in the training
            data set (X)
        """
        n_features = len(self.coef) - len(self.rules_)
        rule_ensemble = list(self.rules_without_feature_names_)
        output_rules = []
        # Add coefficients for linear effects
        for i in range(0, n_features):
            if self.lin_standardise:
                coef = self.coef[i] * self.friedscale.scale_multipliers[i]
            else:
                coef = self.coef[i]
            if subregion is None:
                importance = abs(coef) * self.stddev[i]
            else:
                subregion = np.array(subregion)
                # Bug fix: the trimmed column must be an ndarray; subtracting a
                # scalar from a plain Python list raises TypeError.
                trimmed = np.array([x[i] for x in self.winsorizer.trim(subregion)])
                importance = sum(abs(coef) * abs(trimmed - self.mean[i])) / len(
                    subregion)
            output_rules += [(self.feature_names[i], 'linear', coef, 1, importance)]
        # Add rules
        for i in range(0, len(self.rules_)):
            rule = rule_ensemble[i]
            coef = self.coef[i + n_features]
            if subregion is None:
                importance = abs(coef) * (rule.support * (1 - rule.support)) ** (1 / 2)
            else:
                rkx = self.transform(subregion, [rule])[:, -1]
                importance = sum(abs(coef) * abs(rkx - rule.support)) / len(subregion)
            output_rules += [(self.rules_[i].rule, 'rule', coef, rule.support, importance)]
        rules = pd.DataFrame(output_rules, columns=["rule", "type", "coef", "support", "importance"])
        if exclude_zero_coef:
            # Bug fix: DataFrame.ix was removed in pandas 1.0; use .loc with a
            # boolean mask instead.
            rules = rules.loc[rules.coef != 0]
        return rules

    def visualize(self, decimals=2):
        # Show only rules with nonzero coefficients, highest support first.
        rules = self.get_rules()
        rules = rules[rules.coef != 0].sort_values("support", ascending=False)
        pd.set_option('display.max_colwidth', None)
        return rules[['rule', 'coef']].round(decimals)

    def __str__(self):
        return 'RuleFit:\n' + self.visualize().to_string(index=False) + '\n'

    def _extract_rules(self, X, y) -> List[Rule]:
        # Grow a gradient-boosted ensemble and convert tree paths to rules.
        return extract_rulefit(X, y,
                               feature_names=self.feature_placeholders,
                               n_estimators=self.n_estimators,
                               tree_size=self.tree_size,
                               memory_par=self.memory_par,
                               tree_generator=self.tree_generator,
                               exp_rand_tree_size=self.exp_rand_tree_size,
                               random_state=self.random_state)

    def _score_rules(self, X, y, rules) -> Tuple[List[Rule], List[float], float]:
        X_concat = np.zeros([X.shape[0], 0])
        # standardise linear variables if requested (for regression model only)
        if self.include_linear:
            # standard deviation and mean of winsorized features
            self.winsorizer.train(X)
            winsorized_X = self.winsorizer.trim(X)
            self.stddev = np.std(winsorized_X, axis=0)
            self.mean = np.mean(winsorized_X, axis=0)
            if self.lin_standardise:
                self.friedscale.train(X)
                X_regn = self.friedscale.scale(X)
            else:
                X_regn = X.copy()
            X_concat = np.concatenate((X_concat, X_regn), axis=1)
        X_rules = self.transform(X, rules)
        if X_rules.shape[0] > 0:
            X_concat = np.concatenate((X_concat, X_rules), axis=1)
        # no rules fit and self.include_linear == False
        if X_concat.shape[1] == 0:
            return [], [], 0
        return score_linear(X_concat, y, rules,
                            prediction_task=self.prediction_task,
                            max_rules=self.max_rules,
                            alpha=self.alpha,
                            cv=self.cv,
                            random_state=self.random_state)
class RuleFitRegressor(RuleFit, RegressorMixin):
    """RuleFit variant that produces continuous (regression) predictions."""

    def _init_prediction_task(self):
        # Selects the regression code paths in RuleFit.fit/predict.
        self.prediction_task = 'regression'
class RuleFitClassifier(RuleFit, ClassifierMixin):
    """RuleFit variant that produces discrete class predictions."""

    def _init_prediction_task(self):
        # Selects the classification code paths in RuleFit.fit/predict.
        self.prediction_task = 'classification'
| [
"imodels.util.rule.replace_feature_name",
"numpy.mean",
"pandas.set_option",
"imodels.util.transforms.Winsorizer",
"pandas.DataFrame",
"numpy.std",
"sklearn.utils.validation.check_array",
"imodels.util.rule.get_feature_dict",
"sklearn.utils.multiclass.unique_labels",
"scipy.special.softmax",
"im... | [((4870, 4918), 'imodels.util.transforms.Winsorizer', 'Winsorizer', ([], {'trim_quantile': 'self.lin_trim_quantile'}), '(trim_quantile=self.lin_trim_quantile)\n', (4880, 4918), False, 'from imodels.util.transforms import Winsorizer, FriedScale\n'), ((4945, 4972), 'imodels.util.transforms.FriedScale', 'FriedScale', (['self.winsorizer'], {}), '(self.winsorizer)\n', (4955, 4972), False, 'from imodels.util.transforms import Winsorizer, FriedScale\n'), ((5644, 5659), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (5653, 5659), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((5867, 5910), 'imodels.util.rule.get_feature_dict', 'get_feature_dict', (['X.shape[1]', 'feature_names'], {}), '(X.shape[1], feature_names)\n', (5883, 5910), False, 'from imodels.util.rule import get_feature_dict, replace_feature_name, Rule\n'), ((6829, 6849), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (6837, 6849), True, 'import numpy as np\n'), ((7264, 7285), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (7279, 7285), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((7298, 7312), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (7309, 7312), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((7530, 7551), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (7545, 7551), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((7564, 7578), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (7575, 7578), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((7739, 7762), 'scipy.special.softmax', 'softmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (7746, 7762), False, 'from scipy.special import 
softmax\n'), ((8189, 8239), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'self.feature_placeholders'}), '(X, columns=self.feature_placeholders)\n', (8201, 8239), True, 'import pandas as pd\n'), ((10590, 10679), 'pandas.DataFrame', 'pd.DataFrame', (['output_rules'], {'columns': "['rule', 'type', 'coef', 'support', 'importance']"}), "(output_rules, columns=['rule', 'type', 'coef', 'support',\n 'importance'])\n", (10602, 10679), True, 'import pandas as pd\n'), ((10931, 10974), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', 'None'], {}), "('display.max_colwidth', None)\n", (10944, 10974), True, 'import pandas as pd\n'), ((11197, 11472), 'imodels.util.extract.extract_rulefit', 'extract_rulefit', (['X', 'y'], {'feature_names': 'self.feature_placeholders', 'n_estimators': 'self.n_estimators', 'tree_size': 'self.tree_size', 'memory_par': 'self.memory_par', 'tree_generator': 'self.tree_generator', 'exp_rand_tree_size': 'self.exp_rand_tree_size', 'random_state': 'self.random_state'}), '(X, y, feature_names=self.feature_placeholders, n_estimators\n =self.n_estimators, tree_size=self.tree_size, memory_par=self.\n memory_par, tree_generator=self.tree_generator, exp_rand_tree_size=self\n .exp_rand_tree_size, random_state=self.random_state)\n', (11212, 11472), False, 'from imodels.util.extract import extract_rulefit\n'), ((11777, 11802), 'numpy.zeros', 'np.zeros', (['[X.shape[0], 0]'], {}), '([X.shape[0], 0])\n', (11785, 11802), True, 'import numpy as np\n'), ((12707, 12874), 'imodels.util.score.score_linear', 'score_linear', (['X_concat', 'y', 'rules'], {'prediction_task': 'self.prediction_task', 'max_rules': 'self.max_rules', 'alpha': 'self.alpha', 'cv': 'self.cv', 'random_state': 'self.random_state'}), '(X_concat, y, rules, prediction_task=self.prediction_task,\n max_rules=self.max_rules, alpha=self.alpha, cv=self.cv, random_state=\n self.random_state)\n', (12719, 12874), False, 'from imodels.util.score import score_linear\n'), ((5741, 5757), 
'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y'], {}), '(y)\n', (5754, 5757), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((6263, 6309), 'imodels.util.rule.replace_feature_name', 'replace_feature_name', (['rule', 'self.feature_dict_'], {}), '(rule, self.feature_dict_)\n', (6283, 6309), False, 'from imodels.util.rule import get_feature_dict, replace_feature_name, Rule\n'), ((12096, 12124), 'numpy.std', 'np.std', (['winsorized_X'], {'axis': '(0)'}), '(winsorized_X, axis=0)\n', (12102, 12124), True, 'import numpy as np\n'), ((12149, 12178), 'numpy.mean', 'np.mean', (['winsorized_X'], {'axis': '(0)'}), '(winsorized_X, axis=0)\n', (12156, 12178), True, 'import numpy as np\n'), ((12383, 12425), 'numpy.concatenate', 'np.concatenate', (['(X_concat, X_regn)'], {'axis': '(1)'}), '((X_concat, X_regn), axis=1)\n', (12397, 12425), True, 'import numpy as np\n'), ((12526, 12569), 'numpy.concatenate', 'np.concatenate', (['(X_concat, X_rules)'], {'axis': '(1)'}), '((X_concat, X_rules), axis=1)\n', (12540, 12569), True, 'import numpy as np\n'), ((7658, 7711), 'numpy.vstack', 'np.vstack', (['(1 - continuous_output, continuous_output)'], {}), '((1 - continuous_output, continuous_output))\n', (7667, 7711), True, 'import numpy as np\n'), ((9786, 9805), 'numpy.array', 'np.array', (['subregion'], {}), '(subregion)\n', (9794, 9805), True, 'import numpy as np\n'), ((6565, 6597), 'numpy.array', 'np.array', (['self.coef[:X.shape[1]]'], {}), '(self.coef[:X.shape[1]])\n', (6573, 6597), True, 'import numpy as np\n')] |
# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.21.*, run `git checkout -b 0.21` or switch to the `0.21` branch on GitHub)
import cv2
import numpy as np
import math
from .bbox import BoundBox, bbox_iou
from scipy.special import expit
def _sigmoid(x):
    """Element-wise logistic sigmoid, delegated to scipy's stable `expit`."""
    return expit(x)
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
    """Map box coordinates from letterboxed network space back to image pixels.

    Mutates each box in `boxes` in place, converting the fractional
    (xmin, ymin, xmax, ymax) coordinates into integer pixel coordinates of
    the original `image_w` x `image_h` image.
    """
    # Recompute the aspect-preserving letterbox size used at preprocessing.
    if (float(net_w) / image_w) < (float(net_h) / image_h):
        new_w = net_w
        new_h = (image_h * net_w) / image_w
    else:
        # Bug fix: this branch previously assigned new_h = net_w, which is
        # wrong whenever the network input is not square.
        new_h = net_h
        new_w = (image_w * net_h) / image_h
    # Offsets/scales are loop-invariant; compute them once.
    x_offset, x_scale = (net_w - new_w) / 2.0 / net_w, float(new_w) / net_w
    y_offset, y_scale = (net_h - new_h) / 2.0 / net_h, float(new_h) / net_h
    for box in boxes:
        box.xmin = int((box.xmin - x_offset) / x_scale * image_w)
        box.xmax = int((box.xmax - x_offset) / x_scale * image_w)
        box.ymin = int((box.ymin - y_offset) / y_scale * image_h)
        box.ymax = int((box.ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
    """Non-maximal suppression: zero out class scores of boxes that overlap a
    higher-scored box by at least `nms_thresh` IoU. Mutates `boxes` in place."""
    if not boxes:
        return
    n_classes = len(boxes[0].classes)
    for c in range(n_classes):
        # Indices ordered from highest to lowest score for this class.
        order = np.argsort([-b.classes[c] for b in boxes])
        for pos, idx_i in enumerate(order):
            if boxes[idx_i].classes[c] == 0:
                continue
            for idx_j in order[pos + 1:]:
                if bbox_iou(boxes[idx_i], boxes[idx_j]) >= nms_thresh:
                    boxes[idx_j].classes[c] = 0
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
    """Turn one raw YOLO output grid into a list of candidate BoundBox objects.

    Applies sigmoid/softmax activations in place, suppresses class scores at
    or below `obj_thresh`, and converts cell-relative coordinates into
    fractions of the network input size.
    """
    grid_h, grid_w = netout.shape[:2]
    num_anchors = 3
    netout = netout.reshape((grid_h, grid_w, num_anchors, -1))
    boxes = []
    # Activations: sigmoid on the x/y offsets and objectness, softmax on the
    # class scores (weighted by objectness).
    netout[..., :2] = _sigmoid(netout[..., :2])
    netout[..., 4] = _sigmoid(netout[..., 4])
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
    netout[..., 5:] *= netout[..., 5:] > obj_thresh
    for row in range(grid_h):
        for col in range(grid_w):
            for b in range(num_anchors):
                cell = netout[row, col, b]
                # 4th element is the objectness score
                objectness = cell[4]
                if objectness <= obj_thresh:
                    continue
                # first 4 elements are x, y, w, and h
                x, y, w, h = cell[:4]
                x = (col + x) / grid_w  # center position, unit: image width
                y = (row + y) / grid_h  # center position, unit: image height
                w = anchors[2 * b] * np.exp(w) / net_w  # unit: image width
                h = anchors[2 * b + 1] * np.exp(h) / net_h  # unit: image height
                # remaining elements are class probabilities
                boxes.append(BoundBox(x - w / 2, y - h / 2,
                                      x + w / 2, y + h / 2,
                                      objectness, cell[5:]))
    return boxes
def preprocess_input(image, net_h, net_w):
    """Letterbox `image` into a float array of shape (1, net_h, net_w, 3).

    The channel order is reversed (BGR -> RGB for OpenCV-style images —
    confirm against the caller), pixel values are scaled to [0, 1], and the
    resized image is centered on a 0.5-gray canvas of the network input size.
    """
    img_h, img_w, _ = image.shape
    # Pick the scaled size that fits inside (net_w, net_h) while keeping
    # the original aspect ratio.
    if float(net_w) / img_w < float(net_h) / img_h:
        fit_h = (img_h * net_w) // img_w
        fit_w = net_w
    else:
        fit_w = (img_w * net_h) // img_h
        fit_h = net_h
    # Reverse channels and normalize before resizing.
    resized = cv2.resize(image[:, :, ::-1] / 255.0, (fit_w, fit_h))
    # Embed the resized image into the standard letter box.
    canvas = np.ones((net_h, net_w, 3)) * 0.5
    top = (net_h - fit_h) // 2
    left = (net_w - fit_w) // 2
    canvas[top:top + fit_h, left:left + fit_w, :] = resized
    return np.expand_dims(canvas, 0)
def get_yolo_boxes(
    model, image, net_h, net_w, anchors, obj_thresh, nms_thresh, classes, tensorflow_model=True
):
    """Run `model` on a single image and return NMS-filtered bounding boxes."""
    image_h, image_w, _ = image.shape
    batch_input = np.zeros((1, net_h, net_w, 3))
    batch_input[0] = preprocess_input(image, net_h, net_w)
    # run the prediction
    if tensorflow_model:
        output = model.predict({"input_1": batch_input})
        yolos = [output["conv_81"], output["conv_93"], output["conv_105"]]
        filters = 3 * (5 + classes)
        # Serving output arrives flattened; restore (grid, grid, filters).
        for i, flat in enumerate(yolos):
            grid = int(math.sqrt(len(flat) / filters))
            yolos[i] = np.array(flat).reshape((grid, grid, filters))
    else:
        output = model.predict_on_batch(batch_input)
        yolos = [output[0][0], output[1][0], output[2][0]]
    boxes = []
    # Decode each output scale with its own slice of the anchor list.
    for scale, yolo in enumerate(yolos):
        scale_anchors = anchors[(2 - scale) * 6:(3 - scale) * 6]
        boxes += decode_netout(yolo, scale_anchors, obj_thresh, net_h, net_w)
    # Rescale boxes to original image coordinates, then suppress overlaps.
    correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
    do_nms(boxes, nms_thresh)
    return boxes
def _softmax(x, axis=-1):
    """Softmax along `axis`, shifted by the max for numerical stability."""
    shifted = np.exp(x - np.amax(x, axis, keepdims=True))
    return shifted / shifted.sum(axis, keepdims=True)
| [
"math.sqrt",
"numpy.zeros",
"numpy.expand_dims",
"numpy.ones",
"scipy.special.expit",
"numpy.argsort",
"numpy.amax",
"numpy.array",
"numpy.exp",
"cv2.resize"
] | [((356, 364), 'scipy.special.expit', 'expit', (['x'], {}), '(x)\n', (361, 364), False, 'from scipy.special import expit\n'), ((3490, 3543), 'cv2.resize', 'cv2.resize', (['(image[:, :, ::-1] / 255.0)', '(new_w, new_h)'], {}), '(image[:, :, ::-1] / 255.0, (new_w, new_h))\n', (3500, 3543), False, 'import cv2\n'), ((3792, 3820), 'numpy.expand_dims', 'np.expand_dims', (['new_image', '(0)'], {}), '(new_image, 0)\n', (3806, 3820), True, 'import numpy as np\n'), ((4047, 4077), 'numpy.zeros', 'np.zeros', (['(1, net_h, net_w, 3)'], {}), '((1, net_h, net_w, 3))\n', (4055, 4077), True, 'import numpy as np\n'), ((5215, 5224), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (5221, 5224), True, 'import numpy as np\n'), ((1308, 1356), 'numpy.argsort', 'np.argsort', (['[(-box.classes[c]) for box in boxes]'], {}), '([(-box.classes[c]) for box in boxes])\n', (1318, 1356), True, 'import numpy as np\n'), ((3612, 3638), 'numpy.ones', 'np.ones', (['(net_h, net_w, 3)'], {}), '((net_h, net_w, 3))\n', (3619, 3638), True, 'import numpy as np\n'), ((5173, 5204), 'numpy.amax', 'np.amax', (['x', 'axis'], {'keepdims': '(True)'}), '(x, axis, keepdims=True)\n', (5180, 5204), True, 'import numpy as np\n'), ((4454, 4481), 'math.sqrt', 'math.sqrt', (['(length / filters)'], {}), '(length / filters)\n', (4463, 4481), False, 'import math\n'), ((2765, 2774), 'numpy.exp', 'np.exp', (['w'], {}), '(w)\n', (2771, 2774), True, 'import numpy as np\n'), ((2841, 2850), 'numpy.exp', 'np.exp', (['h'], {}), '(h)\n', (2847, 2850), True, 'import numpy as np\n'), ((4506, 4524), 'numpy.array', 'np.array', (['yolos[i]'], {}), '(yolos[i])\n', (4514, 4524), True, 'import numpy as np\n')] |
"""
Module containing earth model classes.
The earth models define density as a function of radius and provide a simple
integrator for calculation of the column density along a straight path through
the Earth.
"""
import logging
import numpy as np
from pyrex.internal_functions import normalize
logger = logging.getLogger(__name__)
class PREM:
    """
    Class describing the Earth's density.

    Uses densities from the Preliminary reference Earth Model (PREM).

    Attributes
    ----------
    earth_radius : float
        Mean radius of the Earth (m).
    radii : tuple
        Boundary radii (m) at which the functional form of the density of the
        Earth changes. The density function in `densities` at index `i`
        corresponds to the radius range from radius at index `i-1` to radius
        at index `i`.
    densities : tuple
        Functions which calculate the density of the Earth (g/cm^3) in a
        specific radius range as described by `radii`. The parameter of each
        function is the fractional radius, e.g. radius divided by
        `earth_radius`. Scalar values denote constant density over the range of
        radii.

    Notes
    -----
    The density calculation is based on the Preliminary reference Earth Model
    [1]_.

    References
    ----------
    .. [1] A. M. Dziewonski & D. L. Anderson, "Preliminary reference Earth
        model." Physics of the Earth and Planetary Interiors **25**, 297-356
        (1981). :doi:`10.1016/0031-9201(81)90046-7`
    """
    earth_radius = 6.3710e6

    radii = (1.2215e6, 3.4800e6, 5.7010e6, 5.7710e6, 5.9710e6,
             6.1510e6, 6.3466e6, 6.3560e6, 6.3680e6, earth_radius)

    densities = (
        lambda x: 13.0885 - 8.8381*x**2,
        lambda x: 12.5815 - 1.2638*x - 3.6426*x**2 - 5.5281*x**3,
        lambda x: 7.9565 - 6.4761*x + 5.5283*x**2 - 3.0807*x**3,
        lambda x: 5.3197 - 1.4836*x,
        lambda x: 11.2494 - 8.0298*x,
        lambda x: 7.1089 - 3.8045*x,
        lambda x: 2.691 + 0.6924*x,
        2.9,
        2.6,
        1.02
    )

    def density(self, r):
        """
        Calculates the Earth's density at a given radius.

        Supports passing an array of radii or a single radius.

        Parameters
        ----------
        r : array_like
            Radius (m) at which to calculate density.

        Returns
        -------
        array_like
            Density (g/cm^3) of the Earth at the given radii.
        """
        r = np.array(r)
        radius_bounds = np.concatenate(([0], self.radii))
        # One boolean mask per radius shell; piecewise applies the matching
        # density function (or scalar) to each element of r.
        conditions = list((lower<=r) & (r<upper) for lower, upper in
                          zip(radius_bounds[:-1], radius_bounds[1:]))
        return np.piecewise(r/self.earth_radius, conditions, self.densities)

    def slant_depth(self, endpoint, direction, step=500):
        """
        Calculates the column density of a chord cutting through Earth.

        Integrates the Earth's density along the chord, resulting in a column
        density (or material thickness) with units of mass per area.

        Parameters
        ----------
        endpoint : array_like
            Vector position (m) of the chord endpoint, in a coordinate system
            centered on the surface of the Earth (e.g. a negative third
            coordinate represents the depth below the surface).
        direction : array_like
            Vector direction of the chord, in a coordinate system
            centered on the surface of the Earth (e.g. a negative third
            coordinate represents the chord pointing into the Earth).
        step : float, optional
            Step size (m) for the density integration.

        Returns
        -------
        float
            Column density (g/cm^2) along the chord starting at `endpoint` and
            passing through the Earth at the given `direction`.

        See Also
        --------
        PREM.density : Calculates the Earth's density at a given radius.
        """
        # Convert to Earth-centric coordinate system (e.g. center of the Earth
        # is at (0, 0, 0))
        endpoint = np.array([endpoint[0], endpoint[1],
                             endpoint[2]+self.earth_radius])
        direction = normalize(direction)
        dot_prod = np.dot(endpoint, direction)
        # Check for intersection of line and sphere
        discriminant = dot_prod**2 - np.sum(endpoint**2) + self.earth_radius**2
        if discriminant<=0:
            return 0
        # Calculate the distance at which the line intersects the sphere
        distance = -dot_prod + np.sqrt(discriminant)
        if distance<=0:
            return 0
        # Number of integration intervals, each no larger than `step`.
        n_steps = int(distance/step)
        if distance%step:
            n_steps += 1
        # Bug fix: integrating over n_steps intervals requires n_steps+1
        # sample points. Passing n_steps points made each step larger than
        # `step`, and chords shorter than `step` collapsed to a single point,
        # which made np.trapz return 0.
        ts = np.linspace(0, 1, n_steps+1)
        xs = endpoint[0] + ts * distance * direction[0]
        ys = endpoint[1] + ts * distance * direction[1]
        zs = endpoint[2] + ts * distance * direction[2]
        rs = np.sqrt(xs**2 + ys**2 + zs**2)
        rhos = self.density(rs)
        # Integrate the density times the distance along the chord
        # (factor 100 converts m * g/cm^3 to g/cm^2)
        return 100 * np.trapz(rhos*distance, ts)
class CoreMantleCrustModel(PREM):
    """
    Class describing the Earth's density.

    Uses densities from the Core-Mantle-Crust model as implemented in AraSim:
    three constant-density shells (core, mantle, and a 40 km crust).

    Attributes
    ----------
    earth_radius : float
        Mean radius of the Earth (m).
    radii : tuple
        Boundary radii (m) delimiting the core, mantle, and crust shells.
        The density in `densities` at index `i` applies from the radius at
        index `i-1` to the radius at index `i`.
    densities : tuple
        Constant densities (g/cm^3) of the core, mantle, and crust shells.
    """
    earth_radius = 6.378140e6
    # Core out to sqrt(1.2e13) m, mantle up to 40 km below the surface,
    # crust above that.
    radii = (np.sqrt(1.2e13), earth_radius - 4e4, earth_radius)
    densities = (14, 3.4, 2.9)
# Preferred earth model: module-level default PREM instance shared by importers.
earth = PREM()
| [
"numpy.trapz",
"numpy.sum",
"logging.getLogger",
"pyrex.internal_functions.normalize",
"numpy.array",
"numpy.linspace",
"numpy.dot",
"numpy.piecewise",
"numpy.concatenate",
"numpy.sqrt"
] | [((308, 335), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (325, 335), False, 'import logging\n'), ((2451, 2462), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (2459, 2462), True, 'import numpy as np\n'), ((2487, 2520), 'numpy.concatenate', 'np.concatenate', (['([0], self.radii)'], {}), '(([0], self.radii))\n', (2501, 2520), True, 'import numpy as np\n'), ((2675, 2738), 'numpy.piecewise', 'np.piecewise', (['(r / self.earth_radius)', 'conditions', 'self.densities'], {}), '(r / self.earth_radius, conditions, self.densities)\n', (2687, 2738), True, 'import numpy as np\n'), ((4073, 4142), 'numpy.array', 'np.array', (['[endpoint[0], endpoint[1], endpoint[2] + self.earth_radius]'], {}), '([endpoint[0], endpoint[1], endpoint[2] + self.earth_radius])\n', (4081, 4142), True, 'import numpy as np\n'), ((4190, 4210), 'pyrex.internal_functions.normalize', 'normalize', (['direction'], {}), '(direction)\n', (4199, 4210), False, 'from pyrex.internal_functions import normalize\n'), ((4230, 4257), 'numpy.dot', 'np.dot', (['endpoint', 'direction'], {}), '(endpoint, direction)\n', (4236, 4257), True, 'import numpy as np\n'), ((4833, 4859), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_steps'], {}), '(0, 1, n_steps)\n', (4844, 4859), True, 'import numpy as np\n'), ((5041, 5077), 'numpy.sqrt', 'np.sqrt', (['(xs ** 2 + ys ** 2 + zs ** 2)'], {}), '(xs ** 2 + ys ** 2 + zs ** 2)\n', (5048, 5077), True, 'import numpy as np\n'), ((6133, 6158), 'numpy.sqrt', 'np.sqrt', (['(12000000000000.0)'], {}), '(12000000000000.0)\n', (6140, 6158), True, 'import numpy as np\n'), ((4543, 4564), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (4550, 4564), True, 'import numpy as np\n'), ((5192, 5221), 'numpy.trapz', 'np.trapz', (['(rhos * distance)', 'ts'], {}), '(rhos * distance, ts)\n', (5200, 5221), True, 'import numpy as np\n'), ((4347, 4368), 'numpy.sum', 'np.sum', (['(endpoint ** 2)'], {}), '(endpoint ** 2)\n', (4353, 4368), True, 
'import numpy as np\n')] |
import numpy as np
import pandas as pd
import copy
class ModelStacker:
    def __init__(self):
        """Initialize an empty stacker with no base or final models."""
        # Base learners keyed "model_0", "model_1", ... in insertion order.
        self.base_models = {}
        # Final estimator trained on the base models' out-of-fold predictions.
        self.stacked_model = None
        # Presumably flipped to True once fit() completes — confirm in fit.
        self.fitted = False
def add_base_model(self, model):
"""
model: model object, preferably sklearn
adds model objects for stacking
"""
if not hasattr(model, "fit"):
raise ValueError("Add method only takes in a model object which has fit method, such as models from sklearn or xgboost")
temp_idx = len(self.base_models)
self.base_models['model_' + str(temp_idx)] = model
def add_stacked_model(self, model):
"""
model: model object, preferably sklearn
adds a model object for final prediction
"""
if not hasattr(model, "fit"):
raise ValueError("Add method only takes in a model object which has fit method, such as models from sklearn or xgboost")
self.stacked_model = model
def fit(self, X, Y, shuffle=True, seed=0, folds=5, new_features=False):
"""
X: pandas dataframe or numpy matrix
Independent variables to be trained on
Y: pandas series or numpy array
Dependent variables to be trained on
shuffle: boolean
True if want to shuffle data before stacking
folds: int
cross validation folds for stacking
test: float
0 if does not want to split current dataset into training and test set
returns new feature columns by stacking, number of feature columns will correspond to the number of models
"""
if self.stacked_model is None:
raise ValueError("Stacked model has not been chosen")
if len(self.base_models) <= 1:
raise Exception("Add more than 1 model for stacking to make sense")
if isinstance(X, pd.DataFrame):
X = X.values
if isinstance(Y, pd.Series):
Y = Y.values
if np.isnan(np.sum(X)):
raise ValueError("X contains null values")
if np.isnan(np.sum(Y)):
raise ValueError("Y contains null vlaues")
if len(X) != len(Y):
raise ValueError("Number of training samples must be equal to the number of labels")
if not isinstance(shuffle, bool):
raise ValueError("shuffle should be a boolean")
if not isinstance(seed, int):
raise ValueError("seed should be an integer")
if not isinstance(folds, int):
raise ValueError("folds should be an integer")
if folds < 2:
raise ValueError("folds should be 2 or more")
if shuffle:
combined = np.hstack((X, Y.reshape(X.shape[0], 1)))
np.random.seed(seed)
np.random.shuffle(combined)
X = combined[:, :-1]
Y = combined[:, -1]
del combined
# Validation splits
X_split = np.array(np.array_split(X, folds))
Y_split = np.array(np.array_split(Y, folds))
assert len(X_split) == len(Y_split)
# Stacking Starts and Concatenating Features Generated
index_lst = list(range(len(X_split)))
initial_col = X.shape[1]
X_stacked = X.copy()
for key, mod in list(self.base_models.items()):
mod_pred = []
for idx, X_chunk in enumerate(X_split):
temp_indices = index_lst.copy()
temp_indices.remove(idx)
temp_mod = copy.deepcopy(mod)
tmp_xsplit = np.array([j for i in X_split[temp_indices] for j in i])
tmp_ysplit = np.array([j for i in Y_split[temp_indices] for j in i])
temp_mod.fit(tmp_xsplit, tmp_ysplit)
mod_pred.extend(list(temp_mod.predict(X_chunk)))
X_stacked = np.hstack((X_stacked, np.array(mod_pred).reshape((-1, 1))))
then = X_stacked.shape[1] - initial_col
assert then == len(self.base_models)
# Fit All Base Models to All Training Data
for key, mod in self.base_models.items():
mod.fit(X, Y)
self.base_models[key] = mod
self.stacked_model.fit(X_stacked, Y)
self.fitted = True
if new_features:
return X_stacked
def predict(self, X_test):
"""
X_test: pandas dataframe, numpy matrix
dataset with independent variables and previously added features through stacking to be predicted
returns predictions by the final model of stacking
"""
if self.stacked_model is None:
raise ValueError("Stacked model has not been chosen")
if len(self.base_models) <= 1:
raise Exception("Add more than 1 model for stacking to make sense")
if isinstance(X_test, pd.DataFrame):
X_test = X_test.values
if np.isnan(np.sum(X_test)):
raise ValueError("X_test contains null values")
if not self.fitted:
raise Exception("Base Models and Stacked Model have not been fitted")
initial_col = X_test.shape[1]
X_test_copy = X_test.copy()
for mod in self.base_models.values():
X_test_copy = np.hstack((X_test_copy, mod.predict(X_test).reshape((-1, 1))))
then = X_test_copy.shape[1] - initial_col
assert then == len(self.base_models)
return self.stacked_model.predict(X_test_copy) | [
"copy.deepcopy",
"numpy.random.seed",
"numpy.sum",
"numpy.array",
"numpy.array_split",
"numpy.random.shuffle"
] | [((2007, 2016), 'numpy.sum', 'np.sum', (['X'], {}), '(X)\n', (2013, 2016), True, 'import numpy as np\n'), ((2094, 2103), 'numpy.sum', 'np.sum', (['Y'], {}), '(Y)\n', (2100, 2103), True, 'import numpy as np\n'), ((2759, 2779), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2773, 2779), True, 'import numpy as np\n'), ((2792, 2819), 'numpy.random.shuffle', 'np.random.shuffle', (['combined'], {}), '(combined)\n', (2809, 2819), True, 'import numpy as np\n'), ((2965, 2989), 'numpy.array_split', 'np.array_split', (['X', 'folds'], {}), '(X, folds)\n', (2979, 2989), True, 'import numpy as np\n'), ((3018, 3042), 'numpy.array_split', 'np.array_split', (['Y', 'folds'], {}), '(Y, folds)\n', (3032, 3042), True, 'import numpy as np\n'), ((4890, 4904), 'numpy.sum', 'np.sum', (['X_test'], {}), '(X_test)\n', (4896, 4904), True, 'import numpy as np\n'), ((3509, 3527), 'copy.deepcopy', 'copy.deepcopy', (['mod'], {}), '(mod)\n', (3522, 3527), False, 'import copy\n'), ((3557, 3612), 'numpy.array', 'np.array', (['[j for i in X_split[temp_indices] for j in i]'], {}), '([j for i in X_split[temp_indices] for j in i])\n', (3565, 3612), True, 'import numpy as np\n'), ((3642, 3697), 'numpy.array', 'np.array', (['[j for i in Y_split[temp_indices] for j in i]'], {}), '([j for i in Y_split[temp_indices] for j in i])\n', (3650, 3697), True, 'import numpy as np\n'), ((3862, 3880), 'numpy.array', 'np.array', (['mod_pred'], {}), '(mod_pred)\n', (3870, 3880), True, 'import numpy as np\n')] |
from typing import Dict, Any
from configparser import ConfigParser, ExtendedInterpolation
from numpy import random as np_random
import torch
import random
def set_seed(seed):
    """Seed every relevant RNG (PyTorch CPU/CUDA, NumPy, Python) for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # silently ignored when CUDA is unavailable
    np_random.seed(seed)
    random.seed(seed)
def to_cuda(data):
    """Move a tensor, or a tuple/list of tensors, onto the GPU.

    :param data: a ``torch.Tensor`` or a tuple/list of tensors
    :return: the tensor on CUDA, or a list of CUDA tensors
    :raises RuntimeError: if ``data`` is not a tensor or tuple/list of tensors
    """
    # Generalized: lists are accepted alongside tuples (previously an error).
    if isinstance(data, (tuple, list)):
        return [d.cuda() for d in data]
    if isinstance(data, torch.Tensor):
        return data.cuda()
    # The original raised a bare RuntimeError with no message.
    raise RuntimeError(
        "to_cuda expects a torch.Tensor or a tuple/list of tensors, "
        "got {}".format(type(data).__name__))
def load_config(config_file: str) -> dict:
    '''
    Parse an INI-style config file into a nested dict, eval-ing each value.

    config example:
        # This is a comment
        [section]   # section
        a = 5       # int
        b = 3.1415  # float
        s = 'abc'   # str
        lst = [3, 4, 5] # list
    it will ouput:
        (dict) {'section': {'a': 5, 'b':3.1415, 's': 'abc', 'lst':[3, 4, 5]}
    Empty values become None; values that fail to eval (bare words) are kept
    as raw strings instead of being dropped.
    '''
    config = ConfigParser(interpolation=ExtendedInterpolation())
    # fix the problem of automatic lowercase
    config.optionxform = lambda option: option  # type: ignore
    config.read(config_file)
    config_dct: Dict[str, Dict] = dict()
    for section in config.sections():
        tmp_dct: Dict[str, Any] = dict()
        for key, value in config.items(section):
            if value == '':  # allow no value
                tmp_dct[key] = None
                continue
            try:
                # SECURITY: eval executes arbitrary code — only use on
                # trusted configuration files.
                tmp_dct[key] = eval(value)
            except NameError:
                print("Note the configuration file format!")
                # Previously the key was silently dropped here, causing
                # KeyErrors downstream; keep the raw string instead.
                tmp_dct[key] = value
        config_dct[section] = tmp_dct
    return config_dct
| [
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"random.seed",
"configparser.ExtendedInterpolation"
] | [((183, 200), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (194, 200), False, 'import random\n'), ((205, 225), 'numpy.random.seed', 'np_random.seed', (['seed'], {}), '(seed)\n', (219, 225), True, 'from numpy import random as np_random\n'), ((230, 258), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (252, 258), False, 'import torch\n'), ((263, 286), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (280, 286), False, 'import torch\n'), ((856, 879), 'configparser.ExtendedInterpolation', 'ExtendedInterpolation', ([], {}), '()\n', (877, 879), False, 'from configparser import ConfigParser, ExtendedInterpolation\n')] |
import numpy as np
from classifiers.linear_svm import *
from classifiers.softmax import *
class LinearClassifier(object):
    """Abstract multiclass linear classifier trained with minibatch SGD.

    Subclasses implement ``loss`` (hinge or softmax) and inherit the
    training / prediction machinery.
    """

    def __init__(self, batch_size=200, n_iters=1000, log_iters=100,
                 learning_rate=1e-3, regularization=5e-4):
        """
        Configure stochastic gradient descent for this linear classifier.
        :param batch_size: (integer) number of training examples to use at each step, default 200
        :param n_iters: (integer) number of steps to take when optimizing, default 1000
        :param log_iters: (integer) number of steps between progress logs, default 100
        :param learning_rate: (float) learning rate for optimization, default 1e-3
        :param regularization: (float) regularization strength, default 5e-4
        """
        self.batch_size = batch_size
        self.n_iters = n_iters
        self.log_iters = log_iters
        self.lr = learning_rate
        self.reg = regularization
        self.x = None        # images
        self.y = None        # labels
        self.n_classes = 0   # the number of classes
        self.W = None        # weight matrix of shape (D, C), lazily initialized

    def train(self, x, y):
        """
        Train this linear classifier using stochastic gradient descent.
        :param x: A numpy array of shape (N, D) containing training data;
                  there are N training samples each of dimension D.
        :param y: A numpy array of shape (N,) containing training labels;
                  y[i] = c means that X[i] has label 0 <= c < C for C classes.
        :return: A list containing the value of the loss function at each training iteration.
        """
        n_train, dim = x.shape
        # Assume y takes values 0...K-1 where K is the number of classes.
        self.n_classes = int(np.max(y) + 1)
        # BUGFIX: `if not self.W` raises "truth value of an array is
        # ambiguous" on any second call to train(); compare with None.
        if self.W is None:
            self.W = np.random.randn(dim, self.n_classes) * 0.001  # lazily initialize W
        # Run stochastic gradient descent to optimize W.
        losses = []
        for i in range(self.n_iters):
            # Sample a minibatch with replacement (faster than without).
            rand_idx = np.random.choice(n_train, self.batch_size, replace=True)
            x_batch = x[rand_idx]
            y_batch = y[rand_idx]
            # Compute loss & gradient, then take one SGD step.
            loss, gradient = self.loss(x_batch, y_batch, self.reg)
            losses.append(loss)
            self.W -= self.lr * gradient
            if i % self.log_iters == 0:
                print("[+] Iter {}, loss : {}".format(i, loss))
        return losses

    def predict(self, x):
        """
        Use the trained weights of this linear classifier to predict labels for data points.
        :param x: A numpy array of shape (N, D) containing training data;
                  there are N training samples each of dimension D.
        :return: Predicted labels for the data in X; a 1-dimensional array of
                 length N where each element is the predicted class index.
        """
        scores = np.dot(x, self.W)
        return np.argmax(scores, axis=1)

    def loss(self, x_batch, y_batch, reg):
        """
        Compute the loss function and its derivative. Subclasses will override this.
        :param x_batch: A numpy array of shape (N, D) containing a minibatch of N data points;
                        each point has dimension D.
        :param y_batch: A numpy array of shape (N,) containing labels for the mini-batch.
        :param reg: (float) regularization strength.
        :return: loss (single float), gradient (an array of same shape as W)
        """
        pass
class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """

    def loss(self, x_batch, y_batch, reg):
        # Delegate to the vectorized hinge-loss implementation; expected to
        # return (loss, gradient) as consumed by LinearClassifier.train.
        return svm_loss_vectorized(self.W, x_batch, y_batch, reg)
class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """

    def loss(self, x_batch, y_batch, reg):
        # Delegate to the vectorized softmax/cross-entropy implementation;
        # expected to return (loss, gradient) as consumed by train().
        return softmax_loss_vectorized(self.W, x_batch, y_batch, reg)
| [
"numpy.random.randn",
"numpy.argmax",
"numpy.max",
"numpy.arange",
"numpy.dot"
] | [((5069, 5086), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (5075, 5086), True, 'import numpy as np\n'), ((5110, 5135), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (5119, 5135), True, 'import numpy as np\n'), ((1588, 1597), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1594, 1597), True, 'import numpy as np\n'), ((1710, 1746), 'numpy.random.randn', 'np.random.randn', (['dim', 'self.n_classes'], {}), '(dim, self.n_classes)\n', (1725, 1746), True, 'import numpy as np\n'), ((2918, 2936), 'numpy.arange', 'np.arange', (['n_train'], {}), '(n_train)\n', (2927, 2936), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# External Libraries
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
from PIL import Image
import numpy as np
import torch
# Standard Libraries
from os import path, makedirs
import copy
# Modules
from model.utils import udata, umath
from model.ml.esr_9 import ESR
from ensemble_network import Ensemble
def evaluate(val_model_eval, val_loader_eval, val_criterion_eval, device_to_process="cpu",
             current_branch_on_training_val=0):
    """
    Evaluate the ensemble on the validation set.

    Returns per-branch average losses, per-branch correct counts (last entry
    is the majority-vote ensemble), and the collected labels/predictions.
    """
    ensemble_size = val_model_eval.get_ensemble_size()
    active_branches = ensemble_size - current_branch_on_training_val
    branch_losses = [0.0] * ensemble_size
    branch_corrects = [0] * (ensemble_size + 1)
    branch_steps = [0] * ensemble_size
    labels_all, preds_all = [], []
    for batch_x, batch_y in val_loader_eval:
        batch_x = batch_x.to(device_to_process)
        batch_y = batch_y.to(device_to_process)
        # Only the branches trained so far are scored.
        branch_outputs = val_model_eval(batch_x)[:active_branches]
        # Majority-vote accumulator for the ensemble prediction.
        vote_counts = torch.zeros(branch_outputs[0].size()).to(device_to_process)
        for b_idx, branch_out in enumerate(branch_outputs):
            _, branch_preds = torch.max(branch_out, 1)
            branch_corrects[b_idx] += torch.sum(branch_preds == batch_y).cpu().numpy()
            branch_losses[b_idx] += val_criterion_eval(branch_out, batch_y).item()
            branch_steps[b_idx] += 1
            for row, pred in enumerate(branch_preds):
                vote_counts[row, pred] += 1
        # Ensemble accuracy from the per-branch votes (last slot).
        _, vote_preds = torch.max(vote_counts, 1)
        branch_corrects[-1] += torch.sum(vote_preds == batch_y).cpu().numpy()
        labels_all.extend(batch_y)
        preds_all.extend(vote_preds)
    for b_idx in range(ensemble_size):
        # Guard against division by zero for branches that never ran.
        branch_losses[b_idx] /= branch_steps[b_idx] if branch_steps[b_idx] != 0 else 1
    return branch_losses, branch_corrects, labels_all, preds_all
def main():
    """Fine-tune the ESR-9 ensemble branch by branch on a custom dataset.

    Branches are trained sequentially: `current_branch_on_training` counts
    down from 8, and at each stage the newest branch gets a higher learning
    rate while already-trained parts use a lower one.
    """
    base_path_experiment = "./experiments/self_data/"
    name_experiment = "ESR_9-sample"
    base_path_to_dataset = "./self_data/"
    num_branches_trained_network = 9
    validation_interval = 2   # validate every N epochs (and on the last epoch)
    max_training_epoch = 2
    current_branch_on_training = 8
    # Make dir
    if not path.isdir(path.join(base_path_experiment, name_experiment)):
        makedirs(path.join(base_path_experiment, name_experiment))
    # Define transforms (training-time augmentation only)
    data_transforms = [transforms.ColorJitter(brightness=0.5, contrast=0.5),
                       transforms.RandomHorizontalFlip(p=0.5),
                       transforms.RandomAffine(degrees=30,
                                               translate=(.1, .1),
                                               scale=(1.0, 1.25),
                                               resample=Image.BILINEAR)]
    # Running device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Starting: {}".format(str(name_experiment)))
    print("Running on {}".format(device))
    # Load network trained on AffectNet -
    # load_path can indicate the path where the network needs to be loaded from.
    load_path = None
    net = Ensemble.load(device, num_branches_trained_network, load_path)
    # Send params to device
    net.to_device(device)
    # Set optimizer: shared base and first branch start at a high LR,
    # remaining branches at a lower one.
    optimizer = optim.SGD([{"params": net.base.parameters(), "lr": 0.1, "momentum": 0.9},
                           {"params": net.branches[0].parameters(), "lr": 0.1, "momentum": 0.9}])
    for b in range(1, net.get_ensemble_size()):
        optimizer.add_param_group({"params": net.branches[b].parameters(), "lr": 0.02, "momentum": 0.9})
    # Define criterion
    criterion = nn.CrossEntropyLoss()
    # Load validation set
    # max_loaded_images_per_label=100000 loads the whole validation set
    val_data = udata.Sample(idx_set=1,
                            max_loaded_images_per_label=1000,
                            transforms=None,
                            base_path_to_sample=base_path_to_dataset)
    val_loader = DataLoader(val_data, batch_size=16, shuffle=False, num_workers=8)
    # Fine-tune ESR-9: one outer iteration per branch
    for branch_on_training in range(num_branches_trained_network):
        # Load training data (reloaded each stage with fresh augmentation)
        train_data = udata.Sample(idx_set=0,
                                  max_loaded_images_per_label=5000,
                                  transforms=transforms.Compose(data_transforms),
                                  base_path_to_sample=base_path_to_dataset)
        train_batch_size = 16
        # Best network (snapshot restored when moving to the next branch)
        best_ensemble = net.to_state_dict()
        best_ensemble_acc = 0.0
        # Initialize scheduler
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.75, last_epoch=-1)
        # History
        history_loss = []
        history_acc = [[] for _ in range(net.get_ensemble_size())]
        history_val_loss = [[] for _ in range(net.get_ensemble_size())]
        history_val_acc = [[] for _ in range(net.get_ensemble_size() + 1)]
        # Training branch
        for epoch in range(max_training_epoch):
            train_loader = DataLoader(train_data, batch_size=train_batch_size, shuffle=True, num_workers=8)
            running_loss = 0.0
            running_corrects = [0.0 for _ in range(net.get_ensemble_size())]
            running_updates = 0
            idx = 1
            for inputs, labels in train_loader:
                # Get the inputs
                # print(inputs, labels)
                inputs, labels = inputs.to(device), labels.to(device)
                # Set gradients to zero
                optimizer.zero_grad()
                # Forward
                outputs = net(inputs)
                confs_preds = [torch.max(o, 1) for o in outputs]
                # Compute loss summed over the branches trained so far only
                loss = 0.0
                for i_4 in range(net.get_ensemble_size() - current_branch_on_training):
                    preds = confs_preds[i_4][1]
                    running_corrects[i_4] += torch.sum(preds == labels).cpu().numpy()
                    loss += criterion(outputs[i_4], labels)
                # Backward
                loss.backward()
                # Optimize
                optimizer.step()
                # Save loss
                running_loss += loss.item()
                running_updates += 1
                print("Number: {:d}, Loss: {:.4f}".format(train_batch_size * idx, loss.item()))
                idx += 1
            scheduler.step()
            # Statistics
            print("[Branch {:d}, Epochs {:d}--{:d}] "
                  "Loss: {:.4f} Acc: {}".format(net.get_ensemble_size() - current_branch_on_training,
                                                epoch + 1,
                                                max_training_epoch,
                                                running_loss / running_updates,
                                                np.array(running_corrects) / len(train_data)))
            # Validation (periodic and always on the final epoch)
            if ((epoch % validation_interval) == 0) or ((epoch + 1) == max_training_epoch):
                net.eval()
                val_loss, val_corrects, _, _ = evaluate(net, val_loader, criterion, device, current_branch_on_training)
                print("\nValidation - [Branch {:d}, Epochs {:d}--{:d}] Loss: {:.4f} Acc: {}\n\n".format(
                    net.get_ensemble_size() - current_branch_on_training,
                    epoch + 1,
                    max_training_epoch,
                    val_loss[-1],
                    np.array(val_corrects) / len(val_data)))
                # Add to history training and validation statistics
                history_loss.append(running_loss / running_updates)
                for i_4 in range(net.get_ensemble_size()):
                    history_acc[i_4].append(running_corrects[i_4] / len(train_data))
                for b in range(net.get_ensemble_size()):
                    history_val_loss[b].append(val_loss[b])
                    history_val_acc[b].append(float(val_corrects[b]) / len(val_data))
                # Add ensemble accuracy to history (last slot = majority vote)
                history_val_acc[-1].append(float(val_corrects[-1]) / len(val_data))
                # Save best ensemble
                ensemble_acc = (float(val_corrects[-1]) / len(val_data))
                if ensemble_acc >= best_ensemble_acc:
                    best_ensemble_acc = ensemble_acc
                    best_ensemble = net.to_state_dict()
                    # Save network
                    Ensemble.save(best_ensemble,
                                  path.join(base_path_experiment, name_experiment, "Saved Networks"),
                                  current_branch_on_training)
                net.train()
        # Change branch on training
        if current_branch_on_training > 0:
            # Decrease max training epoch
            max_training_epoch = 2
            # Reload best configuration
            net.reload(best_ensemble)
            # Set optimizer: the branch entering training gets the high LR
            optimizer = optim.SGD([{"params": net.base.parameters(), "lr": 0.02, "momentum": 0.9},
                                   {"params": net.branches[
                                       net.get_ensemble_size() - current_branch_on_training].parameters(),
                                       "lr": 0.1,
                                       "momentum": 0.9
                                    }])
            # Trained branches keep the lower learning rate
            for b in range(net.get_ensemble_size()):
                if b != (net.get_ensemble_size() - current_branch_on_training):
                    optimizer.add_param_group({"params": net.branches[b].parameters(), "lr": 0.02, "momentum": 0.9})
            # Change branch on training
            current_branch_on_training -= 1
        # Finish training after fine-tuning all branches
        else:
            break
if __name__ == '__main__':
main()
| [
"torchvision.transforms.ColorJitter",
"torchvision.transforms.RandomAffine",
"torch.optim.lr_scheduler.StepLR",
"torch.utils.data.DataLoader",
"model.utils.udata.Sample",
"torchvision.transforms.RandomHorizontalFlip",
"torch.sum",
"torch.nn.CrossEntropyLoss",
"ensemble_network.Ensemble.load",
"tor... | [((3616, 3678), 'ensemble_network.Ensemble.load', 'Ensemble.load', (['device', 'num_branches_trained_network', 'load_path'], {}), '(device, num_branches_trained_network, load_path)\n', (3629, 3678), False, 'from ensemble_network import Ensemble\n'), ((4136, 4157), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4155, 4157), True, 'import torch.nn as nn\n'), ((4272, 4392), 'model.utils.udata.Sample', 'udata.Sample', ([], {'idx_set': '(1)', 'max_loaded_images_per_label': '(1000)', 'transforms': 'None', 'base_path_to_sample': 'base_path_to_dataset'}), '(idx_set=1, max_loaded_images_per_label=1000, transforms=None,\n base_path_to_sample=base_path_to_dataset)\n', (4284, 4392), False, 'from model.utils import udata, umath\n'), ((4490, 4555), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': '(16)', 'shuffle': '(False)', 'num_workers': '(8)'}), '(val_data, batch_size=16, shuffle=False, num_workers=8)\n', (4500, 4555), False, 'from torch.utils.data import DataLoader\n'), ((1957, 1984), 'torch.max', 'torch.max', (['overall_preds', '(1)'], {}), '(overall_preds, 1)\n', (1966, 1984), False, 'import torch\n'), ((2884, 2936), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.5)', 'contrast': '(0.5)'}), '(brightness=0.5, contrast=0.5)\n', (2906, 2936), False, 'from torchvision import transforms\n'), ((2961, 2999), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2992, 2999), False, 'from torchvision import transforms\n'), ((3024, 3129), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '(30)', 'translate': '(0.1, 0.1)', 'scale': '(1.0, 1.25)', 'resample': 'Image.BILINEAR'}), '(degrees=30, translate=(0.1, 0.1), scale=(1.0, 1.25),\n resample=Image.BILINEAR)\n', (3047, 3129), False, 'from torchvision import transforms\n'), ((5128, 5205), 'torch.optim.lr_scheduler.StepLR', 
'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(10)', 'gamma': '(0.75)', 'last_epoch': '(-1)'}), '(optimizer, step_size=10, gamma=0.75, last_epoch=-1)\n', (5153, 5205), True, 'import torch.optim as optim\n'), ((1466, 1503), 'torch.max', 'torch.max', (['outputs_per_branch_eval', '(1)'], {}), '(outputs_per_branch_eval, 1)\n', (1475, 1503), False, 'import torch\n'), ((2718, 2766), 'os.path.join', 'path.join', (['base_path_experiment', 'name_experiment'], {}), '(base_path_experiment, name_experiment)\n', (2727, 2766), False, 'from os import path, makedirs\n'), ((2786, 2834), 'os.path.join', 'path.join', (['base_path_experiment', 'name_experiment'], {}), '(base_path_experiment, name_experiment)\n', (2795, 2834), False, 'from os import path, makedirs\n'), ((3324, 3349), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3347, 3349), False, 'import torch\n'), ((5567, 5652), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'train_batch_size', 'shuffle': '(True)', 'num_workers': '(8)'}), '(train_data, batch_size=train_batch_size, shuffle=True, num_workers=8\n )\n', (5577, 5652), False, 'from torch.utils.data import DataLoader\n'), ((4833, 4868), 'torchvision.transforms.Compose', 'transforms.Compose', (['data_transforms'], {}), '(data_transforms)\n', (4851, 4868), False, 'from torchvision import transforms\n'), ((6177, 6192), 'torch.max', 'torch.max', (['o', '(1)'], {}), '(o, 1)\n', (6186, 6192), False, 'import torch\n'), ((2021, 2057), 'torch.sum', 'torch.sum', (['(preds_eval == labels_eval)'], {}), '(preds_eval == labels_eval)\n', (2030, 2057), False, 'import torch\n'), ((7370, 7396), 'numpy.array', 'np.array', (['running_corrects'], {}), '(running_corrects)\n', (7378, 7396), True, 'import numpy as np\n'), ((9043, 9109), 'os.path.join', 'path.join', (['base_path_experiment', 'name_experiment', '"""Saved Networks"""'], {}), "(base_path_experiment, name_experiment, 'Saved Networks')\n", (9052, 9109), False, 'from 
os import path, makedirs\n'), ((1549, 1585), 'torch.sum', 'torch.sum', (['(preds_eval == labels_eval)'], {}), '(preds_eval == labels_eval)\n', (1558, 1585), False, 'import torch\n'), ((7987, 8009), 'numpy.array', 'np.array', (['val_corrects'], {}), '(val_corrects)\n', (7995, 8009), True, 'import numpy as np\n'), ((6451, 6477), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (6460, 6477), False, 'import torch\n')] |
#!/usr/bin/env python3
#
# Partly derived from:
# https://github.com/locuslab/optnet/blob/master/sudoku/train.py
import argparse
import os
import shutil
import csv
import numpy as np
import numpy.random as npr
#import setproctitle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from tqdm.auto import tqdm
import satnet
class SudokuSolver(nn.Module):
    """Thin wrapper exposing a SATNet layer sized for a Sudoku board.

    boardSz**6 one-hot cell variables (boardSz is the sub-square size, so a
    9x9 board has 9*9 cells x 9 digits), with m clauses and aux auxiliary
    variables for SATNet.
    """

    def __init__(self, boardSz, aux, m):
        super(SudokuSolver, self).__init__()
        num_vars = boardSz ** 6
        self.sat = satnet.SATNet(num_vars, m, aux)

    def forward(self, y_in, mask):
        # SATNet receives the (partial) assignment plus the mask of given bits.
        return self.sat(y_in, mask)
class DigitConv(nn.Module):
    '''
    Convolutional neural network for MNIST digit recognition. From:
    https://github.com/pytorch/examples/blob/master/mnist/main.py

    Output is restricted to the 9 Sudoku digit classes (class 0..8 of the
    10-way softmax); the returned values therefore need not sum to one.
    '''

    def __init__(self):
        super(DigitConv, self).__init__()
        # Two conv+pool stages; fc1 expects a 4x4x50 feature map
        # (consistent with 28x28 inputs -- presumably MNIST, TODO confirm).
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        out = self.fc2(F.relu(self.fc1(out)))
        probs = F.softmax(out, dim=1)
        # Keep only the first 9 classes (Sudoku digits).
        return probs[:, :9].contiguous()
class MNISTSudokuSolver(nn.Module):
    """End-to-end MNIST Sudoku: a digit CNN feeding a SATNet Sudoku solver."""

    def __init__(self, boardSz, aux, m):
        super(MNISTSudokuSolver, self).__init__()
        self.digit_convnet = DigitConv()
        self.sudoku_solver = SudokuSolver(boardSz, aux, m)
        self.boardSz = boardSz
        self.nSq = boardSz**2

    def forward(self, x, is_inputs):
        nBatch = x.shape[0]
        # Classify every cell image, then hand the per-cell digit beliefs to
        # the SAT layer as one flat probability vector per puzzle.
        cell_images = x.flatten(start_dim=0, end_dim=1)
        digit_probs = self.digit_convnet(cell_images)
        flat_boards = digit_probs.view(nBatch, self.nSq ** 3)
        return self.sudoku_solver(flat_boards, is_inputs)
class CSVLogger(object):
    """Minimal CSV logger that writes one row per call, flushing immediately
    so progress survives a crash."""

    def __init__(self, fname):
        # newline='' is required by the csv module; without it csv.writer
        # emits blank lines between rows on Windows.
        self.f = open(fname, 'w', newline='')
        self.logger = csv.writer(self.f)

    def log(self, fields):
        """Append one row (any iterable of values) and flush to disk."""
        self.logger.writerow(fields)
        self.f.flush()

    def close(self):
        """Release the underlying file handle (the original leaked it)."""
        if not self.f.closed:
            self.f.close()

    def __del__(self):
        # Best-effort cleanup if the caller forgets to close().
        try:
            self.close()
        except Exception:
            pass
class FigLogger(object):
    """Live plot of loss and error curves on twin y-axes.

    `fig`/`base_ax` are assumed to be a matplotlib Figure and Axes
    (constructed by the caller) -- TODO confirm against call sites.
    """

    def __init__(self, fig, base_ax, title):
        self.colors = ['tab:red', 'tab:blue']
        self.labels = ['Loss (entropy)', 'Error']
        self.markers = ['d', '.']
        # Twin axis shares x so loss and error get independent y scales.
        self.axes = [base_ax, base_ax.twinx()]
        base_ax.set_xlabel('Epochs')
        base_ax.set_title(title)
        for i, ax in enumerate(self.axes):
            ax.set_ylabel(self.labels[i], color=self.colors[i])
            ax.tick_params(axis='y', labelcolor=self.colors[i])
        self.reset()
        self.fig = fig

    def log(self, args):
        """Append the last two values of `args` (loss, error) and redraw."""
        for i, arg in enumerate(args[-2:]):
            self.curves[i].append(arg)
            x = list(range(len(self.curves[i])))
            self.axes[i].plot(x, self.curves[i], self.colors[i], marker=self.markers[i])
            self.axes[i].set_ylim(0, 1.05)
        self.fig.canvas.draw()

    def reset(self):
        """Remove all plotted lines and clear the stored curves."""
        for ax in self.axes:
            # BUGFIX: iterate a copy — Line2D.remove() mutates ax.lines, and
            # removing while iterating the live list skips every other line.
            for line in list(ax.lines):
                line.remove()
        self.curves = [[], []]
def print_header(msg):
    """Print a status line prefixed with '===>'."""
    print(f'===> {msg}')
def find_unperm(perm):
    """Return the inverse of a permutation given as a 1-D index tensor.

    For the result `unperm`, unperm[perm[i]] == i for every i, so applying
    `unperm` undoes `perm`. Preserves the input's dtype and device.
    """
    unperm = torch.zeros_like(perm)
    # Vectorized scatter replaces the original element-by-element Python loop.
    unperm[perm] = torch.arange(perm.size(0), dtype=perm.dtype, device=perm.device)
    return unperm
def main():
    """Train a (MNIST-)Sudoku SATNet solver end to end.

    Parses CLI flags, loads the pre-generated puzzle tensors, optionally
    applies a fixed cell permutation, then alternates train/test epochs while
    logging to CSV under ./logs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='sudoku')
    parser.add_argument('--boardSz', type=int, default=3)
    parser.add_argument('--batchSz', type=int, default=40)
    parser.add_argument('--testBatchSz', type=int, default=40)
    parser.add_argument('--aux', type=int, default=300)
    parser.add_argument('--m', type=int, default=600)
    parser.add_argument('--nEpoch', type=int, default=100)
    parser.add_argument('--testPct', type=float, default=0.1)
    parser.add_argument('--lr', type=float, default=2e-3)
    parser.add_argument('--save', type=str)
    parser.add_argument('--model', type=str)
    parser.add_argument('--no_cuda', action='store_true')
    parser.add_argument('--mnist', action='store_true')
    parser.add_argument('--perm', action='store_true')
    args = parser.parse_args()

    # For debugging: fix the random seed
    npr.seed(1)
    torch.manual_seed(7)

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        print('Using', torch.cuda.get_device_name(0))
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.cuda.init()

    # Experiment directory name encodes the hyper-parameters; any previous
    # run under the same name is deleted.
    save = 'sudoku{}{}.boardSz{}-aux{}-m{}-lr{}-bsz{}'.format(
        '.perm' if args.perm else '', '.mnist' if args.mnist else '',
        args.boardSz, args.aux, args.m, args.lr, args.batchSz)
    if args.save: save = '{}-{}'.format(args.save, save)
    save = os.path.join('logs', save)
    if os.path.isdir(save): shutil.rmtree(save)
    os.makedirs(save)
    #setproctitle.setproctitle('sudoku.{}'.format(save))

    print_header('Loading data')
    with open(os.path.join(args.data_dir, 'features.pt'), 'rb') as f:
        X_in = torch.load(f)
    with open(os.path.join(args.data_dir, 'features_img.pt'), 'rb') as f:
        Ximg_in = torch.load(f)
    with open(os.path.join(args.data_dir, 'labels.pt'), 'rb') as f:
        Y_in = torch.load(f)
    with open(os.path.join(args.data_dir, 'perm.pt'), 'rb') as f:
        perm = torch.load(f)

    N = X_in.size(0)
    nTrain = int(N*(1.-args.testPct))
    nTest = N-nTrain
    # Batch sizes must divide the split sizes exactly.
    assert(nTrain % args.batchSz == 0)
    assert(nTest % args.testBatchSz == 0)

    print_header('Forming inputs')
    X, Ximg, Y, is_input = process_inputs(X_in, Ximg_in, Y_in, args.boardSz)
    # MNIST mode feeds digit images; otherwise the symbolic one-hot boards.
    data = Ximg if args.mnist else X
    if args.cuda: data, is_input, Y = data.cuda(), is_input.cuda(), Y.cuda()

    unperm = None
    if args.perm and not args.mnist:
        print('Applying permutation')
        data[:,:], Y[:,:], is_input[:,:] = data[:,perm], Y[:,perm], is_input[:,perm]
        # Inverse permutation, used to undo `perm` when computing errors.
        unperm = find_unperm(perm)

    train_set = TensorDataset(data[:nTrain], is_input[:nTrain], Y[:nTrain])
    test_set = TensorDataset(data[nTrain:], is_input[nTrain:], Y[nTrain:])

    print_header('Building model')
    if args.mnist:
        model = MNISTSudokuSolver(args.boardSz, args.aux, args.m)
    else:
        model = SudokuSolver(args.boardSz, args.aux, args.m)
    if args.cuda: model = model.cuda()

    if args.mnist:
        # Much smaller LR for the (pretrained-from-scratch) digit CNN.
        optimizer = optim.Adam([
            {'params': model.sudoku_solver.parameters(), 'lr': args.lr},
            {'params': model.digit_convnet.parameters(), 'lr': 1e-5},
            ])
    else:
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

    if args.model:
        model.load_state_dict(torch.load(args.model))

    train_logger = CSVLogger(os.path.join(save, 'train.csv'))
    test_logger = CSVLogger(os.path.join(save, 'test.csv'))
    fields = ['epoch', 'loss', 'err']
    train_logger.log(fields)
    test_logger.log(fields)

    # Baseline test pass before any training (epoch 0).
    test(args.boardSz, 0, model, optimizer, test_logger, test_set, args.testBatchSz, unperm)
    for epoch in range(1, args.nEpoch+1):
        train(args.boardSz, epoch, model, optimizer, train_logger, train_set, args.batchSz, unperm)
        test(args.boardSz, epoch, model, optimizer, test_logger, test_set, args.testBatchSz, unperm)
        #torch.save(model.state_dict(), os.path.join(save, 'it'+str(epoch)+'.pth'))
def process_inputs(X, Ximg, Y, boardSz):
    """Flatten puzzle tensors for SATNet and derive the given-cell mask.

    A cell counts as "given" iff its one-hot vector along the last axis is
    non-zero; the mask is expanded so every slot of a given cell is 1.
    Note: `boardSz` is kept for interface compatibility but unused here.
    """
    mask = X.sum(dim=3, keepdim=True).expand_as(X).int().sign()
    # Merge the per-cell image grid into one flat sequence of digit images.
    imgs = Ximg.flatten(start_dim=1, end_dim=2).unsqueeze(2).float()
    flat_X = X.view(X.size(0), -1)
    flat_Y = Y.view(Y.size(0), -1)
    flat_mask = mask.view(mask.size(0), -1)
    return flat_X, imgs, flat_Y, flat_mask
def run(boardSz, epoch, model, optimizer, logger, dataset, batchSz, to_train=False, unperm=None):
    """Run one full pass (training or evaluation) of ``model`` over ``dataset``.

    Args:
        boardSz: Sudoku block size ``n`` (board is ``n^2 x n^2``); forwarded to
            ``computeErr``.
        epoch: Epoch number, used only for progress display and logging.
        model: Network mapping ``(data, is_input)`` to per-cell probabilities.
        optimizer: Optimizer; stepped only when ``to_train`` is True.
        logger: CSV logger; receives ``(epoch, mean_loss, mean_err)``.
        dataset: TensorDataset yielding ``(data, is_input, label)`` batches.
        batchSz: Mini-batch size for the DataLoader.
        to_train: If True, backpropagate and update weights; otherwise
            evaluate only.
        unperm: Optional inverse permutation applied before scoring
            (see ``computeErr``).
    """
    loss_final, err_final = 0, 0
    loader = DataLoader(dataset, batch_size=batchSz)
    tloader = tqdm(enumerate(loader), total=len(loader))
    for i,(data,is_input,label) in tloader:
        if to_train: optimizer.zero_grad()
        preds = model(data.contiguous(), is_input.contiguous())
        # Per-entry binary cross-entropy against the one-hot solution.
        loss = nn.functional.binary_cross_entropy(preds, label)
        if to_train:
            loss.backward()
            optimizer.step()
        # computeErr counts boards whose decoded solution is invalid;
        # dividing by batchSz turns that into a per-batch error fraction.
        err = computeErr(preds.data, boardSz, unperm)/batchSz
        tloader.set_description('Epoch {} {} Loss {:.4f} Err: {:.4f}'.format(epoch, ('Train' if to_train else 'Test '), loss.item(), err))
        loss_final += loss.item()
        err_final += err
    # Average the accumulated per-batch statistics over the whole pass.
    loss_final, err_final = loss_final/len(loader), err_final/len(loader)
    logger.log((epoch, loss_final, err_final))
    if not to_train:
        print('TESTING SET RESULTS: Average loss: {:.4f} Err: {:.4f}'.format(loss_final, err_final))
    #print('memory: {:.2f} MB, cached: {:.2f} MB'.format(torch.cuda.memory_allocated()/2.**20, torch.cuda.memory_cached()/2.**20))
    torch.cuda.empty_cache()
def train(args, epoch, model, optimizer, logger, dataset, batchSz, unperm=None):
    """One training pass over ``dataset``; thin wrapper around ``run``."""
    run(args, epoch, model, optimizer, logger, dataset, batchSz,
        to_train=True, unperm=unperm)
@torch.no_grad()
def test(args, epoch, model, optimizer, logger, dataset, batchSz, unperm=None):
    """One evaluation pass over ``dataset`` with gradients disabled."""
    run(args, epoch, model, optimizer, logger, dataset, batchSz,
        to_train=False, unperm=unperm)
@torch.no_grad()
def computeErr(pred_flat, n, unperm):
    """Count boards in the batch whose decoded solution is invalid.

    Args:
        pred_flat: Predictions of shape ``(batch, n^4 * n^2)``; modified in
            place when ``unperm`` is given (the permutation is undone).
        n: Board block size; the board is ``n^2 x n^2`` with ``n^2`` digits.
        unperm: Optional index tensor undoing a cell permutation.

    Returns:
        float: Number of boards in the batch that are NOT valid solutions.
    """
    if unperm is not None:
        pred_flat[:, :] = pred_flat[:, unperm]
    nsq = n ** 2
    pred = pred_flat.view(-1, nsq, nsq, nsq)
    batchSz = pred.size(0)
    s = (nsq - 1) * nsq // 2  # 0 + 1 + ... + n^2-1

    # Decode: most likely digit for every cell.
    I = torch.max(pred, 3)[1].squeeze().view(batchSz, nsq, nsq)

    def invalidGroups(x):
        # A group (row/column/block) is valid iff it is a permutation of
        # 0..nsq-1: min 0, max nsq-1 and the exact digit sum rule out repeats.
        valid = (x.min(1)[0] == 0)
        valid = valid & (x.max(1)[0] == nsq - 1)
        valid = valid & (x.sum(1) == s)
        return valid.bitwise_not()

    boardCorrect = torch.ones(batchSz).type_as(pred)
    for j in range(nsq):
        # Check the jth row and column.
        boardCorrect[invalidGroups(I[:, j, :])] = 0
        boardCorrect[invalidGroups(I[:, :, j])] = 0

        # Check the jth block.
        row, col = n * (j // n), n * (j % n)
        M = invalidGroups(I[:, row:row + n, col:col + n].contiguous().view(batchSz, -1))
        boardCorrect[M] = 0

    # Fix: always return a float. The old code had a redundant early branch
    # returning the int ``batchSz`` when every board was wrong, while every
    # other outcome returned a float.
    return float(batchSz - boardCorrect.sum())
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"satnet.SATNet",
"torch.nn.functional.binary_cross_entropy",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.TensorDataset",
"shutil.rmtree",
"torch.no_grad",
"os.path.join",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.Linear",
"torch.nn.functional.... | [((9265, 9280), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9278, 9280), False, 'import torch\n'), ((9443, 9458), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9456, 9458), False, 'import torch\n'), ((3377, 3399), 'torch.zeros_like', 'torch.zeros_like', (['perm'], {}), '(perm)\n', (3393, 3399), False, 'import torch\n'), ((3506, 3531), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3529, 3531), False, 'import argparse\n'), ((4403, 4414), 'numpy.random.seed', 'npr.seed', (['(1)'], {}), '(1)\n', (4411, 4414), True, 'import numpy.random as npr\n'), ((4419, 4439), 'torch.manual_seed', 'torch.manual_seed', (['(7)'], {}), '(7)\n', (4436, 4439), False, 'import torch\n'), ((4973, 4999), 'os.path.join', 'os.path.join', (['"""logs"""', 'save'], {}), "('logs', save)\n", (4985, 4999), False, 'import os\n'), ((5007, 5026), 'os.path.isdir', 'os.path.isdir', (['save'], {}), '(save)\n', (5020, 5026), False, 'import os\n'), ((5052, 5069), 'os.makedirs', 'os.makedirs', (['save'], {}), '(save)\n', (5063, 5069), False, 'import os\n'), ((6180, 6239), 'torch.utils.data.TensorDataset', 'TensorDataset', (['data[:nTrain]', 'is_input[:nTrain]', 'Y[:nTrain]'], {}), '(data[:nTrain], is_input[:nTrain], Y[:nTrain])\n', (6193, 6239), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((6256, 6315), 'torch.utils.data.TensorDataset', 'TensorDataset', (['data[nTrain:]', 'is_input[nTrain:]', 'Y[nTrain:]'], {}), '(data[nTrain:], is_input[nTrain:], Y[nTrain:])\n', (6269, 6315), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((8043, 8082), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batchSz'}), '(dataset, batch_size=batchSz)\n', (8053, 8082), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((9077, 9101), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (9099, 9101), False, 'import torch\n'), ((590, 614), 'satnet.SATNet', 
'satnet.SATNet', (['n', 'm', 'aux'], {}), '(n, m, aux)\n', (603, 614), False, 'import satnet\n'), ((971, 993), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)', '(5)', '(1)'], {}), '(1, 20, 5, 1)\n', (980, 993), True, 'import torch.nn as nn\n'), ((1015, 1038), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)', '(5)', '(1)'], {}), '(20, 50, 5, 1)\n', (1024, 1038), True, 'import torch.nn as nn\n'), ((1058, 1084), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 50)', '(500)'], {}), '(4 * 4 * 50, 500)\n', (1067, 1084), True, 'import torch.nn as nn\n'), ((1100, 1118), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(10)'], {}), '(500, 10)\n', (1109, 1118), True, 'import torch.nn as nn\n'), ((1192, 1213), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (1204, 1213), True, 'import torch.nn.functional as F\n'), ((1260, 1281), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (1272, 1281), True, 'import torch.nn.functional as F\n'), ((2147, 2165), 'csv.writer', 'csv.writer', (['self.f'], {}), '(self.f)\n', (2157, 2165), False, 'import csv\n'), ((4478, 4503), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4501, 4503), False, 'import torch\n'), ((4682, 4699), 'torch.cuda.init', 'torch.cuda.init', ([], {}), '()\n', (4697, 4699), False, 'import torch\n'), ((5028, 5047), 'shutil.rmtree', 'shutil.rmtree', (['save'], {}), '(save)\n', (5041, 5047), False, 'import shutil\n'), ((5248, 5261), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (5258, 5261), False, 'import torch\n'), ((5354, 5367), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (5364, 5367), False, 'import torch\n'), ((5451, 5464), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (5461, 5464), False, 'import torch\n'), ((5546, 5559), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (5556, 5559), False, 'import torch\n'), ((6936, 6967), 'os.path.join', 'os.path.join', (['save', '"""train.csv"""'], {}), "(save, 
'train.csv')\n", (6948, 6967), False, 'import os\n'), ((6997, 7027), 'os.path.join', 'os.path.join', (['save', '"""test.csv"""'], {}), "(save, 'test.csv')\n", (7009, 7027), False, 'import os\n'), ((8307, 8355), 'torch.nn.functional.binary_cross_entropy', 'nn.functional.binary_cross_entropy', (['preds', 'label'], {}), '(preds, label)\n', (8341, 8355), True, 'import torch.nn as nn\n'), ((4546, 4575), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (4572, 4575), False, 'import torch\n'), ((5177, 5219), 'os.path.join', 'os.path.join', (['args.data_dir', '"""features.pt"""'], {}), "(args.data_dir, 'features.pt')\n", (5189, 5219), False, 'import os\n'), ((5276, 5322), 'os.path.join', 'os.path.join', (['args.data_dir', '"""features_img.pt"""'], {}), "(args.data_dir, 'features_img.pt')\n", (5288, 5322), False, 'import os\n'), ((5382, 5422), 'os.path.join', 'os.path.join', (['args.data_dir', '"""labels.pt"""'], {}), "(args.data_dir, 'labels.pt')\n", (5394, 5422), False, 'import os\n'), ((5479, 5517), 'os.path.join', 'os.path.join', (['args.data_dir', '"""perm.pt"""'], {}), "(args.data_dir, 'perm.pt')\n", (5491, 5517), False, 'import os\n'), ((6882, 6904), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (6892, 6904), False, 'import torch\n'), ((9951, 9970), 'torch.ones', 'torch.ones', (['batchSz'], {}), '(batchSz)\n', (9961, 9970), False, 'import torch\n'), ((1384, 1403), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1393, 1403), True, 'import torch.nn.functional as F\n'), ((9705, 9723), 'torch.max', 'torch.max', (['pred', '(3)'], {}), '(pred, 3)\n', (9714, 9723), False, 'import torch\n')] |
import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Fixed seed so the generated samples (and the metric values derived from
# them in construct_model) are reproducible across runs.
np.random.seed(201909113)
one_hot_size = 7  # size of the one-hot encoding; valid indices are [0, 7)
_num_samples = 47  # number of samples served by the Python data reader
# Scalars drawn from [-1, one_hot_size+1) so some fall outside the valid
# one-hot range and exercise OneHot's out-of-range behavior.
_samples = [np.random.uniform(-1, one_hot_size+1) for _ in range(_num_samples)]
# Sample access functions
def get_sample(index):
    """Return sample ``index`` wrapped in a length-1 list (reader row format)."""
    sample = _samples[index]
    return [sample]
def num_samples():
    """Total number of samples exposed to the data reader."""
    return _num_samples
def sample_dims():
    """Dimensions of one sample: a single scalar."""
    return (1,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
    """Assemble the LBANN experiment pieces for this test.

    Args:
        lbann (module): Module for LBANN Python frontend
        weekly: Unused here; part of the common test-harness interface.

    Returns:
        Tuple ``(trainer, model, data_reader, optimizer, None)``; the final
        ``None`` means no specific number of nodes is requested.
    """
    batch = num_samples() // 2
    return (
        lbann.Trainer(batch),
        construct_model(lbann),
        construct_data_reader(lbann),
        lbann.NoOptimizer(),
        None,
    )
def construct_model(lbann):
    """Construct LBANN model checking OneHot output for both data layouts.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: Model whose metrics compare ``one_hot(x) . y`` against
        a value precomputed with NumPy.
    """
    # Input data
    x_lbann = lbann.Input(data_field='samples')
    y_numpy = np.random.normal(size=one_hot_size).astype(np.float32)
    y_numpy[:] = 1 ### @todo Remove
    y_lbann = lbann.Weights(
        initializer=lbann.ValueInitializer(
            values=y_numpy))
    y_lbann = lbann.WeightsLayer(
        weights=y_lbann,
        dims=[one_hot_size],
    )

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Compute expected metric values with NumPy
    # ------------------------------------------
    vals = []
    for i in range(num_samples()):
        x = int(np.floor(get_sample(i)[0]))
        # Out-of-range indices are expected to contribute 0 (all-zero one-hot).
        vals.append(y_numpy[x] if 0 <= x < one_hot_size else 0)
    val = np.mean(vals, dtype=np.float64)
    tol = np.abs(8 * val * np.finfo(np.float32).eps)

    # ------------------------------------------
    # Identical check for each layout (previously two copy-pasted sections)
    # ------------------------------------------
    for layout, name in (('data_parallel', 'data-parallel layout'),
                         ('model_parallel', 'model-parallel layout')):
        _append_onehot_check(lbann, x_lbann, y_lbann, layout, name,
                             val, tol, obj, metrics, callbacks)

    # ------------------------------------------
    # Construct model
    # ------------------------------------------
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=x_lbann,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)


def _append_onehot_check(lbann, x, y, layout, name, val, tol, obj, metrics, callbacks):
    """Append a OneHot/MatMul metric plus a CheckMetric callback for one layout."""
    x_onehot = lbann.OneHot(
        x,
        size=one_hot_size,
        data_layout=layout,
    )
    # Dot product of the one-hot vector with the weights vector.
    z = lbann.MatMul(
        lbann.Reshape(x_onehot, dims=[1, -1]),
        lbann.Reshape(y, dims=[1, -1]),
        transpose_b=True,
    )
    obj.append(z)
    metrics.append(lbann.Metric(z, name=name))
    callbacks.append(
        lbann.CallbackCheckMetric(
            metric=metrics[-1].name,
            lower_bound=val - tol,
            upper_bound=val + tol,
            error_on_failure=True,
            execution_modes='test',
        )
    )
def construct_data_reader(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend
    """
    message = lbann.reader_pb2.DataReader()
    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    for role in ('train', 'test'):
        message.reader.extend([
            tools.create_python_data_reader(
                lbann,
                current_file,
                'get_sample',
                'num_samples',
                'sample_dims',
                role,
            )
        ])
    return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest, and register each one
# at module scope so PyTest's collector can discover it by name.
for _test_func in tools.create_tests(setup_experiment, __file__):
    globals()[_test_func.__name__] = _test_func
| [
"numpy.random.uniform",
"tools.create_python_data_reader",
"numpy.random.seed",
"os.path.realpath",
"os.path.dirname",
"tools.create_tests",
"numpy.finfo",
"numpy.mean",
"numpy.random.normal"
] | [((123, 149), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (139, 149), False, 'import os\n'), ((164, 193), 'os.path.dirname', 'os.path.dirname', (['current_file'], {}), '(current_file)\n', (179, 193), False, 'import os\n'), ((536, 561), 'numpy.random.seed', 'np.random.seed', (['(201909113)'], {}), '(201909113)\n', (550, 561), True, 'import numpy as np\n'), ((5365, 5411), 'tools.create_tests', 'tools.create_tests', (['setup_experiment', '__file__'], {}), '(setup_experiment, __file__)\n', (5383, 5411), False, 'import tools\n'), ((609, 648), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(one_hot_size + 1)'], {}), '(-1, one_hot_size + 1)\n', (626, 648), True, 'import numpy as np\n'), ((2341, 2372), 'numpy.mean', 'np.mean', (['vals'], {'dtype': 'np.float64'}), '(vals, dtype=np.float64)\n', (2348, 2372), True, 'import numpy as np\n'), ((226, 254), 'os.path.dirname', 'os.path.dirname', (['current_dir'], {}), '(current_dir)\n', (241, 254), False, 'import os\n'), ((1629, 1664), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'one_hot_size'}), '(size=one_hot_size)\n', (1645, 1664), True, 'import numpy as np\n'), ((4734, 4843), 'tools.create_python_data_reader', 'tools.create_python_data_reader', (['lbann', 'current_file', '"""get_sample"""', '"""num_samples"""', '"""sample_dims"""', '"""train"""'], {}), "(lbann, current_file, 'get_sample',\n 'num_samples', 'sample_dims', 'train')\n", (4765, 4843), False, 'import tools\n'), ((4965, 5073), 'tools.create_python_data_reader', 'tools.create_python_data_reader', (['lbann', 'current_file', '"""get_sample"""', '"""num_samples"""', '"""sample_dims"""', '"""test"""'], {}), "(lbann, current_file, 'get_sample',\n 'num_samples', 'sample_dims', 'test')\n", (4996, 5073), False, 'import tools\n'), ((2400, 2420), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2408, 2420), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from models import base_server
from configs import configs
# Read example image from the path configured in `configs`.
test_img = cv2.imread(configs.test_img_fp)
# NOTE(review): cv2.resize expects dsize as (width, height) — confirm
# configs.face_describer_tensor_shape is ordered accordingly.
test_img = cv2.resize(test_img, configs.face_describer_tensor_shape)
# Define input tensors feed to session graph
#dropout_rate = 0.5
# Wrap the image in two leading axes; presumably the model expects a batch
# dimension plus an outer list of inputs — verify against the model graph.
input_data = np.array([np.expand_dims(test_img, axis=0)])
print(input_data.shape)
# Define a Base Server
srv = base_server.BaseServer(model_fp=configs.face_describer_model_fp,
                       input_tensor_names=configs.face_describer_input_tensor_names,
                       output_tensor_names=configs.face_describer_output_tensor_names,
                       device=configs.face_describer_device)
# Run prediction
prediction = srv.inference(data=input_data)
# Print results
print('512-D Features are \n{}'.format(prediction))
| [
"numpy.expand_dims",
"cv2.imread",
"models.base_server.BaseServer",
"cv2.resize"
] | [((122, 153), 'cv2.imread', 'cv2.imread', (['configs.test_img_fp'], {}), '(configs.test_img_fp)\n', (132, 153), False, 'import cv2\n'), ((165, 222), 'cv2.resize', 'cv2.resize', (['test_img', 'configs.face_describer_tensor_shape'], {}), '(test_img, configs.face_describer_tensor_shape)\n', (175, 222), False, 'import cv2\n'), ((400, 641), 'models.base_server.BaseServer', 'base_server.BaseServer', ([], {'model_fp': 'configs.face_describer_model_fp', 'input_tensor_names': 'configs.face_describer_input_tensor_names', 'output_tensor_names': 'configs.face_describer_output_tensor_names', 'device': 'configs.face_describer_device'}), '(model_fp=configs.face_describer_model_fp,\n input_tensor_names=configs.face_describer_input_tensor_names,\n output_tensor_names=configs.face_describer_output_tensor_names, device=\n configs.face_describer_device)\n', (422, 641), False, 'from models import base_server\n'), ((312, 344), 'numpy.expand_dims', 'np.expand_dims', (['test_img'], {'axis': '(0)'}), '(test_img, axis=0)\n', (326, 344), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import math
import arim
import arim.im as im
import arim.im.tfm, arim.ray
import arim.io
import arim.geometry as g
def test_extrema_lookup_times_in_rectbox():
    """The extrema search must find min/max total times and their elements."""
    grid = g.Grid(-10.0, 10.0, 0.0, 0.0, 0.0, 15.0, 1.0)
    tx = [0, 0, 0, 1, 1, 1]
    rx = [0, 1, 2, 1, 1, 2]
    num_timetraces = len(tx)
    times_tx = np.zeros((grid.numpoints, num_timetraces))
    times_rx = np.zeros((grid.numpoints, num_timetraces))

    # Plant the global minimum on timetrace 5 (tx=1, rx=2) at grid point 5...
    times_tx[5, 5] = -1.5
    times_rx[5, 5] = -1.5
    # ...next to a decoy whose total (-2.1) is less extreme than -3.0? No:
    # the decoy sums to -2.1 > -3.0, so timetrace 5 stays the minimum.
    times_tx[5, 4] = -2.0
    times_rx[5, 4] = -0.1

    # Plant the global maximum on timetrace 1 (tx=0, rx=1) at grid point 3,
    # plus a decoy summing to only 2.1.
    times_tx[3, 1] = 1.5
    times_rx[3, 1] = 1.5
    times_tx[0, 0] = 2.0
    times_rx[0, 0] = 0.1

    out = im.tfm.extrema_lookup_times_in_rectbox(
        grid, times_tx, times_rx, tx, rx
    )
    assert math.isclose(out.tmin, -3.0)
    assert math.isclose(out.tmax, 3.0)
    assert (out.tx_elt_for_tmin, out.rx_elt_for_tmin) == (1, 2)
    assert (out.tx_elt_for_tmax, out.rx_elt_for_tmax) == (0, 1)
@pytest.mark.parametrize("use_real_grid", [True, False])
def test_multiview_tfm(use_real_grid):
    """Multi-view TFM regression test (LL-T view and its reverse T-LL).

    Builds a 5-element contact probe, synthesizes reciprocal FMC data, traces
    rays for an LL-T view through a backwall, and checks the TFM amplitude at
    a single grid point against a frozen reference value.

    Args:
        use_real_grid: If True, image on a one-point ``arim.Grid``; otherwise
            on a bare ``arim.Points`` with default orientations.
    """
    # make probe
    probe = arim.Probe.make_matrix_probe(5, 0.5e-3, 1, np.nan, 1e6)
    probe.set_reference_element("first")
    probe.reset_position()
    probe.translate([0.0, 0.0, -1e-3])

    # make frame
    tx_arr, rx_arr = arim.ut.fmc(probe.numelements)
    time = arim.Time(0.5e-6, 1 / 20e6, 100)
    # use random data but ensure reciprocity: the seed is symmetric in (tx, rx)
    timetraces = np.zeros((len(tx_arr), len(time)))
    for i, (tx, rx) in enumerate(zip(tx_arr, rx_arr)):
        np.random.seed((tx * rx) ** 2)  # symmetric in tx and rx
        timetraces[i] = np.random.rand(len(time))
    block = arim.Material(6300, 3100)
    frame = arim.Frame(
        timetraces, time, tx_arr, rx_arr, probe, arim.ExaminationObject(block)
    )

    # prepare view LL-T in contact
    if use_real_grid:
        grid = arim.Grid(0.0, 0.0, 0.0, 0.0, 5e-3, 5e-3, np.nan)
        grid_interface = arim.Interface(*grid.to_oriented_points())
    else:
        grid = arim.Points(np.array([0.0, 0.0, 5e-3]), name="Grid")
        grid_interface = arim.Interface(
            *arim.geometry.default_oriented_points(grid.to_1d_points())
        )
    backwall = arim.geometry.points_1d_wall_z(-1e-3, 1e-3, 10e-3, 200)
    backwall_interface = arim.Interface(*backwall)
    probe_interface = arim.Interface(*probe.to_oriented_points())
    path_LL = arim.Path(
        [probe_interface, backwall_interface, grid_interface],
        [block, block],
        ["L", "L"],
    )
    path_T = arim.Path([probe_interface, grid_interface], [block], ["T"])
    view = arim.View(path_LL, path_T, "LL-T")
    arim.ray.ray_tracing([view], convert_to_fortran_order=True)

    # make TFM
    tfm = im.tfm.tfm_for_view(frame, grid, view, fillvalue=np.nan)
    # Check this value is unchanged over time!
    expected_val = 12.745499105785953 / frame.numtimetraces
    assert tfm.res.shape == grid.shape
    if use_real_grid:
        np.testing.assert_array_almost_equal(tfm.res, [[[expected_val]]])
    else:
        np.testing.assert_allclose(tfm.res, expected_val)

    # Reverse view
    view_rev = arim.View(path_LL, path_T, "T-LL")
    tfm_rev = im.tfm.tfm_for_view(frame, grid, view_rev, fillvalue=np.nan)
    # Bug fix: this assertion previously re-checked ``tfm.res`` (copy-paste)
    # instead of the reversed view's result.
    assert tfm_rev.res.shape == grid.shape
    if use_real_grid:
        np.testing.assert_array_almost_equal(tfm_rev.res, [[[expected_val]]])
    else:
        np.testing.assert_allclose(tfm_rev.res, expected_val)
@pytest.mark.parametrize("use_hmc", [False, True])
def test_contact_tfm(use_hmc):
    """Contact-TFM regression test for both FMC and HMC capture schemes."""
    # make probe
    probe = arim.Probe.make_matrix_probe(5, 0.5e-3, 1, np.nan, 1e6)
    probe.set_reference_element("first")
    probe.reset_position()
    probe.translate([0.0, 0.0, -1e-3])

    # make frame
    if use_hmc:
        tx_arr, rx_arr = arim.ut.hmc(probe.numelements)
    else:
        tx_arr, rx_arr = arim.ut.fmc(probe.numelements)
    time = arim.Time(0.5e-6, 1 / 20e6, 100)
    # use random data but ensure reciprocity
    timetraces = np.zeros((len(tx_arr), len(time)))
    for i, (tx, rx) in enumerate(zip(tx_arr, rx_arr)):
        # The seed depends symmetrically on (tx, rx), so a swapped pair
        # generates an identical timetrace.
        np.random.seed((tx * rx) ** 2)  # symmetric in tx and rx
        timetraces[i] = np.random.rand(len(time))
    # check reciprocity (only meaningful for FMC, where both orders exist)
    if not use_hmc:
        for i, (tx, rx) in enumerate(zip(tx_arr, rx_arr)):
            timetrace_1 = timetraces[i]
            timetrace_2 = timetraces[np.logical_and(tx_arr == rx, rx_arr == tx)][0]
            np.testing.assert_allclose(
                timetrace_1, timetrace_2, err_msg="fmc data not symmetric"
            )
    block = arim.Material(6300, 3100)
    frame = arim.Frame(
        timetraces, time, tx_arr, rx_arr, probe, arim.ExaminationObject(block)
    )
    # single-point grid for contact TFM
    grid = arim.Points(np.array([0.0, 0.0, 5e-3]), name="Grid")
    tfm = im.tfm.contact_tfm(frame, grid, block.longitudinal_vel, fillvalue=np.nan)
    # Check this value is unchanged over time!
    expected_val = 12.49925772283528 / frame.numtimetraces
    assert tfm.res.shape == grid.shape
    np.testing.assert_allclose(tfm.res, expected_val)
| [
"numpy.random.seed",
"arim.im.tfm.extrema_lookup_times_in_rectbox",
"arim.im.tfm.contact_tfm",
"arim.geometry.Grid",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal",
"arim.Path",
"arim.geometry.points_1d_wall_z",
"numpy.testing.assert_allclose",
"arim.ut.fmc",
"arim.ut.hmc",... | [((1241, 1296), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_real_grid"""', '[True, False]'], {}), "('use_real_grid', [True, False])\n", (1264, 1296), False, 'import pytest\n'), ((3713, 3762), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_hmc"""', '[False, True]'], {}), "('use_hmc', [False, True])\n", (3736, 3762), False, 'import pytest\n'), ((206, 251), 'arim.geometry.Grid', 'g.Grid', (['(-10.0)', '(10.0)', '(0.0)', '(0.0)', '(0.0)', '(15.0)', '(1.0)'], {}), '(-10.0, 10.0, 0.0, 0.0, 0.0, 15.0, 1.0)\n', (212, 251), True, 'import arim.geometry as g\n'), ((914, 1004), 'arim.im.tfm.extrema_lookup_times_in_rectbox', 'im.tfm.extrema_lookup_times_in_rectbox', (['grid', 'lookup_times_tx', 'lookup_times_rx', 'tx', 'rx'], {}), '(grid, lookup_times_tx,\n lookup_times_rx, tx, rx)\n', (952, 1004), True, 'import arim.im as im\n'), ((1026, 1054), 'math.isclose', 'math.isclose', (['out.tmin', '(-3.0)'], {}), '(out.tmin, -3.0)\n', (1038, 1054), False, 'import math\n'), ((1066, 1093), 'math.isclose', 'math.isclose', (['out.tmax', '(3.0)'], {}), '(out.tmax, 3.0)\n', (1078, 1093), False, 'import math\n'), ((1365, 1426), 'arim.Probe.make_matrix_probe', 'arim.Probe.make_matrix_probe', (['(5)', '(0.0005)', '(1)', 'np.nan', '(1000000.0)'], {}), '(5, 0.0005, 1, np.nan, 1000000.0)\n', (1393, 1426), False, 'import arim\n'), ((1567, 1597), 'arim.ut.fmc', 'arim.ut.fmc', (['probe.numelements'], {}), '(probe.numelements)\n', (1578, 1597), False, 'import arim\n'), ((1609, 1646), 'arim.Time', 'arim.Time', (['(5e-07)', '(1 / 20000000.0)', '(100)'], {}), '(5e-07, 1 / 20000000.0, 100)\n', (1618, 1646), False, 'import arim\n'), ((1921, 1946), 'arim.Material', 'arim.Material', (['(6300)', '(3100)'], {}), '(6300, 3100)\n', (1934, 1946), False, 'import arim\n'), ((2464, 2520), 'arim.geometry.points_1d_wall_z', 'arim.geometry.points_1d_wall_z', (['(-0.001)', '(0.001)', '(0.01)', '(200)'], {}), '(-0.001, 0.001, 0.01, 200)\n', (2494, 2520), False, 
'import arim\n'), ((2545, 2570), 'arim.Interface', 'arim.Interface', (['*backwall'], {}), '(*backwall)\n', (2559, 2570), False, 'import arim\n'), ((2652, 2748), 'arim.Path', 'arim.Path', (['[probe_interface, backwall_interface, grid_interface]', '[block, block]', "['L', 'L']"], {}), "([probe_interface, backwall_interface, grid_interface], [block,\n block], ['L', 'L'])\n", (2661, 2748), False, 'import arim\n'), ((2789, 2849), 'arim.Path', 'arim.Path', (['[probe_interface, grid_interface]', '[block]', "['T']"], {}), "([probe_interface, grid_interface], [block], ['T'])\n", (2798, 2849), False, 'import arim\n'), ((2861, 2895), 'arim.View', 'arim.View', (['path_LL', 'path_T', '"""LL-T"""'], {}), "(path_LL, path_T, 'LL-T')\n", (2870, 2895), False, 'import arim\n'), ((2900, 2959), 'arim.ray.ray_tracing', 'arim.ray.ray_tracing', (['[view]'], {'convert_to_fortran_order': '(True)'}), '([view], convert_to_fortran_order=True)\n', (2920, 2959), False, 'import arim\n'), ((2986, 3042), 'arim.im.tfm.tfm_for_view', 'im.tfm.tfm_for_view', (['frame', 'grid', 'view'], {'fillvalue': 'np.nan'}), '(frame, grid, view, fillvalue=np.nan)\n', (3005, 3042), True, 'import arim.im as im\n'), ((3389, 3423), 'arim.View', 'arim.View', (['path_LL', 'path_T', '"""T-LL"""'], {}), "(path_LL, path_T, 'T-LL')\n", (3398, 3423), False, 'import arim\n'), ((3438, 3498), 'arim.im.tfm.tfm_for_view', 'im.tfm.tfm_for_view', (['frame', 'grid', 'view_rev'], {'fillvalue': 'np.nan'}), '(frame, grid, view_rev, fillvalue=np.nan)\n', (3457, 3498), True, 'import arim.im as im\n'), ((3823, 3884), 'arim.Probe.make_matrix_probe', 'arim.Probe.make_matrix_probe', (['(5)', '(0.0005)', '(1)', 'np.nan', '(1000000.0)'], {}), '(5, 0.0005, 1, np.nan, 1000000.0)\n', (3851, 3884), False, 'import arim\n'), ((4154, 4191), 'arim.Time', 'arim.Time', (['(5e-07)', '(1 / 20000000.0)', '(100)'], {}), '(5e-07, 1 / 20000000.0, 100)\n', (4163, 4191), False, 'import arim\n'), ((4825, 4850), 'arim.Material', 'arim.Material', (['(6300)', 
'(3100)'], {}), '(6300, 3100)\n', (4838, 4850), False, 'import arim\n'), ((5071, 5144), 'arim.im.tfm.contact_tfm', 'im.tfm.contact_tfm', (['frame', 'grid', 'block.longitudinal_vel'], {'fillvalue': 'np.nan'}), '(frame, grid, block.longitudinal_vel, fillvalue=np.nan)\n', (5089, 5144), True, 'import arim.im as im\n'), ((5295, 5344), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tfm.res', 'expected_val'], {}), '(tfm.res, expected_val)\n', (5321, 5344), True, 'import numpy as np\n'), ((1802, 1832), 'numpy.random.seed', 'np.random.seed', (['((tx * rx) ** 2)'], {}), '((tx * rx) ** 2)\n', (1816, 1832), True, 'import numpy as np\n'), ((2020, 2049), 'arim.ExaminationObject', 'arim.ExaminationObject', (['block'], {}), '(block)\n', (2042, 2049), False, 'import arim\n'), ((2129, 2180), 'arim.Grid', 'arim.Grid', (['(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.005)', '(0.005)', 'np.nan'], {}), '(0.0, 0.0, 0.0, 0.0, 0.005, 0.005, np.nan)\n', (2138, 2180), False, 'import arim\n'), ((3220, 3285), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['tfm.res', '[[[expected_val]]]'], {}), '(tfm.res, [[[expected_val]]])\n', (3256, 3285), True, 'import numpy as np\n'), ((3304, 3353), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tfm.res', 'expected_val'], {}), '(tfm.res, expected_val)\n', (3330, 3353), True, 'import numpy as np\n'), ((3568, 3637), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['tfm_rev.res', '[[[expected_val]]]'], {}), '(tfm_rev.res, [[[expected_val]]])\n', (3604, 3637), True, 'import numpy as np\n'), ((3656, 3709), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tfm_rev.res', 'expected_val'], {}), '(tfm_rev.res, expected_val)\n', (3682, 3709), True, 'import numpy as np\n'), ((4045, 4075), 'arim.ut.hmc', 'arim.ut.hmc', (['probe.numelements'], {}), '(probe.numelements)\n', (4056, 4075), False, 'import arim\n'), ((4111, 4141), 'arim.ut.fmc', 
'arim.ut.fmc', (['probe.numelements'], {}), '(probe.numelements)\n', (4122, 4141), False, 'import arim\n'), ((4348, 4378), 'numpy.random.seed', 'np.random.seed', (['((tx * rx) ** 2)'], {}), '((tx * rx) ** 2)\n', (4362, 4378), True, 'import numpy as np\n'), ((4924, 4953), 'arim.ExaminationObject', 'arim.ExaminationObject', (['block'], {}), '(block)\n', (4946, 4953), False, 'import arim\n'), ((5019, 5046), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.005]'], {}), '([0.0, 0.0, 0.005])\n', (5027, 5046), True, 'import numpy as np\n'), ((2284, 2311), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.005]'], {}), '([0.0, 0.0, 0.005])\n', (2292, 2311), True, 'import numpy as np\n'), ((4695, 4786), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['timetrace_1', 'timetrace_2'], {'err_msg': '"""fmc data not symmetric"""'}), "(timetrace_1, timetrace_2, err_msg=\n 'fmc data not symmetric')\n", (4721, 4786), True, 'import numpy as np\n'), ((4636, 4678), 'numpy.logical_and', 'np.logical_and', (['(tx_arr == rx)', '(rx_arr == tx)'], {}), '(tx_arr == rx, rx_arr == tx)\n', (4650, 4678), True, 'import numpy as np\n')] |
import itertools
import numba
import numpy as np
import scipy.sparse
from itertools import zip_longest
from ._utils import isscalar, equivalent, _zero_of_dtype
def elemwise(func, *args, **kwargs):
    """Apply a broadcasting function elementwise to sparse arguments.

    Parameters
    ----------
    func : Callable
        The function to apply. Must support broadcasting.
    *args : tuple, optional
        The arguments to the function. Can be :obj:`SparseArray` objects
        or :obj:`scipy.sparse.spmatrix` objects.
    **kwargs : dict, optional
        Any additional arguments to pass to the function.

    Returns
    -------
    SparseArray
        The result of applying the function.

    Raises
    ------
    ValueError
        If the operation would result in a dense matrix, or if the operands
        don't have broadcastable shapes.

    See Also
    --------
    :obj:`numpy.ufunc` :
        A similar Numpy construct. Note that any :code:`ufunc` can be used
        as the :code:`func` input to this function.

    Notes
    -----
    Previously, operations with Numpy arrays were sometimes supported. Now,
    it is necessary to convert Numpy arrays to :obj:`COO` objects.
    """
    # All the heavy lifting happens inside the _Elemwise helper class.
    op = _Elemwise(func, *args, **kwargs)
    return op.get_result()
@numba.jit(nopython=True, nogil=True)
def _match_arrays(a, b):  # pragma: no cover
    """
    Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted
    in lexicographical order.

    Parameters
    ----------
    a, b : np.ndarray
        The input 1-D arrays to match. If matching of multiple fields is
        needed, use np.recarrays. These two arrays must be sorted.

    Returns
    -------
    a_idx, b_idx : np.ndarray
        The output indices of every possible pair of matching elements.
    """
    if len(a) == 0 or len(b) == 0:
        return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp)
    a_ind, b_ind = [], []
    nb = len(b)
    ib = 0
    # `match` tracks where the scan of b should (re)start for the next run of
    # equal values in a; both arrays being sorted makes a single forward pass
    # sufficient.
    match = 0
    for ia, j in enumerate(a):
        if j == b[match]:
            ib = match
        # Scan b forward while its entries could still equal j.
        while ib < nb and j >= b[ib]:
            if j == b[ib]:
                a_ind.append(ia)
                b_ind.append(ib)
            # Advance the restart point past values already known to be
            # smaller than the current candidate.
            if b[match] < b[ib]:
                match = ib
            ib += 1
    return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp)
def _get_nary_broadcast_shape(*shapes):
    """
    Broadcast any number of shapes to a single result shape.

    Parameters
    ----------
    *shapes : tuple[tuple[int]]
        The shapes to broadcast.

    Returns
    -------
    tuple[int]
        The output shape.

    Raises
    ------
    ValueError
        If the input shapes cannot be broadcast to a single shape.
    """
    out = ()
    for shape in shapes:
        try:
            out = _get_broadcast_shape(shape, out)
        except ValueError:
            # Re-raise listing *all* shapes, not just the offending pair.
            shapes_str = ", ".join(str(shape) for shape in shapes)
            raise ValueError(
                "operands could not be broadcast together with shapes %s" % shapes_str
            )
    return out
def _get_broadcast_shape(shape1, shape2, is_result=False):
"""
Get the overall broadcasted shape.
Parameters
----------
shape1, shape2 : tuple[int]
The input shapes to broadcast together.
is_result : bool
Whether or not shape2 is also the result shape.
Returns
-------
result_shape : tuple[int]
The overall shape of the result.
Raises
------
ValueError
If the two shapes cannot be broadcast together.
"""
# https://stackoverflow.com/a/47244284/774273
if not all(
(l1 == l2) or (l1 == 1) or ((l2 == 1) and not is_result)
for l1, l2 in zip(shape1[::-1], shape2[::-1])
):
raise ValueError(
"operands could not be broadcast together with shapes %s, %s"
% (shape1, shape2)
)
result_shape = tuple(
l1 if l1 != 1 else l2
for l1, l2 in zip_longest(shape1[::-1], shape2[::-1], fillvalue=1)
)[::-1]
return result_shape
def _get_broadcast_parameters(shape, broadcast_shape):
"""
Get the broadcast parameters.
Parameters
----------
shape : tuple[int]
The input shape.
broadcast_shape
The shape to broadcast to.
Returns
-------
params : list
A list containing None if the dimension isn't in the original array, False if
it needs to be broadcast, and True if it doesn't.
"""
params = [
None if l1 is None else l1 == l2
for l1, l2 in zip_longest(shape[::-1], broadcast_shape[::-1], fillvalue=None)
][::-1]
return params
def _get_reduced_coords(coords, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_params = [bool(param) for param in params]
return coords[reduced_params]
def _get_reduced_shape(shape, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
shape : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_shape = tuple(l for l, p in zip(shape, params) if p)
return reduced_shape
def _get_expanded_coords_data(coords, data, params, broadcast_shape):
"""
Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to.
Produces sorted output for sorted inputs.
Parameters
----------
coords : np.ndarray
The coordinates to expand.
data : np.ndarray
The data corresponding to the coordinates.
params : list
The broadcast parameters.
broadcast_shape : tuple[int]
The shape to broadcast to.
Returns
-------
expanded_coords : np.ndarray
List of 1-D arrays. Each item in the list has one dimension of coordinates.
expanded_data : np.ndarray
The data corresponding to expanded_coords.
"""
first_dim = -1
expand_shapes = []
for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
if p and first_dim == -1:
expand_shapes.append(coords.shape[1])
first_dim = d
if not p:
expand_shapes.append(l)
all_idx = _cartesian_product(*(np.arange(d, dtype=np.intp) for d in expand_shapes))
false_dim = 0
dim = 0
expanded_coords = np.empty((len(broadcast_shape), all_idx.shape[1]), dtype=np.intp)
if first_dim != -1:
expanded_data = data[all_idx[first_dim]]
else:
expanded_coords = all_idx
expanded_data = np.repeat(data, np.prod(broadcast_shape, dtype=np.int64))
return np.asarray(expanded_coords), np.asarray(expanded_data)
for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
if p:
expanded_coords[d] = coords[dim, all_idx[first_dim]]
else:
expanded_coords[d] = all_idx[false_dim + (d > first_dim)]
false_dim += 1
if p is not None:
dim += 1
return np.asarray(expanded_coords), np.asarray(expanded_data)
# (c) senderle
# Taken from https://stackoverflow.com/a/11146645/774273
# License: https://creativecommons.org/licenses/by-sa/3.0/
def _cartesian_product(*arrays):
"""
Get the cartesian product of a number of arrays.
Parameters
----------
*arrays : Tuple[np.ndarray]
The arrays to get a cartesian product of. Always sorted with respect
to the original array.
Returns
-------
out : np.ndarray
The overall cartesian product of all the input arrays.
"""
broadcastable = np.ix_(*arrays)
broadcasted = np.broadcast_arrays(*broadcastable)
rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
dtype = np.result_type(*arrays)
out = np.empty(rows * cols, dtype=dtype)
start, end = 0, rows
for a in broadcasted:
out[start:end] = a.reshape(-1)
start, end = end, end + rows
return out.reshape(cols, rows)
def _get_matching_coords(coords, params):
"""
Get the matching coords across a number of broadcast operands.
Parameters
----------
coords : list[numpy.ndarray]
The input coordinates.
params : list[Union[bool, none]]
The broadcast parameters.
Returns
-------
numpy.ndarray
The broacasted coordinates
"""
matching_coords = []
dims = np.zeros(len(coords), dtype=np.uint8)
for p_all in zip(*params):
for i, p in enumerate(p_all):
if p:
matching_coords.append(coords[i][dims[i]])
break
else:
matching_coords.append(coords[dims[0]])
for i, p in enumerate(p_all):
if p is not None:
dims[i] += 1
return np.asarray(matching_coords, dtype=np.intp)
def broadcast_to(x, shape):
"""
Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that
this function returns a new array instead of a view.
Parameters
----------
shape : tuple[int]
The shape to broadcast the data to.
Returns
-------
COO
The broadcasted sparse array.
Raises
------
ValueError
If the operand cannot be broadcast to the given shape.
See Also
--------
:obj:`numpy.broadcast_to` : NumPy equivalent function
"""
from ._coo import COO
if shape == x.shape:
return x
result_shape = _get_broadcast_shape(x.shape, shape, is_result=True)
params = _get_broadcast_parameters(x.shape, result_shape)
coords, data = _get_expanded_coords_data(x.coords, x.data, params, result_shape)
# Check if all the non-broadcast axes are next to each other
nonbroadcast_idx = [idx for idx, p in enumerate(params) if p]
diff_nonbroadcast_idx = [
a - b for a, b in zip(nonbroadcast_idx[1:], nonbroadcast_idx[:-1])
]
sorted = all(d == 1 for d in diff_nonbroadcast_idx)
return COO(
coords,
data,
shape=result_shape,
has_duplicates=False,
sorted=sorted,
fill_value=x.fill_value,
)
class _Elemwise:
def __init__(self, func, *args, **kwargs):
"""
Initialize the element-wise function calculator.
Parameters
----------
func : types.Callable
The function to compute
*args : tuple[Union[SparseArray, ndarray, scipy.sparse.spmatrix]]
The arguments to compute the function on.
**kwargs : dict
Extra arguments to pass to the function.
"""
from ._coo import COO
from ._sparse_array import SparseArray
from ._compressed import GCXS
from ._dok import DOK
processed_args = []
out_type = GCXS
sparse_args = [arg for arg in args if isinstance(arg, SparseArray)]
if all(isinstance(arg, DOK) for arg in sparse_args):
out_type = DOK
elif all(isinstance(arg, GCXS) for arg in sparse_args):
out_type = GCXS
else:
out_type = COO
for arg in args:
if isinstance(arg, scipy.sparse.spmatrix):
processed_args.append(COO.from_scipy_sparse(arg))
elif isscalar(arg) or isinstance(arg, np.ndarray):
# Faster and more reliable to pass ()-shaped ndarrays as scalars.
processed_args.append(np.asarray(arg))
elif isinstance(arg, SparseArray) and not isinstance(arg, COO):
processed_args.append(COO(arg))
elif not isinstance(arg, COO):
self.args = None
return
else:
processed_args.append(arg)
self.out_type = out_type
self.args = tuple(processed_args)
self.func = func
self.dtype = kwargs.pop("dtype", None)
self.kwargs = kwargs
self.cache = {}
self._dense_result = False
self._check_broadcast()
self._get_fill_value()
def get_result(self):
from ._coo import COO
if self.args is None:
return NotImplemented
if self._dense_result:
args = [a.todense() if isinstance(a, COO) else a for a in self.args]
return self.func(*args, **self.kwargs)
if any(s == 0 for s in self.shape):
data = np.empty((0,), dtype=self.fill_value.dtype)
coords = np.empty((0, len(self.shape)), dtype=np.intp)
return COO(
coords,
data,
shape=self.shape,
has_duplicates=False,
fill_value=self.fill_value,
)
data_list = []
coords_list = []
for mask in itertools.product(
*[[True, False] if isinstance(arg, COO) else [None] for arg in self.args]
):
if not any(mask):
continue
r = self._get_func_coords_data(mask)
if r is not None:
coords_list.append(r[0])
data_list.append(r[1])
# Concatenate matches and mismatches
data = (
np.concatenate(data_list)
if len(data_list)
else np.empty((0,), dtype=self.fill_value.dtype)
)
coords = (
np.concatenate(coords_list, axis=1)
if len(coords_list)
else np.empty((0, len(self.shape)), dtype=np.intp)
)
return COO(
coords,
data,
shape=self.shape,
has_duplicates=False,
fill_value=self.fill_value,
).asformat(self.out_type)
def _get_fill_value(self):
"""
A function that finds and returns the fill-value.
Raises
------
ValueError
If the fill-value is inconsistent.
"""
from ._coo import COO
zero_args = tuple(
arg.fill_value[...] if isinstance(arg, COO) else arg for arg in self.args
)
# Some elemwise functions require a dtype argument, some abhorr it.
try:
fill_value_array = self.func(
*np.broadcast_arrays(*zero_args), dtype=self.dtype, **self.kwargs
)
except TypeError:
fill_value_array = self.func(
*np.broadcast_arrays(*zero_args), **self.kwargs
)
try:
fill_value = fill_value_array[(0,) * fill_value_array.ndim]
except IndexError:
zero_args = tuple(
arg.fill_value if isinstance(arg, COO) else _zero_of_dtype(arg.dtype)
for arg in self.args
)
fill_value = self.func(*zero_args, **self.kwargs)[()]
equivalent_fv = equivalent(fill_value, fill_value_array).all()
if not equivalent_fv and self.shape != self.ndarray_shape:
raise ValueError(
"Performing a mixed sparse-dense operation that would result in a dense array. "
"Please make sure that func(sparse_fill_values, ndarrays) is a constant array."
)
elif not equivalent_fv:
self._dense_result = True
# Store dtype separately if needed.
if self.dtype is not None:
fill_value = fill_value.astype(self.dtype)
self.fill_value = fill_value
self.dtype = self.fill_value.dtype
def _check_broadcast(self):
"""
Checks if adding the ndarrays changes the broadcast shape.
Raises
------
ValueError
If the check fails.
"""
from ._coo import COO
full_shape = _get_nary_broadcast_shape(*tuple(arg.shape for arg in self.args))
non_ndarray_shape = _get_nary_broadcast_shape(
*tuple(arg.shape for arg in self.args if isinstance(arg, COO))
)
ndarray_shape = _get_nary_broadcast_shape(
*tuple(arg.shape for arg in self.args if isinstance(arg, np.ndarray))
)
self.shape = full_shape
self.ndarray_shape = ndarray_shape
self.non_ndarray_shape = non_ndarray_shape
def _get_func_coords_data(self, mask):
"""
Gets the coords/data for a certain mask
Parameters
----------
mask : tuple[Union[bool, NoneType]]
The mask determining whether to match or unmatch.
Returns
-------
None or tuple
The coords/data tuple for the given mask.
"""
from ._coo import COO
matched_args = [arg for arg, m in zip(self.args, mask) if m is not None and m]
unmatched_args = [
arg for arg, m in zip(self.args, mask) if m is not None and not m
]
ndarray_args = [arg for arg, m in zip(self.args, mask) if m is None]
matched_broadcast_shape = _get_nary_broadcast_shape(
*tuple(arg.shape for arg in itertools.chain(matched_args, ndarray_args))
)
matched_arrays = self._match_coo(
*matched_args, cache=self.cache, broadcast_shape=matched_broadcast_shape
)
func_args = []
m_arg = 0
for arg, m in zip(self.args, mask):
if m is None:
func_args.append(
np.broadcast_to(arg, matched_broadcast_shape)[
tuple(matched_arrays[0].coords)
]
)
continue
if m:
func_args.append(matched_arrays[m_arg].data)
m_arg += 1
else:
func_args.append(arg.fill_value)
# Try our best to preserve the output dtype.
try:
func_data = self.func(*func_args, dtype=self.dtype, **self.kwargs)
except TypeError:
try:
func_args = np.broadcast_arrays(*func_args)
out = np.empty(func_args[0].shape, dtype=self.dtype)
func_data = self.func(*func_args, out=out, **self.kwargs)
except TypeError:
func_data = self.func(*func_args, **self.kwargs).astype(self.dtype)
unmatched_mask = ~equivalent(func_data, self.fill_value)
if not unmatched_mask.any():
return None
func_coords = matched_arrays[0].coords[:, unmatched_mask]
func_data = func_data[unmatched_mask]
if matched_arrays[0].shape != self.shape:
params = _get_broadcast_parameters(matched_arrays[0].shape, self.shape)
func_coords, func_data = _get_expanded_coords_data(
func_coords, func_data, params, self.shape
)
if all(m is None or m for m in mask):
return func_coords, func_data
# Not really sorted but we need the sortedness.
func_array = COO(
func_coords, func_data, self.shape, has_duplicates=False, sorted=True
)
unmatched_mask = np.ones(func_array.nnz, dtype=np.bool_)
for arg in unmatched_args:
matched_idx = self._match_coo(func_array, arg, return_midx=True)[0]
unmatched_mask[matched_idx] = False
coords = np.asarray(func_array.coords[:, unmatched_mask], order="C")
data = np.asarray(func_array.data[unmatched_mask], order="C")
return coords, data
@staticmethod
def _match_coo(*args, **kwargs):
"""
Matches the coordinates for any number of input :obj:`COO` arrays.
Equivalent to "sparse" broadcasting for all arrays.
Parameters
----------
*args : Tuple[COO]
The input :obj:`COO` arrays.
return_midx : bool
Whether to return matched indices or matched arrays. Matching
only supported for two arrays. ``False`` by default.
cache : dict
Cache of things already matched. No cache by default.
Returns
-------
matched_idx : List[ndarray]
The indices of matched elements in the original arrays. Only returned if
``return_midx`` is ``True``.
matched_arrays : List[COO]
The expanded, matched :obj:`COO` objects. Only returned if
``return_midx`` is ``False``.
"""
from ._coo import COO
from ._coo.common import linear_loc
cache = kwargs.pop("cache", None)
return_midx = kwargs.pop("return_midx", False)
broadcast_shape = kwargs.pop("broadcast_shape", None)
if kwargs:
raise ValueError("Unknown kwargs: {}".format(kwargs.keys()))
if return_midx and (len(args) != 2 or cache is not None):
raise NotImplementedError(
"Matching indices only supported for two args, and no cache."
)
matched_arrays = [args[0]]
cache_key = [id(args[0])]
for arg2 in args[1:]:
cache_key.append(id(arg2))
key = tuple(cache_key)
if cache is not None and key in cache:
matched_arrays = cache[key]
continue
cargs = [matched_arrays[0], arg2]
current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape)
params = [
_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs
]
reduced_params = [all(p) for p in zip(*params)]
reduced_shape = _get_reduced_shape(
arg2.shape, _rev_idx(reduced_params, arg2.ndim)
)
reduced_coords = [
_get_reduced_coords(arg.coords, _rev_idx(reduced_params, arg.ndim))
for arg in cargs
]
linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords]
sorted_idx = [np.argsort(idx) for idx in linear]
linear = [idx[s] for idx, s in zip(linear, sorted_idx)]
matched_idx = _match_arrays(*linear)
if return_midx:
matched_idx = [
sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx)
]
return matched_idx
coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)]
mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)]
mcoords = _get_matching_coords(mcoords, params)
mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays]
mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]])
# The coords aren't truly sorted, but we don't need them, so it's
# best to avoid the extra cost.
matched_arrays = [
COO(mcoords, md, shape=current_shape, sorted=True, has_duplicates=False)
for md in mdata
]
if cache is not None:
cache[key] = matched_arrays
if broadcast_shape is not None and matched_arrays[0].shape != broadcast_shape:
params = _get_broadcast_parameters(matched_arrays[0].shape, broadcast_shape)
coords, idx = _get_expanded_coords_data(
matched_arrays[0].coords,
np.arange(matched_arrays[0].nnz),
params,
broadcast_shape,
)
matched_arrays = [
COO(
coords,
arr.data[idx],
shape=broadcast_shape,
sorted=True,
has_duplicates=False,
)
for arr in matched_arrays
]
return matched_arrays
def _rev_idx(arg, idx):
if idx == 0:
return arg[len(arg) :]
return arg[-idx:]
| [
"numpy.result_type",
"numpy.concatenate",
"numpy.empty",
"numpy.ix_",
"numpy.asarray",
"itertools.zip_longest",
"numpy.ones",
"numpy.argsort",
"numba.jit",
"numpy.array",
"numpy.arange",
"numpy.broadcast_to",
"numpy.broadcast_arrays",
"itertools.chain",
"numpy.prod"
] | [((1259, 1295), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (1268, 1295), False, 'import numba\n'), ((8105, 8120), 'numpy.ix_', 'np.ix_', (['*arrays'], {}), '(*arrays)\n', (8111, 8120), True, 'import numpy as np\n'), ((8139, 8174), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*broadcastable'], {}), '(*broadcastable)\n', (8158, 8174), True, 'import numpy as np\n'), ((8252, 8275), 'numpy.result_type', 'np.result_type', (['*arrays'], {}), '(*arrays)\n', (8266, 8275), True, 'import numpy as np\n'), ((8286, 8320), 'numpy.empty', 'np.empty', (['(rows * cols)'], {'dtype': 'dtype'}), '(rows * cols, dtype=dtype)\n', (8294, 8320), True, 'import numpy as np\n'), ((9273, 9315), 'numpy.asarray', 'np.asarray', (['matching_coords'], {'dtype': 'np.intp'}), '(matching_coords, dtype=np.intp)\n', (9283, 9315), True, 'import numpy as np\n'), ((2276, 2307), 'numpy.array', 'np.array', (['a_ind'], {'dtype': 'np.uintp'}), '(a_ind, dtype=np.uintp)\n', (2284, 2307), True, 'import numpy as np\n'), ((2309, 2340), 'numpy.array', 'np.array', (['b_ind'], {'dtype': 'np.uintp'}), '(b_ind, dtype=np.uintp)\n', (2317, 2340), True, 'import numpy as np\n'), ((7515, 7542), 'numpy.asarray', 'np.asarray', (['expanded_coords'], {}), '(expanded_coords)\n', (7525, 7542), True, 'import numpy as np\n'), ((7544, 7569), 'numpy.asarray', 'np.asarray', (['expanded_data'], {}), '(expanded_data)\n', (7554, 7569), True, 'import numpy as np\n'), ((8192, 8221), 'numpy.prod', 'np.prod', (['broadcasted[0].shape'], {}), '(broadcasted[0].shape)\n', (8199, 8221), True, 'import numpy as np\n'), ((19406, 19445), 'numpy.ones', 'np.ones', (['func_array.nnz'], {'dtype': 'np.bool_'}), '(func_array.nnz, dtype=np.bool_)\n', (19413, 19445), True, 'import numpy as np\n'), ((19628, 19687), 'numpy.asarray', 'np.asarray', (['func_array.coords[:, unmatched_mask]'], {'order': '"""C"""'}), "(func_array.coords[:, unmatched_mask], order='C')\n", (19638, 19687), True, 
'import numpy as np\n'), ((19703, 19757), 'numpy.asarray', 'np.asarray', (['func_array.data[unmatched_mask]'], {'order': '"""C"""'}), "(func_array.data[unmatched_mask], order='C')\n", (19713, 19757), True, 'import numpy as np\n'), ((1836, 1863), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.uintp'}), '(0, dtype=np.uintp)\n', (1844, 1863), True, 'import numpy as np\n'), ((1865, 1892), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.uintp'}), '(0, dtype=np.uintp)\n', (1873, 1892), True, 'import numpy as np\n'), ((7074, 7114), 'numpy.prod', 'np.prod', (['broadcast_shape'], {'dtype': 'np.int64'}), '(broadcast_shape, dtype=np.int64)\n', (7081, 7114), True, 'import numpy as np\n'), ((7131, 7158), 'numpy.asarray', 'np.asarray', (['expanded_coords'], {}), '(expanded_coords)\n', (7141, 7158), True, 'import numpy as np\n'), ((7160, 7185), 'numpy.asarray', 'np.asarray', (['expanded_data'], {}), '(expanded_data)\n', (7170, 7185), True, 'import numpy as np\n'), ((12847, 12890), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'self.fill_value.dtype'}), '((0,), dtype=self.fill_value.dtype)\n', (12855, 12890), True, 'import numpy as np\n'), ((13635, 13660), 'numpy.concatenate', 'np.concatenate', (['data_list'], {}), '(data_list)\n', (13649, 13660), True, 'import numpy as np\n'), ((13708, 13751), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'self.fill_value.dtype'}), '((0,), dtype=self.fill_value.dtype)\n', (13716, 13751), True, 'import numpy as np\n'), ((13793, 13828), 'numpy.concatenate', 'np.concatenate', (['coords_list'], {'axis': '(1)'}), '(coords_list, axis=1)\n', (13807, 13828), True, 'import numpy as np\n'), ((4602, 4665), 'itertools.zip_longest', 'zip_longest', (['shape[::-1]', 'broadcast_shape[::-1]'], {'fillvalue': 'None'}), '(shape[::-1], broadcast_shape[::-1], fillvalue=None)\n', (4613, 4665), False, 'from itertools import zip_longest\n'), ((6743, 6770), 'numpy.arange', 'np.arange', (['d'], {'dtype': 'np.intp'}), '(d, dtype=np.intp)\n', (6752, 6770), True, 
'import numpy as np\n'), ((22233, 22248), 'numpy.argsort', 'np.argsort', (['idx'], {}), '(idx)\n', (22243, 22248), True, 'import numpy as np\n'), ((23598, 23630), 'numpy.arange', 'np.arange', (['matched_arrays[0].nnz'], {}), '(matched_arrays[0].nnz)\n', (23607, 23630), True, 'import numpy as np\n'), ((4006, 4058), 'itertools.zip_longest', 'zip_longest', (['shape1[::-1]', 'shape2[::-1]'], {'fillvalue': '(1)'}), '(shape1[::-1], shape2[::-1], fillvalue=1)\n', (4017, 4058), False, 'from itertools import zip_longest\n'), ((14645, 14676), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*zero_args'], {}), '(*zero_args)\n', (14664, 14676), True, 'import numpy as np\n'), ((18314, 18345), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*func_args'], {}), '(*func_args)\n', (18333, 18345), True, 'import numpy as np\n'), ((18368, 18414), 'numpy.empty', 'np.empty', (['func_args[0].shape'], {'dtype': 'self.dtype'}), '(func_args[0].shape, dtype=self.dtype)\n', (18376, 18414), True, 'import numpy as np\n'), ((11896, 11911), 'numpy.asarray', 'np.asarray', (['arg'], {}), '(arg)\n', (11906, 11911), True, 'import numpy as np\n'), ((14809, 14840), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*zero_args'], {}), '(*zero_args)\n', (14828, 14840), True, 'import numpy as np\n'), ((17755, 17800), 'numpy.broadcast_to', 'np.broadcast_to', (['arg', 'matched_broadcast_shape'], {}), '(arg, matched_broadcast_shape)\n', (17770, 17800), True, 'import numpy as np\n'), ((17395, 17438), 'itertools.chain', 'itertools.chain', (['matched_args', 'ndarray_args'], {}), '(matched_args, ndarray_args)\n', (17410, 17438), False, 'import itertools\n')] |
import numpy as np
import trimesh
class Mesh(object):
def __init__(self, mesh, normalize=False):
self.mesh = mesh
# Normalize points such that they are in the unit cube
if normalize:
bbox = self.mesh.bounding_box.bounds
# Compute location and scale
loc = (bbox[0] + bbox[1]) / 2
scale = (bbox[1] - bbox[0]).max() # / (1 - 0.05)
# Transform input mesh
self.mesh.apply_translation(-loc)
self.mesh.apply_scale(1 / scale)
# Make sure that the input meshes are watertight
assert self.mesh.is_watertight
self._vertices = None
self._vertex_normals = None
self._faces = None
self._face_normals = None
@property
def vertices(self):
if self._vertices is None:
self._vertices = np.array(self.mesh.vertices)
return self._vertices
@property
def vertex_normals(self):
if self._vertex_normals is None:
self._vertex_normals = np.array(self.mesh.vertex_normals)
return self._vertex_normals
@property
def faces(self):
if self._faces is None:
self._faces = np.array(self.mesh.faces)
return self._faces
@property
def face_normals(self):
if self._face_normals is None:
self._face_normals = np.array(self.mesh.face_normals)
return self._face_normals
def sample_faces(self, N=10000):
P, t = trimesh.sample.sample_surface(self.mesh, N)
return np.hstack([
P, self.face_normals[t, :]
])
@classmethod
def from_file(cls, filename, normalize):
return cls(trimesh.load(filename, process=False), normalize)
def read_mesh_file(filename, normalize):
return Mesh.from_file(filename, normalize)
| [
"trimesh.sample.sample_surface",
"numpy.array",
"trimesh.load",
"numpy.hstack"
] | [((1496, 1539), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['self.mesh', 'N'], {}), '(self.mesh, N)\n', (1525, 1539), False, 'import trimesh\n'), ((1555, 1594), 'numpy.hstack', 'np.hstack', (['[P, self.face_normals[t, :]]'], {}), '([P, self.face_normals[t, :]])\n', (1564, 1594), True, 'import numpy as np\n'), ((863, 891), 'numpy.array', 'np.array', (['self.mesh.vertices'], {}), '(self.mesh.vertices)\n', (871, 891), True, 'import numpy as np\n'), ((1043, 1077), 'numpy.array', 'np.array', (['self.mesh.vertex_normals'], {}), '(self.mesh.vertex_normals)\n', (1051, 1077), True, 'import numpy as np\n'), ((1208, 1233), 'numpy.array', 'np.array', (['self.mesh.faces'], {}), '(self.mesh.faces)\n', (1216, 1233), True, 'import numpy as np\n'), ((1376, 1408), 'numpy.array', 'np.array', (['self.mesh.face_normals'], {}), '(self.mesh.face_normals)\n', (1384, 1408), True, 'import numpy as np\n'), ((1699, 1736), 'trimesh.load', 'trimesh.load', (['filename'], {'process': '(False)'}), '(filename, process=False)\n', (1711, 1736), False, 'import trimesh\n')] |
"""Tests for fooof.plts.aperiodic."""
import numpy as np
from fooof.tests.tutils import plot_test
from fooof.plts.aperiodic import *
###################################################################################################
###################################################################################################
@plot_test
def test_plot_aperiodic_params(skip_if_no_mpl):
# Test for 'fixed' mode: offset, exponent
aps = np.array([[1, 1], [0.5, 0.5], [2, 2]])
plot_aperiodic_params(aps)
# Test for multiple inputs
plot_aperiodic_params([aps, aps])
# Test for 'knee' mode: offset, knee exponent
aps = np.array([[1, 100, 1], [0.5, 150, 0.5], [2, 200, 2]])
plot_aperiodic_params(aps)
@plot_test
def test_plot_aperiodic_fits(skip_if_no_mpl):
aps = np.array([[1, 1], [0.5, 0.5], [2, 2]])
# Test for single group input
plot_aperiodic_fits(aps, [1, 50], control_offset=True)
# Test for multiple input
plot_aperiodic_fits([aps, aps], [1, 50], control_offset=True)
# Test for 'knee' mode: offset, knee exponent
aps = np.array([[1, 100, 1], [0.5, 150, 0.5], [2, 200, 2]])
plot_aperiodic_fits(aps, [1, 50])
| [
"numpy.array"
] | [((454, 492), 'numpy.array', 'np.array', (['[[1, 1], [0.5, 0.5], [2, 2]]'], {}), '([[1, 1], [0.5, 0.5], [2, 2]])\n', (462, 492), True, 'import numpy as np\n'), ((655, 708), 'numpy.array', 'np.array', (['[[1, 100, 1], [0.5, 150, 0.5], [2, 200, 2]]'], {}), '([[1, 100, 1], [0.5, 150, 0.5], [2, 200, 2]])\n', (663, 708), True, 'import numpy as np\n'), ((809, 847), 'numpy.array', 'np.array', (['[[1, 1], [0.5, 0.5], [2, 2]]'], {}), '([[1, 1], [0.5, 0.5], [2, 2]])\n', (817, 847), True, 'import numpy as np\n'), ((1100, 1153), 'numpy.array', 'np.array', (['[[1, 100, 1], [0.5, 150, 0.5], [2, 200, 2]]'], {}), '([[1, 100, 1], [0.5, 150, 0.5], [2, 200, 2]])\n', (1108, 1153), True, 'import numpy as np\n')] |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions and classes for embedding processing.
"""
import sys
from typing import Callable, Dict
import numpy as np
from BIT_DL.pytorch.hyperparams import HParams
from BIT_DL.pytorch.utils import utils
__all__ = [
"load_word2vec",
"load_glove",
"Embedding",
]
def load_word2vec(filename: str, vocab: Dict[str, int],
word_vecs: np.ndarray) -> np.ndarray:
r"""Loads embeddings in the word2vec binary format which has a header line
containing the number of vectors and their dimensionality (two integers),
followed with number-of-vectors lines each of which is formatted as
``<word-string> <embedding-vector>``.
Args:
filename (str): Path to the embedding file.
vocab (dict): A dictionary that maps token strings to integer index.
Tokens not in :attr:`vocab` are not read.
word_vecs: A 2D numpy array of shape `[vocab_size, embed_dim]`
which is updated as reading from the file.
Returns:
The updated :attr:`word_vecs`.
"""
with open(filename, "rb") as fin:
header = fin.readline()
vocab_size, vector_size = [int(s) for s in header.split()]
if vector_size != word_vecs.shape[1]:
raise ValueError("Inconsistent word vector sizes: %d vs %d" %
(vector_size, word_vecs.shape[1]))
binary_len = np.dtype('float32').itemsize * vector_size
for _ in np.arange(vocab_size):
chars = []
while True:
char = fin.read(1)
if char == b' ':
break
if char != b'\n':
chars.append(char)
word = b''.join(chars).decode('utf-8')
if word in vocab:
word_vecs[vocab[word]] = np.frombuffer(
fin.read(binary_len), dtype='float32')
else:
fin.read(binary_len)
return word_vecs
def load_glove(filename: str, vocab: Dict[str, int],
word_vecs: np.ndarray) -> np.ndarray:
r"""Loads embeddings in the glove text format in which each line is
``<word-string> <embedding-vector>``. Dimensions of the embedding vector
are separated with whitespace characters.
Args:
filename (str): Path to the embedding file.
vocab (dict): A dictionary that maps token strings to integer index.
Tokens not in :attr:`vocab` are not read.
word_vecs: A 2D numpy array of shape `[vocab_size, embed_dim]`
which is updated as reading from the file.
Returns:
The updated :attr:`word_vecs`.
"""
with open(filename) as fin:
for line in fin:
vec = line.strip().split()
if len(vec) == 0:
continue
word, vec = vec[0], vec[1:]
if word not in vocab:
continue
if len(vec) != word_vecs.shape[1]:
raise ValueError("Inconsistent word vector sizes: %d vs %d" %
(len(vec), word_vecs.shape[1]))
word_vecs[vocab[word]] = np.array([float(v) for v in vec])
return word_vecs
class Embedding:
r"""Embedding class that loads token embedding vectors from file. Token
embeddings not in the embedding file are initialized as specified in
:attr:`hparams`.
Args:
vocab (dict): A dictionary that maps token strings to integer index.
hparams (dict): Hyperparameters. See :meth:`default_hparams` for the
defaults.
"""
def __init__(self, vocab: Dict[str, int],
hparams=None):
self._hparams = HParams(hparams, self.default_hparams())
# Initialize embeddings
init_fn_kwargs = self._hparams.init_fn.kwargs.todict()
if "shape" in init_fn_kwargs or "size" in init_fn_kwargs:
raise ValueError("Argument 'shape' or 'size' must not be "
"specified. They are inferred automatically.")
init_fn: Callable[..., np.ndarray]
init_fn = utils.get_function(
self._hparams.init_fn.type,
["numpy.random", "numpy", "texar.torch.custom"])
try:
self._word_vecs = init_fn( # type: ignore
size=[len(vocab), self._hparams.dim], **init_fn_kwargs)
except TypeError:
self._word_vecs = init_fn( # type: ignore
shape=[len(vocab), self._hparams.dim], **init_fn_kwargs)
# Optionally read embeddings from file
if self._hparams.file is not None and self._hparams.file != "":
read_fn: Callable[[str, Dict[str, int], np.ndarray], np.ndarray]
read_fn = utils.get_function( # type: ignore
self._hparams.read_fn,
["texar.torch.data.embedding", "texar.torch.data",
"texar.torch.custom"])
self._word_vecs = read_fn(self._hparams.file,
vocab, self._word_vecs)
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values:
.. code-block:: python
{
"file": "",
"dim": 50,
"read_fn": "load_word2vec",
"init_fn": {
"type": "numpy.random.uniform",
"kwargs": {
"low": -0.1,
"high": 0.1,
}
},
}
Here:
`"file"`: str
Path to the embedding file. If not provided, all embeddings are
initialized with the initialization function.
`"dim"`: int
Dimension size of each embedding vector
`"read_fn"`: str or callable
Function to read the embedding file. This can be the function,
or its string name or full module path. For example,
.. code-block:: python
"read_fn": texar.torch.data.load_word2vec
"read_fn": "load_word2vec"
"read_fn": "texar.torch.data.load_word2vec"
"read_fn": "my_module.my_read_fn"
If function string name is used, the function must be in
one of the modules: :mod:`texar.torch.data` or
:mod:`texar.torch.custom`.
The function must have the same signature as with
:func:`load_word2vec`.
`"init_fn"`: dict
Hyperparameters of the initialization function used to initialize
embedding of tokens missing in the embedding
file.
The function must accept argument named `size` or `shape` to
specify the output shape, and return a numpy array of the shape.
The `dict` has the following fields:
`"type"`: str or callable
The initialization function. Can be either the function,
or its string name or full module path.
`"kwargs"`: dict
Keyword arguments for calling the function. The function
is called with :python:`init_fn(size=[.., ..], **kwargs)`.
"""
return {
"file": "",
"dim": 50,
"read_fn": "load_word2vec",
"init_fn": {
"type": "numpy.random.uniform",
"kwargs": {
"low": -0.1,
"high": 0.1,
},
},
"@no_typecheck": ["read_fn", "init_fn"]
}
@property
def word_vecs(self):
r"""2D numpy array of shape `[vocab_size, embedding_dim]`.
"""
return self._word_vecs
@property
def vector_size(self):
r"""The embedding dimension size.
"""
return self._hparams.dim
| [
"numpy.dtype",
"numpy.arange",
"BIT_DL.pytorch.utils.utils.get_function"
] | [((2058, 2079), 'numpy.arange', 'np.arange', (['vocab_size'], {}), '(vocab_size)\n', (2067, 2079), True, 'import numpy as np\n'), ((4681, 4780), 'BIT_DL.pytorch.utils.utils.get_function', 'utils.get_function', (['self._hparams.init_fn.type', "['numpy.random', 'numpy', 'texar.torch.custom']"], {}), "(self._hparams.init_fn.type, ['numpy.random', 'numpy',\n 'texar.torch.custom'])\n", (4699, 4780), False, 'from BIT_DL.pytorch.utils import utils\n'), ((5316, 5435), 'BIT_DL.pytorch.utils.utils.get_function', 'utils.get_function', (['self._hparams.read_fn', "['texar.torch.data.embedding', 'texar.torch.data', 'texar.torch.custom']"], {}), "(self._hparams.read_fn, ['texar.torch.data.embedding',\n 'texar.torch.data', 'texar.torch.custom'])\n", (5334, 5435), False, 'from BIT_DL.pytorch.utils import utils\n'), ((1998, 2017), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (2006, 2017), True, 'import numpy as np\n')] |
import numpy as np
## CONVOLUTIONAL NEURAL NETWORK
import torch
import torch.nn as nn
from torch.distributions import Normal, Categorical
import torch.optim as optim
import torch.optim as optim
# A function to randomly initialize the weights
def init_weights(m):
    """Randomly initialize a Linear layer: weights ~ N(0, 0.1), bias = 0.1.

    Non-Linear modules are left untouched, so this can be passed to
    ``Module.apply``.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.normal_(m.weight, mean=0, std=0.1)  # centered Gaussian, sigma = 0.1
    nn.init.constant_(m.bias, 0.1)              # b = 0.1
def kernelOutputSize(input_size, kernel_size, stride, padding):
    """Spatial output size of a conv/pool layer: int((W - K + 2P) / S) + 1."""
    numerator = input_size - kernel_size + 2 * padding
    return int(numerator / stride) + 1
class CNN(nn.Module):
def __init__(self,
height_visual_inputs = 240, # Our images : 240*320*3
width_visual_inputs = 320,
channels_visual_inputs = 3,
size_observation_space = 13, # Our robot : 9 joints + 4 sensors
num_actions = 9, # Our robot : 9 angles
init = True,
std = 0.0):
super(CNN, self).__init__()
self.convolutions_1 = nn.Sequential(
nn.Conv2d(channels_visual_inputs, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU()
)
self.convolutions_2 = nn.Sequential(
nn.Conv2d(64, 32, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.convolutions_3 = nn.Sequential(
nn.Conv2d(32, 16, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.dropout = nn.Dropout()
self.flatten = nn.Linear(16 * 15 * 20 * 2, 128) #*2 is because we have obs AND goal
self.other_obs_layer = nn.Linear(size_observation_space, 128)
self.fc_1 = nn.Linear(128 + 128, 128)
self.fc_2 = nn.Linear(128, 32)
self.fc_3 = nn.Linear(32, num_actions)
self.optimizer = optim.Adam(self.parameters())
self.num_actions = num_actions
def forward(self, visual_obs, others_obs, goal):
obs_and_goal = torch.cat((visual_obs, goal))
x = self.convolutions_1(obs_and_goal)
x = self.convolutions_2(x)
x = self.convolutions_3(x)
x = self.dropout(x)
x = torch.reshape(x, (-1,))
cnn_obs_pic = self.flatten(x)
obs_others = self.other_obs_layer(others_obs)
full_obs = torch.cat((cnn_obs_pic, obs_others))
out = self.fc_1(full_obs)
out = self.fc_2(out)
out = self.fc_3(out)
return out
def forward_noise(self, visual_obs, others_obs, goal, std=1.0):
actions = self.forward(visual_obs, others_obs, goal)
log_std = torch.ones(1, self.num_actions) * std
noisy_action = Normal(actions, std)
return noisy_action.sample()
def observationToCNNInput(observation, visual_width=320, visual_height=240):
    """
    Unpack an environment observation dict into CNN-ready tensors.

    Returns ``(retina, proprio, goal)``: the camera image and the goal
    image as float tensors of shape (-1, 3, H, W), plus the joint
    positions concatenated with the touch-sensor readings as a flat
    float tensor.
    """
    proprio = torch.cat((
        torch.FloatTensor(observation["joint_positions"]),
        torch.FloatTensor(observation["touch_sensors"]),
    ))
    image_shape = (-1, 3, visual_height, visual_width)
    retina = torch.FloatTensor(observation["retina"]).reshape(image_shape)
    goal = torch.FloatTensor(observation["goal"]).reshape(image_shape)
    return retina, proprio, goal
###########################################################################
class TestPolicy:
    """Baseline policy that queries the CNN with Gaussian exploration noise."""

    def __init__(self, action_space):
        self.action_space = action_space
        self.action = np.zeros(action_space.shape[0])
        self.model = CNN()

    def step(self, observation, reward, done):
        """Compute the next action from the current observation."""
        visual, proprio, goal = observationToCNNInput(observation)
        noisy = self.model.forward_noise(visual, proprio, goal)
        self.action = noisy.detach()
        return self.action

    def update(self, rewards):
        # TODO: the loss below is rebuilt from the raw rewards and is not
        # connected to the model's computation graph, so backward() yields
        # no parameter gradients and the optimizer step is a no-op.
        loss = torch.FloatTensor(rewards).detach()
        loss.requires_grad = True
        loss = loss.mean()
        self.model.optimizer.zero_grad()
        loss.backward()
        self.model.optimizer.step()

TestController = TestPolicy
| [
"torch.nn.Dropout",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.FloatTensor",
"torch.cat",
"numpy.zeros",
"torch.nn.init.normal_",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.distributions.Normal",
"torch.reshape"
] | [((3179, 3228), 'torch.FloatTensor', 'torch.FloatTensor', (["observation['joint_positions']"], {}), "(observation['joint_positions'])\n", (3196, 3228), False, 'import torch\n'), ((3249, 3296), 'torch.FloatTensor', 'torch.FloatTensor', (["observation['touch_sensors']"], {}), "(observation['touch_sensors'])\n", (3266, 3296), False, 'import torch\n'), ((3317, 3357), 'torch.FloatTensor', 'torch.FloatTensor', (["observation['retina']"], {}), "(observation['retina'])\n", (3334, 3357), False, 'import torch\n'), ((3378, 3437), 'torch.reshape', 'torch.reshape', (['retina', '(-1, 3, visual_height, visual_width)'], {}), '(retina, (-1, 3, visual_height, visual_width))\n', (3391, 3437), False, 'import torch\n'), ((3458, 3496), 'torch.FloatTensor', 'torch.FloatTensor', (["observation['goal']"], {}), "(observation['goal'])\n", (3475, 3496), False, 'import torch\n'), ((3517, 3574), 'torch.reshape', 'torch.reshape', (['goal', '(-1, 3, visual_height, visual_width)'], {}), '(goal, (-1, 3, visual_height, visual_width))\n', (3530, 3574), False, 'import torch\n'), ((309, 351), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'mean': '(0)', 'std': '(0.1)'}), '(m.weight, mean=0, std=0.1)\n', (324, 351), True, 'import torch.nn as nn\n'), ((398, 428), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.1)'], {}), '(m.bias, 0.1)\n', (415, 428), True, 'import torch.nn as nn\n'), ((1690, 1702), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1700, 1702), True, 'import torch.nn as nn\n'), ((1727, 1759), 'torch.nn.Linear', 'nn.Linear', (['(16 * 15 * 20 * 2)', '(128)'], {}), '(16 * 15 * 20 * 2, 128)\n', (1736, 1759), True, 'import torch.nn as nn\n'), ((1828, 1866), 'torch.nn.Linear', 'nn.Linear', (['size_observation_space', '(128)'], {}), '(size_observation_space, 128)\n', (1837, 1866), True, 'import torch.nn as nn\n'), ((1891, 1916), 'torch.nn.Linear', 'nn.Linear', (['(128 + 128)', '(128)'], {}), '(128 + 128, 128)\n', (1900, 1916), True, 'import torch.nn as 
nn\n'), ((1940, 1958), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(32)'], {}), '(128, 32)\n', (1949, 1958), True, 'import torch.nn as nn\n'), ((1982, 2008), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'num_actions'], {}), '(32, num_actions)\n', (1991, 2008), True, 'import torch.nn as nn\n'), ((2192, 2221), 'torch.cat', 'torch.cat', (['(visual_obs, goal)'], {}), '((visual_obs, goal))\n', (2201, 2221), False, 'import torch\n'), ((2390, 2413), 'torch.reshape', 'torch.reshape', (['x', '(-1,)'], {}), '(x, (-1,))\n', (2403, 2413), False, 'import torch\n'), ((2530, 2566), 'torch.cat', 'torch.cat', (['(cnn_obs_pic, obs_others)'], {}), '((cnn_obs_pic, obs_others))\n', (2539, 2566), False, 'import torch\n'), ((2900, 2920), 'torch.distributions.Normal', 'Normal', (['actions', 'std'], {}), '(actions, std)\n', (2906, 2920), False, 'from torch.distributions import Normal, Categorical\n'), ((3595, 3632), 'torch.cat', 'torch.cat', (['(joint_pos, touch_sensors)'], {}), '((joint_pos, touch_sensors))\n', (3604, 3632), False, 'import torch\n'), ((3851, 3882), 'numpy.zeros', 'np.zeros', (['action_space.shape[0]'], {}), '(action_space.shape[0])\n', (3859, 3882), True, 'import numpy as np\n'), ((1153, 1226), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels_visual_inputs', '(64)'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)'}), '(channels_visual_inputs, 64, kernel_size=5, stride=1, padding=2)\n', (1162, 1226), True, 'import torch.nn as nn\n'), ((1240, 1249), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1247, 1249), True, 'import torch.nn as nn\n'), ((1322, 1375), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(32)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)'}), '(64, 32, kernel_size=5, stride=2, padding=2)\n', (1331, 1375), True, 'import torch.nn as nn\n'), ((1389, 1398), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1396, 1398), True, 'import torch.nn as nn\n'), ((1412, 1449), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), 
'(kernel_size=2, stride=2)\n', (1424, 1449), True, 'import torch.nn as nn\n'), ((1523, 1576), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(16)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)'}), '(32, 16, kernel_size=5, stride=2, padding=2)\n', (1532, 1576), True, 'import torch.nn as nn\n'), ((1590, 1599), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1597, 1599), True, 'import torch.nn as nn\n'), ((1613, 1650), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (1625, 1650), True, 'import torch.nn as nn\n'), ((2839, 2870), 'torch.ones', 'torch.ones', (['(1)', 'self.num_actions'], {}), '(1, self.num_actions)\n', (2849, 2870), False, 'import torch\n'), ((4350, 4376), 'torch.FloatTensor', 'torch.FloatTensor', (['rewards'], {}), '(rewards)\n', (4367, 4376), False, 'import torch\n')] |
import numpy as np
from .bbox_overlaps import bbox_overlaps
def eval_res(gt0, dt0, thr):
    """
    Match detections against ground truth at one IoU threshold.

    :param gt0: np.array[ng, 5], ground truth results [x, y, w, h, ignore]
    :param dt0: np.array[nd, 5], detection results [x, y, w, h, score]
    :param thr: float, IoU threshold
    :return gt1: np.array[ng, 5], gt match types
            dt1: np.array[nd, 6], dt match types
    """
    nd = len(dt0)
    ng = len(gt0)
    # Process detections in descending score order.
    dt = dt0[dt0[:, 4].argsort()[::-1]]
    gt_ignore_mask = gt0[:, 4] == 1
    gt = gt0[np.logical_not(gt_ignore_mask)]
    ig = gt0[gt_ignore_mask]
    ig[:, 4] = -ig[:, 4]  # -1 indicates ignore
    # Convert [x, y, w, h] -> [x1, y1, x2, y2] for the overlap computation.
    dt_format = dt[:, :4].copy()
    gt_format = gt[:, :4].copy()
    ig_format = ig[:, :4].copy()
    dt_format[:, 2:] += dt_format[:, :2]  # [x2, y2] = [w, h] + [x1, y1]
    gt_format[:, 2:] += gt_format[:, :2]
    ig_format[:, 2:] += ig_format[:, :2]
    iou_dtgt = bbox_overlaps(dt_format, gt_format, mode='iou')
    # BUG FIX: overlaps with ignore regions must be computed against the
    # ignored boxes (ig_format), not gt_format.  The old code left
    # ig_format unused and gave `oa` a width of 2*#gt instead of
    # #gt + #ignore, so the range(ng) loop below indexed the wrong (or an
    # out-of-bounds) column and ignore regions were never matched.
    iof_dtig = bbox_overlaps(dt_format, ig_format, mode='iof')
    oa = np.concatenate((iou_dtgt, iof_dtig), axis=1)
    # [nd, 6]
    dt1 = np.concatenate((dt, np.zeros((nd, 1), dtype=dt.dtype)), axis=1)
    # [ng, 5] -- non-ignored gt first, then ignore regions (flag -1).
    gt1 = np.concatenate((gt, ig), axis=0)
    for d in range(nd):
        bst_oa = thr
        bstg = -1  # index of matched gt
        bstm = 0   # best match type
        for g in range(ng):
            m = gt1[g, 4]
            # if gt already matched, continue to next gt
            if m == 1:
                continue
            # if dt already matched, and on ignore gt, nothing more to do
            if bstm != 0 and m == -1:
                break
            # continue to next gt until better match is found
            if oa[d, g] < bst_oa:
                continue
            bst_oa = oa[d, g]
            bstg = g
            bstm = 1 if m == 0 else -1  # 1: matched to gt, -1: matched to ignore
        # store match type for dt
        dt1[d, 5] = bstm
        # store match flag for gt
        if bstm == 1:
            gt1[bstg, 4] = 1
    return gt1, dt1
def voc_ap(rec, prec):
    """VOC-style average precision from recall/precision arrays.

    Pads the curve with (0, .) and (1, 0) sentinels, takes the precision
    envelope (monotonically non-increasing precision), and integrates over
    the recall steps.
    """
    mrec = np.concatenate(([0], rec, [1]))
    mpre = np.concatenate(([0], prec, [0]))
    # Precision envelope: running max from the right.
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    # Integrate only where recall actually changes.
    idx = np.flatnonzero(mrec[1:] != mrec[:-1]) + 1
    return np.sum((mrec[idx] - mrec[idx - 1]) * mpre[idx])
def calc_accuracy(num_imgs, all_gt, all_det, per_class=False):
    """
    Compute VisDrone-style AP / AR detection metrics over a dataset.

    :param num_imgs: int, number of images; must equal len(all_gt) and len(all_det)
    :param all_gt: list of np.array[m, 8], [:, 4] == 1 indicates ignored regions,
        which should be dropped before calling this function; [:, 5] is the
        object category in 1..10
    :param all_det: list of np.array[m, 6], truncation and occlusion not necessary;
        [:, 4] is the score and [:, 5] the category
    :param per_class: when True, additionally return the per-class AP vector
    :return: (ap_all, ap_50, ap_75, ar_1, ar_10, ar_100, ar_500[, ap_classwise])
    """
    assert num_imgs == len(all_gt) == len(all_det)
    # ap[c, x]: AP (percent) of class c at IoU-threshold index x.
    ap = np.zeros((10, 10), dtype=np.float32)
    # ar[c, x, y]: max recall (percent) of class c at IoU index x, max-dets index y.
    ar = np.zeros((10, 10, 4), dtype=np.float32)
    eval_class = []
    print('')
    for id_class in range(1, 11):
        print('evaluating object category {}/10...'.format(id_class))
        # NOTE(review): this appends once per *image* containing the class, so
        # eval_class holds duplicates and the means below weight each class by
        # how many images contain it -- presumably mirroring the original
        # devkit; confirm this is intended.
        for gt in all_gt:
            if np.any(gt[:, 5] == id_class):
                eval_class.append(id_class - 1)
        x = 0  # IoU-threshold index (thr = 0.50 .. 0.95)
        for thr in np.linspace(0.5, 0.95, num=10):
            y = 0  # max-detections index for (1, 10, 100, 500)
            for max_dets in (1, 10, 100, 500):
                gt_match = []
                det_match = []
                for gt, det in zip(all_gt, all_det):
                    # Truncate to the first max_dets detections (assumes each
                    # image's detections are pre-sorted by score -- TODO
                    # confirm against the caller) and keep the current class.
                    det_limited = det[:min(len(det), max_dets)]
                    mask_gt_cur_class = gt[:, 5] == id_class
                    mask_det_cur_class = det_limited[:, 5] == id_class
                    gt0 = gt[mask_gt_cur_class, :5]
                    dt0 = det_limited[mask_det_cur_class, :5]
                    gt1, dt1 = eval_res(gt0, dt0, thr)
                    # 1: matched, 0: unmatched, -1: ignore
                    gt_match.append(gt1[:, 4])
                    # [score, match type]
                    # 1: matched to gt, 0: unmatched, -1: matched to ignore
                    det_match.append(dt1[:, 4:6])
                gt_match = np.concatenate(gt_match, axis=0)
                det_match = np.concatenate(det_match, axis=0)
                # Rank all detections of this class by descending score.
                idrank = det_match[:, 0].argsort()[::-1]
                tp = np.cumsum(det_match[idrank, 1] == 1)
                rec = tp / max(1, len(gt_match))  # including ignore (already dropped)
                if len(rec):
                    ar[id_class - 1, x, y] = np.max(rec) * 100
                y += 1
            # AP uses the last (largest, 500) max-dets pass from the loop above.
            fp = np.cumsum(det_match[idrank, 1] == 0)
            prec = tp / (fp + tp).clip(min=1)
            ap[id_class - 1, x] = voc_ap(rec, prec) * 100
            x += 1
    # Average over the (duplicated, see NOTE above) per-image class entries.
    ap_all = np.mean(ap[eval_class, :])
    ap_50 = np.mean(ap[eval_class, 0])   # IoU = 0.50
    ap_75 = np.mean(ap[eval_class, 5])   # IoU = 0.75
    ar_1 = np.mean(ar[eval_class, :, 0])
    ar_10 = np.mean(ar[eval_class, :, 1])
    ar_100 = np.mean(ar[eval_class, :, 2])
    ar_500 = np.mean(ar[eval_class, :, 3])
    results = (ap_all, ap_50, ap_75, ar_1, ar_10, ar_100, ar_500)
    if per_class:
        ap_classwise = np.mean(ap, axis=1)
        results += (ap_classwise,)
    print('Evaluation completed. The performance of the detector is presented as follows.')
    return results
| [
"numpy.sum",
"numpy.flatnonzero",
"numpy.logical_not",
"numpy.zeros",
"numpy.any",
"numpy.cumsum",
"numpy.max",
"numpy.mean",
"numpy.linspace",
"numpy.concatenate"
] | [((1057, 1101), 'numpy.concatenate', 'np.concatenate', (['(iou_dtgt, iof_dtig)'], {'axis': '(1)'}), '((iou_dtgt, iof_dtig), axis=1)\n', (1071, 1101), True, 'import numpy as np\n'), ((1220, 1252), 'numpy.concatenate', 'np.concatenate', (['(gt, ig)'], {'axis': '(0)'}), '((gt, ig), axis=0)\n', (1234, 1252), True, 'import numpy as np\n'), ((2156, 2187), 'numpy.concatenate', 'np.concatenate', (['([0], rec, [1])'], {}), '(([0], rec, [1]))\n', (2170, 2187), True, 'import numpy as np\n'), ((2200, 2232), 'numpy.concatenate', 'np.concatenate', (['([0], prec, [0])'], {}), '(([0], prec, [0]))\n', (2214, 2232), True, 'import numpy as np\n'), ((2386, 2427), 'numpy.sum', 'np.sum', (['((mrec[i] - mrec[i - 1]) * mpre[i])'], {}), '((mrec[i] - mrec[i - 1]) * mpre[i])\n', (2392, 2427), True, 'import numpy as np\n'), ((2883, 2919), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {'dtype': 'np.float32'}), '((10, 10), dtype=np.float32)\n', (2891, 2919), True, 'import numpy as np\n'), ((2930, 2969), 'numpy.zeros', 'np.zeros', (['(10, 10, 4)'], {'dtype': 'np.float32'}), '((10, 10, 4), dtype=np.float32)\n', (2938, 2969), True, 'import numpy as np\n'), ((4793, 4819), 'numpy.mean', 'np.mean', (['ap[eval_class, :]'], {}), '(ap[eval_class, :])\n', (4800, 4819), True, 'import numpy as np\n'), ((4833, 4859), 'numpy.mean', 'np.mean', (['ap[eval_class, 0]'], {}), '(ap[eval_class, 0])\n', (4840, 4859), True, 'import numpy as np\n'), ((4873, 4899), 'numpy.mean', 'np.mean', (['ap[eval_class, 5]'], {}), '(ap[eval_class, 5])\n', (4880, 4899), True, 'import numpy as np\n'), ((4912, 4941), 'numpy.mean', 'np.mean', (['ar[eval_class, :, 0]'], {}), '(ar[eval_class, :, 0])\n', (4919, 4941), True, 'import numpy as np\n'), ((4955, 4984), 'numpy.mean', 'np.mean', (['ar[eval_class, :, 1]'], {}), '(ar[eval_class, :, 1])\n', (4962, 4984), True, 'import numpy as np\n'), ((4999, 5028), 'numpy.mean', 'np.mean', (['ar[eval_class, :, 2]'], {}), '(ar[eval_class, :, 2])\n', (5006, 5028), True, 'import numpy as np\n'), 
((5043, 5072), 'numpy.mean', 'np.mean', (['ar[eval_class, :, 3]'], {}), '(ar[eval_class, :, 3])\n', (5050, 5072), True, 'import numpy as np\n'), ((544, 574), 'numpy.logical_not', 'np.logical_not', (['gt_ignore_mask'], {}), '(gt_ignore_mask)\n', (558, 574), True, 'import numpy as np\n'), ((2334, 2371), 'numpy.flatnonzero', 'np.flatnonzero', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (2348, 2371), True, 'import numpy as np\n'), ((3275, 3305), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.95)'], {'num': '(10)'}), '(0.5, 0.95, num=10)\n', (3286, 3305), True, 'import numpy as np\n'), ((5187, 5206), 'numpy.mean', 'np.mean', (['ap'], {'axis': '(1)'}), '(ap, axis=1)\n', (5194, 5206), True, 'import numpy as np\n'), ((1150, 1183), 'numpy.zeros', 'np.zeros', (['(nd, 1)'], {'dtype': 'dt.dtype'}), '((nd, 1), dtype=dt.dtype)\n', (1158, 1183), True, 'import numpy as np\n'), ((3159, 3187), 'numpy.any', 'np.any', (['(gt[:, 5] == id_class)'], {}), '(gt[:, 5] == id_class)\n', (3165, 3187), True, 'import numpy as np\n'), ((4612, 4648), 'numpy.cumsum', 'np.cumsum', (['(det_match[idrank, 1] == 0)'], {}), '(det_match[idrank, 1] == 0)\n', (4621, 4648), True, 'import numpy as np\n'), ((4169, 4201), 'numpy.concatenate', 'np.concatenate', (['gt_match'], {'axis': '(0)'}), '(gt_match, axis=0)\n', (4183, 4201), True, 'import numpy as np\n'), ((4231, 4264), 'numpy.concatenate', 'np.concatenate', (['det_match'], {'axis': '(0)'}), '(det_match, axis=0)\n', (4245, 4264), True, 'import numpy as np\n'), ((4347, 4383), 'numpy.cumsum', 'np.cumsum', (['(det_match[idrank, 1] == 1)'], {}), '(det_match[idrank, 1] == 1)\n', (4356, 4383), True, 'import numpy as np\n'), ((4548, 4559), 'numpy.max', 'np.max', (['rec'], {}), '(rec)\n', (4554, 4559), True, 'import numpy as np\n')] |
# Script is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
import numpy as np
import argparse
import cv2 as cv
def parse_args():
    """Build the CLI parser for the colorization demo and parse sys.argv."""
    cli = argparse.ArgumentParser(
        description='iColor: deep interactive colorization')
    cli.add_argument(
        '--input',
        help='Path to image or video. Skip to capture frames from camera')
    cli.add_argument(
        '--prototxt', required=True,
        default='colorization_deploy_v2.prototxt',
        help='Path to colorization_deploy_v2.prototxt')
    cli.add_argument(
        '--caffemodel', required=True,
        default='colorization_release_v2.caffemodel',
        help='Path to colorization_release_v2.caffemodel')
    cli.add_argument(
        '--kernel', required=True,
        default='pts_in_hull.npy',
        help='Path to pts_in_hull.npy')
    return cli.parse_args()
if __name__ == '__main__':
    # Colorize a grayscale image with the Zhang et al. colorization network:
    # predict the ab channels from the L channel and recombine.
    # Network input size (the Caffe model expects 224x224 L-channel input).
    W_in = 224
    H_in = 224
    # (removed dead locals: `imshowSize` and `(H_out, W_out)` were never used)
    args = parse_args()
    # Create network graph and load weights.
    net = cv.dnn.readNetFromCaffe(args.prototxt, args.caffemodel)
    # Load the 313 quantized ab cluster centers and install them as a 1x1
    # convolution kernel.
    pts_in_hull = np.load(args.kernel)
    pts_in_hull = pts_in_hull.transpose().reshape(2, 313, 1, 1)
    net.getLayer(net.getLayerId('class8_ab')).blobs = [pts_in_hull.astype(np.float32)]
    net.getLayer(net.getLayerId('conv8_313_rh')).blobs = [np.full([1, 313], 2.606, np.float32)]
    # Read the input image (OpenCV loads BGR) and reorder channels to RGB.
    frame = cv.imread(args.input)
    frame = frame[:, :, [2, 1, 0]]
    # Scale intensities to [0, 1] and convert to Lab color space.
    img_rgb = (frame * 1.0 / 255).astype(np.float32)
    img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
    img_l = img_lab[:, :, 0]  # pull out the L (lightness) channel
    (H_orig, W_orig) = img_rgb.shape[:2]  # original image size
    # Resize to the network input size and mean-center the L channel.
    img_rs = cv.resize(img_rgb, (W_in, H_in))
    img_lab_rs = cv.cvtColor(img_rs, cv.COLOR_RGB2Lab)
    img_l_rs = img_lab_rs[:, :, 0]
    img_l_rs -= 50  # subtract 50 for mean-centering
    # Forward pass: predict the ab channels from L.
    net.setInput(cv.dnn.blobFromImage(img_l_rs))
    ab_dec = net.forward('class8_ab')[0, :, :, :].transpose((1, 2, 0))
    # Upsample predicted ab to the original resolution, rejoin with the
    # original L channel, and convert back to BGR.
    ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))
    img_lab_out = np.concatenate((img_l[:, :, np.newaxis], ab_dec_us), axis=2)
    img_bgr_out = cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR)
    # Clip to [0, 1] and rescale to 8-bit.
    img_bgr_out = 255 * np.clip(img_bgr_out, 0, 1)
    img_bgr_out = np.uint8(img_bgr_out)
    # Concatenate input and output side by side and save.
    con = np.hstack([frame, img_bgr_out])
    cv.imwrite('out' + args.input, con)
| [
"numpy.full",
"numpy.load",
"numpy.uint8",
"argparse.ArgumentParser",
"numpy.concatenate",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.dnn.blobFromImage",
"numpy.clip",
"numpy.hstack",
"cv2.imread",
"cv2.dnn.readNetFromCaffe",
"cv2.resize"
] | [((184, 260), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""iColor: deep interactive colorization"""'}), "(description='iColor: deep interactive colorization')\n", (207, 260), False, 'import argparse\n'), ((1011, 1066), 'cv2.dnn.readNetFromCaffe', 'cv.dnn.readNetFromCaffe', (['args.prototxt', 'args.caffemodel'], {}), '(args.prototxt, args.caffemodel)\n', (1034, 1066), True, 'import cv2 as cv\n'), ((1112, 1132), 'numpy.load', 'np.load', (['args.kernel'], {}), '(args.kernel)\n', (1119, 1132), True, 'import numpy as np\n'), ((1491, 1512), 'cv2.imread', 'cv.imread', (['args.input'], {}), '(args.input)\n', (1500, 1512), True, 'import cv2 as cv\n'), ((1736, 1774), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_RGB2Lab'], {}), '(img_rgb, cv.COLOR_RGB2Lab)\n', (1747, 1774), True, 'import cv2 as cv\n'), ((1944, 1976), 'cv2.resize', 'cv.resize', (['img_rgb', '(W_in, H_in)'], {}), '(img_rgb, (W_in, H_in))\n', (1953, 1976), True, 'import cv2 as cv\n'), ((2031, 2068), 'cv2.cvtColor', 'cv.cvtColor', (['img_rs', 'cv.COLOR_RGB2Lab'], {}), '(img_rs, cv.COLOR_RGB2Lab)\n', (2042, 2068), True, 'import cv2 as cv\n'), ((2501, 2536), 'cv2.resize', 'cv.resize', (['ab_dec', '(W_orig, H_orig)'], {}), '(ab_dec, (W_orig, H_orig))\n', (2510, 2536), True, 'import cv2 as cv\n'), ((2608, 2668), 'numpy.concatenate', 'np.concatenate', (['(img_l[:, :, np.newaxis], ab_dec_us)'], {'axis': '(2)'}), '((img_l[:, :, np.newaxis], ab_dec_us), axis=2)\n', (2622, 2668), True, 'import numpy as np\n'), ((2726, 2768), 'cv2.cvtColor', 'cv.cvtColor', (['img_lab_out', 'cv.COLOR_Lab2BGR'], {}), '(img_lab_out, cv.COLOR_Lab2BGR)\n', (2737, 2768), True, 'import cv2 as cv\n'), ((2875, 2896), 'numpy.uint8', 'np.uint8', (['img_bgr_out'], {}), '(img_bgr_out)\n', (2883, 2896), True, 'import numpy as np\n'), ((2958, 2989), 'numpy.hstack', 'np.hstack', (['[frame, img_bgr_out]'], {}), '([frame, img_bgr_out])\n', (2967, 2989), True, 'import numpy as np\n'), ((2994, 3029), 
'cv2.imwrite', 'cv.imwrite', (["('out' + args.input)", 'con'], {}), "('out' + args.input, con)\n", (3004, 3029), True, 'import cv2 as cv\n'), ((1401, 1437), 'numpy.full', 'np.full', (['[1, 313]', '(2.606)', 'np.float32'], {}), '([1, 313], 2.606, np.float32)\n', (1408, 1437), True, 'import numpy as np\n'), ((2242, 2272), 'cv2.dnn.blobFromImage', 'cv.dnn.blobFromImage', (['img_l_rs'], {}), '(img_l_rs)\n', (2262, 2272), True, 'import cv2 as cv\n'), ((2830, 2856), 'numpy.clip', 'np.clip', (['img_bgr_out', '(0)', '(1)'], {}), '(img_bgr_out, 0, 1)\n', (2837, 2856), True, 'import numpy as np\n')] |
import os
import io
import base64
import numpy as np
from PIL import Image
from hangar.external import BasePlugin
class HangarPIL(BasePlugin):
    """Hangar external plugin that reads/writes images via PIL/Pillow.

    Registers ``load``, ``save`` and ``board_show`` for common raster
    formats (jpg/jpeg/png/ppm/bmp/pgm/tif/tiff/webp).
    """

    def __init__(self):
        provides = ['load', 'save', 'board_show']
        accepts = ['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif', 'tiff', 'webp']
        super().__init__(provides, accepts)

    def load(self, fpath, height=None, width=None):
        """
        Load an image from file and return the numpy array of the image
        along with the name which will be used by hangar as sample name.

        Parameters
        ----------
        fpath : str
            File path. eg: path/to/test.jpg
        height : int, float
            Target height; the image is resized only when both height and
            width are given.
        width : int, float
            Target width of the image.

        Notes
        -----
        Files are read using the Python Imaging Library.
        See PIL docs [1]_ for a list of supported formats.

        References
        ----------
        .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
        """
        im = Image.open(fpath)
        if height and width:
            # BUG FIX: PIL's Image.resize takes a (width, height) tuple;
            # the previous code passed (height, width), which swapped the
            # output dimensions for non-square targets.
            im = im.resize((int(width), int(height)))
        return np.array(im), self.sample_name(fpath)

    def save(self, arr, outdir, sample_n, format_str, **kwargs):
        """
        Save an image array to ``outdir/<sample_n>.<format_str>``.

        Parameters
        ----------
        arr : ndarray of uint8 or float
            Array (image) to save. Arrays of data-type uint8 should have
            values in [0, 255], whereas floating-point arrays must be
            in [0, 1].
        outdir : str
            Destination directory.
        sample_n : str
            Sample name, used as the file stem.
        format_str: str
            Format to save as; defaults to PNG when None.
        kwargs: dict
            Keyword arguments to the Pillow save function. These are format
            dependent; for example Pillow's JPEG save function supports an
            integer ``quality`` argument with values in [1, 95].

        Notes
        -----
        Uses the Python Imaging Library.
        See PIL docs [1]_ for a list of other supported formats.

        References
        ----------
        .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
        """
        # default to PNG
        if format_str is None:
            format_str = "PNG"
        fpath = os.path.join(outdir, f"{sample_n}.{format_str}")
        if arr.dtype.kind == 'b':
            # Pillow cannot write bool arrays directly.
            arr = arr.astype(np.uint8)
        if arr.ndim not in (2, 3):
            raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
        if arr.ndim == 3:
            if arr.shape[2] not in (3, 4):
                raise ValueError("Invalid number of channels in image array.")
        img = Image.fromarray(arr)
        img.save(fpath, format=format_str, **kwargs)

    def board_show(self, arr, format_str=None, **kwargs):
        """
        Convert a numpy array to an image and return it base64-encoded as a
        data string for display in hangarboard.

        Parameters
        ----------
        arr : ndarray of uint8 or float
            Array (image) to encode; same value conventions as :meth:`save`.
        format_str: str
            Image format; defaults to PNG when None.
        kwargs: dict
            Keyword arguments forwarded to Pillow's save function.
        """
        # default to PNG, mirroring `save` (also fixes f"image/None;..."
        # output when no format was given).
        if format_str is None:
            format_str = "PNG"
        buffer = io.BytesIO()
        # BUG FIX: the previous implementation called
        # `self.save(buffer, arr, format_str)`, which does not match the
        # `save(arr, outdir, sample_n, format_str)` signature and raised a
        # TypeError. Encode directly into the in-memory buffer instead.
        img = Image.fromarray(arr)
        img.save(buffer, format=format_str, **kwargs)
        buffer.seek(0)
        decoded = base64.b64encode(buffer.read()).decode('ascii')
        return f"image/{format_str};base64,{decoded}"
| [
"io.BytesIO",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"os.path.join"
] | [((1078, 1095), 'PIL.Image.open', 'Image.open', (['fpath'], {}), '(fpath)\n', (1088, 1095), False, 'from PIL import Image\n'), ((2634, 2682), 'os.path.join', 'os.path.join', (['outdir', 'f"""{sample_n}.{format_str}"""'], {}), "(outdir, f'{sample_n}.{format_str}')\n", (2646, 2682), False, 'import os\n'), ((3039, 3059), 'PIL.Image.fromarray', 'Image.fromarray', (['arr'], {}), '(arr)\n', (3054, 3059), False, 'from PIL import Image\n'), ((4146, 4158), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4156, 4158), False, 'import io\n'), ((1220, 1232), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1228, 1232), True, 'import numpy as np\n')] |
import numpy as np
from manim_engine.constants import OUT
from manim_engine.constants import RIGHT
from functools import reduce
# Matrix operations
def get_norm(vect):
    """Euclidean (L2) norm of a vector given as any iterable of numbers."""
    return sum(x ** 2 for x in vect) ** 0.5
def thick_diagonal(dim, thickness=2):
    """Square uint8 matrix with ones on the band where |i - j| < thickness."""
    rows, cols = np.indices((dim, dim))
    return (np.abs(rows - cols) < thickness).astype('uint8')
def rotation_matrix(angle, axis):
    """
    Rotation in R^3 about a specified axis of rotation.
    """
    frame_in = z_to_vector(axis)          # carries the z-axis onto `axis`
    frame_out = np.linalg.inv(frame_in)   # ...and its inverse
    spin = rotation_about_z(angle)
    # Conjugate the z-rotation into the axis frame.
    return reduce(np.dot, [frame_in, spin, frame_out])
def rotation_about_z(angle):
    """3x3 rotation matrix (nested lists) by `angle` radians about the z-axis."""
    c, s = np.cos(angle), np.sin(angle)
    return [
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ]
def z_to_vector(vector):
    """
    Returns some matrix in SO(3) which takes the z-axis to the
    (normalized) vector provided as an argument
    """
    length = get_norm(vector)
    if length == 0:
        return np.identity(3)
    unit = np.array(vector) / length
    polar = np.arccos(unit[2])  # tilt down from +z
    if any(unit[:2]):
        # Azimuth of the vector's projection onto the unit circle.
        xy = unit[:2] / get_norm(unit[:2])
        azimuth = np.arccos(xy[0])
        if xy[1] < 0:
            azimuth = -azimuth
    else:
        azimuth = 0
    tilt = np.array([
        [np.cos(polar), 0, np.sin(polar)],
        [0, 1, 0],
        [-np.sin(polar), 0, np.cos(polar)],
    ])
    return np.dot(rotation_about_z(azimuth), tilt)
def rotate_vector(vector, angle, axis=OUT):
    """Rotate `vector` by `angle` radians about `axis` (z-axis by default)."""
    matrix = rotation_matrix(angle, axis)
    return np.dot(matrix, vector)
def angle_between(v1, v2):
    """Angle in radians between two vectors, via the normalized dot product."""
    u1 = v1 / get_norm(v1)
    u2 = v2 / get_norm(v2)
    return np.arccos(np.dot(u1, u2))
def angle_of_vector(vector):
    """Polar angle theta of the vector's projection onto the xy-plane
    (0 for the zero projection)."""
    as_complex = complex(*vector[:2])
    if as_complex == 0:
        return 0
    return np.angle(as_complex)
def angle_between_vectors(v1, v2):
    """
    Returns the angle between two 3D vectors.
    This angle will always be btw 0 and TAU/2.
    """
    norms_product = get_norm(v1) * get_norm(v2)
    return np.arccos(np.dot(v1, v2) / norms_product)
def project_along_vector(point, vector):
    """Remove from `point` its component along `vector` (I - v v^T applied
    to the point; yields the orthogonal projection when `vector` is unit
    length)."""
    projector = np.identity(3) - np.outer(vector, vector)
    return np.dot(point, projector.T)
def normalize(vect):
    """Return `vect` scaled to unit length; the zero vector maps to zeros."""
    length = sum(x ** 2 for x in vect) ** 0.5  # inlined get_norm
    if length > 0:
        return vect / length
    return np.zeros(len(vect))
def cross(v1, v2):
    """Cross product of two 3-vectors, returned as a numpy array."""
    a1, a2, a3 = v1[0], v1[1], v1[2]
    b1, b2, b3 = v2[0], v2[1], v2[2]
    return np.array([
        a2 * b3 - a3 * b2,
        a3 * b1 - a1 * b3,
        a1 * b2 - a2 * b1,
    ])
def get_unit_normal(v1, v2):
    """Unit vector perpendicular to both v1 and v2."""
    perpendicular = cross(v1, v2)
    return normalize(perpendicular)
###
def compass_directions(n=4, start_vect=RIGHT):
    """`n` direction vectors evenly spaced around the circle, starting
    at `start_vect` and proceeding counterclockwise."""
    step = 2 * np.pi / n
    return np.array([rotate_vector(start_vect, k * step) for k in range(n)])
def complex_to_R3(complex_num):
    """Map a complex number x + iy to the 3D point (x, y, 0)."""
    x, y = complex_num.real, complex_num.imag
    return np.array((x, y, 0))
def R3_to_complex(point):
    """Map a 3D point (x, y, z) to the complex number x + iy (z dropped)."""
    x, y = point[0], point[1]
    return complex(x, y)
def complex_func_to_R3_func(complex_func):
    """Lift a complex->complex function to an R^3->R^3 function acting on
    the xy-plane (z is discarded on input and zero on output)."""
    def r3_func(point):
        return complex_to_R3(complex_func(R3_to_complex(point)))
    return r3_func
def center_of_mass(points):
    """Arithmetic mean of a collection of points (each cast to float arrays)."""
    as_arrays = [np.array(p).astype("float") for p in points]
    return sum(as_arrays) / len(as_arrays)
def line_intersection(line1, line2):
    """
    Intersection point of two lines, each given as a pair of endpoint
    vectors.  Raises when the lines are parallel.
    """
    def det2(a, b):
        # 2x2 determinant of column vectors a, b (only x/y coords used).
        return a[0] * b[1] - a[1] * b[0]

    x_diff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    y_diff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
    denom = det2(x_diff, y_diff)
    if denom == 0:
        raise Exception("Lines do not intersect")
    d = (det2(*line1), det2(*line2))
    return np.array([det2(d, x_diff) / denom, det2(d, y_diff) / denom, 0])
| [
"numpy.outer",
"numpy.abs",
"numpy.transpose",
"numpy.identity",
"numpy.sin",
"numpy.linalg.inv",
"numpy.array",
"numpy.cos",
"numpy.arange",
"functools.reduce",
"numpy.dot",
"numpy.arccos"
] | [((337, 362), 'numpy.transpose', 'np.transpose', (['row_indices'], {}), '(row_indices)\n', (349, 362), True, 'import numpy as np\n'), ((634, 658), 'numpy.linalg.inv', 'np.linalg.inv', (['z_to_axis'], {}), '(z_to_axis)\n', (647, 658), True, 'import numpy as np\n'), ((670, 717), 'functools.reduce', 'reduce', (['np.dot', '[z_to_axis, about_z, axis_to_z]'], {}), '(np.dot, [z_to_axis, about_z, axis_to_z])\n', (676, 717), False, 'from functools import reduce\n'), ((1145, 1160), 'numpy.arccos', 'np.arccos', (['v[2]'], {}), '(v[2])\n', (1154, 1160), True, 'import numpy as np\n'), ((2365, 2388), 'numpy.dot', 'np.dot', (['point', 'matrix.T'], {}), '(point, matrix.T)\n', (2371, 2388), True, 'import numpy as np\n'), ((2559, 2667), 'numpy.array', 'np.array', (['[v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1\n ] - v1[1] * v2[0]]'], {}), '([v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[\n 0] * v2[1] - v1[1] * v2[0]])\n', (2567, 2667), True, 'import numpy as np\n'), ((2986, 3035), 'numpy.array', 'np.array', (['(complex_num.real, complex_num.imag, 0)'], {}), '((complex_num.real, complex_num.imag, 0))\n', (2994, 3035), True, 'import numpy as np\n'), ((3910, 3929), 'numpy.array', 'np.array', (['[x, y, 0]'], {}), '([x, y, 0])\n', (3918, 3929), True, 'import numpy as np\n'), ((1088, 1102), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1099, 1102), True, 'import numpy as np\n'), ((1111, 1127), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (1119, 1127), True, 'import numpy as np\n'), ((1286, 1309), 'numpy.arccos', 'np.arccos', (['axis_proj[0]'], {}), '(axis_proj[0])\n', (1295, 1309), True, 'import numpy as np\n'), ((2312, 2326), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (2323, 2326), True, 'import numpy as np\n'), ((2329, 2353), 'numpy.outer', 'np.outer', (['vector', 'vector'], {}), '(vector, vector)\n', (2337, 2353), True, 'import numpy as np\n'), ((771, 784), 'numpy.cos', 'np.cos', (['angle'], 
{}), '(angle)\n', (777, 784), True, 'import numpy as np\n'), ((815, 828), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (821, 828), True, 'import numpy as np\n'), ((830, 843), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (836, 843), True, 'import numpy as np\n'), ((2228, 2242), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (2234, 2242), True, 'import numpy as np\n'), ((375, 408), 'numpy.abs', 'np.abs', (['(row_indices - col_indices)'], {}), '(row_indices - col_indices)\n', (381, 408), True, 'import numpy as np\n'), ((787, 800), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (793, 800), True, 'import numpy as np\n'), ((1429, 1440), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1435, 1440), True, 'import numpy as np\n'), ((1445, 1456), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1451, 1456), True, 'import numpy as np\n'), ((1504, 1515), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1510, 1515), True, 'import numpy as np\n'), ((3251, 3266), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (3259, 3266), True, 'import numpy as np\n'), ((272, 286), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (281, 286), True, 'import numpy as np\n'), ((1488, 1499), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1494, 1499), True, 'import numpy as np\n')] |
import numpy as np
from rlpyt.samplers.collectors import (DecorrelatingStartCollector,
BaseEvalCollector)
from rlpyt.agents.base import AgentInputs
from rlpyt.utils.buffer import (torchify_buffer, numpify_buffer, buffer_from_example,
buffer_method)
class CpuResetCollector(DecorrelatingStartCollector):
mid_batch_reset = True
def collect_batch(self, agent_inputs, traj_infos, itr):
# Numpy arrays can be written to from numpy arrays or torch tensors
# (whereas torch tensors can only be written to from torch tensors).
agent_buf, env_buf = self.samples_np.agent, self.samples_np.env
completed_infos = list()
observation, action, reward = agent_inputs
obs_pyt, act_pyt, rew_pyt = torchify_buffer(agent_inputs)
agent_buf.prev_action[0] = action # Leading prev_action.
env_buf.prev_reward[0] = reward
animate = self.animate
animate = animate and (itr % 10 == 0)
first_sample = True
self.agent.sample_mode(itr)
for t in range(self.batch_T):
env_buf.observation[t] = observation
# Agent inputs and outputs are torch tensors.
act_pyt, agent_info = self.agent.step(obs_pyt, act_pyt, rew_pyt)
action = numpify_buffer(act_pyt)
for b, env in enumerate(self.envs):
animate_cur = animate and (b == 0) and first_sample
if animate_cur:
env.render()
import time
time.sleep(0.03)
# Environment inputs and outputs are numpy arrays.
o, r, d, env_info = env.step(action[b])
traj_infos[b].step(observation[b], action[b], r, d, agent_info[b],
env_info)
if getattr(env_info, "traj_done", d):
completed_infos.append(traj_infos[b].terminate(o))
traj_infos[b] = self.TrajInfoCls()
o = env.reset()
if d:
self.agent.reset_one(idx=b)
if b == 0:
first_sample = False
observation[b] = o
reward[b] = r
env_buf.done[t, b] = d
if env_info:
env_buf.env_info[t, b] = env_info
agent_buf.action[t] = action
env_buf.reward[t] = reward
if agent_info:
agent_buf.agent_info[t] = agent_info
if "bootstrap_value" in agent_buf:
# agent.value() should not advance rnn state.
agent_buf.bootstrap_value[:] = self.agent.value(obs_pyt, act_pyt, rew_pyt)
return AgentInputs(observation, action, reward), traj_infos, completed_infos
class CpuWaitResetCollector(DecorrelatingStartCollector):
mid_batch_reset = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.need_reset = np.zeros(len(self.envs), dtype=np.bool)
self.done = np.zeros(len(self.envs), dtype=np.bool)
self.temp_observation = buffer_method(
self.samples_np.env.observation[0, :len(self.envs)], "copy")
def collect_batch(self, agent_inputs, traj_infos, itr):
# Numpy arrays can be written to from numpy arrays or torch tensors
# (whereas torch tensors can only be written to from torch tensors).
agent_buf, env_buf = self.samples_np.agent, self.samples_np.env
completed_infos = list()
observation, action, reward = agent_inputs
b = np.where(self.done)[0]
observation[b] = self.temp_observation[b]
self.done[:] = False # Did resets between batches.
obs_pyt, act_pyt, rew_pyt = torchify_buffer(agent_inputs)
agent_buf.prev_action[0] = action # Leading prev_action.
env_buf.prev_reward[0] = reward
self.agent.sample_mode(itr)
for t in range(self.batch_T):
env_buf.observation[t] = observation
# Agent inputs and outputs are torch tensors.
act_pyt, agent_info = self.agent.step(obs_pyt, act_pyt, rew_pyt)
action = numpify_buffer(act_pyt)
for b, env in enumerate(self.envs):
if self.done[b]:
action[b] = 0 # Record blank.
reward[b] = 0
if agent_info:
agent_info[b] = 0
# Leave self.done[b] = True, record that.
continue
# Environment inputs and outputs are numpy arrays.
o, r, d, env_info = env.step(action[b])
traj_infos[b].step(observation[b], action[b], r, d, agent_info[b],
env_info)
if getattr(env_info, "traj_done", d):
completed_infos.append(traj_infos[b].terminate(o))
traj_infos[b] = self.TrajInfoCls()
self.need_reset[b] = True
if d:
self.temp_observation[b] = o
o = 0 # Record blank.
observation[b] = o
reward[b] = r
self.done[b] = d
if env_info:
env_buf.env_info[t, b] = env_info
agent_buf.action[t] = action
env_buf.reward[t] = reward
env_buf.done[t] = self.done
if agent_info:
agent_buf.agent_info[t] = agent_info
if "bootstrap_value" in agent_buf:
# agent.value() should not advance rnn state.
agent_buf.bootstrap_value[:] = self.agent.value(obs_pyt, act_pyt, rew_pyt)
return AgentInputs(observation, action, reward), traj_infos, completed_infos
def reset_if_needed(self, agent_inputs):
for b in np.where(self.need_reset)[0]:
agent_inputs[b] = 0
agent_inputs.observation[b] = self.envs[b].reset()
self.agent.reset_one(idx=b)
self.need_reset[:] = False
class CpuEvalCollector(BaseEvalCollector):
def collect_evaluation(self, itr):
traj_infos = [self.TrajInfoCls() for _ in range(len(self.envs))]
observations = list()
for env in self.envs:
observations.append(env.reset())
observation = buffer_from_example(observations[0], len(self.envs))
for b, o in enumerate(observations):
observation[b] = o
action = buffer_from_example(self.envs[0].action_space.null_value(),
len(self.envs))
reward = np.zeros(len(self.envs), dtype="float32")
obs_pyt, act_pyt, rew_pyt = torchify_buffer((observation, action, reward))
self.agent.reset()
self.agent.eval_mode(itr)
for t in range(self.max_T):
act_pyt, agent_info = self.agent.step(obs_pyt, act_pyt, rew_pyt)
action = numpify_buffer(act_pyt)
for b, env in enumerate(self.envs):
o, r, d, env_info = env.step(action[b])
traj_infos[b].step(observation[b], action[b], r, d,
agent_info[b], env_info)
if getattr(env_info, "traj_done", d):
self.traj_infos_queue.put(traj_infos[b].terminate(o))
traj_infos[b] = self.TrajInfoCls()
o = env.reset()
if d:
action[b] = 0 # Next prev_action.
r = 0
self.agent.reset_one(idx=b)
observation[b] = o
reward[b] = r
if self.sync.stop_eval.value:
break
self.traj_infos_queue.put(None) # End sentinel.
| [
"rlpyt.agents.base.AgentInputs",
"rlpyt.utils.buffer.torchify_buffer",
"rlpyt.utils.buffer.numpify_buffer",
"time.sleep",
"numpy.where"
] | [((749, 778), 'rlpyt.utils.buffer.torchify_buffer', 'torchify_buffer', (['agent_inputs'], {}), '(agent_inputs)\n', (764, 778), False, 'from rlpyt.utils.buffer import torchify_buffer, numpify_buffer, buffer_from_example, buffer_method\n'), ((3733, 3762), 'rlpyt.utils.buffer.torchify_buffer', 'torchify_buffer', (['agent_inputs'], {}), '(agent_inputs)\n', (3748, 3762), False, 'from rlpyt.utils.buffer import torchify_buffer, numpify_buffer, buffer_from_example, buffer_method\n'), ((6615, 6661), 'rlpyt.utils.buffer.torchify_buffer', 'torchify_buffer', (['(observation, action, reward)'], {}), '((observation, action, reward))\n', (6630, 6661), False, 'from rlpyt.utils.buffer import torchify_buffer, numpify_buffer, buffer_from_example, buffer_method\n'), ((1269, 1292), 'rlpyt.utils.buffer.numpify_buffer', 'numpify_buffer', (['act_pyt'], {}), '(act_pyt)\n', (1283, 1292), False, 'from rlpyt.utils.buffer import torchify_buffer, numpify_buffer, buffer_from_example, buffer_method\n'), ((2693, 2733), 'rlpyt.agents.base.AgentInputs', 'AgentInputs', (['observation', 'action', 'reward'], {}), '(observation, action, reward)\n', (2704, 2733), False, 'from rlpyt.agents.base import AgentInputs\n'), ((3564, 3583), 'numpy.where', 'np.where', (['self.done'], {}), '(self.done)\n', (3572, 3583), True, 'import numpy as np\n'), ((4148, 4171), 'rlpyt.utils.buffer.numpify_buffer', 'numpify_buffer', (['act_pyt'], {}), '(act_pyt)\n', (4162, 4171), False, 'from rlpyt.utils.buffer import torchify_buffer, numpify_buffer, buffer_from_example, buffer_method\n'), ((5668, 5708), 'rlpyt.agents.base.AgentInputs', 'AgentInputs', (['observation', 'action', 'reward'], {}), '(observation, action, reward)\n', (5679, 5708), False, 'from rlpyt.agents.base import AgentInputs\n'), ((5801, 5826), 'numpy.where', 'np.where', (['self.need_reset'], {}), '(self.need_reset)\n', (5809, 5826), True, 'import numpy as np\n'), ((6857, 6880), 'rlpyt.utils.buffer.numpify_buffer', 'numpify_buffer', (['act_pyt'], {}), 
'(act_pyt)\n', (6871, 6880), False, 'from rlpyt.utils.buffer import torchify_buffer, numpify_buffer, buffer_from_example, buffer_method\n'), ((1526, 1542), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (1536, 1542), False, 'import time\n')] |
"""
Created on Wed Feb 5 16:07:35 2020
@author: matias
"""
import numpy as np
from scipy.optimize import minimize
np.random.seed(42)
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_data import leer_data_cronometros
from funciones_cronometros import params_to_chi2
#ORDEN DE PRESENTACION DE LOS PARAMETROS: Mabs,omega_m,b,H_0,n
#%% Predeterminados:
n = 2
omega_m_true = 0.3
b_true = 2
H0_true = 73.48 #Unidades de (km/seg)/Mpc
#%%
os.chdir(path_git+'/Software/Estadística/Datos/')
z_data, H_data, dH = leer_data_cronometros('datos_cronometros.txt')
nll = lambda theta: params_to_chi2(theta,n,z_data,H_data,dH)
initial = np.array([omega_m_true,b_true,H0_true])
bnds = ((0.2, 0.4), (0,3),(68,80))
soln = minimize(nll, initial, bounds=bnds)
omega_m_ml, b_ml, H0_ml = soln.x
print(omega_m_ml,b_ml,H0_ml)
os.chdir(path_git + '/Software/Estadística/Resultados_simulaciones')
np.savez('valores_medios_ST_CC+H0_3params', sol=soln.x)
soln.fun/(len(z_data)+1-3) #0.5277728019958039
#%%
os.chdir(path_git+'/Software/Estadística/Resultados_simulaciones/')
with np.load('valores_medios_ST_CC+H0_3params') as data:
sol = data['sol']
sol
#Tardo 58h 28 min 17 seg!
| [
"sys.path.append",
"scipy.optimize.minimize",
"numpy.load",
"numpy.random.seed",
"funciones_data.leer_data_cronometros",
"funciones_cronometros.params_to_chi2",
"numpy.array",
"pc_path.definir_path",
"numpy.savez",
"os.chdir"
] | [((116, 134), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (130, 134), True, 'import numpy as np\n'), ((255, 269), 'pc_path.definir_path', 'definir_path', ([], {}), '()\n', (267, 269), False, 'from pc_path import definir_path\n'), ((270, 288), 'os.chdir', 'os.chdir', (['path_git'], {}), '(path_git)\n', (278, 288), False, 'import os\n'), ((289, 331), 'sys.path.append', 'sys.path.append', (['"""./Software/Funcionales/"""'], {}), "('./Software/Funcionales/')\n", (304, 331), False, 'import sys\n'), ((601, 652), 'os.chdir', 'os.chdir', (["(path_git + '/Software/Estadística/Datos/')"], {}), "(path_git + '/Software/Estadística/Datos/')\n", (609, 652), False, 'import os\n'), ((673, 719), 'funciones_data.leer_data_cronometros', 'leer_data_cronometros', (['"""datos_cronometros.txt"""'], {}), "('datos_cronometros.txt')\n", (694, 719), False, 'from funciones_data import leer_data_cronometros\n'), ((793, 834), 'numpy.array', 'np.array', (['[omega_m_true, b_true, H0_true]'], {}), '([omega_m_true, b_true, H0_true])\n', (801, 834), True, 'import numpy as np\n'), ((875, 910), 'scipy.optimize.minimize', 'minimize', (['nll', 'initial'], {'bounds': 'bnds'}), '(nll, initial, bounds=bnds)\n', (883, 910), False, 'from scipy.optimize import minimize\n'), ((974, 1042), 'os.chdir', 'os.chdir', (["(path_git + '/Software/Estadística/Resultados_simulaciones')"], {}), "(path_git + '/Software/Estadística/Resultados_simulaciones')\n", (982, 1042), False, 'import os\n'), ((1043, 1098), 'numpy.savez', 'np.savez', (['"""valores_medios_ST_CC+H0_3params"""'], {'sol': 'soln.x'}), "('valores_medios_ST_CC+H0_3params', sol=soln.x)\n", (1051, 1098), True, 'import numpy as np\n'), ((1150, 1219), 'os.chdir', 'os.chdir', (["(path_git + '/Software/Estadística/Resultados_simulaciones/')"], {}), "(path_git + '/Software/Estadística/Resultados_simulaciones/')\n", (1158, 1219), False, 'import os\n'), ((742, 786), 'funciones_cronometros.params_to_chi2', 'params_to_chi2', (['theta', 'n', 
'z_data', 'H_data', 'dH'], {}), '(theta, n, z_data, H_data, dH)\n', (756, 786), False, 'from funciones_cronometros import params_to_chi2\n'), ((1223, 1265), 'numpy.load', 'np.load', (['"""valores_medios_ST_CC+H0_3params"""'], {}), "('valores_medios_ST_CC+H0_3params')\n", (1230, 1265), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2012, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CREATE-NET nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CREATE-NET ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CREATE-NET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Joule Virtual Power Meter
"""
import sys
import optparse
import logging
import numpy as np
import time
import json
import os
import datetime
import scipy.io
from click import write_handler
DEFAULT_MODELS = './models.json'
DEFAULT_INTERVAL = 2000
LOG_FORMAT = '%(asctime)-15s %(message)s'
def compute_power(models, model, x_min, x_mbps, d_bytes):
""" Compure power consumption for one point. """
alpha0 = models[model]['alpha0']
alpha1 = models[model]['alpha1']
x_max = models[model]['x_max']
gamma = models['gamma']
if x_mbps < x_min:
return gamma
if x_mbps > x_max[str(d_bytes)]:
x_mbps = x_max[str(d_bytes)]
alpha_d = alpha0 * (1 + (alpha1 / d_bytes))
return alpha_d * x_mbps + gamma
class VirtualMeter(object):
""" Virtual Power meter. """
def __init__(self, models, interval):
self.models = models
self.interval = interval
self.packet_sizes = {}
x_max_rx = [int(x) for x in self.models['RX']['x_max'].keys()]
x_max_tx = [int(x) for x in self.models['TX']['x_max'].keys()]
self.packet_sizes['RX'] = sorted(x_max_rx, key=int)
self.packet_sizes['TX'] = sorted(x_max_tx, key=int)
self.bins = {}
self.bins['RX'] = self.generate_bins('RX')
self.bins['TX'] = self.generate_bins('TX')
self.last = time.time()
def fetch(self, field=None):
""" Fetch statistics. """
if self.interval > 0:
time.sleep(float(self.interval) / 1000)
delta = time.time() - self.last
self.last = time.time()
bins = {}
bins['RX'] = self.generate_bins('RX')
bins['TX'] = self.generate_bins('TX')
power_rx = self.compute(bins['RX'], self.bins['RX'], 'RX', delta)
power_tx = self.compute(bins['TX'], self.bins['TX'], 'TX', delta)
self.bins['RX'] = bins['RX'][:]
self.bins['TX'] = bins['TX'][:]
readings = {}
readings['power'] = power_tx + power_rx + self.models['gamma']
readings['at'] = \
datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if field != None:
return readings[field]
return readings
def compute(self, bins_curr, bins_prev, model, delta):
""" Compute power consumption. """
power = 0.0
diff = [x[0] for x in (bins_curr - bins_prev).tolist()]
# this should be generalized
x_min = 0.06
for i in range(0, len(diff)):
if diff[i] == 0.0:
continue
pkt_size = self.packet_sizes[model][i]
x_mbps = ((pkt_size * diff[i] * 8) / delta) / 1000000
d_bytes = self.packet_sizes[model][i]
pwr = compute_power(self.models, model, x_min, x_mbps, d_bytes) - \
self.models['gamma']
power = power + pwr
logging.debug("%u bytes, %u pkts, %f s -> %f [Mb/s] %f [W]",
d_bytes, diff[i], delta, x_mbps, pwr)
return power
def generate_bins(self, model):
""" Poll click process. """
results = write_handler('127.0.0.1', 5555,
"%s.write_text_file /tmp/%s" % (model, model))
if results[0] != '200':
return np.array([])
time.sleep(0.1)
try:
samples = np.genfromtxt('/tmp/%s' % model, dtype=int, comments="!")
except IOError:
samples = np.array([[]])
bins = np.zeros(shape=(len(self.packet_sizes[model]),1))
if np.ndim(samples) != 2:
return bins
for sample in samples:
if len(sample) == 0:
continue
# account for ethernet (14), ip (20), and udp (8) headers
size = sample[0] - 14 - 20 - 8
count = sample[1]
for i in range(0, len(self.packet_sizes[model])):
if size <= self.packet_sizes[model][i]:
bins[i] = bins[i] + count
break
return bins
def main():
""" Main method. """
parser = optparse.OptionParser()
parser.add_option('--interval', '-i',
dest="interval",
type="int",
default=DEFAULT_INTERVAL)
parser.add_option('--models', '-m',
dest="models",
default=DEFAULT_MODELS)
parser.add_option('--matlab', '-t',
dest="matlab")
parser.add_option('--verbose', '-v',
action="store_true",
dest="verbose",
default=False)
parser.add_option('--log', '-l',
dest="log")
options, _ = parser.parse_args()
with open(os.path.expanduser(options.models)) as data_file:
models = json.load(data_file)
if options.verbose:
lvl = logging.DEBUG
else:
lvl = logging.INFO
logging.basicConfig(level=lvl,
format=LOG_FORMAT,
filename=options.log,
filemode='w')
virtual = VirtualMeter(models, options.interval)
if options.matlab != None:
mat = []
while True:
try:
readings = virtual.fetch()
except KeyboardInterrupt:
logging.debug("Bye!")
sys.exit()
except:
logging.debug("0 [W]")
else:
logging.info("%f [W]", readings['power'])
if options.matlab != None:
scipy.io.savemat(options.matlab,
{'READINGS' : np.array(mat)},
oned_as='column')
if __name__ == "__main__":
main()
| [
"json.load",
"logging.debug",
"logging.basicConfig",
"optparse.OptionParser",
"numpy.ndim",
"numpy.genfromtxt",
"datetime.datetime.now",
"time.sleep",
"time.time",
"logging.info",
"numpy.array",
"os.path.expanduser",
"click.write_handler",
"sys.exit"
] | [((5615, 5638), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (5636, 5638), False, 'import optparse\n'), ((6473, 6562), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'lvl', 'format': 'LOG_FORMAT', 'filename': 'options.log', 'filemode': '"""w"""'}), "(level=lvl, format=LOG_FORMAT, filename=options.log,\n filemode='w')\n", (6492, 6562), False, 'import logging\n'), ((2882, 2893), 'time.time', 'time.time', ([], {}), '()\n', (2891, 2893), False, 'import time\n'), ((3106, 3117), 'time.time', 'time.time', ([], {}), '()\n', (3115, 3117), False, 'import time\n'), ((4644, 4723), 'click.write_handler', 'write_handler', (['"""127.0.0.1"""', '(5555)', "('%s.write_text_file /tmp/%s' % (model, model))"], {}), "('127.0.0.1', 5555, '%s.write_text_file /tmp/%s' % (model, model))\n", (4657, 4723), False, 'from click import write_handler\n'), ((4828, 4843), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4838, 4843), False, 'import time\n'), ((6357, 6377), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (6366, 6377), False, 'import json\n'), ((3062, 3073), 'time.time', 'time.time', ([], {}), '()\n', (3071, 3073), False, 'import time\n'), ((4415, 4518), 'logging.debug', 'logging.debug', (['"""%u bytes, %u pkts, %f s -> %f [Mb/s] %f [W]"""', 'd_bytes', 'diff[i]', 'delta', 'x_mbps', 'pwr'], {}), "('%u bytes, %u pkts, %f s -> %f [Mb/s] %f [W]', d_bytes, diff[\n i], delta, x_mbps, pwr)\n", (4428, 4518), False, 'import logging\n'), ((4807, 4819), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4815, 4819), True, 'import numpy as np\n'), ((4879, 4936), 'numpy.genfromtxt', 'np.genfromtxt', (["('/tmp/%s' % model)"], {'dtype': 'int', 'comments': '"""!"""'}), "('/tmp/%s' % model, dtype=int, comments='!')\n", (4892, 4936), True, 'import numpy as np\n'), ((5074, 5090), 'numpy.ndim', 'np.ndim', (['samples'], {}), '(samples)\n', (5081, 5090), True, 'import numpy as np\n'), ((6290, 6324), 'os.path.expanduser', 'os.path.expanduser', 
(['options.models'], {}), '(options.models)\n', (6308, 6324), False, 'import os\n'), ((6971, 7012), 'logging.info', 'logging.info', (['"""%f [W]"""', "readings['power']"], {}), "('%f [W]', readings['power'])\n", (6983, 7012), False, 'import logging\n'), ((3592, 3615), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3613, 3615), False, 'import datetime\n'), ((4983, 4997), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (4991, 4997), True, 'import numpy as np\n'), ((6849, 6870), 'logging.debug', 'logging.debug', (['"""Bye!"""'], {}), "('Bye!')\n", (6862, 6870), False, 'import logging\n'), ((6883, 6893), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6891, 6893), False, 'import sys\n'), ((6922, 6944), 'logging.debug', 'logging.debug', (['"""0 [W]"""'], {}), "('0 [W]')\n", (6935, 6944), False, 'import logging\n'), ((7137, 7150), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (7145, 7150), True, 'import numpy as np\n')] |
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
color_names = ["windows blue",
"red",
"amber",
"faded green",
"dusty purple",
"orange",
"clay",
"pink",
"greyish",
"mint",
"light cyan",
"steel blue",
"forest green",
"pastel purple",
"salmon",
"dark brown"]
colors = sns.xkcd_palette(color_names)
sns.set_style("white")
sns.set_context("paper")
def gradient_cmap(gcolors, nsteps=256, bounds=None):
"""
Make a colormap that interpolates between a set of colors
"""
ncolors = len(gcolors)
if bounds is None:
bounds = np.linspace(0, 1, ncolors)
reds = []
greens = []
blues = []
alphas = []
for b, c in zip(bounds, gcolors):
reds.append((b, c[0], c[0]))
greens.append((b, c[1], c[1]))
blues.append((b, c[2], c[2]))
alphas.append((b, c[3], c[3]) if len(c) == 4 else (b, 1., 1.))
cdict = {'red': tuple(reds),
'green': tuple(greens),
'blue': tuple(blues),
'alpha': tuple(alphas)}
cmap = LinearSegmentedColormap('grad_colormap', cdict, nsteps)
return cmap
def plot_dynamics(A, b=None, ax=None, plot_center=True,
xlim=(-4, 4), ylim=(-3, 3), npts=20,
color='r'):
D_latent = A.shape[0]
b = np.zeros((A.shape[0], 1)) if b is None else b
x = np.linspace(*xlim, npts)
y = np.linspace(*ylim, npts)
X, Y = np.meshgrid(x, y)
xy = np.column_stack((X.ravel(), Y.ravel()))
# dydt_m = xy.dot(A.T) + b.T - xy
dydt_m = xy.dot(A.T) + b.T - xy
if ax is None:
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.quiver(xy[:, 0], xy[:, 1],
dydt_m[:, 0], dydt_m[:, 1],
color=color, alpha=1.0,
headwidth=5.)
# Plot the stable point
if plot_center:
try:
center = -np.linalg.solve(A - np.eye(D_latent), b)
ax.plot(center[0], center[1], 'o', color=color, markersize=8)
except:
print("Dynamics are not invertible!")
ax.set_xlabel('$x_1$', fontsize=12, labelpad=10)
ax.set_ylabel('$x_2$', fontsize=12, labelpad=10)
return ax
def plot_all_dynamics(dynamics_distns):
K = len(dynamics_distns)
D_latent = dynamics_distns[0].D_out
fig = plt.figure(figsize=(12, 3))
for k in range(K):
ax = fig.add_subplot(1, K, k + 1)
plot_dynamics(dynamics_distns[k].A[:, :D_latent],
b=dynamics_distns[k].A[:, D_latent:],
plot_center=False,
color=colors[k], ax=ax)
def plot_most_likely_dynamics(
reg, dynamics_distns,
xlim=(-4, 4), ylim=(-3, 3), nxpts=20, nypts=10,
alpha=0.8,
ax=None, figsize=(3, 3)):
K = len(dynamics_distns)
D_latent = dynamics_distns[0].D_out
x = np.linspace(*xlim, nxpts)
y = np.linspace(*ylim, nypts)
X, Y = np.meshgrid(x, y)
xy = np.column_stack((X.ravel(), Y.ravel()))
# Get the probability of each state at each xy location
Ts = reg.get_trans_matrices(xy)
prs = Ts[:, 0, :]
z = np.argmax(prs, axis=1)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for k in range(K):
A = dynamics_distns[k].A[:, :D_latent]
b = dynamics_distns[k].A[:, D_latent:]
dydt_m = xy.dot(A.T) + b.T - xy
zk = z == k
if zk.sum(0) > 0:
ax.quiver(xy[zk, 0], xy[zk, 1],
dydt_m[zk, 0], dydt_m[zk, 1],
color=colors[k], alpha=alpha)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
plt.tight_layout()
return ax
def plot_trans_probs(reg, xlim=(-4, 4), ylim=(-3, 3), n_pts=50, ax=None):
K = reg.D_out + 1
XX, YY = np.meshgrid(np.linspace(*xlim, n_pts),
np.linspace(*ylim, n_pts))
XY = np.column_stack((np.ravel(XX), np.ravel(YY)))
test_prs = reg.get_trans_matrices(XY)[:, 0, :]
if ax is None:
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
for k in range(K):
start = np.array([1., 1., 1., 0.])
end = np.concatenate((colors[k % len(colors)], [0.5]))
cmap = gradient_cmap([start, end])
im1 = ax.imshow(test_prs[:, k].reshape(*XX.shape),
extent=xlim + tuple(reversed(ylim)),
vmin=0, vmax=1, cmap=cmap)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.tight_layout()
return ax
def plot_trajectory(zhat, x, ax=None, ls="-"):
zcps = np.concatenate(([0], np.where(np.diff(zhat))[0] + 1, [zhat.size]))
if ax is None:
fig = plt.figure(figsize=(4, 4))
ax = fig.gca()
for start, stop in zip(zcps[:-1], zcps[1:]):
ax.plot(x[start:stop + 1, 0],
x[start:stop + 1, 1],
lw=1, ls=ls,
color=colors[zhat[start] % len(colors)],
alpha=1.0)
return ax
def plot_trajectory_and_probs(z, x,
ax=None,
trans_distn=None,
title=None,
**trargs):
if ax is None:
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
if trans_distn is not None:
xlim = abs(x[:, 0]).max()
xlim = (-xlim, xlim)
ylim = abs(x[:, 0]).max()
ylim = (-ylim, ylim)
ax = plot_trans_probs(trans_distn, ax=ax,
xlim=xlim, ylim=ylim)
plot_trajectory(z, x, ax=ax, **trargs)
plt.tight_layout()
plt.title(title)
return ax
def plot_data(zhat, y, ax=None, ls="-"):
zcps = np.concatenate(([0], np.where(np.diff(zhat))[0] + 1, [zhat.size]))
if ax is None:
fig = plt.figure(figsize=(4, 4))
ax = fig.gca()
for start, stop in zip(zcps[:-1], zcps[1:]):
stop = min(y.shape[0], stop + 1)
ax.plot(np.arange(start, stop),
y[start:stop],
lw=1, ls=ls,
color=colors[zhat[start] % len(colors)],
alpha=1.0)
return ax
def plot_separate_trans_probs(reg, xlim=(-4, 4), ylim=(-3, 3), n_pts=100, ax=None):
K = reg.D_out
XX, YY = np.meshgrid(np.linspace(*xlim, n_pts),
np.linspace(*ylim, n_pts))
XY = np.column_stack((np.ravel(XX), np.ravel(YY)))
D_reg = reg.D_in
inputs = np.hstack((np.zeros((n_pts ** 2, D_reg - 2)), XY))
test_prs = reg.pi(inputs)
if ax is None:
fig = plt.figure(figsize=(12, 3))
for k in range(K):
ax = fig.add_subplot(1, K, k + 1)
cmap = gradient_cmap([np.ones(3), colors[k % len(colors)]])
im1 = ax.imshow(test_prs[:, k].reshape(*XX.shape),
extent=xlim + tuple(reversed(ylim)),
vmin=0, vmax=1, cmap=cmap)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im1, cax=cax, ax=ax)
plt.tight_layout()
return ax
def plot_z_samples(K, zs, zref=None,
plt_slice=None,
N_iters=None,
title=None,
ax=None):
if ax is None:
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
zs = np.array(zs)
if plt_slice is None:
plt_slice = (0, zs.shape[1])
if N_iters is None:
N_iters = zs.shape[0]
im = ax.imshow(zs[:, slice(*plt_slice)], aspect='auto', vmin=0, vmax=K - 1,
cmap=gradient_cmap(colors[:K]), interpolation="nearest",
extent=plt_slice + (N_iters, 0))
ax.set_xticks([])
ax.set_ylabel("Iteration")
if zref is not None:
divider = make_axes_locatable(ax)
ax2 = divider.append_axes("bottom", size="10%", pad=0.05)
zref = np.atleast_2d(zref)
im = ax2.imshow(zref[:, slice(*plt_slice)], aspect='auto', vmin=0, vmax=K - 1,
cmap=gradient_cmap(colors[:K]), interpolation="nearest")
ax.set_xticks([])
ax2.set_yticks([])
ax2.set_ylabel("True $z$", rotation=0)
ax2.yaxis.set_label_coords(-.15, -.5)
ax2.set_xlabel("Time")
if title is not None:
ax.set_title(title)
| [
"matplotlib.pyplot.title",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.argmax",
"numpy.ravel",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.atleast_2d",
"numpy.meshgrid",
"seaborn.xkcd_palette",
"matplotlib.pyplot.colorbar",
"nump... | [((645, 674), 'seaborn.xkcd_palette', 'sns.xkcd_palette', (['color_names'], {}), '(color_names)\n', (661, 674), True, 'import seaborn as sns\n'), ((675, 697), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (688, 697), True, 'import seaborn as sns\n'), ((698, 722), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (713, 722), True, 'import seaborn as sns\n'), ((1390, 1445), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""grad_colormap"""', 'cdict', 'nsteps'], {}), "('grad_colormap', cdict, nsteps)\n", (1413, 1445), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((1693, 1717), 'numpy.linspace', 'np.linspace', (['*xlim', 'npts'], {}), '(*xlim, npts)\n', (1704, 1717), True, 'import numpy as np\n'), ((1726, 1750), 'numpy.linspace', 'np.linspace', (['*ylim', 'npts'], {}), '(*ylim, npts)\n', (1737, 1750), True, 'import numpy as np\n'), ((1762, 1779), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1773, 1779), True, 'import numpy as np\n'), ((2651, 2678), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (2661, 2678), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3223), 'numpy.linspace', 'np.linspace', (['*xlim', 'nxpts'], {}), '(*xlim, nxpts)\n', (3209, 3223), True, 'import numpy as np\n'), ((3232, 3257), 'numpy.linspace', 'np.linspace', (['*ylim', 'nypts'], {}), '(*ylim, nypts)\n', (3243, 3257), True, 'import numpy as np\n'), ((3269, 3286), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3280, 3286), True, 'import numpy as np\n'), ((3463, 3485), 'numpy.argmax', 'np.argmax', (['prs'], {'axis': '(1)'}), '(prs, axis=1)\n', (3472, 3485), True, 'import numpy as np\n'), ((3995, 4013), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4011, 4013), True, 'import matplotlib.pyplot as plt\n'), ((4836, 4854), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (4852, 4854), True, 'import matplotlib.pyplot as plt\n'), ((5943, 5961), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5959, 5961), True, 'import matplotlib.pyplot as plt\n'), ((5966, 5982), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5975, 5982), True, 'import matplotlib.pyplot as plt\n'), ((7443, 7461), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7459, 7461), True, 'import matplotlib.pyplot as plt\n'), ((7748, 7760), 'numpy.array', 'np.array', (['zs'], {}), '(zs)\n', (7756, 7760), True, 'import numpy as np\n'), ((923, 949), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ncolors'], {}), '(0, 1, ncolors)\n', (934, 949), True, 'import numpy as np\n'), ((1639, 1664), 'numpy.zeros', 'np.zeros', (['(A.shape[0], 1)'], {}), '((A.shape[0], 1))\n', (1647, 1664), True, 'import numpy as np\n'), ((1938, 1964), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1948, 1964), True, 'import matplotlib.pyplot as plt\n'), ((3520, 3547), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3530, 3547), True, 'import matplotlib.pyplot as plt\n'), ((4153, 4178), 'numpy.linspace', 'np.linspace', (['*xlim', 'n_pts'], {}), '(*xlim, n_pts)\n', (4164, 4178), True, 'import numpy as np\n'), ((4205, 4230), 'numpy.linspace', 'np.linspace', (['*ylim', 'n_pts'], {}), '(*ylim, n_pts)\n', (4216, 4230), True, 'import numpy as np\n'), ((4372, 4399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4382, 4399), True, 'import matplotlib.pyplot as plt\n'), ((4474, 4504), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 1.0, 0.0])\n', (4482, 4504), True, 'import numpy as np\n'), ((5029, 5055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (5039, 5055), True, 'import 
matplotlib.pyplot as plt\n'), ((5573, 5600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5583, 5600), True, 'import matplotlib.pyplot as plt\n'), ((6151, 6177), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (6161, 6177), True, 'import matplotlib.pyplot as plt\n'), ((6619, 6644), 'numpy.linspace', 'np.linspace', (['*xlim', 'n_pts'], {}), '(*xlim, n_pts)\n', (6630, 6644), True, 'import numpy as np\n'), ((6671, 6696), 'numpy.linspace', 'np.linspace', (['*ylim', 'n_pts'], {}), '(*ylim, n_pts)\n', (6682, 6696), True, 'import numpy as np\n'), ((6903, 6930), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (6913, 6930), True, 'import matplotlib.pyplot as plt\n'), ((7308, 7331), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (7327, 7331), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((7404, 7437), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im1'], {'cax': 'cax', 'ax': 'ax'}), '(im1, cax=cax, ax=ax)\n', (7416, 7437), True, 'import matplotlib.pyplot as plt\n'), ((7676, 7703), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7686, 7703), True, 'import matplotlib.pyplot as plt\n'), ((8185, 8208), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (8204, 8208), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((8291, 8310), 'numpy.atleast_2d', 'np.atleast_2d', (['zref'], {}), '(zref)\n', (8304, 8310), True, 'import numpy as np\n'), ((4258, 4270), 'numpy.ravel', 'np.ravel', (['XX'], {}), '(XX)\n', (4266, 4270), True, 'import numpy as np\n'), ((4272, 4284), 'numpy.ravel', 'np.ravel', (['YY'], {}), '(YY)\n', (4280, 4284), True, 'import numpy as np\n'), ((6307, 6329), 'numpy.arange', 'np.arange', (['start', 'stop'], {}), '(start, stop)\n', (6316, 6329), 
True, 'import numpy as np\n'), ((6724, 6736), 'numpy.ravel', 'np.ravel', (['XX'], {}), '(XX)\n', (6732, 6736), True, 'import numpy as np\n'), ((6738, 6750), 'numpy.ravel', 'np.ravel', (['YY'], {}), '(YY)\n', (6746, 6750), True, 'import numpy as np\n'), ((6799, 6832), 'numpy.zeros', 'np.zeros', (['(n_pts ** 2, D_reg - 2)'], {}), '((n_pts ** 2, D_reg - 2))\n', (6807, 6832), True, 'import numpy as np\n'), ((7027, 7037), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (7034, 7037), True, 'import numpy as np\n'), ((2246, 2262), 'numpy.eye', 'np.eye', (['D_latent'], {}), '(D_latent)\n', (2252, 2262), True, 'import numpy as np\n'), ((4959, 4972), 'numpy.diff', 'np.diff', (['zhat'], {}), '(zhat)\n', (4966, 4972), True, 'import numpy as np\n'), ((6081, 6094), 'numpy.diff', 'np.diff', (['zhat'], {}), '(zhat)\n', (6088, 6094), True, 'import numpy as np\n')] |
import os
import collections
from os.path import join
import numpy as np
import pandas as pd
from itertools import chain
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import ShuffleSplit, GridSearchCV
from sklearn.metrics import (mean_absolute_error, mean_squared_error,
explained_variance_score, r2_score)
from ukbb_variables import (brain_dmri_fa, brain_dmri_icvf,
brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3,
brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus,
fluid_intelligence, neuroticism)
# Input locations: phenotype CSV, per-subject rfMRI connectivity matrices,
# and the merged brain-imaging spreadsheet (all on local storage).
path_to_csv = '/storage/local/kdadi/work/rs_study/experiments/UKBB/ukb9543.csv'
path_to_matrices = '/storage/local/kdadi/work/data/UKBB/rfMRI_tangent_matrix_dim100'
path_to_merge_brain = '/storage/local/kdadi/work/rs_study/experiments/UKBB/para/roadmap/ukb_add1_merge_brain.csv'
# Collect every column name declared in the imaging variable dictionaries
# (each entry of ukbb_variables is a dict whose keys are CSV column names).
X_iterate = zip([brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1,
                 brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo,
                 brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism],
                ['fa', 'icvf', 'isovf', 'l1', 'l2', 'l3', 'md', 'mo', 'od',
                 'smri', 'Fluid \n intelligence', 'Neuroticism'])
columns = []
for i in X_iterate:
    columns.extend(i[0].keys())
columns.extend(['eid'])
# NOTE(review): 20016-2.0 / 20127-0.0 are UKBB field codes -- presumably
# fluid intelligence and neuroticism respectively; confirm in ukbb_variables.
ukbb = pd.read_csv(path_to_csv, usecols=['20016-2.0', 'eid',
                                  '20127-0.0'])
y = ukbb[['eid', '20016-2.0']].dropna()
# Keep only subjects with a non-missing 20016-2.0 entry
new_ukbb = pd.DataFrame(ukbb, index=y.index)
new_ukbb = new_ukbb.drop(columns=['20016-2.0'], errors='ignore')
# Random splitting of data to train our model
X_train, X_test, y_train, y_test = train_test_split(
    new_ukbb, y, test_size=0.5, random_state=0)
X_train = X_train[['eid', '20127-0.0']].dropna()
X_test = X_test[['eid', '20127-0.0']].dropna()
merged_data = pd.read_csv(path_to_merge_brain, usecols=columns)
# Diffusion-MRI columns only (plus 'eid' for joining); drop incomplete rows
dmriDict = collections.OrderedDict(chain(brain_dmri_fa.items(),
                                        brain_dmri_icvf.items(),
                                        brain_dmri_isovf.items(),
                                        brain_dmri_l1.items(),
                                        brain_dmri_l2.items(),
                                        brain_dmri_l3.items(),
                                        brain_dmri_md.items(),
                                        brain_dmri_mo.items(),
                                        brain_dmri_od.items()))
dmriDict.update({'eid': 'eid'})
dmri = pd.DataFrame(merged_data, columns=dmriDict.keys())
dmri = dmri.dropna()
def load_combine_data(X_split, merged_data, dmri):
    """Assemble the feature table for one data split.

    For every subject id in ``X_split.eid`` that has a connectivity-matrix
    file on disk and non-empty dMRI rows, concatenate the merged brain
    features with the flattened rfMRI matrix loaded from disk.

    Returns
    -------
    df : DataFrame of imaging features plus connectome columns.
    y_split : DataFrame with the target column '20127-0.0'.
    """
    data_frame = []
    connectomes = []
    eids = []
    for e_id in X_split.eid:
        this_eid_data = merged_data[merged_data['eid'] == e_id]
        this_path = os.path.join(
            path_to_matrices, str(e_id) + '_20227_2_0.txt')
        this_dmri_data = dmri[dmri['eid'] == e_id]
        # NOTE(review): subject 3551399 is excluded unconditionally --
        # presumably a known bad/corrupted record; confirm.
        if not e_id == 3551399:
            if os.path.exists(this_path) and not len(this_dmri_data) == 0:
                eids.append(e_id)
                data_frame.append(this_eid_data)
                connectomes.append(np.loadtxt(this_path))
    X_split = pd.concat(data_frame)
    y_split = pd.DataFrame(X_split, columns=['20127-0.0'])
    # Drop identifier and target columns so only features remain
    X_split = X_split.drop(columns=['eid', '20016-2.0',
                                    '20127-0.0'], axis=1)
    connectomes = pd.DataFrame(connectomes, index=X_split.index)
    df = pd.concat([X_split, connectomes], axis=1)
    return df, y_split
# Build feature tables for both halves of the split
df, y_train = load_combine_data(X_train, merged_data, dmri)
df_test, y_test = load_combine_data(X_test, merged_data, dmri)
# Model
# NOTE(review): criterion='mse' was renamed 'squared_error' in sklearn 1.0+;
# this pins the script to older scikit-learn versions.
estimator = RandomForestRegressor(n_estimators=250, criterion='mse',
                                  n_jobs=-1, verbose=1, random_state=0)
cv = ShuffleSplit(n_splits=100, test_size=0.1, random_state=0)
param_grid = {'max_depth': [5, 10, 20, 40, None],
              'max_features': [1, 5, 'log2', 'sqrt', 'auto', None]}
grid_search = GridSearchCV(estimator, param_grid=param_grid,
                           cv=5, verbose=2, n_jobs=-1)
# Accumulators filled by predict_collect_save across CV folds
metrics = []
data = []
data_generalization = []
def predict_collect_save(data_pred, data_collect, y_true, test_index,
                         split, save_type):
    """Predict with the fitted ``grid_search`` and record the results.

    Appends a per-sample predictions DataFrame to ``data_collect`` and a
    per-fold score dict to the module-level ``metrics`` list. Side effects
    only; returns None.
    """
    scores = {}
    pred_ = grid_search.predict(data_pred)
    y_true_ = y_true.iloc[test_index]
    # Per-sample record: prediction, ground truth, original index, CV fold
    predictions = pd.DataFrame(pred_, columns=['predicted'],
                               index=y_true_.index)
    predictions['true'] = y_true_
    predictions['test_indices'] = pd.DataFrame(test_index,
                                              columns=['test indices'],
                                              index=y_true_.index)
    predictions['fold'] = pd.Series([split] * len(predictions),
                                    index=predictions.index)
    data_collect.append(predictions)
    # Regression metrics for this fold, tagged with experiment metadata
    scores['mae'] = mean_absolute_error(y_true_, pred_)
    scores['mse'] = mean_squared_error(y_true_, pred_)
    scores['ev'] = explained_variance_score(y_true_, pred_)
    scores['r2_score'] = r2_score(y_true_, pred_)
    scores['fold'] = split
    scores['Estimator'] = 'RandomForest'
    scores['Permuted'] = 'no'
    scores['model_testing'] = save_type
    scores['modality'] = 'sMRI, dMRI, fMRI (full MRI)'
    scores['target'] = 'Neuroticism'
    metrics.append(scores)
    return
# For each shuffle-split fold: refit the grid search on the train part,
# then score both the held-out validation part and the fixed test set.
for split, (train_index, test_index) in enumerate(cv.split(df, y_train)):
    scores = {}
    grid_search.fit(df.iloc[train_index], y_train.iloc[train_index])
    predict_collect_save(data_pred=df.iloc[test_index], data_collect=data,
                         y_true=y_train, test_index=test_index,
                         split=split, save_type='validation')
    # NOTE(review): np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int (or np.int_) here on modern NumPy.
    predict_collect_save(data_pred=df_test, data_collect=data_generalization,
                         y_true=y_test,
                         test_index=np.arange(df_test.shape[0], dtype=np.int),
                         split=split, save_type='generalization')
# save outputs
savedir = join('outputs', 'imaging_neuroticism_prediction_from_full_mri')
if not os.path.exists(savedir):
    os.makedirs(savedir)
scores = pd.DataFrame(metrics)
scores.to_csv(join(savedir, 'scores.csv'))
| [
"sklearn.model_selection.GridSearchCV",
"ukbb_variables.brain_dmri_l1.items",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_absolute_error",
"ukbb_variables.brain_dmri_icvf.items",
"numpy.arange",
"os.path.join",
"ukbb_variables.br... | [((1547, 1614), 'pandas.read_csv', 'pd.read_csv', (['path_to_csv'], {'usecols': "['20016-2.0', 'eid', '20127-0.0']"}), "(path_to_csv, usecols=['20016-2.0', 'eid', '20127-0.0'])\n", (1558, 1614), True, 'import pandas as pd\n'), ((1707, 1740), 'pandas.DataFrame', 'pd.DataFrame', (['ukbb'], {'index': 'y.index'}), '(ukbb, index=y.index)\n', (1719, 1740), True, 'import pandas as pd\n'), ((1889, 1949), 'sklearn.model_selection.train_test_split', 'train_test_split', (['new_ukbb', 'y'], {'test_size': '(0.5)', 'random_state': '(0)'}), '(new_ukbb, y, test_size=0.5, random_state=0)\n', (1905, 1949), False, 'from sklearn.model_selection import train_test_split\n'), ((2067, 2116), 'pandas.read_csv', 'pd.read_csv', (['path_to_merge_brain'], {'usecols': 'columns'}), '(path_to_merge_brain, usecols=columns)\n', (2078, 2116), True, 'import pandas as pd\n'), ((3901, 4000), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(250)', 'criterion': '"""mse"""', 'n_jobs': '(-1)', 'verbose': '(1)', 'random_state': '(0)'}), "(n_estimators=250, criterion='mse', n_jobs=-1, verbose\n =1, random_state=0)\n", (3922, 4000), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((4036, 4093), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(100)', 'test_size': '(0.1)', 'random_state': '(0)'}), '(n_splits=100, test_size=0.1, random_state=0)\n', (4048, 4093), False, 'from sklearn.model_selection import ShuffleSplit, GridSearchCV\n'), ((4227, 4301), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['estimator'], {'param_grid': 'param_grid', 'cv': '(5)', 'verbose': '(2)', 'n_jobs': '(-1)'}), '(estimator, param_grid=param_grid, cv=5, verbose=2, n_jobs=-1)\n', (4239, 4301), False, 'from sklearn.model_selection import ShuffleSplit, GridSearchCV\n'), ((6245, 6308), 'os.path.join', 'join', (['"""outputs"""', '"""imaging_neuroticism_prediction_from_full_mri"""'], {}), "('outputs', 
'imaging_neuroticism_prediction_from_full_mri')\n", (6249, 6308), False, 'from os.path import join\n'), ((6375, 6396), 'pandas.DataFrame', 'pd.DataFrame', (['metrics'], {}), '(metrics)\n', (6387, 6396), True, 'import pandas as pd\n'), ((3420, 3441), 'pandas.concat', 'pd.concat', (['data_frame'], {}), '(data_frame)\n', (3429, 3441), True, 'import pandas as pd\n'), ((3456, 3500), 'pandas.DataFrame', 'pd.DataFrame', (['X_split'], {'columns': "['20127-0.0']"}), "(X_split, columns=['20127-0.0'])\n", (3468, 3500), True, 'import pandas as pd\n'), ((3634, 3680), 'pandas.DataFrame', 'pd.DataFrame', (['connectomes'], {'index': 'X_split.index'}), '(connectomes, index=X_split.index)\n', (3646, 3680), True, 'import pandas as pd\n'), ((3690, 3731), 'pandas.concat', 'pd.concat', (['[X_split, connectomes]'], {'axis': '(1)'}), '([X_split, connectomes], axis=1)\n', (3699, 3731), True, 'import pandas as pd\n'), ((4608, 4671), 'pandas.DataFrame', 'pd.DataFrame', (['pred_'], {'columns': "['predicted']", 'index': 'y_true_.index'}), "(pred_, columns=['predicted'], index=y_true_.index)\n", (4620, 4671), True, 'import pandas as pd\n'), ((4771, 4842), 'pandas.DataFrame', 'pd.DataFrame', (['test_index'], {'columns': "['test indices']", 'index': 'y_true_.index'}), "(test_index, columns=['test indices'], index=y_true_.index)\n", (4783, 4842), True, 'import pandas as pd\n'), ((5119, 5154), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true_', 'pred_'], {}), '(y_true_, pred_)\n', (5138, 5154), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score, r2_score\n'), ((5175, 5209), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_true_', 'pred_'], {}), '(y_true_, pred_)\n', (5193, 5209), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score, r2_score\n'), ((5229, 5269), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_true_', 'pred_'], {}), 
'(y_true_, pred_)\n', (5253, 5269), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score, r2_score\n'), ((5295, 5319), 'sklearn.metrics.r2_score', 'r2_score', (['y_true_', 'pred_'], {}), '(y_true_, pred_)\n', (5303, 5319), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score, r2_score\n'), ((6316, 6339), 'os.path.exists', 'os.path.exists', (['savedir'], {}), '(savedir)\n', (6330, 6339), False, 'import os\n'), ((6345, 6365), 'os.makedirs', 'os.makedirs', (['savedir'], {}), '(savedir)\n', (6356, 6365), False, 'import os\n'), ((6411, 6438), 'os.path.join', 'join', (['savedir', '"""scores.csv"""'], {}), "(savedir, 'scores.csv')\n", (6415, 6438), False, 'from os.path import join\n'), ((2159, 2180), 'ukbb_variables.brain_dmri_fa.items', 'brain_dmri_fa.items', ([], {}), '()\n', (2178, 2180), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2223, 2246), 'ukbb_variables.brain_dmri_icvf.items', 'brain_dmri_icvf.items', ([], {}), '()\n', (2244, 2246), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2289, 2313), 'ukbb_variables.brain_dmri_isovf.items', 'brain_dmri_isovf.items', ([], {}), '()\n', (2311, 2313), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2356, 2377), 'ukbb_variables.brain_dmri_l1.items', 'brain_dmri_l1.items', ([], {}), '()\n', (2375, 2377), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, 
brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2420, 2441), 'ukbb_variables.brain_dmri_l2.items', 'brain_dmri_l2.items', ([], {}), '()\n', (2439, 2441), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2484, 2505), 'ukbb_variables.brain_dmri_l3.items', 'brain_dmri_l3.items', ([], {}), '()\n', (2503, 2505), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2548, 2569), 'ukbb_variables.brain_dmri_md.items', 'brain_dmri_md.items', ([], {}), '()\n', (2567, 2569), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2612, 2633), 'ukbb_variables.brain_dmri_mo.items', 'brain_dmri_mo.items', ([], {}), '()\n', (2631, 2633), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((2676, 2697), 'ukbb_variables.brain_dmri_od.items', 'brain_dmri_od.items', ([], {}), '()\n', (2695, 2697), False, 'from ukbb_variables import brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1, brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo, brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism\n'), ((6106, 6147), 'numpy.arange', 'np.arange', (['df_test.shape[0]'], {'dtype': 'np.int'}), '(df_test.shape[0], dtype=np.int)\n', (6115, 6147), True, 'import numpy as 
np\n'), ((3204, 3229), 'os.path.exists', 'os.path.exists', (['this_path'], {}), '(this_path)\n', (3218, 3229), False, 'import os\n'), ((3382, 3403), 'numpy.loadtxt', 'np.loadtxt', (['this_path'], {}), '(this_path)\n', (3392, 3403), True, 'import numpy as np\n')] |
# Make the shared visualisation helpers importable before anything else
import sys # nopep8
cmd_folder = "../../../vis" # nopep8
if cmd_folder not in sys.path: # nopep8
    sys.path.insert(0, cmd_folder)
from get_boxlib import get_files, get_time
from tile_mov import tile_movie
from make_mov import make_all, get_particle_trajectories
import numpy as np
import pylab as plt
import matplotlib as mpl
from matplotlib.image import NonUniformImage
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
# ==============================================================================
# MAKE MOVIES
# ==============================================================================
def get_rho(ds, c):
    """Return the fluid density field, masked where there is (almost) no fluid."""
    _, vfrac = ds.get("vfrac-fluid")
    coords, rho = ds.get("rho-fluid")
    # hide cells whose fluid volume fraction is essentially zero
    rho = np.ma.masked_where(vfrac <= 1e-2, rho)
    return {"x": coords[0], "y": coords[1], "value": rho}
def get_T(ds, c):
    """Return the fluid temperature field, masked where there is (almost) no fluid."""
    _, vfrac = ds.get("vfrac-fluid")
    coords, temperature = ds.get("T-fluid")
    # hide cells whose fluid volume fraction is essentially zero
    temperature = np.ma.masked_where(vfrac <= 1e-2, temperature)
    return {"x": coords[0], "y": coords[1], "value": temperature}
def get_vfrac(ds, c):
    """Return the fluid volume-fraction field (no masking applied)."""
    coords, vfrac = ds.get("vfrac-fluid")
    return {"x": coords[0], "y": coords[1], "value": vfrac}
def get_boxes(ds, c):
    """Return the AMR box layout of the dataset."""
    return {"boxes": ds.get_boxes()}
def plot(frame, data, output_name):
    """Draw a two-panel movie frame (density | temperature) and save it as PNG.

    ``frame`` carries global min/max values and plot limits; ``data`` holds
    the fields produced by the ``get_*`` accessors above.
    """
    xc = data["vfrac"]["x"]
    yc = data["vfrac"]["y"]
    v = data["vfrac"]["value"][()]
    rho = data["rho"]["value"][()]
    T = data["T"]["value"][()][()]
    boxes = data["boxes"]["boxes"][()]
    # colour scales: minima come from the movie metadata, maxima are
    # hard-coded overrides (the original frame maxima are commented out)
    rho_vmin = frame["rho"]["min"]
    rho_vmax = 5.5 #frame["rho"]["max"]
    T_vmin = frame["T"]["min"]
    T_vmax = 6.0 #frame["T"]["max"]
    limits = frame["q"]["xy_limits"]
    fig = plt.figure()
    axes = []
    ### left panel: density
    ax = fig.add_subplot(121); axes.append(ax)
    norm = mpl.colors.Normalize(vmin=rho_vmin, vmax=rho_vmax)
    im = NonUniformImage(ax, interpolation='bilinear', extent=(limits[0][0], limits[1][0], limits[0][1], limits[1][1]),
        norm=norm,
        cmap="viridis")
    im.set_data(xc, yc, rho.T)
    ax.images.append(im)
    # trace the 0.5 volume-fraction contour (fluid/solid interface) ...
    cs = ax.contour(xc, yc, v.T, levels=[0.5], colors=['k'], linewidths=[0.25])
    line_x = cs.allsegs[0][0][:,0].tolist()
    line_y = cs.allsegs[0][0][:,1].tolist()
    # ... and black out the region on the far side of it
    plt.fill(line_x+[limits[1][0]], line_y+[limits[0][1]], color='k')
    ax.text(0.05, 0.05, r'$\rho$', horizontalalignment='left',
        verticalalignment='bottom', transform=ax.transAxes, fontsize=18, color='w')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax)
    ### right panel: temperature, with the AMR grid overlaid
    ax = fig.add_subplot(122); axes.append(ax)
    norm = mpl.colors.Normalize(vmin=T_vmin, vmax=T_vmax)
    im = NonUniformImage(ax, interpolation='bilinear', extent=(limits[0][0], limits[1][0], limits[0][1], limits[1][1]),
        norm=norm,
        cmap="viridis")
    im.set_data(xc, yc, T.T)
    ax.images.append(im)
    cs = ax.contour(xc, yc, v.T, levels=[0.5], colors=['k'], linewidths=[0.25])
    line_x = cs.allsegs[0][0][:,0].tolist()
    line_y = cs.allsegs[0][0][:,1].tolist()
    plt.fill(line_x+[limits[1][0]], line_y+[limits[0][1]], color='k')
    # plot boxes
    grid = []
    for box in boxes:
        sz = box[1] - box[0]
        rect = Rectangle((box[0][0], box[0][1]), sz[0], sz[1])
        grid.append(rect)
    pc = PatchCollection(grid, facecolor='none', alpha=1.0, edgecolor='w', linewidth=0.25)
    ax.add_collection(pc)
    ax.text(0.05, 0.05, r'$T$', horizontalalignment='left',
        verticalalignment='bottom', transform=ax.transAxes, fontsize=18, color='w')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax)
    # shared cosmetics: fixed limits, equal aspect, no tick labels
    for ax in axes:
        ax.set_xlim(limits[0][0], limits[1][0])
        ax.set_ylim(limits[0][1], limits[1][1])
        ax.set_aspect(1)
        ax.axes.xaxis.set_visible(False)
        ax.axes.yaxis.set_visible(False)
    fig.tight_layout()
    fig.savefig(output_name, dpi=300, bbox_inches="tight")
    plt.close(fig)
    return
# Movie job configuration consumed by make_all()
Q = []
q = {}
q["files_dir"] = "."
q["level"] = -1
# all the data we need to retrieve
q["get"] = [
    {"func":get_rho, "tag":"rho"},
    {"func":get_T, "tag":"T"},
    {"func":get_vfrac, "tag":"vfrac"},
    {"func":get_boxes, "tag":"boxes"},
]
# how to make a frame
q["plot"] = plot
q["name"] = "movie"
##
q["framerate"] = 30
q["mov_save"] = q["files_dir"] + "/mov"
q["offset"] = [0.0, 0.0]
q["xy_limits"] = [[-0.009, 0.0], [0.001, 0.01]]
q["file_include"] = ["plt"]
q["file_exclude"] = ["chk"]
q["cores"] = 11
q["force_data"] = False
q["force_frames"] = True
q["only_frames"] = False
q["redo_streaks"] = False
q["dpi"] = 300
q["normalize"] = "all"
# non-linear time sampling
files = get_files(q["files_dir"], include=q["file_include"], exclude=q["file_exclude"], get_all=True)
n_files = len(files)
times = []
for f in files:
    times.append(get_time(f))
times = np.array(times)
# Log-spaced frame indices: dense early, sparse late; duplicates removed
# while preserving order via dict.fromkeys.
log_i = np.logspace(0, np.log10(n_files-1), num=int(n_files/2), dtype=int)
log_i = list(dict.fromkeys(log_i))
q["time_span"] = times[log_i].tolist()
Q.append(q)
make_all(Q)
print("DONE")
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"pylab.close",
"numpy.ma.masked_where",
"matplotlib.colors.Normalize",
"matplotlib.patches.Rectangle",
"matplotlib.collections.PatchCollection",
"sys.path.insert",
"make_mov.make_all",
"pylab.colorbar",
"numpy.array",
"pylab.figure",
"pylab.fill",... | [((4696, 4794), 'get_boxlib.get_files', 'get_files', (["q['files_dir']"], {'include': "q['file_include']", 'exclude': "q['file_exclude']", 'get_all': '(True)'}), "(q['files_dir'], include=q['file_include'], exclude=q[\n 'file_exclude'], get_all=True)\n", (4705, 4794), False, 'from get_boxlib import get_files, get_time\n'), ((4876, 4891), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (4884, 4891), True, 'import numpy as np\n'), ((5056, 5067), 'make_mov.make_all', 'make_all', (['Q'], {}), '(Q)\n', (5064, 5067), False, 'from make_mov import make_all, get_particle_trajectories\n'), ((105, 135), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_folder'], {}), '(0, cmd_folder)\n', (120, 135), False, 'import sys\n'), ((797, 829), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(v <= 0.01)', 'r'], {}), '(v <= 0.01, r)\n', (815, 829), True, 'import numpy as np\n'), ((962, 994), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(v <= 0.01)', 'T'], {}), '(v <= 0.01, T)\n', (980, 994), True, 'import numpy as np\n'), ((1651, 1663), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (1661, 1663), True, 'import pylab as plt\n'), ((1746, 1796), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'rho_vmin', 'vmax': 'rho_vmax'}), '(vmin=rho_vmin, vmax=rho_vmax)\n', (1766, 1796), True, 'import matplotlib as mpl\n'), ((1806, 1948), 'matplotlib.image.NonUniformImage', 'NonUniformImage', (['ax'], {'interpolation': '"""bilinear"""', 'extent': '(limits[0][0], limits[1][0], limits[0][1], limits[1][1])', 'norm': 'norm', 'cmap': '"""viridis"""'}), "(ax, interpolation='bilinear', extent=(limits[0][0], limits[\n 1][0], limits[0][1], limits[1][1]), norm=norm, cmap='viridis')\n", (1821, 1948), False, 'from matplotlib.image import NonUniformImage\n'), ((2190, 2259), 'pylab.fill', 'plt.fill', (['(line_x + [limits[1][0]])', '(line_y + [limits[0][1]])'], {'color': '"""k"""'}), "(line_x + [limits[1][0]], line_y + [limits[0][1]], color='k')\n", (2198, 
2259), True, 'import pylab as plt\n'), ((2420, 2443), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2439, 2443), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2508, 2529), 'pylab.colorbar', 'plt.colorbar', (['im', 'cax'], {}), '(im, cax)\n', (2520, 2529), True, 'import pylab as plt\n'), ((2598, 2644), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'T_vmin', 'vmax': 'T_vmax'}), '(vmin=T_vmin, vmax=T_vmax)\n', (2618, 2644), True, 'import matplotlib as mpl\n'), ((2654, 2796), 'matplotlib.image.NonUniformImage', 'NonUniformImage', (['ax'], {'interpolation': '"""bilinear"""', 'extent': '(limits[0][0], limits[1][0], limits[0][1], limits[1][1])', 'norm': 'norm', 'cmap': '"""viridis"""'}), "(ax, interpolation='bilinear', extent=(limits[0][0], limits[\n 1][0], limits[0][1], limits[1][1]), norm=norm, cmap='viridis')\n", (2669, 2796), False, 'from matplotlib.image import NonUniformImage\n'), ((3036, 3105), 'pylab.fill', 'plt.fill', (['(line_x + [limits[1][0]])', '(line_y + [limits[0][1]])'], {'color': '"""k"""'}), "(line_x + [limits[1][0]], line_y + [limits[0][1]], color='k')\n", (3044, 3105), True, 'import pylab as plt\n'), ((3284, 3370), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['grid'], {'facecolor': '"""none"""', 'alpha': '(1.0)', 'edgecolor': '"""w"""', 'linewidth': '(0.25)'}), "(grid, facecolor='none', alpha=1.0, edgecolor='w', linewidth\n =0.25)\n", (3299, 3370), False, 'from matplotlib.collections import PatchCollection\n'), ((3553, 3576), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (3572, 3576), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3641, 3662), 'pylab.colorbar', 'plt.colorbar', (['im', 'cax'], {}), '(im, cax)\n', (3653, 3662), True, 'import pylab as plt\n'), ((3974, 3988), 'pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (3983, 3988), True, 'import pylab as 
plt\n'), ((4916, 4937), 'numpy.log10', 'np.log10', (['(n_files - 1)'], {}), '(n_files - 1)\n', (4924, 4937), True, 'import numpy as np\n'), ((3200, 3247), 'matplotlib.patches.Rectangle', 'Rectangle', (['(box[0][0], box[0][1])', 'sz[0]', 'sz[1]'], {}), '((box[0][0], box[0][1]), sz[0], sz[1])\n', (3209, 3247), False, 'from matplotlib.patches import Rectangle\n'), ((4855, 4866), 'get_boxlib.get_time', 'get_time', (['f'], {}), '(f)\n', (4863, 4866), False, 'from get_boxlib import get_files, get_time\n')] |
from __future__ import division
import os
import os.path as op
import numpy as np
from scipy.spatial import KDTree
import nibabel as nib
import nibabel.freesurfer as nifs
from nibabel.affines import apply_affine
def vol_to_surf_xfm(vol_fname, reg_fname):
    """Obtain a transformation from vol voxels -> Freesurfer surf coords.

    Parameters
    ----------
    vol_fname : string
        Filename pointing at image defining the vol space.
    reg_fname : string
        Filename pointing at registration file (from bbregister) that maps
        ``vol_fname`` to the Freesurfer anatomy.

    Returns
    -------
    xfm : 4 x 4 numpy array
        Transformation matrix that can be applied to surf coords.

    """
    # The tkreg file actually encodes anat -> func; invert it for func -> anat
    anat_to_func = np.genfromtxt(reg_fname, skip_header=4, skip_footer=1)
    func_to_anat = np.linalg.inv(anat_to_func)
    # Build a tkreg-compatible voxel (IJK) -> RAS mapping for the volume
    vol_img = nib.load(vol_fname)
    mgh_img = nib.MGHImage(np.zeros(vol_img.shape[:3]),
                           vol_img.affine,
                           vol_img.header)
    ijk_to_ras = mgh_img.header.get_vox2ras_tkr()
    # Compose the two mappings into a single affine
    return np.dot(func_to_anat, ijk_to_ras)
def surf_to_voxel_coords(subj, hemi, xfm, surf="graymid",
                         subjects_dir=None):
    """Obtain voxel coordinates of surface vertices in the EPI volume.

    Parameters
    ----------
    subj : string
        Freesurfer subject ID.
    hemi : lh | rh
        Hemisphere of surface to map.
    xfm : 4 x 4 array
        Linear transformation matrix between spaces.
    surf : string
        Freesurfer surface name defining coords.
    subjects_dir : string, optional
        Path to the Freesurfer subjects directory; defaults to the
        ``SUBJECTS_DIR`` environment variable.

    Returns
    -------
    i, j, k : 1d int arrays
        Arrays of voxel indices.

    """
    # Load the surface geometry
    if subjects_dir is None:
        subjects_dir = os.environ["SUBJECTS_DIR"]
    surf_fname = os.path.join(subjects_dir, subj, "surf",
                              "{}.{}".format(hemi, surf))
    coords, _ = nib.freesurfer.read_geometry(surf_fname)
    # Map surface coords into the volume and round to voxel indices.
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    return apply_affine(xfm, coords).round().astype(int).T
| [
"nibabel.affines.apply_affine",
"nibabel.load",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.linalg.inv",
"nibabel.freesurfer.read_geometry",
"numpy.dot"
] | [((869, 923), 'numpy.genfromtxt', 'np.genfromtxt', (['reg_fname'], {'skip_header': '(4)', 'skip_footer': '(1)'}), '(reg_fname, skip_header=4, skip_footer=1)\n', (882, 923), True, 'import numpy as np\n'), ((944, 972), 'numpy.linalg.inv', 'np.linalg.inv', (['anat2func_xfm'], {}), '(anat2func_xfm)\n', (957, 972), True, 'import numpy as np\n'), ((1042, 1061), 'nibabel.load', 'nib.load', (['vol_fname'], {}), '(vol_fname)\n', (1050, 1061), True, 'import nibabel as nib\n'), ((1304, 1338), 'numpy.dot', 'np.dot', (['func2anat_xfm', 'vox2ras_tkr'], {}), '(func2anat_xfm, vox2ras_tkr)\n', (1310, 1338), True, 'import numpy as np\n'), ((2148, 2188), 'nibabel.freesurfer.read_geometry', 'nib.freesurfer.read_geometry', (['surf_fname'], {}), '(surf_fname)\n', (2176, 2188), True, 'import nibabel as nib\n'), ((1089, 1116), 'numpy.zeros', 'np.zeros', (['vol_img.shape[:3]'], {}), '(vol_img.shape[:3])\n', (1097, 1116), True, 'import numpy as np\n'), ((2201, 2226), 'nibabel.affines.apply_affine', 'apply_affine', (['xfm', 'coords'], {}), '(xfm, coords)\n', (2213, 2226), False, 'from nibabel.affines import apply_affine\n')] |
import numpy as np
def TSNR(noisy_stft, signal_gains, noise_estimation):
    """
    Reconstructs the signal by re-adding phase components to the magnitude estimate
    :param noisy_stft: stft of original noisy signal
    :param signal_gains: gains of each stft frame returned by DD
    :param noise_estimation: noise estimation average based on first n frames of noisy signal
    :return:
        signal_output: stft of signal after TSNR modification
        TSNR_gains: ndarray containing gain for each bin in signal_output
    """
    n_frames = noisy_stft.shape[1]
    signal_output = np.zeros(noisy_stft.shape, dtype=complex)
    gain_columns = []
    for f in range(n_frames):
        magnitude = np.abs(noisy_stft[:, f])
        # a-priori SNR of this frame, per frequency bin
        prior_snr = (signal_gains[:, f] * magnitude) ** 2 / noise_estimation
        # Wiener-style gain derived from the prior SNR
        frame_gain = prior_snr / (prior_snr + 1)
        gain_columns.append(frame_gain)
        # apply the gain while keeping the noisy phase
        signal_output[:, f] = frame_gain * noisy_stft[:, f]
    return signal_output, np.asarray(gain_columns).T
"numpy.abs",
"numpy.divide",
"numpy.zeros",
"numpy.asarray"
] | [((593, 634), 'numpy.zeros', 'np.zeros', (['noisy_stft.shape'], {'dtype': 'complex'}), '(noisy_stft.shape, dtype=complex)\n', (601, 634), True, 'import numpy as np\n'), ((721, 756), 'numpy.abs', 'np.abs', (['noisy_stft[:, frame_number]'], {}), '(noisy_stft[:, frame_number])\n', (727, 756), True, 'import numpy as np\n'), ((1004, 1039), 'numpy.divide', 'np.divide', (['prior_SNR', '(prior_SNR + 1)'], {}), '(prior_SNR, prior_SNR + 1)\n', (1013, 1039), True, 'import numpy as np\n'), ((1186, 1208), 'numpy.asarray', 'np.asarray', (['TSNR_gains'], {}), '(TSNR_gains)\n', (1196, 1208), True, 'import numpy as np\n')] |
import numpy as np
from functools import partial, update_wrapper
from itertools import product
from tensorflow.keras import backend as K
def wrapped_partial(func, *args, **kwargs):
    """Like functools.partial, but copies func's metadata (name, docstring)."""
    wrapper = partial(func, *args, **kwargs)
    # update_wrapper mutates and returns the wrapper
    return update_wrapper(wrapper, func)
def w_categorical_crossentropy(y_true, y_pred, weights):
    """Categorical cross-entropy scaled by a (true class, arg-max predicted class) weight."""
    n_classes = len(weights)
    mask = K.zeros_like(y_pred[:, 0])
    # one-hot indicator of the arg-max predicted class for every sample
    row_max = K.reshape(K.max(y_pred, axis=1), (K.shape(y_pred)[0], 1))
    is_max = K.cast(K.equal(y_pred, row_max), K.floatx())
    # accumulate the weight that applies to each sample's (true, pred) pair
    for pred_cls, true_cls in product(range(n_classes), range(n_classes)):
        mask += (weights[true_cls, pred_cls] * is_max[:, pred_cls] * y_true[:, true_cls])
    return K.categorical_crossentropy(y_true, y_pred) * mask
def ncce(w_categorical_crossentropy):
    """Build a weighted categorical cross-entropy loss with a fixed 4x4 weight matrix.

    The parameter shadows the module-level function of the same name; callers
    are expected to pass ``w_categorical_crossentropy`` itself.
    """
    w_array = np.ones((4, 4))
    # NOTE(review): these two assignments are no-ops -- w_array is already
    # all ones, so the class-2 penalties end up identical to the rest.
    # Presumably different weights were intended; confirm.
    w_array[(0,1,3), 2] = 1.0
    w_array[2, (0,1,3)] = 1.0
    ncce = wrapped_partial(w_categorical_crossentropy, weights=w_array)
    return ncce
def crop(img, cropx, cropy, position):
    """Crop an image to a ``cropy`` x ``cropx`` window.

    Parameters
    ----------
    img : ndarray
        Image indexed as (row, col, ...).
    cropx, cropy : int
        Width and height of the crop window.
    position : str
        Horizontal anchor: 'center', 'left' or 'right'. The vertical
        placement is always centered.

    Returns
    -------
    ndarray
        View of ``img`` restricted to the crop window.

    Raises
    ------
    ValueError
        If ``position`` is not one of the supported anchors.
    """
    y = img.shape[0]
    x = img.shape[1]
    # vertical placement is identical for every anchor
    starty = y//2-(cropy//2)
    if position == 'center':
        startx = x//2-(cropx//2)
    elif position == 'left':
        startx = 0
    elif position == 'right':
        startx = x-cropx
    else:
        # previously an unknown anchor fell through to an UnboundLocalError;
        # fail loudly with a clear message instead
        raise ValueError("position must be 'center', 'left' or 'right', "
                         "got {!r}".format(position))
    return img[starty:starty+cropy,startx:startx+cropx]
def normalize(x):
    """Standardize ``x`` to zero mean and unit variance."""
    centered = x - np.mean(x)
    return centered / np.std(x)
def create_circular_mask(h, w, center=None, radius=None):
    """Boolean mask of shape (h, w) that is True inside a circle.

    ``center`` defaults to the image middle; ``radius`` defaults to the
    largest circle around ``center`` that still fits inside the image.
    """
    if center is None:
        center = [int(w / 2), int(h / 2)]
    if radius is None:
        radius = min(center[0], center[1], w - center[0], h - center[1])
    rows, cols = np.ogrid[:h, :w]
    distance = np.sqrt((cols - center[0]) ** 2 + (rows - center[1]) ** 2)
    return distance <= radius
def mask_img(temp_img, center=None, radius=None):
    """Zero out everything outside a circular region of ``temp_img``.

    Parameters
    ----------
    temp_img : ndarray
        Image to mask; a copy is returned, the input is not modified.
    center : [x, y], optional
        Circle center; defaults to the image center.
    radius : float, optional
        Circle radius; defaults to the largest inscribed circle.

    Returns
    -------
    ndarray
        Copy of ``temp_img`` with pixels outside the circle set to 0.
    """
    # Bug fix: the caller-supplied center/radius were previously discarded
    # (hard-coded ``center=None, radius=None``), which made preprocess_l and
    # preprocess_r behave exactly like preprocess_c. Forward them instead.
    mask = create_circular_mask(temp_img.shape[0], temp_img.shape[1],
                               center=center, radius=radius)
    masked_img = temp_img.copy()
    masked_img[~mask] = 0
    return masked_img
def preprocess_c(img):
    """Normalize the image and apply a circular mask about its center.

    The normalization maps intensities to zero mean / unit variance and the
    circular mask makes the result rotation-friendly.
    """
    norm_img = normalize(img)
    return mask_img(norm_img)
def preprocess_l(img):
    """Normalize the image and apply a circular mask anchored at the left edge."""
    h = img.shape[0]
    norm_img = normalize(img)
    # circle center sits half the image height in from the left edge
    return mask_img(norm_img, center=[int(h / 2), int(h / 2)])
def preprocess_r(img):
    """Normalize the image and apply a circular mask anchored at the right edge."""
    h, w = img.shape[0], img.shape[1]
    norm_img = normalize(img)
    # circle center sits half the image height in from the right edge
    return mask_img(norm_img, center=[int(w - h / 2), int(h / 2)])
"functools.partial",
"tensorflow.keras.backend.floatx",
"numpy.std",
"tensorflow.keras.backend.shape",
"tensorflow.keras.backend.zeros_like",
"numpy.ones",
"tensorflow.keras.backend.max",
"functools.update_wrapper",
"numpy.mean",
"tensorflow.keras.backend.categorical_crossentropy",
"tensorflow.k... | [((202, 232), 'functools.partial', 'partial', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (209, 232), False, 'from functools import partial, update_wrapper\n'), ((237, 271), 'functools.update_wrapper', 'update_wrapper', (['partial_func', 'func'], {}), '(partial_func, func)\n', (251, 271), False, 'from functools import partial, update_wrapper\n'), ((397, 423), 'tensorflow.keras.backend.zeros_like', 'K.zeros_like', (['y_pred[:, 0]'], {}), '(y_pred[:, 0])\n', (409, 423), True, 'from tensorflow.keras import backend as K\n'), ((441, 462), 'tensorflow.keras.backend.max', 'K.max', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (446, 462), True, 'from tensorflow.keras import backend as K\n'), ((858, 873), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (865, 873), True, 'import numpy as np\n'), ((1885, 1937), 'numpy.sqrt', 'np.sqrt', (['((X - center[0]) ** 2 + (Y - center[1]) ** 2)'], {}), '((X - center[0]) ** 2 + (Y - center[1]) ** 2)\n', (1892, 1937), True, 'import numpy as np\n'), ((555, 582), 'tensorflow.keras.backend.equal', 'K.equal', (['y_pred', 'y_pred_max'], {}), '(y_pred, y_pred_max)\n', (562, 582), True, 'from tensorflow.keras import backend as K\n'), ((584, 594), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (592, 594), True, 'from tensorflow.keras import backend as K\n'), ((748, 790), 'tensorflow.keras.backend.categorical_crossentropy', 'K.categorical_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (774, 790), True, 'from tensorflow.keras import backend as K\n'), ((1505, 1514), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1511, 1514), True, 'import numpy as np\n'), ((1491, 1501), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1498, 1501), True, 'import numpy as np\n'), ((503, 518), 'tensorflow.keras.backend.shape', 'K.shape', (['y_pred'], {}), '(y_pred)\n', (510, 518), True, 'from tensorflow.keras import backend as K\n')] |
# Domain Adaptation experiments
import os
import random
import argparse
import copy
import pprint
import distutils
import distutils.util
from omegaconf import OmegaConf
import numpy as np
from tqdm import tqdm
import torch
from adapt.models.models import get_model
from adapt.solvers.solver import get_solver
from datasets.base import UDADataset
import utils
from adapt import *
# Fix all random number generators (Python, PyTorch CPU+CUDA, NumPy) so that
# experiment runs are reproducible.
random.seed(1234)
torch.manual_seed(1234)
np.random.seed(1234)
torch.cuda.manual_seed(1234)
def main():
    """Run one source->target unsupervised domain adaptation experiment.

    Pipeline: (1) parse CLI args (optionally merged over a YAML config),
    (2) train or load a source classifier, (3) adapt it to the target domain
    with the selected DA strategy, and (4) report per-class ("Avg.") and
    aggregate ("Agg.") target-test accuracy before and after adaptation.
    """
    parser = argparse.ArgumentParser()
    # Load existing configuration?
    parser.add_argument('--load_from_cfg', type=lambda x:bool(distutils.util.strtobool(x)), default=False, help="Load from config?")
    parser.add_argument('--cfg_file', type=str, help="Experiment configuration file", default="config/digits/dann.yml")
    # Experiment identifer
    parser.add_argument('--id', type=str, help="Experiment identifier")
    parser.add_argument('--use_cuda', help="Use GPU?")
    # Source and target domain
    parser.add_argument('--source', help="Source dataset")
    parser.add_argument('--target', help="Target dataset")
    parser.add_argument('--img_dir', type=str, default="data/", help="Data directory where images are stored")
    parser.add_argument('--LDS_type', type=str, default="natural", help="Label Distribution Shift type")
    # CNN parameters
    parser.add_argument('--cnn', type=str, help="CNN architecture")
    parser.add_argument('--load_source', type=lambda x:bool(distutils.util.strtobool(x)), default=True, help="Load source checkpoint?")
    parser.add_argument('--l2_normalize', type=lambda x:bool(distutils.util.strtobool(x)), help="L2 normalize features?")
    parser.add_argument('--temperature', type=float, help="CNN softmax temperature")
    # Class balancing parameters
    parser.add_argument('--class_balance_source', type=lambda x:bool(distutils.util.strtobool(x)), help="Class-balance source?")
    parser.add_argument('--pseudo_balance_target', type=lambda x:bool(distutils.util.strtobool(x)), help="Pseudo class-balance target?")
    # DA details
    parser.add_argument('--da_strat', type=str, help="DA strategy")
    parser.add_argument('--load_da', type=lambda x:bool(distutils.util.strtobool(x)), help="Load saved DA checkpoint?")
    # Training details
    parser.add_argument('--optimizer', type=str, help="Optimizer")
    parser.add_argument('--batch_size', type=int, help="Batch size")
    parser.add_argument('--lr', type=float, help="Learning rate")
    parser.add_argument('--wd', type=float, help="Weight decay")
    parser.add_argument('--num_epochs', type=int, help="Number of Epochs")
    parser.add_argument('--da_lr', type=float, help="Unsupervised DA Learning rate")
    parser.add_argument('--da_num_epochs', type=int, help="DA Number of epochs")
    # Loss weights
    parser.add_argument('--src_sup_wt', type=float, help="Source supervised XE loss weight")
    parser.add_argument('--tgt_sup_wt', type=float, help="Target self-training XE loss weight")
    parser.add_argument('--unsup_wt', type=float, help="Target unsupervised loss weight")
    parser.add_argument('--cent_wt', type=float, help="Target entropy minimization loss weight")
    args_cmd = parser.parse_args()

    if args_cmd.load_from_cfg:
        # Config file supplies defaults; any explicitly passed CLI flag
        # (i.e. not None) overrides the config value.
        args_cfg = dict(OmegaConf.load(args_cmd.cfg_file))
        args_cmd = vars(args_cmd)
        for k in args_cmd.keys():
            if args_cmd[k] is not None: args_cfg[k] = args_cmd[k]
        args = OmegaConf.create(args_cfg)
    else:
        args = args_cmd

    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(args)
    device = torch.device("cuda") if args.use_cuda else torch.device("cpu")

    ################################################################################################################
    #### Setup source data loaders
    ################################################################################################################
    print('Loading {} dataset'.format(args.source))
    src_dset = UDADataset(args.source, args.LDS_type, is_target=False, img_dir=args.img_dir, batch_size=args.batch_size)
    src_train_dset, _, _ = src_dset.get_dsets()
    src_train_loader, src_val_loader, src_test_loader, src_train_idx = src_dset.get_loaders(class_balance_train=args.class_balance_source)
    num_classes = src_dset.get_num_classes()
    args.num_classes = num_classes
    print('Number of classes: {}'.format(num_classes))

    ################################################################################################################
    #### Train / load a source model
    ################################################################################################################
    source_model = get_model(args.cnn, num_cls=num_classes, l2_normalize=args.l2_normalize, temperature=args.temperature)
    source_file = '{}_{}_source.pth'.format(args.source, args.cnn)
    source_path = os.path.join('checkpoints', 'source', source_file)
    if args.load_source and os.path.exists(source_path):
        print('\nFound source checkpoint at {}'.format(source_path))
        source_model.load_state_dict(torch.load(source_path, map_location=device))
        best_source_model = source_model
    else:
        print('\nSource checkpoint not found, training...')
        best_source_model = utils.train_source_model(source_model, src_train_loader, src_val_loader, num_classes, args, device)

    # Sanity-check the source model on its own (source) test split.
    print('Evaluating source checkpoint on {} test set...'.format(args.source))
    _, cm_source = utils.test(best_source_model, device, src_test_loader, split="test", num_classes=num_classes)
    per_class_acc_source = cm_source.diagonal().numpy() / cm_source.sum(axis=1).numpy()
    per_class_acc_source = per_class_acc_source.mean() * 100
    out_str = '{} Avg. acc.: {:.2f}% '.format(args.source, per_class_acc_source)
    print(out_str)

    # Adapt a copy so the original source weights stay untouched.
    model = copy.deepcopy(best_source_model)

    ################################################################################################################
    #### Setup target data loaders
    ################################################################################################################
    print('\nLoading {} dataset'.format(args.target))
    target_dset = UDADataset(args.target, args.LDS_type, is_target=True, img_dir=args.img_dir, valid_ratio=0, batch_size=args.batch_size)
    target_dset.get_dsets()
    # Manually long tail target training set for SVHN->MNIST-LT adaptation
    if args.LDS_type in ['IF1', 'IF20', 'IF50', 'IF100']:
        target_dset.long_tail_train('{}_ixs_{}'.format(args.target, args.LDS_type))
    print('Evaluating source checkpoint on {} test set...'.format(args.target))
    target_train_loader, target_val_loader, target_test_loader, tgt_train_idx = target_dset.get_loaders()
    acc_before, cm_before = utils.test(model, device, target_test_loader, split="test", num_classes=num_classes)
    per_class_acc_before = cm_before.diagonal().numpy() / cm_before.sum(axis=1).numpy()
    per_class_acc_before = per_class_acc_before.mean() * 100
    out_str = '{}->{}-LT ({}), Before {}:\t Avg. acc={:.2f}%\tAgg. acc={:.2f}%'.format(args.source, args.target, args.LDS_type, \
                                                                                      args.da_strat, per_class_acc_before, acc_before)
    print(out_str)

    ################################################################################################################
    #### Unsupervised adaptation of source model to target
    ################################################################################################################
    da_file = '{:s}_{:s}_{}_{}_net_{:s}_{:s}_{:s}.pth'.format(args.id, args.da_strat, args.da_lr, args.cnn, \
                                                              args.source, args.target, args.LDS_type)
    outdir = 'checkpoints'
    os.makedirs(os.path.join(outdir, args.da_strat), exist_ok=True)
    outfile = os.path.join(outdir, args.da_strat, da_file)
    model_name = 'AdaptNet'
    if args.load_da and os.path.exists(outfile):
        print('Trained {} checkpoint found: {}, loading...\n'.format(args.da_strat, outfile))
        net = get_model(model_name, num_cls=num_classes, weights_init=outfile, model=args.cnn, \
                        l2_normalize=args.l2_normalize, temperature=args.temperature)
        source_model_adapt = net.tgt_net
    else:
        net = get_model(model_name, model=args.cnn, num_cls=num_classes, src_weights_init=source_path, \
                        l2_normalize=args.l2_normalize, temperature=args.temperature).to(device)
        print(net)
        print('Training {} {} model for {}->{}-LT ({})\n'.format(args.da_strat, args.cnn, args.source, args.target, args.LDS_type))
        opt_net = utils.generate_optimizer(net.tgt_net, args, mode='da')
        solver = get_solver(args.da_strat, net.tgt_net, src_train_loader, \
                            target_train_loader, tgt_train_idx, opt_net, device, num_classes, args)
        for epoch in range(args.da_num_epochs):
            if args.pseudo_balance_target:
                print('\nEpoch {}: Re-estimating probabilities for pseudo-balancing...'.format(epoch))
                # Approximately class-balance target dataloader using pseudolabels at the start of each epoch
                target_dset_copy = copy.deepcopy(target_dset)
                src_train_dset_copy = copy.deepcopy(src_train_loader.dataset)
                _, gtlabels, plabels = utils.get_embedding(solver.net, target_train_loader, device, num_classes, args)
                target_dset_copy.train_dataset.targets_copy = copy.deepcopy(target_dset_copy.train_dataset.targets) # Create backup of actual labels
                target_dset_copy.train_dataset.targets = plabels
                tgt_train_loader_pbalanced, _, _, _ = target_dset_copy.get_loaders(class_balance_train=True)
                tgt_train_loader_pbalanced.dataset.targets_copy = target_dset_copy.train_dataset.targets_copy
                solver.tgt_loader = tgt_train_loader_pbalanced
            if args.da_strat == 'dann':
                # DANN additionally needs the domain discriminator and its optimizer.
                opt_dis = utils.generate_optimizer(net.discriminator, args, mode='da')
                solver.solve(epoch, net.discriminator, opt_dis)
            else:
                solver.solve(epoch)
        print('Saving to', outfile)
        net.save(outfile)
        source_model_adapt = net.tgt_net

    # Evaluate adapted model
    print('\nEvaluating adapted model on {} test set'.format(args.target))
    acc_after, cm_after = utils.test(source_model_adapt, device, target_test_loader, split="test", num_classes=num_classes)
    per_class_acc_after = cm_after.diagonal().numpy() / cm_after.sum(axis=1).numpy()
    per_class_acc_after = per_class_acc_after.mean() * 100
    print('###################################')
    out_str = '{}->{}-LT ({}), Before {}:\t Avg. acc={:.2f}%\tAgg. acc={:.2f}%'.format(args.source, args.target, args.LDS_type, \
                                                                                      args.da_strat, per_class_acc_before, acc_before)
    out_str += '\n\t\t\tAfter {}:\t Avg. acc={:.2f}%\tAgg. acc={:.2f}%'.format(args.da_strat, per_class_acc_after, acc_after)
    print(out_str)
    utils.plot_accuracy_statistics(cm_before, cm_after, num_classes, args, target_train_loader)
# Script entry point: only run the experiment when executed directly.
if __name__ == '__main__':
    main()
"adapt.solvers.solver.get_solver",
"utils.test",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.device",
"os.path.join",
"utils.train_source_model",
"torch.load",
"os.path.exists",
"utils.get_embedding",
"random.seed",
"copy.deepcopy",
"adapt.models.models.get_model",
"distutils.ut... | [((383, 400), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (394, 400), False, 'import random\n'), ((401, 424), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (418, 424), False, 'import torch\n'), ((425, 445), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (439, 445), True, 'import numpy as np\n'), ((446, 474), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1234)'], {}), '(1234)\n', (468, 474), False, 'import torch\n'), ((499, 524), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (522, 524), False, 'import argparse\n'), ((3398, 3428), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (3418, 3428), False, 'import pprint\n'), ((3844, 3954), 'datasets.base.UDADataset', 'UDADataset', (['args.source', 'args.LDS_type'], {'is_target': '(False)', 'img_dir': 'args.img_dir', 'batch_size': 'args.batch_size'}), '(args.source, args.LDS_type, is_target=False, img_dir=args.\n img_dir, batch_size=args.batch_size)\n', (3854, 3954), False, 'from datasets.base import UDADataset\n'), ((4542, 4648), 'adapt.models.models.get_model', 'get_model', (['args.cnn'], {'num_cls': 'num_classes', 'l2_normalize': 'args.l2_normalize', 'temperature': 'args.temperature'}), '(args.cnn, num_cls=num_classes, l2_normalize=args.l2_normalize,\n temperature=args.temperature)\n', (4551, 4648), False, 'from adapt.models.models import get_model\n'), ((4726, 4776), 'os.path.join', 'os.path.join', (['"""checkpoints"""', '"""source"""', 'source_file'], {}), "('checkpoints', 'source', source_file)\n", (4738, 4776), False, 'import os\n'), ((5287, 5384), 'utils.test', 'utils.test', (['best_source_model', 'device', 'src_test_loader'], {'split': '"""test"""', 'num_classes': 'num_classes'}), "(best_source_model, device, src_test_loader, split='test',\n num_classes=num_classes)\n", (5297, 5384), False, 'import utils\n'), ((5633, 5665), 'copy.deepcopy', 'copy.deepcopy', 
(['best_source_model'], {}), '(best_source_model)\n', (5646, 5665), False, 'import copy\n'), ((5995, 6118), 'datasets.base.UDADataset', 'UDADataset', (['args.target', 'args.LDS_type'], {'is_target': '(True)', 'img_dir': 'args.img_dir', 'valid_ratio': '(0)', 'batch_size': 'args.batch_size'}), '(args.target, args.LDS_type, is_target=True, img_dir=args.img_dir,\n valid_ratio=0, batch_size=args.batch_size)\n', (6005, 6118), False, 'from datasets.base import UDADataset\n'), ((6554, 6643), 'utils.test', 'utils.test', (['model', 'device', 'target_test_loader'], {'split': '"""test"""', 'num_classes': 'num_classes'}), "(model, device, target_test_loader, split='test', num_classes=\n num_classes)\n", (6564, 6643), False, 'import utils\n'), ((7549, 7593), 'os.path.join', 'os.path.join', (['outdir', 'args.da_strat', 'da_file'], {}), '(outdir, args.da_strat, da_file)\n', (7561, 7593), False, 'import os\n'), ((9832, 9933), 'utils.test', 'utils.test', (['source_model_adapt', 'device', 'target_test_loader'], {'split': '"""test"""', 'num_classes': 'num_classes'}), "(source_model_adapt, device, target_test_loader, split='test',\n num_classes=num_classes)\n", (9842, 9933), False, 'import utils\n'), ((10458, 10553), 'utils.plot_accuracy_statistics', 'utils.plot_accuracy_statistics', (['cm_before', 'cm_after', 'num_classes', 'args', 'target_train_loader'], {}), '(cm_before, cm_after, num_classes, args,\n target_train_loader)\n', (10488, 10553), False, 'import utils\n'), ((3338, 3364), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (['args_cfg'], {}), '(args_cfg)\n', (3354, 3364), False, 'from omegaconf import OmegaConf\n'), ((3458, 3478), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3470, 3478), False, 'import torch\n'), ((3501, 3520), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3513, 3520), False, 'import torch\n'), ((4803, 4830), 'os.path.exists', 'os.path.exists', (['source_path'], {}), '(source_path)\n', (4817, 4830), False, 'import 
os\n'), ((5093, 5196), 'utils.train_source_model', 'utils.train_source_model', (['source_model', 'src_train_loader', 'src_val_loader', 'num_classes', 'args', 'device'], {}), '(source_model, src_train_loader, src_val_loader,\n num_classes, args, device)\n', (5117, 5196), False, 'import utils\n'), ((7486, 7521), 'os.path.join', 'os.path.join', (['outdir', 'args.da_strat'], {}), '(outdir, args.da_strat)\n', (7498, 7521), False, 'import os\n'), ((7641, 7664), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (7655, 7664), False, 'import os\n'), ((7764, 7911), 'adapt.models.models.get_model', 'get_model', (['model_name'], {'num_cls': 'num_classes', 'weights_init': 'outfile', 'model': 'args.cnn', 'l2_normalize': 'args.l2_normalize', 'temperature': 'args.temperature'}), '(model_name, num_cls=num_classes, weights_init=outfile, model=args\n .cnn, l2_normalize=args.l2_normalize, temperature=args.temperature)\n', (7773, 7911), False, 'from adapt.models.models import get_model\n'), ((8289, 8343), 'utils.generate_optimizer', 'utils.generate_optimizer', (['net.tgt_net', 'args'], {'mode': '"""da"""'}), "(net.tgt_net, args, mode='da')\n", (8313, 8343), False, 'import utils\n'), ((8355, 8487), 'adapt.solvers.solver.get_solver', 'get_solver', (['args.da_strat', 'net.tgt_net', 'src_train_loader', 'target_train_loader', 'tgt_train_idx', 'opt_net', 'device', 'num_classes', 'args'], {}), '(args.da_strat, net.tgt_net, src_train_loader,\n target_train_loader, tgt_train_idx, opt_net, device, num_classes, args)\n', (8365, 8487), False, 'from adapt.solvers.solver import get_solver\n'), ((3181, 3214), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['args_cmd.cfg_file'], {}), '(args_cmd.cfg_file)\n', (3195, 3214), False, 'from omegaconf import OmegaConf\n'), ((4926, 4970), 'torch.load', 'torch.load', (['source_path'], {'map_location': 'device'}), '(source_path, map_location=device)\n', (4936, 4970), False, 'import torch\n'), ((7965, 8120), 'adapt.models.models.get_model', 
'get_model', (['model_name'], {'model': 'args.cnn', 'num_cls': 'num_classes', 'src_weights_init': 'source_path', 'l2_normalize': 'args.l2_normalize', 'temperature': 'args.temperature'}), '(model_name, model=args.cnn, num_cls=num_classes, src_weights_init\n =source_path, l2_normalize=args.l2_normalize, temperature=args.temperature)\n', (7974, 8120), False, 'from adapt.models.models import get_model\n'), ((8783, 8809), 'copy.deepcopy', 'copy.deepcopy', (['target_dset'], {}), '(target_dset)\n', (8796, 8809), False, 'import copy\n'), ((8836, 8875), 'copy.deepcopy', 'copy.deepcopy', (['src_train_loader.dataset'], {}), '(src_train_loader.dataset)\n', (8849, 8875), False, 'import copy\n'), ((8904, 8983), 'utils.get_embedding', 'utils.get_embedding', (['solver.net', 'target_train_loader', 'device', 'num_classes', 'args'], {}), '(solver.net, target_train_loader, device, num_classes, args)\n', (8923, 8983), False, 'import utils\n'), ((9035, 9088), 'copy.deepcopy', 'copy.deepcopy', (['target_dset_copy.train_dataset.targets'], {}), '(target_dset_copy.train_dataset.targets)\n', (9048, 9088), False, 'import copy\n'), ((9477, 9537), 'utils.generate_optimizer', 'utils.generate_optimizer', (['net.discriminator', 'args'], {'mode': '"""da"""'}), "(net.discriminator, args, mode='da')\n", (9501, 9537), False, 'import utils\n'), ((616, 643), 'distutils.util.strtobool', 'distutils.util.strtobool', (['x'], {}), '(x)\n', (640, 643), False, 'import distutils\n'), ((1441, 1468), 'distutils.util.strtobool', 'distutils.util.strtobool', (['x'], {}), '(x)\n', (1465, 1468), False, 'import distutils\n'), ((1576, 1603), 'distutils.util.strtobool', 'distutils.util.strtobool', (['x'], {}), '(x)\n', (1600, 1603), False, 'import distutils\n'), ((1816, 1843), 'distutils.util.strtobool', 'distutils.util.strtobool', (['x'], {}), '(x)\n', (1840, 1843), False, 'import distutils\n'), ((1943, 1970), 'distutils.util.strtobool', 'distutils.util.strtobool', (['x'], {}), '(x)\n', (1967, 1970), False, 'import 
distutils\n'), ((2143, 2170), 'distutils.util.strtobool', 'distutils.util.strtobool', (['x'], {}), '(x)\n', (2167, 2170), False, 'import distutils\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 20 19:06:55 2014
@author: <NAME>
This program controls the mirrors attached to the galvos. The position of one mirror is controlled by a sine wave, the other mirror is controlled by a cosine wave.
When a laser is reflected off both mirrors, it spins in a circle. This can be used to position the laser for shadowless TIRF microscopy.
Make sure that the National Instruments DAC PCI-6733 is "Dev1" with the National Instruments Measurement and Automation tool.
To view pins, look at http://www.ni.com/pdf/manuals/371232b.pdf figure 4.
- Pin 57 (ao2) is the sine wave.
- Pin 25 (ao3) is the cosine wave.
- Pin 60 (ao4) is the ttl pulse which is which is triggered at the beginning of every period and should be plugged into Pin 1 (top right) of the Cascade Photometrics Model 128 camera.
- Pin 69 (analog ground) should be plugged into Pin 3 of the Camera (top, third pin from right).
The blue laser (Laser 1) is pin 28 (ao5). Ground is pin 29.
The green laser (Laser 2) is pin 30 (ao6). Ground is pin 31.
To Use:
Run this program with python.
Open MetaMorph. In MetaMorph:
Acquire->Acquire
Trigger Mode: External (STROBED)
Live Trigger Mode: External (STROBED)
Acquire->Stream Acquisition->Camera Parameters
Acquisition Mode: Acquire images from each external trigger
Make sure, if you check 'Display preview image during acquisition', that you aren't updating too often, or you will miss frames.
Radius: 0-10V
x_shift:-10 -10V
y_shift:-10 -10V
LASER CONTROL:
WIRES:
green - ground
blue - blue laser (488nm)
yellow - green laser
TTL control info:
Green laser is active low
Blue laser is active high
Blue laser requires "Digital:Power" mode in 'Coherent Connection' software to be operated via ttl pulse.
"""
from __future__ import division
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
import dependency_check
from PyDAQmx import *
from PyDAQmx.DAQmxCallBack import *
import numpy as np
from PyQt4.QtGui import * # Qt is Nokias GUI rendering code written in C++. PyQt4 is a library in python which binds to Qt
from PyQt4.QtCore import *
from PyQt4.QtCore import pyqtSignal as Signal
from PyQt4.QtCore import pyqtSlot as Slot
import sys
from ctypes import byref
if sys.version_info.major==2:
import cPickle as pickle # pickle serializes Python objects so they can be saved persistently. It converts a Python object into a savable data structure
else:
import pickle
import os, time
from os.path import expanduser
class Settings:
    """Persistent store for the four galvo/laser setting banks.

    Settings are pickled to ``~/.ShadowlessTIRF/config.p`` by :meth:`save`,
    so the program reopens with everything exactly as it was adjusted last.
    ``self.i`` selects which of the four banks item access (``s['key']``)
    reads and writes; bank 0 is the live bank, banks 1-3 hold the
    alternation presets.
    """
    def __init__(self):
        self.i = 0  # index of the currently active setting bank
        self.config_file = os.path.join(expanduser("~"), '.ShadowlessTIRF', 'config.p')
        try:
            # Use a context manager so the file handle is closed promptly
            # (the previous code left a dangling open file to the GC).
            with open(self.config_file, "rb") as f:
                self.d = pickle.load(f)
        except IOError:
            # No saved configuration yet: build the factory defaults.
            a = dict()
            a['frequency'] = 50          # Hz
            a['radius'] = 5              # in volts. Max amplitude is 10 volts
            a['ellipticity'] = 1
            a['phase'] = 0
            a['x_shift'] = 0
            a['y_shift'] = 0
            a['alternate12'] = False     # When True, settings 1 and 2 are alternated every cycle.
            a['alternate123'] = False    # When True, settings 1, 2 and 3 are cycled through.
            a['blue_laser'] = False
            a['green_laser'] = False
            a['green_laser_power'] = 5   # in volts
            a['blue_laser_power'] = 5    # in volts
            self.d = [a, a.copy(), a.copy(), a.copy()]

    def __getitem__(self, item):
        """Read *item* from the active bank."""
        return self.d[self.i][item]

    def __setitem__(self, key, item):
        """Write *item* under *key* in the active bank."""
        self.d[self.i][key] = item

    def save(self):
        """Pickle all four banks to the config file, creating its directory
        on first use."""
        if not os.path.exists(os.path.dirname(self.config_file)):
            os.makedirs(os.path.dirname(self.config_file))
        with open(self.config_file, "wb") as f:
            pickle.dump(self.d, f)

    def keys(self):
        """Return the setting names of the active bank."""
        return self.d[self.i].keys()
class GalvoDriver(QWidget):
    """Builds the galvo mirror drive waveforms (sine + cosine), the camera
    trigger TTL and the two laser control signals, and streams them
    continuously to the NI DAC on analog outputs ao2-ao6.
    """
    finished_acquire_sig = Signal()  # emitted when a stream acquisition finishes

    def __init__(self, settings):
        QWidget.__init__(self)
        self.settings = settings
        self.sample_rate = 10000  # samples/s. Maximum for the NI PCI-6733 is 1MHz.
        # SH: my PCI 6722 gives out unless I reduce the sample rate
        self.sampsPerPeriod = 1   # placeholder; calculate() computes the real value
        self.calculate()
        self.read = int32()
        self.createTask()
        self.hide()

    def createTask(self):
        """Create the analog-output task, register the five output channels
        and start streaming self.data continuously."""
        self.analog_output = Task()
        self.analog_output.CreateAOVoltageChan("Dev1/ao2","",-10.0,10.0,DAQmx_Val_Volts,None) # On the NI PCI-6733, ao2 is pin 57 and ground is 56 (sine wave -> mirror 1)
        self.analog_output.CreateAOVoltageChan("Dev1/ao3","",-10.0,10.0,DAQmx_Val_Volts,None) # On the NI PCI-6733, ao3 is pin 25 and ground is 24 (cosine wave -> mirror 2)
        self.analog_output.CreateAOVoltageChan("Dev1/ao4","",-10.0,10.0,DAQmx_Val_Volts,None) # On the NI PCI-6733, ao4 is pin 60 and ground is 59 (camera trigger TTL)
        self.analog_output.CreateAOVoltageChan("Dev1/ao5","",-10.0,10.0,DAQmx_Val_Volts,None) # On the NI PCI-6733, ao5 is pin 28 and ground is 29. This is blue laser
        self.analog_output.CreateAOVoltageChan("Dev1/ao6","",-10.0,10.0,DAQmx_Val_Volts,None) # On the NI PCI-6733, ao6 is pin 30 and ground is 31. This is green laser
        # CfgSampClkTiming(source, rate, activeEdge, sampleMode, sampsPerChan)
        self.analog_output.CfgSampClkTiming("",self.sample_rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,self.sampsPerPeriod)
        # WriteAnalogF64(numSampsPerChan, autoStart, timeout, dataLayout, writeArray, sampsPerChanWritten, reserved)
        self.analog_output.WriteAnalogF64(self.sampsPerPeriod,0,-1,DAQmx_Val_GroupByChannel,self.data,byref(self.read),None)
        self.analog_output.StartTask()
        self.stopped = False
        self.acquiring = False

    def getSinCosTTL(self,frequency,radius,ellipticity,phase,x_shift,y_shift,blue_laser,green_laser,blue_laser_power,green_laser_power,period=.005):
        """Return one period of the five output waveforms.

        Returns (sinwave, coswave, camera_ttl, blue_laser_ttl, green_laser_ttl),
        all sampled at self.sample_rate.  The *period* argument is only used
        when *frequency* is 0 (the mirrors then hold a fixed position).
        The camera TTL is a single 5 V sample at the start of each period.
        """
        if frequency==0:
            # Stationary beam: hold the mirrors at the phase-0 position.
            t=np.arange(0,period,1/self.sample_rate)
            sinwave=radius*np.sin(np.zeros(len(t)))+x_shift
            coswave=(ellipticity*radius*np.cos(np.zeros(len(t))+phase*(2*np.pi/360)))+(y_shift)
        else:
            period=1/frequency
            t=np.arange(0,period,1/self.sample_rate )
            sinwave=radius*np.sin(frequency*(t*(2*np.pi)))+x_shift
            coswave=(ellipticity*radius*np.cos(frequency*t*2*np.pi+phase*(2*np.pi/360)))+(y_shift)
        # One 5 V trigger sample per period (the original built this twice;
        # the duplicate assignment has been removed).
        camera_ttl=np.zeros(len(t))
        camera_ttl[0]=5
        if blue_laser:
            blue_laser_ttl=blue_laser_power*np.ones(len(t))
            # Alternative quadrant-only illumination patterns, kept for reference:
            #a=len(t)
            #blue_laser_ttl[a/8:3*a/8]=blue_laser_power #right
            #blue_laser_ttl[3*a/8:5*a/8]=blue_laser_power #bottom
            #blue_laser_ttl[5*a/8:7*a/8]=blue_laser_power #left
            #blue_laser_ttl[:a/8]=blue_laser_power; blue_laser_ttl[7*a/8:]=blue_laser_power #top
        else:
            blue_laser_ttl=-.08*np.ones(len(t))
        if green_laser:
            green_laser_ttl=green_laser_power*np.ones(len(t)) #0V is on for green laser
        else:
            green_laser_ttl=-.08*np.ones(len(t)) #5V is off for green laser
        return sinwave,coswave,camera_ttl, blue_laser_ttl, green_laser_ttl

    def calculate(self):
        """Rebuild self.data / self.sampsPerPeriod from the current settings.

        With DAQmx_Val_GroupByChannel the buffer is laid out channel by
        channel, so the concatenation order is: all sine samples, all cosine
        samples, camera TTL, blue laser, green laser (with the per-setting
        segments back to back inside each channel when alternating).
        """
        s=self.settings
        if s['alternate12'] is False and s['alternate123'] is False:
            sinwave,coswave,camera_ttl,blue_laser_ttl, green_laser_ttl=self.getSinCosTTL(s['frequency'],s['radius'],s['ellipticity'],s['phase'],s['x_shift'],s['y_shift'],s['blue_laser'],s['green_laser'],s['blue_laser_power'],s['green_laser_power'])
            self.data=np.concatenate((sinwave,coswave,camera_ttl,blue_laser_ttl,green_laser_ttl))
            self.sampsPerPeriod=len(sinwave)
        elif s['alternate12']:
            f1=s.d[1]['frequency']
            f2=s.d[2]['frequency']
            # A zero frequency borrows the other setting's period so both
            # segments stay aligned.
            if f1==0 and f2==0:
                period1=.005; period2=.005; # Alternate every 5ms if the frequency for both setting #1 and setting #2 is 0
            elif f1==0:
                period1=1/f2; period2=period1 # Adopt the period of setting #2 if the frequency for setting #1 is 0
            elif f2==0:
                period1=1/f1; period2=period1
            else:
                period1=1/f1; period2=1/f2
            sinwave1,coswave1,camera_ttl1,blue_laser_ttl1,green_laser_ttl1=self.getSinCosTTL(f1,s.d[1]['radius'],s.d[1]['ellipticity'],s.d[1]['phase'],s.d[1]['x_shift'],s.d[1]['y_shift'],s.d[1]['blue_laser'],s.d[1]['green_laser'],s.d[1]['blue_laser_power'],s.d[1]['green_laser_power'],period1)
            sinwave2,coswave2,camera_ttl2,blue_laser_ttl2,green_laser_ttl2=self.getSinCosTTL(f2,s.d[2]['radius'],s.d[2]['ellipticity'],s.d[2]['phase'],s.d[2]['x_shift'],s.d[2]['y_shift'],s.d[2]['blue_laser'],s.d[2]['green_laser'],s.d[2]['blue_laser_power'],s.d[2]['green_laser_power'],period2)
            self.data=np.concatenate((sinwave1,sinwave2,coswave1,coswave2,camera_ttl1,camera_ttl2,blue_laser_ttl1,blue_laser_ttl2,green_laser_ttl1,green_laser_ttl2))
            self.sampsPerPeriod=len(sinwave1)+len(sinwave2)
        elif s['alternate123']:
            f1=s.d[1]['frequency']
            f2=s.d[2]['frequency']
            f3=s.d[3]['frequency']
            # Same borrowing scheme, extended to three settings.
            if f1==0 and f2==0 and f3==0:
                period1=.005; period2=.005; period3=.005;
            elif f1==0 and f2==0:
                period3=1/f3; period2=period3; period1=period3;
            elif f1==0 and f3==0:
                period2=1/f2; period1=period2; period3=period2
            elif f2==0 and f3==0:
                period1=1/f1; period2=period1; period3=period1
            elif f1==0:
                period2=1/f2; period3=1/f3; period1=period2
            elif f2==0:
                period1=1/f1; period2=period1; period3=1/f3
            elif f3==0:
                period1=1/f1; period2=1/f2; period3=period1
            else:
                period1=1/f1; period2=1/f2; period3=1/f3
            sinwave1,coswave1,camera_ttl1,blue_laser_ttl1,green_laser_ttl1=self.getSinCosTTL(f1,s.d[1]['radius'],s.d[1]['ellipticity'],s.d[1]['phase'],s.d[1]['x_shift'],s.d[1]['y_shift'],s.d[1]['blue_laser'],s.d[1]['green_laser'],s.d[1]['blue_laser_power'],s.d[1]['green_laser_power'],period1)
            sinwave2,coswave2,camera_ttl2,blue_laser_ttl2,green_laser_ttl2=self.getSinCosTTL(f2,s.d[2]['radius'],s.d[2]['ellipticity'],s.d[2]['phase'],s.d[2]['x_shift'],s.d[2]['y_shift'],s.d[2]['blue_laser'],s.d[2]['green_laser'],s.d[2]['blue_laser_power'],s.d[2]['green_laser_power'],period2)
            sinwave3,coswave3,camera_ttl3,blue_laser_ttl3,green_laser_ttl3=self.getSinCosTTL(f3,s.d[3]['radius'],s.d[3]['ellipticity'],s.d[3]['phase'],s.d[3]['x_shift'],s.d[3]['y_shift'],s.d[3]['blue_laser'],s.d[3]['green_laser'],s.d[3]['blue_laser_power'],s.d[3]['green_laser_power'],period3)
            self.data=np.concatenate((sinwave1,sinwave2,sinwave3,coswave1,coswave2,coswave3,camera_ttl1,camera_ttl2,camera_ttl3,blue_laser_ttl1,blue_laser_ttl2,blue_laser_ttl3,green_laser_ttl1,green_laser_ttl2,green_laser_ttl3))
            self.sampsPerPeriod=len(sinwave1)+len(sinwave2)+len(sinwave3)

    def startstop(self):
        """Toggle the output task on/off.  When stopping, first park the
        beam (frequency 0, small radius) so the mirrors come to rest."""
        if self.stopped:
            self.analog_output.StartTask()
            self.stopped=False
            self.refresh()
        else:
            self.settings.d[0]['frequency']=0
            self.settings.d[0]['radius']=.6
            # NOTE(review): 'alternate' is not one of the default setting keys
            # ('alternate12'/'alternate123') -- possibly a typo; confirm intent.
            self.settings.d[0]['alternate']=False
            self.refresh()
            self.analog_output.StopTask()
            self.stopped=True

    def refresh(self):
        """Recompute the waveforms and push them to the DAC (no-op while
        the task is stopped)."""
        if self.stopped is False:
            self.calculate()
            self.analog_output.StopTask()
            self.analog_output.CfgSampClkTiming("",self.sample_rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,self.sampsPerPeriod)
            self.analog_output.WriteAnalogF64(self.sampsPerPeriod,0,-1,DAQmx_Val_GroupByChannel,self.data,byref(self.read),None)
            self.analog_output.StartTask()

    def acquire(self):
        """Start a stream acquisition: temporarily output a small stationary
        'shutter' waveform, and register a per-period callback that opens the
        shutter after 100 periods and stops after 200 (see EveryNCallback_py)."""
        print('Acquiring')
        self.acquiring=True
        self.counter=0
        self.tic=time.time()
        # Compute a parked-beam waveform without permanently changing settings.
        radius=self.settings.d[0]['radius']; alternate12=self.settings.d[0]['alternate12']; alternate123=self.settings.d[0]['alternate123']
        self.settings['radius']=.6
        self.settings['alternate12']=False
        self.settings['alternate123']=False
        self.calculate()
        self.settings['radius']=radius; self.settings['alternate12']=alternate12; self.settings['alternate123']=alternate123
        if self.stopped is False:
            self.analog_output.StopTask()
        # Keep a reference to the callback so it is not garbage collected.
        self.EveryNCallback = DAQmxEveryNSamplesEventCallbackPtr(self.EveryNCallback_py)
        self.nSamples=int(self.sampsPerPeriod)
        DAQmxRegisterEveryNSamplesEvent(self.analog_output.taskHandle,DAQmx_Val_Transferred_From_Buffer,self.nSamples,0,self.EveryNCallback,None)
        self.analog_output.CfgSampClkTiming("",self.sample_rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,self.sampsPerPeriod)
        self.analog_output.WriteAnalogF64(self.sampsPerPeriod,0,-1,DAQmx_Val_GroupByChannel,self.data,byref(self.read),None)
        self.analog_output.StartTask()
        self.stopped=False

    def EveryNCallback_py(self,taskHandle, status, callbackData,sure):
        """DAQmx per-period callback: after 100 periods switch to the real
        (user-configured) waveform, after 200 periods stop acquiring."""
        self.counter+=1
        if self.counter==100:
            self.calculate()
            self.analog_output.StopTask()
            self.analog_output.CfgSampClkTiming("",self.sample_rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,self.sampsPerPeriod)
            self.analog_output.WriteAnalogF64(self.sampsPerPeriod,0,-1,DAQmx_Val_GroupByChannel,self.data,byref(self.read),None)
            self.analog_output.StartTask()
            print('Opened "shutter" because counter reached {}'.format(self.counter))
        if self.counter==200:
            self.stopAcquiring()
            print('Stopped Acquiring because counter reached {}'.format(self.counter))
        #print(time.time()-self.tic)
        return 0 # The function should return an integer
    def stopAcquiring(self):
        """End the acquisition: park the beam, rebuild the task and notify
        listeners via finished_acquire_sig."""
        self.settings.d[0]['frequency']=0
        self.settings.d[0]['radius']=.6
        self.settings.d[0]['alternate12']=False
        self.settings.d[0]['alternate123']=False
        self.calculate()
        self.createTask()
        self.startstop()
        self.acquiring=False
        self.finished_acquire_sig.emit()
        #maingui.acquireButton.setStyleSheet("background-color: green");
#maingui.acquireButton.setStyleSheet("background-color: green");
##############################################################################
#### GRAPHICAL USER INTERFACE ##############################################
##############################################################################
class SliderLabel(QWidget):
    '''SliderLabel is a widget containing a QSlider and a QSpinBox (or QDoubleSpinBox if decimals are required)
    The QSlider and SpinBox are connected so that a change in one causes the other to change.
    '''
    changeSignal=Signal(int)
    def __init__(self,decimals=0): #decimals specifies the resolution of the slider. 0 means only integers, 1 means the tens place, etc.
        QWidget.__init__(self)
        self.slider=QSlider(Qt.Horizontal)
        self.slider.setStyleSheet("QSlider {min-height: 68px; max-height: 68px; }\n"
                                  "QSlider::groove:horizontal {border: 1px solid #262626; height: 5px; background: #393939; margin: 0 12px;}\n"
                                  "QSlider::handle:horizontal { background: #22B14C; border: 5px solid #B5E61D; width: 23px; height: 100px; margin: -24px -12px; }\n") # sets the slider style
        self.decimals=decimals
        # Integer resolution uses a plain QSpinBox; fractional resolution
        # needs a QDoubleSpinBox with the matching decimal count.
        if self.decimals<=0:
            self.label=QSpinBox()
        else:
            self.label=QDoubleSpinBox()
            self.label.setDecimals(self.decimals)
        self.layout=QHBoxLayout()
        self.layout.addWidget(self.slider)
        self.layout.addWidget(self.label)
        self.setLayout(self.layout)
        # Keep slider and spinbox in sync: the slider stores the value scaled
        # by 10**decimals (QSlider is integer-only).
        self.slider.valueChanged.connect(lambda val: self.updateLabel(val/10**self.decimals))
        self.label.valueChanged.connect(self.updateSlider)
        self.valueChanged=self.label.valueChanged
    @Slot(int, float)
    def updateSlider(self,value):
        # Mirror a spinbox change onto the (scaled, integer) slider.
        self.slider.setValue(int(value*10**self.decimals))
    def updateLabel(self,value):
        # Mirror a slider change onto the spinbox (value already unscaled).
        self.label.setValue(value)
    def value(self):
        # The spinbox holds the authoritative (unscaled) value.
        return self.label.value()
    def setRange(self,minn,maxx):
        self.slider.setRange(minn*10**self.decimals,maxx*10**self.decimals)
        self.label.setRange(minn,maxx)
    def setMinimum(self,minn):
        self.slider.setMinimum(minn*10**self.decimals)
        self.label.setMinimum(minn)
    def setMaximum(self,maxx):
        self.slider.setMaximum(maxx*10**self.decimals)
        self.label.setMaximum(maxx)
    def setValue(self,value):
        self.slider.setValue(value*10**self.decimals)
        self.label.setValue(value)
class FrequencySlider(SliderLabel):
'''This is a modified SliderLabel class that prevents the user from setting a value between 0 and 1. This controls the frequency of the sin wave. Otherwise, the period could be too long, and you can only update any values at phase=0.
'''
def __init__(self,demicals=0):
SliderLabel.__init__(self,demicals)
def updateSlider(self,value):
if value>0 and value<1:
value=0
self.slider.setValue(int(value*10**self.decimals))
def updateLabel(self,value):
if value>0 and value<1:
value=0
self.label.setValue(value)
class CheckBox(QCheckBox):
''' I overwrote the QCheckBox class so that every graphical element has the method 'setValue'
'''
def __init__(self,parent=None):
QCheckBox.__init__(self,parent)
def setValue(self,value):
self.setChecked(value)
class MainGui(QWidget):
''' This class creates and controls the GUI '''
changeSignal=Signal()
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Shadowless TIRF Galvo Driver')
formlayout=QFormLayout()
self.settings=Settings()
self.galvoDriver=GalvoDriver(self.settings)
frequency=FrequencySlider(3); frequency.setRange(0,500)
radius=SliderLabel(4); radius.setRange(0,2)
ellipticity=SliderLabel(3); ellipticity.setRange(0,2.5)
phase=SliderLabel(3); phase.setRange(-90,90)
x_shift=SliderLabel(4); x_shift.setRange(-10,10)
y_shift=SliderLabel(4); y_shift.setRange(-10,10)
blue_laser_power=SliderLabel(3); blue_laser_power.setRange(-.08,5)
green_laser_power=SliderLabel(3); green_laser_power.setRange(-.08,5)
self.items=[]
self.items.append({'name':'frequency','string':'Frequency (Hz)','object':frequency})
self.items.append({'name':'radius','string':'Radius','object':radius})
self.items.append({'name':'ellipticity','string':'Ellipticity','object':ellipticity})
self.items.append({'name':'phase','string':'Phase','object':phase})
self.items.append({'name':'x_shift','string':'x-shift','object':x_shift})
self.items.append({'name':'y_shift','string':'y-shift','object':y_shift})
self.items.append({'name':'blue_laser','string':'Laser 1 On','object':CheckBox()})
self.items.append({'name':'green_laser','string':'Laser 2 On','object':CheckBox()})
self.items.append({'name':'blue_laser_power','string':'Laser 1 Power','object':blue_laser_power})
self.items.append({'name':'green_laser_power','string':'Laser 2 Power','object':green_laser_power})
alternate12=CheckBox(); alternate123=CheckBox();
self.items.append({'name':'alternate12','string':'Alternate between Setting 1 and Setting 2 every cycle','object':alternate12})
self.items.append({'name':'alternate123','string':'Alternate between Setting 1, 2, and 3 every cycle','object':alternate123})
for item in self.items:
formlayout.addRow(item['string'],item['object'])
item['object'].setValue(self.settings[item['name']])
self.save1=QPushButton('Save'); self.save1.setStyleSheet("background-color: red"); self.save1.clicked.connect(lambda: self.memstore(1))
self.save2=QPushButton('Save'); self.save2.setStyleSheet("background-color: red"); self.save2.clicked.connect(lambda: self.memstore(2))
self.save3=QPushButton('Save'); self.save3.setStyleSheet("background-color: red"); self.save3.clicked.connect(lambda: self.memstore(3))
self.recall1=QPushButton('Recall'); self.recall1.setStyleSheet("background-color: green"); self.recall1.clicked.connect(lambda: self.memrecall(1))
self.recall2=QPushButton('Recall'); self.recall2.setStyleSheet("background-color: green"); self.recall2.clicked.connect(lambda: self.memrecall(2))
self.recall3=QPushButton('Recall'); self.recall3.setStyleSheet("background-color: green"); self.recall3.clicked.connect(lambda: self.memrecall(3))
memlayout=QGridLayout()
memlayout.setHorizontalSpacing(70)
memlayout.addWidget(QLabel('Setting #1'),0,0); memlayout.addWidget(self.save1,1,0); memlayout.addWidget(self.recall1,2,0)
memlayout.addWidget(QLabel('Setting #2'),0,1); memlayout.addWidget(self.save2,1,1); memlayout.addWidget(self.recall2,2,1)
memlayout.addWidget(QLabel('Setting #3'),0,2); memlayout.addWidget(self.save3,1,2); memlayout.addWidget(self.recall3,2,2)
membox=QGroupBox("Settings")
membox.setLayout(memlayout)
self.stopButton=QPushButton('Stop'); self.stopButton.setStyleSheet("background-color: red"); self.stopButton.clicked.connect(self.startstop)
self.acquireButton=QPushButton('Acquire'); self.acquireButton.setStyleSheet("background-color: green"); self.acquireButton.clicked.connect(self.acquire)
self.acquireButton.hide()
self.galvoDriver.finished_acquire_sig.connect(self.finished_acquire)
stopacquirebox=QGridLayout()
stopacquirebox.addWidget(self.stopButton,0,0)
stopacquirebox.addWidget(self.acquireButton,0,1)
self.layout=QVBoxLayout()
self.layout.addLayout(formlayout)
self.layout.addWidget(membox)
self.layout.addSpacing(100)
self.layout.addLayout(stopacquirebox)
self.setLayout(self.layout)
self.connectToChangeSignal()
self.changeSignal.connect(self.updateValues)
self.setGeometry(QRect(488, 390, 704, 376))
self.startstop()#initialize in the off state
self.show()
def connectToChangeSignal(self):
for item in self.items:
methods=[method for method in dir(item['object']) if callable(getattr(item['object'], method))]
if 'valueChanged' in methods:
item['object'].valueChanged.connect(self.changeSignal)
elif 'stateChanged' in methods:
item['object'].stateChanged.connect(self.changeSignal)
elif 'currentIndexChanged' in methods:
item['object'].currentIndexChanged.connect(self.changeSignal)
def updateValues(self):
for item in self.items:
methods=[method for method in dir(item['object']) if callable(getattr(item['object'], method))]
if 'value' in methods:
item['value']=item['object'].value()
elif 'currentText' in methods:
item['value']=item['object'].currentText()
elif 'isChecked' in methods:
item['value']=item['object'].isChecked()
self.settings[item['name']]=item['value']
self.galvoDriver.refresh()
def memrecall(self,i):
'''i is the setting number we are recalling'''
self.changeSignal.disconnect(self.updateValues)
s=self.settings
s.d[0]=s.d[i].copy()
for item in self.items:
item['object'].setValue(s.d[0][item['name']])
self.changeSignal.connect(self.updateValues)
self.galvoDriver.refresh()
def memstore(self,i):
'''i is the setting number we are storing. settings.d[0] is always the current setting.'''
self.settings.d[i]=self.settings.d[0].copy()
self.settings.save()
def acquire(self):
if self.galvoDriver.acquiring is False: #if we haven't started acquiring
self.updateValues()
self.acquireButton.setText('Stop Acquiring')
self.acquireButton.setStyleSheet("background-color: red");
self.galvoDriver.acquire()
self.stopButton.hide()
else:
self.acquireButton.setText('Acquire')
self.acquireButton.setStyleSheet("background-color: green");
self.galvoDriver.stopAcquiring()
self.stopButton.show()
def finished_acquire(self):
self.acquireButton.setText('Acquire')
self.acquireButton.setStyleSheet("background-color: green");
self.stopButton.show()
def startstop(self):
if self.galvoDriver.stopped is False: #if we are free running
self.galvoDriver.startstop()
self.stopButton.setText('Free Run')
self.stopButton.setStyleSheet("background-color: green");
self.acquireButton.show()
else:
self.updateValues()
self.galvoDriver.startstop()
self.stopButton.setText('Stop Free Run')
self.stopButton.setStyleSheet("background-color: red");
self.acquireButton.hide()
if __name__ == '__main__':
app = QApplication(sys.argv)
maingui=MainGui()
sys.exit(app.exec_())
| [
"ctypes.byref",
"os.path.realpath",
"os.path.dirname",
"time.time",
"numpy.sin",
"numpy.arange",
"PyQt4.QtCore.pyqtSlot",
"numpy.cos",
"os.path.expanduser",
"numpy.concatenate",
"PyQt4.QtCore.pyqtSignal"
] | [((4274, 4282), 'PyQt4.QtCore.pyqtSignal', 'Signal', ([], {}), '()\n', (4280, 4282), True, 'from PyQt4.QtCore import pyqtSignal as Signal\n'), ((15467, 15478), 'PyQt4.QtCore.pyqtSignal', 'Signal', (['int'], {}), '(int)\n', (15473, 15478), True, 'from PyQt4.QtCore import pyqtSignal as Signal\n'), ((16629, 16645), 'PyQt4.QtCore.pyqtSlot', 'Slot', (['int', 'float'], {}), '(int, float)\n', (16633, 16645), True, 'from PyQt4.QtCore import pyqtSlot as Slot\n'), ((18366, 18374), 'PyQt4.QtCore.pyqtSignal', 'Signal', ([], {}), '()\n', (18372, 18374), True, 'from PyQt4.QtCore import pyqtSignal as Signal\n'), ((12565, 12576), 'time.time', 'time.time', ([], {}), '()\n', (12574, 12576), False, 'import os, time\n'), ((1890, 1916), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1906, 1916), False, 'import os, time\n'), ((2861, 2876), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (2871, 2876), False, 'from os.path import expanduser\n'), ((5949, 5965), 'ctypes.byref', 'byref', (['self.read'], {}), '(self.read)\n', (5954, 5965), False, 'from ctypes import byref\n'), ((6377, 6419), 'numpy.arange', 'np.arange', (['(0)', 'period', '(1 / self.sample_rate)'], {}), '(0, period, 1 / self.sample_rate)\n', (6386, 6419), True, 'import numpy as np\n'), ((6631, 6673), 'numpy.arange', 'np.arange', (['(0)', 'period', '(1 / self.sample_rate)'], {}), '(0, period, 1 / self.sample_rate)\n', (6640, 6673), True, 'import numpy as np\n'), ((8101, 8180), 'numpy.concatenate', 'np.concatenate', (['(sinwave, coswave, camera_ttl, blue_laser_ttl, green_laser_ttl)'], {}), '((sinwave, coswave, camera_ttl, blue_laser_ttl, green_laser_ttl))\n', (8115, 8180), True, 'import numpy as np\n'), ((13571, 13587), 'ctypes.byref', 'byref', (['self.read'], {}), '(self.read)\n', (13576, 13587), False, 'from ctypes import byref\n'), ((3869, 3902), 'os.path.dirname', 'os.path.dirname', (['self.config_file'], {}), '(self.config_file)\n', (3884, 3902), False, 'import os, 
time\n'), ((3929, 3962), 'os.path.dirname', 'os.path.dirname', (['self.config_file'], {}), '(self.config_file)\n', (3944, 3962), False, 'import os, time\n'), ((9371, 9531), 'numpy.concatenate', 'np.concatenate', (['(sinwave1, sinwave2, coswave1, coswave2, camera_ttl1, camera_ttl2,\n blue_laser_ttl1, blue_laser_ttl2, green_laser_ttl1, green_laser_ttl2)'], {}), '((sinwave1, sinwave2, coswave1, coswave2, camera_ttl1,\n camera_ttl2, blue_laser_ttl1, blue_laser_ttl2, green_laser_ttl1,\n green_laser_ttl2))\n', (9385, 9531), True, 'import numpy as np\n'), ((12380, 12396), 'ctypes.byref', 'byref', (['self.read'], {}), '(self.read)\n', (12385, 12396), False, 'from ctypes import byref\n'), ((14106, 14122), 'ctypes.byref', 'byref', (['self.read'], {}), '(self.read)\n', (14111, 14122), False, 'from ctypes import byref\n'), ((6698, 6735), 'numpy.sin', 'np.sin', (['(frequency * (t * (2 * np.pi)))'], {}), '(frequency * (t * (2 * np.pi)))\n', (6704, 6735), True, 'import numpy as np\n'), ((6778, 6839), 'numpy.cos', 'np.cos', (['(frequency * t * 2 * np.pi + phase * (2 * np.pi / 360))'], {}), '(frequency * t * 2 * np.pi + phase * (2 * np.pi / 360))\n', (6784, 6839), True, 'import numpy as np\n'), ((11335, 11563), 'numpy.concatenate', 'np.concatenate', (['(sinwave1, sinwave2, sinwave3, coswave1, coswave2, coswave3, camera_ttl1,\n camera_ttl2, camera_ttl3, blue_laser_ttl1, blue_laser_ttl2,\n blue_laser_ttl3, green_laser_ttl1, green_laser_ttl2, green_laser_ttl3)'], {}), '((sinwave1, sinwave2, sinwave3, coswave1, coswave2, coswave3,\n camera_ttl1, camera_ttl2, camera_ttl3, blue_laser_ttl1, blue_laser_ttl2,\n blue_laser_ttl3, green_laser_ttl1, green_laser_ttl2, green_laser_ttl3))\n', (11349, 11563), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from tabulate import tabulate
np.set_printoptions(suppress=True)
np.set_printoptions(threshold=np.inf)
loc = "./build/results_neural-network-runtime.csv"
df = pd.read_csv(loc, sep=";", header=0)
nb_iters = 11
IEs = ["OpenVINO CPU", "OpenVINO GPU", "TensorRT"]
ret = []
for i in range(3):
tmp = df.iloc[int(i * nb_iters):int((i + 1) * nb_iters), :]
print("----")
print(tmp)
print("-")
print(np.mean(tmp, axis=0), np.std(tmp, axis=0))
print()
| [
"pandas.read_csv",
"numpy.set_printoptions",
"numpy.std",
"numpy.mean"
] | [((69, 103), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (88, 103), True, 'import numpy as np\n'), ((104, 141), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (123, 141), True, 'import numpy as np\n'), ((200, 235), 'pandas.read_csv', 'pd.read_csv', (['loc'], {'sep': '""";"""', 'header': '(0)'}), "(loc, sep=';', header=0)\n", (211, 235), True, 'import pandas as pd\n'), ((454, 474), 'numpy.mean', 'np.mean', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (461, 474), True, 'import numpy as np\n'), ((476, 495), 'numpy.std', 'np.std', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (482, 495), True, 'import numpy as np\n')] |
import numpy as np
from scipy.special import logsumexp
import ctypes
import os
import platform
if platform.system() == "Linux":
lpm_lib = np.ctypeslib.load_library("liblpm_lib.so", "bin/")
elif platform.system() == "Darwin":
lpm_lib = np.ctypeslib.load_library("liblpm_lib.dylib", "bin/")
np.random.seed(111)
curr_dir = os.getcwd()
model_lens = range(3, 10)
n_genes = 25
n_patients = 1000
fbp = 0.05
bgp = 0.05
n_reps = 100
_n_genes = ctypes.c_uint(n_genes)
_n_patients = ctypes.c_uint(n_patients)
_fbp = ctypes.c_double(fbp)
_bgp = ctypes.c_double(bgp)
for model_len in model_lens:
_model_len = ctypes.c_uint(model_len)
output_path = curr_dir + "/data/model_selection/model" + str(model_len)
for rep in range(n_reps):
dest = output_path + "/rep" + str(rep)
if not os.path.exists(dest):
os.makedirs(dest)
_dest = ctypes.create_string_buffer(dest.encode())
seed = np.random.randint(low=0, high=1000000)
_seed = ctypes.c_long(seed)
lpm_lib.generate_data(_seed, _dest, _model_len, _n_genes, _n_patients, _fbp, _bgp)
| [
"numpy.ctypeslib.load_library",
"numpy.random.seed",
"ctypes.c_double",
"os.makedirs",
"os.getcwd",
"os.path.exists",
"numpy.random.randint",
"ctypes.c_long",
"platform.system",
"ctypes.c_uint"
] | [((304, 323), 'numpy.random.seed', 'np.random.seed', (['(111)'], {}), '(111)\n', (318, 323), True, 'import numpy as np\n'), ((336, 347), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (345, 347), False, 'import os\n'), ((452, 474), 'ctypes.c_uint', 'ctypes.c_uint', (['n_genes'], {}), '(n_genes)\n', (465, 474), False, 'import ctypes\n'), ((489, 514), 'ctypes.c_uint', 'ctypes.c_uint', (['n_patients'], {}), '(n_patients)\n', (502, 514), False, 'import ctypes\n'), ((522, 542), 'ctypes.c_double', 'ctypes.c_double', (['fbp'], {}), '(fbp)\n', (537, 542), False, 'import ctypes\n'), ((550, 570), 'ctypes.c_double', 'ctypes.c_double', (['bgp'], {}), '(bgp)\n', (565, 570), False, 'import ctypes\n'), ((100, 117), 'platform.system', 'platform.system', ([], {}), '()\n', (115, 117), False, 'import platform\n'), ((144, 194), 'numpy.ctypeslib.load_library', 'np.ctypeslib.load_library', (['"""liblpm_lib.so"""', '"""bin/"""'], {}), "('liblpm_lib.so', 'bin/')\n", (169, 194), True, 'import numpy as np\n'), ((618, 642), 'ctypes.c_uint', 'ctypes.c_uint', (['model_len'], {}), '(model_len)\n', (631, 642), False, 'import ctypes\n'), ((204, 221), 'platform.system', 'platform.system', ([], {}), '()\n', (219, 221), False, 'import platform\n'), ((249, 302), 'numpy.ctypeslib.load_library', 'np.ctypeslib.load_library', (['"""liblpm_lib.dylib"""', '"""bin/"""'], {}), "('liblpm_lib.dylib', 'bin/')\n", (274, 302), True, 'import numpy as np\n'), ((937, 975), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000000)'}), '(low=0, high=1000000)\n', (954, 975), True, 'import numpy as np\n'), ((992, 1011), 'ctypes.c_long', 'ctypes.c_long', (['seed'], {}), '(seed)\n', (1005, 1011), False, 'import ctypes\n'), ((811, 831), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (825, 831), False, 'import os\n'), ((845, 862), 'os.makedirs', 'os.makedirs', (['dest'], {}), '(dest)\n', (856, 862), False, 'import os\n')] |
import numpy as np
new_inds = np.linspace(0, 159, num=10, dtype=int)
print(new_inds) | [
"numpy.linspace"
] | [((30, 68), 'numpy.linspace', 'np.linspace', (['(0)', '(159)'], {'num': '(10)', 'dtype': 'int'}), '(0, 159, num=10, dtype=int)\n', (41, 68), True, 'import numpy as np\n')] |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| [
"scipy.sparse.issparse",
"numpy.asarray",
"numpy.arange",
"warnings.warn",
"numpy.issubdtype"
] | [((3586, 3602), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (3596, 3602), True, 'import numpy as np\n'), ((3610, 3643), 'numpy.issubdtype', 'np.issubdtype', (['mask.dtype', 'np.int'], {}), '(mask.dtype, np.int)\n', (3623, 3643), True, 'import numpy as np\n'), ((10473, 10484), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (10481, 10484), False, 'from scipy.sparse import issparse\n'), ((3710, 3734), 'numpy.arange', 'np.arange', (['mask.shape[0]'], {}), '(mask.shape[0])\n', (3719, 3734), True, 'import numpy as np\n'), ((7636, 7656), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (7645, 7656), True, 'import numpy as np\n'), ((13864, 13877), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (13874, 13877), True, 'import numpy as np\n'), ((2300, 2347), 'warnings.warn', 'warnings.warn', (['msg'], {'category': 'DeprecationWarning'}), '(msg, category=DeprecationWarning)\n', (2313, 2347), False, 'import warnings\n'), ((2821, 2868), 'warnings.warn', 'warnings.warn', (['msg'], {'category': 'DeprecationWarning'}), '(msg, category=DeprecationWarning)\n', (2834, 2868), False, 'import warnings\n'), ((7825, 7836), 'scipy.sparse.issparse', 'issparse', (['a'], {}), '(a)\n', (7833, 7836), False, 'from scipy.sparse import issparse\n'), ((4390, 4466), 'warnings.warn', 'warnings.warn', (['"""Copying input dataframe for slicing."""', 'DataConversionWarning'], {}), "('Copying input dataframe for slicing.', DataConversionWarning)\n", (4403, 4466), False, 'import warnings\n')] |
import argparse
import math
from collections import namedtuple
from itertools import count
from tqdm import tqdm
from tensorboardX import SummaryWriter
from environment import VoltageCtrl_nonlinear,create_56bus
import os
import gym
import numpy as np
from gym import wrappers
import torch
from ddpg import DDPG
from naf import NAF
from normalized_actions import NormalizedActions
from ounoise import OUNoise
from param_noise import AdaptiveParamNoiseSpec, ddpg_distance_metric
from replay_memory import ReplayMemory, Transition
import matplotlib.pyplot as plt
from scipy.io import loadmat
import pandapower as pp
import pandapower.networks as pn
# Command-line hyperparameters for multi-agent DDPG voltage-control training.
# NOTE(review): several help strings disagree with the actual defaults below
# (noise_scale 0.5 vs "0.3", final_noise_scale 0.2 vs "0.3", exploration_end
# 200 vs "100", seed 43 vs "13", batch_size 256 vs "128", num_episodes 600 vs
# "1000", hidden_size 100 vs "128"). The code uses the `default=` values.
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
# NOTE(review): --env-name is never used below; the environment is always the
# 56-bus VoltageCtrl_nonlinear simulation built further down.
parser.add_argument('--env-name', default="HalfCheetah-v2",
                    help='name of the environment to run')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor for reward (default: 0.99)')
# tau: soft-update rate for the DDPG target networks.
parser.add_argument('--tau', type=float, default=0.01, metavar='G',
                    help='discount factor for model (default: 0.01)')
# Ornstein-Uhlenbeck action noise on by default; parameter-space noise off.
parser.add_argument('--ou_noise', type=bool, default=True)
parser.add_argument('--param_noise', type=bool, default=False)
parser.add_argument('--noise_scale', type=float, default=0.5, metavar='G',
                    help='initial noise scale (default: 0.3)')
parser.add_argument('--final_noise_scale', type=float, default=0.2, metavar='G',
                    help='final noise scale (default: 0.3)')
# Episode index after which the OU noise scale stays at --final_noise_scale.
parser.add_argument('--exploration_end', type=int, default=200, metavar='N',
                    help='number of episodes with noise (default: 100)')
parser.add_argument('--seed', type=int, default=43, metavar='N',
                    help='random seed (default: 13)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
                    help='batch size (default: 128)')
parser.add_argument('--num_steps', type=int, default=1000, metavar='N',
                    help='max episode length (default: 1000)')
parser.add_argument('--num_episodes', type=int, default=600, metavar='N',
                    help='number of episodes (default: 1000)')
parser.add_argument('--hidden_size', type=int, default=100, metavar='N',
                    help='number of episodes (default: 128)')
# Gradient updates performed per environment step once a batch is available.
parser.add_argument('--updates_per_step', type=int, default=5, metavar='N',
                    help='model updates per simulator step (default: 5)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
                    help='size of replay buffer (default: 1000000)')
args = parser.parse_args()
# TensorBoard writer for reward and loss curves.
writer = SummaryWriter()
# Build the 56-bus pandapower network; the five listed buses are where the
# agents inject control actions.
pp_net = create_56bus()
injection_bus = np.array([17, 20, 29, 44, 52])
env = VoltageCtrl_nonlinear(pp_net, injection_bus)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# One independent DDPG agent per controlled bus. Each agent later receives a
# scalar slice of the observation, but all are constructed with the full
# observation dimension and the shared action space object.
agent1 = DDPG(args.gamma, args.tau, args.hidden_size,
              env.observation_space.shape[0], env.action_space)
agent2 = DDPG(args.gamma, args.tau, args.hidden_size,
              env.observation_space.shape[0], env.action_space)
agent3 = DDPG(args.gamma, args.tau, args.hidden_size,
              env.observation_space.shape[0], env.action_space)
agent4 = DDPG(args.gamma, args.tau, args.hidden_size,
              env.observation_space.shape[0], env.action_space)
agent5 = DDPG(args.gamma, args.tau, args.hidden_size,
              env.observation_space.shape[0], env.action_space)
# Per-agent replay buffers; experience is not shared across buses.
memory1 = ReplayMemory(args.replay_size)
memory2 = ReplayMemory(args.replay_size)
memory3 = ReplayMemory(args.replay_size)
memory4 = ReplayMemory(args.replay_size)
memory5 = ReplayMemory(args.replay_size)
# Exploration noise processes (None when disabled via CLI flags).
ounoise = OUNoise(env.action_space.shape[0]) if args.ou_noise else None
param_noise = AdaptiveParamNoiseSpec(initial_stddev=0.05,
        desired_action_stddev=args.noise_scale, adaptation_coefficient=1.05) if args.param_noise else None
rewards = []
total_numsteps = 0
updates = 0
# NOTE(review): best_val_reward is never updated below — the best-model gating
# in the evaluation branch is commented out.
best_val_reward = -100000
for i_episode in range(args.num_episodes):
    # state has one voltage reading per controlled bus (batched as [1, 5]).
    state = torch.Tensor([env.reset()])
    if args.ou_noise:
        # Linearly anneal the OU noise scale from noise_scale down to
        # final_noise_scale over the first `exploration_end` episodes.
        ounoise.scale = (args.noise_scale - args.final_noise_scale) * max(0.0, args.exploration_end -
                                                                       i_episode) / args.exploration_end + args.final_noise_scale
        # ounoise.scale = args.noise_scale
        ounoise.reset()
    # if args.param_noise and args.algo == "DDPG":
    #     agent.perturb_actor_parameters(param_noise)
    episode_reward = 0
    episode_len = 0
    log = []
    while True: #state:[1,1]
        # Split the joint observation so each agent only sees its own bus.
        state1 = state[:,0].unsqueeze(-1)
        state2 = state[:,1].unsqueeze(-1)
        state3 = state[:,2].unsqueeze(-1)
        state4 = state[:,3].unsqueeze(-1)
        state5 = state[:,4].unsqueeze(-1)
        action1 = agent1.select_action(state1, ounoise, param_noise)
        action2 = agent2.select_action(state2, ounoise, param_noise)
        action3 = agent3.select_action(state3, ounoise, param_noise)
        action4 = agent4.select_action(state4, ounoise, param_noise)
        action5 = agent5.select_action(state5, ounoise, param_noise)
        # Joint action for the five buses, shape [1, 5].
        action = torch.cat([action1, action2, action3, action4, action5], dim=1)
        # Log (state, action) pairs for offline inspection.
        log.append(torch.cat([state,action],dim=1).detach().cpu().numpy())
        next_state, reward, done, _ = env.step(action.numpy()[0])
        total_numsteps += 1
        episode_reward += np.mean(reward)
        episode_len += 1
        action = torch.Tensor(action)
        mask = torch.Tensor([not done])
        next_state = torch.Tensor([next_state])
        reward = torch.Tensor([reward])
        # Each replay buffer stores only that agent's slice of the transition;
        # the (shared) reward tensor is stored with every agent.
        memory1.push(state1, action1, mask, next_state[:,0].unsqueeze(-1), reward)
        memory2.push(state2, action2, mask, next_state[:,1].unsqueeze(-1), reward)
        memory3.push(state3, action3, mask, next_state[:,2].unsqueeze(-1), reward)
        memory4.push(state4, action4, mask, next_state[:,3].unsqueeze(-1), reward)
        memory5.push(state5, action5, mask, next_state[:,4].unsqueeze(-1), reward)
        state = next_state
        if len(memory1) > args.batch_size:
            for _ in range(args.updates_per_step):
                transitions1 = memory1.sample(args.batch_size)
                batch1 = Transition(*zip(*transitions1))
                transitions2 = memory2.sample(args.batch_size)
                batch2 = Transition(*zip(*transitions2))
                transitions3 = memory3.sample(args.batch_size)
                batch3 = Transition(*zip(*transitions3))
                transitions4 = memory4.sample(args.batch_size)
                batch4 = Transition(*zip(*transitions4))
                transitions5 = memory5.sample(args.batch_size)
                batch5 = Transition(*zip(*transitions5))
                # NOTE(review): value_loss/policy_loss are overwritten by each
                # agent in turn, so only agent5's losses reach TensorBoard.
                value_loss, policy_loss = agent1.update_parameters(batch1)
                value_loss, policy_loss = agent2.update_parameters(batch2)
                value_loss, policy_loss = agent3.update_parameters(batch3)
                value_loss, policy_loss = agent4.update_parameters(batch4)
                value_loss, policy_loss = agent5.update_parameters(batch5)
                writer.add_scalar('loss/value', value_loss, updates)
                writer.add_scalar('loss/policy', policy_loss, updates)
                updates += 1
        # Training episodes are capped at 30 steps.
        if done or episode_len==30:
            log = np.vstack(log)
            np.savetxt('train_log1g.txt',log, fmt='%1.4e')
            break
    # if i_episode %30 ==0:
    #     for name, parms in agent2.actor.named_parameters():
    #         if 'mu' in name:
    #             print('-->name:', name, '-->grad_requirs:',parms.requires_grad, \
    #                   ' -->para_value:',parms.data)
    writer.add_scalar('reward/train', episode_reward, i_episode)
    # Update param_noise based on distance metric
    # if args.param_noise:
    #     episode_transitions = memory.memory[memory.position-t:memory.position]
    #     states = torch.cat([transition[0] for transition in episode_transitions], 0)
    #     unperturbed_actions = agent.select_action(states, None, None)
    #     perturbed_actions = torch.cat([transition[1] for transition in episode_transitions], 0)
    #     ddpg_dist = ddpg_distance_metric(perturbed_actions.numpy(), unperturbed_actions.numpy())
    #     param_noise.adapt(ddpg_dist)
    rewards.append(episode_reward)
    log = []
    # Every 10th episode: run one noise-free evaluation rollout (capped at 60
    # steps) and checkpoint agent1.
    if i_episode % 10 == 0:
        if args.ou_noise:
            ounoise.scale = 0.0
            ounoise.reset()
        state = torch.Tensor([env.reset()])
        episode_reward = 0
        test_len=0
        while True:
            state1 = state[:,0].unsqueeze(-1)
            state2 = state[:,1].unsqueeze(-1)
            state3 = state[:,2].unsqueeze(-1)
            state4 = state[:,3].unsqueeze(-1)
            state5 = state[:,4].unsqueeze(-1)
            action1 = agent1.select_action(state1, ounoise, param_noise)
            action2 = agent2.select_action(state2, ounoise, param_noise)
            action3 = agent3.select_action(state3, ounoise, param_noise)
            action4 = agent4.select_action(state4, ounoise, param_noise)
            action5 = agent5.select_action(state5, ounoise, param_noise)
            action = torch.cat([action1, action2, action3, action4, action5], dim=1)
            log.append(torch.cat([state,action],dim=1).detach().cpu().numpy())
            test_len+=1
            next_state, reward, done, _ = env.step(action.numpy()[0])
            episode_reward += np.mean(reward)
            next_state = torch.Tensor([next_state])
            state = next_state
            if done or test_len==60:
                log = np.vstack(log)
                np.savetxt('test_log1g.txt',log, fmt='%1.4e')
                break
        writer.add_scalar('reward/test', episode_reward, i_episode)
        # if episode_reward/test_len > best_val_reward and i_episode>50:
        # best_val_reward = episode_reward/test_len
        model1_pth = './models/gagent1.pt'
        # model2_pth = './models/dagent2.pt'
        # model3_pth = './models/dagent3.pt'
        # model4_pth = './models/dagent4.pt'
        # model5_pth = './models/dagent5.pt'
        # NOTE(review): only agent1 is saved, and unconditionally — the
        # best-reward gating above is commented out.
        torch.save(agent1,model1_pth)
        # torch.save(agent2,model2_pth)
        # torch.save(agent3,model3_pth)
        # torch.save(agent4,model4_pth)
        # torch.save(agent5,model5_pth)
        print("model saved")
    # NOTE(review): episode_reward was already appended above; on evaluation
    # episodes this second append records the eval reward as well.
    rewards.append(episode_reward)
    print("Episode: {}, total numsteps: {}, reward: {}, average reward: {}".format(i_episode, total_numsteps, rewards[-1], np.mean(rewards[-10:])))
env.close()
| [
"tensorboardX.SummaryWriter",
"replay_memory.ReplayMemory",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.manual_seed",
"param_noise.AdaptiveParamNoiseSpec",
"numpy.savetxt",
"environment.VoltageCtrl_nonlinear",
"environment.create_56bus",
"ddpg.DDPG",
"torch.cat",
"torch.save",
"nu... | [((659, 723), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch REINFORCE example"""'}), "(description='PyTorch REINFORCE example')\n", (682, 723), False, 'import argparse\n'), ((2659, 2674), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (2672, 2674), False, 'from tensorboardX import SummaryWriter\n'), ((2685, 2699), 'environment.create_56bus', 'create_56bus', ([], {}), '()\n', (2697, 2699), False, 'from environment import VoltageCtrl_nonlinear, create_56bus\n'), ((2716, 2746), 'numpy.array', 'np.array', (['[17, 20, 29, 44, 52]'], {}), '([17, 20, 29, 44, 52])\n', (2724, 2746), True, 'import numpy as np\n'), ((2753, 2797), 'environment.VoltageCtrl_nonlinear', 'VoltageCtrl_nonlinear', (['pp_net', 'injection_bus'], {}), '(pp_net, injection_bus)\n', (2774, 2797), False, 'from environment import VoltageCtrl_nonlinear, create_56bus\n'), ((2799, 2827), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2816, 2827), False, 'import torch\n'), ((2828, 2853), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2842, 2853), True, 'import numpy as np\n'), ((2864, 2962), 'ddpg.DDPG', 'DDPG', (['args.gamma', 'args.tau', 'args.hidden_size', 'env.observation_space.shape[0]', 'env.action_space'], {}), '(args.gamma, args.tau, args.hidden_size, env.observation_space.shape[0],\n env.action_space)\n', (2868, 2962), False, 'from ddpg import DDPG\n'), ((2988, 3086), 'ddpg.DDPG', 'DDPG', (['args.gamma', 'args.tau', 'args.hidden_size', 'env.observation_space.shape[0]', 'env.action_space'], {}), '(args.gamma, args.tau, args.hidden_size, env.observation_space.shape[0],\n env.action_space)\n', (2992, 3086), False, 'from ddpg import DDPG\n'), ((3112, 3210), 'ddpg.DDPG', 'DDPG', (['args.gamma', 'args.tau', 'args.hidden_size', 'env.observation_space.shape[0]', 'env.action_space'], {}), '(args.gamma, args.tau, args.hidden_size, env.observation_space.shape[0],\n 
env.action_space)\n', (3116, 3210), False, 'from ddpg import DDPG\n'), ((3236, 3334), 'ddpg.DDPG', 'DDPG', (['args.gamma', 'args.tau', 'args.hidden_size', 'env.observation_space.shape[0]', 'env.action_space'], {}), '(args.gamma, args.tau, args.hidden_size, env.observation_space.shape[0],\n env.action_space)\n', (3240, 3334), False, 'from ddpg import DDPG\n'), ((3400, 3498), 'ddpg.DDPG', 'DDPG', (['args.gamma', 'args.tau', 'args.hidden_size', 'env.observation_space.shape[0]', 'env.action_space'], {}), '(args.gamma, args.tau, args.hidden_size, env.observation_space.shape[0],\n env.action_space)\n', (3404, 3498), False, 'from ddpg import DDPG\n'), ((3526, 3556), 'replay_memory.ReplayMemory', 'ReplayMemory', (['args.replay_size'], {}), '(args.replay_size)\n', (3538, 3556), False, 'from replay_memory import ReplayMemory, Transition\n'), ((3567, 3597), 'replay_memory.ReplayMemory', 'ReplayMemory', (['args.replay_size'], {}), '(args.replay_size)\n', (3579, 3597), False, 'from replay_memory import ReplayMemory, Transition\n'), ((3608, 3638), 'replay_memory.ReplayMemory', 'ReplayMemory', (['args.replay_size'], {}), '(args.replay_size)\n', (3620, 3638), False, 'from replay_memory import ReplayMemory, Transition\n'), ((3649, 3679), 'replay_memory.ReplayMemory', 'ReplayMemory', (['args.replay_size'], {}), '(args.replay_size)\n', (3661, 3679), False, 'from replay_memory import ReplayMemory, Transition\n'), ((3690, 3720), 'replay_memory.ReplayMemory', 'ReplayMemory', (['args.replay_size'], {}), '(args.replay_size)\n', (3702, 3720), False, 'from replay_memory import ReplayMemory, Transition\n'), ((3732, 3766), 'ounoise.OUNoise', 'OUNoise', (['env.action_space.shape[0]'], {}), '(env.action_space.shape[0])\n', (3739, 3766), False, 'from ounoise import OUNoise\n'), ((3808, 3925), 'param_noise.AdaptiveParamNoiseSpec', 'AdaptiveParamNoiseSpec', ([], {'initial_stddev': '(0.05)', 'desired_action_stddev': 'args.noise_scale', 'adaptation_coefficient': '(1.05)'}), '(initial_stddev=0.05, 
desired_action_stddev=args.\n noise_scale, adaptation_coefficient=1.05)\n', (3830, 3925), False, 'from param_noise import AdaptiveParamNoiseSpec, ddpg_distance_metric\n'), ((5197, 5260), 'torch.cat', 'torch.cat', (['[action1, action2, action3, action4, action5]'], {'dim': '(1)'}), '([action1, action2, action3, action4, action5], dim=1)\n', (5206, 5260), False, 'import torch\n'), ((5456, 5471), 'numpy.mean', 'np.mean', (['reward'], {}), '(reward)\n', (5463, 5471), True, 'import numpy as np\n'), ((5515, 5535), 'torch.Tensor', 'torch.Tensor', (['action'], {}), '(action)\n', (5527, 5535), False, 'import torch\n'), ((5551, 5575), 'torch.Tensor', 'torch.Tensor', (['[not done]'], {}), '([not done])\n', (5563, 5575), False, 'import torch\n'), ((5597, 5623), 'torch.Tensor', 'torch.Tensor', (['[next_state]'], {}), '([next_state])\n', (5609, 5623), False, 'import torch\n'), ((5641, 5663), 'torch.Tensor', 'torch.Tensor', (['[reward]'], {}), '([reward])\n', (5653, 5663), False, 'import torch\n'), ((10216, 10246), 'torch.save', 'torch.save', (['agent1', 'model1_pth'], {}), '(agent1, model1_pth)\n', (10226, 10246), False, 'import torch\n'), ((7408, 7422), 'numpy.vstack', 'np.vstack', (['log'], {}), '(log)\n', (7417, 7422), True, 'import numpy as np\n'), ((7435, 7482), 'numpy.savetxt', 'np.savetxt', (['"""train_log1g.txt"""', 'log'], {'fmt': '"""%1.4e"""'}), "('train_log1g.txt', log, fmt='%1.4e')\n", (7445, 7482), True, 'import numpy as np\n'), ((9264, 9327), 'torch.cat', 'torch.cat', (['[action1, action2, action3, action4, action5]'], {'dim': '(1)'}), '([action1, action2, action3, action4, action5], dim=1)\n', (9273, 9327), False, 'import torch\n'), ((9532, 9547), 'numpy.mean', 'np.mean', (['reward'], {}), '(reward)\n', (9539, 9547), True, 'import numpy as np\n'), ((9574, 9600), 'torch.Tensor', 'torch.Tensor', (['[next_state]'], {}), '([next_state])\n', (9586, 9600), False, 'import torch\n'), ((9692, 9706), 'numpy.vstack', 'np.vstack', (['log'], {}), '(log)\n', (9701, 9706), 
True, 'import numpy as np\n'), ((9723, 9769), 'numpy.savetxt', 'np.savetxt', (['"""test_log1g.txt"""', 'log'], {'fmt': '"""%1.4e"""'}), "('test_log1g.txt', log, fmt='%1.4e')\n", (9733, 9769), True, 'import numpy as np\n'), ((10603, 10625), 'numpy.mean', 'np.mean', (['rewards[-10:]'], {}), '(rewards[-10:])\n', (10610, 10625), True, 'import numpy as np\n'), ((5280, 5313), 'torch.cat', 'torch.cat', (['[state, action]'], {'dim': '(1)'}), '([state, action], dim=1)\n', (5289, 5313), False, 'import torch\n'), ((9351, 9384), 'torch.cat', 'torch.cat', (['[state, action]'], {'dim': '(1)'}), '([state, action], dim=1)\n', (9360, 9384), False, 'import torch\n')] |
'''
@name: train_robot.py
@brief: Starts a PPO2 training process. It is expected that move_base, the simulation, etc. are already running (roslaunch rl_setup_bringup setup.launch).
@author: <NAME>
@version: 3.5
@date: 2019/04/05
'''
from email import policy
import os
import sys
from rl_agent.env_wrapper.rto.ros_env_cont_rto import RosEnvContRto
from rl_agent.env_wrapper.rto_real.ros_env_cont_rto_real import RosEnvContRtoReal
from rl_agent.env_wrapper.youbot.ros_env_cont_youbot import RosEnvContYoubot
import rospy
import rospkg
import configparser
from rl_agent.env_wrapper.ros_env_disc_burger import RosEnvDiscBurger
from rl_agent.env_wrapper.burger.ros_env_cont_burger import RosEnvContBurger
from rl_agent.env_wrapper.jackal.ros_env_cont_jackal import RosEnvContJackal
from rl_agent.env_wrapper.ridgeback.ros_env_cont_ridgeback import RosEnvContRidgeback
from rl_agent.env_wrapper.ros_env_cont_agvota import RosEnvContAgvOta
from rl_agent.env_utils.state_collector_rosnav import StateCollector
from stable_baselines.common.vec_env import VecNormalize, SubprocVecEnv, VecFrameStack
from rl_agent.evaluation.Evaluation import Evaluation
from multiprocessing import Process
import random
from rl_agent.common_custom_policies import *
from stable_baselines.common.policies import *
from stable_baselines.ppo2 import PPO2
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
import numpy as np
# Module-level state shared with train_callback and the training entry points.
best_mean_reward, n_callback = -np.inf, 0
agent_name = ""
path_to_models = ""
def train_callback(_locals, _globals):
    """Stable-Baselines training callback that checkpoints the best model.

    Called at each step (for DQN and others) or after n steps (see ACER or
    PPO2). Every 10th invocation it reads the Monitor logs, computes the mean
    reward over the last 100 episodes, and saves the model when that mean
    exceeds the best seen so far.

    :param _locals: (dict) local variables of the caller; '_locals["self"]'
        is the model being trained
    :param _globals: (dict) global variables of the caller
    :return: (bool) True, so training always continues
    """
    global n_callback, best_mean_reward, agent_name, path_to_models
    # Evaluate (and possibly checkpoint) every 10th callback invocation.
    if (n_callback + 1) % 10 == 0:
        # Evaluate policy performance
        x, y = ts2xy(load_results('%s/%s/'%(path_to_models, agent_name)), 'timesteps')
        if len(x) > 0:
            # Mean episode reward over the last (up to) 100 episodes.
            mean_reward = np.mean(y[-100:])
            print(x[-1], 'timesteps')
            print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
            # New best model, you could save the agent here
            if mean_reward > best_mean_reward:
                best_mean_reward = mean_reward
                # Example for saving best model
                print("Saving new best model")
                _locals['self'].save(path_to_models + '/%s/%s.pkl' % (agent_name, agent_name))
    n_callback += 1
    return True
def load_train_env(num_envs, robot_radius, rew_fnc, num_stacks, stack_offset, debug, task_mode, policy, disc_action_space, normalize, robot_model):
    """Assemble the vectorized training environment for the given robot.

    Args:
        num_envs (int): number of parallel simulation workers.
        robot_radius (float): footprint radius of the robot in meters.
        rew_fnc (int): id of the reward function to use.
        num_stacks (int): number of observations stacked per input.
        stack_offset (int): step offset between stacked observations.
        debug (bool): forwarded to the environment wrapper.
        task_mode (str): task setup, e.g. "static" or "ped".
        policy (str): policy name (currently unused here; kept for API compatibility).
        disc_action_space (bool): discrete-action flag (currently unused here).
        normalize (bool): wrap the env in VecNormalize (observations only).
        robot_model (str): one of "burger", "jackal", "ridgeback", "agvota",
            "rto", "rto_real", "youbot".

    Returns:
        A SubprocVecEnv, optionally wrapped in VecNormalize and/or VecFrameStack.

    Raises:
        ValueError: if ``robot_model`` is not one of the known robots.
    """
    # Select the continuous-action environment wrapper for the robot model.
    if robot_model == "burger":
        env_temp = RosEnvContBurger
    elif robot_model == "jackal":
        env_temp = RosEnvContJackal
    elif robot_model == "ridgeback":
        env_temp = RosEnvContRidgeback
    elif robot_model == "agvota":
        env_temp = RosEnvContAgvOta
    elif robot_model == "rto":
        env_temp = RosEnvContRto
    elif robot_model == "rto_real":
        env_temp = RosEnvContRtoReal
    elif robot_model == "youbot":
        env_temp = RosEnvContYoubot
    else:
        # Previously an unknown model silently fell through and crashed with a
        # NameError on env_temp below; fail fast with an explicit error instead.
        raise ValueError("Unknown robot_model: %r" % robot_model)

    # One Monitor-wrapped simulation per worker ("sim1", "sim2", ...), each
    # logging episode stats next to the agent's model directory.
    env = SubprocVecEnv([lambda k=k: Monitor(env_temp("sim%d" % (k+1), StateCollector("sim%s"%(k+1), "train") , stack_offset, num_stacks, robot_radius, rew_fnc, debug, "train", task_mode), '%s/%s/sim_%d'%(path_to_models, agent_name, k+1), allow_early_resets=True) for k in range(num_envs)])

    # Normalizing?
    if normalize:
        # Observations only; rewards are deliberately left unnormalized.
        env = VecNormalize(env, training=True, norm_obs=True, norm_reward=False, clip_obs=100.0, clip_reward=10.0,
                           gamma=0.99, epsilon=1e-08)

    # Stack of data?
    if num_stacks > 1:
        env = VecFrameStack(env, n_stack=num_stacks, n_offset=stack_offset)
    return env
def train_agent_ppo2(config, agent_name, total_timesteps, policy,
                     gamma=0.99, n_steps=128, ent_coef=0.01, learning_rate=0.00025,
                     vf_coef=0.5, max_grad_norm=0.5, lam=0.95, nminibatches=4, noptepochs=4,
                     cliprange=0.2, num_envs=1, robot_radius = 0.46, rew_fnc=3, num_stacks=1, stack_offset=15, disc_action_space = False,
                     debug=False, normalize=False,
                     stage=0, pretrained_model_name="", task_mode="static", robot_model="burger"):
    """Train (or continue training) a PPO2 agent on the simulation env.

    Builds the vectorized environment via load_train_env, then either creates
    a fresh PPO2 model (stage == 0) or loads `pretrained_model_name` and
    continues training. Checkpoints are written by train_callback; the final
    model is saved as "<agent_name>_stage_<stage>".

    :param config: configparser object with a [PATHES] section
    :param agent_name: name used for log and model directories
    :param total_timesteps: total environment steps to train for
    :param policy: name of a policy class, resolved via eval()
    (remaining keyword arguments are standard PPO2 hyperparameters and
    environment options forwarded to load_train_env)
    """
    # Setting seed
    # NOTE(review): the seed itself is drawn at random, so individual runs are
    # not reproducible across invocations.
    seed = random.randint(0,1000)
    np.random.seed(seed)
    # `tf` comes from the star imports above (stable_baselines policies).
    tf.random.set_random_seed(seed)
    random.seed(seed)

    # Define pathes to store things
    path_to_tensorboard_log = config['PATHES']['path_to_tensorboard_log']
    global path_to_models
    path_to_models = config['PATHES']['path_to_models']
    agent_dir='%s/%s'%(path_to_models, agent_name)
    if not os.path.exists(agent_dir):
        os.makedirs(agent_dir)

    # Loading simulation environment
    env = load_train_env(num_envs,
                         robot_radius,
                         rew_fnc,
                         num_stacks,
                         stack_offset,
                         debug,
                         task_mode,
                         policy,
                         disc_action_space,
                         normalize,
                         robot_model)

    if stage==0:
        # NOTE(review): eval() on a CLI-supplied policy name executes
        # arbitrary code; a whitelist dict would be safer.
        model = PPO2(eval(policy), env, gamma=gamma,
                    n_steps=n_steps, ent_coef=ent_coef,
                    learning_rate=learning_rate, vf_coef=vf_coef, max_grad_norm=max_grad_norm,
                    lam=lam, nminibatches=nminibatches, noptepochs=noptepochs,
                    cliprange=cliprange, verbose=1,
                    tensorboard_log='%s' % (path_to_tensorboard_log))
    else:
        # Pretrained model is loaded to continue training.
        model = PPO2.load("%s/%s/%s.pkl" % (path_to_models, pretrained_model_name, pretrained_model_name), env,
                         tensorboard_log='%s'%(path_to_tensorboard_log))

    # Document agent
    print("Starting PPO2 Training of agent: %s" %(agent_name))
    print("------------------------------------------------------")
    print("gamma \t\t\t\t %f" %model.gamma)
    print("n_steps \t\t\t %d" %model.n_steps)
    print("ent_coef \t\t\t %f" %model.ent_coef)
    print("learning_rate \t\t\t %f" %learning_rate)
    print("vf_coef \t\t\t %f" %model.vf_coef)
    print("max_grad_norm \t\t\t %f" %model.max_grad_norm)
    print("lam \t\t\t\t %f" %model.lam)
    print("nminibatches \t\t\t %d" %model.nminibatches)
    print("noptepochs \t\t\t %d" %model.noptepochs)
    print("cliprange \t\t\t %f" %cliprange)
    print("total_timesteps \t\t %d" %total_timesteps)
    print("Policy \t\t\t\t %s" %policy)
    print("reward_fnc \t\t\t %d" %rew_fnc)
    print("Normalized state: %d" % normalize)
    print("discrete action space %d" % disc_action_space)
    print("Number of stacks: %d, stack offset: %d" % (num_stacks, stack_offset))
    print("\n")

    # Starting training
    # When continuing from a pretrained model, keep the old timestep counter
    # so TensorBoard curves continue instead of restarting at zero.
    reset_num_timesteps = False
    if stage==0:
        reset_num_timesteps = True

    model.learn(total_timesteps=total_timesteps, log_interval=100, callback=train_callback, tb_log_name=agent_name, reset_num_timesteps=reset_num_timesteps)

    # Saving final model
    model.save("%s/%s/%s" % (path_to_models, agent_name, "%s_stage_%d" % (agent_name, stage)))
    print("Training finished.")
    env.close()
def evaluate_during_training(ns, save_path, robot_radius):
    """Run an evaluation recorder for one simulation namespace.

    Intended to be run in its own process alongside training; it records
    training evaluation data via the Evaluation helper.

    :param ns: (str) ROS namespace of the simulation, e.g. "sim1"
    :param save_path: (str) directory where evaluation data is written
    :param robot_radius: (float) robot footprint radius in meters
    """
    rospy.init_node("evaluate_node", anonymous=True)
    # Renamed from `eval`, which shadowed the builtin of the same name.
    evaluator = Evaluation(StateCollector(ns, "train"), ns, robot_radius=robot_radius)
    evaluator.evaluate_training(save_path)
if __name__ == '__main__':
    # Set to True to spawn per-simulation evaluation recorder processes.
    record_evaluation_data = False
    rospack = rospkg.RosPack()
    rl_bringup_path = rospack.get_path('rl_bringup')
    config = configparser.ConfigParser()
    config.read('%s/config/path_config.ini'%rl_bringup_path)
    path_to_eval_data_train = config['PATHES']['path_to_eval_data_train']

    # for running via ./entrypoint_ppo2.sh
    # argv layout: 1=agent_name, 2=total_timesteps, 3=policy, 4=gamma,
    # 5=n_steps, 6=ent_coef, 7=learning_rate, 8=vf_coef, 9=max_grad_norm,
    # 10=lam, 11=nminibatches, 12=noptepochs, 13=cliprange, 14=robot_radius,
    # 15=rew_fnc, 16=num_stacks, 17=stack_offset, 18=disc_action_space,
    # 19=normalize, 20=stage, 21=pretrained_model_name, 22=task_mode,
    # 23=num_envs, 24=robot_model
    if (len(sys.argv) > 1):
        agent_name = str(sys.argv[1])
        stage = int(sys.argv[20])
        record_processes = []
        if record_evaluation_data:
            save_path = "%s/%s_training" % (path_to_eval_data_train, str(sys.argv[1]))
            for i in range(int(sys.argv[23])):
                p = Process(target=evaluate_during_training, args=("sim%d" % (i + 1), save_path, float(sys.argv[14])))
                p.start()
                record_processes.append(p)
        train_agent_ppo2(config, agent_name, int(sys.argv[2]), str(sys.argv[3]), gamma=float(sys.argv[4]),
                         n_steps=int(sys.argv[5]), ent_coef=float(sys.argv[6]),
                         learning_rate=float(sys.argv[7]), vf_coef=float(sys.argv[8]),
                         max_grad_norm=float(sys.argv[9]), lam=float(sys.argv[10]),
                         nminibatches=int(sys.argv[11]), noptepochs=int(sys.argv[12]),
                         cliprange=float(sys.argv[13]), robot_radius=float(sys.argv[14]),
                         rew_fnc=float(sys.argv[15]), num_stacks=int(sys.argv[16]),
                         stack_offset=int(sys.argv[17]),
                         disc_action_space=bool(int(sys.argv[18])), normalize=bool(int(sys.argv[19])),
                         stage=stage,
                         pretrained_model_name = str(sys.argv[21]),
                         task_mode=str(sys.argv[22]), num_envs=int(sys.argv[23]), robot_model=str(sys.argv[24]))
        for p in record_processes:
            p.terminate()
    # for quick testing
    else:
        num_envs = 8
        stage = 0
        agent_name = "youbot"
        ## Robot models with their radius ##
        burger = 0.105
        waffle = 0.208
        jackal = 0.267
        ridgeback = 0.625
        agvota = 0.629
        rto = 0.225
        youbot = 0.347
        ####################################
        robot_radius = youbot
        record_processes = []
        if record_evaluation_data:
            save_path = "%s/%s_training" % (path_to_eval_data_train, agent_name)
            for i in range(num_envs):
                p = Process(target=evaluate_during_training, args=("sim%d"%(i+1), save_path, robot_radius))
                p.start()
                record_processes.append(p)
        train_agent_ppo2(config,
                         agent_name,
                         gamma=0.99,
                         n_steps=128,
                         ent_coef=0.005,
                         learning_rate=0.00025,
                         cliprange=0.2,
                         total_timesteps=10000000,
                         policy="CNN1DPolicy_multi_input",
                         # policy="CNN1DPolicy_multi_input_big",
                         num_envs=num_envs,
                         nminibatches=1,
                         noptepochs=1,
                         debug=True,
                         rew_fnc = 19,
                         num_stacks= 1,
                         stack_offset=5,
                         disc_action_space=False,
                         robot_radius = robot_radius,
                         stage=stage,
                         pretrained_model_name="ppo2_foo",
                         task_mode="ped",
                         robot_model="youbot")
        for p in record_processes:
            p.terminate()
| [
"numpy.random.seed",
"random.randint",
"os.makedirs",
"stable_baselines.ppo2.PPO2.load",
"rospkg.RosPack",
"stable_baselines.results_plotter.load_results",
"os.path.exists",
"rl_agent.env_utils.state_collector_rosnav.StateCollector",
"numpy.mean",
"random.seed",
"rospy.init_node",
"stable_base... | [((4535, 4558), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (4549, 4558), False, 'import random\n'), ((4562, 4582), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4576, 4582), True, 'import numpy as np\n'), ((4623, 4640), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4634, 4640), False, 'import random\n'), ((7650, 7698), 'rospy.init_node', 'rospy.init_node', (['"""evaluate_node"""'], {'anonymous': '(True)'}), "('evaluate_node', anonymous=True)\n", (7665, 7698), False, 'import rospy\n'), ((7897, 7913), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (7911, 7913), False, 'import rospkg\n'), ((7980, 8007), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (8005, 8007), False, 'import configparser\n'), ((3652, 3784), 'stable_baselines.common.vec_env.VecNormalize', 'VecNormalize', (['env'], {'training': '(True)', 'norm_obs': '(True)', 'norm_reward': '(False)', 'clip_obs': '(100.0)', 'clip_reward': '(10.0)', 'gamma': '(0.99)', 'epsilon': '(1e-08)'}), '(env, training=True, norm_obs=True, norm_reward=False, clip_obs\n =100.0, clip_reward=10.0, gamma=0.99, epsilon=1e-08)\n', (3664, 3784), False, 'from stable_baselines.common.vec_env import VecNormalize, SubprocVecEnv, VecFrameStack\n'), ((3894, 3955), 'stable_baselines.common.vec_env.VecFrameStack', 'VecFrameStack', (['env'], {'n_stack': 'num_stacks', 'n_offset': 'stack_offset'}), '(env, n_stack=num_stacks, n_offset=stack_offset)\n', (3907, 3955), False, 'from stable_baselines.common.vec_env import VecNormalize, SubprocVecEnv, VecFrameStack\n'), ((4897, 4922), 'os.path.exists', 'os.path.exists', (['agent_dir'], {}), '(agent_dir)\n', (4911, 4922), False, 'import os\n'), ((4932, 4954), 'os.makedirs', 'os.makedirs', (['agent_dir'], {}), '(agent_dir)\n', (4943, 4954), False, 'import os\n'), ((6003, 6155), 'stable_baselines.ppo2.PPO2.load', 'PPO2.load', (["('%s/%s/%s.pkl' % (path_to_models, pretrained_model_name,\n 
pretrained_model_name))", 'env'], {'tensorboard_log': "('%s' % path_to_tensorboard_log)"}), "('%s/%s/%s.pkl' % (path_to_models, pretrained_model_name,\n pretrained_model_name), env, tensorboard_log='%s' % path_to_tensorboard_log\n )\n", (6012, 6155), False, 'from stable_baselines.ppo2 import PPO2\n'), ((7721, 7748), 'rl_agent.env_utils.state_collector_rosnav.StateCollector', 'StateCollector', (['ns', '"""train"""'], {}), "(ns, 'train')\n", (7735, 7748), False, 'from rl_agent.env_utils.state_collector_rosnav import StateCollector\n'), ((1934, 1987), 'stable_baselines.results_plotter.load_results', 'load_results', (["('%s/%s/' % (path_to_models, agent_name))"], {}), "('%s/%s/' % (path_to_models, agent_name))\n", (1946, 1987), False, 'from stable_baselines.results_plotter import load_results, ts2xy\n'), ((2045, 2062), 'numpy.mean', 'np.mean', (['y[-100:]'], {}), '(y[-100:])\n', (2052, 2062), True, 'import numpy as np\n'), ((10329, 10424), 'multiprocessing.Process', 'Process', ([], {'target': 'evaluate_during_training', 'args': "('sim%d' % (i + 1), save_path, robot_radius)"}), "(target=evaluate_during_training, args=('sim%d' % (i + 1), save_path,\n robot_radius))\n", (10336, 10424), False, 'from multiprocessing import Process\n'), ((3380, 3422), 'rl_agent.env_utils.state_collector_rosnav.StateCollector', 'StateCollector', (["('sim%s' % (k + 1))", '"""train"""'], {}), "('sim%s' % (k + 1), 'train')\n", (3394, 3422), False, 'from rl_agent.env_utils.state_collector_rosnav import StateCollector\n')] |
"""Tile pyramid generation in standard formats.
Included methods are DeepZoom and Zoomify in addition to a generic
method.
These are generally intended for serialisation or streaming via a web
UI. The `get_tile` method returns a Pillow Image object which can be
easily serialised via the use of an io.BytesIO object or saved directly
to disk.
"""
import tarfile
import time
import warnings
import zipfile
from functools import lru_cache
from io import BytesIO
from pathlib import Path
from typing import Iterable, Tuple, Union
import defusedxml
import numpy as np
from PIL import Image
from tiatoolbox.utils.transforms import imresize
from tiatoolbox.wsicore.wsireader import WSIReader
# Harden the stdlib XML parsers against known XML attacks (entity expansion,
# external entities) before any XML parsing happens in this module.
defusedxml.defuse_stdlib()
class TilePyramidGenerator:
r"""Generic tile pyramid generator with sensible defaults.
Args:
wsi (WSIReader):
The WSI reader object. Must implement
`tiatoolbox.wsicore.wsi_Reader.WSIReader.read_rect`.
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
overlap (int):
The number of extra pixel to add to each edge of the tile.
Default is 0.
"""
    def __init__(
        self,
        wsi: WSIReader,
        tile_size: int = 256,
        downsample: int = 2,
        overlap: int = 0,
    ):
        """Initialize the generator; see the class docstring for parameter semantics."""
        self.wsi = wsi
        self.tile_size = tile_size
        self.overlap = overlap
        self.downsample = downsample
@property
def output_tile_size(self) -> int:
r"""The size of the tile which will be returned.
This is equivalent to :math:`\text{tile size} + 2*\text{overlay}`.
"""
return self.tile_size + 2 * self.overlap
    @lru_cache(maxsize=None)
    def level_downsample(self, level: int) -> float:
        """Find the downsample factor for a level.

        Level 0 is the most downsampled (whole slide in one tile) and
        level_count - 1 is the baseline (factor 1).

        NOTE(review): the per-level factor is hard-coded to 2 here (and in
        level_count via log2); the `downsample` constructor argument is not
        consulted — confirm before relying on downsample != 2.
        NOTE(review): lru_cache on an instance method keeps `self` alive for
        the lifetime of the cache.
        """
        return 2 ** (self.level_count - level - 1)
@lru_cache(maxsize=None)
def level_dimensions(self, level: int) -> Tuple[int, int]:
"""The total pixel dimensions of the tile pyramid at a given level.
Args:
level (int):
The level to calculate the dimensions for.
"""
baseline_dims = self.wsi.info.slide_dimensions
level_dims = np.ceil(
np.divide(baseline_dims, self.level_downsample(level))
).astype(int)
return tuple(level_dims)
@lru_cache(maxsize=None)
def tile_grid_size(self, level: int) -> Tuple[int, int]:
"""Width and height of the minimal grid of tiles to cover the slide.
Args:
level (int):
The level to calculate the grid size for.
"""
if level < 0 or level >= self.level_count:
raise IndexError("Invalid level.")
return tuple(
np.ceil(np.divide(self.level_dimensions(level), self.tile_size)).astype(int)
)
    @property
    def sub_tile_level_count(self) -> int:
        """Number of extra levels below the single-tile thumbnail level.

        Zero for this generic generator; subclasses may override to add
        levels smaller than one tile (e.g. DeepZoom).
        """
        return 0
@property
def level_count(self) -> int:
"""Number of levels in the tile pyramid.
The number of levels is such that level_count - 1 is a 1:1 of
the slide baseline resolution (level 0 of the WSI).
"""
wsi_to_tile_ratio = np.divide(self.wsi.info.slide_dimensions, self.tile_size)
# Levels where a tile contains only part of the wsi
super_level_count = np.ceil(np.log2(wsi_to_tile_ratio)).max()
total_level_count = super_level_count + 1 + self.sub_tile_level_count
return int(total_level_count)
def get_thumb_tile(self) -> Image:
"""Return a thumbnail which fits the whole slide in one tile.
The thumbnail output size has the longest edge equal to the tile
size. The other edge preserves the original aspect ratio.
"""
slide_dims = np.array(self.wsi.info.slide_dimensions)
tile_dim = self.tile_size + self.overlap
out_dims = np.round(slide_dims / slide_dims.max() * tile_dim).astype(int)
bounds = (0, 0, *slide_dims)
thumb = self.wsi.read_bounds(
bounds, resolution=self.wsi.info.level_count - 1, units="level"
)
thumb = imresize(thumb, output_size=out_dims)
return Image.fromarray(thumb)
def get_tile(
    self,
    level: int,
    x: int,
    y: int,
    pad_mode: str = "constant",
    interpolation: str = "optimise",
) -> Image:
    """Get a tile at a given level and coordinate.
    Note that levels are in the reverse order of those in WSIReader.
    I.E. level 0 here corresponds to the lowest resolution whereas
    level 0 in WSIReader corresponds to the maximum resolution
    (baseline).
    Args:
        level (int):
            The pyramid level of the tile starting from 0 (the whole
            slide in one tile, 0-0-0).
        x (int):
            The tile index in the x direction.
        y (int):
            The tile index in the y direction.
        pad_mode (str):
            Method for padding when reading areas outside of the
            input image. Default is constant (0 padding). This is
            passed to `read_func` which defaults to
            :func:`safe_padded_read`. See :func:`safe_padded_read`
            for supported pad modes. Setting to "none" or None will
            result in no padding being applied.
        interpolation (str):
            Interpolation mode to use. Defaults to optimise.
            Possible values are: linear, cubic, lanczos, nearest,
            area, optimise. Linear most closely matches OpenSlide.
    Returns:
        Image:
            Pillow image of the tile.
    Example:
        >>> from tiatoolbox.tools.pyramid import TilePyramidGenerator
        >>> from tiatoolbox.wsicore.wsireader import get_wsireader
        >>> wsi = get_wsireader("sample.svs")
        >>> tile_generator = TilePyramidGenerator(
        ...     wsi=reader,
        ...     tile_size=256,
        ... )
        >>> tile_0_0_0 = tile_generator.get_tile(level=0, x=0, y=0)
    """
    if level < 0:
        raise IndexError
    # NOTE(review): `>` lets level == self.level_count through (giving
    # scale 0.5, i.e. upsampling past baseline) — confirm whether `>=`
    # was intended here.
    if level > self.level_count:
        raise IndexError("Invalid level.")
    scale = self.level_downsample(level)
    # Tile origin in baseline pixels, shifted up/left by the scaled
    # overlap margin.
    baseline_x = (x * self.tile_size * scale) - (self.overlap * scale)
    baseline_y = (y * self.tile_size * scale) - (self.overlap * scale)
    output_size = [self.output_tile_size] * 2
    coord = [baseline_x, baseline_y]
    if level < self.sub_tile_level_count:
        # Sub-tile levels: the whole slide fits inside one tile, so a
        # shrunken thumbnail is served instead of reading a region.
        output_size = self.output_tile_size // 2 ** (
            self.sub_tile_level_count - level
        )
        output_size = np.repeat(output_size, 2).astype(int)
        thumb = self.get_thumb_tile()
        thumb.thumbnail(output_size)
        return thumb
    slide_dimensions = np.array(self.wsi.info.slide_dimensions)
    # NOTE(review): `all` only rejects a tile whose origin is past the
    # slide edge in *both* axes — confirm whether a single-axis
    # out-of-range index should also raise.
    if all(slide_dimensions < [baseline_x, baseline_y]):
        raise IndexError
    # Don't print out any warnings about interpolation etc.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        rgb = self.wsi.read_rect(
            coord,
            size=output_size,
            resolution=1 / scale,
            units="baseline",
            pad_mode=pad_mode,
            interpolation=interpolation,
        )
    return Image.fromarray(rgb)
def tile_path(self, level: int, x: int, y: int) -> Path:
    """Generate the path for a specified tile.

    Abstract: concrete generators (e.g. a Zoomify generator) override
    this to define their on-disk layout.
    Args:
        level (int):
            The pyramid level of the tile starting from 0 (the whole
            slide in one tile, 0-0-0).
        x (int):
            The tile index in the x direction.
        y (int):
            The tile index in the y direction.
    Returns:
        Path:
            A pathlib path object with two parts.
    Raises:
        NotImplementedError: always; subclasses must implement.
    """
    raise NotImplementedError
def dump(  # noqa: CCR001
    self, path: Union[str, Path], container=None, compression=None
):
    """Write all tiles to disk.

    Arguments:
        path (str or Path):
            The path to write the tiles to.
        container (str):
            Container to use. Defaults to None which saves to a
            directory. Possible values are "zip", "tar".
        compression (str):
            Compression method. Defaults to None. Possible values
            are None, "deflate", "gzip", "bz2", "lzma". Note that
            tar does not support deflate and zip does not support
            gzip.
    Examples:
        >>> from tiatoolbox.tools.pyramid import TilePyramidGenerator
        >>> from tiatoolbox.wsicore.wsireader import get_wsireader
        >>> wsi = get_wsireader("sample.svs")
        >>> tile_generator = TilePyramidGenerator(
        ...     wsi=reader,
        ...     tile_size=256,
        ... )
        >>> tile_generator.dump(
        ...     path="sample.gz.zip",
        ...     container="zip",
        ...     compression="gzip",
        ... )
    """
    path = Path(path)
    if container not in [None, "zip", "tar"]:
        raise ValueError("Unsupported container.")
    # Each branch binds its own `save_tile` so the write loop at the end
    # is container-agnostic.
    if container is None:
        path.mkdir(parents=False)
        if compression is not None:
            raise ValueError("Unsupported compression for container None.")
        def save_tile(tile_path: Path, tile: Image.Image) -> None:
            """Write the tile to the output directory."""
            full_path = path / tile_path
            full_path.parent.mkdir(parents=True, exist_ok=True)
            tile.save(full_path)
    elif container == "zip":
        # "gzip" is deliberately absent: zip archives do not support it.
        compression2enum = {
            None: zipfile.ZIP_STORED,
            "deflate": zipfile.ZIP_DEFLATED,
            "bz2": zipfile.ZIP_BZIP2,
            "lzma": zipfile.ZIP_LZMA,
        }
        if compression not in compression2enum:
            raise ValueError("Unsupported compression for zip.")
        archive = zipfile.ZipFile(
            path, mode="w", compression=compression2enum[compression]
        )
        def save_tile(tile_path: Path, tile: Image.Image) -> None:
            """Write the tile to the output zip."""
            bio = BytesIO()
            tile.save(bio, format="jpeg")
            bio.seek(0)
            data = bio.read()
            archive.writestr(
                str(tile_path),
                data,
                compress_type=compression2enum[compression],
            )
    else:  # container == "tar":
        # "deflate" is deliberately absent: tar archives do not support it.
        compression2mode = {
            None: "w",
            "gzip": "w:gz",
            "bz2": "w:bz2",
            "lzma": "w:xz",
        }
        if compression not in compression2mode:
            raise ValueError("Unsupported compression for tar.")
        archive = tarfile.TarFile.open(path, mode=compression2mode[compression])
        def save_tile(tile_path: Path, tile: Image.Image) -> None:
            """Write the tile to the output tar."""
            bio = BytesIO()
            tile.save(bio, format="jpeg")
            tar_info = tarfile.TarInfo(name=str(tile_path))
            tar_info.mtime = time.time()
            # BUG FIX: record the payload length *before* rewinding.
            # Previously this read bio.tell() after bio.seek(0), which is
            # always 0, so every tar member was written empty.
            tar_info.size = bio.tell()
            bio.seek(0)
            archive.addfile(tarinfo=tar_info, fileobj=bio)
    for level in range(self.level_count):
        for x, y in np.ndindex(self.tile_grid_size(level)):
            tile = self.get_tile(level=level, x=x, y=y)
            tile_path = self.tile_path(level, x, y)
            save_tile(tile_path, tile)
    if container is not None:
        archive.close()
def __len__(self) -> int:
    """Total number of tiles across every pyramid level."""
    total = 0
    for lvl in range(self.level_count):
        total += np.prod(self.tile_grid_size(lvl))
    return total
def __iter__(self) -> Iterable:
    """Yield every tile, iterating levels outermost and x before y."""
    for lvl in range(self.level_count):
        for tx, ty in np.ndindex(self.tile_grid_size(lvl)):
            yield self.get_tile(level=lvl, x=tx, y=ty)
class ZoomifyGenerator(TilePyramidGenerator):
    r"""Pyramid tile generator with extra Zoomify specific methods.

    Zoomify groups tiles into 'TileGroup' directories of at most 256
    tiles each (a workaround for old file-system limits); the extra
    :meth:`tile_group` method computes the group for a tile when
    generating tile paths.

    An old description of the Zoomify format can be found `here`_.

    .. _here:
        https://ecommons.cornell.edu/bitstream/handle/1813/5410/Introducing_Zoomify_Image.pdf

    Args:
        wsi (WSIReader):
            The WSI reader object. Must implement
            `tiatoolbox.wsicore.wsi_Reader.WSIReader.read_rect`.
        tile_size (int):
            The size of tiles to generate. Default is 256. Note that the
            output tile size will be :math:`\text{tile size} + 2
            \times\text{overlap}`.
        downsample (int):
            The downsample factor between levels. Default is 2.
        tile_overlap (int):
            The number of extra pixel to add to each edge of the tile.
            Default is 0.
    """

    @lru_cache(maxsize=None)
    def tile_group(self, level: int, x: int, y: int):
        """Find the tile group for a tile index.

        Tile groups are numbered from level 0 (tile 0-0-0) and a new
        group starts every 256 tiles, counting in ZXY axis order.

        Args:
            level (int):
                The pyramid level of the tile starting from 0 (the whole
                slide in one tile, 0-0-0).
            x (int):
                The tile index in the x direction.
            y (int):
                The tile index in the y direction.

        Returns:
            int:
                The tile group for the specified tile.
        """
        grid_shape = np.array(self.tile_grid_size(level))
        if any(grid_shape <= [x, y]):
            raise IndexError
        tiles_before_level = sum(
            np.prod(self.tile_grid_size(lvl)) for lvl in range(level)
        )
        offset_in_level = np.ravel_multi_index(
            (y, x), self.tile_grid_size(level)[::-1]
        )
        flat_index = tiles_before_level + offset_in_level
        return flat_index // 256  # 256 tiles per group

    @lru_cache(maxsize=None)
    def tile_path(self, level: int, x: int, y: int) -> Path:
        """Generate the Zoomify path for a specified tile.

        Args:
            level (int):
                The pyramid level of the tile starting from 0 (the whole
                slide in one tile, 0-0-0).
            x (int):
                The tile index in the x direction.
            y (int):
                The tile index in the y direction.

        Returns:
            Path:
                A pathlib path object with two parts.
        """
        group = self.tile_group(level, x, y)
        return Path(f"TileGroup{group}") / f"{level}-{x}-{y}.jpg"
| [
"numpy.divide",
"io.BytesIO",
"zipfile.ZipFile",
"warnings.simplefilter",
"numpy.log2",
"time.time",
"pathlib.Path",
"tiatoolbox.utils.transforms.imresize",
"numpy.array",
"defusedxml.defuse_stdlib",
"warnings.catch_warnings",
"PIL.Image.fromarray",
"tarfile.TarFile.open",
"functools.lru_c... | [((693, 719), 'defusedxml.defuse_stdlib', 'defusedxml.defuse_stdlib', ([], {}), '()\n', (717, 719), False, 'import defusedxml\n'), ((1911, 1934), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (1920, 1934), False, 'from functools import lru_cache\n'), ((2099, 2122), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (2108, 2122), False, 'from functools import lru_cache\n'), ((2587, 2610), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (2596, 2610), False, 'from functools import lru_cache\n'), ((13651, 13674), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (13660, 13674), False, 'from functools import lru_cache\n'), ((14687, 14710), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (14696, 14710), False, 'from functools import lru_cache\n'), ((3417, 3474), 'numpy.divide', 'np.divide', (['self.wsi.info.slide_dimensions', 'self.tile_size'], {}), '(self.wsi.info.slide_dimensions, self.tile_size)\n', (3426, 3474), True, 'import numpy as np\n'), ((4005, 4045), 'numpy.array', 'np.array', (['self.wsi.info.slide_dimensions'], {}), '(self.wsi.info.slide_dimensions)\n', (4013, 4045), True, 'import numpy as np\n'), ((4354, 4391), 'tiatoolbox.utils.transforms.imresize', 'imresize', (['thumb'], {'output_size': 'out_dims'}), '(thumb, output_size=out_dims)\n', (4362, 4391), False, 'from tiatoolbox.utils.transforms import imresize\n'), ((4407, 4429), 'PIL.Image.fromarray', 'Image.fromarray', (['thumb'], {}), '(thumb)\n', (4422, 4429), False, 'from PIL import Image\n'), ((7154, 7194), 'numpy.array', 'np.array', (['self.wsi.info.slide_dimensions'], {}), '(self.wsi.info.slide_dimensions)\n', (7162, 7194), True, 'import numpy as np\n'), ((7710, 7730), 'PIL.Image.fromarray', 'Image.fromarray', (['rgb'], {}), '(rgb)\n', (7725, 7730), False, 'from PIL import Image\n'), ((9498, 9508), 
'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (9502, 9508), False, 'from pathlib import Path\n'), ((7363, 7388), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7386, 7388), False, 'import warnings\n'), ((7402, 7433), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (7423, 7433), False, 'import warnings\n'), ((15308, 15329), 'pathlib.Path', 'Path', (['f"""TileGroup{g}"""'], {}), "(f'TileGroup{g}')\n", (15312, 15329), False, 'from pathlib import Path\n'), ((10487, 10561), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path'], {'mode': '"""w"""', 'compression': 'compression2enum[compression]'}), "(path, mode='w', compression=compression2enum[compression])\n", (10502, 10561), False, 'import zipfile\n'), ((11391, 11453), 'tarfile.TarFile.open', 'tarfile.TarFile.open', (['path'], {'mode': 'compression2mode[compression]'}), '(path, mode=compression2mode[compression])\n', (11411, 11453), False, 'import tarfile\n'), ((3571, 3597), 'numpy.log2', 'np.log2', (['wsi_to_tile_ratio'], {}), '(wsi_to_tile_ratio)\n', (3578, 3597), True, 'import numpy as np\n'), ((6981, 7006), 'numpy.repeat', 'np.repeat', (['output_size', '(2)'], {}), '(output_size, 2)\n', (6990, 7006), True, 'import numpy as np\n'), ((10742, 10751), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (10749, 10751), False, 'from io import BytesIO\n'), ((11604, 11613), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (11611, 11613), False, 'from io import BytesIO\n'), ((11785, 11796), 'time.time', 'time.time', ([], {}), '()\n', (11794, 11796), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""Create a binary dust mask from the Planck/HFI 353 GHz sky map."""
import healpy
import numpy as np
import os
import matplotlib.pyplot as plt
print("-- This code will create a mask from the Sky Map for Plank/HFI.")
# Change this to find a suitable mask
THRESHOLD = 0.01
print("* Threshold: ", THRESHOLD)
# Load the HFI 353 GHz map from the FITS file downloaded from the Planck Legacy Archive (PLA)
print("-- Map Loading.")
hfi353 = healpy.read_map("Milky_Way/HFI_SkyMap_353-psb_2048_R3.01_full.fits", verbose=False)
print("-- Map Loaded.")
# Reduce the resolution of the map in order to save memory and computational time
hfi353 = healpy.ud_grade(hfi353, 256)
# Plot the galaxy map (histogram-equalised colour scale) in both frames
print("-- Saving map plot.")
healpy.mollview(hfi353, coord='GE', norm="hist")
plt.savefig('plots/elliptic_galaxy_map.png', dpi=600)
healpy.mollview(hfi353, norm="hist")
plt.savefig('plots/galactic_galaxy_map.png', dpi=600)
print("-- Map plot saved.")
# Check if the dust mask already exists; if so, remove the stale file
exists = os.path.isfile("HFI_dust_mask.fits.gz")
if exists:
    print("-- Dust mask file already present. It will be rewritten.")
    os.remove("HFI_dust_mask.fits.gz")
# Rotate the map from Galactic to Ecliptic coordinates
rotator = healpy.rotator.Rotator(coord=['G','E'])
hfi353 = rotator.rotate_map_pixel(hfi353)
# Apply a 1-degree FWHM smoothing filter to the map
hfi353 = healpy.smoothing(hfi353, fwhm=np.deg2rad(1.0), verbose=False)
# Normalize the pixel values to [0, 1]
hfi353 -= np.min(hfi353)
hfi353 /= np.max(hfi353)
# Binarize the map at THRESHOLD
hfi353[hfi353 <= THRESHOLD] = 0
hfi353[hfi353 > THRESHOLD] = 1
print("-- Saving dust mask.")
# Save the mask to a new file and read it back for plotting
healpy.write_map("HFI_dust_mask.fits.gz", hfi353, coord='E')
dust_map = healpy.read_map("HFI_dust_mask.fits.gz", verbose=False)
print("-- Dust mask saved.")
print("-- Saving mask plot.")
# Save a picture of the dust mask
healpy.mollview(dust_map)
# BUG FIX: savefig must run *before* plt.show() -- show() destroys the
# current figure, so saving afterwards wrote out a blank image.
plt.savefig('plots/eliptic_galaxy_mask.png', dpi=600)  # 'eliptic' typo kept so downstream paths keep working
plt.show()
print("-- Mask plot saved.")
print("-- Program completed successfully.")
| [
"healpy.write_map",
"os.remove",
"matplotlib.pyplot.show",
"healpy.mollview",
"numpy.deg2rad",
"healpy.rotator.Rotator",
"healpy.ud_grade",
"os.path.isfile",
"numpy.min",
"numpy.max",
"healpy.read_map",
"matplotlib.pyplot.savefig"
] | [((417, 504), 'healpy.read_map', 'healpy.read_map', (['"""Milky_Way/HFI_SkyMap_353-psb_2048_R3.01_full.fits"""'], {'verbose': '(False)'}), "('Milky_Way/HFI_SkyMap_353-psb_2048_R3.01_full.fits',\n verbose=False)\n", (432, 504), False, 'import healpy\n'), ((616, 644), 'healpy.ud_grade', 'healpy.ud_grade', (['hfi353', '(256)'], {}), '(hfi353, 256)\n', (631, 644), False, 'import healpy\n'), ((731, 779), 'healpy.mollview', 'healpy.mollview', (['hfi353'], {'coord': '"""GE"""', 'norm': '"""hist"""'}), "(hfi353, coord='GE', norm='hist')\n", (746, 779), False, 'import healpy\n'), ((780, 833), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/elliptic_galaxy_map.png"""'], {'dpi': '(600)'}), "('plots/elliptic_galaxy_map.png', dpi=600)\n", (791, 833), True, 'import matplotlib.pyplot as plt\n'), ((835, 871), 'healpy.mollview', 'healpy.mollview', (['hfi353'], {'norm': '"""hist"""'}), "(hfi353, norm='hist')\n", (850, 871), False, 'import healpy\n'), ((872, 925), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/galactic_galaxy_map.png"""'], {'dpi': '(600)'}), "('plots/galactic_galaxy_map.png', dpi=600)\n", (883, 925), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1042), 'os.path.isfile', 'os.path.isfile', (['"""HFI_dust_mask.fits.gz"""'], {}), "('HFI_dust_mask.fits.gz')\n", (1017, 1042), False, 'import os\n'), ((1259, 1299), 'healpy.rotator.Rotator', 'healpy.rotator.Rotator', ([], {'coord': "['G', 'E']"}), "(coord=['G', 'E'])\n", (1281, 1299), False, 'import healpy\n'), ((1491, 1505), 'numpy.min', 'np.min', (['hfi353'], {}), '(hfi353)\n', (1497, 1505), True, 'import numpy as np\n'), ((1516, 1530), 'numpy.max', 'np.max', (['hfi353'], {}), '(hfi353)\n', (1522, 1530), True, 'import numpy as np\n'), ((1673, 1733), 'healpy.write_map', 'healpy.write_map', (['"""HFI_dust_mask.fits.gz"""', 'hfi353'], {'coord': '"""E"""'}), "('HFI_dust_mask.fits.gz', hfi353, coord='E')\n", (1689, 1733), False, 'import healpy\n'), ((1760, 1815), 'healpy.read_map', 'healpy.read_map', 
(['"""HFI_dust_mask.fits.gz"""'], {'verbose': '(False)'}), "('HFI_dust_mask.fits.gz', verbose=False)\n", (1775, 1815), False, 'import healpy\n'), ((1905, 1930), 'healpy.mollview', 'healpy.mollview', (['dust_map'], {}), '(dust_map)\n', (1920, 1930), False, 'import healpy\n'), ((1931, 1941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1939, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/eliptic_galaxy_mask.png"""'], {'dpi': '(600)'}), "('plots/eliptic_galaxy_mask.png', dpi=600)\n", (1953, 1995), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1191), 'os.remove', 'os.remove', (['"""HFI_dust_mask.fits.gz"""'], {}), "('HFI_dust_mask.fits.gz')\n", (1166, 1191), False, 'import os\n'), ((1419, 1434), 'numpy.deg2rad', 'np.deg2rad', (['(1.0)'], {}), '(1.0)\n', (1429, 1434), True, 'import numpy as np\n')] |
import epipack
import numpy as np
from epipack.stochastic_epi_models import StochasticEpiModel
from math import exp
from numpy import random
import networkx as nx
from smallworld import get_smallworld_graph
from scipy.stats import expon
import numpy as np
import networkx as nx
def _edge(i, j):
    """Return the pair (i, j) ordered ascending; reject self-loops."""
    if i == j:
        raise ValueError('self-loop')
    return (i, j) if i < j else (j, i)
def get_expon_small_world(N,k0,more_lattice_like=False,node_creation_order='random'):
    """Build a graph with exponential degrees wired to ring neighbours.

    Per-node stub counts are drawn from an exponential distribution
    (scale ``k0``) and matched by walking outwards from each node on a
    ring, which yields a locally clustered small-world-like topology.

    Args:
        N (int): number of nodes.
        k0 (float): scale (mean) of the exponential degree distribution.
        more_lattice_like (bool): currently unused.
        node_creation_order (str): 'random', 'desc' or 'asc' — the order
            in which nodes consume their stubs (see inline comments for
            the resulting topologies).

    Returns:
        networkx.Graph
    """
    G = nx.empty_graph(N)
    degree_seq = [ int(k) for k in expon.rvs(scale=k0,size=N)]
    stubs = list(degree_seq)
    # The total stub count must be even to be pairable; fix parity by
    # granting one random node an extra stub.
    if sum(stubs) % 2 == 1:
        stubs[np.random.randint(0,N-1)] += 1
    if node_creation_order == 'random':
        # generates small world but locally clustered
        order = np.random.permutation(N)
    elif node_creation_order == 'desc':
        # generates locally clustered
        order = np.argsort(stubs)[::-1]
    elif node_creation_order == 'asc':
        # generates locally clustered with short paths
        order = np.argsort(stubs)
    else:
        raise ValueError("`node_creation_order` must be 'random', 'desc', or 'asc', not " + node_creation_order)
    edges = []
    cnt = 0
    for i in order:
        # Walk outwards from i, alternating right (i+d) and left (i-d)
        # around the ring, consuming stubs on both endpoints.
        d = 1
        up = True
        while stubs[i] > 0:
            if up:
                j = (i+d) % N
            else:
                j = (i-d) % N
                d += 1
            if i == j:
                break
            if stubs[j] > 0:#and not G.has_edge(i,j):
                edges.append(_edge(int(i),int(j)))
                #G.add_edge(i,j)
                stubs[i] -= 1
                stubs[j] -= 1
            up = not up
            if d >= N//2:
                # Wrapped half way around the ring: give up on this node
                # (leftover stubs stay unmatched by design).
                break
            #f d > N // 2:
            #    print(stubs[i], np.mean(stubs), np.min(stubs),np.max(stubs),cnt)
            #    raise ValueError('Couldn''t find stub')
            cnt += 1
    #print("leftover stubs:",sum(stubs))
    #print("number of nodes with leftover stubs:",np.count_nonzero(stubs))
    #print("len(edges) = ", len(edges), "len(set(edges)) = ", len(set(edges)), "difference = ", len(edges) - len(set(edges)))
    G.add_edges_from(edges)
    return G
def confignetwork(N, parameter, **kwargs):
    """Build a configuration-model contact network with exponential degrees.

    Returns:
        (edge_weight_tuples, k_norm): unit-weight edge list and the mean
        degree of the resulting simple graph.
    """
    p = parameter
    k0 = p['number_of_contacts']

    def expodegree(x):
        # Exponential degree pdf with mean k0 (unnormalised over the support).
        return 1 / k0 * exp(-x / k0)

    k_i = list(range(N - 1))
    P = np.array([expodegree(k) for k in k_i])
    P /= P.sum()

    def seq(k_i, P):
        # Redraw until the degree sum is even (required for stub pairing).
        expected_degree_sequence = np.linspace(0, 1, 2)
        while sum(expected_degree_sequence) % 2 != 0:
            expected_degree_sequence = np.random.choice(k_i, N, p=P)
        return expected_degree_sequence

    expected_degree_sequence = seq(k_i, P)
    G = nx.configuration_model(expected_degree_sequence, create_using=nx.Graph())
    G.remove_edges_from(nx.selfloop_edges(G))
    edge_weight_tuples = [(u, v, 1.0) for u, v in G.edges()]
    k_norm = 2 * len(edge_weight_tuples) / N
    del G
    return edge_weight_tuples, k_norm
def swnetwork(N, parameter, **kwargs):
    """Build a small-world contact network.

    Returns:
        (edge_weight_tuples, k_norm): unit-weight edge list and the mean
        degree of the generated graph.
    """
    p = parameter
    half_k = int(p['number_of_contacts'] / 2)
    beta = 10e-7  # rewiring probability; tuned for k = 20, N = 200_000 or k0 = 10
    graph = get_smallworld_graph(N, half_k, beta)
    edge_weight_tuples = [(u, v, 1.0) for u, v in graph.edges()]
    k_norm = 2 * len(edge_weight_tuples) / N
    del graph
    return edge_weight_tuples, k_norm
def exp_sw_network(N, parameter, **kwargs):
    """Build an exponential-degree small-world network.

    Returns:
        (edge_weight_tuples, k_norm): unit-weight edge list and the mean
        degree of the generated graph.
    """
    p = parameter
    k0 = p['number_of_contacts']
    graph = get_expon_small_world(N, k0, node_creation_order='random')
    edge_weight_tuples = [(u, v, 1.0) for u, v in graph.edges()]
    k_norm = 2 * len(edge_weight_tuples) / N
    del graph
    print(k_norm, N)
    return edge_weight_tuples, k_norm
def simulation_code(kwargs):
    """Run one stochastic tracing-app epidemic simulation.

    Args:
        kwargs (dict): keyword arguments forwarded to the inner ``mixed``
            function: N, parameter, time, sampling_dt, quarantiningS, a,
            q and R0.

    Returns:
        tuple: peak sizes of the R, Ra, X, Xa and C compartments.
    """
    def mixed(N, parameter, time, sampling_dt, quarantiningS, a, q, R0, **kwargs):
        """Build the contact network and epidemic model, then simulate."""
        p = parameter
        edge_weight_tuples, k_norm = confignetwork(N, parameter)
        #edge_weight_tuples, k_norm = swnetwork(N, parameter)
        # Isolation rate kappa chosen so a fraction q of symptomatic
        # individuals is detected before recovering.
        kappa = (q*p['recovery_rate'])/(1-q)
        # Split initial infecteds/susceptibles into app participants
        # (suffix 'a') and non-participants, with participation rate a.
        IPa0 = int(random.binomial(p['I_0'], a, 1))
        IP0 = int(p['I_0'] - IPa0)
        Sa0 = int(random.binomial(N-p['I_0'], a, 1))
        S0 = int(N - p['I_0'] - Sa0)
        if quarantiningS == True:
            # Variant with a quarantine compartment Qa for traced
            # susceptible app users.
            model = epipack.StochasticEpiModel(['S','E','I_P','I_S','I_A','R','T','X','Sa','Ea','I_Pa','I_Sa','I_Aa','Ra','Ta','Xa','Qa','C'],N, edge_weight_tuples ,directed=False)
            model.set_conditional_link_transmission_processes({
                ("Ta", "->", "Xa") : [
                    ("Xa", "I_Pa", p["y"], "Xa", "Ta" ),
                    ("Xa", "I_Sa", p["y"], "Xa", "Ta" ),
                    ("Xa", "I_Aa", p["y"], "Xa", "Ta" ),
                    ("Xa", "Ea", p["y"], "Xa", "Ta" ),
                    ("Xa", "Sa", "->", "Xa", "Qa" ),
                    ("Xa", "I_Pa", (1-p["y"]), "Xa", "C" ),
                    ("Xa", "I_Sa", (1-p["y"]), "Xa", "C" ),
                    ("Xa", "I_Aa", (1-p["y"]), "Xa", "C" ),
                    ("Xa", "Ea", (1-p["y"]), "Xa", "C" )]
                    })
            model.set_node_transition_processes([
                ('E',p['alpha'],'I_P'),
                ('I_P',(1-p['x'])*p['beta'],'I_S'),
                ('I_P',p['x']*p['beta'],'I_A'),
                ('I_A',p['recovery_rate'],'R'),
                ('I_S',p['recovery_rate'],'R'),
                ('I_S',kappa,'T'),
                ('T',p['chi'],'X'),
                ('Qa',p['omega'],'Sa'),
                ('Ea',p['alpha'],'I_Pa'),
                ('I_Pa',(1-p['x'])*p['beta'],'I_Sa'),
                ('I_Pa',p['x']*p['beta'],'I_Aa'),
                ('I_Aa',p['recovery_rate'],'Ra'),
                ('I_Sa',p['recovery_rate'],'Ra'),
                ('I_Sa',kappa,'Ta'),
                ('Ta',p["z"]*p['chi'],'Xa'),
                ('Ta',(1-p["z"])*p['chi'],'X')])
        elif quarantiningS == False:
            # Variant without the Qa compartment (traced susceptibles are
            # not quarantined).
            model = epipack.StochasticEpiModel(['S','E','I_P','I_S','I_A','R','T','X','Sa','Ea','I_Pa','I_Sa','I_Aa','Ra','Ta','Xa','C'],N, edge_weight_tuples ,directed=False)
            # BUG FIX: this branch previously referenced an undefined bare
            # name `y` (NameError at runtime); use p["y"] as in the branch
            # above.
            model.set_conditional_link_transmission_processes({
                ("Ta", "->", "Xa") : [
                    ("Xa", "I_Pa", p["y"], "Xa", "Ta" ),
                    ("Xa", "I_Sa", p["y"], "Xa", "Ta" ),
                    ("Xa", "I_Aa", p["y"], "Xa", "Ta" ),
                    ("Xa", "Ea", p["y"], "Xa", "Ta" ),
                    ("Xa", "I_Pa", (1-p["y"]), "Xa", "C" ),
                    ("Xa", "I_Sa", (1-p["y"]), "Xa", "C" ),
                    ("Xa", "I_Aa", (1-p["y"]), "Xa", "C" ),
                    ("Xa", "Ea", (1-p["y"]), "Xa", "C" )]
                    })
            model.set_node_transition_processes([
                ('E',p['alpha'],'I_P'),
                ('I_P',(1-p['x'])*p['beta'],'I_S'),
                ('I_P',p['x']*p['beta'],'I_A'),
                ('I_A',p['recovery_rate'],'R'),
                ('I_S',p['recovery_rate'],'R'),
                ('I_S',kappa,'T'),
                ('T',p['chi'],'X'),
                ('Ea',p['alpha'],'I_Pa'),
                ('I_Pa',(1-p['x'])*p['beta'],'I_Sa'),
                ('I_Pa',p['x']*p['beta'],'I_Aa'),
                ('I_Aa',p['recovery_rate'],'Ra'),
                ('I_Sa',p['recovery_rate'],'Ra'),
                ('I_Sa',kappa,'Ta'),
                ('Ta',p["z"]*p['chi'],'Xa'),
                ('Ta',(1-p["z"])*p['chi'],'X')])
        # Transmission rates are halved per directed pairing; app users
        # ('a' suffix) and non-users infect each other symmetrically.
        model.set_link_transmission_processes([
            ('I_Pa','S',R0/k_norm*p['beta']/2,'I_Pa','E'),
            ('I_Aa','S',R0/k_norm*p['recovery_rate']/2,'I_Aa','E'),
            ('I_Sa','S',R0/k_norm*p['recovery_rate']/2,'I_Sa','E'),
            ('I_P','Sa',R0/k_norm*p['beta']/2,'I_P','Ea'),
            ('I_A','Sa',R0/k_norm*p['recovery_rate']/2,'I_A','Ea'),
            ('I_S','Sa',R0/k_norm*p['recovery_rate']/2,'I_S','Ea'),
            ('I_Pa','Sa',R0/k_norm*p['beta']/2,'I_Pa','Ea'),
            ('I_Aa','Sa',R0/k_norm*p['recovery_rate']/2,'I_Aa','Ea'),
            ('I_Sa','Sa',R0/k_norm*p['recovery_rate']/2,'I_Sa','Ea'),
            ('I_P','S',R0/k_norm*p['beta']/2,'I_P','E'),
            ('I_A','S',R0/k_norm*p['recovery_rate']/2,'I_A','E'),
            ('I_S','S',R0/k_norm*p['recovery_rate']/2,'I_S','E')])
        model.set_network(N, edge_weight_tuples)
        del edge_weight_tuples
        model.set_random_initial_conditions({ 'Sa': Sa0, 'S': S0, 'I_P': IP0, 'I_Pa': IPa0})
        # Free references early to keep peak memory down during simulate.
        del p
        del a
        del q
        del N
        t, result = model.simulate(tmax = time , sampling_dt = sampling_dt)
        del model
        del t
        del time
        del sampling_dt
        # Peak sizes of the recovered, traced and contact compartments.
        results = max(result['R']),max(result['Ra']),max(result['X']),max(result['Xa']),max(result['C'])
        del result
        return results
    results = mixed(**kwargs)
    return results
| [
"numpy.random.choice",
"math.exp",
"numpy.random.binomial",
"networkx.selfloop_edges",
"scipy.stats.expon.rvs",
"numpy.argsort",
"numpy.random.randint",
"numpy.array",
"networkx.Graph",
"numpy.linspace",
"numpy.random.permutation",
"smallworld.get_smallworld_graph",
"epipack.StochasticEpiMod... | [((510, 527), 'networkx.empty_graph', 'nx.empty_graph', (['N'], {}), '(N)\n', (524, 527), True, 'import networkx as nx\n'), ((2486, 2497), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (2494, 2497), True, 'import numpy as np\n'), ((3316, 3355), 'smallworld.get_smallworld_graph', 'get_smallworld_graph', (['N', 'k_over_2', 'beta'], {}), '(N, k_over_2, beta)\n', (3336, 3355), False, 'from smallworld import get_smallworld_graph\n'), ((805, 829), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (826, 829), True, 'import numpy as np\n'), ((2570, 2590), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(2)'], {}), '(0, 1, 2)\n', (2581, 2590), True, 'import numpy as np\n'), ((2961, 2981), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['G'], {}), '(G)\n', (2978, 2981), True, 'import networkx as nx\n'), ((564, 591), 'scipy.stats.expon.rvs', 'expon.rvs', ([], {'scale': 'k0', 'size': 'N'}), '(scale=k0, size=N)\n', (573, 591), False, 'from scipy.stats import expon\n'), ((663, 690), 'numpy.random.randint', 'np.random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (680, 690), True, 'import numpy as np\n'), ((2346, 2358), 'math.exp', 'exp', (['(-x / k0)'], {}), '(-x / k0)\n', (2349, 2358), False, 'from math import exp\n'), ((2682, 2711), 'numpy.random.choice', 'np.random.choice', (['k_i', 'N'], {'p': 'P'}), '(k_i, N, p=P)\n', (2698, 2711), True, 'import numpy as np\n'), ((2925, 2935), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2933, 2935), True, 'import networkx as nx\n'), ((4172, 4203), 'numpy.random.binomial', 'random.binomial', (["p['I_0']", 'a', '(1)'], {}), "(p['I_0'], a, 1)\n", (4187, 4203), False, 'from numpy import random\n'), ((4258, 4293), 'numpy.random.binomial', 'random.binomial', (["(N - p['I_0'])", 'a', '(1)'], {}), "(N - p['I_0'], a, 1)\n", (4273, 4293), False, 'from numpy import random\n'), ((4384, 4570), 'epipack.StochasticEpiModel', 'epipack.StochasticEpiModel', (["['S', 'E', 'I_P', 'I_S', 
'I_A', 'R', 'T', 'X', 'Sa', 'Ea', 'I_Pa', 'I_Sa',\n 'I_Aa', 'Ra', 'Ta', 'Xa', 'Qa', 'C']", 'N', 'edge_weight_tuples'], {'directed': '(False)'}), "(['S', 'E', 'I_P', 'I_S', 'I_A', 'R', 'T', 'X',\n 'Sa', 'Ea', 'I_Pa', 'I_Sa', 'I_Aa', 'Ra', 'Ta', 'Xa', 'Qa', 'C'], N,\n edge_weight_tuples, directed=False)\n", (4410, 4570), False, 'import epipack\n'), ((924, 941), 'numpy.argsort', 'np.argsort', (['stubs'], {}), '(stubs)\n', (934, 941), True, 'import numpy as np\n'), ((1058, 1075), 'numpy.argsort', 'np.argsort', (['stubs'], {}), '(stubs)\n', (1068, 1075), True, 'import numpy as np\n'), ((6144, 6324), 'epipack.StochasticEpiModel', 'epipack.StochasticEpiModel', (["['S', 'E', 'I_P', 'I_S', 'I_A', 'R', 'T', 'X', 'Sa', 'Ea', 'I_Pa', 'I_Sa',\n 'I_Aa', 'Ra', 'Ta', 'Xa', 'C']", 'N', 'edge_weight_tuples'], {'directed': '(False)'}), "(['S', 'E', 'I_P', 'I_S', 'I_A', 'R', 'T', 'X',\n 'Sa', 'Ea', 'I_Pa', 'I_Sa', 'I_Aa', 'Ra', 'Ta', 'Xa', 'C'], N,\n edge_weight_tuples, directed=False)\n", (6170, 6324), False, 'import epipack\n')] |
import os
import cv2
import math
import time
import torch
import ntpath
from PIL import Image
import numpy as np
from configs import load_config, load_config_far_away
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils.bbox_tools import xywh2xyxy, xyxy2xywh, draw_bbox, grid_analysis
from datasetsnx import create_data_manager
from visualization.visualizer import Visualizer
def merge_dict(src, dst):
    """Merge `src` into `dst` in place (one level deep) and return `dst`.

    Keys missing from `dst` are copied over; for keys present in both,
    `dst`'s nested dict is updated with `src`'s entries.
    """
    for key, value in src.items():
        if key in dst:
            dst[key].update(value)
        else:
            dst[key] = value
    return dst
def calc_img_wh(img_w, img_h, heat=True, marginal=True):
    """Build plot specs describing the image width/height distribution.

    Args:
        img_w, img_h: per-image widths and heights.
        heat: also emit a 100px-binned 2-D histogram of (w, h).
        marginal: also emit 1-D histograms of w and h separately.
    Returns:
        dict mapping plot type -> {window name -> plot spec}.
    """
    sizes = np.stack((np.asarray(img_w), np.asarray(img_h)), axis=-1)
    vis_dict = {
        'scatter': {
            'oridatainfo_imgwh': {
                'x': sizes,
                'y': np.ones(len(sizes)).astype(np.int32),
                'opts': dict(title='oridatainfo_imgwh', markersize=1, webgl=True),
            },
        },
    }
    if heat:
        # Bin the sizes into 100px cells; `counts` avoids shadowing the
        # `heat` flag.
        max_w = int(np.ceil(sizes[..., 0].max() / 100) * 100)
        max_h = int(np.ceil(sizes[..., 1].max() / 100) * 100)
        counts = np.zeros((max_w // 100 + 1, max_h // 100 + 1))
        for col, row in np.round(sizes / 100).astype(np.int32):
            counts[col, row] += 1
        vis_dict['heatmap'] = {
            'oridatainfo_imgwh_heat': dict(
                x=counts,
                opts=dict(
                    title='oridatainfo_imgwh_heat',
                    xtickvals=list(range(max_w)),
                    xticklabels=[str(i * 100) for i in range(max_w)],
                    ytickvals=list(range(max_h)),
                    yticklabels=[str(i * 100) for i in range(max_h)],
                ),
            ),
        }
    if marginal:
        vis_dict['histogram'] = {
            'oridatainfo_imgw': dict(
                x=np.array(img_w),
                opts=dict(title='oridatainfo_imgw', numbins=20, webgl=True),
            ),
            'oridatainfo_imgh': dict(
                x=np.array(img_h),
                opts=dict(title='oridatainfo_imgh', numbins=20, webgl=True),
            ),
        }
    return vis_dict
def calc_box_wh(box_w, box_h, heat=True, marginal=True):
    """Build plot specs for the bounding-box width/height distribution.

    Args:
        box_w, box_h: per-image arrays of box widths / heights; they are
            flattened into one global list of boxes.
        heat: also emit a 10px-binned 2-D histogram of (w, h).
        marginal: also emit 1-D histograms of w and h separately.
    Returns:
        dict mapping plot type -> {window name -> plot spec}.
    """
    box_w_tmp = []
    for w in box_w:
        box_w_tmp += w.tolist()
    box_h_tmp = []
    for h in box_h:
        box_h_tmp += h.tolist()
    box_wh = np.concatenate((np.array(box_w_tmp)[..., np.newaxis], np.array(box_h_tmp)[..., np.newaxis]), axis=-1)
    vis_dict = dict(
        scatter=dict(
            oridatainfo_boxwh=dict(
                x=box_wh,
                y=np.ones(len(box_wh)).astype(np.int32),
                opts=dict(title='oridatainfo_boxwh', markersize=1, webgl=True),
            ),
        ),
    )
    if heat:
        # Bin box sizes into 10px cells. (Removed dead locals `max_v` and
        # `s` which were computed but never used; renamed the count array
        # so it no longer shadows the `heat` flag.)
        max_w = int(np.ceil(np.max(box_wh[..., 0]) / 10) * 10)
        max_h = int(np.ceil(np.max(box_wh[..., 1]) / 10) * 10)
        heat_arr = np.zeros((max_w // 10 + 1, max_h // 10 + 1))
        tmp = np.round(box_wh / 10).astype(np.int32)
        for t in tmp:
            heat_arr[t[0], t[1]] += 1
        vis_dict['heatmap'] = dict()
        vis_dict['heatmap']['oridatainfo_boxwh_heat'] = dict(
            x=heat_arr,
            opts=dict(
                title='oridatainfo_boxwh_heat',
                ylabel='x10',
                xlabel='x10',
            ),
        )
    if marginal:
        vis_dict['histogram'] = dict(
            oridatainfo_boxw=dict(
                x=box_wh[..., 0],
                opts=dict(title='oridatainfo_boxw', numbins=100, webgl=True),
            ),
            oridatainfo_boxh=dict(
                x=box_wh[..., 1],
                opts=dict(title='oridatainfo_boxh', numbins=100, webgl=True),
            ),
        )
    return vis_dict
def calc_box_whir(box_w, box_h, img_w, img_h, marginal=True):
    """Build plot specs for box width/height relative to the image size.

    Args:
        box_w, box_h: per-image arrays of box widths / heights.
        img_w, img_h: per-image widths and heights (same length lists).
        marginal: also emit 1-D histograms of the ratios and their quotient.
    Returns:
        dict mapping plot type -> {window name -> plot spec}.
    """
    ratio_w = []
    for bw, iw in zip(box_w, img_w):
        ratio_w.extend((bw / iw).tolist())
    ratio_h = []
    for bh, ih in zip(box_h, img_h):
        ratio_h.extend((bh / ih).tolist())
    rel_wh = np.stack((np.asarray(ratio_w), np.asarray(ratio_h)), axis=-1)
    n = 10
    counts = np.zeros((n + 1, n + 1))
    for col, row in np.round(rel_wh * n).astype(np.int32):
        counts[col, row] += 1
    vis_dict = {
        'heatmap': {
            'oridatainfo_boxwhir': dict(
                x=counts,
                opts=dict(
                    title='oridatainfo_boxwhir',
                    xtickvals=list(range(n + 1)),
                    xticklabels=[str(i / n) for i in range(n + 1)],
                    ytickvals=list(range(n + 1)),
                    yticklabels=[str(i / n) for i in range(n + 1)],
                ),
            ),
        },
    }
    if marginal:
        vis_dict['histogram'] = {
            'oridatainfo_boxwir': dict(
                x=np.asarray(ratio_w),
                opts=dict(title='oridatainfo_boxwir', numbins=100, webgl=True),
            ),
            'oridatainfo_boxhir': dict(
                x=np.asarray(ratio_h),
                opts=dict(title='oridatainfo_boxhir', numbins=100, webgl=True),
            ),
            'oridatainfo_boxwhr': dict(
                x=np.asarray(ratio_w) / np.asarray(ratio_h),
                opts=dict(title='oridatainfo_boxwhr', numbins=100, webgl=True),
            ),
        }
    return vis_dict
def calc_box_cir(box_cx, box_cy, img_w, img_h):
    """Build a heatmap spec of box centres in image-relative coordinates.

    Args:
        box_cx, box_cy: per-image arrays of box centre coordinates.
        img_w, img_h: per-image widths and heights (same length lists).
    Returns:
        dict with a single 'heatmap' plot spec, binned onto an 11x11 grid.
    """
    cx_rel = []
    for x, iw in zip(box_cx, img_w):
        cx_rel.extend((x / iw).tolist())
    cy_rel = []
    for y, ih in zip(box_cy, img_h):
        cy_rel.extend((y / ih).tolist())
    centres = np.stack((np.asarray(cx_rel), np.asarray(cy_rel)), axis=-1)
    n = 10
    counts = np.zeros((n + 1, n + 1))
    for col, row in np.round(centres * n).astype(np.int32):
        counts[col, row] += 1
    return {
        'heatmap': {
            'oridatainfo_boxcir': dict(
                x=counts,
                opts=dict(
                    title='oridatainfo_boxcir',
                    xtickvals=list(range(n + 1)),
                    xticklabels=[str(i / n) for i in range(n + 1)],
                    ytickvals=list(range(n + 1)),
                    yticklabels=[str(i / n) for i in range(n + 1)],
                ),
            ),
        },
    }
def calc_box_clscount(box_cls, num_classes):
    """Histogram the ground-truth class ids over all images.

    Parameters
    ----------
    box_cls : list of 1-D integer arrays, one per image, each holding the
        class id of every box in that image.
    num_classes : int
        Total number of classes; the output vector always has this length.

    Returns
    -------
    dict shaped for the visualizer, whose ``x`` entry is an int32 count
    vector of length ``num_classes``.
    """
    # np.bincount counts every id in one C-level pass instead of the former
    # Python double loop over images and boxes.
    per_image = [np.asarray(cs).ravel() for cs in box_cls]
    if per_image:
        all_ids = np.concatenate(per_image)
    else:
        # No images at all: original behaviour was an all-zero count vector.
        all_ids = np.zeros(0, dtype=np.int64)
    # minlength pads classes that never occur; the slice guards against
    # out-of-range ids growing the vector past num_classes.  (The old loop
    # silently *wrapped* negative ids via Python indexing; bincount raises
    # for them instead, which surfaces the bad label.)
    box_cls_count = (
        np.bincount(all_ids.astype(np.int64), minlength=num_classes)[:num_classes]
        .astype(np.int32)
    )
    vis_dict = dict(
        bar=dict(
            oridatainfo_cls=dict(
                x=box_cls_count,
                opts=dict(title='oridatainfo_cls'),
            ),
        ),
    )
    return vis_dict
if __name__ == '__main__':
    # Script entry point: walk one dataset, collect per-image box statistics
    # and push the resulting heatmaps/histograms to the visualizer.
    np.set_printoptions(precision=4, suppress=True)
    # torch.set_printoptions(precision=4, threshold=None, edgeitems=None, linewidth=None, profile=None)
    cfg = load_config('spnx')
    vis = Visualizer(cfg)
    # vis.vis.bar(X=np.random.rand(20))
    # exit()
    # data_manager = create_data_manager(cfg.train_data)
    data_manager = create_data_manager(cfg.test_data)
    dataloader = data_manager.load_data()
    info = data_manager.info
    time.sleep(0.1)
    rc = info['oobmab']
    print(data_manager.dataset.bamboo.rper())
    # print(data_manager.dataset.get_data_info(0))
    classes = info['classes']
    # Per-image accumulators: class ids, box sizes, box centres, image sizes.
    box_cls = []
    box_w = []
    box_h = []
    box_cx = []
    box_cy = []
    img_w = []
    img_h = []
    for i in tqdm(range(len(data_manager.dataset))):
        di = data_manager.dataset.get_data_info(i)
        boxes = di['bbox']
        # Columns 0-3 are used below as (cx, cy, w, h) and column 4 as the
        # class id -- assumes xyxy2xywh produces that layout; TODO confirm.
        boxes = xyxy2xywh(boxes)
        img_w.append(di['w'])
        img_h.append(di['h'])
        box_cx.append(boxes[:, 0])
        box_cy.append(boxes[:, 1])
        box_w.append(boxes[:, 2])
        box_h.append(boxes[:, 3])
        box_cls.append(boxes[:, 4].astype(np.int32))
    # Merge the individual statistic dicts into one payload for the visualizer.
    vis_dict = dict()
    vis_dict = merge_dict(calc_img_wh(img_w, img_h), vis_dict)
    vis_dict = merge_dict(calc_box_wh(box_w, box_h), vis_dict)
    vis_dict = merge_dict(calc_box_whir(box_w, box_h, img_w, img_h), vis_dict)
    vis_dict = merge_dict(calc_box_cir(box_cx, box_cy, img_w, img_h), vis_dict)
    vis_dict = merge_dict(calc_box_clscount(box_cls, len(info['classes'])), vis_dict)
    # print(vis_dict.keys())
    # exit()
    vis.visualize(vis_dict, 0, 0)
    # plt.scatter(img_w, img_h, s=2, alpha=0.6, color='red')
    # plt.show()
| [
"configs.load_config",
"numpy.set_printoptions",
"math.ceil",
"numpy.zeros",
"time.sleep",
"utils.bbox_tools.xyxy2xywh",
"numpy.max",
"datasetsnx.create_data_manager",
"numpy.array",
"numpy.round",
"visualization.visualizer.Visualizer"
] | [((4256, 4280), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (4264, 4280), True, 'import numpy as np\n'), ((5867, 5891), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (5875, 5891), True, 'import numpy as np\n'), ((6926, 6973), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)'}), '(precision=4, suppress=True)\n', (6945, 6973), True, 'import numpy as np\n'), ((7089, 7108), 'configs.load_config', 'load_config', (['"""spnx"""'], {}), "('spnx')\n", (7100, 7108), False, 'from configs import load_config, load_config_far_away\n'), ((7119, 7134), 'visualization.visualizer.Visualizer', 'Visualizer', (['cfg'], {}), '(cfg)\n', (7129, 7134), False, 'from visualization.visualizer import Visualizer\n'), ((7266, 7300), 'datasetsnx.create_data_manager', 'create_data_manager', (['cfg.test_data'], {}), '(cfg.test_data)\n', (7285, 7300), False, 'from datasetsnx import create_data_manager\n'), ((7381, 7396), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7391, 7396), False, 'import time\n'), ((1165, 1211), 'numpy.zeros', 'np.zeros', (['(max_w // 100 + 1, max_h // 100 + 1)'], {}), '((max_w // 100 + 1, max_h // 100 + 1))\n', (1173, 1211), True, 'import numpy as np\n'), ((3019, 3063), 'numpy.zeros', 'np.zeros', (['(max_w // 10 + 1, max_h // 10 + 1)'], {}), '((max_w // 10 + 1, max_h // 10 + 1))\n', (3027, 3063), True, 'import numpy as np\n'), ((7807, 7823), 'utils.bbox_tools.xyxy2xywh', 'xyxy2xywh', (['boxes'], {}), '(boxes)\n', (7816, 7823), False, 'from utils.bbox_tools import xywh2xyxy, xyxy2xywh, draw_bbox, grid_analysis\n'), ((4291, 4313), 'numpy.round', 'np.round', (['(box_whir * n)'], {}), '(box_whir * n)\n', (4299, 4313), True, 'import numpy as np\n'), ((5902, 5921), 'numpy.round', 'np.round', (['(box_c * n)'], {}), '(box_c * n)\n', (5910, 5921), True, 'import numpy as np\n'), ((6556, 6577), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', 
(6564, 6577), True, 'import numpy as np\n'), ((653, 668), 'numpy.array', 'np.array', (['img_w'], {}), '(img_w)\n', (661, 668), True, 'import numpy as np\n'), ((687, 702), 'numpy.array', 'np.array', (['img_h'], {}), '(img_h)\n', (695, 702), True, 'import numpy as np\n'), ((1226, 1248), 'numpy.round', 'np.round', (['(img_wh / 100)'], {}), '(img_wh / 100)\n', (1234, 1248), True, 'import numpy as np\n'), ((2418, 2437), 'numpy.array', 'np.array', (['box_w_tmp'], {}), '(box_w_tmp)\n', (2426, 2437), True, 'import numpy as np\n'), ((2456, 2475), 'numpy.array', 'np.array', (['box_h_tmp'], {}), '(box_h_tmp)\n', (2464, 2475), True, 'import numpy as np\n'), ((2974, 2996), 'math.ceil', 'math.ceil', (['(max_v / 110)'], {}), '(max_v / 110)\n', (2983, 2996), False, 'import math\n'), ((3078, 3099), 'numpy.round', 'np.round', (['(box_wh / 10)'], {}), '(box_wh / 10)\n', (3086, 3099), True, 'import numpy as np\n'), ((4147, 4166), 'numpy.array', 'np.array', (['box_w_tmp'], {}), '(box_w_tmp)\n', (4155, 4166), True, 'import numpy as np\n'), ((4185, 4204), 'numpy.array', 'np.array', (['box_h_tmp'], {}), '(box_h_tmp)\n', (4193, 4204), True, 'import numpy as np\n'), ((5756, 5776), 'numpy.array', 'np.array', (['box_cx_tmp'], {}), '(box_cx_tmp)\n', (5764, 5776), True, 'import numpy as np\n'), ((5795, 5815), 'numpy.array', 'np.array', (['box_cy_tmp'], {}), '(box_cy_tmp)\n', (5803, 5815), True, 'import numpy as np\n'), ((1048, 1070), 'numpy.max', 'np.max', (['img_wh[..., 0]'], {}), '(img_wh[..., 0])\n', (1054, 1070), True, 'import numpy as np\n'), ((1113, 1135), 'numpy.max', 'np.max', (['img_wh[..., 1]'], {}), '(img_wh[..., 1])\n', (1119, 1135), True, 'import numpy as np\n'), ((1887, 1902), 'numpy.array', 'np.array', (['img_w'], {}), '(img_w)\n', (1895, 1902), True, 'import numpy as np\n'), ((2049, 2064), 'numpy.array', 'np.array', (['img_h'], {}), '(img_h)\n', (2057, 2064), True, 'import numpy as np\n'), ((2821, 2843), 'numpy.max', 'np.max', (['box_wh[..., 0]'], {}), '(box_wh[..., 0])\n', 
(2827, 2843), True, 'import numpy as np\n'), ((2884, 2906), 'numpy.max', 'np.max', (['box_wh[..., 1]'], {}), '(box_wh[..., 1])\n', (2890, 2906), True, 'import numpy as np\n'), ((4974, 4993), 'numpy.array', 'np.array', (['box_w_tmp'], {}), '(box_w_tmp)\n', (4982, 4993), True, 'import numpy as np\n'), ((5145, 5164), 'numpy.array', 'np.array', (['box_h_tmp'], {}), '(box_h_tmp)\n', (5153, 5164), True, 'import numpy as np\n'), ((5316, 5335), 'numpy.array', 'np.array', (['box_w_tmp'], {}), '(box_w_tmp)\n', (5324, 5335), True, 'import numpy as np\n'), ((5338, 5357), 'numpy.array', 'np.array', (['box_h_tmp'], {}), '(box_h_tmp)\n', (5346, 5357), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm
import pickle
import matplotlib.pyplot as plt
import itertools
from scipy.stats import norm as norm_d
from scipy.stats import expon
from scipy.stats import weibull_min as weibull
from scipy.stats import burr12 as burr
from scipy.stats import randint
from scipy.stats import uniform
from scipy.optimize import minimize
from scipy.signal import lfilter
from scipy.signal import savgol_filter
import copy
import math
import time
from scipy.optimize import minimize
from scipy.sparse.linalg import svds
from scipy.linalg import svdvals
import scipy
from sklearn.datasets import load_svmlight_file
import pickle
from pathlib import Path
def prepare_data(dataset):
    """Load a LIBSVM-format dataset and normalise labels to {-1, +1}.

    Returns (A, y, m, n, sparsity_A): feature matrix, labels, number of
    samples, number of features, and fraction of non-zero entries in A.
    """
    path = "datasets/" + dataset + ".txt"
    A, y = load_svmlight_file(path)
    m, n = A.shape
    # Map the two common label encodings ({1,2} and {2,4}) onto {-1,+1}.
    if (2 in y) and (1 in y):
        y = 2 * y - 3
    if (2 in y) and (4 in y):
        y = y - 3
    assert (-1 in y) and (1 in y)
    sparsity_A = A.count_nonzero() / (m * n)
    return A, y, m, n, sparsity_A
def prepare_data_distrib(dataset, data_size, num_of_workers):
    """Load a LIBSVM dataset, truncate it to ``data_size`` rows and split the
    row indices evenly (and randomly) across ``num_of_workers`` workers.

    Returns (A, y, m, n, sparsity_A, data_split) where row ``w`` of
    ``data_split`` holds the shuffled row indices assigned to worker ``w``.
    ``data_size`` must be an exact multiple of ``num_of_workers``.
    """
    path = "datasets/" + dataset + ".txt"
    A, y = load_svmlight_file(path)
    m, n = A.shape
    assert data_size <= m
    rows_per_worker = int(data_size * 1.0 / num_of_workers)
    usable_rows = rows_per_worker * num_of_workers
    A = A[0:usable_rows]
    y = y[0:usable_rows]
    m, n = A.shape
    assert data_size == usable_rows
    # Shuffle row indices, then hand out consecutive chunks of the permutation.
    perm = np.random.permutation(m)
    data_split = perm[0:rows_per_worker]
    for worker in range(1, num_of_workers):
        chunk = perm[worker * rows_per_worker:(worker + 1) * rows_per_worker]
        data_split = np.vstack((data_split, chunk))
    # Map the two common label encodings ({1,2} and {2,4}) onto {-1,+1}.
    if (2 in y) and (1 in y):
        y = 2 * y - 3
    if (2 in y) and (4 in y):
        y = y - 3
    assert (-1 in y) and (1 in y)
    sparsity_A = A.count_nonzero() / (m * n)
    return A, y, m, n, sparsity_A, data_split
def compute_L(dataset, A):
    """Smoothness constants of the loss induced by data matrix ``A``.

    Returns ``(L, average_L, worst_L)``: the full-gradient constant
    ``sigma_max(A)**2 / (4 m)``, the mean of the per-row constants
    ``||a_i||**2 / 4`` and their maximum.  Results are cached on disk under
    ``dump/<dataset>_L.txt`` and re-read on subsequent calls.
    """
    cache_file = "dump/" + dataset + "_L.txt"
    if Path(cache_file).is_file():
        # Cache hit: reuse the previously computed constants.
        with open(cache_file, 'rb') as fh:
            L, average_L, worst_L = pickle.load(fh)
        return L, average_L, worst_L
    singular_values = svds(A, return_singular_vectors=False)
    num_rows = A.shape[0]
    L = singular_values.max() ** 2 / (4 * num_rows)
    dense_rows = A.toarray()
    per_row_L = [(norm(dense_rows[i]) ** 2) * 1.0 / 4 for i in range(num_rows)]
    worst_L = max(per_row_L) if per_row_L else 0
    average_L = sum(row_L / num_rows for row_L in per_row_L)
    with open(cache_file, 'wb') as fh:
        pickle.dump([L, average_L, worst_L], fh)
    return L, average_L, worst_L
def compute_L_distrib(dataset, A):
    """Same smoothness constants as :func:`compute_L`, but the disk cache key
    additionally includes the number of rows of ``A`` so that different
    truncations of the same dataset do not collide.

    Returns ``(L, average_L, worst_L)``.
    """
    # NOTE: the historical cache name contains a double underscore before "L".
    cache_file = "dump/" + dataset + "_" + str(A.shape[0]) + "__L.txt"
    if Path(cache_file).is_file():
        # Cache hit: reuse the previously computed constants.
        with open(cache_file, 'rb') as fh:
            L, average_L, worst_L = pickle.load(fh)
        return L, average_L, worst_L
    singular_values = svds(A, return_singular_vectors=False)
    num_rows = A.shape[0]
    L = singular_values.max() ** 2 / (4 * num_rows)
    dense_rows = A.toarray()
    per_row_L = [(norm(dense_rows[i]) ** 2) * 1.0 / 4 for i in range(num_rows)]
    worst_L = max(per_row_L) if per_row_L else 0
    average_L = sum(row_L / num_rows for row_L in per_row_L)
    with open(cache_file, 'wb') as fh:
        pickle.dump([L, average_L, worst_L], fh)
    return L, average_L, worst_L
def save_split(dataset, num_of_workers, data_split):
    """Pickle a worker data split to dump/<dataset>_split_num_of_workers_<k>.txt."""
    path = "dump/" + dataset + "_split_num_of_workers_" + str(num_of_workers) + ".txt"
    with open(path, 'wb') as fh:
        pickle.dump(data_split, fh)
def read_split(dataset, num_of_workers):
    """Load a worker data split previously written by :func:`save_split`."""
    path = "dump/" + dataset + "_split_num_of_workers_" + str(num_of_workers) + ".txt"
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def save_problem(dataset, num_of_workers, params):
    """Pickle problem parameters to dump/<dataset>_problem_num_of_workers_<k>.txt."""
    path = "dump/" + dataset + "_problem_num_of_workers_" + str(num_of_workers) + ".txt"
    with open(path, 'wb') as fh:
        pickle.dump(params, fh)
def read_problem(dataset, num_of_workers):
    """Load problem parameters previously written by :func:`save_problem`."""
    path = "dump/" + dataset + "_problem_num_of_workers_" + str(num_of_workers) + ".txt"
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def save_solution(dataset, l2, l1, x_star, f_star):
    """Pickle the optimum ``[x_star, f_star]`` keyed by the regularisers."""
    path = "dump/" + dataset + "_solution_l2_" + str(l2) + "_l1_" + str(l1) + ".txt"
    with open(path, 'wb') as fh:
        pickle.dump([x_star, f_star], fh)
def read_solution(dataset, l2, l1):
    """Load ``[x_star, f_star]`` previously written by :func:`save_solution`."""
    path = 'dump/' + dataset + '_solution_l2_' + str(l2) + "_l1_" + str(l1) + ".txt"
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def read_results_from_file(filename, method, args):
    """Load the pickled results that a training run stored under dump/.

    The file name encodes the method and its hyper-parameters; ``args`` must
    be ordered exactly as the writer ordered them (step size first, the
    sparsificator / quantization names last).  Returns None for an unknown
    ``method`` name.
    """
    # One filename-suffix template per supported method; "{}" formatting of
    # numbers and plain strings is identical to the historical str() + "+"
    # concatenation.
    suffix_templates = {
        "EC_SGD_const_stepsize": "_EC_SGD_const_stepsize_gamma_{}_l2_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}",
        "EC_L-SVRG-DIANA": "_EC_L_SVRG_DIANA_gamma_{}_l2_{}_alpha_{}_p_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}_quantization_{}",
        "EC_L-SVRG": "_EC_L_SVRG_gamma_{}_l2_{}_p_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}",
        "EC_GD_const_stepsize": "_EC_GD_const_stepsize_gamma_{}_l2_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}",
        "EC_GD_star_const_stepsize": "_EC_GD_star_const_stepsize_gamma_{}_l2_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}",
        "EC_DIANA_GD": "_EC_DIANA_GD_gamma_{}_alpha_{}_l2_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}_quantization_{}",
        "EC_DIANA_SGD": "_EC_DIANA_SGD_gamma_{}_alpha_{}_l2_{}_num_of_epochs_{}_num_of_workers_{}_sparsificator_{}_quantization_{}",
    }
    suffix = suffix_templates.get(method)
    if suffix is None:
        # Unknown method: the original if-chain fell through and returned None.
        return None
    path = 'dump/' + filename + suffix.format(*args) + ".txt"
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def make_plots(args):
    """Render convergence plots for several methods on one figure.

    ``args`` is a positional list:
      0: filename  -- file prefix handed to read_results_from_file
      1: mode_y    -- metric on the y-axis (must be in supported_modes_y)
      2: mode_x    -- quantity on the x-axis (must be in supported_modes_x)
      3: figsize   -- matplotlib figure size
      4: sizes     -- [title, linewidth, markersize, legend, xlabel, ylabel,
                      xticks, yticks] font/line sizes
      5: title     -- figure title
      6: methods   -- list of tuples (method_name, method_args, legend_label,
                      length or None to plot the full run)
      7: bbox_to_anchor, 8: legend_loc -- legend placement
      9: save_fig  -- (save?, filename under plot/)
    """
    supported_modes_y = ['func_vals', 'squared_distances', 'bits', 'avg_ecgrad_norms', 'avg_grad_norms', 'avg_error_norms', 'avg_ecgrad_topks', 'total_bits', 'non-zero-density']
    supported_modes_x = ['time', 'data_passes', 'iters', 'bits']
    # Unpack the positional argument list.
    filename = args[0]
    mode_y = args[1]
    mode_x = args[2]
    figsize = args[3]
    sizes = args[4]
    title = args[5]
    methods = args[6]
    bbox_to_anchor = args[7]
    legend_loc = args[8]
    save_fig = args[9]
    title_size = sizes[0]
    linewidth = sizes[1]
    markersize = sizes[2]
    legend_size = sizes[3]
    xlabel_size = sizes[4]
    ylabel_size = sizes[5]
    xticks_size = sizes[6]
    yticks_size = sizes[7]
    assert(mode_y in supported_modes_y)
    assert(mode_x in supported_modes_x)
    # One shared figure; markers and colours cycle per method, and marker
    # positions are staggered per method via markevery so they don't overlap.
    fig = plt.figure(figsize=figsize)
    plt.title(title, fontsize=title_size)
    marker = itertools.cycle(('+', 'd', 'x', 'o', '^', 's', '*', 'p', '<', '>', '^'))
    color = itertools.cycle((('tab:blue', 'tab:red', 'tab:green', 'tab:orange', 'tab:purple', 'tab:cyan', 'tab:olive', 'tab:brown')))
    num_of_methods = len(methods)
    for idx, method in enumerate(methods):
        res = read_results_from_file(filename, method[0], method[1])
        # method[3] caps how many recorded iterations are plotted.
        if method[3] == None:
            length = len(res['iters'])
        else:
            length = method[3]
        #print("Length=", length)
        # Each y-mode needs a slightly different transform / axis scaling.
        if mode_y == 'avg_ecgrad_norms' or mode_y == 'avg_grad_norms' or mode_y == 'avg_error_norms' or mode_y == 'avg_ecgrad_topks':
            plt.semilogy(res[mode_x][1:length], res[mode_y][1:length], linewidth=linewidth, marker=next(marker),
                     markersize = markersize,
                     markevery=range(-idx*int((length-1)/(10*num_of_methods)), len(res[mode_x][1:length]), int((length-1)/10)),
                     label = method[2], color=next(color))
        # plt.plot(res[mode_x][1:length], res[mode_y][1:length], linewidth=linewidth, marker=next(marker),
        #              markersize = markersize,
        #              markevery=range(-idx*int((length-1)/(10*num_of_methods)), len(res[mode_x][1:length]), int((length-1)/10)),
        #              label = method[2], color=next(color))
        elif mode_y == 'bits':
            # Differentiate the cumulative bit counter to get bits per iteration.
            bits_prev = np.insert(res[mode_y][0:length-1], 0, 0, axis=0)
            plt.plot(res[mode_x][1:length], res[mode_y][1:length] - bits_prev[1:length], linewidth=linewidth, marker=next(marker),
                     markersize = markersize,
                     markevery=range(-idx*int((length-1)/(10*num_of_methods)), len(res[mode_x][1:length]), int((length-1)/10)),
                     label = method[2], color=next(color))
        elif mode_y == 'non-zero-density':
            min_length=0
            # 1 where the iteration transmitted any bits, 0 otherwise.
            bits_prev = np.insert(res['bits'][0:length-1], 0, 0, axis=0)
            plt.scatter(res[mode_x][min_length:length], (res['bits'][min_length:length] - bits_prev[min_length:length]!=0).astype(int), linewidth=linewidth,s=markersize, label = method[2], color=next(color))
        elif mode_y == 'squared_distances':
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
            plt.semilogy(res[mode_x][0:length], res[mode_y][0:length], linewidth=linewidth, marker=next(marker),
                     markersize = markersize,
                     markevery=range(-idx*int(length/(10*num_of_methods)), len(res[mode_x][0:length]), int(length/10)),
                     label = method[2], color=next(color))
        elif mode_x == 'bits':
            # Normalise the metric by its initial value when plotting vs bits.
            print("Initial "+mode_y+" is:", res[mode_y][0])
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
            plt.semilogy(res[mode_x][0:length], res[mode_y][0:length] / res[mode_y][0], linewidth=linewidth, marker=next(marker),
                     markersize = markersize,
                     markevery=range(-idx*int(length/(10*num_of_methods)), len(res[mode_x][0:length]), int(length/10)),
                     label = method[2], color=next(color))
        else:
            print("Initial "+mode_y+" is:", res[mode_y][0])
            plt.semilogy(res[mode_x][0:length], res[mode_y][0:length], linewidth=linewidth, marker=next(marker),
                     markersize = markersize,
                     markevery=range(-idx*int(length/(10*num_of_methods)), len(res[mode_x][0:length]), int(length/10)),
                     label = method[2], color=next(color))
    plt.legend(bbox_to_anchor=bbox_to_anchor, loc=legend_loc, fontsize=legend_size)
    # Axis labels follow from the selected modes.
    if mode_x == 'bits':
        plt.xlabel(r"Number of bits per worker", fontsize=xlabel_size)
    if mode_x == 'time':
        plt.xlabel(r"Time, $s$", fontsize=xlabel_size)
    if mode_x == 'data_passes':
        plt.xlabel(r"Epoch Number", fontsize=xlabel_size)
    if mode_x == 'iters':
        plt.xlabel(r"Iteration Number", fontsize=xlabel_size)
    if mode_y == 'func_vals':
        #plt.ylabel(r"$\frac{f(x_t)-f(x_*)}{f(x_0)-f(x_*)}$", fontsize=ylabel_size)
        plt.ylabel(r"$f(x_t)-f(x_*)$", fontsize=ylabel_size)
    if mode_y == 'squared_distances':
        plt.ylabel(r"$||x_t - x_*||_2^2$", fontsize=ylabel_size)
    if mode_y == 'bits':
        plt.ylabel(r"$k_t$", fontsize=xlabel_size)
    if mode_y == 'avg_ecgrad_norms':
        plt.ylabel(r"$\frac{1}{n}\sum_{i=1}^n||p^i_t||^2$", fontsize=xlabel_size)
    if mode_y == 'avg_grad_norms':
        plt.ylabel(r"$\frac{1}{n}\sum_{i=1}^n||\gamma_t g^i_t||^2$", fontsize=xlabel_size)
    if mode_y == 'avg_error_norms':
        plt.ylabel(r"$\frac{1}{n}\sum_{i=1}^n||e^i_t||^2$", fontsize=xlabel_size)
    if mode_y == 'avg_ecgrad_topks':
        plt.ylabel(r"$\frac{1}{n}\sum_{i=1}^n$ Top-10$(|e^i_t+\gamma_t g^i_t|)$", fontsize=xlabel_size)
    if mode_y == 'non-zero-density':
        plt.ylabel(r"$k_t \neq 0$", fontsize=xlabel_size)
    plt.grid(True, linewidth=0.5, linestyle='-')
    plt.xticks(fontsize=xticks_size)
    _ = plt.yticks(fontsize=yticks_size)
    ax = fig.gca()
    ax.xaxis.offsetText.set_fontsize(xlabel_size - 2)
    ax.yaxis.offsetText.set_fontsize(ylabel_size - 2)
    if save_fig[0]:
        plt.savefig("plot/"+save_fig[1], bbox_inches='tight')
| [
"matplotlib.pyplot.title",
"pickle.dump",
"pathlib.Path",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.linalg.norm",
"itertools.cycle",
"scipy.sparse.linalg.svds",
"matplotlib.pyplot.yticks",
"numpy.insert",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.legend",
"sklearn.datasets.load_... | [((769, 797), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['filename'], {}), '(filename)\n', (787, 797), False, 'from sklearn.datasets import load_svmlight_file\n'), ((1185, 1213), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['filename'], {}), '(filename)\n', (1203, 1213), False, 'from sklearn.datasets import load_svmlight_file\n'), ((1542, 1566), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (1563, 1566), True, 'import numpy as np\n'), ((2066, 2080), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2070, 2080), False, 'from pathlib import Path\n'), ((2836, 2850), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2840, 2850), False, 'from pathlib import Path\n'), ((8053, 8080), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (8063, 8080), True, 'import matplotlib.pyplot as plt\n'), ((8085, 8122), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': 'title_size'}), '(title, fontsize=title_size)\n', (8094, 8122), True, 'import matplotlib.pyplot as plt\n'), ((8136, 8208), 'itertools.cycle', 'itertools.cycle', (["('+', 'd', 'x', 'o', '^', 's', '*', 'p', '<', '>', '^')"], {}), "(('+', 'd', 'x', 'o', '^', 's', '*', 'p', '<', '>', '^'))\n", (8151, 8208), False, 'import itertools\n'), ((8221, 8344), 'itertools.cycle', 'itertools.cycle', (["('tab:blue', 'tab:red', 'tab:green', 'tab:orange', 'tab:purple', 'tab:cyan',\n 'tab:olive', 'tab:brown')"], {}), "(('tab:blue', 'tab:red', 'tab:green', 'tab:orange',\n 'tab:purple', 'tab:cyan', 'tab:olive', 'tab:brown'))\n", (8236, 8344), False, 'import itertools\n'), ((11617, 11696), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': 'bbox_to_anchor', 'loc': 'legend_loc', 'fontsize': 'legend_size'}), '(bbox_to_anchor=bbox_to_anchor, loc=legend_loc, fontsize=legend_size)\n', (11627, 11696), True, 'import matplotlib.pyplot as plt\n'), ((13013, 13057), 
'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'linewidth': '(0.5)', 'linestyle': '"""-"""'}), "(True, linewidth=0.5, linestyle='-')\n", (13021, 13057), True, 'import matplotlib.pyplot as plt\n'), ((13062, 13094), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': 'xticks_size'}), '(fontsize=xticks_size)\n', (13072, 13094), True, 'import matplotlib.pyplot as plt\n'), ((13103, 13135), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'yticks_size'}), '(fontsize=yticks_size)\n', (13113, 13135), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1762), 'numpy.vstack', 'np.vstack', (['(data_split, perm[(i + 1) * size_of_local_data:(i + 2) * size_of_local_data])'], {}), '((data_split, perm[(i + 1) * size_of_local_data:(i + 2) *\n size_of_local_data]))\n', (1679, 1762), True, 'import numpy as np\n'), ((2233, 2271), 'scipy.sparse.linalg.svds', 'svds', (['A'], {'return_singular_vectors': '(False)'}), '(A, return_singular_vectors=False)\n', (2237, 2271), False, 'from scipy.sparse.linalg import svds\n'), ((3003, 3041), 'scipy.sparse.linalg.svds', 'svds', (['A'], {'return_singular_vectors': '(False)'}), '(A, return_singular_vectors=False)\n', (3007, 3041), False, 'from scipy.sparse.linalg import svds\n'), ((3674, 3703), 'pickle.dump', 'pickle.dump', (['data_split', 'file'], {}), '(data_split, file)\n', (3685, 3703), False, 'import pickle\n'), ((3891, 3908), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3902, 3908), False, 'import pickle\n'), ((4093, 4118), 'pickle.dump', 'pickle.dump', (['params', 'file'], {}), '(params, file)\n', (4104, 4118), False, 'import pickle\n'), ((4310, 4327), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4321, 4327), False, 'import pickle\n'), ((4506, 4541), 'pickle.dump', 'pickle.dump', (['[x_star, f_star]', 'file'], {}), '([x_star, f_star], file)\n', (4517, 4541), False, 'import pickle\n'), ((4686, 4703), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4697, 4703), False, 'import 
pickle\n'), ((11730, 11791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of bits per worker"""'], {'fontsize': 'xlabel_size'}), "('Number of bits per worker', fontsize=xlabel_size)\n", (11740, 11791), True, 'import matplotlib.pyplot as plt\n'), ((11826, 11871), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time, $s$"""'], {'fontsize': 'xlabel_size'}), "('Time, $s$', fontsize=xlabel_size)\n", (11836, 11871), True, 'import matplotlib.pyplot as plt\n'), ((11913, 11961), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch Number"""'], {'fontsize': 'xlabel_size'}), "('Epoch Number', fontsize=xlabel_size)\n", (11923, 11961), True, 'import matplotlib.pyplot as plt\n'), ((11997, 12049), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration Number"""'], {'fontsize': 'xlabel_size'}), "('Iteration Number', fontsize=xlabel_size)\n", (12007, 12049), True, 'import matplotlib.pyplot as plt\n'), ((12173, 12224), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$f(x_t)-f(x_*)$"""'], {'fontsize': 'ylabel_size'}), "('$f(x_t)-f(x_*)$', fontsize=ylabel_size)\n", (12183, 12224), True, 'import matplotlib.pyplot as plt\n'), ((12272, 12327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$||x_t - x_*||_2^2$"""'], {'fontsize': 'ylabel_size'}), "('$||x_t - x_*||_2^2$', fontsize=ylabel_size)\n", (12282, 12327), True, 'import matplotlib.pyplot as plt\n'), ((12362, 12403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$k_t$"""'], {'fontsize': 'xlabel_size'}), "('$k_t$', fontsize=xlabel_size)\n", (12372, 12403), True, 'import matplotlib.pyplot as plt\n'), ((12450, 12524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\frac{1}{n}\\\\sum_{i=1}^n||p^i_t||^2$"""'], {'fontsize': 'xlabel_size'}), "('$\\\\frac{1}{n}\\\\sum_{i=1}^n||p^i_t||^2$', fontsize=xlabel_size)\n", (12460, 12524), True, 'import matplotlib.pyplot as plt\n'), ((12567, 12656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\frac{1}{n}\\\\sum_{i=1}^n||\\\\gamma_t g^i_t||^2$"""'], {'fontsize': 
'xlabel_size'}), "('$\\\\frac{1}{n}\\\\sum_{i=1}^n||\\\\gamma_t g^i_t||^2$', fontsize=\n xlabel_size)\n", (12577, 12656), True, 'import matplotlib.pyplot as plt\n'), ((12694, 12768), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\frac{1}{n}\\\\sum_{i=1}^n||e^i_t||^2$"""'], {'fontsize': 'xlabel_size'}), "('$\\\\frac{1}{n}\\\\sum_{i=1}^n||e^i_t||^2$', fontsize=xlabel_size)\n", (12704, 12768), True, 'import matplotlib.pyplot as plt\n'), ((12813, 12914), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\frac{1}{n}\\\\sum_{i=1}^n$ Top-10$(|e^i_t+\\\\gamma_t g^i_t|)$"""'], {'fontsize': 'xlabel_size'}), "('$\\\\frac{1}{n}\\\\sum_{i=1}^n$ Top-10$(|e^i_t+\\\\gamma_t g^i_t|)$',\n fontsize=xlabel_size)\n", (12823, 12914), True, 'import matplotlib.pyplot as plt\n'), ((12954, 13003), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$k_t \\\\neq 0$"""'], {'fontsize': 'xlabel_size'}), "('$k_t \\\\neq 0$', fontsize=xlabel_size)\n", (12964, 13003), True, 'import matplotlib.pyplot as plt\n'), ((13301, 13356), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plot/' + save_fig[1])"], {'bbox_inches': '"""tight"""'}), "('plot/' + save_fig[1], bbox_inches='tight')\n", (13312, 13356), True, 'import matplotlib.pyplot as plt\n'), ((2188, 2205), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2199, 2205), False, 'import pickle\n'), ((2645, 2687), 'pickle.dump', 'pickle.dump', (['[L, average_L, worst_L]', 'file'], {}), '([L, average_L, worst_L], file)\n', (2656, 2687), False, 'import pickle\n'), ((2958, 2975), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2969, 2975), False, 'import pickle\n'), ((3415, 3457), 'pickle.dump', 'pickle.dump', (['[L, average_L, worst_L]', 'file'], {}), '([L, average_L, worst_L], file)\n', (3426, 3457), False, 'import pickle\n'), ((5069, 5086), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5080, 5086), False, 'import pickle\n'), ((5473, 5490), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5484, 5490), False, 
'import pickle\n'), ((5798, 5815), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5809, 5815), False, 'import pickle\n'), ((6125, 6142), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (6136, 6142), False, 'import pickle\n'), ((6462, 6479), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (6473, 6479), False, 'import pickle\n'), ((6838, 6855), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (6849, 6855), False, 'import pickle\n'), ((7216, 7233), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (7227, 7233), False, 'import pickle\n'), ((9499, 9549), 'numpy.insert', 'np.insert', (['res[mode_y][0:length - 1]', '(0)', '(0)'], {'axis': '(0)'}), '(res[mode_y][0:length - 1], 0, 0, axis=0)\n', (9508, 9549), True, 'import numpy as np\n'), ((9992, 10042), 'numpy.insert', 'np.insert', (["res['bits'][0:length - 1]", '(0)', '(0)'], {'axis': '(0)'}), "(res['bits'][0:length - 1], 0, 0, axis=0)\n", (10001, 10042), True, 'import numpy as np\n'), ((2460, 2475), 'numpy.linalg.norm', 'norm', (['denseA[i]'], {}), '(denseA[i])\n', (2464, 2475), False, 'from numpy.linalg import norm\n'), ((3230, 3245), 'numpy.linalg.norm', 'norm', (['denseA[i]'], {}), '(denseA[i])\n', (3234, 3245), False, 'from numpy.linalg import norm\n'), ((10305, 10366), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (10325, 10366), True, 'import matplotlib.pyplot as plt\n'), ((10795, 10856), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (10815, 10856), True, 'import matplotlib.pyplot as plt\n')] |
# Repository: https://gitlab.com/quantify-os/quantify-scheduler
# Licensed according to the LICENCE file on the master branch
"""
Contains function to generate most basic waveforms.
These functions are intended to be used to generate waveforms defined in the
:mod:`~.pulse_library`.
Examples of waveforms that are too advanced are flux pulses that require knowledge of
the flux sensitivity and interaction strengths and qubit frequencies.
"""
import numpy as np
from scipy import signal
from typing import Union, List
def square(t: Union[np.ndarray, List[float]], amp: Union[float, complex]) -> np.ndarray:
    """Constant ("square") envelope of value *amp*, one sample per entry of *t*."""
    ones_envelope = np.ones(len(t))
    return ones_envelope * amp
def square_imaginary(
    t: Union[np.ndarray, List[float]], amp: Union[float, complex]
) -> np.ndarray:
    """Constant envelope of value *amp* placed on the imaginary (Q) axis."""
    # Equivalent to square(t, 1j * amp): a constant, purely imaginary envelope.
    return (1j * amp) * np.ones(len(t))
def ramp(t, amp, offset=0) -> np.ndarray:
    """Linear ramp from *offset* to *amp* + *offset*, one sample per entry of *t*."""
    num_samples = len(t)
    return np.linspace(offset, amp + offset, num_samples)
def staircase(
    t: Union[np.ndarray, List[float]],
    start_amp: Union[float, complex],
    final_amp: Union[float, complex],
    num_steps: int,
) -> np.ndarray:
    """
    Ramp from ``start_amp`` to ``final_amp`` in ``num_steps`` equal, discrete
    steps.

    Parameters
    ----------
    t
        Times at which to evaluate the function.
    start_amp
        Amplitude of the first step.
    final_amp
        Final amplitude to reach on the last step.
    num_steps
        Number of steps to reach the final value.

    Returns
    -------
    :
        The real valued waveform.
    """
    amp_step = (final_amp - start_amp) / (num_steps - 1)
    plateau_len = int(len(t) // num_steps)
    segments = []
    for step in range(num_steps):
        seg_t = t[step * plateau_len : (step + 1) * plateau_len]
        # Constant plateau at the current amplitude level.
        segments.append(step * amp_step * np.ones(len(seg_t)) + start_amp)
    # Leftover samples (when len(t) is not divisible by num_steps) stay at the
    # final amplitude.
    leftover_t = t[num_steps * plateau_len :]
    segments.append(final_amp * np.ones(len(leftover_t)))
    return np.concatenate(segments)
def soft_square(t, amp):
    """Square pulse with edges softened by convolving with a Hann window.

    Parameters
    ----------
    t
        Sample times (only the number of samples matters).
    amp
        Amplitude of the underlying square pulse.
    """
    num_samples = len(t)
    # A plain constant envelope; a single sample cannot be smoothed.
    envelope = amp * np.ones(num_samples)
    if num_samples > 1:
        hann_window = signal.windows.hann(int(num_samples / 2))
        # Normalise by the window sum so the plateau keeps amplitude `amp`.
        envelope = signal.convolve(envelope, hann_window, mode="same") / sum(hann_window)
    return envelope
def chirp(t: np.ndarray, amp: float, start_freq: float, end_freq: float) -> np.ndarray:
    r"""
    Produce a linear chirp: the instantaneous frequency sweeps linearly from
    ``start_freq`` (at ``t[0]``) to ``end_freq`` (at ``t[-1]``):

    .. math:

        f(t) = ct + f_0,
        c = \frac{f_1 - f_0}{T}

    Parameters
    ----------
    t
        Times at which to evaluate the function.
    amp
        Amplitude of the envelope.
    start_freq
        Start frequency of the chirp.
    end_freq
        End frequency of the chirp.

    Returns
    -------
    :
        The complex waveform.
    """
    sweep_rate = (end_freq - start_freq) / (t[-1] - t[0])
    # Phase is the time integral of the instantaneous frequency.
    instantaneous_phase = 2 * np.pi * (sweep_rate * t / 2 + start_freq) * t
    return amp * np.exp(1.0j * instantaneous_phase)
# pylint: disable=too-many-arguments
def drag(
    t: np.ndarray,
    G_amp: float,
    D_amp: float,
    duration: float,
    nr_sigma: int = 3,
    phase: float = 0,
    subtract_offset: str = "average",
) -> np.ndarray:
    r"""
    Generates a DRAG pulse consisting of a Gaussian :math:`G` as the I- and a
    Derivative :math:`D` as the Q-component (:cite:t:`motzoi_simple_2009` and
    :cite:t:`gambetta_analytic_2011`).

    All inputs are in s and Hz.
    phases are in degree.

    :math:`G(t) = G_{amp} e^{-(t-\mu)^2/(2\sigma^2)}`.

    :math:`D(t) = -D_{amp} \frac{(t-\mu)}{\sigma} G(t)`.

    .. note:

        One would expect a factor :math:`1/\sigma^2` in the prefactor of
        :math:`1/\sigma^2`, we absorb this in the scaling factor :math:`D_{amp}` to
        ensure the derivative component is scale invariant with the duration of
        the pulse.

    Parameters
    ----------
    t
        Times at which to evaluate the function.
    G_amp
        Amplitude of the Gaussian envelope.
    D_amp
        Amplitude of the derivative component, the DRAG-pulse parameter.
    duration
        Duration of the pulse in seconds.
    nr_sigma
        After how many sigma the Gaussian is cut off.
    phase
        Phase of the pulse in degrees.
    subtract_offset
        Instruction on how to subtract the offset in order to avoid jumps in the
        waveform due to the cut-off.

        - 'average': subtract the average of the first and last point.
        - 'first': subtract the value of the waveform at the first sample.
        - 'last': subtract the value of the waveform at the last sample.
        - 'none', None: don't subtract any offset.

    Returns
    -------
    :
        complex waveform
    """
    mu = t[0] + duration / 2

    sigma = duration / (2 * nr_sigma)

    gauss_env = G_amp * np.exp(-(0.5 * ((t - mu) ** 2) / sigma ** 2))
    deriv_gauss_env = -D_amp * (t - mu) / (sigma ** 1) * gauss_env

    # Subtract offsets.
    # BUGFIX: test `is None` *before* calling .lower(); the previous order
    # called None.lower() and raised AttributeError even though None is a
    # documented, valid value for subtract_offset.
    if subtract_offset is None or subtract_offset.lower() == "none":
        # Do not subtract offset
        pass
    elif subtract_offset.lower() == "average":
        gauss_env -= (gauss_env[0] + gauss_env[-1]) / 2.0
        deriv_gauss_env -= (deriv_gauss_env[0] + deriv_gauss_env[-1]) / 2.0
    elif subtract_offset.lower() == "first":
        gauss_env -= gauss_env[0]
        deriv_gauss_env -= deriv_gauss_env[0]
    elif subtract_offset.lower() == "last":
        gauss_env -= gauss_env[-1]
        deriv_gauss_env -= deriv_gauss_env[-1]
    else:
        raise ValueError(
            'Unknown value "{}" for keyword argument subtract_offset".'.format(
                subtract_offset
            )
        )

    # Combine the Gaussian (I) and derivative (Q) components, then apply the
    # requested phase rotation.
    drag_wave = gauss_env + 1j * deriv_gauss_env

    # Apply phase rotation
    rot_drag_wave = rotate_wave(drag_wave, phase=phase)

    return rot_drag_wave
# ----------------------------------
# Utility functions
# ----------------------------------
def rotate_wave(wave: np.ndarray, phase: float) -> np.ndarray:
"""
Rotate a wave in the complex plane.
Parameters
----------
wave
Complex waveform, real component corresponds to I, imaginary component to Q.
phase
Rotation angle in degrees.
Returns
-------
:
Rotated complex waveform.
"""
angle = np.deg2rad(phase)
rot = (np.cos(angle) + 1.0j * np.sin(angle)) * wave
return rot
def modulate_wave(t: np.ndarray, wave: np.ndarray, freq_mod: float) -> np.ndarray:
"""
Apply single sideband (SSB) modulation to a waveform.
The frequency convention we adhere to is:
freq_base + freq_mod = freq_signal
Parameters
----------
t :
Times at which to determine the modulation.
wave :
Complex waveform, real component corresponds to I, imaginary component to Q.
freq_mod :
Modulation frequency in Hz.
Returns
-------
:
modulated waveform.
.. note::
Pulse modulation is generally not included when specifying waveform envelopes
as there are many hardware backends include this capability.
"""
cos_mod = np.cos(2 * np.pi * freq_mod * t)
sin_mod = np.sin(2 * np.pi * freq_mod * t)
mod_I = cos_mod * wave.real + sin_mod * wave.imag
mod_Q = -sin_mod * wave.real + cos_mod * wave.imag
return mod_I + 1j * mod_Q
| [
"numpy.deg2rad",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.cos",
"scipy.signal.convolve"
] | [((1560, 1572), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1568, 1572), True, 'import numpy as np\n'), ((6425, 6442), 'numpy.deg2rad', 'np.deg2rad', (['phase'], {}), '(phase)\n', (6435, 6442), True, 'import numpy as np\n'), ((7249, 7281), 'numpy.cos', 'np.cos', (['(2 * np.pi * freq_mod * t)'], {}), '(2 * np.pi * freq_mod * t)\n', (7255, 7281), True, 'import numpy as np\n'), ((7296, 7328), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq_mod * t)'], {}), '(2 * np.pi * freq_mod * t)\n', (7302, 7328), True, 'import numpy as np\n'), ((3023, 3087), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * (chirp_rate * t / 2 + start_freq) * t)'], {}), '(1.0j * 2 * np.pi * (chirp_rate * t / 2 + start_freq) * t)\n', (3029, 3087), True, 'import numpy as np\n'), ((4923, 4966), 'numpy.exp', 'np.exp', (['(-(0.5 * (t - mu) ** 2 / sigma ** 2))'], {}), '(-(0.5 * (t - mu) ** 2 / sigma ** 2))\n', (4929, 4966), True, 'import numpy as np\n'), ((2229, 2271), 'scipy.signal.convolve', 'signal.convolve', (['data', 'window'], {'mode': '"""same"""'}), "(data, window, mode='same')\n", (2244, 2271), False, 'from scipy import signal\n'), ((6455, 6468), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (6461, 6468), True, 'import numpy as np\n'), ((6478, 6491), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (6484, 6491), True, 'import numpy as np\n')] |
import re
import time
import math
import sys
import os
import psutil
from abc import ABCMeta, abstractmethod
from pathlib import Path
from contextlib import contextmanager
import pandas as pd
import numpy as np
def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
@contextmanager
def timer(name):
t0 = time.time()
print(f'[{name}] start')
yield
print(f'[{name}] done in {time.time() - t0:.0f} s')
class Feature(metaclass=ABCMeta):
prefix = ''
suffix = ''
dir = '.'
def __init__(self):
self.name = self.__class__.__name__
self.train = pd.DataFrame()
self.test = pd.DataFrame()
self.train_path = Path(self.dir) / f'{self.name}_train.ftr'
self.test_path = Path(self.dir) / f'{self.name}_test.ftr'
def run(self):
with timer(self.name):
self.create_features()
prefix = self.prefix + '_' if self.prefix else ''
suffix = '_' + self.suffix if self.suffix else ''
self.train.columns = prefix + self.train.columns + suffix
self.test.columns = prefix + self.test.columns + suffix
return self
@abstractmethod
def create_features(self):
raise NotImplementedError
def save(self):
self.train.to_feather(str(self.train_path))
self.test.to_feather(str(self.test_path))
def load_datasets(feats, fdir):
dfs = [pd.read_feather(f'{fdir}/{f}_train.ftr') for f in feats]
X_train = pd.concat(dfs, axis=1)
dfs = [pd.read_feather(f'{fdir}/{f}_test.ftr') for f in feats]
X_test = pd.concat(dfs, axis=1)
return X_train, X_test
@contextmanager
def trace(title):
t0 = time.time()
p = psutil.Process(os.getpid())
m0 = p.memory_info()[0] / 2. ** 30
yield
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
print(f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ", file=sys.stderr) | [
"pandas.DataFrame",
"os.getpid",
"math.fabs",
"pandas.read_feather",
"numpy.iinfo",
"time.time",
"numpy.finfo",
"pathlib.Path",
"pandas.concat"
] | [((1720, 1731), 'time.time', 'time.time', ([], {}), '()\n', (1729, 1731), False, 'import time\n'), ((2891, 2913), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (2900, 2913), True, 'import pandas as pd\n'), ((2994, 3016), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (3003, 3016), True, 'import pandas as pd\n'), ((3088, 3099), 'time.time', 'time.time', ([], {}), '()\n', (3097, 3099), False, 'import time\n'), ((3294, 3310), 'math.fabs', 'math.fabs', (['delta'], {}), '(delta)\n', (3303, 3310), False, 'import math\n'), ((2003, 2017), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2015, 2017), True, 'import pandas as pd\n'), ((2038, 2052), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2050, 2052), True, 'import pandas as pd\n'), ((2820, 2860), 'pandas.read_feather', 'pd.read_feather', (['f"""{fdir}/{f}_train.ftr"""'], {}), "(f'{fdir}/{f}_train.ftr')\n", (2835, 2860), True, 'import pandas as pd\n'), ((2925, 2964), 'pandas.read_feather', 'pd.read_feather', (['f"""{fdir}/{f}_test.ftr"""'], {}), "(f'{fdir}/{f}_test.ftr')\n", (2940, 2964), True, 'import pandas as pd\n'), ((3123, 3134), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3132, 3134), False, 'import os\n'), ((2079, 2093), 'pathlib.Path', 'Path', (['self.dir'], {}), '(self.dir)\n', (2083, 2093), False, 'from pathlib import Path\n'), ((2146, 2160), 'pathlib.Path', 'Path', (['self.dir'], {}), '(self.dir)\n', (2150, 2160), False, 'from pathlib import Path\n'), ((1801, 1812), 'time.time', 'time.time', ([], {}), '()\n', (1810, 1812), False, 'import time\n'), ((3357, 3368), 'time.time', 'time.time', ([], {}), '()\n', (3366, 3368), False, 'import time\n'), ((602, 619), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (610, 619), True, 'import numpy as np\n'), ((636, 653), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (644, 653), True, 'import numpy as np\n'), ((1189, 1209), 'numpy.finfo', 'np.finfo', (['np.float16'], 
{}), '(np.float16)\n', (1197, 1209), True, 'import numpy as np\n'), ((1226, 1246), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1234, 1246), True, 'import numpy as np\n'), ((742, 760), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (750, 760), True, 'import numpy as np\n'), ((777, 795), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (785, 795), True, 'import numpy as np\n'), ((885, 903), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (893, 903), True, 'import numpy as np\n'), ((920, 938), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (928, 938), True, 'import numpy as np\n'), ((1028, 1046), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (1036, 1046), True, 'import numpy as np\n'), ((1063, 1081), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (1071, 1081), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 09:35:54 2018
@author: Pavel
Tests function in random_background.py
Since these tests are based on random functions
or random number generators, each function is tested several
times to ensure that it behaves correctly.
"""
import unittest
import os, sys
from PIL import Image
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
parent = os.path.abspath(os.path.join(dir_path, os.pardir))
base_path = os.path.abspath(os.path.join(parent,os.pardir)) # folder /src
if not (base_path in sys.path):
sys.path.append(base_path)
from ..RandomLib import random_background as rb
n_of_tests = 5
class TestResizeImages(unittest.TestCase):
def test_random_color(self):
"""
Tests that returns a L*L*3 array of values between 0 and1
"""
for i in range(n_of_tests):
result = rb.random_color(300)
self.assertEqual((300,300,3), np.shape(result))
for color in result:
for line in color:
for value in line:
self.assertTrue(value>=0 and value <=1.0)
def test_random_image(self):
"""
Test random_image by checking that the resultant image
is size*size*3 array of values between 0 and 2? for now,
Ong will solve the problem
"""
for i in range(n_of_tests):
result = rb.random_image(300)
self.assertEqual((300,300,3), np.shape(result))
for color in result:
for line in color:
for value in line:
self.assertTrue(value>=0 and value <=1.0)
def test_mix(self):
"""
Test that return array is size*size*3
this mixes the images with metaballs
It would be good to test what happens when images of wrong
size are passed
"""
for i in range(n_of_tests):
img1 = rb.random_image(300)
img2 = rb.random_image(300)
result = rb.mix(img1, img2,300)
self.assertEqual((300,300,3), np.shape(result))
for color in result:
for line in color:
for value in line:
self.assertTrue(value>=0 and value <=1.0)
def test_rand_background(self):
"""
Creates a single image of size*size*3 by merging N
random images. Usually use between 2 and 3 layers
"""
for i in range(n_of_tests):
result = rb.rand_background(4,300)
self.assertEqual((300,300,3), np.shape(result))
for color in result:
for line in color:
for value in line:
self.assertTrue(value>=0 and value <=1.0)
def test_generate_images(self):
"""
Generates few images, checks that they are of the
correct size and values and saves them
"""
master_path = os.path.abspath(os.path.join(base_path, os.pardir))
test_path = os.path.join(master_path, 'test_data', 'rendering_tests', 'rand_back')
# Clean the folder
for the_file in os.listdir(test_path):
file_path = os.path.join(test_path, the_file)
if os.path.isfile(file_path):
os.unlink(file_path)
self.assertEqual(0, len(os.listdir(test_path)))
rb.generate_images(test_path, 300, 0,5)
all_images = os.listdir(test_path)
self.assertEqual(5, len(all_images))
for the_file in os.listdir(test_path):
file_path = os.path.join(test_path, the_file)
with Image.open(file_path) as f:
self.assertEqual(300, f.size[0])
self.assertEqual(300, f.size[1])
if __name__=='__main__':
unittest.main() | [
"sys.path.append",
"unittest.main",
"os.unlink",
"os.path.realpath",
"PIL.Image.open",
"numpy.shape",
"os.path.isfile",
"os.path.join",
"os.listdir"
] | [((379, 405), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (395, 405), False, 'import os, sys\n'), ((432, 465), 'os.path.join', 'os.path.join', (['dir_path', 'os.pardir'], {}), '(dir_path, os.pardir)\n', (444, 465), False, 'import os, sys\n'), ((495, 526), 'os.path.join', 'os.path.join', (['parent', 'os.pardir'], {}), '(parent, os.pardir)\n', (507, 526), False, 'import os, sys\n'), ((578, 604), 'sys.path.append', 'sys.path.append', (['base_path'], {}), '(base_path)\n', (593, 604), False, 'import os, sys\n'), ((4005, 4020), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4018, 4020), False, 'import unittest\n'), ((3215, 3285), 'os.path.join', 'os.path.join', (['master_path', '"""test_data"""', '"""rendering_tests"""', '"""rand_back"""'], {}), "(master_path, 'test_data', 'rendering_tests', 'rand_back')\n", (3227, 3285), False, 'import os, sys\n'), ((3346, 3367), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (3356, 3367), False, 'import os, sys\n'), ((3651, 3672), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (3661, 3672), False, 'import os, sys\n'), ((3751, 3772), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (3761, 3772), False, 'import os, sys\n'), ((3159, 3193), 'os.path.join', 'os.path.join', (['base_path', 'os.pardir'], {}), '(base_path, os.pardir)\n', (3171, 3193), False, 'import os, sys\n'), ((3393, 3426), 'os.path.join', 'os.path.join', (['test_path', 'the_file'], {}), '(test_path, the_file)\n', (3405, 3426), False, 'import os, sys\n'), ((3442, 3467), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (3456, 3467), False, 'import os, sys\n'), ((3798, 3831), 'os.path.join', 'os.path.join', (['test_path', 'the_file'], {}), '(test_path, the_file)\n', (3810, 3831), False, 'import os, sys\n'), ((962, 978), 'numpy.shape', 'np.shape', (['result'], {}), '(result)\n', (970, 978), True, 'import numpy as np\n'), ((1527, 1543), 'numpy.shape', 
'np.shape', (['result'], {}), '(result)\n', (1535, 1543), True, 'import numpy as np\n'), ((2184, 2200), 'numpy.shape', 'np.shape', (['result'], {}), '(result)\n', (2192, 2200), True, 'import numpy as np\n'), ((2717, 2733), 'numpy.shape', 'np.shape', (['result'], {}), '(result)\n', (2725, 2733), True, 'import numpy as np\n'), ((3485, 3505), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (3494, 3505), False, 'import os, sys\n'), ((3538, 3559), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (3548, 3559), False, 'import os, sys\n'), ((3849, 3870), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (3859, 3870), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
# mpc_nbody/mpc_nbody/parse_input.py
'''
----------------------------------------------------------------------------
mpc_nbody's module for parsing OrbFit + ele220 elements
Mar 2020
<NAME> & <NAME> & <NAME>
This module provides functionalities to
(a) read an OrbFit .fel/.eq file with heliocentric ecliptic cartesian els
(b) read ele220 element strings
(c) convert the above to barycentric equatorial cartesian elements
This is meant to prepare the elements for input into the n-body integrator
----------------------------------------------------------------------------
'''
# Import third-party packages
# -----------------------------------------------------------------------------
import os, sys
import numpy as np
from astropy.time import Time
import getpass
if getpass.getuser() in ['matthewjohnpayne']: # Payne's dev laptop set up differently ...:
sys.path.append('/Users/matthewjohnpayne/Envs/mpcvenv/')
import mpcpp.MPC_library as mpc
# Import neighbouring packages
# -----------------------------------------------------------------------------
# Default for caching stuff using lru_cache
# -----------------------------------------------------------------------------
# Constants and stuff
# -----------------------------------------------------------------------------
DATA_PATH = os.path.realpath(os.path.dirname(__file__))
au_km = 149597870.700 # This is now a definition
# Data classes/methods
# -----------------------------------------------------------------------------
class ParseElements():
'''
Class for parsing elements and returning them in the correct format.
'''
def __init__(self, input_file=None, filetype=None, save_parsed=False ):
# The variables that will be used to hold the elements
# - They get populated by *parse_orbfit* & *make_bary_equatorial*
self.helio_ecl_vec_EXISTS = False
self.helio_ecl_vec = None
self.helio_ecl_cov_EXISTS = False
self.helio_ecl_cov = None
self.bary_eq_vec_EXISTS = False
self.bary_eq_vec = None
self.bary_eq_cov_EXISTS = False
self.bary_eq_cov = None
# If input filename provided, process it:
if isinstance(input_file, str) & isinstance(filetype, str):
if filetype == 'ele220':
self.parse_ele220(input_file)
if (filetype == 'fel') | (filetype == 'eq'):
self.parse_orbfit(input_file)
self.make_bary_equatorial()
if save_parsed:
self.save_elements()
else:
print("Keywords 'input_file' and/or 'filetype' missing; "
"initiating empty object.")
def save_elements(self, output_file='holman_ic'):
"""
Save the barycentric equatorial cartesian elements to file.
Inputs:
-------
output_file : string, filename to write elements to.
The file is overwritten if it already exists.
"""
self.tstart = self.time.tdb.jd
outfile = open(output_file, 'w')
outfile.write(f"tstart {self.tstart:}\n")
outfile.write("tstep +20.0\n")
outfile.write("trange 600.\n")
outfile.write("geocentric 0\n")
outfile.write("state\n")
# For whatever reason, we are writing this over two lines
# - perhaps to compare against JPL?
for n,coeff in enumerate(self.bary_eq_vec):
suffix = '\n' if n in [2,5] else ''
outfile.write(f"{coeff: 18.15e} " + suffix)
def parse_ele220(self, ele220file=None):
'''
Parse a file containing a single ele220 line.
Currently returns junk data.
NOT ACTUALLY IMPLEMENTED YET!!!
'''
if ele220file is None:
raise TypeError("Required argument 'ele220file'"
" (pos 1) not found")
# make fake data & set appropriate variables
self._get_and_set_junk_data()
def parse_orbfit(self, felfile):
'''
Parse a file containing OrbFit elements for a single object & epoch.
Currently returns junk data.
Inputs:
-------
felfile : string, filename of fel/eq formatted OrbFit output
Populates:
--------
self.helio_ecl_vec_EXISTS : Boolean
self.helio_ecl_vec : 1D np.ndarray
self.helio_ecl_cov_EXISTS : Boolean
self.helio_ecl_cov : 1D np.ndarray
self.time : astropy Time object
'''
# Read the contents of the orbfit output "fel" file
obj = {}
with open(felfile,'r') as fh:
el = fh.readlines()
cart_head = '! Cartesian position and velocity vectors\n'
# Only do this if the file actually has cartesian coordinates.
if el.count(cart_head) > 0:
# get Cartesian Elements out of the file contents
carLoc = len(el) - 1 - list(reversed(el)).index(cart_head)
carEls = el[carLoc:carLoc + 25]
# Form an array of the heliocentric ecliptic cartesian coefficients
(_, car_x, car_y, car_z, car_dx, car_dy, car_dz
) = carEls[1].split()
self.helio_ecl_vec = np.array([ float(car_x), float(car_y), float(car_z), \
float(car_dx), float(car_dy), float(car_dz)]
)
self.helio_ecl_vec_EXISTS = True
# Using Astropy.time for time conversion,
# because life's too short for timezones and time scales.
_, mjd_tdt, _ = carEls[2].split()
self.time = Time(float(mjd_tdt), format='mjd', scale='tt')
# Parse carEls (the contents of the orbfit file) to get
# the cartesian covariance matrix
self.helio_ecl_cov_EXISTS, self.helio_ecl_cov = _parse_Covariance_List(carEls)
else:
raise TypeError("There does not seem to be any valid elements "
f"in the input file {felfile:}")
def make_bary_equatorial(self):
'''
Transform heliocentric-ecliptic coordinates into
barycentric equatorial coordinates
requires:
----------
self.helio_ecl_vec_EXISTS : Boolean
self.helio_ecl_vec : 1D np.ndarray
self.helio_ecl_cov_EXISTS : Boolean
self.helio_ecl_cov : 2D np.ndarray
populates:
----------
self.bary_eq_vec_EXISTS = Boolean
self.bary_eq_vec = 1D np.ndarray
self.bary_eq_cov_EXISTS = Boolean
self.bary_eq_cov = 2D np.ndarray
'''
if self.helio_ecl_vec_EXISTS :
# Transform the helio-ecl-coords to bary-eq-coords
# NB 2-step transformation for the vector (posn,vel)
self.bary_eq_vec = equatorial_helio2bary(
ecliptic_to_equatorial(self.helio_ecl_vec),
self.time.tdb.jd
)
# Set boolean as well (not sure if we'll really use these ...)
self.bary_eq_vec_EXISTS = True
if self.helio_ecl_cov_EXISTS:
# Only need to do a rotation for the CoV
self.bary_eq_cov = ecliptic_to_equatorial(self.helio_ecl_cov)
# Set booleans as well (not sure if we'll really use these ...)
self.bary_eq_cov_EXISTS = True
if not self.helio_ecl_vec_EXISTS and not self.helio_ecl_cov_EXISTS:
raise TypeError("There does not seem to be any valid helio_ecl to transform into bary_eq")
return True
def _get_and_set_junk_data(self, BaryEqDirect=False ):
"""Just make some junk data for saving."""
self.time = Time(2458849.5, format='jd', scale='tdb')
v = np.array( [3., 2., 1., 0.3, 0.2, 0.1] )
CoV = 0.01 * np.ones((6,6))
# Default is to make helio-ecl, then calc bary-eq from that
if not BaryEqDirect:
self.helio_ecl_vec = v
self.helio_ecl_vec_EXISTS = True
self.helio_ecl_cov = CoV
self.helio_ecl_cov_EXISTS = True
self.make_bary_equatorial()
# Alternative is to directly set bary-eq
else:
self.bary_eq_vec = v
self.bary_eq_vec_EXISTS = True
self.bary_eq_cov = CoV
self.bary_eq_cov_EXISTS = True
# Functions
# -----------------------------------------------------------------------------
def ecliptic_to_equatorial(input, backwards=False):
'''
Rotates a cartesian vector or Cov-Matrix from mean ecliptic to mean equatorial.
Backwards=True converts backwards, from equatorial to ecliptic.
inputs:
-------
input : 1-D or 2-D arrays
- If 1-D, then len(input) must be 3 or 6
- If 2-D, then input.shape must be (6,6)
output:
-------
output : np.ndarray
- same shape as input
'''
# Ensure we have an array
input = np.atleast_1d(input)
# The rotation matricees we may use
direction = -1 if backwards else +1
R3 = mpc.rotate_matrix(mpc.Constants.ecl * direction)
R6 = np.block( [ [R3, np.zeros((3,3))],[np.zeros((3,3)),R3] ])
# Vector input => Single rotation operation
if input.ndim == 1 and input.shape[0] in [3,6]:
R = R6 if input.shape[0] == 6 else R3
output = R @ input
# Matrix (CoV) input => R & R.T
elif input.ndim == 2 and input.shape == (6,6):
R = R6
output = R @ input @ R.T
# Unknown input
else:
sys.exit(f'Does not compute: input.ndim=={input.ndim} , input.shape={input.shape}')
assert output.shape == input.shape
return output
def equatorial_helio2bary(input_xyz, jd_tdb, backwards=False):
'''
Convert from heliocentric to barycentic cartesian coordinates.
backwards=True converts backwards, from bary to helio.
input:
input_xyz - np.ndarray length 3 or 6
backwards - boolean
output:
output_xyz - np.ndarray
- same shape as input_xyz
input_xyz MUST BE EQUATORIAL!!!
'''
direction = -1 if backwards else +1
# Ensure we have an array of the correct shape to work with
input_xyz = np.atleast_1d(input_xyz)
assert input_xyz.ndim == 1
assert input_xyz.shape[0] in [3,6]
# Position & Motion of the barycenter w.r.t. the heliocenter (and vice-versa)
delta, delta_vel = mpc.jpl_kernel[0, 10].compute_and_differentiate(jd_tdb)
# Work out whether we need xyz or xyzuvw
delta = delta if input_xyz.shape[0] == 3 else np.block([delta,delta_vel])
# Shift vectors & return
return input_xyz + delta * direction / au_km
def _old_parse_Covariance_List(Els):
'''
Convenience function for reading and splitting the covariance
lines of an OrbFit file.
Not intended for user usage.
'''
ElCov = []
covErr = ""
for El in Els:
if El[:4] == ' COV':
ElCov.append(El)
if len(ElCov) == 7:
_, c11, c12, c13 = ElCov[0].split()
_, c14, c15, c16 = ElCov[1].split()
_, c22, c23, c24 = ElCov[2].split()
_, c25, c26, c33 = ElCov[3].split()
_, c34, c35, c36 = ElCov[4].split()
_, c44, c45, c46 = ElCov[5].split()
_, c55, c56, c66 = ElCov[6].split()
if len(ElCov) != 7:
c11, c12, c13, c14, c15, c16, c22 = "", "", "", "", "", "", ""
c23, c24, c25, c26, c33, c34, c35 = "", "", "", "", "", "", ""
c36, c44, c45, c46, c55, c56, c66 = "", "", "", "", "", "", ""
covErr = ' Empty covariance Matrix for '
return (covErr, c11, c12, c13, c14, c15, c16, c22, c23, c24, c25, c26,
c33, c34, c35, c36, c44, c45, c46, c55, c56, c66)
def _parse_Covariance_List(Els):
'''
Convenience function for reading and splitting the covariance
lines of an OrbFit file.
Not intended for user usage.
# MJP : 20200901 : Suggest to just make & return the required matrix
'''
# Set-up array of zeroes
CoV = np.zeros( (6,6) )
CoV_EXISTS = False
# Populate triangle directly
ElCov=[]
for El in Els:
if El[:4] == ' COV':
ElCov.append(El)
if len(ElCov) == 7:
_, CoV[0,0],CoV[0,1],CoV[0,2] = ElCov[0].split() # c11, c12, c13
_, CoV[0,3],CoV[0,4],CoV[0,5] = ElCov[1].split() # c14, c15, c16
_, CoV[1,1],CoV[1,2],CoV[1,3] = ElCov[2].split() # c22, c23, c24
_, CoV[1,4],CoV[1,5],CoV[2,2] = ElCov[3].split() # c25, c26, c33
_, CoV[2,3],CoV[2,4],CoV[2,5] = ElCov[4].split() # c34, c35, c36
_, CoV[3,3],CoV[3,4],CoV[3,5] = ElCov[5].split() # c44, c45, c46
_, CoV[4,4],CoV[4,5],CoV[5,5] = ElCov[6].split() # c55, c56, c66
# Populate the symmetric part
for i in range(1,6):
for j in range(i):
# # MA: Killed totally annoying and unneccessary print
#print(f'Setting Cov[{i,j}] = CoV{[j,i]}')
CoV[i,j]=CoV[j,i]
# Set boolean
CoV_EXISTS = True
return CoV_EXISTS, CoV
| [
"sys.path.append",
"mpcpp.MPC_library.rotate_matrix",
"getpass.getuser",
"astropy.time.Time",
"os.path.dirname",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.atleast_1d",
"sys.exit",
"numpy.block"
] | [((829, 846), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (844, 846), False, 'import getpass\n'), ((923, 979), 'sys.path.append', 'sys.path.append', (['"""/Users/matthewjohnpayne/Envs/mpcvenv/"""'], {}), "('/Users/matthewjohnpayne/Envs/mpcvenv/')\n", (938, 979), False, 'import os, sys\n'), ((1392, 1417), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1407, 1417), False, 'import os, sys\n'), ((9722, 9742), 'numpy.atleast_1d', 'np.atleast_1d', (['input'], {}), '(input)\n', (9735, 9742), True, 'import numpy as np\n'), ((9841, 9889), 'mpcpp.MPC_library.rotate_matrix', 'mpc.rotate_matrix', (['(mpc.Constants.ecl * direction)'], {}), '(mpc.Constants.ecl * direction)\n', (9858, 9889), True, 'import mpcpp.MPC_library as mpc\n'), ((11048, 11072), 'numpy.atleast_1d', 'np.atleast_1d', (['input_xyz'], {}), '(input_xyz)\n', (11061, 11072), True, 'import numpy as np\n'), ((12923, 12939), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (12931, 12939), True, 'import numpy as np\n'), ((8287, 8328), 'astropy.time.Time', 'Time', (['(2458849.5)'], {'format': '"""jd"""', 'scale': '"""tdb"""'}), "(2458849.5, format='jd', scale='tdb')\n", (8291, 8328), False, 'from astropy.time import Time\n'), ((8344, 8384), 'numpy.array', 'np.array', (['[3.0, 2.0, 1.0, 0.3, 0.2, 0.1]'], {}), '([3.0, 2.0, 1.0, 0.3, 0.2, 0.1])\n', (8352, 8384), True, 'import numpy as np\n'), ((11417, 11445), 'numpy.block', 'np.block', (['[delta, delta_vel]'], {}), '([delta, delta_vel])\n', (11425, 11445), True, 'import numpy as np\n'), ((8406, 8421), 'numpy.ones', 'np.ones', (['(6, 6)'], {}), '((6, 6))\n', (8413, 8421), True, 'import numpy as np\n'), ((10344, 10432), 'sys.exit', 'sys.exit', (['f"""Does not compute: input.ndim=={input.ndim} , input.shape={input.shape}"""'], {}), "(\n f'Does not compute: input.ndim=={input.ndim} , input.shape={input.shape}')\n", (10352, 10432), False, 'import os, sys\n'), ((9917, 9933), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 
3))\n', (9925, 9933), True, 'import numpy as np\n'), ((9935, 9951), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (9943, 9951), True, 'import numpy as np\n')] |
import cv2
import numpy as np
img = np.ones([5, 5], dtype=np.uint8) * 9
mask = np.zeros([5, 5], dtype=np.uint8)
mask[0:3, 0] = 1
mask[2:5, 2:4] = 1
roi = cv2.bitwise_and(img, img, mask=mask)
print("img=\n", img)
print("mask=\n", mask)
print("roi=\n", roi)
| [
"numpy.zeros",
"numpy.ones",
"cv2.bitwise_and"
] | [((80, 112), 'numpy.zeros', 'np.zeros', (['[5, 5]'], {'dtype': 'np.uint8'}), '([5, 5], dtype=np.uint8)\n', (88, 112), True, 'import numpy as np\n'), ((155, 191), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (170, 191), False, 'import cv2\n'), ((37, 68), 'numpy.ones', 'np.ones', (['[5, 5]'], {'dtype': 'np.uint8'}), '([5, 5], dtype=np.uint8)\n', (44, 68), True, 'import numpy as np\n')] |
r"""Module with spin-weight related utilities.
Conventions are $_{\pm |s|} X_{lm} = - (\pm)^{|s|} (G_{lm} \pm i C_{lm})$.
For CMB maps,
$ _{0}X_{lm} = T_{lm} $
$ _{\pm}X_{lm} = -1/2 (E_{lm} \pm i B_{lm}) $
hence
$ G^{0}_{lm} = -T_{lm} $
$ G^{2}_{lm} = E_{lm} $
$ C^{2}_{lm} = B_{lm} $.
"""
import healpy as hp
import numpy as np
def alm2map_spin(gclm, nside, spin, lmax, mmax=None):
assert spin >= 0, spin
assert len(gclm) == 2, len(gclm)
if spin > 0:
return hp.alm2map_spin(gclm, nside, spin, lmax, mmax=mmax)
elif spin == 0:
return hp.alm2map(-gclm[0], nside, lmax=lmax, mmax=mmax), 0.
def map2alm_spin(maps, spin, lmax=None, mmax=None):
assert spin >= 0, spin
if spin > 0:
return hp.map2alm_spin(maps, spin, lmax=lmax, mmax=mmax)
else:
return -hp.map2alm(maps[0], lmax=lmax, mmax=mmax, iter=0), 0.
try:
from plancklens.wigners import wigners # fortran 90 shared object
HASWIGNER = True
except:
HASWIGNER = False
print("could not load wigners.so fortran shared object")
print('try f2py -c -m wigners wigners.f90 from the command line in wigners directory ?')
GL_cache = {}
def wignerc(cl1, cl2, sp1, s1, sp2, s2, lmax_out=None):
"""Legendre coeff. of $ (\\xi_{sp1,s1} * \\xi_{sp2,s2})(\\cos \\theta)$ from their harmonic series.
Uses Gauss-Legendre quadrature to solve this exactly.
"""
assert HASWIGNER
lmax1 = len(cl1) - 1
lmax2 = len(cl2) - 1
lmax_out = lmax1 + lmax2 if lmax_out is None else lmax_out
lmaxtot = lmax1 + lmax2 + lmax_out
spo = sp1 + sp2
so = s1 + s2
if np.any(cl1) and np.any(cl2):
N = (lmaxtot + 2 - lmaxtot % 2) // 2
if not 'xg wg %s' % N in GL_cache.keys():
GL_cache['xg wg %s' % N] = wigners.get_xgwg(-1., 1., N)
xg, wg = GL_cache['xg wg %s' % N]
if HASWIGNER:
if np.iscomplexobj(cl1):
xi1 = wigners.wignerpos(np.real(cl1), xg, sp1, s1) + 1j * wigners.wignerpos(np.imag(cl1), xg, sp1, s1)
else:
xi1 = wigners.wignerpos(cl1, xg, sp1, s1)
if np.iscomplexobj(cl2):
xi2 = wigners.wignerpos(np.real(cl2), xg, sp2, s2) + 1j * wigners.wignerpos(np.imag(cl2), xg, sp2, s2)
else:
xi2 = wigners.wignerpos(cl2, xg, sp2, s2)
xi1xi2w = xi1 * xi2 * wg
if np.iscomplexobj(xi1xi2w):
ret = wigners.wignercoeff(np.real(xi1xi2w), xg, spo, so, lmax_out)
ret = ret + 1j * wigners.wignercoeff(np.imag(xi1xi2w), xg, spo, so, lmax_out)
return ret
else:
return wigners.wignercoeff(xi1xi2w, xg, spo, so, lmax_out)
else:
assert 0
else:
return np.zeros(lmax_out + 1, dtype=float)
def get_spin_raise(s, lmax):
r"""Response coefficient of spin-s spherical harmonic to spin raising operator.
:math:`\sqrt{ (l - s) (l + s + 1) }` for abs(s) <= l <= lmax
"""
ret = np.zeros(lmax + 1, dtype=float)
ret[abs(s):] = np.sqrt(np.arange(abs(s) -s, lmax - s + 1) * np.arange(abs(s) + s + 1, lmax + s + 2))
return ret
def get_spin_lower(s, lmax):
r"""Response coefficient of spin-s spherical harmonic to spin lowering operator.
:math:`-\sqrt{ (l + s) (l - s + 1) }` for abs(s) <= l <= lmax
"""
ret = np.zeros(lmax + 1, dtype=float)
ret[abs(s):] = -np.sqrt(np.arange(s + abs(s), lmax + s + 1) * np.arange(abs(s) - s + 1, lmax - s + 2))
return ret
def _dict_transpose(cls):
ret = {}
for k in cls.keys():
if len(k) == 1:
ret[k + k] = np.copy(cls[k])
else:
assert len(k) == 2
ret[k[1] + k[0]] = np.copy(cls[k])
return ret
def spin_cls(s1, s2, cls):
r"""Spin-weighted power spectrum :math:`_{s1}X_{lm} _{s2}X^{*}_{lm}`
The output is real unless necessary.
"""
if s1 < 0:
return (-1) ** (s1 + s2) * np.conjugate(spin_cls(-s1, -s2, _dict_transpose(cls)))
assert s1 in [0, -2, 2] and s2 in [0, -2, 2], (s1, s2, 'not implemented')
if s1 == 0:
if s2 == 0:
return cls['tt']
tb = cls.get('tb', None)
assert 'te' in cls.keys() or 'et' in cls.keys()
te = cls.get('te', cls.get('et'))
return -te if tb is None else -te + 1j * np.sign(s2) * tb
elif s1 == 2:
if s2 == 0:
assert 'te' in cls.keys() or 'et' in cls.keys()
tb = cls.get('bt', cls.get('tb', None))
et = cls.get('et', cls.get('te'))
return -et if tb is None else -et - 1j * tb
elif s2 == 2:
return cls['ee'] + cls['bb']
elif s2 == -2:
eb = cls.get('be', cls.get('eb', None))
return cls['ee'] - cls['bb'] if eb is None else cls['ee'] - cls['bb'] + 2j * eb
else:
assert 0
def get_spin_matrix(sout, sin, cls):
    r"""Spin-space matrix R^{-1} cls[T, E, B] R where R is the mapping from _{0, \pm 2}X to T, E, B.
    cls is dictionary with keys 'tt', 'te', 'ee', 'bb'.
    If a key is not present the corresponding spectrum is assumed to be zero.
    ('t' 'e' and 'b' keys also works in place of 'tt' 'ee', 'bb'.)
    Output is complex only when necessary (that is, TB and/or EB present and relevant).
    """
    # Only temperature (0) and spin +-2 polarization entries are implemented.
    assert sin in [0, 2, -2] and sout in [0, 2, -2], (sin, sout)
    if sin == 0:
        if sout == 0:
            # TT entry; missing key defaults to a zero spectrum.
            return cls.get('tt', cls.get('t', 0.))
        # sout = +-2: TE (and TB if present) contribution; the TB sign
        # flips with the sign of sout.
        tb = cls.get('tb', None)
        return (-cls.get('te', 0.) - 1j * np.sign(sout) * tb) if tb is not None else -cls.get('te', 0.)
    if sin == 2:
        if sout == 0:
            # Spin +2 to temperature: -(TE - i TB) / 2.
            te = cls.get('te', 0.)
            tb = cls.get('tb', None)
            return -0.5 * (te - 1j * tb) if tb is not None else -0.5 * te
        if sout == 2:
            # Diagonal polarization entry: (EE + BB) / 2.
            return 0.5 * (cls.get('ee', cls.get('e', 0.)) + cls.get('bb', cls.get('b', 0.)))
        if sout == -2:
            # Off-diagonal polarization entry: (EE - BB) / 2, minus i EB if present.
            ret = 0.5 * (cls.get('ee', cls.get('e', 0.)) - cls.get('bb', cls.get('b', 0.)))
            eb = cls.get('eb', None)
            return ret - 1j * eb if eb is not None else ret
    if sin == -2:
        # Mirror of the sin == 2 branch with conjugate signs on TB and EB.
        if sout == 0:
            te = cls.get('te', 0.)
            tb = cls.get('tb', None)
            return -0.5 * (te + 1j * tb) if tb is not None else -0.5 * te
        if sout == 2:
            ret = 0.5 * (cls.get('ee', cls.get('e', 0.)) - cls.get('bb', cls.get('b', 0.)))
            eb = cls.get('eb', None)
            return ret + 1j * eb if eb is not None else ret
        if sout == -2:
            return 0.5 * (cls.get('ee', cls.get('e', 0.)) + cls.get('bb', cls.get('b', 0.)))
    # Unreachable after the assertion above; kept as a safety net.
    assert 0, (sin, sout)
| [
"healpy.alm2map",
"numpy.iscomplexobj",
"numpy.copy",
"healpy.map2alm",
"numpy.zeros",
"plancklens.wigners.wigners.wignerpos",
"numpy.any",
"numpy.imag",
"healpy.map2alm_spin",
"numpy.real",
"numpy.sign",
"plancklens.wigners.wigners.get_xgwg",
"plancklens.wigners.wigners.wignercoeff",
"hea... | [((3046, 3077), 'numpy.zeros', 'np.zeros', (['(lmax + 1)'], {'dtype': 'float'}), '(lmax + 1, dtype=float)\n', (3054, 3077), True, 'import numpy as np\n'), ((3403, 3434), 'numpy.zeros', 'np.zeros', (['(lmax + 1)'], {'dtype': 'float'}), '(lmax + 1, dtype=float)\n', (3411, 3434), True, 'import numpy as np\n'), ((522, 573), 'healpy.alm2map_spin', 'hp.alm2map_spin', (['gclm', 'nside', 'spin', 'lmax'], {'mmax': 'mmax'}), '(gclm, nside, spin, lmax, mmax=mmax)\n', (537, 573), True, 'import healpy as hp\n'), ((775, 824), 'healpy.map2alm_spin', 'hp.map2alm_spin', (['maps', 'spin'], {'lmax': 'lmax', 'mmax': 'mmax'}), '(maps, spin, lmax=lmax, mmax=mmax)\n', (790, 824), True, 'import healpy as hp\n'), ((1651, 1662), 'numpy.any', 'np.any', (['cl1'], {}), '(cl1)\n', (1657, 1662), True, 'import numpy as np\n'), ((1667, 1678), 'numpy.any', 'np.any', (['cl2'], {}), '(cl2)\n', (1673, 1678), True, 'import numpy as np\n'), ((2806, 2841), 'numpy.zeros', 'np.zeros', (['(lmax_out + 1)'], {'dtype': 'float'}), '(lmax_out + 1, dtype=float)\n', (2814, 2841), True, 'import numpy as np\n'), ((1814, 1844), 'plancklens.wigners.wigners.get_xgwg', 'wigners.get_xgwg', (['(-1.0)', '(1.0)', 'N'], {}), '(-1.0, 1.0, N)\n', (1830, 1844), False, 'from plancklens.wigners import wigners\n'), ((1922, 1942), 'numpy.iscomplexobj', 'np.iscomplexobj', (['cl1'], {}), '(cl1)\n', (1937, 1942), True, 'import numpy as np\n'), ((2154, 2174), 'numpy.iscomplexobj', 'np.iscomplexobj', (['cl2'], {}), '(cl2)\n', (2169, 2174), True, 'import numpy as np\n'), ((2423, 2447), 'numpy.iscomplexobj', 'np.iscomplexobj', (['xi1xi2w'], {}), '(xi1xi2w)\n', (2438, 2447), True, 'import numpy as np\n'), ((3671, 3686), 'numpy.copy', 'np.copy', (['cls[k]'], {}), '(cls[k])\n', (3678, 3686), True, 'import numpy as np\n'), ((3763, 3778), 'numpy.copy', 'np.copy', (['cls[k]'], {}), '(cls[k])\n', (3770, 3778), True, 'import numpy as np\n'), ((609, 658), 'healpy.alm2map', 'hp.alm2map', (['(-gclm[0])', 'nside'], {'lmax': 'lmax', 'mmax': 
'mmax'}), '(-gclm[0], nside, lmax=lmax, mmax=mmax)\n', (619, 658), True, 'import healpy as hp\n'), ((851, 900), 'healpy.map2alm', 'hp.map2alm', (['maps[0]'], {'lmax': 'lmax', 'mmax': 'mmax', 'iter': '(0)'}), '(maps[0], lmax=lmax, mmax=mmax, iter=0)\n', (861, 900), True, 'import healpy as hp\n'), ((2103, 2138), 'plancklens.wigners.wigners.wignerpos', 'wigners.wignerpos', (['cl1', 'xg', 'sp1', 's1'], {}), '(cl1, xg, sp1, s1)\n', (2120, 2138), False, 'from plancklens.wigners import wigners\n'), ((2335, 2370), 'plancklens.wigners.wigners.wignerpos', 'wigners.wignerpos', (['cl2', 'xg', 'sp2', 's2'], {}), '(cl2, xg, sp2, s2)\n', (2352, 2370), False, 'from plancklens.wigners import wigners\n'), ((2694, 2745), 'plancklens.wigners.wigners.wignercoeff', 'wigners.wignercoeff', (['xi1xi2w', 'xg', 'spo', 'so', 'lmax_out'], {}), '(xi1xi2w, xg, spo, so, lmax_out)\n', (2713, 2745), False, 'from plancklens.wigners import wigners\n'), ((2491, 2507), 'numpy.real', 'np.real', (['xi1xi2w'], {}), '(xi1xi2w)\n', (2498, 2507), True, 'import numpy as np\n'), ((1984, 1996), 'numpy.real', 'np.real', (['cl1'], {}), '(cl1)\n', (1991, 1996), True, 'import numpy as np\n'), ((2216, 2228), 'numpy.real', 'np.real', (['cl2'], {}), '(cl2)\n', (2223, 2228), True, 'import numpy as np\n'), ((4382, 4393), 'numpy.sign', 'np.sign', (['s2'], {}), '(s2)\n', (4389, 4393), True, 'import numpy as np\n'), ((5605, 5618), 'numpy.sign', 'np.sign', (['sout'], {}), '(sout)\n', (5612, 5618), True, 'import numpy as np\n'), ((2036, 2048), 'numpy.imag', 'np.imag', (['cl1'], {}), '(cl1)\n', (2043, 2048), True, 'import numpy as np\n'), ((2268, 2280), 'numpy.imag', 'np.imag', (['cl2'], {}), '(cl2)\n', (2275, 2280), True, 'import numpy as np\n'), ((2585, 2601), 'numpy.imag', 'np.imag', (['xi1xi2w'], {}), '(xi1xi2w)\n', (2592, 2601), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
# Reproducible synthetic regression data: 32 samples with 2 features each.
SEED = 23455
rdm = np.random.RandomState(seed=SEED)  # uniform random numbers in [0, 1)
x = rdm.rand(32, 2)
# Targets are x1 + x2 plus uniform noise in [-0.05, 0.05).
y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in x]
x = tf.cast(x, dtype=tf.float32)
# Single linear layer: one weight column mapping 2 features to 1 output.
w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))
# Renamed from `epoch` so the loop variable below no longer shadows the
# total epoch count.
epochs = 15000
lr = 0.002
for epoch in range(epochs):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1)
        loss_mse = tf.reduce_mean(tf.square(y_ - y))
    # Plain gradient-descent step on the mean-squared-error loss.
    grads = tape.gradient(loss_mse, w1)
    w1.assign_sub(lr * grads)
    if epoch % 500 == 0:
        print("After %d training steps,w1 is " % (epoch))
        print(w1.numpy(), "\n")
print("Final w1 is: ", w1.numpy())
| [
"tensorflow.random.normal",
"numpy.random.RandomState",
"tensorflow.cast",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.GradientTape"
] | [((64, 96), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'SEED'}), '(seed=SEED)\n', (85, 96), True, 'import numpy as np\n'), ((253, 281), 'tensorflow.cast', 'tf.cast', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (260, 281), True, 'import tensorflow as tf\n'), ((300, 342), 'tensorflow.random.normal', 'tf.random.normal', (['[2, 1]'], {'stddev': '(1)', 'seed': '(1)'}), '([2, 1], stddev=1, seed=1)\n', (316, 342), True, 'import tensorflow as tf\n'), ((407, 424), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (422, 424), True, 'import tensorflow as tf\n'), ((446, 462), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (455, 462), True, 'import tensorflow as tf\n'), ((497, 514), 'tensorflow.square', 'tf.square', (['(y_ - y)'], {}), '(y_ - y)\n', (506, 514), True, 'import tensorflow as tf\n')] |
import glob
import os
import numpy as np
import torch
from PIL import Image
from skimage.transform import resize
from torch.utils.data import Dataset
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
class ImageFolder(Dataset):
    """Dataset over every file in a folder, yielding square-padded,
    resized, channels-first float tensors together with the file path."""

    def __init__(self, folder_path, img_size=416):
        self.files = sorted(glob.glob('%s/*.*' % folder_path))
        self.img_shape = (img_size, img_size)

    def __getitem__(self, index):
        path = self.files[index % len(self.files)]
        # Load the image as an H x W x C array.
        image = np.array(Image.open(path))
        height, width, _ = image.shape
        diff = np.abs(height - width)
        # Split the size difference between the two sides of the short axis.
        before, after = diff // 2, diff - diff // 2
        if height <= width:
            padding = ((before, after), (0, 0), (0, 0))
        else:
            padding = ((0, 0), (before, after), (0, 0))
        # Pad to a square with mid-gray, then scale into [0, 1].
        tensor = np.pad(image, padding, 'constant', constant_values=127.5) / 255.
        tensor = resize(tensor, (*self.img_shape, 3), mode='reflect')
        tensor = np.transpose(tensor, (2, 0, 1))  # HWC -> CHW
        return path, torch.from_numpy(tensor).float()

    def __len__(self):
        return len(self.files)
class ListDataset(Dataset):
    """Detection dataset driven by a text file listing one image path per line.

    Label files are derived from each image path by swapping 'images' ->
    'labels' and the image extension -> '.txt'. Each label row is assumed to
    be (class, x_center, y_center, width, height) with coordinates normalized
    to the unpadded image — TODO confirm against the label-generation code.
    """
    def __init__(self, list_path, img_size=416):
        with open(list_path, 'r') as file:
            self.img_files = file.readlines()
        self.label_files = [path.replace('images', 'labels').replace('.png', '.txt').replace('.jpg', '.txt') for path in self.img_files]
        self.img_shape = (img_size, img_size)
        # Fixed number of label rows per image so batches can be stacked.
        self.max_objects = 50
    def __getitem__(self, index):
        img_path = self.img_files[index % len(self.img_files)].rstrip()
        img = np.array(Image.open(img_path))
        # Handles images with less than three channels:
        # skip forward until an H x W x C image is found.
        while len(img.shape) != 3:
            index += 1
            img_path = self.img_files[index % len(self.img_files)].rstrip()
            img = np.array(Image.open(img_path))
        h, w, _ = img.shape
        dim_diff = np.abs(h - w)
        # Upper (left) and lower (right) padding
        pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
        # Determine padding: pad the shorter axis to make the image square.
        pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))
        # Add padding (mid-gray) and scale pixel values into [0, 1].
        input_img = np.pad(img, pad, 'constant', constant_values=128) / 255.
        padded_h, padded_w, _ = input_img.shape
        # Resize and normalize
        input_img = resize(input_img, (*self.img_shape, 3), mode='reflect')
        # Channels-first
        input_img = np.transpose(input_img, (2, 0, 1))
        # As pytorch tensor
        input_img = torch.from_numpy(input_img).float()
        #---------
        #  Label
        #---------
        label_path = self.label_files[index % len(self.img_files)].rstrip()
        labels = None
        if os.path.exists(label_path):
            labels = np.loadtxt(label_path).reshape(-1, 5)
            # Extract coordinates for unpadded + unscaled image:
            # convert normalized (center, size) to absolute corner coordinates.
            x1 = w * (labels[:, 1] - labels[:, 3]/2)
            y1 = h * (labels[:, 2] - labels[:, 4]/2)
            x2 = w * (labels[:, 1] + labels[:, 3]/2)
            y2 = h * (labels[:, 2] + labels[:, 4]/2)
            # Adjust for added padding (shift by the top/left pad amounts)
            x1 += pad[1][0]
            y1 += pad[0][0]
            x2 += pad[1][0]
            y2 += pad[0][0]
            # Calculate ratios from coordinates:
            # re-normalize centers and sizes to the padded square image.
            labels[:, 1] = ((x1 + x2) / 2) / padded_w
            labels[:, 2] = ((y1 + y2) / 2) / padded_h
            labels[:, 3] *= w / padded_w
            labels[:, 4] *= h / padded_h
        # Fill matrix: pad/truncate to exactly max_objects rows.
        filled_labels = np.zeros((self.max_objects, 5))
        if labels is not None:
            filled_labels[range(len(labels))[:self.max_objects]] = labels[:self.max_objects]
        filled_labels = torch.from_numpy(filled_labels)
        return img_path, input_img, filled_labels
    def __len__(self):
        return len(self.img_files)
| [
"numpy.pad",
"numpy.abs",
"numpy.zeros",
"numpy.transpose",
"os.path.exists",
"PIL.Image.open",
"skimage.transform.resize",
"numpy.loadtxt",
"glob.glob",
"torch.from_numpy"
] | [((619, 632), 'numpy.abs', 'np.abs', (['(h - w)'], {}), '(h - w)\n', (625, 632), True, 'import numpy as np\n'), ((1014, 1069), 'skimage.transform.resize', 'resize', (['input_img', '(*self.img_shape, 3)'], {'mode': '"""reflect"""'}), "(input_img, (*self.img_shape, 3), mode='reflect')\n", (1020, 1069), False, 'from skimage.transform import resize\n'), ((1115, 1149), 'numpy.transpose', 'np.transpose', (['input_img', '(2, 0, 1)'], {}), '(input_img, (2, 0, 1))\n', (1127, 1149), True, 'import numpy as np\n'), ((2145, 2158), 'numpy.abs', 'np.abs', (['(h - w)'], {}), '(h - w)\n', (2151, 2158), True, 'import numpy as np\n'), ((2586, 2641), 'skimage.transform.resize', 'resize', (['input_img', '(*self.img_shape, 3)'], {'mode': '"""reflect"""'}), "(input_img, (*self.img_shape, 3), mode='reflect')\n", (2592, 2641), False, 'from skimage.transform import resize\n'), ((2687, 2721), 'numpy.transpose', 'np.transpose', (['input_img', '(2, 0, 1)'], {}), '(input_img, (2, 0, 1))\n', (2699, 2721), True, 'import numpy as np\n'), ((2973, 2999), 'os.path.exists', 'os.path.exists', (['label_path'], {}), '(label_path)\n', (2987, 2999), False, 'import os\n'), ((3771, 3802), 'numpy.zeros', 'np.zeros', (['(self.max_objects, 5)'], {}), '((self.max_objects, 5))\n', (3779, 3802), True, 'import numpy as np\n'), ((3951, 3982), 'torch.from_numpy', 'torch.from_numpy', (['filled_labels'], {}), '(filled_labels)\n', (3967, 3982), False, 'import torch\n'), ((332, 365), 'glob.glob', 'glob.glob', (["('%s/*.*' % folder_path)"], {}), "('%s/*.*' % folder_path)\n", (341, 365), False, 'import glob\n'), ((550, 570), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (560, 570), False, 'from PIL import Image\n'), ((904, 955), 'numpy.pad', 'np.pad', (['img', 'pad', '"""constant"""'], {'constant_values': '(127.5)'}), "(img, pad, 'constant', constant_values=127.5)\n", (910, 955), True, 'import numpy as np\n'), ((1836, 1856), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', 
(1846, 1856), False, 'from PIL import Image\n'), ((2430, 2479), 'numpy.pad', 'np.pad', (['img', 'pad', '"""constant"""'], {'constant_values': '(128)'}), "(img, pad, 'constant', constant_values=128)\n", (2436, 2479), True, 'import numpy as np\n'), ((1198, 1225), 'torch.from_numpy', 'torch.from_numpy', (['input_img'], {}), '(input_img)\n', (1214, 1225), False, 'import torch\n'), ((2075, 2095), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2085, 2095), False, 'from PIL import Image\n'), ((2770, 2797), 'torch.from_numpy', 'torch.from_numpy', (['input_img'], {}), '(input_img)\n', (2786, 2797), False, 'import torch\n'), ((3022, 3044), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {}), '(label_path)\n', (3032, 3044), True, 'import numpy as np\n')] |
import numpy as np
import ruptures as rpt
from sss_object_detection.consts import ObjectID
class CPDetector:
    """Change point detector using window sliding for segmentation"""

    def __init__(self):
        self.buoy_width = 19
        self.min_mean_diff_ratio = 1.55

    def detect(self, ping):
        """Detection returns a dictionary with key being ObjectID and
        value being a dictionary of position and confidence of the
        detection."""
        nadir_idx = self._detect_nadir(ping)
        rope_hit = self._detect_rope(ping, nadir_idx)
        buoy_hit = self._detect_buoy(ping, nadir_idx)
        results = {ObjectID.NADIR: {'pos': nadir_idx, 'confidence': .9}}
        if rope_hit:
            bkps, confidence = rope_hit
            results[ObjectID.ROPE] = {'pos': bkps[0], 'confidence': confidence}
        if buoy_hit:
            bkps, confidence = buoy_hit
            results[ObjectID.BUOY] = {'pos': bkps[0], 'confidence': confidence}
        return results

    def _compare_region_with_surrounding(self, ping, bkps, window_size=50):
        """Ratio of the mean intensity inside [bkps[0], bkps[1]) to the
        mean of the windows just before and just after it."""
        inside = np.mean(ping[bkps[0]:bkps[1]])
        before = ping[max(bkps[0] - window_size, 0):bkps[0]]
        after = ping[bkps[1] + 1:min(bkps[1] + window_size, ping.shape[0])]
        outside = (np.mean(before) + np.mean(after)) / 2
        return inside / outside

    def _detect_rope(self, ping, nadir_idx):
        """Given the tentative nadir annotation, provide a tentative rope
        annotation by segmenting the nadir region. Return None if the
        break point detected is unlikely to be a rope."""
        single_bkp = self._window_sliding_segmentation(ping=ping,
                                                       start_idx=40,
                                                       end_idx=nadir_idx,
                                                       width=4,
                                                       n_bkps=1)
        # A rope is thin: examine a 2-sample region around the break point.
        bkps = [single_bkp[0] - 1, single_bkp[0] + 1]
        ratio = self._compare_region_with_surrounding(ping, bkps)
        if ratio < self.min_mean_diff_ratio:
            return None
        return bkps, 1 / ratio

    def _detect_buoy(self, ping, nadir_idx):
        """Given the tentative nadir annotation, provide a tentative buoy
        detection by segmenting the nadir region. Return None if no
        buoy detected."""
        bkps = self._window_sliding_segmentation(ping=ping,
                                                 start_idx=40,
                                                 end_idx=nadir_idx,
                                                 width=self.buoy_width,
                                                 n_bkps=2)
        # Reject segments whose width is implausible for a buoy.
        segment_width = bkps[1] - bkps[0]
        if segment_width > self.buoy_width * 2 or segment_width < self.buoy_width * .5:
            return None
        ratio = self._compare_region_with_surrounding(ping, bkps)
        if ratio < self.min_mean_diff_ratio:
            return None
        return bkps, 1 / ratio

    def _detect_nadir(self, ping):
        """Use window sliding segmentation to provide a tentative
        nadir location annotation. Return the detected nadir index."""
        bkps = self._window_sliding_segmentation(ping=ping,
                                                 n_bkps=1,
                                                 start_idx=100,
                                                 end_idx=ping.shape[0],
                                                 width=100)
        return bkps[0]

    def _window_sliding_segmentation(self, ping, n_bkps, start_idx, end_idx,
                                     width):
        """Use the window sliding method to segment ping[start_idx:end_idx]
        into (n_bkps + 1) segments. Return the suggested break points,
        shifted back into full-array indices."""
        algo = rpt.Window(width=width, model='l2').fit(ping[start_idx:end_idx])
        return [bkp + start_idx for bkp in algo.predict(n_bkps=n_bkps)]
| [
"ruptures.Window",
"numpy.mean"
] | [((1124, 1154), 'numpy.mean', 'np.mean', (['ping[bkps[0]:bkps[1]]'], {}), '(ping[bkps[0]:bkps[1]])\n', (1131, 1154), True, 'import numpy as np\n'), ((1374, 1394), 'numpy.mean', 'np.mean', (['prev_window'], {}), '(prev_window)\n', (1381, 1394), True, 'import numpy as np\n'), ((1397, 1417), 'numpy.mean', 'np.mean', (['post_window'], {}), '(post_window)\n', (1404, 1417), True, 'import numpy as np\n'), ((4087, 4122), 'ruptures.Window', 'rpt.Window', ([], {'width': 'width', 'model': '"""l2"""'}), "(width=width, model='l2')\n", (4097, 4122), True, 'import ruptures as rpt\n')] |
import numpy as np
def calculate_q(p_seq):
    """Convert p-values to Benjamini-Hochberg q-values (adjusted p-values).

    Parameters
    ----------
    p_seq : sequence of float
        Raw p-values.

    Returns
    -------
    numpy.ndarray
        q-values in the same order as ``p_seq``. Empty input yields an
        empty array (the previous implementation raised IndexError).
    """
    p_arr = np.asarray(p_seq, dtype=float)
    n_genes = p_arr.size
    order = np.argsort(p_arr)
    # Raw BH estimate: p * n / rank (rank is the 1-based sorted position).
    q_sorted = p_arr[order] * n_genes / np.arange(1, n_genes + 1)
    # Enforce monotonicity: each q-value is the minimum over itself and all
    # q-values at larger p-values (vectorized running minimum from the right,
    # replacing the previous explicit reversed Python loop).
    q_sorted = np.minimum.accumulate(q_sorted[::-1])[::-1]
    # Undo the sort to match the input order.
    q_arr = np.empty_like(q_sorted)
    q_arr[order] = q_sorted
    return q_arr
if __name__ == '__main__':
    # Smoke test: q-values for five random p-values.
    pvals = np.random.rand(5)
    qvals = calculate_q(pvals)
    print('p-values', pvals)
    print('q-values', qvals)
| [
"numpy.argsort",
"numpy.random.rand",
"numpy.array",
"numpy.arange"
] | [((103, 118), 'numpy.array', 'np.array', (['p_seq'], {}), '(p_seq)\n', (111, 118), True, 'import numpy as np\n'), ((168, 185), 'numpy.argsort', 'np.argsort', (['p_arr'], {}), '(p_arr)\n', (178, 185), True, 'import numpy as np\n'), ((562, 579), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (576, 579), True, 'import numpy as np\n'), ((447, 463), 'numpy.array', 'np.array', (['q_list'], {}), '(q_list)\n', (455, 463), True, 'import numpy as np\n'), ((272, 290), 'numpy.arange', 'np.arange', (['n_genes'], {}), '(n_genes)\n', (281, 290), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plot
# Load MNIST train/test splits as tensors (no download; data expected on disk).
train_dataset = dsets.MNIST(root='./data/MNIST', train=True,
                            transform=transforms.ToTensor(), download=False)
test_dataset = dsets.MNIST(root='./data/MNIST', train=False,
                           transform=transforms.ToTensor())
# Show one randomly chosen training digit together with its label.
sample_idx = int(np.random.rand(1) * len(train_dataset))
preview = train_dataset[sample_idx][0].numpy().reshape(28, 28)
plot.imshow(preview, cmap='gray')
plot.xlabel('Its ' + str(int(train_dataset[sample_idx][1])))
plot.show()
num_epochs = 100
class LogisticRegressionModel(nn.Module):
    """Fully connected network: input -> 1500 -> 1000 -> output.

    Despite the name, this is a two-hidden-layer MLP with ReLU activations;
    the last layer emits raw logits (no activation).
    """

    def __init__(self, input_dim, output_dim):
        super(LogisticRegressionModel, self).__init__()
        self.linear1 = nn.Linear(input_dim, 1500)
        self.linear2 = nn.Linear(1500, 1000)
        self.linear3 = nn.Linear(1000, output_dim)
        self.sigmoid = nn.Sigmoid()  # unused in forward; kept for attribute compatibility
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.linear1(x))
        hidden = self.relu(self.linear2(hidden))
        return self.linear3(hidden)
# Train the MLP on MNIST one sample at a time with plain SGD.
input_dim = 28*28
output_dim = 10
model = LogisticRegressionModel(input_dim, output_dim)
if torch.cuda.is_available():
    model.cuda()
criterion = torch.nn.CrossEntropyLoss()
learning_rate = 0.004
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
iteration = 0  # renamed from `iter`, which shadowed the builtin
for epoch in range(num_epochs):
    for i in range(len(train_dataset)):
        if torch.cuda.is_available():
            images = Variable(train_dataset[i][0].view(-1, 28 * 28).cuda())
            labels = Variable(train_dataset[i][1].view(1).cuda())
        else:
            images = Variable(train_dataset[i][0].view(-1, 28 * 28))
            labels = Variable(train_dataset[i][1].view(1))
        # Clear gradients w.r.t. parameters
        optimizer.zero_grad()
        # Forward pass to get output/logits
        outputs = model(images)
        # Calculate Loss: softmax --> cross entropy loss
        loss = criterion(outputs, labels)
        # Getting gradients w.r.t. parameters
        loss.backward()
        # Updating parameters
        optimizer.step()
        iteration += 1
        if iteration % 100 == 0:
            # .item() extracts the scalar loss; indexing a 0-dim tensor via
            # .data[0] raises on modern PyTorch.
            print('Iteration: {}. Loss: {} '.format(iteration, loss.item()))
        if iteration % 500 == 0:
            # Evaluate accuracy over the whole test set.
            correct = 0
            total = 0
            for j in range(len(test_dataset)):
                if torch.cuda.is_available():
                    images = Variable(test_dataset[j][0].view(-1, 28 * 28).cuda())
                    labels = Variable(test_dataset[j][1].view(1).cuda())
                else:
                    images = Variable(test_dataset[j][0].view(-1, 28 * 28))
                    labels = Variable(test_dataset[j][1].view(1))
                # Forward pass only to get logits/output
                outputs = model(images)
                # Predicted class is the argmax over the logits.
                _, predicted = torch.max(outputs.data, 1)
                total += 1
                correct += (predicted.cpu() == labels.cpu()).sum()
            accuracy = 100 * correct / total
            # Fixed stray '%' at the start of the format string.
            print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iteration, loss.item(), accuracy))
| [
"matplotlib.pyplot.show",
"torch.nn.ReLU",
"matplotlib.pyplot.imshow",
"torch.nn.CrossEntropyLoss",
"torch.nn.Sigmoid",
"torch.cuda.is_available",
"torch.max",
"torch.nn.Linear",
"numpy.random.rand",
"torchvision.transforms.ToTensor"
] | [((699, 733), 'matplotlib.pyplot.imshow', 'plot.imshow', (['show_img'], {'cmap': '"""gray"""'}), "(show_img, cmap='gray')\n", (710, 733), True, 'import matplotlib.pyplot as plot\n'), ((799, 810), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (808, 810), True, 'import matplotlib.pyplot as plot\n'), ((1480, 1505), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1503, 1505), False, 'import torch\n'), ((1537, 1564), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1562, 1564), False, 'import torch\n'), ((332, 353), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (351, 353), True, 'import torchvision.transforms as transforms\n'), ((525, 546), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (544, 546), True, 'import torchvision.transforms as transforms\n'), ((594, 611), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (608, 611), True, 'import numpy as np\n'), ((998, 1024), 'torch.nn.Linear', 'nn.Linear', (['input_dim', '(1500)'], {}), '(input_dim, 1500)\n', (1007, 1024), True, 'import torch.nn as nn\n'), ((1048, 1069), 'torch.nn.Linear', 'nn.Linear', (['(1500)', '(1000)'], {}), '(1500, 1000)\n', (1057, 1069), True, 'import torch.nn as nn\n'), ((1093, 1120), 'torch.nn.Linear', 'nn.Linear', (['(1000)', 'output_dim'], {}), '(1000, output_dim)\n', (1102, 1120), True, 'import torch.nn as nn\n'), ((1144, 1156), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1154, 1156), True, 'import torch.nn as nn\n'), ((1177, 1186), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1184, 1186), True, 'import torch.nn as nn\n'), ((1747, 1772), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1770, 1772), False, 'import torch\n'), ((2773, 2798), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2796, 2798), False, 'import torch\n'), ((3308, 3334), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), 
'(outputs.data, 1)\n', (3317, 3334), False, 'import torch\n')] |
import torch
from datasetsFunctions import Maps, pad
import argparse
from models import segmentationModel
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import json
def main():
    """Segment every map of a city tile-by-tile with a pretrained model.

    Loads the segmentation network from `saves/`, slides a 512x512 window
    over each map in the dataset, and saves the stitched segmentation as a
    .npy file under `segmentedMaps/`.
    """
    parser = argparse.ArgumentParser(description='Tree Generation')
    parser.add_argument('--batchSize', required=False, type=int, default = 1)
    parser.add_argument('--datasetPath', required=False, type=str, default = f'C:/Users/hx21262/MAPHIS/datasets')
    parser.add_argument('--fileFormat', required=False, type=str, default = '.jpg')
    parser.add_argument('--feature', required=False, type=str, default = '')
    parser.add_argument('--cityName', required=False, type=str, default = 'Luton')
    parser.add_argument('--numWorkers', required=False, type=int, default = '0')
    args = parser.parse_args()
    datasetPath = Path(args.datasetPath)
    # Output directory for the stitched segmentations.
    Path('segmentedMaps').mkdir(parents=True, exist_ok=True)
    transform = pad()
    trainSet = Maps(datasetPath, args.cityName, fileFormat=args.fileFormat, transform=transform)
    trainDataloader = torch.utils.data.DataLoader(trainSet, batch_size=args.batchSize,
                                            shuffle=True, num_workers=args.numWorkers)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Restore model hyper-parameters and weights from the saves folder.
    tilesSegmenterParameters = json.load(open(f'saves/SegmentModelParameters.json'))
    tilesSegmenter = segmentationModel(tilesSegmenterParameters)
    tilesSegmenter.load_state_dict(torch.load(f'saves/SegmentModelStateDict.pth'))
    tilesSegmenter.to(device)
    for i, data in enumerate(trainDataloader):
        print(f'Map {i} / {len(trainDataloader)}')
        map = data['map'][0].float().to(device)
        # Full-map output buffer; the map is assumed to be 7680 x 11776
        # pixels (15 x 23 tiles of 512) — TODO confirm for all cities.
        segmented = np.zeros((7680,11776))
        kS = 512
        nRows = 15
        nCCols= 23
        with torch.no_grad():
            for i in range(nRows):
                print(f'Row {i} / {nRows}')
                for j in range(nCCols):
                    # Segment one 512x512 tile and paste it into the buffer.
                    thumbnail = map[:,:,kS*i:kS*(i+1), kS*j:kS*(j+1)]
                    segmented[kS*i:kS*(i+1), kS*j:kS*(j+1)] = tilesSegmenter(thumbnail).detach().cpu()
            if i%10==0:
                plt.imshow(segmented)
                plt.title('segmented')
                plt.show()
        np.save(f'segmentedMaps/{data["mapName"][0].split(".")[0]}_segmented.npy', segmented)
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.imshow",
"torch.load",
"numpy.zeros",
"pathlib.Path",
"datasetsFunctions.pad",
"models.segmentationModel",
"torch.cuda.is_available",
"torch.no_grad",
"datasetsFu... | [((235, 289), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tree Generation"""'}), "(description='Tree Generation')\n", (258, 289), False, 'import argparse\n'), ((866, 888), 'pathlib.Path', 'Path', (['args.datasetPath'], {}), '(args.datasetPath)\n', (870, 888), False, 'from pathlib import Path\n'), ((974, 979), 'datasetsFunctions.pad', 'pad', ([], {}), '()\n', (977, 979), False, 'from datasetsFunctions import Maps, pad\n'), ((996, 1082), 'datasetsFunctions.Maps', 'Maps', (['datasetPath', 'args.cityName'], {'fileFormat': 'args.fileFormat', 'transform': 'transform'}), '(datasetPath, args.cityName, fileFormat=args.fileFormat, transform=\n transform)\n', (1000, 1082), False, 'from datasetsFunctions import Maps, pad\n'), ((1101, 1213), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainSet'], {'batch_size': 'args.batchSize', 'shuffle': '(True)', 'num_workers': 'args.numWorkers'}), '(trainSet, batch_size=args.batchSize, shuffle=\n True, num_workers=args.numWorkers)\n', (1128, 1213), False, 'import torch\n'), ((1443, 1486), 'models.segmentationModel', 'segmentationModel', (['tilesSegmenterParameters'], {}), '(tilesSegmenterParameters)\n', (1460, 1486), False, 'from models import segmentationModel\n'), ((1523, 1569), 'torch.load', 'torch.load', (['f"""saves/SegmentModelStateDict.pth"""'], {}), "(f'saves/SegmentModelStateDict.pth')\n", (1533, 1569), False, 'import torch\n'), ((1774, 1797), 'numpy.zeros', 'np.zeros', (['(7680, 11776)'], {}), '((7680, 11776))\n', (1782, 1797), True, 'import numpy as np\n'), ((894, 915), 'pathlib.Path', 'Path', (['"""segmentedMaps"""'], {}), "('segmentedMaps')\n", (898, 915), False, 'from pathlib import Path\n'), ((1295, 1320), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1318, 1320), False, 'import torch\n'), ((1869, 1884), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1882, 1884), False, 'import torch\n'), ((2237, 2258), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['segmented'], {}), '(segmented)\n', (2247, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2280, 2302), 'matplotlib.pyplot.title', 'plt.title', (['"""segmented"""'], {}), "('segmented')\n", (2289, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2332, 2334), True, 'import matplotlib.pyplot as plt\n')] |
import random
import numpy as np
import cv2 as cv
# Build the working frame: the sample image stacked on top of itself.
frame1 = cv.imread(cv.samples.findFile('lena.jpg'))
if frame1 is None:
    print("image not found")
    exit()
frame = np.vstack((frame1, frame1))
# LBF facemark model; print a download hint if the weights are missing.
facemark = cv.face.createFacemarkLBF()
try:
    facemark.loadModel(cv.samples.findFile('lbfmodel.yaml'))
except cv.error:
    print("Model not found\nlbfmodel.yaml can be download at")
    print("https://raw.githubusercontent.com/kurnianggoro/GSOC2017/master/data/lbfmodel.yaml")
# Face detector used to seed the landmark fitting.
cascade = cv.CascadeClassifier(cv.samples.findFile('lbpcascade_frontalface_improved.xml'))
if cascade.empty():
    print("cascade not found")
    exit()
faces = cascade.detectMultiScale(frame, 1.05, 3, cv.CASCADE_SCALE_IMAGE, (30, 30))
ok, landmarks = facemark.fit(frame, faces=faces)
cv.imshow("Image", frame)
# Draw each face's landmarks in a random color.
for marks in landmarks:
    color = (random.randint(0, 255),
             random.randint(0, 255),
             random.randint(0, 255))
    cv.face.drawFacemarks(frame, marks, color)
cv.imshow("Image Landmarks", frame)
cv.waitKey()
| [
"random.randint",
"cv2.waitKey",
"cv2.face.createFacemarkLBF",
"cv2.samples.findFile",
"cv2.face.drawFacemarks",
"cv2.imshow",
"numpy.vstack"
] | [((170, 197), 'numpy.vstack', 'np.vstack', (['(frame1, frame1)'], {}), '((frame1, frame1))\n', (179, 197), True, 'import numpy as np\n'), ((208, 235), 'cv2.face.createFacemarkLBF', 'cv.face.createFacemarkLBF', ([], {}), '()\n', (233, 235), True, 'import cv2 as cv\n'), ((764, 789), 'cv2.imshow', 'cv.imshow', (['"""Image"""', 'frame'], {}), "('Image', frame)\n", (773, 789), True, 'import cv2 as cv\n'), ((977, 1012), 'cv2.imshow', 'cv.imshow', (['"""Image Landmarks"""', 'frame'], {}), "('Image Landmarks', frame)\n", (986, 1012), True, 'import cv2 as cv\n'), ((1013, 1025), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (1023, 1025), True, 'import cv2 as cv\n'), ((70, 101), 'cv2.samples.findFile', 'cv.samples.findFile', (['"""lena.jpg"""'], {}), "('lena.jpg')\n", (89, 101), True, 'import cv2 as cv\n'), ((508, 566), 'cv2.samples.findFile', 'cv.samples.findFile', (['"""lbpcascade_frontalface_improved.xml"""'], {}), "('lbpcascade_frontalface_improved.xml')\n", (527, 566), True, 'import cv2 as cv\n'), ((932, 976), 'cv2.face.drawFacemarks', 'cv.face.drawFacemarks', (['frame', 'marks', 'couleur'], {}), '(frame, marks, couleur)\n', (953, 976), True, 'import cv2 as cv\n'), ((264, 300), 'cv2.samples.findFile', 'cv.samples.findFile', (['"""lbfmodel.yaml"""'], {}), "('lbfmodel.yaml')\n", (283, 300), True, 'import cv2 as cv\n'), ((829, 851), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (843, 851), False, 'import random\n'), ((867, 889), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (881, 889), False, 'import random\n'), ((905, 927), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (919, 927), False, 'import random\n')] |
# import external modules
import numpy, os
# Add Exasim to Python search path
# Locate the Exasim root from the current working directory and make its
# modules importable by executing the bundled setpath script.
cdir = os.getcwd(); ii = cdir.find("Exasim");
exec(open(cdir[0:(ii+6)] + "/Installation/setpath.py").read());
# import internal modules (only importable after setpath.py has run)
import Preprocessing, Postprocessing, Gencode, Mesh
# Create pde object and mesh object
pde,mesh = Preprocessing.initializeexasim();
# Define a PDE model: governing equations and boundary conditions
pde['model'] = "ModelC";       # ModelC, ModelD, ModelW
pde['modelfile'] = "pdemodel"; # name of a file defining the PDE model
# Choose computing platform and set number of processors
#pde['platform'] = "gpu";   # choose this option if NVIDIA GPUs are available
pde['mpiprocs'] = 1;        # number of MPI processors
# Set discretization parameters, physical parameters, and solver parameters
pde['porder'] = 3;          # polynomial degree
pde['torder'] = 2;          # time-stepping order of accuracy
pde['nstage'] = 2;          # time-stepping number of stages
pde['dt'] = 0.02*numpy.ones(200);   # time step sizes
pde['soltime'] = numpy.arange(10,pde['dt'].size+1,10); # steps at which solution are collected
pde['visdt'] = 1.0; # visualization timestep size
gam = 1e4;                      # gravity
pde['physicsparam'] = [gam];
pde['tau'] = numpy.array([1.0]);  # DG stabilization parameter
pde['GMRESrestart']=15;           # number of GMRES restarts
pde['linearsolvertol']=1e-12;     # GMRES tolerance
pde['linearsolveriter']=16;       # number of GMRES iterations
pde['precMatrixType']=2;          # preconditioning type
pde['NLtol'] = 1e-12;             # Newton tolerance
pde['NLiter']=2;                  # Newton iterations
# create a mesh of 64 by 64 quads on a square domain
mesh['p'], mesh['t'] = Mesh.SquareMesh(64,64,1)[0:2];
# rescale the unit square onto [-2*pi, 2*pi] x [-2*pi, 2*pi]
pi = numpy.pi;
mesh['p'] = (4*pi)*mesh['p'] - 2*pi;
# expressions for domain boundaries (bottom, right, top, left)
mesh['boundaryexpr'] = [lambda p: (p[1,:] < -2*pi+1e-3), lambda p: (p[0,:] > 2*pi-1e-3), lambda p: (p[1,:] > 2*pi-1e-3), lambda p: (p[0,:] < -2*pi+1e-3)];
mesh['boundarycondition'] = numpy.array([1, 1, 1, 1]); # Set boundary condition for each boundary
# pair opposite boundaries to make the domain doubly periodic
mesh['periodicexpr'] = [[2, lambda p: p[1,:], 4, lambda p: p[1,:]], [1, lambda p: p[0,:], 3, lambda p: p[0,:]]];
# call exasim to generate and run C++ code to solve the PDE model
sol, pde, mesh = Postprocessing.exasim(pde,mesh)[0:3];
| [
"os.getcwd",
"numpy.ones",
"Preprocessing.initializeexasim",
"Postprocessing.exasim",
"numpy.arange",
"numpy.array",
"Mesh.SquareMesh"
] | [((86, 97), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (95, 97), False, 'import numpy, os\n'), ((316, 348), 'Preprocessing.initializeexasim', 'Preprocessing.initializeexasim', ([], {}), '()\n', (346, 348), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n'), ((1053, 1093), 'numpy.arange', 'numpy.arange', (['(10)', "(pde['dt'].size + 1)", '(10)'], {}), "(10, pde['dt'].size + 1, 10)\n", (1065, 1093), False, 'import numpy, os\n'), ((1266, 1284), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (1277, 1284), False, 'import numpy, os\n'), ((2040, 2065), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (2051, 2065), False, 'import numpy, os\n'), ((999, 1014), 'numpy.ones', 'numpy.ones', (['(200)'], {}), '(200)\n', (1009, 1014), False, 'import numpy, os\n'), ((1738, 1764), 'Mesh.SquareMesh', 'Mesh.SquareMesh', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (1753, 1764), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n'), ((2308, 2340), 'Postprocessing.exasim', 'Postprocessing.exasim', (['pde', 'mesh'], {}), '(pde, mesh)\n', (2329, 2340), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F
from datasets.two_dim.NumpyDataLoader import NumpyDataSet
from trixi.experiment.pytorchexperiment import PytorchExperiment
from networks.RecursiveUNet import UNet
from models.fcn8s import FCN8s
from models.fcn32s import FCN32s
from models.fcn8s import FCN8s
from loss_functions.dice_loss import SoftDiceLoss
from loss_functions.metrics import dice_pytorch
class FCNExperiment(PytorchExperiment):
    """Segmentation experiment built on trixi's PytorchExperiment.

    Despite the FCN-flavoured name, `setup()` instantiates a recursive UNet
    (https://arxiv.org/abs/1505.04597); the FCN imports at the top of the file
    are unused here.  It is optimized to work with the provided NumpyDataLoader.
    The basic life cycle is the same as PytorchExperiment:

        setup()
        (--> Automatically restore values if a previous checkpoint is given)
        prepare()
        for epoch in n_epochs:
            train()
            validate()
            (--> save current checkpoint)
        end()
    """

    def setup(self):
        """Load the CV split, build data loaders, model, losses and optimizer."""
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        # Keys select which .npy files belong to each partition of this fold.
        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(self.config.device if torch.cuda.is_available() else 'cpu') #

        self.train_data_loader = NumpyDataSet(self.config.scaled_image_64_dir, target_size=64, batch_size=self.config.batch_size,
                                              keys=tr_keys, do_reshuffle=True)
        self.val_data_loader = NumpyDataSet(self.config.scaled_image_64_dir, target_size=64, batch_size=self.config.batch_size,
                                            keys=val_keys, mode="val", do_reshuffle=True)
        self.test_data_loader = NumpyDataSet(self.config.scaled_image_64_dir, target_size=64, batch_size=self.config.batch_size,
                                             keys=test_keys, mode="test", do_reshuffle=False)
        self.model = UNet(num_classes=self.config.num_classes, num_downs=3)
        self.model.to(self.device)

        # We use a combination of DICE-loss and CE-Loss in this example.
        # This proved good in the medical segmentation decathlon.
        self.dice_loss = SoftDiceLoss(batch_dice=True)  # Apply softmax before the Dice loss!
        # weight = torch.tensor([1, 30, 30]).float().to(self.device)
        self.ce_loss = torch.nn.CrossEntropyLoss()  # No softmax for the CE loss -> torch already applies it internally!
        # self.dice_pytorch = dice_pytorch(self.config.num_classes)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate)
        # self.optimizer = optim.SGD(self.model.parameters(), lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print('checkpoint_dir is empty, please provide directory to load checkpoint.')
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir, save_types=("model"))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')

    def train(self, epoch):
        """Run one training epoch: CE + soft-Dice loss, periodic logging/plots."""
        self.elog.print('=====TRAIN=====')
        self.model.train()

        data = None
        batch_counter = 0
        for data_batch in self.train_data_loader:

            self.optimizer.zero_grad()

            # Shape of data_batch = [1, b, c, w, h]
            # Desired shape = [b, c, w, h]
            # Move data and target to the GPU
            data = data_batch['data'][0].float().to(self.device)
            target = data_batch['seg'][0].long().to(self.device)

            max_value = target.max()
            min_value = target.min()

            pred = self.model(data)
            pred_softmax = F.softmax(pred, dim=1)  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.
            pred_image = torch.argmax(pred_softmax, dim=1)
            t = target.squeeze()
            # loss = self.dice_pytorch(outputs=pred_image, labels=target)
            loss = self.ce_loss(pred, target.squeeze()) + self.dice_loss(pred_softmax, target.squeeze())
            # loss = self.dice_loss(pred_softmax, target.squeeze())
            loss.backward()
            self.optimizer.step()

            # Some logging and plotting
            if (batch_counter % self.config.plot_freq) == 0:
                self.elog.print('Epoch: %d Loss: %.4f' % (self._epoch_idx, loss))

                self.add_result(value=loss.item(), name='Train_Loss', tag='Loss', counter=epoch + (batch_counter / self.train_data_loader.data_loader.num_batches))

                self.clog.show_image_grid(data.float(), name="data", normalize=True, scale_each=True, n_iter=epoch)
                self.clog.show_image_grid(target.float(), name="mask", title="Mask", n_iter=epoch)
                self.clog.show_image_grid(torch.argmax(pred.cpu(), dim=1, keepdim=True), name="unt_argmax", title="Unet", n_iter=epoch)
                self.clog.show_image_grid(pred.cpu()[:, 1:2, ], name="unt", normalize=True, scale_each=True, n_iter=epoch)

            batch_counter += 1

        assert data is not None, 'data is None. Please check if your dataloader works properly'

    def validate(self, epoch):
        """Evaluate on the validation split; step the LR scheduler on mean Dice loss."""
        self.elog.print('VALIDATE')
        self.model.eval()

        data = None
        loss_list = []

        with torch.no_grad():
            for data_batch in self.val_data_loader:
                data = data_batch['data'][0].float().to(self.device)
                target = data_batch['seg'][0].long().to(self.device)

                pred = self.model(data)
                # NOTE(review): F.softmax without dim= is deprecated; presumably dim=1 is intended as in train().
                pred_softmax = F.softmax(pred)  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.

                loss = self.dice_loss(pred_softmax, target.squeeze())  #self.ce_loss(pred, target.squeeze())
                loss_list.append(loss.item())

        assert data is not None, 'data is None. Please check if your dataloader works properly'

        self.scheduler.step(np.mean(loss_list))

        self.elog.print('Epoch: %d Loss: %.4f' % (self._epoch_idx, np.mean(loss_list)))

        self.add_result(value=np.mean(loss_list), name='Val_Loss', tag='Loss', counter=epoch+1)

        self.clog.show_image_grid(data.float(), name="data_val", normalize=True, scale_each=True, n_iter=epoch)
        self.clog.show_image_grid(target.float(), name="mask_val", title="Mask", n_iter=epoch)
        self.clog.show_image_grid(torch.argmax(pred.data.cpu(), dim=1, keepdim=True), name="unt_argmax_val", title="Unet", n_iter=epoch)
        self.clog.show_image_grid(pred.data.cpu()[:, 1:2, ], name="unt_val", normalize=True, scale_each=True, n_iter=epoch)

    def test(self):
        """Run the test split, accumulate per-sample Dice scores and dump
        (input, target, softmax prediction) stacks as .npy files."""
        self.model.eval()

        data = None
        dice_array = np.array([0])
        num_of_parameters = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        print("number of parameters:", num_of_parameters)
        with torch.no_grad():
            for data_batch in self.test_data_loader:
                data = data_batch['data'][0].float().to(self.device)
                target = data_batch['seg'][0].long().to(self.device)
                file_dir = data_batch['fnames'] # 8*tuple (a,)
                pred = self.model(data)
                pred_softmax = F.softmax(pred,
                                          dim=1)  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.
                pred_image = torch.argmax(pred_softmax, dim=1)
                dice_result = dice_pytorch(outputs=pred_image, labels=target, N_class =self.config.num_classes)
                dice_loss = self.dice_loss(pred_softmax, target.squeeze())
                ce_loss = self.ce_loss(pred, target.squeeze())
                print('ce_loss:%.4f dice:%s' % (ce_loss.data, dice_result.data))

                data_image = data.data.cpu().numpy()
                pred_image = pred_image.data.cpu().numpy()
                target_image = target.data.cpu().numpy()
                pred_softmax = pred_softmax.data.cpu().numpy()
                dice_result = dice_result.data.cpu().numpy()
                size = np.shape(dice_result)[0]
                # Skip the initial 0 sentinel later by filtering dice_array != 0.
                for i in range(size):
                    dice_array = np.concatenate((dice_array, [dice_result[i]]))

                for k in range(self.config.batch_size):
                    ##save the results
                    # assumes num_classes == 3 and 64x64 inputs — TODO confirm against config
                    pred = pred_softmax[k].reshape((3,64,64))
                    filename = file_dir[k][0][-8:-4]
                    output_dir = os.path.join(self.config.cross_vali_result_all_dir, 'pred_' + self.config.dataset_name + filename )
                    if os.path.exists(output_dir + '.npy'):
                        all_image = np.load(output_dir + '.npy')
                        output = np.concatenate((data_image[k], target_image[k], pred), axis=0).reshape((1, 5, 64, 64))
                        all_image = np.concatenate((all_image, output), axis=0)
                    else:
                        all_image = np.concatenate((data_image[k], target_image[k], pred), axis=0).reshape((1, 5, 64, 64))
                    np.save(output_dir, all_image)
                    # saveName = filenames[k]

        dice_array = dice_array[dice_array != 0]
        print("average dice:", np.average(dice_array))
        print('test_data loading finished')

        assert data is not None, 'data is None. Please check if your dataloader works properly'

        # print('TODO: Implement your test() method here')
| [
"numpy.load",
"torch.argmax",
"numpy.shape",
"pickle.load",
"numpy.mean",
"networks.RecursiveUNet.UNet",
"torch.no_grad",
"os.path.join",
"datasets.two_dim.NumpyDataLoader.NumpyDataSet",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"os.path.exists",
"loss_functions.metrics.dice_pytorch",
"n... | [((2286, 2420), 'datasets.two_dim.NumpyDataLoader.NumpyDataSet', 'NumpyDataSet', (['self.config.scaled_image_64_dir'], {'target_size': '(64)', 'batch_size': 'self.config.batch_size', 'keys': 'tr_keys', 'do_reshuffle': '(True)'}), '(self.config.scaled_image_64_dir, target_size=64, batch_size=\n self.config.batch_size, keys=tr_keys, do_reshuffle=True)\n', (2298, 2420), False, 'from datasets.two_dim.NumpyDataLoader import NumpyDataSet\n'), ((2493, 2640), 'datasets.two_dim.NumpyDataLoader.NumpyDataSet', 'NumpyDataSet', (['self.config.scaled_image_64_dir'], {'target_size': '(64)', 'batch_size': 'self.config.batch_size', 'keys': 'val_keys', 'mode': '"""val"""', 'do_reshuffle': '(True)'}), "(self.config.scaled_image_64_dir, target_size=64, batch_size=\n self.config.batch_size, keys=val_keys, mode='val', do_reshuffle=True)\n", (2505, 2640), False, 'from datasets.two_dim.NumpyDataLoader import NumpyDataSet\n'), ((2712, 2862), 'datasets.two_dim.NumpyDataLoader.NumpyDataSet', 'NumpyDataSet', (['self.config.scaled_image_64_dir'], {'target_size': '(64)', 'batch_size': 'self.config.batch_size', 'keys': 'test_keys', 'mode': '"""test"""', 'do_reshuffle': '(False)'}), "(self.config.scaled_image_64_dir, target_size=64, batch_size=\n self.config.batch_size, keys=test_keys, mode='test', do_reshuffle=False)\n", (2724, 2862), False, 'from datasets.two_dim.NumpyDataLoader import NumpyDataSet\n'), ((2924, 2978), 'networks.RecursiveUNet.UNet', 'UNet', ([], {'num_classes': 'self.config.num_classes', 'num_downs': '(3)'}), '(num_classes=self.config.num_classes, num_downs=3)\n', (2928, 2978), False, 'from networks.RecursiveUNet import UNet\n'), ((3180, 3209), 'loss_functions.dice_loss.SoftDiceLoss', 'SoftDiceLoss', ([], {'batch_dice': '(True)'}), '(batch_dice=True)\n', (3192, 3209), False, 'from loss_functions.dice_loss import SoftDiceLoss\n'), ((3329, 3356), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3354, 3356), False, 'import torch\n'), ((3695, 
3735), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['self.optimizer', '"""min"""'], {}), "(self.optimizer, 'min')\n", (3712, 3735), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((7952, 7965), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7960, 7965), True, 'import numpy as np\n'), ((1979, 1993), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1990, 1993), False, 'import pickle\n'), ((4844, 4866), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (4853, 4866), True, 'import torch.nn.functional as F\n'), ((5011, 5044), 'torch.argmax', 'torch.argmax', (['pred_softmax'], {'dim': '(1)'}), '(pred_softmax, dim=1)\n', (5023, 5044), False, 'import torch\n'), ((6494, 6509), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6507, 6509), False, 'import torch\n'), ((7188, 7206), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (7195, 7206), True, 'import numpy as np\n'), ((8134, 8149), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8147, 8149), False, 'import torch\n'), ((1909, 1944), 'os.path.join', 'os.path.join', (['pkl_dir', '"""splits.pkl"""'], {}), "(pkl_dir, 'splits.pkl')\n", (1921, 1944), False, 'import os\n'), ((2209, 2234), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2232, 2234), False, 'import torch\n'), ((6773, 6788), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {}), '(pred)\n', (6782, 6788), True, 'import torch.nn.functional as F\n'), ((7328, 7346), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (7335, 7346), True, 'import numpy as np\n'), ((8479, 8501), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (8488, 8501), True, 'import torch.nn.functional as F\n'), ((8691, 8724), 'torch.argmax', 'torch.argmax', (['pred_softmax'], {'dim': '(1)'}), '(pred_softmax, dim=1)\n', (8703, 8724), False, 'import torch\n'), ((8755, 8840), 
'loss_functions.metrics.dice_pytorch', 'dice_pytorch', ([], {'outputs': 'pred_image', 'labels': 'target', 'N_class': 'self.config.num_classes'}), '(outputs=pred_image, labels=target, N_class=self.config.num_classes\n )\n', (8767, 8840), False, 'from loss_functions.metrics import dice_pytorch\n'), ((10526, 10548), 'numpy.average', 'np.average', (['dice_array'], {}), '(dice_array)\n', (10536, 10548), True, 'import numpy as np\n'), ((7276, 7294), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (7283, 7294), True, 'import numpy as np\n'), ((9378, 9399), 'numpy.shape', 'np.shape', (['dice_result'], {}), '(dice_result)\n', (9386, 9399), True, 'import numpy as np\n'), ((9474, 9520), 'numpy.concatenate', 'np.concatenate', (['(dice_array, [dice_result[i]])'], {}), '((dice_array, [dice_result[i]]))\n', (9488, 9520), True, 'import numpy as np\n'), ((9765, 9868), 'os.path.join', 'os.path.join', (['self.config.cross_vali_result_all_dir', "('pred_' + self.config.dataset_name + filename)"], {}), "(self.config.cross_vali_result_all_dir, 'pred_' + self.config.\n dataset_name + filename)\n", (9777, 9868), False, 'import os\n'), ((9889, 9924), 'os.path.exists', 'os.path.exists', (["(output_dir + '.npy')"], {}), "(output_dir + '.npy')\n", (9903, 9924), False, 'import os\n'), ((10361, 10391), 'numpy.save', 'np.save', (['output_dir', 'all_image'], {}), '(output_dir, all_image)\n', (10368, 10391), True, 'import numpy as np\n'), ((9962, 9990), 'numpy.load', 'np.load', (["(output_dir + '.npy')"], {}), "(output_dir + '.npy')\n", (9969, 9990), True, 'import numpy as np\n'), ((10147, 10190), 'numpy.concatenate', 'np.concatenate', (['(all_image, output)'], {'axis': '(0)'}), '((all_image, output), axis=0)\n', (10161, 10190), True, 'import numpy as np\n'), ((10024, 10086), 'numpy.concatenate', 'np.concatenate', (['(data_image[k], target_image[k], pred)'], {'axis': '(0)'}), '((data_image[k], target_image[k], pred), axis=0)\n', (10038, 10086), True, 'import numpy as np\n'), ((10253, 
10315), 'numpy.concatenate', 'np.concatenate', (['(data_image[k], target_image[k], pred)'], {'axis': '(0)'}), '((data_image[k], target_image[k], pred), axis=0)\n', (10267, 10315), True, 'import numpy as np\n')] |
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
def _create_tff_parallel_clients_with_dataset_reduce():
  """Builds a federated computation whose client work sums a sequence of
  int64 values via `tf.data.Dataset.reduce`, starting from 1."""

  @tf.function
  def _add(acc, element):
    return acc + element

  @tf.function
  def _reduce_dataset(ds, start):
    return ds.reduce(start, _add)

  @tff.tf_computation(tff.SequenceType(tf.int64))
  def _client_work(ds):
    start = tf.Variable(np.int64(1.0))
    return _reduce_dataset(ds, start)

  @tff.federated_computation(tff.at_clients(tff.SequenceType(tf.int64)))
  def parallel_client_run(client_datasets):
    return tff.federated_map(_client_work, client_datasets)

  return parallel_client_run
def _create_tff_parallel_clients_with_iter_dataset():
  """Builds a federated computation whose client work sums a sequence of
  int64 values with an explicit `for`-loop over the dataset, starting from 1."""

  @tf.function
  def _add(acc, element):
    return acc + element

  @tf.function
  def _loop_reduce(ds, start):
    total = start
    for element in iter(ds):
      total = _add(total, element)
    return total

  @tff.tf_computation(tff.SequenceType(tf.int64))
  def _client_work(ds):
    start = tf.Variable(np.int64(1.0))
    return _loop_reduce(ds, start)

  @tff.federated_computation(tff.at_clients(tff.SequenceType(tf.int64)))
  def parallel_client_run(client_datasets):
    return tff.federated_map(_client_work, client_datasets)

  return parallel_client_run
class LocalExecutorMultiTPUTest(tf.test.TestCase, parameterized.TestCase):
  """Checks that the local TFF execution context runs clients on multiple TPUs."""

  def setUp(self):
    super().setUp()
    # These tests only make sense with at least two TPU devices available.
    tpu_devices = tf.config.list_logical_devices('TPU')
    if len(tpu_devices) < 2:
      self.skipTest('Skip multi-tpu tests when {} tpus are provided'.format(
          len(tpu_devices)))

  @parameterized.named_parameters(
      ('iter_server_on_cpu', 'CPU',
       _create_tff_parallel_clients_with_iter_dataset),
      ('iter_server_on_tpu', 'TPU',
       _create_tff_parallel_clients_with_iter_dataset),
      ('reduce_server_on_cpu', 'CPU',
       _create_tff_parallel_clients_with_dataset_reduce),
      ('reduce_server_on_tpu', 'TPU',
       _create_tff_parallel_clients_with_dataset_reduce),
  )
  def test_local_executor_multi_tpus(self, tf_device,
                                     create_tff_parallel_clients_fn):
    """Server placed on `tf_device`, clients spread over the TPU devices."""
    # Currently disabled; tracked by the referenced bug.
    self.skipTest('b/157625321')
    tf_devices = tf.config.list_logical_devices(tf_device)
    server_tf_device = None if not tf_devices else tf_devices[0]
    client_devices = tf.config.list_logical_devices('TPU')
    tff.backends.native.set_local_python_execution_context(
        server_tf_device=server_tf_device, client_tf_devices=client_devices)
    parallel_client_run = create_tff_parallel_clients_fn()
    client_data = [
        tf.data.Dataset.range(10),
        tf.data.Dataset.range(10).map(lambda x: x + 1)
    ]
    client_results = parallel_client_run(client_data)
    # Client sums: 1 + (0..9) = 46 and 1 + (1..10) = 56.
    self.assertEqual(client_results, [np.int64(46), np.int64(56)])
# Run the absl test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"tensorflow_federated.SequenceType",
"absl.testing.absltest.main",
"tensorflow_federated.backends.native.set_local_python_execution_context",
"tensorflow.config.list_logical_devices",
"tensorflow.data.Dataset.range",
"numpy.int64",
"absl.testing.parameterized.named_parameters",
"tensorflow_federated.f... | [((2350, 2730), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('iter_server_on_cpu', 'CPU', _create_tff_parallel_clients_with_iter_dataset)", "('iter_server_on_tpu', 'TPU', _create_tff_parallel_clients_with_iter_dataset)", "('reduce_server_on_cpu', 'CPU',\n _create_tff_parallel_clients_with_dataset_reduce)", "('reduce_server_on_tpu', 'TPU',\n _create_tff_parallel_clients_with_dataset_reduce)"], {}), "(('iter_server_on_cpu', 'CPU',\n _create_tff_parallel_clients_with_iter_dataset), ('iter_server_on_tpu',\n 'TPU', _create_tff_parallel_clients_with_iter_dataset), (\n 'reduce_server_on_cpu', 'CPU',\n _create_tff_parallel_clients_with_dataset_reduce), (\n 'reduce_server_on_tpu', 'TPU',\n _create_tff_parallel_clients_with_dataset_reduce))\n", (2380, 2730), False, 'from absl.testing import parameterized\n'), ((3566, 3581), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3579, 3581), False, 'from absl.testing import absltest\n'), ((991, 1017), 'tensorflow_federated.SequenceType', 'tff.SequenceType', (['tf.int64'], {}), '(tf.int64)\n', (1007, 1017), True, 'import tensorflow_federated as tff\n'), ((1276, 1337), 'tensorflow_federated.federated_map', 'tff.federated_map', (['dataset_reduce_fn_wrapper', 'client_datasets'], {}), '(dataset_reduce_fn_wrapper, client_datasets)\n', (1293, 1337), True, 'import tensorflow_federated as tff\n'), ((1661, 1687), 'tensorflow_federated.SequenceType', 'tff.SequenceType', (['tf.int64'], {}), '(tf.int64)\n', (1677, 1687), True, 'import tensorflow_federated as tff\n'), ((1946, 2007), 'tensorflow_federated.federated_map', 'tff.federated_map', (['dataset_reduce_fn_wrapper', 'client_datasets'], {}), '(dataset_reduce_fn_wrapper, client_datasets)\n', (1963, 2007), True, 'import tensorflow_federated as tff\n'), ((2173, 2210), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""TPU"""'], {}), "('TPU')\n", (2203, 2210), True, 'import 
tensorflow as tf\n'), ((2936, 2977), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['tf_device'], {}), '(tf_device)\n', (2966, 2977), True, 'import tensorflow as tf\n'), ((3064, 3101), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""TPU"""'], {}), "('TPU')\n", (3094, 3101), True, 'import tensorflow as tf\n'), ((3106, 3234), 'tensorflow_federated.backends.native.set_local_python_execution_context', 'tff.backends.native.set_local_python_execution_context', ([], {'server_tf_device': 'server_tf_device', 'client_tf_devices': 'client_devices'}), '(server_tf_device=\n server_tf_device, client_tf_devices=client_devices)\n', (3160, 3234), True, 'import tensorflow_federated as tff\n'), ((1086, 1099), 'numpy.int64', 'np.int64', (['(1.0)'], {}), '(1.0)\n', (1094, 1099), True, 'import numpy as np\n'), ((1192, 1218), 'tensorflow_federated.SequenceType', 'tff.SequenceType', (['tf.int64'], {}), '(tf.int64)\n', (1208, 1218), True, 'import tensorflow_federated as tff\n'), ((1756, 1769), 'numpy.int64', 'np.int64', (['(1.0)'], {}), '(1.0)\n', (1764, 1769), True, 'import numpy as np\n'), ((1862, 1888), 'tensorflow_federated.SequenceType', 'tff.SequenceType', (['tf.int64'], {}), '(tf.int64)\n', (1878, 1888), True, 'import tensorflow_federated as tff\n'), ((3326, 3351), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(10)'], {}), '(10)\n', (3347, 3351), True, 'import tensorflow as tf\n'), ((3506, 3518), 'numpy.int64', 'np.int64', (['(46)'], {}), '(46)\n', (3514, 3518), True, 'import numpy as np\n'), ((3520, 3532), 'numpy.int64', 'np.int64', (['(56)'], {}), '(56)\n', (3528, 3532), True, 'import numpy as np\n'), ((3361, 3386), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(10)'], {}), '(10)\n', (3382, 3386), True, 'import tensorflow as tf\n')] |
"""Provide classes to perform private training and private prediction with
logistic regression"""
import tensorflow as tf
import tf_encrypted as tfe
import math
import numpy as np
import time
from sklearn.linear_model import LogisticRegression
# class LogisticRegression:
# """Contains methods to build and train logistic regression."""
# def __init__(self, num_features):
# self.w = tfe.define_private_variable(
# tf.random_uniform([num_features, 1], -0.01, 0.01))
# self.w_masked = tfe.mask(self.w)
# self.b = tfe.define_private_variable(tf.zeros([1]))
# self.b_masked = tfe.mask(self.b)
# @property
# def weights(self):
# return self.w, self.b
# def forward(self, x):
# with tf.name_scope("forward"):
# out = tfe.matmul(x, self.w) + self.b
# y = tfe.sigmoid(out)
# return y
# def backward(self, x, dy, learning_rate=0.01):
# batch_size = x.shape.as_list()[0]
# with tf.name_scope("backward"):
# # store = []
# # for i in range(10):
# # store.append(tfe.diag(z[:,i]))
# # tmppp = tfe.concat(store, axis = 0)
# # gradients = tfe.matmul(tmppp, x)
# # gradients = tfe.reshape(gradients, [128, 40960])
# # gradients = gradients*gradients
# # norm_square = tfe.reduce_sum(gradients, axis=1)
# # norm_inverse = tfe.inverse_sqrt(norm_square)
# # C = 5
# # norm_inverse = norm_inverse * C
# # z1 = tfe.polynomial_piecewise(
# # norm_inverse,
# # (0, 1),
# # ((0,), (0, 1), (1,)), # use tuple because list is not hashable
# # )
# # z1 = tfe.reshape(z1,[1,128])
# # gradients_clipped = tfe.matmul(z1, gradients)
# # gradients_clipped = tfe.reshape(gradients_clipped, [10, 4096])
# dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
# db = tfe.reduce_sum(dy, axis=0) / batch_size
# dw_norm_inverse = tfe.inverse_sqrt(x)
# assign_ops = [
# tfe.assign(self.w, self.w - dw * learning_rate),
# tfe.assign(self.b, self.b - db * learning_rate),
# ]
# return assign_ops
# def loss_grad(self, y, y_hat):
# with tf.name_scope("loss-grad"):
# dy = y_hat - y
# return dy
# def fit_batch(self, x, y):
# with tf.name_scope("fit-batch"):
# y_hat = self.forward(x)
# dy = self.loss_grad(y, y_hat)
# fit_batch_op = self.backward(x, dy)
# return fit_batch_op
# def fit(self, sess, x, y, num_batches):
# fit_batch_op = self.fit_batch(x, y)
# for batch in range(num_batches):
# print("Batch {0: >4d}".format(batch))
# sess.run(fit_batch_op, tag='fit-batch')
# def evaluate(self, sess, x, y, data_owner):
# """Return the accuracy"""
# def print_accuracy(y_hat, y) -> tf.Operation:
# with tf.name_scope("print-accuracy"):
# correct_prediction = tf.equal(tf.round(y_hat), y)
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# print_op = tf.print("Accuracy on {}:".format(data_owner.player_name),
# accuracy)
# return print_op
# with tf.name_scope("evaluate"):
# y_hat = self.forward(x)
# print_accuracy_op = tfe.define_output(data_owner.player_name,
# [y_hat, y],
# print_accuracy)
# sess.run(print_accuracy_op, tag='evaluate')
class LogisticRegression_new:
"""Contains methods to build and train logistic regression."""
    def __init__(self, num_features, class_num, batch_size):
        """Initialize the private weight matrix from a pretrained plaintext model.

        Args:
            num_features: number of input features (a bias column is appended,
                so the weight matrix has num_features + 1 rows).
            class_num: number of output classes (1 column per class).
            batch_size: mini-batch size used during training.
        """
        # print(num_features)
        # NOTE(review): hardcoded machine-specific path to the pretrained IMDB model.
        initial_model = np.loadtxt("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/models/imdbinitial_model.csv",delimiter = ",")
        # Secret-share the pretrained weights as a TF Encrypted private variable.
        self.w = tfe.define_private_variable(np.reshape(initial_model.T, (num_features + 1, class_num)))
        # self.w = tfe.define_private_variable(tf.zeros([10,10]))
        # self.w = tfe.define_private_variable(tf.zeros([num_features + 1, class_num]))
        # self.w = tfe.define_private_variable(tf.random_normal([num_features + 1, class_num], 0, 0.2))
        self.class_num = class_num
        self.num_features = num_features
        self.correct =0.0
        self.batch_size = batch_size
        self.record = []
    @property
    def weights(self):
        """The private (secret-shared) weight variable, shape (num_features + 1, class_num)."""
        return self.w
    def forward(self, x):
        """Private forward pass: sigmoid(x @ w) computed on secret shares.

        `x` is expected to already carry the bias column (num_features + 1 wide).
        """
        with tf.name_scope("forward"):
            out = tfe.matmul(x, self.w)
            y = tfe.sigmoid(out)
        return y
    def backward(self,sess, x, dy, noise,learning_rate=0.2):
        """DP-SGD style backward step on secret shares.

        Builds per-example gradients, clips each example's gradient to L2 norm
        C (=3) via a piecewise-polynomial approximation of min(1, C/||g||),
        adds `noise` for differential privacy, averages over the batch and
        returns the assign ops that apply the update to self.w.
        """
        batch_size = x.shape.as_list()[0]
        with tf.name_scope("backward"):
            store = []
            if self.class_num >2 :
                # Multi-class: build per-example gradients as flattened
                # (num_features+1)*class_num rows, one row per example.
                for i in range(0, self.class_num):
                    store.append(tfe.diag(dy[:,i]))
                tmppp = tfe.concat(store, axis = 1)
                gradients = tfe.matmul( tfe.transpose(x), tmppp)
                # Permutation matrix reorders columns so each example's
                # per-class gradients become contiguous.
                # NOTE(review): the literal 10 assumes class_num == 10 here — confirm.
                permutation = np.zeros([batch_size*10, batch_size*10])
                for i in range(len(permutation)):
                    indice = (i%10)*batch_size+ int(i/10)
                    permutation[indice, i] = 1
                permutation = tfe.define_constant(permutation)
                gradients = gradients.matmul(permutation)
                gradients = tfe.transpose(gradients)
                gradients = tfe.reshape(gradients, [batch_size, (self.num_features+1)*self.class_num])
            else:
                # Binary case: per-example gradient is simply x * dy.
                gradients = x * dy
            # Per-example L2 norms and their inverses (computed privately).
            gradients_square = gradients*gradients
            norm_square = tfe.reduce_sum(gradients_square, axis=1)
            norm_inverse = tfe.inverse_sqrt(norm_square)
            C = 3  # clipping bound
            norm_inverse = norm_inverse * C
            # Piecewise polynomial approximating min(C/||g||, 1): identity on
            # (0,1), saturates at 1 above.
            z1 = tfe.polynomial_piecewise(
                norm_inverse,
                (0, 1),
                ((0,), (0, 1), (1,)),
            )
            z1 = tfe.reshape(z1,[1,batch_size])
            # Sum of clipped per-example gradients, then add DP noise and average.
            gradients_clipped = tfe.matmul(z1, gradients)
            gradients_clipped = tfe.reshape(gradients_clipped, [self.class_num, self.num_features+1])
            gradients_clipped = gradients_clipped + noise
            gradients_clipped = gradients_clipped / batch_size
            gradients_clipped = tfe.transpose(gradients_clipped)
            assign_ops = [
                tfe.assign(self.w, self.w - gradients_clipped * learning_rate),
            ]
        # self.dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
        # db = tfe.reduce_sum(dy, axis=0) / batch_size
        # dw_norm_inverse = tfe.inverse_sqrt(x)
        # assign_ops = [
        #     tfe.assign(self.w, self.w - self.dw * learning_rate),
        # ]
        return assign_ops
    def loss_grad(self, y, y_hat):
        """Gradient of the cross-entropy loss w.r.t. the logits: y_hat - y.

        For the binary case (class_num == 1) the labels are reshaped to a
        column vector so the subtraction broadcasts correctly.
        """
        with tf.name_scope("loss-grad"):
            if self.class_num == 1:
                y = tfe.reshape(y, [self.batch_size, 1])
            dy = y_hat - y
        return dy
    def fit_batch(self, sess, x, y, noise):
        """Build the ops for one private training step on a single mini-batch:
        forward pass, loss gradient, then the clipped/noised backward update."""
        with tf.name_scope("fit-batch"):
            y_hat = self.forward(x)
            dy = self.loss_grad(y, y_hat)
            fit_batch_op = self.backward(sess, x, dy, noise)
        return fit_batch_op
def test(self, X_test,Y_test,theta, class_num):
tmp = X_test.dot(theta.T)
tmp = 1 / (1 + np.exp(-tmp))
correct = 0;
if class_num > 2:
for i in range(len(tmp)):
res = np.argmax(tmp[i])
if(Y_test[i][res]>0):
correct+=1;
else:
for i in range(len(tmp)):
if(abs((Y_test[i] - tmp[i]))<0.5):
correct+=1;
print(correct)
return correct/len(Y_test)
    def fit(self, sess, x, y, x_test , y_test,num_batches, noise, data_owner):
        """Train for `num_batches` batches, periodically logging test accuracy.

        NOTE(review): the `x_test`/`y_test` arguments are ignored; plaintext
        test data is loaded from hard-coded, machine-specific paths below.
        The log file handle is opened in append mode and never closed.
        Assumes `math` is imported at module level -- confirm.
        """
        # Alternative dataset loaders kept from earlier experiments:
        # X_test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/transfer/features/simclr_r50_2x_sk1_cifar_test.npy")
        # Y_test =np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/transfer/features/cifar-test-y.npy")
        # # X_test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/raw_data/cifar-testraw-x.npy")
        # Y_test =np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/raw_data/cifar-testraw-y.npy")
        # X_test = np.concatenate([X_test, np.ones((len(X_test), 1))], axis = 1)
        # Y_test = np.eye(10)[Y_test.astype(int)]
        # X_test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/mnist_test_hog_x.npy")
        # Y_test =np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/mnist_test_hog_y.npy")
        # X_test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/raw_data/mnist_test_x.npy")
        # Y_test = np.load("//disk/wqruan/Pretrain/Handcrafted-DP/transfer/raw_data/mnist_test_y.npy")
        # Y_test = np.eye(10)[Y_test.astype(int)]
        # X_test = np.concatenate([X_test, np.ones((len(X_test), 1))], axis = 1)
        X_test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/imdb_test_x.npy")
        Y_test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/imdb_test_y.npy")
        # test = np.load("/disk/wqruan/Pretrain/Handcrafted-DP/transfer/raw_data/imdb_test.npy")
        # X_test= test[:, 1:]
        # X_test = X_test /15000
        # Y_test = test[:, 0]
        Y_test = Y_test.reshape((len(Y_test), 1))
        # Append a constant bias column so the shape matches the weights.
        X_test = np.concatenate([X_test, np.ones((len(X_test), 1))], axis = 1)
        file = open("imdb-test.txt", 'a+')
        # Reveal the current weights in plaintext and log initial accuracy.
        www = sess.run(self.w.reveal())
        print(www)
        self.test(X_test, Y_test, www.T, self.class_num)
        fit_batch_op = self.fit_batch(sess, x, y, noise)
        i=0  # (unused)
        for batch in range(num_batches):
            # Evaluate at multiples of the current power of ten
            # (10, 20, ..., 100, 200, ...) once batch >= 10.
            if batch>=10 and batch%int(math.pow(10,int(math.log10(batch))))== 0:
                www = sess.run(self.w.reveal())
                tmp = self.test(X_test, Y_test, www.T, self.class_num)
                file.write("iteration num: " + str(batch))
                file.write("accuracy: " + str(tmp))
                file.write("\n")
            print("Batch {0: >4d}".format(batch))
            sess.run(fit_batch_op, tag='fit-batch')
            file.flush()
        # Final evaluation after the last batch (test is run twice; the
        # first call only prints the raw count).
        www = sess.run(self.w.reveal())
        self.test(X_test, Y_test, www.T, self.class_num)
        tmp = self.test(X_test, Y_test, www.T, self.class_num)
        file.write("iteration num: " + str(num_batches))
        file.write("accuracy: " + str(tmp))
        file.write("\n")
    def evaluate(self, sess, x, y, batch,data_owner):
        """Run 50 evaluation batches and record the running accuracy.

        Accumulates the number of correct predictions in `self.correct`
        (a TF variable) through a plaintext local computation on the data
        owner, then appends [batch, accuracy] to `self.record`.
        """
        def print_accuracy(y_hat, y) -> tf.Operation:
            # Runs in plaintext on the data owner's device via define_output.
            with tf.name_scope("print-accuracy"):
                correct = 0.0  # (unused)
                res = tf.argmax(y_hat, 1)
                res_1 = tf.argmax(y, 1)
                correct_prediction = tf.equal(res, res_1)
                # NOTE(review): `correct_prediction` is boolean; reduce_sum
                # without an explicit cast may rely on implicit conversion --
                # verify against the TF version in use.
                assign_ops = [
                    tf.assign(self.correct, self.correct + tf.reduce_sum(correct_prediction)),
                ]
                sess.run(assign_ops)
                tmp = tf.print("correct:",tf.reduce_sum(correct_prediction))
                sess.run(tmp)
                tmp1 = tf.print("correct:",self.correct)
                sess.run(tmp1)
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # (unused)
                print_op = tf.print("")
                return print_op
        with tf.name_scope("evaluate"):
            y_hat = self.forward(x)
            for i in range(0, 50):
                print_accuracy_op = tfe.define_output(data_owner.player_name,
                                                  [y_hat, y],
                                                  print_accuracy)
                sess.run(print_accuracy_op, tag='evaluate')
            # Accuracy averaged over 50 batches of x.shape[0] samples each.
            tmp = self.correct/(50*x.shape.as_list()[0])
            tmp = sess.run(tmp)
            print(tmp)
            self.record.append([batch, tmp])
class DataOwner:
    """Contains code meant to be executed by a data owner Player.

    Owns the local plaintext training/test files, exposes TF dataset
    pipelines that produce batches for the secure computation, and
    generates this party's local share of the Gaussian DP noise.
    """
    def __init__(self, player_name, local_data_file, data_schema,
                 C = 1, noise_multiplier = 0.92, train_file = '', train_label_file = '', test_file = '', test_label_file ='', class_num = 1,header=False, index=False, field_delim=',', na_values=['nan'], batch_size=128, num_features = 32, mu = 0, sigma = 0.001):
        # NOTE(review): `na_values=['nan']` is a mutable default argument;
        # it is only read here, but sharing across instances is a hazard.
        self.player_name = player_name
        self.local_data_file = local_data_file
        self.data_schema = data_schema
        self.batch_size = batch_size
        self.header = header        # data files carry a header row to skip
        self.index = index          # first CSV column is an index to drop
        self.na_values = na_values  # tokens mapped to NaN before filling
        self.field_delim = field_delim
        self.num_features = num_features
        self.mu = mu                # mean of the local Gaussian noise
        self.sigma = sigma          # not read below -- presumably legacy; verify
        self.train_file = train_file
        self.train_label_file = train_label_file
        self.test_file = test_file
        self.class_num = class_num
        self.test_label_file = test_label_file
        self.C = C                  # clipping bound entering the noise stddev
        self.noise_multiplier = noise_multiplier
        self.train_initializer = None
        # Sum of the name's character codes: a deterministic per-player
        # offset used to decorrelate the random seeds of different players.
        tmp = list(player_name)
        self.ran = 0
        for i in range(0, len(tmp)):
            self.ran += ord(tmp[i])
    @property
    def initializer(self):
        # Initializer of the (initializable) training dataset iterator.
        return self.train_initializer
    def provide_data(self):
        """Pipeline over `local_data_file`: shuffled, repeated batches."""
        def decode(line):
            # Split a CSV line, optionally drop the index column, map the
            # configured NA tokens to 'nan', and parse to float32.
            fields = tf.string_split([line], self.field_delim).values
            if self.index: # Skip index
                fields = fields[1:]
            fields = tf.regex_replace(fields, '|'.join(self.na_values), 'nan')
            fields = tf.string_to_number(fields, tf.float32)
            return fields
        def fill_na(fields, fill_values):
            # Replace NaNs with the schema's per-column default values.
            fields = tf.where(tf.is_nan(fields), fill_values, fields)
            return fields
        dataset = tf.data.TextLineDataset(self.local_data_file)
        if self.header: # Skip header
            dataset = dataset.skip(1)
        dataset = dataset\
            .map(decode)\
            .map(lambda x: fill_na(x, self.data_schema.field_defaults))\
            .repeat()\
            .shuffle(buffer_size=10000)\
            .batch(self.batch_size)
        iterator = dataset.make_one_shot_iterator()
        self.train_initializer = iterator.initializer
        batch = iterator.get_next()
        batch = tf.reshape(batch, [self.batch_size, self.data_schema.field_num])
        return batch
    def provide_train_data(self):
        """Training pipeline: returns (features+bias, labels) batch tensors.

        If `train_label_file` is empty, labels are assumed to be column 0
        of `train_file`; otherwise features and labels come from separate
        files.
        """
        def decode(line):
            fields = tf.string_split([line], self.field_delim).values
            if self.index: # Skip index
                fields = fields[1:]
            fields = tf.regex_replace(fields, '|'.join(self.na_values), 'nan')
            fields = tf.string_to_number(fields, tf.float32)
            return fields
        def fill_na(fields, fill_values):
            fields = tf.where(tf.is_nan(fields), fill_values, fields)
            return fields
        dataset = tf.data.TextLineDataset(self.train_file)
        if self.header: # Skip header
            dataset = dataset.skip(1)
        dataset = dataset\
            .map(decode)\
            .map(lambda x: fill_na(x, self.data_schema.field_defaults))\
            .repeat()\
            .shuffle(buffer_size=10000)\
            .batch(self.batch_size)
        iterator = dataset.make_initializable_iterator()
        self.train_initializer = iterator.initializer
        batch = iterator.get_next()
        batch = tf.reshape(batch, [self.batch_size, self.data_schema.field_num])
        if self.train_label_file == '':
            # Labels live in column 0 of the data file.
            train_label = batch[:, 0]
            if self.class_num > 2:
                train_label = tf.one_hot(tf.cast(train_label, dtype = tf.int32), self.class_num)
            train_data = batch[:, 1:]
            # train_data = train_data /15000
            # train_data = 20 * train_data / tf.norm(train_data, ord = 2)
            # Append a constant bias column of ones.
            bias_term = tf.ones([self.batch_size, 1])
            return tf.concat([train_data,bias_term], axis = 1), train_label
        # Separate label file: reinterpret the batch as pure features.
        batch = tf.reshape(batch, [self.batch_size, self.num_features])
        labels = tf.data.TextLineDataset(self.train_label_file)
        if self.header: # Skip header
            labels = labels.skip(1)
        labels = labels\
            .map(decode)\
            .repeat()\
            .batch(self.batch_size)
        iterator1 = labels.make_one_shot_iterator()
        batch_labels = iterator1.get_next()
        batch_labels = tf.reshape(batch_labels, [self.batch_size])
        batch_labels = tf.one_hot(tf.cast(batch_labels, dtype = tf.int32), self.class_num)
        bias_term = tf.ones([self.batch_size, 1])
        return tf.concat([batch, bias_term], axis = 1), batch_labels
    def provide_test_data(self):
        """Test pipeline: like provide_train_data but without shuffling."""
        def decode(line):
            fields = tf.string_split([line], self.field_delim).values
            if self.index: # Skip index
                fields = fields[1:]
            fields = tf.regex_replace(fields, '|'.join(self.na_values), 'nan')
            fields = tf.string_to_number(fields, tf.float32)
            return fields
        def fill_na(fields, fill_values):
            fields = tf.where(tf.is_nan(fields), fill_values, fields)
            return fields
        dataset = tf.data.TextLineDataset(self.test_file)
        if self.header: # Skip header
            dataset = dataset.skip(1)
        dataset = dataset\
            .map(decode)\
            .map(lambda x: fill_na(x, self.data_schema.field_defaults))\
            .repeat()\
            .batch(self.batch_size)
        iterator = dataset.make_one_shot_iterator()
        batch = iterator.get_next()
        batch = tf.reshape(batch, [self.batch_size, self.data_schema.field_num])
        if self.test_label_file == '':
            # Labels live in column 0 of the data file.
            test_label = batch[:, 0]
            test_label = tf.one_hot(tf.cast(test_label, dtype = tf.int32), self.class_num)
            test_data = batch[:, 1:]
            # test_data = 20 * test_data / tf.norm(test_data, ord = 2)
            bias_term = tf.ones([self.batch_size, 1])
            return tf.concat([test_data, bias_term], axis = 1), test_label
        # Separate label file: reinterpret the batch as pure features.
        batch = tf.reshape(batch, [self.batch_size, self.num_features])
        labels = tf.data.TextLineDataset(self.test_label_file)
        if self.header: # Skip header
            labels = labels.skip(1)
        labels = labels\
            .map(decode)\
            .repeat()\
            .batch(self.batch_size)
        iterator1 = labels.make_one_shot_iterator()
        batch_labels = iterator1.get_next()
        batch_labels = tf.reshape(batch_labels, [self.batch_size])
        batch_labels = tf.one_hot(tf.cast(batch_labels, dtype = tf.int32), self.class_num)
        bias_term = tf.ones([self.batch_size, 1])
        return tf.concat([batch, bias_term], axis = 1), batch_labels
    def provide_noise(self):
        """This party's local share of the Gaussian DP noise.

        Stddev is noise_multiplier * C scaled by (1.5/3)**0.5 -- presumably
        splitting the total variance across the three parties; confirm.
        NOTE(review): `time.clock()` was removed in Python 3.8, so this
        line requires Python <= 3.7 (use time.perf_counter() on newer
        versions). `self.ran` decorrelates the seeds across players.
        """
        local_noise = tf.random_normal([self.class_num,self.num_features+1], mean = self.mu, stddev = (1.5**0.5)*self.noise_multiplier*self.C/(3**0.5), seed = time.clock() - self.ran)
        return local_noise;
    def random_vector(self):
        """Return a constant all-ones vector of length 64."""
        random_vector = tf.ones([64])
        return random_vector
class DataSchema:
    """Column layout of a delimited data file.

    Attributes:
        field_types: per-column type descriptors.
        field_defaults: per-column fill values used for missing entries.
    """
    def __init__(self, field_types, field_defaults):
        self.field_types, self.field_defaults = field_types, field_defaults
    @property
    def field_num(self):
        """Number of columns described by the schema."""
        return len(self.field_types)
class ModelOwner:
    """Code executed by the model owner Player: receives trained weights."""
    def __init__(self, player_name):
        self.player_name = player_name
    @tfe.local_computation
    def receive_weights(self, *weights):
        """Print the revealed weights on this player's device."""
        label = "Weights on {}:".format(self.player_name)
        return tf.print(label, weights)
class PredictionClient:
    """Code executed by a prediction client: feeds queries, receives results."""
    def __init__(self, player_name, num_features):
        self.player_name = player_name
        self.num_features = num_features
    @tfe.local_computation
    def provide_input(self):
        """Sample one random feature vector in [-0.5, 0.5) as the query."""
        return tf.random.uniform(
            shape=[1, self.num_features],
            minval=-.5,
            maxval=.5,
            dtype=tf.float32)
    @tfe.local_computation
    def receive_output(self, result):
        """Print the prediction result on this player's device."""
        label = "Result on {}:".format(self.player_name)
        return tf.print(label, result)
| [
"numpy.load",
"tf_encrypted.reshape",
"tensorflow.reduce_sum",
"numpy.argmax",
"tensorflow.print",
"tensorflow.reshape",
"tensorflow.string_split",
"numpy.exp",
"tensorflow.string_to_number",
"tensorflow.random.uniform",
"tf_encrypted.define_constant",
"tf_encrypted.matmul",
"tf_encrypted.in... | [((3650, 3763), 'numpy.loadtxt', 'np.loadtxt', (['"""/disk/wqruan/Pretrain/Handcrafted-DP/transfer/models/imdbinitial_model.csv"""'], {'delimiter': '""","""'}), "(\n '/disk/wqruan/Pretrain/Handcrafted-DP/transfer/models/imdbinitial_model.csv'\n , delimiter=',')\n", (3660, 3763), True, 'import numpy as np\n'), ((8494, 8580), 'numpy.load', 'np.load', (['"""/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/imdb_test_x.npy"""'], {}), "(\n '/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/imdb_test_x.npy')\n", (8501, 8580), True, 'import numpy as np\n'), ((8589, 8675), 'numpy.load', 'np.load', (['"""/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/imdb_test_y.npy"""'], {}), "(\n '/disk/wqruan/Pretrain/Handcrafted-DP/transfer/features/imdb_test_y.npy')\n", (8596, 8675), True, 'import numpy as np\n'), ((12810, 12855), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['self.local_data_file'], {}), '(self.local_data_file)\n', (12833, 12855), True, 'import tensorflow as tf\n'), ((13266, 13330), 'tensorflow.reshape', 'tf.reshape', (['batch', '[self.batch_size, self.data_schema.field_num]'], {}), '(batch, [self.batch_size, self.data_schema.field_num])\n', (13276, 13330), True, 'import tensorflow as tf\n'), ((13819, 13859), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['self.train_file'], {}), '(self.train_file)\n', (13842, 13859), True, 'import tensorflow as tf\n'), ((14275, 14339), 'tensorflow.reshape', 'tf.reshape', (['batch', '[self.batch_size, self.data_schema.field_num]'], {}), '(batch, [self.batch_size, self.data_schema.field_num])\n', (14285, 14339), True, 'import tensorflow as tf\n'), ((14806, 14861), 'tensorflow.reshape', 'tf.reshape', (['batch', '[self.batch_size, self.num_features]'], {}), '(batch, [self.batch_size, self.num_features])\n', (14816, 14861), True, 'import tensorflow as tf\n'), ((14875, 14921), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['self.train_label_file'], 
{}), '(self.train_label_file)\n', (14898, 14921), True, 'import tensorflow as tf\n'), ((15187, 15230), 'tensorflow.reshape', 'tf.reshape', (['batch_labels', '[self.batch_size]'], {}), '(batch_labels, [self.batch_size])\n', (15197, 15230), True, 'import tensorflow as tf\n'), ((15334, 15363), 'tensorflow.ones', 'tf.ones', (['[self.batch_size, 1]'], {}), '([self.batch_size, 1])\n', (15341, 15363), True, 'import tensorflow as tf\n'), ((15898, 15937), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['self.test_file'], {}), '(self.test_file)\n', (15921, 15937), True, 'import tensorflow as tf\n'), ((16261, 16325), 'tensorflow.reshape', 'tf.reshape', (['batch', '[self.batch_size, self.data_schema.field_num]'], {}), '(batch, [self.batch_size, self.data_schema.field_num])\n', (16271, 16325), True, 'import tensorflow as tf\n'), ((16710, 16765), 'tensorflow.reshape', 'tf.reshape', (['batch', '[self.batch_size, self.num_features]'], {}), '(batch, [self.batch_size, self.num_features])\n', (16720, 16765), True, 'import tensorflow as tf\n'), ((16779, 16824), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['self.test_label_file'], {}), '(self.test_label_file)\n', (16802, 16824), True, 'import tensorflow as tf\n'), ((17090, 17133), 'tensorflow.reshape', 'tf.reshape', (['batch_labels', '[self.batch_size]'], {}), '(batch_labels, [self.batch_size])\n', (17100, 17133), True, 'import tensorflow as tf\n'), ((17237, 17266), 'tensorflow.ones', 'tf.ones', (['[self.batch_size, 1]'], {}), '([self.batch_size, 1])\n', (17244, 17266), True, 'import tensorflow as tf\n'), ((17621, 17634), 'tensorflow.ones', 'tf.ones', (['[64]'], {}), '([64])\n', (17628, 17634), True, 'import tensorflow as tf\n'), ((18449, 18544), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'minval': '(-0.5)', 'maxval': '(0.5)', 'dtype': 'tf.float32', 'shape': '[1, self.num_features]'}), '(minval=-0.5, maxval=0.5, dtype=tf.float32, shape=[1, self\n .num_features])\n', (18466, 18544), True, 
'import tensorflow as tf\n'), ((3798, 3856), 'numpy.reshape', 'np.reshape', (['initial_model.T', '(num_features + 1, class_num)'], {}), '(initial_model.T, (num_features + 1, class_num))\n', (3808, 3856), True, 'import numpy as np\n'), ((4349, 4373), 'tensorflow.name_scope', 'tf.name_scope', (['"""forward"""'], {}), "('forward')\n", (4362, 4373), True, 'import tensorflow as tf\n'), ((4387, 4408), 'tf_encrypted.matmul', 'tfe.matmul', (['x', 'self.w'], {}), '(x, self.w)\n', (4397, 4408), True, 'import tf_encrypted as tfe\n'), ((4419, 4435), 'tf_encrypted.sigmoid', 'tfe.sigmoid', (['out'], {}), '(out)\n', (4430, 4435), True, 'import tf_encrypted as tfe\n'), ((4558, 4583), 'tensorflow.name_scope', 'tf.name_scope', (['"""backward"""'], {}), "('backward')\n", (4571, 4583), True, 'import tensorflow as tf\n'), ((5369, 5409), 'tf_encrypted.reduce_sum', 'tfe.reduce_sum', (['gradients_square'], {'axis': '(1)'}), '(gradients_square, axis=1)\n', (5383, 5409), True, 'import tf_encrypted as tfe\n'), ((5431, 5460), 'tf_encrypted.inverse_sqrt', 'tfe.inverse_sqrt', (['norm_square'], {}), '(norm_square)\n', (5447, 5460), True, 'import tf_encrypted as tfe\n'), ((5522, 5590), 'tf_encrypted.polynomial_piecewise', 'tfe.polynomial_piecewise', (['norm_inverse', '(0, 1)', '((0,), (0, 1), (1,))'], {}), '(norm_inverse, (0, 1), ((0,), (0, 1), (1,)))\n', (5546, 5590), True, 'import tf_encrypted as tfe\n'), ((5648, 5680), 'tf_encrypted.reshape', 'tfe.reshape', (['z1', '[1, batch_size]'], {}), '(z1, [1, batch_size])\n', (5659, 5680), True, 'import tf_encrypted as tfe\n'), ((5705, 5730), 'tf_encrypted.matmul', 'tfe.matmul', (['z1', 'gradients'], {}), '(z1, gradients)\n', (5715, 5730), True, 'import tf_encrypted as tfe\n'), ((5757, 5828), 'tf_encrypted.reshape', 'tfe.reshape', (['gradients_clipped', '[self.class_num, self.num_features + 1]'], {}), '(gradients_clipped, [self.class_num, self.num_features + 1])\n', (5768, 5828), True, 'import tf_encrypted as tfe\n'), ((5963, 5995), 
'tf_encrypted.transpose', 'tfe.transpose', (['gradients_clipped'], {}), '(gradients_clipped)\n', (5976, 5995), True, 'import tf_encrypted as tfe\n'), ((6435, 6461), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss-grad"""'], {}), "('loss-grad')\n", (6448, 6461), True, 'import tensorflow as tf\n'), ((6631, 6657), 'tensorflow.name_scope', 'tf.name_scope', (['"""fit-batch"""'], {}), "('fit-batch')\n", (6644, 6657), True, 'import tensorflow as tf\n'), ((10684, 10709), 'tensorflow.name_scope', 'tf.name_scope', (['"""evaluate"""'], {}), "('evaluate')\n", (10697, 10709), True, 'import tensorflow as tf\n'), ((12612, 12651), 'tensorflow.string_to_number', 'tf.string_to_number', (['fields', 'tf.float32'], {}), '(fields, tf.float32)\n', (12631, 12651), True, 'import tensorflow as tf\n'), ((13621, 13660), 'tensorflow.string_to_number', 'tf.string_to_number', (['fields', 'tf.float32'], {}), '(fields, tf.float32)\n', (13640, 13660), True, 'import tensorflow as tf\n'), ((14685, 14714), 'tensorflow.ones', 'tf.ones', (['[self.batch_size, 1]'], {}), '([self.batch_size, 1])\n', (14692, 14714), True, 'import tensorflow as tf\n'), ((15261, 15298), 'tensorflow.cast', 'tf.cast', (['batch_labels'], {'dtype': 'tf.int32'}), '(batch_labels, dtype=tf.int32)\n', (15268, 15298), True, 'import tensorflow as tf\n'), ((15375, 15412), 'tensorflow.concat', 'tf.concat', (['[batch, bias_term]'], {'axis': '(1)'}), '([batch, bias_term], axis=1)\n', (15384, 15412), True, 'import tensorflow as tf\n'), ((15700, 15739), 'tensorflow.string_to_number', 'tf.string_to_number', (['fields', 'tf.float32'], {}), '(fields, tf.float32)\n', (15719, 15739), True, 'import tensorflow as tf\n'), ((16599, 16628), 'tensorflow.ones', 'tf.ones', (['[self.batch_size, 1]'], {}), '([self.batch_size, 1])\n', (16606, 16628), True, 'import tensorflow as tf\n'), ((17164, 17201), 'tensorflow.cast', 'tf.cast', (['batch_labels'], {'dtype': 'tf.int32'}), '(batch_labels, dtype=tf.int32)\n', (17171, 17201), True, 'import tensorflow as 
tf\n'), ((17278, 17315), 'tensorflow.concat', 'tf.concat', (['[batch, bias_term]'], {'axis': '(1)'}), '([batch, bias_term], axis=1)\n', (17287, 17315), True, 'import tensorflow as tf\n'), ((4737, 4762), 'tf_encrypted.concat', 'tfe.concat', (['store'], {'axis': '(1)'}), '(store, axis=1)\n', (4747, 4762), True, 'import tf_encrypted as tfe\n'), ((4844, 4888), 'numpy.zeros', 'np.zeros', (['[batch_size * 10, batch_size * 10]'], {}), '([batch_size * 10, batch_size * 10])\n', (4852, 4888), True, 'import numpy as np\n'), ((5042, 5074), 'tf_encrypted.define_constant', 'tfe.define_constant', (['permutation'], {}), '(permutation)\n', (5061, 5074), True, 'import tf_encrypted as tfe\n'), ((5145, 5169), 'tf_encrypted.transpose', 'tfe.transpose', (['gradients'], {}), '(gradients)\n', (5158, 5169), True, 'import tf_encrypted as tfe\n'), ((5190, 5268), 'tf_encrypted.reshape', 'tfe.reshape', (['gradients', '[batch_size, (self.num_features + 1) * self.class_num]'], {}), '(gradients, [batch_size, (self.num_features + 1) * self.class_num])\n', (5201, 5268), True, 'import tf_encrypted as tfe\n'), ((6027, 6089), 'tf_encrypted.assign', 'tfe.assign', (['self.w', '(self.w - gradients_clipped * learning_rate)'], {}), '(self.w, self.w - gradients_clipped * learning_rate)\n', (6037, 6089), True, 'import tf_encrypted as tfe\n'), ((6505, 6541), 'tf_encrypted.reshape', 'tfe.reshape', (['y', '[self.batch_size, 1]'], {}), '(y, [self.batch_size, 1])\n', (6516, 6541), True, 'import tf_encrypted as tfe\n'), ((6910, 6922), 'numpy.exp', 'np.exp', (['(-tmp)'], {}), '(-tmp)\n', (6916, 6922), True, 'import numpy as np\n'), ((7023, 7040), 'numpy.argmax', 'np.argmax', (['tmp[i]'], {}), '(tmp[i])\n', (7032, 7040), True, 'import numpy as np\n'), ((10059, 10090), 'tensorflow.name_scope', 'tf.name_scope', (['"""print-accuracy"""'], {}), "('print-accuracy')\n", (10072, 10090), True, 'import tensorflow as tf\n'), ((10128, 10147), 'tensorflow.argmax', 'tf.argmax', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (10137, 
10147), True, 'import tensorflow as tf\n'), ((10164, 10179), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (10173, 10179), True, 'import tensorflow as tf\n'), ((10209, 10229), 'tensorflow.equal', 'tf.equal', (['res', 'res_1'], {}), '(res, res_1)\n', (10217, 10229), True, 'import tensorflow as tf\n'), ((10484, 10518), 'tensorflow.print', 'tf.print', (['"""correct:"""', 'self.correct'], {}), "('correct:', self.correct)\n", (10492, 10518), True, 'import tensorflow as tf\n'), ((10635, 10647), 'tensorflow.print', 'tf.print', (['""""""'], {}), "('')\n", (10643, 10647), True, 'import tensorflow as tf\n'), ((10805, 10874), 'tf_encrypted.define_output', 'tfe.define_output', (['data_owner.player_name', '[y_hat, y]', 'print_accuracy'], {}), '(data_owner.player_name, [y_hat, y], print_accuracy)\n', (10822, 10874), True, 'import tf_encrypted as tfe\n'), ((12413, 12454), 'tensorflow.string_split', 'tf.string_split', (['[line]', 'self.field_delim'], {}), '([line], self.field_delim)\n', (12428, 12454), True, 'import tensorflow as tf\n'), ((12735, 12752), 'tensorflow.is_nan', 'tf.is_nan', (['fields'], {}), '(fields)\n', (12744, 12752), True, 'import tensorflow as tf\n'), ((13420, 13461), 'tensorflow.string_split', 'tf.string_split', (['[line]', 'self.field_delim'], {}), '([line], self.field_delim)\n', (13435, 13461), True, 'import tensorflow as tf\n'), ((13744, 13761), 'tensorflow.is_nan', 'tf.is_nan', (['fields'], {}), '(fields)\n', (13753, 13761), True, 'import tensorflow as tf\n'), ((14737, 14779), 'tensorflow.concat', 'tf.concat', (['[train_data, bias_term]'], {'axis': '(1)'}), '([train_data, bias_term], axis=1)\n', (14746, 14779), True, 'import tensorflow as tf\n'), ((15499, 15540), 'tensorflow.string_split', 'tf.string_split', (['[line]', 'self.field_delim'], {}), '([line], self.field_delim)\n', (15514, 15540), True, 'import tensorflow as tf\n'), ((15823, 15840), 'tensorflow.is_nan', 'tf.is_nan', (['fields'], {}), '(fields)\n', (15832, 15840), True, 
'import tensorflow as tf\n'), ((16424, 16459), 'tensorflow.cast', 'tf.cast', (['test_label'], {'dtype': 'tf.int32'}), '(test_label, dtype=tf.int32)\n', (16431, 16459), True, 'import tensorflow as tf\n'), ((16642, 16683), 'tensorflow.concat', 'tf.concat', (['[test_data, bias_term]'], {'axis': '(1)'}), '([test_data, bias_term], axis=1)\n', (16651, 16683), True, 'import tensorflow as tf\n'), ((4797, 4813), 'tf_encrypted.transpose', 'tfe.transpose', (['x'], {}), '(x)\n', (4810, 4813), True, 'import tf_encrypted as tfe\n'), ((10412, 10445), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['correct_prediction'], {}), '(correct_prediction)\n', (10425, 10445), True, 'import tensorflow as tf\n'), ((10575, 10614), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (10582, 10614), True, 'import tensorflow as tf\n'), ((14470, 14506), 'tensorflow.cast', 'tf.cast', (['train_label'], {'dtype': 'tf.int32'}), '(train_label, dtype=tf.int32)\n', (14477, 14506), True, 'import tensorflow as tf\n'), ((17524, 17536), 'time.clock', 'time.clock', ([], {}), '()\n', (17534, 17536), False, 'import time\n'), ((4702, 4720), 'tf_encrypted.diag', 'tfe.diag', (['dy[:, i]'], {}), '(dy[:, i])\n', (4710, 4720), True, 'import tf_encrypted as tfe\n'), ((10302, 10335), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['correct_prediction'], {}), '(correct_prediction)\n', (10315, 10335), True, 'import tensorflow as tf\n'), ((9264, 9281), 'math.log10', 'math.log10', (['batch'], {}), '(batch)\n', (9274, 9281), False, 'import math\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from modules.frame import MultiScaleFrameNetwork
from modules.geometric import global_to_local
from modules.rsconv import OrientedAnchoredRSConv
from ._registry import register_model
def get_hierarchical_idx(n, h=(512, 128)):
    """Return hierarchical index sets over n points.

    For each level size m in h, selects the first m indices of range(n)
    (points are assumed to be pre-ordered by farthest point sampling, so a
    prefix is a valid subsample).

    Args:
        n: total number of points.
        h: iterable of level sizes (immutable default replaces the
            original mutable list; values are unchanged).

    Returns:
        List of int index arrays, one per entry of h.
    """
    all_idx = np.arange(n)  # renamed: the original shadowed builtin `all`
    return [all_idx[:m] for m in h]
@register_model('oriented_rscnn')
class OrientedRSCNN(nn.Module):
    """RSCNN point-cloud classifier anchored on learned oriented frames."""

    def __init__(self, cfg):
        super().__init__()
        # Multi-scale frame predictor: anchor points and per-anchor
        # orientation frames at scales 1024 -> 512 -> 128 -> 1.
        self.frame_net = MultiScaleFrameNetwork(
            hidden_dims=(cfg.frame.hidden_dim_s, cfg.frame.hidden_dim_v),
            num_layers=cfg.frame.num_layers,
            num_frames=cfg.frame.num_frames,
            k=cfg.frame.knn,
            scales=[1024] + [512, 128, 1],
        )
        # Lift frame-local coordinates (num_frames*3 per point) to 32-dim
        # point features via a shared 1x1 convolution.
        self.xyz_raising = nn.Sequential(
            nn.Conv1d(cfg.frame.num_frames * 3, 32, kernel_size=1),
            nn.BatchNorm1d(32),
            nn.ReLU(),
        )
        # Three anchored relation-shape convolutions with growing
        # neighborhood size and channel width.
        self.conv1 = OrientedAnchoredRSConv(32, 128, k=48, num_frames=cfg.frame.num_frames)
        self.conv2 = OrientedAnchoredRSConv(128, 512, k=64, num_frames=cfg.frame.num_frames)
        self.conv3 = OrientedAnchoredRSConv(512, 1024, k=128, num_frames=cfg.frame.num_frames)
        # MLP head mapping the global 1024-dim feature to class scores.
        self.classifier = nn.Sequential(
            nn.Conv1d(1024, 512, kernel_size=1, bias=False),
            nn.BatchNorm1d(512),
            nn.Dropout(p=0.5),
            nn.Conv1d(512, 256, kernel_size=1, bias=False),
            nn.BatchNorm1d(256),
            nn.Dropout(p=0.5),
            nn.Conv1d(256, cfg.num_classes, kernel_size=1, bias=True),
        )

    def forward(self, p):
        """Classify a batch of point clouds.

        Args:
            p: (B, N, 3) input coordinates.

        Returns:
            (B, num_classes) unnormalized class scores.
        """
        R, h, p_anchor, R_anchor = self.frame_net(p, return_anchors=True)
        anchors1, anchors2, anchors3 = p_anchor   # (B, 512, 3), (B, 128, 3), (B, 1, 3)
        frames1, frames2, frames3 = R_anchor
        n_pts = p.size(1)
        # Express every input point in the frame of the single global anchor.
        center_p = anchors3.repeat(1, n_pts, 1)       # (B, N, 3)
        center_R = frames3.repeat(1, n_pts, 1, 1)     # (B, N, F*3, 3)
        local_p = global_to_local(center_R, center_p, p)  # (B, N, F*3)
        feats = self.xyz_raising(local_p.permute(0, 2, 1).contiguous())  # (B, 32, N)
        feats = feats.permute(0, 2, 1).contiguous()
        feats = self.conv1(p, anchors1, frames1, feats)         # (B, 512, h1)
        feats = self.conv2(anchors1, anchors2, frames2, feats)  # (B, 128, h2)
        feats = self.conv3(anchors2, anchors3, frames3, feats)  # (B, 1, h3)
        feats = feats.permute(0, 2, 1).contiguous()             # (B, h3, 1)
        return self.classifier(feats).squeeze(-1)               # (B, n_cls)

    def get_loss(self, p, cls, return_result=True):
        """Mean cross-entropy loss; optionally also returns the logits.

        Args:
            p: (B, N, 3) point clouds.
            cls: (B, 1) or (B,) integer class labels.
            return_result: when True, return (loss, logits) instead of loss.
        """
        logits = self(p)
        targets = cls.view([cls.size(0)])
        loss = F.cross_entropy(logits, targets, reduction='mean')
        return (loss, logits) if return_result else loss
| [
"modules.frame.MultiScaleFrameNetwork",
"torch.nn.Dropout",
"torch.nn.ReLU",
"modules.geometric.global_to_local",
"modules.rsconv.OrientedAnchoredRSConv",
"torch.nn.Conv1d",
"torch.nn.BatchNorm1d",
"torch.nn.functional.cross_entropy",
"numpy.arange"
] | [((327, 339), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (336, 339), True, 'import numpy as np\n'), ((651, 859), 'modules.frame.MultiScaleFrameNetwork', 'MultiScaleFrameNetwork', ([], {'hidden_dims': '(cfg.frame.hidden_dim_s, cfg.frame.hidden_dim_v)', 'num_layers': 'cfg.frame.num_layers', 'num_frames': 'cfg.frame.num_frames', 'k': 'cfg.frame.knn', 'scales': '([1024] + [512, 128, 1])'}), '(hidden_dims=(cfg.frame.hidden_dim_s, cfg.frame.\n hidden_dim_v), num_layers=cfg.frame.num_layers, num_frames=cfg.frame.\n num_frames, k=cfg.frame.knn, scales=[1024] + [512, 128, 1])\n', (673, 859), False, 'from modules.frame import MultiScaleFrameNetwork\n'), ((1126, 1196), 'modules.rsconv.OrientedAnchoredRSConv', 'OrientedAnchoredRSConv', (['(32)', '(128)'], {'k': '(48)', 'num_frames': 'cfg.frame.num_frames'}), '(32, 128, k=48, num_frames=cfg.frame.num_frames)\n', (1148, 1196), False, 'from modules.rsconv import OrientedAnchoredRSConv\n'), ((1218, 1289), 'modules.rsconv.OrientedAnchoredRSConv', 'OrientedAnchoredRSConv', (['(128)', '(512)'], {'k': '(64)', 'num_frames': 'cfg.frame.num_frames'}), '(128, 512, k=64, num_frames=cfg.frame.num_frames)\n', (1240, 1289), False, 'from modules.rsconv import OrientedAnchoredRSConv\n'), ((1311, 1384), 'modules.rsconv.OrientedAnchoredRSConv', 'OrientedAnchoredRSConv', (['(512)', '(1024)'], {'k': '(128)', 'num_frames': 'cfg.frame.num_frames'}), '(512, 1024, k=128, num_frames=cfg.frame.num_frames)\n', (1333, 1384), False, 'from modules.rsconv import OrientedAnchoredRSConv\n'), ((2208, 2246), 'modules.geometric.global_to_local', 'global_to_local', (['R_center', 'p_center', 'p'], {}), '(R_center, p_center, p)\n', (2223, 2246), False, 'from modules.geometric import global_to_local\n'), ((2931, 2980), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logp_pred', 'cls'], {'reduction': '"""mean"""'}), "(logp_pred, cls, reduction='mean')\n", (2946, 2980), True, 'import torch.nn.functional as F\n'), ((985, 1039), 'torch.nn.Conv1d', 
'nn.Conv1d', (['(cfg.frame.num_frames * 3)', '(32)'], {'kernel_size': '(1)'}), '(cfg.frame.num_frames * 3, 32, kernel_size=1)\n', (994, 1039), True, 'import torch.nn as nn\n'), ((1051, 1069), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (1065, 1069), True, 'import torch.nn as nn\n'), ((1083, 1092), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1090, 1092), True, 'import torch.nn as nn\n'), ((1439, 1486), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1024)', '(512)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(1024, 512, kernel_size=1, bias=False)\n', (1448, 1486), True, 'import torch.nn as nn\n'), ((1500, 1519), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (1514, 1519), True, 'import torch.nn as nn\n'), ((1533, 1550), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1543, 1550), True, 'import torch.nn as nn\n'), ((1564, 1610), 'torch.nn.Conv1d', 'nn.Conv1d', (['(512)', '(256)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(512, 256, kernel_size=1, bias=False)\n', (1573, 1610), True, 'import torch.nn as nn\n'), ((1624, 1643), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (1638, 1643), True, 'import torch.nn as nn\n'), ((1657, 1674), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1667, 1674), True, 'import torch.nn as nn\n'), ((1688, 1745), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', 'cfg.num_classes'], {'kernel_size': '(1)', 'bias': '(True)'}), '(256, cfg.num_classes, kernel_size=1, bias=True)\n', (1697, 1745), True, 'import torch.nn as nn\n')] |
import os
import os.path as op
from shutil import copyfile
import numpy as np
from scipy import sparse
import pytest
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.datasets import testing
from mne import read_surface, write_surface, decimate_surface, pick_types
from mne.surface import (read_morph_map, _compute_nearest,
fast_cross_3d, get_head_surf, read_curvature,
get_meg_helmet_surf)
from mne.utils import (_TempDir, requires_mayavi, requires_tvtk, catch_logging,
run_tests_if_main, object_diff, traits_test)
from mne.io import read_info
from mne.io.constants import FIFF
from mne.transforms import _get_trans
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
rng = np.random.RandomState(0)
def test_helmet():
"""Test loading helmet surfaces."""
base_dir = op.join(op.dirname(__file__), '..', 'io')
fname_raw = op.join(base_dir, 'tests', 'data', 'test_raw.fif')
fname_kit_raw = op.join(base_dir, 'kit', 'tests', 'data',
'test_bin_raw.fif')
fname_bti_raw = op.join(base_dir, 'bti', 'tests', 'data',
'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(base_dir, 'tests', 'data', 'test_ctf_raw.fif')
fname_trans = op.join(base_dir, 'tests', 'data',
'sample-audvis-raw-trans.txt')
trans = _get_trans(fname_trans)[0]
new_info = read_info(fname_raw)
artemis_info = new_info.copy()
for pick in pick_types(new_info):
new_info['chs'][pick]['coil_type'] = 9999
artemis_info['chs'][pick]['coil_type'] = \
FIFF.FIFFV_COIL_ARTEMIS123_GRAD
for info, n, name in [(read_info(fname_raw), 304, '306m'),
(read_info(fname_kit_raw), 304, 'KIT'),
(read_info(fname_bti_raw), 304, 'Magnes'),
(read_info(fname_ctf_raw), 342, 'CTF'),
(new_info, 102, 'unknown'),
(artemis_info, 102, 'ARTEMIS123')
]:
with catch_logging() as log:
helmet = get_meg_helmet_surf(info, trans, verbose=True)
log = log.getvalue()
assert name in log
assert_equal(len(helmet['rr']), n)
assert_equal(len(helmet['rr']), len(helmet['nn']))
@testing.requires_testing_data
def test_head():
    """Test loading the head surface."""
    bem_head = get_head_surf('sample', subjects_dir=subjects_dir)
    dense_head = get_head_surf('sample', 'head', subjects_dir=subjects_dir)
    # The BEM-derived head surface is coarser than the dense head surface.
    assert len(bem_head['rr']) < len(dense_head['rr'])
    # A missing subject name must be rejected with TypeError.
    with pytest.raises(TypeError):
        get_head_surf(subject=None, subjects_dir=subjects_dir)
def test_fast_cross_3d():
    """Test cross product with lots of elements."""
    a = rng.rand(100000, 3)
    b = rng.rand(1, 3)
    expected = np.cross(a, b)
    assert_array_equal(expected, fast_cross_3d(a, b))
    # Broadcasting and non-2D inputs must behave the same as np.cross.
    broadcast = fast_cross_3d(a[:, np.newaxis], b[0])
    assert_array_equal(expected, broadcast[:, 0])
def test_compute_nearest():
    """Test nearest neighbor searches.

    Builds unit vectors, picks 20 of them as queries, and checks that all
    three backends ('BallTree', 'cKDTree', 'cdist') return the exact same
    nearest-neighbor indices, with and without distance output.
    """
    # Random points normalized onto the unit sphere.
    x = rng.randn(500, 3)
    x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None]
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    nn_true = rng.permutation(np.arange(500, dtype=int))[:20]
    y = x[nn_true]
    nn1 = _compute_nearest(x, y, method='BallTree')
    nn2 = _compute_nearest(x, y, method='cKDTree')
    nn3 = _compute_nearest(x, y, method='cdist')
    assert_array_equal(nn_true, nn1)
    assert_array_equal(nn_true, nn2)
    assert_array_equal(nn_true, nn3)
    # test distance support
    nnn1 = _compute_nearest(x, y, method='BallTree', return_dists=True)
    nnn2 = _compute_nearest(x, y, method='cKDTree', return_dists=True)
    nnn3 = _compute_nearest(x, y, method='cdist', return_dists=True)
    assert_array_equal(nnn1[0], nn_true)
    assert_array_equal(nnn1[1], np.zeros_like(nn1))  # all dists should be 0
    assert_equal(len(nnn1), len(nnn2))
    # Use fresh names so the earlier nn1/nn2 results are not shadowed.
    for res1, res2, res3 in zip(nnn1, nnn2, nnn3):
        assert_array_equal(res1, res2)
        assert_array_equal(res1, res3)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_morph_maps():
    """Test reading and creating morph maps.

    Copies only the sphere registration files into a fake subjects_dir so
    that read_morph_map is forced to (re)compute the maps, then compares
    them against the precomputed maps shipped with the testing dataset.
    """
    # make a new fake subjects_dir
    tempdir = _TempDir()
    for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
        os.mkdir(op.join(tempdir, subject))
        os.mkdir(op.join(tempdir, subject, 'surf'))
        # 'left_right' registration is only needed for the xhemi case below.
        regs = ('reg', 'left_right') if subject == 'fsaverage_ds' else ('reg',)
        for hemi in ['lh', 'rh']:
            for reg in regs:
                args = [subject, 'surf', hemi + '.sphere.' + reg]
                copyfile(op.join(subjects_dir, *args),
                         op.join(tempdir, *args))
    for subject_from, subject_to, xhemi in (
            ('fsaverage_ds', 'sample_ds', False),
            ('fsaverage_ds', 'fsaverage_ds', True)):
        # trigger the creation of morph-maps dir and create the map
        with pytest.warns(None):
            mmap = read_morph_map(subject_from, subject_to, tempdir,
                                  xhemi=xhemi)
        # Reference map computed from the full testing subjects_dir.
        mmap2 = read_morph_map(subject_from, subject_to, subjects_dir,
                               xhemi=xhemi)
        assert_equal(len(mmap), len(mmap2))
        for m1, m2 in zip(mmap, mmap2):
            # deal with sparse matrix stuff
            diff = (m1 - m2).data
            assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
    # This will also trigger creation, but it's trivial
    with pytest.warns(None):
        mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
    for mm in mmap:
        # A subject morphed to itself must yield the identity map.
        assert (mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0
@testing.requires_testing_data
def test_io_surface():
    """Test reading and writing of Freesurfer surface mesh files.

    Round-trips both a quad-format ('.nofix') and a triangle-format surface
    through write_surface/read_surface, including the volume-info metadata.
    """
    tempdir = _TempDir()
    fname_quad = op.join(data_path, 'subjects', 'bert', 'surf',
                         'lh.inflated.nofix')
    fname_tri = op.join(data_path, 'subjects', 'fsaverage', 'surf',
                        'lh.inflated')
    for fname in (fname_quad, fname_tri):
        with pytest.warns(None):  # no volume info
            pts, tri, vol_info = read_surface(fname, read_metadata=True)
        write_surface(op.join(tempdir, 'tmp'), pts, tri, volume_info=vol_info)
        with pytest.warns(None):  # no volume info
            c_pts, c_tri, c_vol_info = read_surface(op.join(tempdir, 'tmp'),
                                                     read_metadata=True)
        # Vertices, triangles, and metadata must all survive the round trip.
        assert_array_equal(pts, c_pts)
        assert_array_equal(tri, c_tri)
        assert_equal(object_diff(vol_info, c_vol_info), '')
@testing.requires_testing_data
def test_read_curv():
    """Test reading curvature data."""
    surf_dir = op.join(data_path, 'subjects', 'fsaverage', 'surf')
    binary_curv = read_curvature(op.join(surf_dir, 'lh.curv'))
    vertices = read_surface(op.join(surf_dir, 'lh.inflated'))[0]
    # One binarized curvature value (0 or 1) per surface vertex.
    assert len(binary_curv) == len(vertices)
    assert np.logical_or(binary_curv == 0, binary_curv == 1).all()
@requires_tvtk
@requires_mayavi
@traits_test
def test_decimate_surface():
    """Test triangular surface decimation.

    Decimates a tiny 4-triangle patch to various target counts and checks
    the resulting triangle count, then verifies that an out-of-range vertex
    index raises ValueError.
    """
    points = np.array([[-0.00686118, -0.10369860, 0.02615170],
                       [-0.00713948, -0.10370162, 0.02614874],
                       [-0.00686208, -0.10368247, 0.02588313],
                       [-0.00713987, -0.10368724, 0.02587745]])
    tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, 0]])
    for n_tri in [4, 3, 2]:  # quadric decimation creates even numbered output.
        _, this_tris = decimate_surface(points, tris, n_tri)
        # BUG FIX: the original `assert a == b if cond else 2` parsed as
        # `assert ((a == b) if cond else 2)`, so odd n_tri always passed
        # (`assert 2` is truthy). Parenthesize the expected value: an odd
        # target is rounded down to 2 by the even-output decimation.
        assert len(this_tris) == (n_tri if not n_tri % 2 else 2)
    nirvana = 5
    tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]])
    # Vertex index 5 does not exist in `points` -> must be rejected.
    pytest.raises(ValueError, decimate_surface, points, tris, n_tri)
run_tests_if_main()
| [
"numpy.sum",
"mne.pick_types",
"mne.utils._TempDir",
"mne.utils.run_tests_if_main",
"mne.surface.fast_cross_3d",
"mne.surface.get_meg_helmet_surf",
"numpy.arange",
"mne.surface.read_curvature",
"os.path.join",
"scipy.sparse.eye",
"mne.read_surface",
"numpy.zeros_like",
"mne.utils.catch_loggi... | [((741, 774), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {'download': '(False)'}), '(download=False)\n', (758, 774), False, 'from mne.datasets import testing\n'), ((790, 820), 'os.path.join', 'op.join', (['data_path', '"""subjects"""'], {}), "(data_path, 'subjects')\n", (797, 820), True, 'import os.path as op\n'), ((829, 904), 'os.path.join', 'op.join', (['subjects_dir', '"""sample"""', '"""bem"""', '"""sample-1280-1280-1280-bem-sol.fif"""'], {}), "(subjects_dir, 'sample', 'bem', 'sample-1280-1280-1280-bem-sol.fif')\n", (836, 904), True, 'import os.path as op\n'), ((928, 952), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (949, 952), True, 'import numpy as np\n'), ((8095, 8114), 'mne.utils.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (8112, 8114), False, 'from mne.utils import _TempDir, requires_mayavi, requires_tvtk, catch_logging, run_tests_if_main, object_diff, traits_test\n'), ((1087, 1137), 'os.path.join', 'op.join', (['base_dir', '"""tests"""', '"""data"""', '"""test_raw.fif"""'], {}), "(base_dir, 'tests', 'data', 'test_raw.fif')\n", (1094, 1137), True, 'import os.path as op\n'), ((1158, 1219), 'os.path.join', 'op.join', (['base_dir', '"""kit"""', '"""tests"""', '"""data"""', '"""test_bin_raw.fif"""'], {}), "(base_dir, 'kit', 'tests', 'data', 'test_bin_raw.fif')\n", (1165, 1219), True, 'import os.path as op\n'), ((1268, 1337), 'os.path.join', 'op.join', (['base_dir', '"""bti"""', '"""tests"""', '"""data"""', '"""exported4D_linux_raw.fif"""'], {}), "(base_dir, 'bti', 'tests', 'data', 'exported4D_linux_raw.fif')\n", (1275, 1337), True, 'import os.path as op\n'), ((1386, 1440), 'os.path.join', 'op.join', (['base_dir', '"""tests"""', '"""data"""', '"""test_ctf_raw.fif"""'], {}), "(base_dir, 'tests', 'data', 'test_ctf_raw.fif')\n", (1393, 1440), True, 'import os.path as op\n'), ((1459, 1524), 'os.path.join', 'op.join', (['base_dir', '"""tests"""', '"""data"""', 
'"""sample-audvis-raw-trans.txt"""'], {}), "(base_dir, 'tests', 'data', 'sample-audvis-raw-trans.txt')\n", (1466, 1524), True, 'import os.path as op\n'), ((1605, 1625), 'mne.io.read_info', 'read_info', (['fname_raw'], {}), '(fname_raw)\n', (1614, 1625), False, 'from mne.io import read_info\n'), ((1677, 1697), 'mne.pick_types', 'pick_types', (['new_info'], {}), '(new_info)\n', (1687, 1697), False, 'from mne import read_surface, write_surface, decimate_surface, pick_types\n'), ((2618, 2668), 'mne.surface.get_head_surf', 'get_head_surf', (['"""sample"""'], {'subjects_dir': 'subjects_dir'}), "('sample', subjects_dir=subjects_dir)\n", (2631, 2668), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((2682, 2740), 'mne.surface.get_head_surf', 'get_head_surf', (['"""sample"""', '"""head"""'], {'subjects_dir': 'subjects_dir'}), "('sample', 'head', subjects_dir=subjects_dir)\n", (2695, 2740), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((2815, 2900), 'pytest.raises', 'pytest.raises', (['TypeError', 'get_head_surf'], {'subject': 'None', 'subjects_dir': 'subjects_dir'}), '(TypeError, get_head_surf, subject=None, subjects_dir=subjects_dir\n )\n', (2828, 2900), False, 'import pytest\n'), ((3053, 3067), 'numpy.cross', 'np.cross', (['x', 'y'], {}), '(x, y)\n', (3061, 3067), True, 'import numpy as np\n'), ((3077, 3096), 'mne.surface.fast_cross_3d', 'fast_cross_3d', (['x', 'y'], {}), '(x, y)\n', (3090, 3096), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3101, 3126), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['z', 'zz'], {}), '(z, zz)\n', (3119, 3126), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((3166, 3203), 'mne.surface.fast_cross_3d', 
'fast_cross_3d', (['x[:, np.newaxis]', 'y[0]'], {}), '(x[:, np.newaxis], y[0])\n', (3179, 3203), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3208, 3239), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['z', 'zz[:, 0]'], {}), '(z, zz[:, 0])\n', (3226, 3239), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((3483, 3524), 'mne.surface._compute_nearest', '_compute_nearest', (['x', 'y'], {'method': '"""BallTree"""'}), "(x, y, method='BallTree')\n", (3499, 3524), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3535, 3575), 'mne.surface._compute_nearest', '_compute_nearest', (['x', 'y'], {'method': '"""cKDTree"""'}), "(x, y, method='cKDTree')\n", (3551, 3575), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3586, 3624), 'mne.surface._compute_nearest', '_compute_nearest', (['x', 'y'], {'method': '"""cdist"""'}), "(x, y, method='cdist')\n", (3602, 3624), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3629, 3661), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nn_true', 'nn1'], {}), '(nn_true, nn1)\n', (3647, 3661), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((3666, 3698), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nn_true', 'nn2'], {}), '(nn_true, nn2)\n', (3684, 3698), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((3703, 3735), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nn_true', 'nn3'], {}), '(nn_true, nn3)\n', (3721, 3735), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((3776, 
3836), 'mne.surface._compute_nearest', '_compute_nearest', (['x', 'y'], {'method': '"""BallTree"""', 'return_dists': '(True)'}), "(x, y, method='BallTree', return_dists=True)\n", (3792, 3836), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3848, 3907), 'mne.surface._compute_nearest', '_compute_nearest', (['x', 'y'], {'method': '"""cKDTree"""', 'return_dists': '(True)'}), "(x, y, method='cKDTree', return_dists=True)\n", (3864, 3907), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3919, 3976), 'mne.surface._compute_nearest', '_compute_nearest', (['x', 'y'], {'method': '"""cdist"""', 'return_dists': '(True)'}), "(x, y, method='cdist', return_dists=True)\n", (3935, 3976), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3981, 4017), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nnn1[0]', 'nn_true'], {}), '(nnn1[0], nn_true)\n', (3999, 4017), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((4436, 4446), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (4444, 4446), False, 'from mne.utils import _TempDir, requires_mayavi, requires_tvtk, catch_logging, run_tests_if_main, object_diff, traits_test\n'), ((6021, 6031), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (6029, 6031), False, 'from mne.utils import _TempDir, requires_mayavi, requires_tvtk, catch_logging, run_tests_if_main, object_diff, traits_test\n'), ((6049, 6116), 'os.path.join', 'op.join', (['data_path', '"""subjects"""', '"""bert"""', '"""surf"""', '"""lh.inflated.nofix"""'], {}), "(data_path, 'subjects', 'bert', 'surf', 'lh.inflated.nofix')\n", (6056, 6116), True, 'import os.path as op\n'), ((6158, 6224), 'os.path.join', 'op.join', (['data_path', '"""subjects"""', 
'"""fsaverage"""', '"""surf"""', '"""lh.inflated"""'], {}), "(data_path, 'subjects', 'fsaverage', 'surf', 'lh.inflated')\n", (6165, 6224), True, 'import os.path as op\n'), ((6943, 7005), 'os.path.join', 'op.join', (['data_path', '"""subjects"""', '"""fsaverage"""', '"""surf"""', '"""lh.curv"""'], {}), "(data_path, 'subjects', 'fsaverage', 'surf', 'lh.curv')\n", (6950, 7005), True, 'import os.path as op\n'), ((7023, 7089), 'os.path.join', 'op.join', (['data_path', '"""subjects"""', '"""fsaverage"""', '"""surf"""', '"""lh.inflated"""'], {}), "(data_path, 'subjects', 'fsaverage', 'surf', 'lh.inflated')\n", (7030, 7089), True, 'import os.path as op\n'), ((7130, 7156), 'mne.surface.read_curvature', 'read_curvature', (['fname_curv'], {}), '(fname_curv)\n', (7144, 7156), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((7426, 7604), 'numpy.array', 'np.array', (['[[-0.00686118, -0.1036986, 0.0261517], [-0.00713948, -0.10370162, \n 0.02614874], [-0.00686208, -0.10368247, 0.02588313], [-0.00713987, -\n 0.10368724, 0.02587745]]'], {}), '([[-0.00686118, -0.1036986, 0.0261517], [-0.00713948, -0.10370162, \n 0.02614874], [-0.00686208, -0.10368247, 0.02588313], [-0.00713987, -\n 0.10368724, 0.02587745]])\n', (7434, 7604), True, 'import numpy as np\n'), ((7677, 7731), 'numpy.array', 'np.array', (['[[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, 0]]'], {}), '([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, 0]])\n', (7685, 7731), True, 'import numpy as np\n'), ((7963, 8023), 'numpy.array', 'np.array', (['[[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]]'], {}), '([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]])\n', (7971, 8023), True, 'import numpy as np\n'), ((8028, 8092), 'pytest.raises', 'pytest.raises', (['ValueError', 'decimate_surface', 'points', 'tris', 'n_tri'], {}), '(ValueError, decimate_surface, points, tris, n_tri)\n', (8041, 8092), False, 'import pytest\n'), ((1037, 1057), 'os.path.dirname', 
'op.dirname', (['__file__'], {}), '(__file__)\n', (1047, 1057), True, 'import os.path as op\n'), ((1563, 1586), 'mne.transforms._get_trans', '_get_trans', (['fname_trans'], {}), '(fname_trans)\n', (1573, 1586), False, 'from mne.transforms import _get_trans\n'), ((4050, 4068), 'numpy.zeros_like', 'np.zeros_like', (['nn1'], {}), '(nn1)\n', (4063, 4068), True, 'import numpy as np\n'), ((4190, 4218), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nn1', 'nn2'], {}), '(nn1, nn2)\n', (4208, 4218), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((4227, 4255), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nn1', 'nn3'], {}), '(nn1, nn3)\n', (4245, 4255), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((5299, 5366), 'mne.surface.read_morph_map', 'read_morph_map', (['subject_from', 'subject_to', 'subjects_dir'], {'xhemi': 'xhemi'}), '(subject_from, subject_to, subjects_dir, xhemi=xhemi)\n', (5313, 5366), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((5700, 5718), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (5712, 5718), False, 'import pytest\n'), ((5735, 5791), 'mne.surface.read_morph_map', 'read_morph_map', (['"""sample"""', '"""sample"""'], {'subjects_dir': 'tempdir'}), "('sample', 'sample', subjects_dir=tempdir)\n", (5749, 5791), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((6702, 6732), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['pts', 'c_pts'], {}), '(pts, c_pts)\n', (6720, 6732), False, 'from numpy.testing import assert_array_equal, assert_allclose, assert_equal\n'), ((6741, 6771), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['tri', 'c_tri'], {}), '(tri, c_tri)\n', (6759, 6771), False, 'from numpy.testing import assert_array_equal, 
assert_allclose, assert_equal\n'), ((7166, 7190), 'mne.read_surface', 'read_surface', (['fname_surf'], {}), '(fname_surf)\n', (7178, 7190), False, 'from mne import read_surface, write_surface, decimate_surface, pick_types\n'), ((7835, 7872), 'mne.decimate_surface', 'decimate_surface', (['points', 'tris', 'n_tri'], {}), '(points, tris, n_tri)\n', (7851, 7872), False, 'from mne import read_surface, write_surface, decimate_surface, pick_types\n'), ((1871, 1891), 'mne.io.read_info', 'read_info', (['fname_raw'], {}), '(fname_raw)\n', (1880, 1891), False, 'from mne.io import read_info\n'), ((1934, 1958), 'mne.io.read_info', 'read_info', (['fname_kit_raw'], {}), '(fname_kit_raw)\n', (1943, 1958), False, 'from mne.io import read_info\n'), ((2000, 2024), 'mne.io.read_info', 'read_info', (['fname_bti_raw'], {}), '(fname_bti_raw)\n', (2009, 2024), False, 'from mne.io import read_info\n'), ((2069, 2093), 'mne.io.read_info', 'read_info', (['fname_ctf_raw'], {}), '(fname_ctf_raw)\n', (2078, 2093), False, 'from mne.io import read_info\n'), ((2264, 2279), 'mne.utils.catch_logging', 'catch_logging', ([], {}), '()\n', (2277, 2279), False, 'from mne.utils import _TempDir, requires_mayavi, requires_tvtk, catch_logging, run_tests_if_main, object_diff, traits_test\n'), ((2309, 2355), 'mne.surface.get_meg_helmet_surf', 'get_meg_helmet_surf', (['info', 'trans'], {'verbose': '(True)'}), '(info, trans, verbose=True)\n', (2328, 2355), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((3355, 3377), 'numpy.sum', 'np.sum', (['(x ** 2)'], {'axis': '(1)'}), '(x ** 2, axis=1)\n', (3361, 3377), True, 'import numpy as np\n'), ((3418, 3446), 'numpy.arange', 'np.arange', (['(500)'], {'dtype': 'np.int'}), '(500, dtype=np.int)\n', (3427, 3446), True, 'import numpy as np\n'), ((4524, 4549), 'os.path.join', 'op.join', (['tempdir', 'subject'], {}), '(tempdir, subject)\n', (4531, 4549), True, 'import os.path as op\n'), 
((4568, 4601), 'os.path.join', 'op.join', (['tempdir', 'subject', '"""surf"""'], {}), "(tempdir, subject, 'surf')\n", (4575, 4601), True, 'import os.path as op\n'), ((5147, 5165), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (5159, 5165), False, 'import pytest\n'), ((5186, 5248), 'mne.surface.read_morph_map', 'read_morph_map', (['subject_from', 'subject_to', 'tempdir'], {'xhemi': 'xhemi'}), '(subject_from, subject_to, tempdir, xhemi=xhemi)\n', (5200, 5248), False, 'from mne.surface import read_morph_map, _compute_nearest, fast_cross_3d, get_head_surf, read_curvature, get_meg_helmet_surf\n'), ((6304, 6322), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (6316, 6322), False, 'import pytest\n'), ((6375, 6414), 'mne.read_surface', 'read_surface', (['fname'], {'read_metadata': '(True)'}), '(fname, read_metadata=True)\n', (6387, 6414), False, 'from mne import read_surface, write_surface, decimate_surface, pick_types\n'), ((6437, 6460), 'os.path.join', 'op.join', (['tempdir', '"""tmp"""'], {}), "(tempdir, 'tmp')\n", (6444, 6460), True, 'import os.path as op\n'), ((6507, 6525), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (6519, 6525), False, 'import pytest\n'), ((6793, 6826), 'mne.utils.object_diff', 'object_diff', (['vol_info', 'c_vol_info'], {}), '(vol_info, c_vol_info)\n', (6804, 6826), False, 'from mne.utils import _TempDir, requires_mayavi, requires_tvtk, catch_logging, run_tests_if_main, object_diff, traits_test\n'), ((7241, 7284), 'numpy.logical_or', 'np.logical_or', (['(bin_curv == 0)', '(bin_curv == 1)'], {}), '(bin_curv == 0, bin_curv == 1)\n', (7254, 7284), True, 'import numpy as np\n'), ((5594, 5613), 'numpy.zeros_like', 'np.zeros_like', (['diff'], {}), '(diff)\n', (5607, 5613), True, 'import numpy as np\n'), ((6597, 6620), 'os.path.join', 'op.join', (['tempdir', '"""tmp"""'], {}), "(tempdir, 'tmp')\n", (6604, 6620), True, 'import os.path as op\n'), ((4837, 4865), 'os.path.join', 'op.join', (['subjects_dir', '*args'], 
{}), '(subjects_dir, *args)\n', (4844, 4865), True, 'import os.path as op\n'), ((4892, 4915), 'os.path.join', 'op.join', (['tempdir', '*args'], {}), '(tempdir, *args)\n', (4899, 4915), True, 'import os.path as op\n'), ((5833, 5869), 'scipy.sparse.eye', 'sparse.eye', (['mm.shape[0]', 'mm.shape[0]'], {}), '(mm.shape[0], mm.shape[0])\n', (5843, 5869), False, 'from scipy import sparse\n')] |
#!/usr/bin/env python3
import os
from importlib import import_module
from itertools import count
import numpy as np
import tensorflow as tf
import common
def flip_augment(image, fid, pid):
    """Return the original image and its horizontal flip, with matching ids.

    The fid/pid labels are duplicated (as stacked tensors) so that each
    augmented image keeps its file and person identifier.
    """
    flipped = tf.reverse(image, [1])  # flip along the width axis
    pair = tf.stack([image, flipped])
    return pair, tf.stack([fid, fid]), tf.stack([pid, pid])
def five_crops(image, crop_size):
    """Return the central and four corner crops of `crop_size` from `image`."""
    image_size = tf.shape(image)[:2]
    margin = tf.subtract(image_size, crop_size)
    # Fail fast (in-graph) if the requested crop is larger than the image.
    size_ok = tf.assert_non_negative(
        margin, message='Crop size must be smaller or equal to the image size.')
    with tf.control_dependencies([size_ok]):
        center_tl = tf.floor_div(margin, 2)
        center_br = tf.add(center_tl, crop_size)
    center = image[center_tl[0]:center_br[0], center_tl[1]:center_br[1]]
    top_left = image[:-margin[0], :-margin[1]]
    top_right = image[:-margin[0], margin[1]:]
    bottom_left = image[margin[0]:, :-margin[1]]
    bottom_right = image[margin[0]:, margin[1]:]
    return center, top_left, top_right, bottom_left, bottom_right
def calculate_emb_for_fids(args, data_fids):
    '''
    Calculate embeddings with a ResNet-50 network, averaging over ten
    test-time augmentations (2 flips x 5 crops) per image.
    :param args: input arguments (must provide image_root and checkpoint)
    :param data_fids: relative paths to the imagies
    :return: matrix with shape len(data_fids) x embedding_dim (embedding vector for each image - one row)
    '''
    ###################################################################################################################
    # LOAD DATA
    ###################################################################################################################
    # Load the args from the original experiment.
    # Images are first resized to pre_crop_size, then five_crops() cuts
    # net_input_size patches out of them.
    net_input_height=256
    net_input_width=128
    pre_crop_height=288
    pre_crop_width=144
    net_input_size = (net_input_height, net_input_width)
    pre_crop_size = (pre_crop_height, pre_crop_width)
    ###################################################################################################################
    # PREPARE DATA
    ###################################################################################################################
    # Setup a tf Dataset containing all images.
    dataset = tf.data.Dataset.from_tensor_slices(data_fids)
    # Convert filenames to actual image tensors.
    # dataset tensor: [image_resized, fid, pid]
    # A "dummy" pid is passed because no labels are needed for embedding.
    dataset = dataset.map(
        lambda fid: common.fid_to_image(
            fid, tf.constant("dummy", dtype=tf.string), image_root=args.image_root,
            image_size=pre_crop_size),
        num_parallel_calls=8)
    # Augment the data if specified by the arguments.
    # `modifiers` is a list of strings that keeps track of which augmentations
    # have been applied, so that a human can understand it later on.
    modifiers = ['original']
    # Horizontal flip: each element now yields 2 images; unbatch flattens
    # them back into individual elements.
    dataset = dataset.map(flip_augment)
    dataset = dataset.apply(tf.contrib.data.unbatch())
    modifiers = [o + m for m in ['', '_flip'] for o in modifiers]
    # Five crops per (possibly flipped) image -> 10 variants per original.
    dataset = dataset.map(lambda im, fid, pid:(tf.stack(five_crops(im, net_input_size)), tf.stack([fid] * 5), tf.stack([pid] * 5)))
    dataset = dataset.apply(tf.contrib.data.unbatch())
    modifiers = [o + m for o in modifiers for m in ['_center', '_top_left', '_top_right', '_bottom_left', '_bottom_right']]
    # Group it back into PK batches.
    dataset = dataset.batch(256)
    # Overlap producing and consuming.
    dataset = dataset.prefetch(1)
    images, _, _ = dataset.make_one_shot_iterator().get_next()
    ###################################################################################################################
    # CREATE MODEL
    ###################################################################################################################
    # Get the weights
    model = import_module('nets.resnet_v1_50')
    embedding_dim = 128
    block4_units = 1
    endpoints = model.endpoints(images, block4_units = block4_units, is_training=False, embedding_dim=embedding_dim)
    with tf.Session() as sess:
        # Initialize the network/load the checkpoint.
        print('Restoring from checkpoint: {}'.format(args.checkpoint))
        tf.train.Saver().restore(sess, args.checkpoint)
        # Go ahead and embed the whole dataset, with all augmented versions too.
        # Rows are ordered: all modifiers of image 0, then image 1, etc.
        emb_storage = np.zeros(
            (len(data_fids) * len(modifiers), embedding_dim), np.float32)
        for start_idx in count(step=256):
            try:
                emb = sess.run(endpoints['emb'])
                print('\rEmbedded batch {}-{}/{}'.format(
                        start_idx, start_idx + len(emb), len(emb_storage)),
                    flush=True, end='')
                emb_storage[start_idx:start_idx + len(emb)] = emb
            except tf.errors.OutOfRangeError:
                break  # This just indicates the end of the dataset.
        print()
        print("Done with embedding, aggregating augmentations...", flush=True)
        # Pull out the augmentations into a separate first dimension.
        emb_storage = emb_storage.reshape(len(data_fids), len(modifiers), -1)
        emb_storage = emb_storage.transpose((1,0,2))  # (Aug,FID,128D)
        # Aggregate according to the specified parameter.
        # Mean over the augmentation axis -> one 128-D vector per image.
        emb_storage = np.mean(emb_storage, axis=0)
    # Clear the graph so repeated calls don't accumulate ops.
    tf.reset_default_graph()
    return emb_storage
def run_embedding(args, dataset):
    # Load the data from the CSV file.
    # pids - person id (array corresponding to the images)
    # fids - array of the paths to the images ({str_})
    # Both the dataset CSV and the image root are resolved relative to the
    # current working directory.
    dataset=os.path.join(os.getcwd(), dataset)
    img_root=os.path.join(os.getcwd(),args.image_root)
    data_pids, data_fids = common.load_dataset(dataset, img_root, False)
    # Embed every listed image; the person ids are not needed here.
    return calculate_emb_for_fids(args, data_fids) | [
"tensorflow.reset_default_graph",
"numpy.mean",
"tensorflow.contrib.data.unbatch",
"common.load_dataset",
"tensorflow.subtract",
"tensorflow.stack",
"tensorflow.control_dependencies",
"importlib.import_module",
"tensorflow.reverse",
"tensorflow.train.Saver",
"tensorflow.add",
"tensorflow.Sessi... | [((624, 658), 'tensorflow.subtract', 'tf.subtract', (['image_size', 'crop_size'], {}), '(image_size, crop_size)\n', (635, 658), True, 'import tensorflow as tf\n'), ((677, 782), 'tensorflow.assert_non_negative', 'tf.assert_non_negative', (['crop_margin'], {'message': '"""Crop size must be smaller or equal to the image size."""'}), "(crop_margin, message=\n 'Crop size must be smaller or equal to the image size.')\n", (699, 782), True, 'import tensorflow as tf\n'), ((2425, 2470), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data_fids'], {}), '(data_fids)\n', (2459, 2470), True, 'import tensorflow as tf\n'), ((3999, 4033), 'importlib.import_module', 'import_module', (['"""nets.resnet_v1_50"""'], {}), "('nets.resnet_v1_50')\n", (4012, 4033), False, 'from importlib import import_module\n'), ((5491, 5515), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5513, 5515), True, 'import tensorflow as tf\n'), ((5856, 5901), 'common.load_dataset', 'common.load_dataset', (['dataset', 'img_root', '(False)'], {}), '(dataset, img_root, False)\n', (5875, 5901), False, 'import common\n'), ((413, 432), 'tensorflow.stack', 'tf.stack', (['([fid] * 2)'], {}), '([fid] * 2)\n', (421, 432), True, 'import tensorflow as tf\n'), ((432, 451), 'tensorflow.stack', 'tf.stack', (['([pid] * 2)'], {}), '([pid] * 2)\n', (440, 451), True, 'import tensorflow as tf\n'), ((586, 601), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (594, 601), True, 'import tensorflow as tf\n'), ((796, 834), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_size]'], {}), '([assert_size])\n', (819, 834), True, 'import tensorflow as tf\n'), ((855, 883), 'tensorflow.floor_div', 'tf.floor_div', (['crop_margin', '(2)'], {}), '(crop_margin, 2)\n', (867, 883), True, 'import tensorflow as tf\n'), ((907, 934), 'tensorflow.add', 'tf.add', (['top_left', 'crop_size'], {}), '(top_left, crop_size)\n', (913, 
934), True, 'import tensorflow as tf\n'), ((3090, 3115), 'tensorflow.contrib.data.unbatch', 'tf.contrib.data.unbatch', ([], {}), '()\n', (3113, 3115), True, 'import tensorflow as tf\n'), ((3344, 3369), 'tensorflow.contrib.data.unbatch', 'tf.contrib.data.unbatch', ([], {}), '()\n', (3367, 3369), True, 'import tensorflow as tf\n'), ((4206, 4218), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4216, 4218), True, 'import tensorflow as tf\n'), ((4622, 4637), 'itertools.count', 'count', ([], {'step': '(256)'}), '(step=256)\n', (4627, 4637), False, 'from itertools import count\n'), ((5457, 5485), 'numpy.mean', 'np.mean', (['emb_storage'], {'axis': '(0)'}), '(emb_storage, axis=0)\n', (5464, 5485), True, 'import numpy as np\n'), ((5752, 5763), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5761, 5763), False, 'import os\n'), ((5800, 5811), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5809, 5811), False, 'import os\n'), ((297, 319), 'tensorflow.reverse', 'tf.reverse', (['image', '[1]'], {}), '(image, [1])\n', (307, 319), True, 'import tensorflow as tf\n'), ((2654, 2691), 'tensorflow.constant', 'tf.constant', (['"""dummy"""'], {'dtype': 'tf.string'}), "('dummy', dtype=tf.string)\n", (2665, 2691), True, 'import tensorflow as tf\n'), ((3273, 3292), 'tensorflow.stack', 'tf.stack', (['([fid] * 5)'], {}), '([fid] * 5)\n', (3281, 3292), True, 'import tensorflow as tf\n'), ((3294, 3313), 'tensorflow.stack', 'tf.stack', (['([pid] * 5)'], {}), '([pid] * 5)\n', (3302, 3313), True, 'import tensorflow as tf\n'), ((4361, 4377), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4375, 4377), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:37:53 2019
@author: sdenaro
"""
import pandas as pd
import numpy as np
def setup(year,operating_horizon,perfect_foresight):
#read generator parameters into DataFrame
df_gen = pd.read_csv('PNW_data_file/generators.csv',header=0)
zone = ['PNW']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zone]
df_load = df_load.loc[year*8760:year*8760+8759,:]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/PNW_dispatchable_hydro.csv',header=0)
##time series of wind generation for each zone
df_wind = pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0)
df_wind = df_wind.loc[:,'PNW']
df_wind = df_wind.loc[year*8760:year*8760+8759]
df_wind = df_wind.reset_index()
##time series solar for each TAC
df_solar = pd.read_csv('PNW_data_file/solar.csv',header=0)
##daily time series of dispatchable imports by path
df_imports = pd.read_csv('Path_setup/PNW_dispatchable_imports.csv',header=0)
##daily time series of dispatchable imports by path
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_imports3 = pd.read_csv('Path_setup/PNW_dispatchable_3.csv',header=0)
df_imports8 = pd.read_csv('Path_setup/PNW_dispatchable_8.csv',header=0)
df_imports14 = pd.read_csv('Path_setup/PNW_dispatchable_14.csv',header=0)
df_imports65 = pd.read_csv('Path_setup/PNW_dispatchable_65.csv',header=0)
df_imports66 = pd.read_csv('Path_setup/PNW_dispatchable_66.csv',header=0)
##hourly time series of exports by zone
df_exports3 = pd.read_csv('Path_setup/PNW_exports3.csv',header=0)
df_exports8 = pd.read_csv('Path_setup/PNW_exports8.csv',header=0)
df_exports14 = pd.read_csv('Path_setup/PNW_exports14.csv',header=0)
df_exports65 = pd.read_csv('Path_setup/PNW_exports65.csv',header=0)
df_exports66 = pd.read_csv('Path_setup/PNW_exports66.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_csv('PNW_data_file/must_run.csv',header=0)
#natural gas prices
df_ng = pd.read_excel('../Stochastic_engine/Gas_prices/NG.xlsx', header=0)
df_ng = df_ng[zone]
df_ng = df_ng.loc[year*365:year*365+364,:]
df_ng = df_ng.reset_index()
#california imports hourly minimum flows
df_PNW_import_mins3 = pd.read_csv('Path_setup/PNW_path_mins3.csv', header=0)
df_PNW_import_mins8 = pd.read_csv('Path_setup/PNW_path_mins8.csv', header=0)
df_PNW_import_mins14 = pd.read_csv('Path_setup/PNW_path_mins14.csv', header=0)
df_PNW_import_mins65 = pd.read_csv('Path_setup/PNW_path_mins65.csv', header=0)
df_PNW_import_mins66 = pd.read_csv('Path_setup/PNW_path_mins66.csv', header=0)
#california hydro hourly minimum flows
df_PNW_hydro_mins = pd.read_csv('Hydro_setup/PNW_hydro_mins.csv', header=0)
#list zones
zones = ['PNW']
# must run generation
must_run_PNW = np.ones((len(df_load),1))*(df_must.loc[0,'PNW'])
df_total_must_run =pd.DataFrame(must_run_PNW)
df_total_must_run.columns = ['PNW']
############
# sets #
############
#write data.dat file
#write data.dat file
import os
from shutil import copy
from pathlib import Path
path=str(Path.cwd().parent) +str (Path('/UCED/LR/PNW' + str(year)))
os.makedirs(path,exist_ok=True)
generators_file='PNW_data_file/generators.csv'
dispatch_file='../UCED/PNW_dispatch.py'
dispatchLP_file='../UCED/PNW_dispatchLP.py'
wrapper_file='../UCED/PNW_wrapper.py'
simulation_file='../UCED/PNW_simulation.py'
price_cal_file='../UCED/PNW_price_calculation.py'
emission_gen_file = '../UCED/PNW_emissions_generator.csv'
emission_calc_file = '../UCED/PNW_emission_calculation.py'
copy(dispatch_file,path)
copy(wrapper_file,path)
copy(simulation_file,path)
copy(price_cal_file,path)
copy(dispatchLP_file,path)
copy(generators_file,path)
copy(emission_gen_file,path)
copy(emission_calc_file,path)
filename = path + '/data.dat'
with open(filename, 'w') as f:
# generator sets by zone
for z in zones:
# zone string
z_int = zones.index(z)
f.write('set Zone5Generators :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z:
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# WECC imports
f.write('set WECCImports :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'imports':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# generator sets by type
# coal
f.write('set Coal :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'coal':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
#nuc
f.write('set Nuclear :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'nuc':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# oil
f.write('set Oil :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'oil':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Pumped Storage
f.write('set PSH :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'psh':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Slack
f.write('set Slack :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'slack':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Hydro
f.write('set Hydro :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# Ramping
f.write('set Ramping :=\n')
# pull relevant generators
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'typ'] == 'hydro' or df_gen.loc[gen,'typ'] == 'imports':
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# gas generator sets by zone and type
for z in zones:
# zone string
z_int = zones.index(z)
# Natural Gas
# find relevant generators
trigger = 0
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
trigger = 1
if trigger > 0:
# pull relevant generators
f.write('set Gas :=\n')
for gen in range(0,len(df_gen)):
if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
unit_name = df_gen.loc[gen,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + ' ')
f.write(';\n\n')
# zones
f.write('set zones :=\n')
for z in zones:
f.write(z + ' ')
f.write(';\n\n')
################
# parameters #
################
# simulation details
SimHours = 8760
f.write('param SimHours := %d;' % SimHours)
f.write('\n')
f.write('param SimDays:= %d;' % int(SimHours/24))
f.write('\n\n')
HorizonHours = int(operating_horizon*24)
f.write('param HorizonHours := %d;' % HorizonHours)
f.write('\n\n')
HorizonDays = int(HorizonHours/24)
f.write('param HorizonDays := %d;' % HorizonDays)
f.write('\n\n')
# forecast days
f.write('set forecast_days :=\n')
for fd in range(1,operating_horizon+1):
f.write('fd%d ' % fd)
f.write(';\n\n')
# create parameter matrix for generators
f.write('param:' + '\t')
for c in df_gen.columns:
if c != 'name':
f.write(c + '\t')
f.write(':=\n\n')
for i in range(0,len(df_gen)):
for c in df_gen.columns:
if c == 'name':
unit_name = df_gen.loc[i,'name']
unit_name = unit_name.replace(' ','_')
f.write(unit_name + '\t')
else:
f.write(str((df_gen.loc[i,c])) + '\t')
f.write('\n')
f.write(';\n\n')
# times series data
# zonal (hourly)
f.write('param:' + '\t' + 'SimDemand' + '\t' + 'SimWind' \
+ '\t' + 'SimSolar' + '\t' + 'SimMustRun:=' + '\n')
for z in zones:
for h in range(0,len(df_load)):
f.write(z + '\t' + str(h+1) + '\t' + str(df_load.loc[h,z])\
+ '\t' + str(df_wind.loc[h,z]) + '\t' + str(df_solar.loc[h,z])\
+ '\t' + str(df_total_must_run.loc[h,z]) + '\n')
f.write(';\n\n')
# zonal (daily)
f.write('param:' + '\t' + 'SimGasPrice:=' + '\n')
for z in zones:
for d in range(0,int(SimHours/24)):
f.write(z + '\t' + str(d+1) + '\t' + str(df_ng.loc[d,z]) + '\n')
f.write(';\n\n')
if perfect_foresight > 0:
#system wide (daily)
f.write('param:' + '\t' + 'SimPath66_imports' + '\t' + 'SimPath65_imports' + '\t' + 'SimPath3_imports' + '\t' + 'SimPath8_imports' + '\t' + 'SimPath14_imports' + '\t' + 'SimPNW_hydro:=' + '\n')
for d in range(0,len(df_imports66)):
if d <= len(df_imports66) - len(forecast_days):
for fd in forecast_days:
fd_index = forecast_days.index(fd)
f.write(fd + '\t' + str(d+1) + '\t' + str(df_imports66.loc[d+fd_index,'fd1']) + '\t' + str(df_imports65.loc[d+fd_index,'fd1']) + '\t' + str(df_imports3.loc[d+fd_index,'fd1']) + '\t' + str(df_imports8.loc[d+fd_index,'fd1']) + '\t' + str(df_imports14.loc[d+fd_index,'fd1']) + '\t' + str(df_hydro.loc[d+fd_index,'fd1']) + '\n')
else:
diff = len(df_imports66) - d
for fd in forecast_days:
fd_index = forecast_days.index(fd)
if fd_index < diff:
f.write(fd + '\t' + str(d+1) + '\t' + str(df_imports66.loc[d+fd_index,'fd1']) + '\t' + str(df_imports65.loc[d+fd_index,'fd1']) + '\t' + str(df_imports3.loc[d+fd_index,'fd1']) + '\t' + str(df_imports8.loc[d+fd_index,'fd1']) + '\t' + str(df_imports14.loc[d+fd_index,'fd1']) + '\t' + str(df_hydro.loc[d+fd_index,'fd1']) + '\n')
else:
f.write(fd + '\t' + str(d+1) + '\t' + str(df_imports66.loc[d,fd]) + '\t' + str(df_imports65.loc[d,fd]) + '\t' + str(df_imports3.loc[d,fd]) + '\t' + str(df_imports8.loc[d,fd]) + '\t' + str(df_imports14.loc[d,fd]) + '\t' + str(df_hydro.loc[d,fd]) + '\n')
f.write(';\n\n')
else:
#system wide (daily)
f.write('param:' + '\t' + 'SimPath66_imports' + '\t' + 'SimPath65_imports' + '\t' + 'SimPath3_imports' + '\t' + 'SimPath8_imports' + '\t' + 'SimPath14_imports' + '\t' + 'SimPNW_hydro:=' + '\n')
for d in range(0,len(df_imports)):
for fd in forecast_days:
f.write(fd + '\t' + str(d+1) + '\t' + str(df_imports66.loc[d,fd]) + '\t' + str(df_imports65.loc[d,fd]) + '\t' + str(df_imports3.loc[d,fd]) + '\t' + str(df_imports8.loc[d,fd]) + '\t' + str(df_imports14.loc[d,fd]) + '\t' + str(df_hydro.loc[d,fd]) + '\n')
f.write(';\n\n')
#system wide (hourly)
f.write('param:' + '\t' + 'SimPath66_exports' + '\t' + 'SimPath65_exports' + '\t' + 'SimPath3_exports' + '\t' + 'SimPath8_exports' + '\t' + 'SimPath14_exports' + '\t' + 'SimPNW_hydro_minflow' + '\t' + 'SimPath3_imports_minflow' + '\t' + 'SimPath8_imports_minflow' + '\t' + 'SimPath65_imports_minflow' + '\t' + 'SimPath66_imports_minflow' + '\t' + 'SimPath14_imports_minflow:=' + '\n')
#first write information for obsolete days
for t in range(2,len(forecast_days)+1):
j=t
while j < len(forecast_days)+1:
fd =forecast_days[j-1]
for h in range(1,25):
f.write(fd + '\t' + str((t-2)*24+h) + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\n')
j=j+1
if perfect_foresight > 0:
for d in range(0,len(df_imports66)):
if d <= len(df_imports66) - len(forecast_days):
for fd in forecast_days:
fd_index = forecast_days.index(fd)
for h in range(0,24):
f.write(fd + '\t' + str(d*24+fd_index*24+h+1) + '\t' + str(df_exports66.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports65.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports3.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports8.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports14.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_hydro_mins.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins3.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins8.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins65.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins66.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins14.loc[(d+fd_index)*24+h,'fd1']) + '\n')
else:
diff = d - len(df_imports66)
for fd in forecast_days:
fd_index = forecast_days.index(fd)
if fd_index < diff:
for h in range(0,24):
f.write(fd + '\t' + str(d*24+fd_index*24+h+1) + '\t' + str(df_exports66.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports65.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports3.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports8.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_exports14.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_hydro_mins.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins3.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins8.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins65.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins66.loc[(d+fd_index)*24+h,'fd1']) + '\t' + str(df_PNW_import_mins14.loc[(d+fd_index)*24+h,'fd1']) + '\n')
else:
for h in range(0,24):
f.write(fd + '\t' + str(d*24+fd_index*24+h+1) + '\t' + str(df_exports66.loc[(d)*24+h,fd]) + '\t' + str(df_exports65.loc[(d)*24+h,fd]) + '\t' + str(df_exports3.loc[(d)*24+h,fd]) + '\t' + str(df_exports8.loc[(d)*24+h,fd]) + '\t' + str(df_exports14.loc[(d)*24+h,fd]) + '\t' + str(df_PNW_hydro_mins.loc[(d)*24+h,fd]) + '\t' + str(df_PNW_import_mins3.loc[(d)*24+h,fd]) + '\t' + str(df_PNW_import_mins8.loc[(d)*24+h,fd]) + '\t' + str(df_PNW_import_mins65.loc[(d)*24+h,fd]) + '\t' + str(df_PNW_import_mins66.loc[(d)*24+h,fd]) + '\t' + str(df_PNW_import_mins14.loc[(d)*24+h,fd]) + '\n')
else:
for d in range(0,len(df_imports66)):
for fd in forecast_days:
fd_index = forecast_days.index(fd)
for h in range(0,24):
f.write(fd + '\t' + str(d*24+fd_index*24+h+1) + '\t' + str(df_exports66.loc[d*24+h,fd]) + '\t' + str(df_exports65.loc[d*24+h,fd]) + '\t' + str(df_exports3.loc[d*24+h,fd]) + '\t' + str(df_exports8.loc[d*24+h,fd]) + '\t' + str(df_exports14.loc[d*24+h,fd]) + '\t' + str(df_PNW_hydro_mins.loc[d*24+h,fd]) + '\t' + str(df_PNW_import_mins3.loc[d*24+h,fd]) + '\t' + str(df_PNW_import_mins8.loc[d*24+h,fd]) + '\t' + str(df_PNW_import_mins65.loc[d*24+h,fd]) + '\t' + str(df_PNW_import_mins66.loc[d*24+h,fd]) + '\t' + str(df_PNW_import_mins14.loc[d*24+h,fd]) + '\n')
f.write(';\n\n')
#system wide (hourly)
f.write('param:' + '\t' + 'SimReserves:=' + '\n')
for h in range(0,len(df_load)):
f.write(str(h+1) + '\t' + str(df_reserves.loc[h,'reserves']) + '\n')
f.write(';\n\n')
return None
| [
"pandas.DataFrame",
"numpy.sum",
"os.makedirs",
"pandas.read_csv",
"pandas.read_excel",
"pathlib.Path.cwd",
"shutil.copy"
] | [((240, 293), 'pandas.read_csv', 'pd.read_csv', (['"""PNW_data_file/generators.csv"""'], {'header': '(0)'}), "('PNW_data_file/generators.csv', header=0)\n", (251, 293), True, 'import pandas as pd\n'), ((367, 468), 'pandas.read_csv', 'pd.read_csv', (['"""../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv"""'], {'header': '(0)'}), "(\n '../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',\n header=0)\n", (378, 468), True, 'import pandas as pd\n'), ((798, 820), 'pandas.DataFrame', 'pd.DataFrame', (['reserves'], {}), '(reserves)\n', (810, 820), True, 'import pandas as pd\n'), ((916, 979), 'pandas.read_csv', 'pd.read_csv', (['"""Hydro_setup/PNW_dispatchable_hydro.csv"""'], {'header': '(0)'}), "('Hydro_setup/PNW_dispatchable_hydro.csv', header=0)\n", (927, 979), True, 'import pandas as pd\n'), ((1045, 1134), 'pandas.read_csv', 'pd.read_csv', (['"""../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv"""'], {'header': '(0)'}), "('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',\n header=0)\n", (1056, 1134), True, 'import pandas as pd\n'), ((1306, 1354), 'pandas.read_csv', 'pd.read_csv', (['"""PNW_data_file/solar.csv"""'], {'header': '(0)'}), "('PNW_data_file/solar.csv', header=0)\n", (1317, 1354), True, 'import pandas as pd\n'), ((1428, 1492), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_dispatchable_imports.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_dispatchable_imports.csv', header=0)\n", (1439, 1492), True, 'import pandas as pd\n'), ((1639, 1697), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_dispatchable_3.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_dispatchable_3.csv', header=0)\n", (1650, 1697), True, 'import pandas as pd\n'), ((1715, 1773), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_dispatchable_8.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_dispatchable_8.csv', header=0)\n", (1726, 1773), True, 'import pandas as pd\n'), ((1792, 1851), 'pandas.read_csv', 
'pd.read_csv', (['"""Path_setup/PNW_dispatchable_14.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_dispatchable_14.csv', header=0)\n", (1803, 1851), True, 'import pandas as pd\n'), ((1870, 1929), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_dispatchable_65.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_dispatchable_65.csv', header=0)\n", (1881, 1929), True, 'import pandas as pd\n'), ((1948, 2007), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_dispatchable_66.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_dispatchable_66.csv', header=0)\n", (1959, 2007), True, 'import pandas as pd\n'), ((2070, 2122), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_exports3.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_exports3.csv', header=0)\n", (2081, 2122), True, 'import pandas as pd\n'), ((2140, 2192), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_exports8.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_exports8.csv', header=0)\n", (2151, 2192), True, 'import pandas as pd\n'), ((2211, 2264), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_exports14.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_exports14.csv', header=0)\n", (2222, 2264), True, 'import pandas as pd\n'), ((2283, 2336), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_exports65.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_exports65.csv', header=0)\n", (2294, 2336), True, 'import pandas as pd\n'), ((2355, 2408), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_exports66.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_exports66.csv', header=0)\n", (2366, 2408), True, 'import pandas as pd\n'), ((2474, 2525), 'pandas.read_csv', 'pd.read_csv', (['"""PNW_data_file/must_run.csv"""'], {'header': '(0)'}), "('PNW_data_file/must_run.csv', header=0)\n", (2485, 2525), True, 'import pandas as pd\n'), ((2562, 2628), 'pandas.read_excel', 'pd.read_excel', (['"""../Stochastic_engine/Gas_prices/NG.xlsx"""'], {'header': '(0)'}), "('../Stochastic_engine/Gas_prices/NG.xlsx', 
header=0)\n", (2575, 2628), True, 'import pandas as pd\n'), ((2804, 2858), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_path_mins3.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_path_mins3.csv', header=0)\n", (2815, 2858), True, 'import pandas as pd\n'), ((2885, 2939), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_path_mins8.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_path_mins8.csv', header=0)\n", (2896, 2939), True, 'import pandas as pd\n'), ((2967, 3022), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_path_mins14.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_path_mins14.csv', header=0)\n", (2978, 3022), True, 'import pandas as pd\n'), ((3050, 3105), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_path_mins65.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_path_mins65.csv', header=0)\n", (3061, 3105), True, 'import pandas as pd\n'), ((3133, 3188), 'pandas.read_csv', 'pd.read_csv', (['"""Path_setup/PNW_path_mins66.csv"""'], {'header': '(0)'}), "('Path_setup/PNW_path_mins66.csv', header=0)\n", (3144, 3188), True, 'import pandas as pd\n'), ((3261, 3316), 'pandas.read_csv', 'pd.read_csv', (['"""Hydro_setup/PNW_hydro_mins.csv"""'], {'header': '(0)'}), "('Hydro_setup/PNW_hydro_mins.csv', header=0)\n", (3272, 3316), True, 'import pandas as pd\n'), ((3472, 3498), 'pandas.DataFrame', 'pd.DataFrame', (['must_run_PNW'], {}), '(must_run_PNW)\n', (3484, 3498), True, 'import pandas as pd\n'), ((3791, 3823), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (3802, 3823), False, 'import os\n'), ((4241, 4266), 'shutil.copy', 'copy', (['dispatch_file', 'path'], {}), '(dispatch_file, path)\n', (4245, 4266), False, 'from shutil import copy\n'), ((4270, 4294), 'shutil.copy', 'copy', (['wrapper_file', 'path'], {}), '(wrapper_file, path)\n', (4274, 4294), False, 'from shutil import copy\n'), ((4298, 4325), 'shutil.copy', 'copy', (['simulation_file', 'path'], {}), '(simulation_file, path)\n', (4302, 
4325), False, 'from shutil import copy\n'), ((4329, 4355), 'shutil.copy', 'copy', (['price_cal_file', 'path'], {}), '(price_cal_file, path)\n', (4333, 4355), False, 'from shutil import copy\n'), ((4359, 4386), 'shutil.copy', 'copy', (['dispatchLP_file', 'path'], {}), '(dispatchLP_file, path)\n', (4363, 4386), False, 'from shutil import copy\n'), ((4390, 4417), 'shutil.copy', 'copy', (['generators_file', 'path'], {}), '(generators_file, path)\n', (4394, 4417), False, 'from shutil import copy\n'), ((4421, 4450), 'shutil.copy', 'copy', (['emission_gen_file', 'path'], {}), '(emission_gen_file, path)\n', (4425, 4450), False, 'from shutil import copy\n'), ((4454, 4484), 'shutil.copy', 'copy', (['emission_calc_file', 'path'], {}), '(emission_calc_file, path)\n', (4458, 4484), False, 'from shutil import copy\n'), ((760, 776), 'numpy.sum', 'np.sum', (['rv[i, :]'], {}), '(rv[i, :])\n', (766, 776), True, 'import numpy as np\n'), ((3728, 3738), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (3736, 3738), False, 'from pathlib import Path\n')] |
"""Add points on nD shapes in 3D using a mouse callback"""
import napari
import numpy as np
# Create three rectangles in 4D (axes: t, z, y, x).
def _rectangle(frame, row_start, row_stop):
    """Return the four (t, z, y, x) corner coordinates of one rectangle."""
    return [
        [frame, row_start, 75, 75],
        [frame, row_start, 125, 75],
        [frame, row_stop, 125, 125],
        [frame, row_stop, 75, 125],
    ]


# Two rectangles live on slice 0 of the first axis, one on slice 1.
shapes_data = np.array(
    [
        _rectangle(0, 50, 100),
        _rectangle(0, 10, 40),
        _rectangle(1, 100, 50),
    ]
)
# Create a viewer holding an empty 4D points layer; clicked intersection
# points will be appended to it by the mouse callback below.
viewer = napari.view_points(ndim=4, size=3)
points_layer = viewer.layers[0]
# Add the rectangles as a shapes layer; each shape carries an "index"
# feature which is rendered as its text label.
features = dict(index=[0, 1, 2])
shapes_layer = viewer.add_shapes(
    shapes_data,
    features=features,
    text='index',
    face_color=['magenta', 'green', 'blue'],
    edge_color='white',
    blending='additive',
)
@shapes_layer.mouse_drag_callbacks.append
def on_click(layer, event):
    """Add a point where the click ray first intersects a shape."""
    hit_index, hit_point = layer.get_index_and_intersection(
        event.position,
        event.view_direction,
        event.dims_displayed,
    )
    # Guard clause: the ray may miss every shape entirely.
    if hit_index is None or hit_point is None:
        return
    points_layer.add(hit_point)
# set the viewer to 3D rendering mode with the first two rectangles in view
viewer.dims.ndisplay = 3
# show the slice at coordinate 0 of the first (non-displayed) axis
viewer.dims.set_point(axis=0, value=0)
viewer.camera.angles = (70, 30, 150)
viewer.camera.zoom = 2.5
# start the napari event loop (blocks until the viewer window is closed)
napari.run()
| [
"numpy.array",
"napari.view_points",
"napari.run"
] | [((133, 383), 'numpy.array', 'np.array', (['[[[0, 50, 75, 75], [0, 50, 125, 75], [0, 100, 125, 125], [0, 100, 75, 125]],\n [[0, 10, 75, 75], [0, 10, 125, 75], [0, 40, 125, 125], [0, 40, 75, 125]\n ], [[1, 100, 75, 75], [1, 100, 125, 75], [1, 50, 125, 125], [1, 50, 75,\n 125]]]'], {}), '([[[0, 50, 75, 75], [0, 50, 125, 75], [0, 100, 125, 125], [0, 100, \n 75, 125]], [[0, 10, 75, 75], [0, 10, 125, 75], [0, 40, 125, 125], [0, \n 40, 75, 125]], [[1, 100, 75, 75], [1, 100, 125, 75], [1, 50, 125, 125],\n [1, 50, 75, 125]]])\n', (141, 383), True, 'import numpy as np\n'), ((621, 655), 'napari.view_points', 'napari.view_points', ([], {'ndim': '(4)', 'size': '(3)'}), '(ndim=4, size=3)\n', (639, 655), False, 'import napari\n'), ((1500, 1512), 'napari.run', 'napari.run', ([], {}), '()\n', (1510, 1512), False, 'import napari\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training code for baseline model."""
from absl import app
from absl import flags
from absl import logging
import os
import numpy as np
from tqdm import trange
import common.data as data
from common.networks import AllConvModel
from training.train_baseline import TrainLoop
import training.utils as utils
FLAGS = flags.FLAGS
def encode(xs):
    """Thermometer-encode images: each scalar in [0, 1) becomes 20 binary
    channels, one per 0.05-wide threshold (value < threshold -> 1.0).

    The threshold channels are folded into the last (channel) axis, so an
    input of shape (N, H, W, C) becomes (N, H, W, C * 20) float32.
    """
    thresholds = np.arange(0, 1, .05) + .05
    n, height, width, channels = xs.shape[0], xs.shape[1], xs.shape[2], xs.shape[3]
    # Broadcast-compare every pixel against all 20 thresholds at once.
    encoded = (xs[:, :, :, :, None] < thresholds).astype(np.float32)
    return np.reshape(encoded, [-1, height, width, channels * len(thresholds)])
def main(argv):
  # Train the baseline model on thermometer-encoded inputs.
  # argv is required by absl.app but unused.
  del argv
  dataset = data.load_dataset(FLAGS.dataset)
  (x_train, y_train), (x_test, y_test), num_classes = dataset
  # Discretize pixel intensities into 20 binary threshold channels.
  x_train = encode(x_train)
  x_test = encode(x_test)
  print(x_train.shape)
  dataset = (x_train, y_train), (x_test, y_test), num_classes
  input_shape = x_train[0].shape
  loop = TrainLoop(FLAGS.num_filters,
                   num_classes, input_shape)
  # Checkpoints and logs go under <model_dir>/discretize/.
  loop.train(dataset=dataset,
             batch_size=FLAGS.batch_size,
             num_epochs=FLAGS.num_epochs,
             model_dir=os.path.join(FLAGS.model_dir, "discretize/"))
if __name__ == '__main__':
  # absl parses FLAGS before dispatching to main().
  logging.set_verbosity(logging.INFO)
  app.run(main)
| [
"common.data.load_dataset",
"os.path.join",
"absl.app.run",
"numpy.array",
"numpy.arange",
"training.train_baseline.TrainLoop",
"absl.logging.set_verbosity"
] | [((1054, 1101), 'numpy.array', 'np.array', (['less_than_threshold'], {'dtype': 'np.float32'}), '(less_than_threshold, dtype=np.float32)\n', (1062, 1101), True, 'import numpy as np\n'), ((1242, 1274), 'common.data.load_dataset', 'data.load_dataset', (['FLAGS.dataset'], {}), '(FLAGS.dataset)\n', (1259, 1274), True, 'import common.data as data\n'), ((1541, 1595), 'training.train_baseline.TrainLoop', 'TrainLoop', (['FLAGS.num_filters', 'num_classes', 'input_shape'], {}), '(FLAGS.num_filters, num_classes, input_shape)\n', (1550, 1595), False, 'from training.train_baseline import TrainLoop\n'), ((1841, 1876), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (1862, 1876), False, 'from absl import logging\n'), ((1881, 1894), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1888, 1894), False, 'from absl import app\n'), ((943, 964), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (952, 964), True, 'import numpy as np\n'), ((1762, 1806), 'os.path.join', 'os.path.join', (['FLAGS.model_dir', '"""discretize/"""'], {}), "(FLAGS.model_dir, 'discretize/')\n", (1774, 1806), False, 'import os\n')] |
"""
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Float64Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
from pandas.api.types import is_datetime64tz_dtype, pandas_dtype
# Index-type pairs whose set operations are "compatible but inconsistent":
# the result dtype may legitimately be either input dtype rather than one
# fixed dtype.  Each pair maps to the pandas._testing factories used to
# build sample indexes of the two types.
# NOTE(review): the (Float64Index, RangeIndex) entry maps to makeIntIndex,
# not makeRangeIndex — presumably because Int64Index/RangeIndex are
# themselves compatible; confirm this is intentional.
COMPATIBLE_INCONSISTENT_PAIRS = {
    (Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex),
    (Float64Index, Int64Index): (tm.makeFloatIndex, tm.makeIntIndex),
    (Float64Index, RangeIndex): (tm.makeFloatIndex, tm.makeIntIndex),
    (Float64Index, UInt64Index): (tm.makeFloatIndex, tm.makeUIntIndex),
}
def test_union_same_types(index):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = index.sort_values()
idx2 = index.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index, index_fixture2):
    """Union of two indexes with non-matching, non-compatible dtypes must
    fall back to object dtype, commutatively (GH 23525)."""
    left, right = index, index_fixture2
    type_pair = tuple(sorted((type(left), type(right)), key=str))
    if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
        pytest.xfail("This test only considers non compatible indexes.")
    if isinstance(left, pd.MultiIndex) or isinstance(right, pd.MultiIndex):
        pytest.xfail("This test doesn't consider multiindixes.")
    if is_dtype_equal(left.dtype, right.dtype):
        pytest.xfail("This test only considers non matching dtypes.")
    # A union involving a CategoricalIndex (even as dtype('O')) with a
    # non-CategoricalIndex is only possible when both indexes are monotonic;
    # sorting also sidesteps the non-unique/non-monotonic bool-index error.
    left = left.sort_values()
    right = right.sort_values()
    object_dtype = np.dtype("O")
    assert left.union(right).dtype == object_dtype
    assert right.union(left).dtype == object_dtype
@pytest.mark.parametrize("idx_fact1,idx_fact2", COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
    """For compatible-but-inconsistent type pairs, the union dtype may be
    either of the two input dtypes (GH 23525)."""
    small = idx_fact1(10)
    large = idx_fact2(20)
    union_a = small.union(large)
    union_b = large.union(small)
    allowed = (small.dtype, large.dtype)
    assert union_a.dtype in allowed
    assert union_b.dtype in allowed
@pytest.mark.parametrize(
    "left, right, expected",
    [
        ("int64", "int64", "int64"),
        ("int64", "uint64", "object"),
        ("int64", "float64", "float64"),
        ("uint64", "float64", "float64"),
        ("uint64", "uint64", "uint64"),
        ("float64", "float64", "float64"),
        ("datetime64[ns]", "int64", "object"),
        ("datetime64[ns]", "uint64", "object"),
        ("datetime64[ns]", "float64", "object"),
        ("datetime64[ns, CET]", "int64", "object"),
        ("datetime64[ns, CET]", "uint64", "object"),
        ("datetime64[ns, CET]", "float64", "object"),
        ("Period[D]", "int64", "object"),
        ("Period[D]", "uint64", "object"),
        ("Period[D]", "float64", "object"),
    ],
)
@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)])
def test_union_dtypes(left, right, expected, names):
    """Result dtype and name of union/intersection for empty indexes of
    mixed dtypes; mismatched names resolve to None."""
    name_a, name_b, expected_name = names
    a = pd.Index([], dtype=pandas_dtype(left), name=name_a)
    b = pd.Index([], dtype=pandas_dtype(right), name=name_b)
    result = a.union(b)
    assert result.dtype == expected
    assert result.name == expected_name
    # Name retention on intersection.
    # TODO: pin down desired dtype; do we want it to be commutative?
    result = a.intersection(b)
    assert result.name == expected_name
def test_dunder_inplace_setops_deprecated(index):
    # GH#37374 these will become logical ops, not setops
    # NOTE: the augmented-assignment spelling (|=, &=, ^=) is exactly what is
    # under test here; rewriting as e.g. ``index = index | index`` would
    # bypass the deprecated dunder path being asserted on.
    with tm.assert_produces_warning(FutureWarning):
        index |= index
    with tm.assert_produces_warning(FutureWarning):
        index &= index
    with tm.assert_produces_warning(FutureWarning):
        index ^= index
@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]])
def test_intersection_duplicates(values):
    """Intersection deduplicates its result (GH#31326)."""
    left = pd.Index(values)
    right = pd.Index([3, 3])
    tm.assert_index_equal(left.intersection(right), pd.Index([3]))
class TestSetOps:
# Set operation tests shared by all indexes in the `index` fixture
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(self, case, method, index):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(index, method)(case)
def test_intersection_base(self, index):
if isinstance(index, CategoricalIndex):
return
first = index[:5]
second = index[:3]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
if is_datetime64tz_dtype(index.dtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self, index):
first = index[3:]
second = index[:5]
everything = index
union = first.union(second)
assert tm.equalContents(union, everything)
if is_datetime64tz_dtype(index.dtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
if not isinstance(index, CategoricalIndex):
result = first.union(case)
assert tm.equalContents(result, everything), (
result,
everything,
type(case),
)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
    def test_difference_base(self, sort, index):
        # ``difference`` of index[2:] minus index[:4] should leave index[4:]
        # (empty for categorical/bool fixtures, which cannot satisfy that).
        first = index[2:]
        second = index[:4]
        if isinstance(index, CategoricalIndex) or index.is_boolean():
            answer = []
        else:
            answer = index[4:]
        result = first.difference(second, sort)
        assert tm.equalContents(result, answer)
        # GH#10149 - array-like operands should behave like Index operands
        cases = [klass(second.values) for klass in [np.array, Series, list]]
        for case in cases:
            if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                # NOTE(review): this branch re-checks the ``result`` computed
                # above from the Index operand and never uses ``case`` —
                # presumably because raw datetime values would lose tz/freq;
                # confirm whether ``first.difference(case, sort)`` was intended.
                assert type(result) == type(answer)
                tm.assert_numpy_array_equal(
                    result.sort_values().asi8, answer.sort_values().asi8
                )
            else:
                result = first.difference(case, sort)
                assert tm.equalContents(result, answer)
        if isinstance(index, MultiIndex):
            msg = "other must be a MultiIndex or a list of tuples"
            with pytest.raises(TypeError, match=msg):
                first.difference([1, 2, 3], sort)
def test_symmetric_difference(self, index):
if isinstance(index, CategoricalIndex):
return
if len(index) < 2:
return
if index[0] in index[1:] or index[-1] in index[:-1]:
# index fixture has e.g. an index of bools that does not satisfy this,
# another with [0, 0, 1, 1, 2, 2]
return
first = index[1:]
second = index[:-1]
answer = index[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case)
if is_datetime64tz_dtype(first):
# second.values casts to tznaive
expected = first.union(case)
tm.assert_index_equal(result, expected)
continue
assert tm.equalContents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_corner_union(self, index, fname, sname, expected_name):
    # GH#9943, GH#9862
    # Name propagation through union for every combination of
    # populated/empty operands.  MultiIndex and repeated indexes are
    # exercised elsewhere.
    if isinstance(index, MultiIndex) or not index.is_unique:
        pytest.skip("Not for MultiIndex or repeated indices")

    def fresh_copy():
        return index.copy()

    def fresh_empty():
        return index.drop(index)

    # (left factory, right factory, expected-result factory)
    scenarios = [
        (fresh_copy, fresh_copy, fresh_copy),    # copy.union(copy)
        (fresh_copy, fresh_empty, fresh_copy),   # copy.union(empty)
        (fresh_empty, fresh_copy, fresh_copy),   # empty.union(copy)
        (fresh_empty, fresh_empty, fresh_empty), # empty.union(empty)
    ]
    for make_left, make_right, make_expected in scenarios:
        left = make_left().set_names(fname)
        right = make_right().set_names(sname)
        expected = make_expected().set_names(expected_name)
        tm.assert_index_equal(left.union(right), expected)
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_union_unequal(self, index, fname, sname, expected_name):
    """Name propagation when one union operand is a strict subset of the other."""
    if isinstance(index, MultiIndex) or not index.is_unique:
        pytest.skip("Not for MultiIndex or repeated indices")
    # copy.union(subset); sort both sides so unicode/string orderings agree
    left = index.copy().set_names(fname)
    right = index[1:].set_names(sname)
    result = left.union(right).sort_values()
    expected = index.set_names(expected_name).sort_values()
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
    # GH#35847
    # Name propagation through intersection for every combination of
    # populated/empty operands.
    if isinstance(index, MultiIndex) or not index.is_unique:
        pytest.skip("Not for MultiIndex or repeated indices")

    def fresh_copy():
        return index.copy()

    def fresh_empty():
        return index.drop(index)

    # (left factory, right factory, expected-result factory);
    # any empty operand makes the intersection empty
    scenarios = [
        (fresh_copy, fresh_copy, fresh_copy),    # copy & copy
        (fresh_copy, fresh_empty, fresh_empty),  # copy & empty
        (fresh_empty, fresh_copy, fresh_empty),  # empty & copy
        (fresh_empty, fresh_empty, fresh_empty), # empty & empty
    ]
    for make_left, make_right, make_expected in scenarios:
        left = make_left().set_names(fname)
        right = make_right().set_names(sname)
        expected = make_expected().set_names(expected_name)
        tm.assert_index_equal(left.intersection(right), expected)
@pytest.mark.parametrize(
    "fname, sname, expected_name",
    [
        ("A", "A", "A"),
        ("A", "B", None),
        ("A", None, None),
        (None, "B", None),
        (None, None, None),
    ],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
    """Name propagation when one intersection operand is a strict subset."""
    if isinstance(index, MultiIndex) or not index.is_unique:
        pytest.skip("Not for MultiIndex or repeated indices")
    # copy.intersection(subset); sort both sides so orderings agree
    left = index.copy().set_names(fname)
    right = index[1:].set_names(sname)
    result = left.intersection(right).sort_values()
    expected = index[1:].set_names(expected_name).sort_values()
    tm.assert_index_equal(result, expected)
def test_intersection_name_retention_with_nameless(self, index):
    """Intersecting with a nameless ndarray keeps the index's own name."""
    if isinstance(index, MultiIndex):
        index = index.rename(list(range(index.nlevels)))
    else:
        index = index.rename("foo")
    other = np.asarray(index)
    # full operands, empty `other`, and empty `self` must all retain the name
    for left, right in [(index, other), (index, other[:0]), (index[:0], other)]:
        assert left.intersection(right).name == index.name
def test_difference_preserves_type_empty(self, index, sort):
    # GH#20040
    # index.difference(index) must be an empty index of the same type
    if not index.is_unique:
        return
    result = index.difference(index, sort=sort)
    tm.assert_index_equal(result, index[:0], exact=True)
def test_difference_name_retention_equals(self, index, sort, names):
    """Differencing two equal (renamed) indexes names the empty result correctly.

    ``names`` supplies three labels: for self, for other, and for the
    expected empty result.
    """
    # NOTE(review): the `sort` fixture is accepted but not used here —
    # confirm whether difference(..., sort=sort) was intended.
    if isinstance(index, MultiIndex):
        # MultiIndex naming is per-level; broadcast each label to all levels
        names = [[x] * index.nlevels for x in names]
    index = index.rename(names[0])
    other = index.rename(names[1])
    assert index.equals(other)
    result = index.difference(other)
    expected = index[:0].rename(names[2])
    tm.assert_index_equal(result, expected)
def test_intersection_difference_match_empty(self, index, sort):
    # GH#20040: intersecting an index with an empty index must produce the
    # same result as differencing the index with itself, for all index types
    if not index.is_unique:
        return
    empty_inter = index.intersection(index[:0])
    self_diff = index.difference(index, sort=sort)
    tm.assert_index_equal(empty_inter, self_diff, exact=True)
@pytest.mark.parametrize(
    "method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_setop_with_categorical(index, sort, method):
    """Each set op must agree whether `other` is the index or its categorical cast."""
    if isinstance(index, MultiIndex):
        # tested separately in tests.indexes.multi.test_setops
        return
    as_cat = index.astype("category")
    op = getattr(index, method)
    # full operands, then truncated operands
    tm.assert_index_equal(op(as_cat, sort=sort), op(index, sort=sort))
    tm.assert_index_equal(op(as_cat[:5], sort=sort), op(index[:5], sort=sort))
def test_intersection_duplicates_all_indexes(index):
    # GH#38743: intersection must be commutative and deduplicate, even when
    # one operand repeats entries
    if index.empty:
        # empty indexes have no duplicates to exercise
        return

    def is_commutative(left, right):
        return left.intersection(right).equals(right.intersection(left))

    dup = index[[0, 0, 1, 2]]
    assert is_commutative(index, dup)
    assert index.intersection(dup).is_unique
| [
"pandas.api.types.pandas_dtype",
"pandas._testing.equalContents",
"pandas._testing.assert_produces_warning",
"numpy.asarray",
"numpy.dtype",
"pytest.skip",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.Index",
"pandas.api.types.is_datetime64tz_dtype",
"pytest.xfail",
"pytest.raises",
"pa... | [((2494, 3133), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""left, right, expected"""', "[('int64', 'int64', 'int64'), ('int64', 'uint64', 'object'), ('int64',\n 'float64', 'float64'), ('uint64', 'float64', 'float64'), ('uint64',\n 'uint64', 'uint64'), ('float64', 'float64', 'float64'), (\n 'datetime64[ns]', 'int64', 'object'), ('datetime64[ns]', 'uint64',\n 'object'), ('datetime64[ns]', 'float64', 'object'), (\n 'datetime64[ns, CET]', 'int64', 'object'), ('datetime64[ns, CET]',\n 'uint64', 'object'), ('datetime64[ns, CET]', 'float64', 'object'), (\n 'Period[D]', 'int64', 'object'), ('Period[D]', 'uint64', 'object'), (\n 'Period[D]', 'float64', 'object')]"], {}), "('left, right, expected', [('int64', 'int64',\n 'int64'), ('int64', 'uint64', 'object'), ('int64', 'float64', 'float64'\n ), ('uint64', 'float64', 'float64'), ('uint64', 'uint64', 'uint64'), (\n 'float64', 'float64', 'float64'), ('datetime64[ns]', 'int64', 'object'),\n ('datetime64[ns]', 'uint64', 'object'), ('datetime64[ns]', 'float64',\n 'object'), ('datetime64[ns, CET]', 'int64', 'object'), (\n 'datetime64[ns, CET]', 'uint64', 'object'), ('datetime64[ns, CET]',\n 'float64', 'object'), ('Period[D]', 'int64', 'object'), ('Period[D]',\n 'uint64', 'object'), ('Period[D]', 'float64', 'object')])\n", (2517, 3133), False, 'import pytest\n'), ((3238, 3317), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""names"""', "[('foo', 'foo', 'foo'), ('foo', 'bar', None)]"], {}), "('names', [('foo', 'foo', 'foo'), ('foo', 'bar', None)])\n", (3261, 3317), False, 'import pytest\n'), ((4130, 4187), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""values"""', '[[1, 2, 2, 3], [3, 3]]'], {}), "('values', [[1, 2, 2, 3], [3, 3]])\n", (4153, 4187), False, 'import pytest\n'), ((15860, 15962), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['intersection', 'union', 'difference', 'symmetric_difference']"], {}), "('method', ['intersection', 'union', 
'difference',\n 'symmetric_difference'])\n", (15883, 15962), False, 'import pytest\n'), ((1554, 1592), 'pandas.core.dtypes.common.is_dtype_equal', 'is_dtype_equal', (['idx1.dtype', 'idx2.dtype'], {}), '(idx1.dtype, idx2.dtype)\n', (1568, 1592), False, 'from pandas.core.dtypes.common import is_dtype_equal\n'), ((3382, 3400), 'pandas.api.types.pandas_dtype', 'pandas_dtype', (['left'], {}), '(left)\n', (3394, 3400), False, 'from pandas.api.types import is_datetime64tz_dtype, pandas_dtype\n'), ((3413, 3432), 'pandas.api.types.pandas_dtype', 'pandas_dtype', (['right'], {}), '(right)\n', (3425, 3432), False, 'from pandas.api.types import is_datetime64tz_dtype, pandas_dtype\n'), ((3441, 3480), 'pandas.Index', 'pd.Index', (['[]'], {'dtype': 'left', 'name': 'names[0]'}), '([], dtype=left, name=names[0])\n', (3449, 3480), True, 'import pandas as pd\n'), ((3489, 3529), 'pandas.Index', 'pd.Index', (['[]'], {'dtype': 'right', 'name': 'names[1]'}), '([], dtype=right, name=names[1])\n', (3497, 3529), True, 'import pandas as pd\n'), ((4253, 4269), 'pandas.Index', 'pd.Index', (['values'], {}), '(values)\n', (4261, 4269), True, 'import pandas as pd\n'), ((4278, 4294), 'pandas.Index', 'pd.Index', (['[3, 3]'], {}), '([3, 3])\n', (4286, 4294), True, 'import pandas as pd\n'), ((4341, 4354), 'pandas.Index', 'pd.Index', (['[3]'], {}), '([3])\n', (4349, 4354), True, 'import pandas as pd\n'), ((4359, 4398), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (4380, 4398), True, 'import pandas._testing as tm\n'), ((4495, 4540), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case"""', "[0.5, 'xxx']"], {}), "('case', [0.5, 'xxx'])\n", (4518, 4540), False, 'import pytest\n'), ((4546, 4648), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['intersection', 'union', 'difference', 'symmetric_difference']"], {}), "('method', ['intersection', 'union', 'difference',\n 'symmetric_difference'])\n", 
(4569, 4648), False, 'import pytest\n'), ((9042, 9196), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (9065, 9196), False, 'import pytest\n'), ((10710, 10864), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (10733, 10864), False, 'import pytest\n'), ((11482, 11636), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (11505, 11636), False, 'import pytest\n'), ((13207, 13361), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (13230, 13361), False, 'import pytest\n'), ((16288, 16327), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (16309, 16327), True, 'import pandas._testing as tm\n'), ((16451, 16490), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (16472, 16490), True, 'import pandas._testing as tm\n'), 
((1347, 1411), 'pytest.xfail', 'pytest.xfail', (['"""This test only considers non compatible indexes."""'], {}), "('This test only considers non compatible indexes.')\n", (1359, 1411), False, 'import pytest\n'), ((1489, 1545), 'pytest.xfail', 'pytest.xfail', (['"""This test doesn\'t consider multiindixes."""'], {}), '("This test doesn\'t consider multiindixes.")\n', (1501, 1545), False, 'import pytest\n'), ((1602, 1663), 'pytest.xfail', 'pytest.xfail', (['"""This test only considers non matching dtypes."""'], {}), "('This test only considers non matching dtypes.')\n", (1614, 1663), False, 'import pytest\n'), ((2051, 2064), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (2059, 2064), True, 'import numpy as np\n'), ((2102, 2115), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (2110, 2115), True, 'import numpy as np\n'), ((3909, 3950), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (3935, 3950), True, 'import pandas._testing as tm\n'), ((3985, 4026), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (4011, 4026), True, 'import pandas._testing as tm\n'), ((4061, 4102), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (4087, 4102), True, 'import pandas._testing as tm\n'), ((5119, 5154), 'pandas._testing.equalContents', 'tm.equalContents', (['intersect', 'second'], {}), '(intersect, second)\n', (5135, 5154), True, 'import pandas._testing as tm\n'), ((5167, 5201), 'pandas.api.types.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['index.dtype'], {}), '(index.dtype)\n', (5188, 5201), False, 'from pandas.api.types import is_datetime64tz_dtype, pandas_dtype\n'), ((5935, 5970), 'pandas._testing.equalContents', 'tm.equalContents', (['union', 'everything'], {}), '(union, everything)\n', (5951, 5970), True, 'import pandas._testing as tm\n'), ((5983, 
6017), 'pandas.api.types.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['index.dtype'], {}), '(index.dtype)\n', (6004, 6017), False, 'from pandas.api.types import is_datetime64tz_dtype, pandas_dtype\n'), ((7053, 7085), 'pandas._testing.equalContents', 'tm.equalContents', (['result', 'answer'], {}), '(result, answer)\n', (7069, 7085), True, 'import pandas._testing as tm\n'), ((8333, 8365), 'pandas._testing.equalContents', 'tm.equalContents', (['result', 'answer'], {}), '(result, answer)\n', (8349, 8365), True, 'import pandas._testing as tm\n'), ((9838, 9876), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (9859, 9876), True, 'import pandas._testing as tm\n'), ((10110, 10148), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (10131, 10148), True, 'import pandas._testing as tm\n'), ((10382, 10420), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (10403, 10420), True, 'import pandas._testing as tm\n'), ((10665, 10703), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (10686, 10703), True, 'import pandas._testing as tm\n'), ((11437, 11475), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (11458, 11475), True, 'import pandas._testing as tm\n'), ((12255, 12297), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (12276, 12297), True, 'import pandas._testing as tm\n'), ((12554, 12596), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (12575, 12596), True, 'import pandas._testing as tm\n'), ((12853, 12895), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], 
{}), '(intersect, expected)\n', (12874, 12895), True, 'import pandas._testing as tm\n'), ((13158, 13200), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (13179, 13200), True, 'import pandas._testing as tm\n'), ((13960, 14002), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (13981, 14002), True, 'import pandas._testing as tm\n'), ((14247, 14264), 'numpy.asarray', 'np.asarray', (['index'], {}), '(index)\n', (14257, 14264), True, 'import numpy as np\n'), ((14915, 14966), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {'exact': '(True)'}), '(result, expected, exact=True)\n', (14936, 14966), True, 'import pandas._testing as tm\n'), ((15350, 15389), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (15371, 15389), True, 'import pandas._testing as tm\n'), ((15810, 15856), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['inter', 'diff'], {'exact': '(True)'}), '(inter, diff, exact=True)\n', (15831, 15856), True, 'import pandas._testing as tm\n'), ((4812, 4847), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (4825, 4847), False, 'import pytest\n'), ((5522, 5554), 'pandas._testing.equalContents', 'tm.equalContents', (['result', 'second'], {}), '(result, second)\n', (5538, 5554), True, 'import pandas._testing as tm\n'), ((8560, 8588), 'pandas.api.types.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['first'], {}), '(first)\n', (8581, 8588), False, 'from pandas.api.types import is_datetime64tz_dtype, pandas_dtype\n'), ((8785, 8817), 'pandas._testing.equalContents', 'tm.equalContents', (['result', 'answer'], {}), '(result, answer)\n', (8801, 8817), True, 'import pandas._testing as tm\n'), ((9557, 9610), 'pytest.skip', 'pytest.skip', (['"""Not for 
MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (9568, 9610), False, 'import pytest\n'), ((11101, 11154), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (11112, 11154), False, 'import pytest\n'), ((11956, 12009), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (11967, 12009), False, 'import pytest\n'), ((13602, 13655), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (13613, 13655), False, 'import pytest\n'), ((5682, 5717), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (5695, 5717), False, 'import pytest\n'), ((6395, 6431), 'pandas._testing.equalContents', 'tm.equalContents', (['result', 'everything'], {}), '(result, everything)\n', (6411, 6431), True, 'import pandas._testing as tm\n'), ((6672, 6707), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (6685, 6707), False, 'import pytest\n'), ((7560, 7592), 'pandas._testing.equalContents', 'tm.equalContents', (['result', 'answer'], {}), '(result, answer)\n', (7576, 7592), True, 'import pandas._testing as tm\n'), ((7720, 7755), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (7733, 7755), False, 'import pytest\n'), ((8700, 8739), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (8721, 8739), True, 'import pandas._testing as tm\n'), ((8945, 8980), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (8958, 8980), False, 'import pytest\n')] |
import logging
import coloredlogs
import matplotlib.pyplot as plt
import numpy as np
from neubio.analyze import find_epsp_peak, epsp_slope
from neubio.filter import butter_lpf, subtract_baseline, t_crop
from neubio.io import load_frame_group
# Module logger; silence matplotlib's chatty sub-WARNING output.
logger = logging.getLogger(__name__)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
coloredlogs.install(
    level="error", fmt="%(asctime)s %(levelname)s %(message)s", datefmt="%H:%M:%S"
)
### data source
# HDF5 file read by load_frame_group below
path = "../data/02_calcium/trial_1.h5"
### filter
fs = 10e3  # sampling rate passed to butter_lpf (presumably Hz — confirm)
lo_cutoff = 1e3  # low-pass cutoff passed to butter_lpf
### plotter
# single figure/axes reused by the plotting code at the bottom of the script
fig, ax = plt.subplots()
def preprocess(index):
    """Load one frame group and split it around the two stimulus onsets.

    Returns ``(t, [raw1, raw2], [filt1, filt2])`` where each entry stacks the
    per-frame crops along axis 0 — one array per stimulus window — and the
    returned time axis is shifted to start at zero.

    NOTE(review): assumes ``stim`` holds two separable peaks more than 100
    samples apart — confirm against the recording protocol.
    """
    t, stim, rec = load_frame_group(path, index=index, stacked=False)

    # First onset is the global maximum; blank it out to expose the second.
    i1 = np.argmax(stim)
    stim[i1 : i1 + 100] = 0
    i2 = np.argmax(stim)
    ts1, ts2 = t[i1], t[i2]
    logger.debug("stimuli timestamp: {}, {}".format(ts1, ts2))

    logger.info("applying LPF and background subtraction")
    raw_traces, filt_traces = [], []
    for trace in rec:
        lowpassed = butter_lpf(trace, lo_cutoff, fs)
        filt_traces.append(subtract_baseline(t, lowpassed))
        raw_traces.append(subtract_baseline(t, trace))

    logger.info("cropping")
    # window 1: between the two onsets; window 2: same duration after onset 2
    win1, win2 = (ts1, ts2), (ts2, 2 * ts2 - ts1)
    t_win = None
    rec1, rec2, filt1, filt2 = [], [], [], []
    for raw, filt in zip(raw_traces, filt_traces):
        t_win, cropped = t_crop(t, raw, win1)
        rec1.append(cropped)
        filt1.append(t_crop(t, filt, win1)[1])
        rec2.append(t_crop(t, raw, win2)[1])
        filt2.append(t_crop(t, filt, win2)[1])

    # re-zero the cropped time axis
    t_win -= t_win[0]
    return (
        t_win,
        [np.stack(rec1, axis=0), np.stack(rec2, axis=0)],
        [np.stack(filt1, axis=0), np.stack(filt2, axis=0)],
    )
def extract_amplitudes(index, r_min=0.7):
    """Collect EPSP peak amplitudes from both stimulus windows of one trial.

    Parameters
    ----------
    index : passed through to ``preprocess`` to select the frame group.
    r_min : float, optional
        Minimum absolute correlation coefficient of the fitted EPSP slope;
        frames whose fit falls below this are discarded with a warning.

    Returns
    -------
    list
        Peak amplitude for every accepted frame/window.
    """
    t, rec, rec_filt = preprocess(index)
    n_discarded = 0
    amp = []
    for rec1_, rec_filt1_, rec2_, rec_filt2_ in zip(
        rec[0], rec_filt[0], rec[1], rec_filt[1]
    ):
        # Both stimulus windows receive identical treatment; the original
        # duplicated this body verbatim — fold it into one inner loop.
        for raw, filt in ((rec1_, rec_filt1_), (rec2_, rec_filt2_)):
            # use the filtered trace to locate the peak and score the slope fit
            ipk, _ = find_epsp_peak(t, filt)
            _, r, _, _ = epsp_slope(t, raw, ipk, yf=filt, return_pos=True)
            if abs(r) < r_min:
                n_discarded += 1
                logger.warning(
                    "discarded new frame ({}), r={:.4f}, ".format(n_discarded, r)
                )
            else:
                # amplitude is the raw (unfiltered) value at the peak sample
                amp.append(raw[ipk])
    return amp
# trial-index ranges per experimental condition; keys look like
# concentrations (variable is named `conc`) — confirm units
mapping = {0.5: (301, 355), 2.5: (247, 300), 5.0: (400, 462)}
amp = []
for conc, index in mapping.items():
    amp_ = extract_amplitudes(index)
    amp.extend(amp_)
amp = np.array(amp)
print("n={}".format(len(amp)))
print()
# relative value
amp = np.abs(amp)
hist, edges = np.histogram(amp, bins=64)
# bin centers = midpoints of adjacent histogram edges
bins = (edges[:-1] + edges[1:])/2
print(hist)
# plot histogram
plt.cla()
ax.bar(bins, hist, width=0.05)
# labels
# NOTE(review): no artist above carries a label, so legend() will emit a
# "no handles" warning and draw nothing — confirm it is wanted.
ax.legend()
# NOTE(review): "amplitdue" typo appears in the rendered axis label
plt.xlabel('EPSP amplitdue (mV)')
plt.ylabel('Counts')
# final adjust
_, xmax = ax.get_xlim()
ax.set_xlim((0, xmax))
plt.savefig("quantal.png", dpi=300)
plt.waitforbuttonpress()
| [
"neubio.filter.subtract_baseline",
"numpy.abs",
"numpy.argmax",
"logging.getLogger",
"numpy.histogram",
"neubio.io.load_frame_group",
"matplotlib.pyplot.cla",
"neubio.analyze.find_epsp_peak",
"matplotlib.pyplot.subplots",
"numpy.stack",
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplo... | [((254, 281), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (271, 281), False, 'import logging\n'), ((341, 445), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""error"""', 'fmt': '"""%(asctime)s %(levelname)s %(message)s"""', 'datefmt': '"""%H:%M:%S"""'}), "(level='error', fmt=\n '%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S')\n", (360, 445), False, 'import coloredlogs\n'), ((565, 579), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (577, 579), True, 'import matplotlib.pyplot as plt\n'), ((3164, 3177), 'numpy.array', 'np.array', (['amp'], {}), '(amp)\n', (3172, 3177), True, 'import numpy as np\n'), ((3242, 3253), 'numpy.abs', 'np.abs', (['amp'], {}), '(amp)\n', (3248, 3253), True, 'import numpy as np\n'), ((3269, 3295), 'numpy.histogram', 'np.histogram', (['amp'], {'bins': '(64)'}), '(amp, bins=64)\n', (3281, 3295), True, 'import numpy as np\n'), ((3361, 3370), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3368, 3370), True, 'import matplotlib.pyplot as plt\n'), ((3424, 3457), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""EPSP amplitdue (mV)"""'], {}), "('EPSP amplitdue (mV)')\n", (3434, 3457), True, 'import matplotlib.pyplot as plt\n'), ((3458, 3478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (3468, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3543, 3578), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""quantal.png"""'], {'dpi': '(300)'}), "('quantal.png', dpi=300)\n", (3554, 3578), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3603), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (3601, 3603), True, 'import matplotlib.pyplot as plt\n'), ((640, 690), 'neubio.io.load_frame_group', 'load_frame_group', (['path'], {'index': 'index', 'stacked': '(False)'}), '(path, index=index, stacked=False)\n', (656, 690), False, 'from neubio.io import load_frame_group\n'), ((738, 
753), 'numpy.argmax', 'np.argmax', (['stim'], {}), '(stim)\n', (747, 753), True, 'import numpy as np\n'), ((819, 834), 'numpy.argmax', 'np.argmax', (['stim'], {}), '(stim)\n', (828, 834), True, 'import numpy as np\n'), ((282, 313), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (299, 313), False, 'import logging\n'), ((1099, 1130), 'neubio.filter.butter_lpf', 'butter_lpf', (['rec_', 'lo_cutoff', 'fs'], {}), '(rec_, lo_cutoff, fs)\n', (1109, 1130), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((1149, 1178), 'neubio.filter.subtract_baseline', 'subtract_baseline', (['t', 'rec_lpf'], {}), '(t, rec_lpf)\n', (1166, 1178), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((1231, 1257), 'neubio.filter.subtract_baseline', 'subtract_baseline', (['t', 'rec_'], {}), '(t, rec_)\n', (1248, 1257), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((1496, 1523), 'neubio.filter.t_crop', 't_crop', (['t', 'rec_', '(ts1, ts2)'], {}), '(t, rec_, (ts1, ts2))\n', (1502, 1523), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((1575, 1607), 'neubio.filter.t_crop', 't_crop', (['t', 'rec_filt_', '(ts1, ts2)'], {}), '(t, rec_filt_, (ts1, ts2))\n', (1581, 1607), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((1665, 1702), 'neubio.filter.t_crop', 't_crop', (['t', 'rec_', '(ts2, 2 * ts2 - ts1)'], {}), '(t, rec_, (ts2, 2 * ts2 - ts1))\n', (1671, 1702), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((1754, 1796), 'neubio.filter.t_crop', 't_crop', (['t', 'rec_filt_', '(ts2, 2 * ts2 - ts1)'], {}), '(t, rec_filt_, (ts2, 2 * ts2 - ts1))\n', (1760, 1796), False, 'from neubio.filter import butter_lpf, subtract_baseline, t_crop\n'), ((2307, 2336), 'neubio.analyze.find_epsp_peak', 'find_epsp_peak', (['t', 'rec_filt1_'], {}), '(t, rec_filt1_)\n', (2321, 2336), False, 'from neubio.analyze 
import find_epsp_peak, epsp_slope\n'), ((2374, 2431), 'neubio.analyze.epsp_slope', 'epsp_slope', (['t', 'rec1_', 'ipk'], {'yf': 'rec_filt1_', 'return_pos': '(True)'}), '(t, rec1_, ipk, yf=rec_filt1_, return_pos=True)\n', (2384, 2431), False, 'from neubio.analyze import find_epsp_peak, epsp_slope\n'), ((2674, 2703), 'neubio.analyze.find_epsp_peak', 'find_epsp_peak', (['t', 'rec_filt2_'], {}), '(t, rec_filt2_)\n', (2688, 2703), False, 'from neubio.analyze import find_epsp_peak, epsp_slope\n'), ((2741, 2798), 'neubio.analyze.epsp_slope', 'epsp_slope', (['t', 'rec2_', 'ipk'], {'yf': 'rec_filt2_', 'return_pos': '(True)'}), '(t, rec2_, ipk, yf=rec_filt2_, return_pos=True)\n', (2751, 2798), False, 'from neubio.analyze import find_epsp_peak, epsp_slope\n'), ((1900, 1922), 'numpy.stack', 'np.stack', (['rec1'], {'axis': '(0)'}), '(rec1, axis=0)\n', (1908, 1922), True, 'import numpy as np\n'), ((1924, 1946), 'numpy.stack', 'np.stack', (['rec2'], {'axis': '(0)'}), '(rec2, axis=0)\n', (1932, 1946), True, 'import numpy as np\n'), ((1958, 1985), 'numpy.stack', 'np.stack', (['rec_filt1'], {'axis': '(0)'}), '(rec_filt1, axis=0)\n', (1966, 1985), True, 'import numpy as np\n'), ((1987, 2014), 'numpy.stack', 'np.stack', (['rec_filt2'], {'axis': '(0)'}), '(rec_filt2, axis=0)\n', (1995, 2014), True, 'import numpy as np\n')] |
import pandas as pd
from pandas.plotting import lag_plot
import numpy as np
import matplotlib as mlp
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
import pmdarima as pm
from ipywidgets import *
from IPython.display import display, HTML
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
dataurl = 'https://raw.githubusercontent.com/ming-zhao/Business-Analytics/master/data/time_series/'
df_house = pd.read_csv(dataurl+'house_sales.csv', parse_dates=['date'], header=0, index_col='date')
df_house['year'] = [d.year for d in df_house.index]
df_house['month'] = [d.strftime('%b') for d in df_house.index]
df_drink = pd.read_csv(dataurl+'drink_sales.csv', parse_dates=['date'], header=0)
df_drink['date'] = [pd.to_datetime(''.join(df_drink.date.str.split('-')[i][-1::-1]))
+ pd.offsets.QuarterEnd(0) for i in df_drink.index]
df_drink = df_drink.set_index('date')
# df_drink[['q','year']]=df_drink['quarter'].str.split('-',expand=True)
df_drink['year'] = [d.year for d in df_drink.index]
df_drink['quarter'] = ['Q'+str(d.month//3) for d in df_drink.index]
def sinusoidal(x):
    """Ground-truth target sin(2*pi*x) used by the toy fitting demos."""
    two_pi = 2 * np.pi
    return np.sin(two_pi * x)
def create_data(func, sample_size, std, domain=(0, 1)):
    """Draw noisy samples of ``func`` at shuffled, evenly spaced points.

    Parameters
    ----------
    func : callable
        Vectorized target function evaluated on the sample points.
    sample_size : int
        Number of points drawn from ``domain``.
    std : float
        Standard deviation of the additive Gaussian noise.
    domain : sequence of two floats, optional
        Interval ``(lo, hi)`` the points are spread over.  Changed from the
        mutable default ``[0, 1]`` to a tuple — a list default is shared
        across calls and a classic Python pitfall; callers passing lists
        still work.

    Returns
    -------
    tuple of ndarray
        ``(x, t)`` — shuffled sample points and their noisy targets.
    """
    x = np.linspace(*domain, sample_size)
    np.random.shuffle(x)
    t = func(x) + np.random.normal(scale=std, size=x.shape)
    return x, t
def training_data(show):
    """Scatter the fixed noisy training sample; optionally overlay the target.

    Parameters
    ----------
    show : bool
        When true, also draw the noise-free sin(2*pi*x) curve.
    """
    np.random.seed(11223)  # fixed seed -> identical sample every call
    xs, ts = create_data(sinusoidal, 13, 0.25)
    grid = np.linspace(0, 1, 100)
    curve = sinusoidal(grid)
    plt.scatter(xs, ts, facecolor="none", edgecolor="b", s=50, label="training data")
    if show:
        plt.plot(grid, curve, c="g", label="$\sin(2\pi x)$")
    plt.ylim(-1.5, 1.5)
    plt.legend(loc=1)
    plt.show()
def poly_fit(show):
    """Fit degree-1/3/9 polynomials to the toy sample and plot the three fits.

    Parameters
    ----------
    show : bool
        When true, also overlay the noise-free sin(2*pi*x) target.
    """
    np.random.seed(11223)  # same fixed sample as training_data
    xs, ts = create_data(sinusoidal, 13, 0.25)
    grid = np.linspace(0, 1, 100)
    truth = sinusoidal(grid)
    plt.figure(figsize=(15, 4))
    for panel, degree in enumerate([1, 3, 9], start=1):
        plt.subplot(1, 3, panel)
        features = PolynomialFeatures(degree=degree, include_bias=True)
        reg = LinearRegression()
        reg.fit(features.fit_transform(xs[:, None]), ts[:, None])
        fitted = reg.predict(features.fit_transform(grid[:, None]))
        plt.scatter(xs, ts, facecolor="none", edgecolor="b", s=50, label="training data")
        if show:
            plt.plot(grid, truth, c="g", label="$\sin(2\pi x)$")
        plt.plot(grid, fitted, c="r", label="fitting")
        plt.ylim(-1.5, 1.5)
        plt.legend(loc=1)
        plt.title("polynomial fitting with dregree {}".format(degree))
    plt.show()
def poly_fit_holdout(show, train, test):
    """Like poly_fit, but fits on all points except the last 3 and can show
    those 3 held-out points separately.

    Parameters
    ----------
    show : bool — overlay the true sin(2*pi*x) curve
    train : bool — scatter the training points
    test : bool — scatter the 3 held-out points
    """
    np.random.seed(11223)
    x_train, t_train = create_data(sinusoidal, 13, 0.25)
    x_test = np.linspace(0, 1, 100)
    t_test = sinusoidal(x_test)
    fig = plt.figure(figsize=(15, 4))
    for i, degree in enumerate([1, 3, 9]):
        plt.subplot(1, 3, i+1)
        poly = PolynomialFeatures(degree=degree, include_bias=True)
        model = LinearRegression()
        # Fit on all but the last 3 observations (the holdout set).
        model.fit(poly.fit_transform(x_train[:-3,None]),t_train[:-3,None])
        t = model.predict(poly.fit_transform(x_test[:,None]))
        if train:
            plt.scatter(x_train[:-3], t_train[:-3], facecolor="none", edgecolor="b", s=50, label="training data")
        if test:
            plt.scatter(x_train[-3:], t_train[-3:], facecolor="none", edgecolor="orange", s=50, label="testing data")
        if show:
            # Raw string fixes the invalid "\s" escape; label renders identically.
            plt.plot(x_test, t_test, c="g", label=r"$\sin(2\pi x)$")
        plt.plot(x_test, t, c="r", label="fitting")
        plt.ylim(-1.5, 1.5)
        plt.legend(loc=1)
        # Typo fix in the displayed title: "dregree" -> "degree".
        plt.title("polynomial fitting with degree {}".format(degree))
    plt.show()
# noise = pd.Series(np.random.randn(200))
# def randomwalk(drift):
# return pd.Series(np.cumsum(np.random.uniform(-1,1,(200,1)) + drift*np.ones((200,1))))
def random_walk(drift):
    """Plot a 200-step random walk whose increments are U(-1, 1) plus `drift`."""
    np.random.seed(123)  # reproducible walk
    increments = np.random.uniform(-1, 1, (200, 1)) + drift * np.ones((200, 1))
    pd.Series(np.cumsum(increments)).plot(title='Random Walk')
    plt.show()
def plot_time_series(df, col_name, freq='Month', title=''):
    """Line-plot one column of a time-indexed DataFrame, extending the x-axis
    to January 1st of the year after the last observation."""
    ax = df.plot(y=col_name, figsize=(15,6), x_compat=True)
    x_start = pd.to_datetime(df.index[0])
    x_end = pd.to_datetime(str(pd.Timestamp(df.index[-1]).year+1) + '-01-01')
    ax.set_xlim(x_start, x_end)
    if freq=='Month':
        # One major tick per year, labelled mm-YYYY.
        ax.xaxis.set_major_locator(mdates.MonthLocator(interval=12))
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%Y'))
    plt.title(title)
    plt.show()
def seasonal_plot(df, col_names, title=''):
    """Overlay one line per calendar year to expose seasonality.

    Parameters
    ----------
    df : DataFrame with a DatetimeIndex and a 'year' column
    col_names : [period_col, value_col], e.g. ['month', 'sales']
    title : str, figure title

    NOTE(review): the ``if i > 0`` guard below skips the earliest year
    entirely — presumably because it is a partial year; confirm against
    the data before relying on this.
    """
    np.random.seed(100)
    years = pd.Series([x.year for x in df.index]).unique()
    # One distinct XKCD colour per year, drawn without replacement.
    mycolors = np.random.choice(list(mlp.colors.XKCD_COLORS.keys()), len(years), replace=False)
    plt.subplots(1, 1, figsize=(12,6), dpi=120)
    # Horizontal offset for the year label at the right end of each line;
    # quarterly data uses a larger shift because it has fewer x positions.
    label_shift = .4
    if col_names[0]=='quarter':
        label_shift = .8
    for i, y in enumerate(years):
        if i > 0:
            plt.plot(col_names[0], col_names[1], data=df.loc[df.year==y, :], color=mycolors[i], label=y)
            # Annotate the year just to the right of the line's last point.
            plt.text(df.loc[df.year==y, :].shape[0]-label_shift,
                     df.loc[df.year==y, col_names[1]][-1:].values[0], y, color=mycolors[i], fontsize=12)
    plt.title(title)
def boxplot(df, col_names, title=''):
    """Side-by-side box plots: by year (trend) and by period (seasonality).

    col_names is [period_col, value_col]; the second sns.boxplot call draws on
    the current axes, which is the right-hand panel.
    """
    fig, axes = plt.subplots(1, 2, figsize=(18,6), dpi=120)
    sns.boxplot(x='year', y=col_names[1], data=df, ax=axes[0])
    axes[0].set_xticklabels(axes[0].get_xticklabels(), rotation=30)
    sns.boxplot(x=col_names[0], y=col_names[1], data=df)
    axes[0].set_title(f'Year-wise Box Plot for {title}\n(The Trend)', fontsize=14)
    axes[1].set_title(f'Month-wise Box Plot for {title}\n(The Seasonality)', fontsize=14)
    plt.show()
def moving_average(span):
    """Overlay a `span`-period rolling mean on the house-sales series."""
    fig, ax = plt.subplots(1, 1, figsize=(12, 6))
    rolling_mean = df_house.sales.rolling(span).mean()
    rolling_mean.plot(ax=ax, title='Moving Average ({})'.format(span), c='red')
    df_house.sales.plot(ax=ax, c='teal')
    ax.legend(labels=['Moving Average', 'Original'])
    fig.canvas.draw()
    plt.show()
def lowess_smooth(frac=0.05):
    """Overlay a LOWESS-smoothed curve (bandwidth fraction `frac`) on house sales."""
    from statsmodels.nonparametric.smoothers_lowess import lowess
    fig, ax = plt.subplots(1, 1, figsize=(12, 6))
    # lowess returns (x, fitted) pairs; keep only the fitted column.
    fitted = lowess(df_house.sales, np.arange(len(df_house.sales)), frac=frac)[:, 1]
    df_loess = pd.DataFrame(fitted, index=df_house.index, columns=['value'])
    df_loess['value'].plot(ax=ax, title='Loess Smoothed {}%'.format(frac*100), c='red')
    df_house.sales.plot(ax=ax, c='teal')
    ax.legend(labels=['Lowess Smooth', 'Original'])
    fig.canvas.draw()
    plt.show()
def analysis(df, y, x, printlvl):
    """Fit an OLS model y ~ x[0] + x[1] + ... and show diagnostics by level.

    Parameters
    ----------
    df : DataFrame with the regression columns
    y : str, response column name
    x : list of str, predictor column names
    printlvl : int, cumulative verbosity:
        >=1 regression scatter (single predictor only), >=2 residual panels,
        >2  Kolmogorov-Smirnov normality test, >=4 model summary,
        >=5 ANOVA table

    Returns
    -------
    The fitted statsmodels results object.
    """
    result = ols(formula=y+'~'+'+'.join(x), data=df).fit()
    if printlvl>=4:
        display(result.summary())
        # sqrt(result.scale) is the residual standard error.
        print('\nstandard error of estimate:{:.5f}\n'.format(np.sqrt(result.scale)))
    if printlvl>=5:
        print("\nANOVA Table:\n")
        display(sm.stats.anova_lm(result, typ=2))
    if printlvl>=1:
        # Regression scatter only makes sense for a single predictor.
        if len(x)==1:
            fig, axes = plt.subplots(1,1,figsize=(8,5))
            sns.regplot(x=x[0], y=y, data=df,
                        ci=None,
                        line_kws={'color':'green',
                                  'label':"$Y$"+"$={:.2f}X+{:.2f}$\n$R^2$={:.3f}".format(result.params[1],
                                                                                         result.params[0],
                                                                                         result.rsquared)},
                        ax=axes);
            axes.legend()
    if printlvl>=2:
        # Three residual diagnostics: residuals vs fitted, Q-Q plot, density.
        fig, axes = plt.subplots(1,3,figsize=(20,6))
        axes[0].relim()
        sns.residplot(result.fittedvalues, result.resid , lowess=False, scatter_kws={"s": 80},
                      line_kws={'color':'r', 'lw':1}, ax=axes[0])
        axes[0].set_title('Residual plot')
        axes[0].set_xlabel('Fitted values')
        axes[0].set_ylabel('Residuals')
        axes[1].relim()
        stats.probplot(result.resid, dist='norm', plot=axes[1])
        axes[1].set_title('Normal Q-Q plot')
        axes[2].relim()
        sns.distplot(result.resid, ax=axes[2]);
        # At level 2 exactly, keep only the residual-vs-fitted panel.
        if printlvl==2:
            fig.delaxes(axes[1])
            fig.delaxes(axes[2])
    plt.show()
    if printlvl>2:
        display(stats.kstest(result.resid, 'norm'))
    return result
def ses_forecast(forecasts, holdouts, level, optimized):
    """Fit simple exponential smoothing to house sales and plot the forecast.

    Parameters
    ----------
    forecasts : int, periods to forecast past the available data
    holdouts : int, trailing observations excluded from the fit (0 = none)
    level : float, smoothing level passed to SimpleExpSmoothing.fit
    optimized : bool, let statsmodels optimize the smoothing parameters
    """
    from statsmodels.tsa.holtwinters import SimpleExpSmoothing
    df_house.index.freq = 'MS'
    plt.figure(figsize=(12, 6))
    if holdouts==0:
        # Bug fix: use an empty *Series* slice, not a bare list — the original
        # `test = []` made `plt.plot(test.index, ...)` below pass the list's
        # `.index` method instead of an index.
        train, test = df_house.iloc[:, 0], df_house.iloc[0:0, 0]
        pred_end = train.index[-1] + forecasts*df_house.index.freq
    else:
        train, test = df_house.iloc[:-holdouts, 0], df_house.iloc[-holdouts:, 0]
        pred_end = test.index[-1] + forecasts*df_house.index.freq
    # Fit once — the two original branches duplicated this call verbatim.
    model = SimpleExpSmoothing(train).fit(smoothing_level=level, optimized=optimized)
    pred = model.predict(start=train.index[0], end=pred_end)
    plt.plot(test.index, test, label='Holdouts', c='fuchsia')
    plt.plot(train.index, train, label='Train', c='cornflowerblue')
    plt.plot(pred.index, pred, label='Simple Exponential Smoothing', c='orange')
    plt.legend(loc='best')
    plt.title('House Sales')
    plt.show()
def stationarity_test(df_col, title=''):
    """Print ADF and KPSS stationarity tests with verdicts at the 5% level.

    The two tests complement each other: ADF's null hypothesis is
    non-stationarity, KPSS's null hypothesis is stationarity.
    """
    print('Test on {}:\n'.format(title))
    from statsmodels.tsa.stattools import adfuller, kpss
    reject_msg = 'We reject the null hypothesis at 5% level.'
    keep_msg = 'We do not reject the null hypothesis.'
    # --- ADF test (H0: unit root / non-stationary) ---
    adf_res = adfuller(df_col.values, autolag='AIC')
    print('ADF Statistic \t: {:.5f}'.format(adf_res[0]))
    print('p-value \t: {:.5f}'.format(adf_res[1]))
    print('Critial Values \t:')
    for pct, crit in adf_res[4].items():
        print('\t{:3.1f}% \t: {:.5f}'.format(float(pct[:-1]), crit))
    print('\nH0: The time series is non-stationary')
    print(reject_msg if adf_res[1] < 0.05 else keep_msg)
    # --- KPSS test (H0: stationary) ---
    kpss_res = kpss(df_col.values, regression='c')
    print('\nKPSS Statistic \t: {:.5f}'.format(kpss_res[0]))
    print('p-value \t: {:.5f}'.format(kpss_res[1]))
    print('Critial Values \t:')
    for pct, crit in kpss_res[3].items():
        print('\t{:3.1f}%\t: {:.5f}'.format(float(pct[:-1]), crit))
    print('\nH0: The time series is stationary')
    print(reject_msg if kpss_res[1] < 0.05 else keep_msg)
def decomp(df_col):
    """Seasonal-decompose a series under both multiplicative and additive
    models, print how the first observation decomposes, and plot both."""
    from statsmodels.tsa.seasonal import seasonal_decompose
    import statsmodels.api as sm
    # Multiplicative Decomposition
    result_mul = sm.tsa.seasonal_decompose(df_col, model='multiplicative', extrapolate_trend='freq')
    print('Multiplicative Model\t: Observed {:.3f} = (Seasonal {:.3f} * Trend {:.3f} * Resid {:.3f})'.format(
        result_mul.observed[0], result_mul.trend[0], result_mul.seasonal[0], result_mul.resid[0]))
    # Additive Decomposition
    result_add = sm.tsa.seasonal_decompose(df_col, model='additive', extrapolate_trend='freq')
    # Fix: report the additive model's own observed value (the original read
    # result_mul.observed — numerically identical since both decompose the same
    # series, but a misleading copy-paste slip).
    print('Additive Model\t\t: Observed {:.3f} = (Seasonal {:.3f} + Trend {:.3f} + Resid {:.3f})'.format(
        result_add.observed[0], result_add.trend[0], result_add.seasonal[0], result_add.resid[0]))
    # Setting extrapolate_trend='freq' takes care of any missing values
    # in the trend and residuals at the beginning of the series.
    plt.rcParams.update({'figure.figsize': (10,8)})
    result_mul.plot().suptitle('Multiplicative Decompose', fontsize=18)
    plt.subplots_adjust(top=.93)
    result_add.plot().suptitle('Additive Decompose', fontsize=18)
    plt.subplots_adjust(top=.93)
    plt.show()
def detrend(df_col, model = 'multiplicative'):
    """Plot two detrended versions of a series side by side:
    (left) least-squares linear detrend, (right) decomposition-trend removal.

    Parameters
    ----------
    df_col : pandas Series to detrend
    model : 'multiplicative' (divide by trend) or 'additive' (subtract trend);
        any other value re-plots the least-squares result on the right panel,
        matching the original behavior.
    """
    # Using scipy: Subtract the line of best fit
    from scipy import signal
    import statsmodels.api as sm
    from statsmodels.tsa.seasonal import seasonal_decompose
    plt.subplots(1, 2, figsize=(12,4), dpi=80)
    detrended = signal.detrend(df_col.values)
    plt.subplot(1, 2, 1)
    plt.plot(detrended)
    plt.title('Subtracting the least squares fit', fontsize=16)
    # Perf fix: run only the decomposition the requested model needs
    # (the original computed both unconditionally).
    if model=='multiplicative':
        result_mul = sm.tsa.seasonal_decompose(df_col, model='multiplicative', extrapolate_trend='freq')
        detrended = df_col.values / result_mul.trend
    if model=='additive':
        result_add = sm.tsa.seasonal_decompose(df_col, model='additive', extrapolate_trend='freq')
        detrended = df_col.values - result_add.trend
    plt.subplot(1, 2, 2)
    plt.plot(detrended)
    plt.title('Subtracting the trend component', fontsize=16)
    plt.show()
def deseasonalize(df_col, model, title=''):
    """Plot a series with its seasonal component removed.

    Parameters
    ----------
    df_col : pandas Series
    model : 'multiplicative'/'mul' (divide out the seasonal component) or
        'additive'/'add' (subtract it)
    title : str appended to the plot title

    Raises
    ------
    ValueError for an unrecognized model — the original fell through to an
    opaque NameError on the unbound `deseasonalized`.
    """
    import statsmodels.api as sm
    from statsmodels.tsa.seasonal import seasonal_decompose
    plt.subplots(1, 1, figsize=(12,8))
    if model=='multiplicative' or model=='mul':
        result_mul = sm.tsa.seasonal_decompose(df_col, model='multiplicative', extrapolate_trend='freq')
        deseasonalized = df_col.values / result_mul.seasonal
    elif model=='additive' or model=='add':
        result_add = sm.tsa.seasonal_decompose(df_col, model='additive', extrapolate_trend='freq')
        deseasonalized = df_col.values - result_add.seasonal
    else:
        raise ValueError("model must be 'multiplicative'/'mul' or 'additive'/'add'")
    plt.subplot(2,1,1)
    plt.plot(deseasonalized)
    plt.title('Deseasonalized {}'.format(title), fontsize=12)
def plot_autocorr(df_col, title=''):
    """Draw pandas' autocorrelation plot for a series."""
    from pandas.plotting import autocorrelation_plot
    plt.rcParams.update({'figure.figsize':(8,3), 'figure.dpi':120})
    series_values = df_col.values
    autocorrelation_plot(series_values)
    plt.title(title)
    plt.show()
def plot_acf_pacf(df_col, acf_lag, pacf_lag):
    """Draw ACF and PACF of a series side by side.

    Parameters
    ----------
    df_col : pandas Series
    acf_lag, pacf_lag : int, number of lags for each plot
    """
    # Removed the unused `from statsmodels.tsa.stattools import acf, pacf`
    # (neither name was referenced in this function).
    from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
    fig, axes = plt.subplots(1,2,figsize=(16,3), dpi=100)
    _ = plot_acf(df_col.values, lags=acf_lag, ax=axes[0])
    _ = plot_pacf(df_col.tolist(), lags=pacf_lag, ax=axes[1])
def differencing(df, col_name, title='', period=2):
    """Plot a series and its first `period` difference orders with ACFs,
    printing the standard deviation at each differencing level.

    Parameters
    ----------
    df : time-indexed DataFrame
    col_name : str, column to difference
    title : str, overall plot title
    period : int, highest differencing order to show
    """
    from statsmodels.graphics.tsaplots import plot_acf
    plt.rcParams.update({'figure.figsize':(9,6), 'figure.dpi':150})
    # Original Series
    fig, axes = plt.subplots(period+1, 2, sharex='col')
    fig.tight_layout()
    axes[0, 0].plot(df.index, df[col_name])
    axes[0, 0].set_title('Original Series')
    _ = plot_acf(df[col_name].values, lags=50, ax=axes[0, 1])
    # Bug fix: was hard-coded df['sales'] even though the column is a parameter.
    print('Standard deviation original series: {:.3f}'.format(np.std(df[col_name].values)))
    for t in range(period):
        axes[t+1, 0].plot(df.index, df[col_name].diff(t+1))
        # NOTE(review): '{}st' renders "2st" for t+1 == 2; kept as-is to
        # preserve the printed output.
        axes[t+1, 0].set_title('{}st Order Differencing'.format(t+1))
        plot_acf(df[col_name].diff(t+1).dropna(), lags=50, ax=axes[t+1, 1])
        # Bug fix: same hard-coded df['sales'] replaced with df[col_name].
        print('Standard deviation {}st differencing: {:.3f}'.format(t+1,np.std(df[col_name].diff(t+1).dropna().values)))
    plt.title(title)
    plt.show()
def house_drink_lag(house_lag, drink_lag):
    """Side-by-side lag plots for the house and drink sales series."""
    fig, axes = plt.subplots(1, 2, figsize=(12,4), dpi=100)
    panels = ((axes[0], df_house.sales, house_lag, 'House Sales Lag Plot'),
              (axes[1], df_drink.sales, drink_lag, 'Drink Sales Lag Plot'))
    for ax, series, lag, panel_title in panels:
        lag_plot(series, lag=lag, ax=ax, c='firebrick')
        ax.set_title(panel_title)
    plt.show()
def noise_rndwalk_lag(noise_lag, rndwalk_lag):
    """Lag plots contrasting white noise with a drifting random walk."""
    noise = pd.Series(np.random.randn(200))
    fig, axes = plt.subplots(1, 2, figsize=(12,4), dpi=100)
    lag_plot(noise, lag=noise_lag, ax=axes[0], c='firebrick')
    axes[0].set_title('White noise Lag Plot')
    # Random walk: cumulative sum of U(-1,1) steps with a small positive drift.
    walk = pd.Series(np.cumsum(np.random.uniform(-1,1,(200,1)) + 0.01*np.ones((200,1))))
    lag_plot(walk, lag=rndwalk_lag, ax=axes[1], c='firebrick')
    axes[1].set_title('Random Walk Lag Plot')
    plt.show()
def arima_(p, d, q):
    """Fit ARIMA(p, d, q) on house sales; show the summary, residual
    diagnostics, and the in-sample fit."""
    from statsmodels.tsa.arima_model import ARIMA
    model_fit = ARIMA(df_house.sales, order=(p, d, q)).fit(disp=0)
    display(model_fit.summary())
    # Residual diagnostics: time plot and kernel-density estimate.
    residuals = pd.DataFrame(model_fit.resid)
    fig, ax = plt.subplots(1,2, figsize=(12,3))
    residuals.plot(title="Residuals", ax=ax[0])
    residuals.plot(kind='kde', title='Density', ax=ax[1])
    plt.show()
    plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
    model_fit.plot_predict(dynamic=False)
    plt.show()
def forecast_accuracy(forecast, actual):
    """Return a dict of accuracy metrics comparing a forecast to actuals.

    Keys: MAPE, ME, MAE, MPE, RMSE, ACF1 (lag-1 autocorrelation of the
    errors), Corr, Minmax. Both inputs are 1-D numpy arrays.
    """
    from statsmodels.tsa.stattools import acf
    err = forecast - actual
    mape = np.mean(np.abs(err)/np.abs(actual))     # MAPE
    me = np.mean(err)                              # ME
    mae = np.mean(np.abs(err))                     # MAE
    mpe = np.mean(err/actual)                      # MPE
    rmse = np.mean(err**2)**.5                     # RMSE
    corr = np.corrcoef(forecast, actual)[0,1]      # corr
    paired = np.hstack([forecast[:,None], actual[:,None]])
    mins = np.amin(paired, axis=1)
    maxs = np.amax(paired, axis=1)
    minmax = 1 - np.mean(mins/maxs)                # minmax
    acf1 = acf(err)[1]                             # ACF1
    return({'MAPE':mape, 'ME':me, 'MAE': mae,
            'MPE': mpe, 'RMSE':rmse, 'ACF1':acf1,
            'Corr':corr, 'Minmax':minmax})
def arima_validation(p, d, q):
    """Fit ARIMA(p, d, q) on the first 75% of house sales, forecast the last
    25%, plot forecast vs actuals with a 95% confidence band, and print MAPE.

    Uses the legacy statsmodels `arima_model.ARIMA` API, whose `forecast`
    returns (forecast, stderr, conf_int) — do not reorder the unpacking.
    """
    from statsmodels.tsa.arima_model import ARIMA
    # Hold out the last 25% of observations as the test set.
    test_size = int(df_house.shape[0]*.25)
    train = df_house.sales[:-test_size]
    test = df_house.sales[-test_size:]
    model = ARIMA(train, order=(p, d, q))
    model_fit = model.fit(disp=0)
    display(model_fit.summary())
    # Residual diagnostics: time plot and density.
    residuals = pd.DataFrame(model_fit.resid)
    fig, ax = plt.subplots(1,2, figsize=(12,3))
    residuals.plot(title="Residuals", ax=ax[0])
    residuals.plot(kind='kde', title='Density', ax=ax[1])
    plt.show()
    plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
    # Forecast
    fc, se, conf = model_fit.forecast(test_size, alpha=0.05)  # 95% conf
    # Make as pandas series
    fc_series = pd.Series(fc, index=test.index)
    lower_series = pd.Series(conf[:, 0], index=test.index)
    upper_series = pd.Series(conf[:, 1], index=test.index)
    # Plot
    plt.plot(train, label='training')
    plt.plot(test, label='actual')
    plt.plot(fc_series, label='forecast')
    plt.fill_between(lower_series.index, lower_series, upper_series,
                     color='k', alpha=.15)
    plt.title('Forecast vs Actuals')
    plt.legend(loc='upper left', fontsize=8)
    plt.show()
    print('{:7s}: {:8.4f}'.format('MAPE', forecast_accuracy(fc, test.values)['MAPE']))
def sarima_forcast(model, df, col_name, forecast_periods, freq):
    """Plot a fitted model's out-of-sample forecast with its confidence band.

    Parameters
    ----------
    model : fitted model exposing .predict(n_periods=..., return_conf_int=True)
        (pmdarima-style — TODO confirm against the caller)
    df : time-indexed DataFrame containing the observed series
    col_name : str, observed column to plot
    forecast_periods : int, horizon length
    freq : 'month' or 'quarter', controls the spacing of the forecast index
    """
    # Removed the dead `periods = 12/4` computation — `periods` was never
    # used anywhere in this function (unlike in sarimax_forcast).
    fitted, confint = model.predict(n_periods=forecast_periods, return_conf_int=True)
    # Build a future index continuing from the last observed date.
    if freq=='month':
        index_of_fc = pd.date_range(df.index[-1], periods = forecast_periods, freq='M')
    if freq=='quarter':
        index_of_fc = pd.date_range(df.index[-1], periods = forecast_periods, freq='3M')
    # make series for plotting purpose
    fitted_series = pd.Series(fitted, index=index_of_fc)
    lower_series = pd.Series(confint[:, 0], index=index_of_fc)
    upper_series = pd.Series(confint[:, 1], index=index_of_fc)
    # Plot
    plt.rcParams.update({'figure.figsize':(10,4), 'figure.dpi':120})
    plt.plot(df[col_name])
    plt.plot(fitted_series, color='darkgreen')
    plt.fill_between(lower_series.index,
                     lower_series,
                     upper_series,
                     color='k', alpha=.15)
    plt.title("SARIMAX Forecast of Drink Sales")
    plt.show()
def add_seasonal_index(df, col_name, freq='month', model='multiplicative'):
    """Return a copy of `df` with a 'seasonal_index' column merged in.

    The seasonal component is estimated from the last 3 seasonal cycles of
    `df[col_name]` and joined back onto `df` via the period label column
    ('month' or 'quarter'), which `df` is assumed to already contain —
    see the df_house/df_drink setup earlier in the file.

    Parameters
    ----------
    df : time-indexed DataFrame that already has a `freq`-named label column
    col_name : str, column to decompose
    freq : 'month' or 'quarter'
    model : decomposition model passed to seasonal_decompose
    """
    from statsmodels.tsa.seasonal import seasonal_decompose
    if freq=='month':
        periods = 12
    if freq=='quarter':
        periods = 4
    # One seasonal factor per period, taken from the final cycle.
    seasonal_index = seasonal_decompose(df[col_name][-periods*3:], # 3 years
                                        model=model,
                                        extrapolate_trend='freq').seasonal[-periods:].to_frame()
    seasonal_index.columns = ['seasonal_index']
    if freq=='month':
        seasonal_index['month'] = [d.strftime('%b') for d in seasonal_index.index]
    if freq=='quarter':
        seasonal_index['quarter'] = ['Q'+str(d.month//3) for d in seasonal_index.index]
    # Merge on the period label; restore the original DatetimeIndex afterwards
    # because pd.merge resets it.
    df_tmp = pd.merge(df, seasonal_index, how='left', on=freq)
    df_tmp.index = df.index
    return df_tmp
def sarimax_forcast(model, df, col_name, forecast_periods, freq):
    """Forecast with a fitted SARIMAX-style model that takes the seasonal
    index as an exogenous regressor, and plot forecast plus confidence band.

    freq is 'month' (12-period cycle) or 'quarter' (4-period cycle).
    """
    if freq=='month':
        periods = 12
    if freq=='quarter':
        periods = 4
    # Tile one seasonal cycle of the exogenous index across the horizon.
    exog = np.tile(df.seasonal_index[:periods],
                   forecast_periods//periods).reshape(-1,1)
    fitted, confint = model.predict(n_periods=forecast_periods,
                                    exogenous=exog,
                                    return_conf_int=True)
    # Future index continuing from the last observed date.
    if freq=='month':
        index_of_fc = pd.date_range(df.index[-1], periods=forecast_periods, freq='M')
    if freq=='quarter':
        index_of_fc = pd.date_range(df.index[-1], periods=forecast_periods, freq='3M')
    # Series aligned on the forecast index, for plotting.
    fitted_series = pd.Series(fitted, index=index_of_fc)
    lower_series = pd.Series(confint[:, 0], index=index_of_fc)
    upper_series = pd.Series(confint[:, 1], index=index_of_fc)
    plt.rcParams.update({'figure.figsize':(10,4), 'figure.dpi':120})
    plt.plot(df[col_name])
    plt.plot(fitted_series, color='darkgreen')
    plt.fill_between(lower_series.index, lower_series, upper_series,
                     color='k', alpha=.15)
    plt.title("SARIMAX Forecast of Drink Sales")
    plt.show()
"matplotlib.pyplot.title",
"statsmodels.api.tsa.seasonal_decompose",
"matplotlib.dates.MonthLocator",
"numpy.random.seed",
"statsmodels.tsa.arima_model.ARIMA",
"numpy.abs",
"pandas.read_csv",
"numpy.ones",
"statsmodels.api.stats.anova_lm",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.mean",... | [((622, 716), 'pandas.read_csv', 'pd.read_csv', (["(dataurl + 'house_sales.csv')"], {'parse_dates': "['date']", 'header': '(0)', 'index_col': '"""date"""'}), "(dataurl + 'house_sales.csv', parse_dates=['date'], header=0,\n index_col='date')\n", (633, 716), True, 'import pandas as pd\n'), ((838, 910), 'pandas.read_csv', 'pd.read_csv', (["(dataurl + 'drink_sales.csv')"], {'parse_dates': "['date']", 'header': '(0)'}), "(dataurl + 'drink_sales.csv', parse_dates=['date'], header=0)\n", (849, 910), True, 'import pandas as pd\n'), ((1332, 1353), 'numpy.sin', 'np.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1338, 1353), True, 'import numpy as np\n'), ((1419, 1452), 'numpy.linspace', 'np.linspace', (['*domain', 'sample_size'], {}), '(*domain, sample_size)\n', (1430, 1452), True, 'import numpy as np\n'), ((1457, 1477), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (1474, 1477), True, 'import numpy as np\n'), ((1584, 1605), 'numpy.random.seed', 'np.random.seed', (['(11223)'], {}), '(11223)\n', (1598, 1605), True, 'import numpy as np\n'), ((1677, 1699), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1688, 1699), True, 'import numpy as np\n'), ((1737, 1833), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train', 't_train'], {'facecolor': '"""none"""', 'edgecolor': '"""b"""', 's': '(50)', 'label': '"""training data"""'}), "(x_train, t_train, facecolor='none', edgecolor='b', s=50, label=\n 'training data')\n", (1748, 1833), True, 'import matplotlib.pyplot as plt\n'), ((1910, 1929), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1918, 1929), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1951), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (1944, 1951), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1966), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1964, 1966), True, 'import matplotlib.pyplot as plt\n'), 
((1996, 2017), 'numpy.random.seed', 'np.random.seed', (['(11223)'], {}), '(11223)\n', (2010, 2017), True, 'import numpy as np\n'), ((2089, 2111), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (2100, 2111), True, 'import numpy as np\n'), ((2155, 2182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (2165, 2182), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2869), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2867, 2869), True, 'import matplotlib.pyplot as plt\n'), ((2920, 2941), 'numpy.random.seed', 'np.random.seed', (['(11223)'], {}), '(11223)\n', (2934, 2941), True, 'import numpy as np\n'), ((3013, 3035), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (3024, 3035), True, 'import numpy as np\n'), ((3079, 3106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (3089, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3954, 3964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3962, 3964), True, 'import matplotlib.pyplot as plt\n'), ((4158, 4177), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (4172, 4177), True, 'import numpy as np\n'), ((4341, 4351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4349, 4351), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4781), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4774, 4781), True, 'import matplotlib.pyplot as plt\n'), ((4786, 4796), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4794, 4796), True, 'import matplotlib.pyplot as plt\n'), ((4850, 4869), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (4864, 4869), True, 'import numpy as np\n'), ((5034, 5078), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 6)', 'dpi': '(120)'}), '(1, 1, figsize=(12, 6), dpi=120)\n', (5046, 5078), True, 'import matplotlib.pyplot as 
plt\n'), ((5506, 5522), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5515, 5522), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5622), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(18, 6)', 'dpi': '(120)'}), '(1, 2, figsize=(18, 6), dpi=120)\n', (5590, 5622), True, 'import matplotlib.pyplot as plt\n'), ((5626, 5684), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""year"""', 'y': 'col_names[1]', 'data': 'df', 'ax': 'axes[0]'}), "(x='year', y=col_names[1], data=df, ax=axes[0])\n", (5637, 5684), True, 'import seaborn as sns\n'), ((5757, 5809), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'col_names[0]', 'y': 'col_names[1]', 'data': 'df'}), '(x=col_names[0], y=col_names[1], data=df)\n', (5768, 5809), True, 'import seaborn as sns\n'), ((6006, 6016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6014, 6016), True, 'import matplotlib.pyplot as plt\n'), ((6062, 6097), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 6)'}), '(1, 1, figsize=(12, 6))\n', (6074, 6097), True, 'import matplotlib.pyplot as plt\n'), ((6340, 6350), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6348, 6350), True, 'import matplotlib.pyplot as plt\n'), ((6466, 6501), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 6)'}), '(1, 1, figsize=(12, 6))\n', (6478, 6501), True, 'import matplotlib.pyplot as plt\n'), ((6878, 6888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6886, 6888), True, 'import matplotlib.pyplot as plt\n'), ((8541, 8551), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8549, 8551), True, 'import matplotlib.pyplot as plt\n'), ((8798, 8825), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (8808, 8825), True, 'import matplotlib.pyplot as plt\n'), ((9441, 9504), 'matplotlib.pyplot.plot', 'plt.plot', (['train.index', 'train'], {'label': '"""Train"""', 'c': 
'"""cornflowerblue"""'}), "(train.index, train, label='Train', c='cornflowerblue')\n", (9449, 9504), True, 'import matplotlib.pyplot as plt\n'), ((9509, 9585), 'matplotlib.pyplot.plot', 'plt.plot', (['pred.index', 'pred'], {'label': '"""Simple Exponential Smoothing"""', 'c': '"""orange"""'}), "(pred.index, pred, label='Simple Exponential Smoothing', c='orange')\n", (9517, 9585), True, 'import matplotlib.pyplot as plt\n'), ((9590, 9612), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9600, 9612), True, 'import matplotlib.pyplot as plt\n'), ((9617, 9641), 'matplotlib.pyplot.title', 'plt.title', (['"""House Sales"""'], {}), "('House Sales')\n", (9626, 9641), True, 'import matplotlib.pyplot as plt\n'), ((9646, 9656), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9654, 9656), True, 'import matplotlib.pyplot as plt\n'), ((9834, 9872), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['df_col.values'], {'autolag': '"""AIC"""'}), "(df_col.values, autolag='AIC')\n", (9842, 9872), False, 'from statsmodels.tsa.stattools import adfuller, kpss\n'), ((10354, 10389), 'statsmodels.tsa.stattools.kpss', 'kpss', (['df_col.values'], {'regression': '"""c"""'}), "(df_col.values, regression='c')\n", (10358, 10389), False, 'from statsmodels.tsa.stattools import adfuller, kpss\n'), ((11013, 11101), 'statsmodels.api.tsa.seasonal_decompose', 'sm.tsa.seasonal_decompose', (['df_col'], {'model': '"""multiplicative"""', 'extrapolate_trend': '"""freq"""'}), "(df_col, model='multiplicative', extrapolate_trend\n ='freq')\n", (11038, 11101), True, 'import statsmodels.api as sm\n'), ((11353, 11430), 'statsmodels.api.tsa.seasonal_decompose', 'sm.tsa.seasonal_decompose', (['df_col'], {'model': '"""additive"""', 'extrapolate_trend': '"""freq"""'}), "(df_col, model='additive', extrapolate_trend='freq')\n", (11378, 11430), True, 'import statsmodels.api as sm\n'), ((11809, 11857), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', 
(["{'figure.figsize': (10, 8)}"], {}), "({'figure.figsize': (10, 8)})\n", (11828, 11857), True, 'import matplotlib.pyplot as plt\n'), ((11933, 11962), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.93)'}), '(top=0.93)\n', (11952, 11962), True, 'import matplotlib.pyplot as plt\n'), ((12032, 12061), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.93)'}), '(top=0.93)\n', (12051, 12061), True, 'import matplotlib.pyplot as plt\n'), ((12065, 12075), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12073, 12075), True, 'import matplotlib.pyplot as plt\n'), ((12317, 12405), 'statsmodels.api.tsa.seasonal_decompose', 'sm.tsa.seasonal_decompose', (['df_col'], {'model': '"""multiplicative"""', 'extrapolate_trend': '"""freq"""'}), "(df_col, model='multiplicative', extrapolate_trend\n ='freq')\n", (12342, 12405), True, 'import statsmodels.api as sm\n'), ((12418, 12495), 'statsmodels.api.tsa.seasonal_decompose', 'sm.tsa.seasonal_decompose', (['df_col'], {'model': '"""additive"""', 'extrapolate_trend': '"""freq"""'}), "(df_col, model='additive', extrapolate_trend='freq')\n", (12443, 12495), True, 'import statsmodels.api as sm\n'), ((12505, 12548), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 4)', 'dpi': '(80)'}), '(1, 2, figsize=(12, 4), dpi=80)\n', (12517, 12548), True, 'import matplotlib.pyplot as plt\n'), ((12564, 12593), 'scipy.signal.detrend', 'signal.detrend', (['df_col.values'], {}), '(df_col.values)\n', (12578, 12593), False, 'from scipy import signal\n'), ((12598, 12618), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (12609, 12618), True, 'import matplotlib.pyplot as plt\n'), ((12623, 12642), 'matplotlib.pyplot.plot', 'plt.plot', (['detrended'], {}), '(detrended)\n', (12631, 12642), True, 'import matplotlib.pyplot as plt\n'), ((12647, 12706), 'matplotlib.pyplot.title', 'plt.title', (['"""Subtracting the least squares fit"""'], 
{'fontsize': '(16)'}), "('Subtracting the least squares fit', fontsize=16)\n", (12656, 12706), True, 'import matplotlib.pyplot as plt\n'), ((12876, 12896), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (12887, 12896), True, 'import matplotlib.pyplot as plt\n'), ((12901, 12920), 'matplotlib.pyplot.plot', 'plt.plot', (['detrended'], {}), '(detrended)\n', (12909, 12920), True, 'import matplotlib.pyplot as plt\n'), ((12925, 12982), 'matplotlib.pyplot.title', 'plt.title', (['"""Subtracting the trend component"""'], {'fontsize': '(16)'}), "('Subtracting the trend component', fontsize=16)\n", (12934, 12982), True, 'import matplotlib.pyplot as plt\n'), ((12987, 12997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12995, 12997), True, 'import matplotlib.pyplot as plt\n'), ((13149, 13184), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 8)'}), '(1, 1, figsize=(12, 8))\n', (13161, 13184), True, 'import matplotlib.pyplot as plt\n'), ((13614, 13634), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (13625, 13634), True, 'import matplotlib.pyplot as plt\n'), ((13637, 13661), 'matplotlib.pyplot.plot', 'plt.plot', (['deseasonalized'], {}), '(deseasonalized)\n', (13645, 13661), True, 'import matplotlib.pyplot as plt\n'), ((13820, 13886), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (8, 3), 'figure.dpi': 120}"], {}), "({'figure.figsize': (8, 3), 'figure.dpi': 120})\n", (13839, 13886), True, 'import matplotlib.pyplot as plt\n'), ((13888, 13923), 'pandas.plotting.autocorrelation_plot', 'autocorrelation_plot', (['df_col.values'], {}), '(df_col.values)\n', (13908, 13923), False, 'from pandas.plotting import autocorrelation_plot\n'), ((13928, 13944), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13937, 13944), True, 'import matplotlib.pyplot as plt\n'), ((13949, 13959), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (13957, 13959), True, 'import matplotlib.pyplot as plt\n'), ((14146, 14190), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 3)', 'dpi': '(100)'}), '(1, 2, figsize=(16, 3), dpi=100)\n', (14158, 14190), True, 'import matplotlib.pyplot as plt\n'), ((14196, 14245), 'statsmodels.graphics.tsaplots.plot_acf', 'plot_acf', (['df_col.values'], {'lags': 'acf_lag', 'ax': 'axes[0]'}), '(df_col.values, lags=acf_lag, ax=axes[0])\n', (14204, 14245), False, 'from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n'), ((14436, 14502), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (9, 6), 'figure.dpi': 150}"], {}), "({'figure.figsize': (9, 6), 'figure.dpi': 150})\n", (14455, 14502), True, 'import matplotlib.pyplot as plt\n'), ((14538, 14579), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(period + 1)', '(2)'], {'sharex': '"""col"""'}), "(period + 1, 2, sharex='col')\n", (14550, 14579), True, 'import matplotlib.pyplot as plt\n'), ((14698, 14751), 'statsmodels.graphics.tsaplots.plot_acf', 'plot_acf', (['df[col_name].values'], {'lags': '(50)', 'ax': 'axes[0, 1]'}), '(df[col_name].values, lags=50, ax=axes[0, 1])\n', (14706, 14751), False, 'from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n'), ((15215, 15231), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (15224, 15231), True, 'import matplotlib.pyplot as plt\n'), ((15236, 15246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15244, 15246), True, 'import matplotlib.pyplot as plt\n'), ((15311, 15355), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 4)', 'dpi': '(100)'}), '(1, 2, figsize=(12, 4), dpi=100)\n', (15323, 15355), True, 'import matplotlib.pyplot as plt\n'), ((15359, 15425), 'pandas.plotting.lag_plot', 'lag_plot', (['df_house.sales'], {'lag': 'house_lag', 'ax': 'axes[0]', 'c': '"""firebrick"""'}), "(df_house.sales, lag=house_lag, ax=axes[0], 
c='firebrick')\n", (15367, 15425), False, 'from pandas.plotting import lag_plot\n'), ((15476, 15542), 'pandas.plotting.lag_plot', 'lag_plot', (['df_drink.sales'], {'lag': 'drink_lag', 'ax': 'axes[1]', 'c': '"""firebrick"""'}), "(df_drink.sales, lag=drink_lag, ax=axes[1], c='firebrick')\n", (15484, 15542), False, 'from pandas.plotting import lag_plot\n'), ((15593, 15603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15601, 15603), True, 'import matplotlib.pyplot as plt\n'), ((15720, 15764), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 4)', 'dpi': '(100)'}), '(1, 2, figsize=(12, 4), dpi=100)\n', (15732, 15764), True, 'import matplotlib.pyplot as plt\n'), ((15768, 15825), 'pandas.plotting.lag_plot', 'lag_plot', (['noise'], {'lag': 'noise_lag', 'ax': 'axes[0]', 'c': '"""firebrick"""'}), "(noise, lag=noise_lag, ax=axes[0], c='firebrick')\n", (15776, 15825), False, 'from pandas.plotting import lag_plot\n'), ((16071, 16081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16079, 16081), True, 'import matplotlib.pyplot as plt\n'), ((16175, 16213), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['df_house.sales'], {'order': '(p, d, q)'}), '(df_house.sales, order=(p, d, q))\n', (16180, 16213), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((16325, 16354), 'pandas.DataFrame', 'pd.DataFrame', (['model_fit.resid'], {}), '(model_fit.resid)\n', (16337, 16354), True, 'import pandas as pd\n'), ((16369, 16404), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 3)'}), '(1, 2, figsize=(12, 3))\n', (16381, 16404), True, 'import matplotlib.pyplot as plt\n'), ((16513, 16523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16521, 16523), True, 'import matplotlib.pyplot as plt\n'), ((16529, 16595), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (9, 3), 'figure.dpi': 120}"], {}), "({'figure.figsize': (9, 3), 'figure.dpi': 120})\n", (16548, 16595), 
True, 'import matplotlib.pyplot as plt\n'), ((16639, 16649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16647, 16649), True, 'import matplotlib.pyplot as plt\n'), ((16820, 16846), 'numpy.mean', 'np.mean', (['(forecast - actual)'], {}), '(forecast - actual)\n', (16827, 16846), True, 'import numpy as np\n'), ((16928, 16965), 'numpy.mean', 'np.mean', (['((forecast - actual) / actual)'], {}), '((forecast - actual) / actual)\n', (16935, 16965), True, 'import numpy as np\n'), ((17767, 17796), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['train'], {'order': '(p, d, q)'}), '(train, order=(p, d, q))\n', (17772, 17796), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((17889, 17918), 'pandas.DataFrame', 'pd.DataFrame', (['model_fit.resid'], {}), '(model_fit.resid)\n', (17901, 17918), True, 'import pandas as pd\n'), ((17933, 17968), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 3)'}), '(1, 2, figsize=(12, 3))\n', (17945, 17968), True, 'import matplotlib.pyplot as plt\n'), ((18077, 18087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18085, 18087), True, 'import matplotlib.pyplot as plt\n'), ((18093, 18159), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (9, 3), 'figure.dpi': 120}"], {}), "({'figure.figsize': (9, 3), 'figure.dpi': 120})\n", (18112, 18159), True, 'import matplotlib.pyplot as plt\n'), ((18295, 18326), 'pandas.Series', 'pd.Series', (['fc'], {'index': 'test.index'}), '(fc, index=test.index)\n', (18304, 18326), True, 'import pandas as pd\n'), ((18346, 18385), 'pandas.Series', 'pd.Series', (['conf[:, 0]'], {'index': 'test.index'}), '(conf[:, 0], index=test.index)\n', (18355, 18385), True, 'import pandas as pd\n'), ((18405, 18444), 'pandas.Series', 'pd.Series', (['conf[:, 1]'], {'index': 'test.index'}), '(conf[:, 1], index=test.index)\n', (18414, 18444), True, 'import pandas as pd\n'), ((18461, 18494), 'matplotlib.pyplot.plot', 'plt.plot', (['train'], 
{'label': '"""training"""'}), "(train, label='training')\n", (18469, 18494), True, 'import matplotlib.pyplot as plt\n'), ((18499, 18529), 'matplotlib.pyplot.plot', 'plt.plot', (['test'], {'label': '"""actual"""'}), "(test, label='actual')\n", (18507, 18529), True, 'import matplotlib.pyplot as plt\n'), ((18534, 18571), 'matplotlib.pyplot.plot', 'plt.plot', (['fc_series'], {'label': '"""forecast"""'}), "(fc_series, label='forecast')\n", (18542, 18571), True, 'import matplotlib.pyplot as plt\n'), ((18576, 18667), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['lower_series.index', 'lower_series', 'upper_series'], {'color': '"""k"""', 'alpha': '(0.15)'}), "(lower_series.index, lower_series, upper_series, color='k',\n alpha=0.15)\n", (18592, 18667), True, 'import matplotlib.pyplot as plt\n'), ((18689, 18721), 'matplotlib.pyplot.title', 'plt.title', (['"""Forecast vs Actuals"""'], {}), "('Forecast vs Actuals')\n", (18698, 18721), True, 'import matplotlib.pyplot as plt\n'), ((18726, 18766), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(8)'}), "(loc='upper left', fontsize=8)\n", (18736, 18766), True, 'import matplotlib.pyplot as plt\n'), ((18771, 18781), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18779, 18781), True, 'import matplotlib.pyplot as plt\n'), ((19408, 19444), 'pandas.Series', 'pd.Series', (['fitted'], {'index': 'index_of_fc'}), '(fitted, index=index_of_fc)\n', (19417, 19444), True, 'import pandas as pd\n'), ((19464, 19507), 'pandas.Series', 'pd.Series', (['confint[:, 0]'], {'index': 'index_of_fc'}), '(confint[:, 0], index=index_of_fc)\n', (19473, 19507), True, 'import pandas as pd\n'), ((19527, 19570), 'pandas.Series', 'pd.Series', (['confint[:, 1]'], {'index': 'index_of_fc'}), '(confint[:, 1], index=index_of_fc)\n', (19536, 19570), True, 'import pandas as pd\n'), ((19587, 19654), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (10, 4), 'figure.dpi': 120}"], {}), 
"({'figure.figsize': (10, 4), 'figure.dpi': 120})\n", (19606, 19654), True, 'import matplotlib.pyplot as plt\n'), ((19656, 19678), 'matplotlib.pyplot.plot', 'plt.plot', (['df[col_name]'], {}), '(df[col_name])\n', (19664, 19678), True, 'import matplotlib.pyplot as plt\n'), ((19683, 19725), 'matplotlib.pyplot.plot', 'plt.plot', (['fitted_series'], {'color': '"""darkgreen"""'}), "(fitted_series, color='darkgreen')\n", (19691, 19725), True, 'import matplotlib.pyplot as plt\n'), ((19730, 19821), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['lower_series.index', 'lower_series', 'upper_series'], {'color': '"""k"""', 'alpha': '(0.15)'}), "(lower_series.index, lower_series, upper_series, color='k',\n alpha=0.15)\n", (19746, 19821), True, 'import matplotlib.pyplot as plt\n'), ((19888, 19932), 'matplotlib.pyplot.title', 'plt.title', (['"""SARIMAX Forecast of Drink Sales"""'], {}), "('SARIMAX Forecast of Drink Sales')\n", (19897, 19932), True, 'import matplotlib.pyplot as plt\n'), ((19937, 19947), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19945, 19947), True, 'import matplotlib.pyplot as plt\n'), ((20676, 20725), 'pandas.merge', 'pd.merge', (['df', 'seasonal_index'], {'how': '"""left"""', 'on': 'freq'}), "(df, seasonal_index, how='left', on=freq)\n", (20684, 20725), True, 'import pandas as pd\n'), ((21518, 21554), 'pandas.Series', 'pd.Series', (['fitted'], {'index': 'index_of_fc'}), '(fitted, index=index_of_fc)\n', (21527, 21554), True, 'import pandas as pd\n'), ((21574, 21617), 'pandas.Series', 'pd.Series', (['confint[:, 0]'], {'index': 'index_of_fc'}), '(confint[:, 0], index=index_of_fc)\n', (21583, 21617), True, 'import pandas as pd\n'), ((21637, 21680), 'pandas.Series', 'pd.Series', (['confint[:, 1]'], {'index': 'index_of_fc'}), '(confint[:, 1], index=index_of_fc)\n', (21646, 21680), True, 'import pandas as pd\n'), ((21697, 21764), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (10, 4), 'figure.dpi': 120}"], {}), 
"({'figure.figsize': (10, 4), 'figure.dpi': 120})\n", (21716, 21764), True, 'import matplotlib.pyplot as plt\n'), ((21766, 21788), 'matplotlib.pyplot.plot', 'plt.plot', (['df[col_name]'], {}), '(df[col_name])\n', (21774, 21788), True, 'import matplotlib.pyplot as plt\n'), ((21793, 21835), 'matplotlib.pyplot.plot', 'plt.plot', (['fitted_series'], {'color': '"""darkgreen"""'}), "(fitted_series, color='darkgreen')\n", (21801, 21835), True, 'import matplotlib.pyplot as plt\n'), ((21840, 21931), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['lower_series.index', 'lower_series', 'upper_series'], {'color': '"""k"""', 'alpha': '(0.15)'}), "(lower_series.index, lower_series, upper_series, color='k',\n alpha=0.15)\n", (21856, 21931), True, 'import matplotlib.pyplot as plt\n'), ((21998, 22042), 'matplotlib.pyplot.title', 'plt.title', (['"""SARIMAX Forecast of Drink Sales"""'], {}), "('SARIMAX Forecast of Drink Sales')\n", (22007, 22042), True, 'import matplotlib.pyplot as plt\n'), ((22047, 22057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22055, 22057), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1044), 'pandas.offsets.QuarterEnd', 'pd.offsets.QuarterEnd', (['(0)'], {}), '(0)\n', (1041, 1044), True, 'import pandas as pd\n'), ((1496, 1537), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'std', 'size': 'x.shape'}), '(scale=std, size=x.shape)\n', (1512, 1537), True, 'import numpy as np\n'), ((1850, 1907), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 't_test'], {'c': '"""g"""', 'label': '"""$\\\\sin(2\\\\pi x)$"""'}), "(x_test, t_test, c='g', label='$\\\\sin(2\\\\pi x)$')\n", (1858, 1907), True, 'import matplotlib.pyplot as plt\n'), ((2234, 2258), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(i + 1)'], {}), '(1, 3, i + 1)\n', (2245, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2324), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'degree', 'include_bias': '(True)'}), 
'(degree=degree, include_bias=True)\n', (2290, 2324), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2341, 2359), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2357, 2359), False, 'from sklearn.linear_model import LinearRegression\n'), ((2501, 2597), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train', 't_train'], {'facecolor': '"""none"""', 'edgecolor': '"""b"""', 's': '(50)', 'label': '"""training data"""'}), "(x_train, t_train, facecolor='none', edgecolor='b', s=50, label=\n 'training data')\n", (2512, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2729), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 't'], {'c': '"""r"""', 'label': '"""fitting"""'}), "(x_test, t, c='r', label='fitting')\n", (2694, 2729), True, 'import matplotlib.pyplot as plt\n'), ((2738, 2757), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (2746, 2757), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2783), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (2776, 2783), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3182), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(i + 1)'], {}), '(1, 3, i + 1)\n', (3169, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3248), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'degree', 'include_bias': '(True)'}), '(degree=degree, include_bias=True)\n', (3214, 3248), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((3265, 3283), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3281, 3283), False, 'from sklearn.linear_model import LinearRegression\n'), ((3781, 3824), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 't'], {'c': '"""r"""', 'label': '"""fitting"""'}), "(x_test, t, c='r', label='fitting')\n", (3789, 3824), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3852), 'matplotlib.pyplot.ylim', 
'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (3841, 3852), True, 'import matplotlib.pyplot as plt\n'), ((3861, 3878), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (3871, 3878), True, 'import matplotlib.pyplot as plt\n'), ((4489, 4516), 'pandas.to_datetime', 'pd.to_datetime', (['df.index[0]'], {}), '(df.index[0])\n', (4503, 4516), True, 'import pandas as pd\n'), ((7897, 7932), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 6)'}), '(1, 3, figsize=(20, 6))\n', (7909, 7932), True, 'import matplotlib.pyplot as plt\n'), ((7962, 8098), 'seaborn.residplot', 'sns.residplot', (['result.fittedvalues', 'result.resid'], {'lowess': '(False)', 'scatter_kws': "{'s': 80}", 'line_kws': "{'color': 'r', 'lw': 1}", 'ax': 'axes[0]'}), "(result.fittedvalues, result.resid, lowess=False, scatter_kws=\n {'s': 80}, line_kws={'color': 'r', 'lw': 1}, ax=axes[0])\n", (7975, 8098), True, 'import seaborn as sns\n'), ((8274, 8329), 'scipy.stats.probplot', 'stats.probplot', (['result.resid'], {'dist': '"""norm"""', 'plot': 'axes[1]'}), "(result.resid, dist='norm', plot=axes[1])\n", (8288, 8329), False, 'from scipy import stats\n'), ((8407, 8445), 'seaborn.distplot', 'sns.distplot', (['result.resid'], {'ax': 'axes[2]'}), '(result.resid, ax=axes[2])\n', (8419, 8445), True, 'import seaborn as sns\n'), ((9378, 9435), 'matplotlib.pyplot.plot', 'plt.plot', (['test.index', 'test'], {'label': '"""Holdouts"""', 'c': '"""fuchsia"""'}), "(test.index, test, label='Holdouts', c='fuchsia')\n", (9386, 9435), True, 'import matplotlib.pyplot as plt\n'), ((13258, 13346), 'statsmodels.api.tsa.seasonal_decompose', 'sm.tsa.seasonal_decompose', (['df_col'], {'model': '"""multiplicative"""', 'extrapolate_trend': '"""freq"""'}), "(df_col, model='multiplicative', extrapolate_trend\n ='freq')\n", (13283, 13346), True, 'import statsmodels.api as sm\n'), ((13466, 13543), 'statsmodels.api.tsa.seasonal_decompose', 'sm.tsa.seasonal_decompose', 
(['df_col'], {'model': '"""additive"""', 'extrapolate_trend': '"""freq"""'}), "(df_col, model='additive', extrapolate_trend='freq')\n", (13491, 13543), True, 'import statsmodels.api as sm\n'), ((15682, 15702), 'numpy.random.randn', 'np.random.randn', (['(200)'], {}), '(200)\n', (15697, 15702), True, 'import numpy as np\n'), ((16882, 16907), 'numpy.abs', 'np.abs', (['(forecast - actual)'], {}), '(forecast - actual)\n', (16888, 16907), True, 'import numpy as np\n'), ((16983, 17016), 'numpy.mean', 'np.mean', (['((forecast - actual) ** 2)'], {}), '((forecast - actual) ** 2)\n', (16990, 17016), True, 'import numpy as np\n'), ((17038, 17067), 'numpy.corrcoef', 'np.corrcoef', (['forecast', 'actual'], {}), '(forecast, actual)\n', (17049, 17067), True, 'import numpy as np\n'), ((17101, 17148), 'numpy.hstack', 'np.hstack', (['[forecast[:, None], actual[:, None]]'], {}), '([forecast[:, None], actual[:, None]])\n', (17110, 17148), True, 'import numpy as np\n'), ((17206, 17253), 'numpy.hstack', 'np.hstack', (['[forecast[:, None], actual[:, None]]'], {}), '([forecast[:, None], actual[:, None]])\n', (17215, 17253), True, 'import numpy as np\n'), ((17309, 17329), 'numpy.mean', 'np.mean', (['(mins / maxs)'], {}), '(mins / maxs)\n', (17316, 17329), True, 'import numpy as np\n'), ((17360, 17382), 'statsmodels.tsa.stattools.acf', 'acf', (['(forecast - actual)'], {}), '(forecast - actual)\n', (17363, 17382), False, 'from statsmodels.tsa.stattools import acf\n'), ((19161, 19224), 'pandas.date_range', 'pd.date_range', (['df.index[-1]'], {'periods': 'forecast_periods', 'freq': '"""M"""'}), "(df.index[-1], periods=forecast_periods, freq='M')\n", (19174, 19224), True, 'import pandas as pd\n'), ((19273, 19337), 'pandas.date_range', 'pd.date_range', (['df.index[-1]'], {'periods': 'forecast_periods', 'freq': '"""3M"""'}), "(df.index[-1], periods=forecast_periods, freq='3M')\n", (19286, 19337), True, 'import pandas as pd\n'), ((21271, 21334), 'pandas.date_range', 'pd.date_range', 
(['df.index[-1]'], {'periods': 'forecast_periods', 'freq': '"""M"""'}), "(df.index[-1], periods=forecast_periods, freq='M')\n", (21284, 21334), True, 'import pandas as pd\n'), ((21383, 21447), 'pandas.date_range', 'pd.date_range', (['df.index[-1]'], {'periods': 'forecast_periods', 'freq': '"""3M"""'}), "(df.index[-1], periods=forecast_periods, freq='3M')\n", (21396, 21447), True, 'import pandas as pd\n'), ((2622, 2679), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 't_test'], {'c': '"""g"""', 'label': '"""$\\\\sin(2\\\\pi x)$"""'}), "(x_test, t_test, c='g', label='$\\\\sin(2\\\\pi x)$')\n", (2630, 2679), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3557), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train[:-3]', 't_train[:-3]'], {'facecolor': '"""none"""', 'edgecolor': '"""b"""', 's': '(50)', 'label': '"""training data"""'}), "(x_train[:-3], t_train[:-3], facecolor='none', edgecolor='b', s=\n 50, label='training data')\n", (3462, 3557), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3692), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train[-3:]', 't_train[-3:]'], {'facecolor': '"""none"""', 'edgecolor': '"""orange"""', 's': '(50)', 'label': '"""testing data"""'}), "(x_train[-3:], t_train[-3:], facecolor='none', edgecolor=\n 'orange', s=50, label='testing data')\n", (3593, 3692), True, 'import matplotlib.pyplot as plt\n'), ((3717, 3774), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 't_test'], {'c': '"""g"""', 'label': '"""$\\\\sin(2\\\\pi x)$"""'}), "(x_test, t_test, c='g', label='$\\\\sin(2\\\\pi x)$')\n", (3725, 3774), True, 'import matplotlib.pyplot as plt\n'), ((4659, 4691), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(12)'}), '(interval=12)\n', (4678, 4691), True, 'import matplotlib.dates as mdates\n'), ((4730, 4759), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%Y"""'], {}), "('%m-%Y')\n", (4750, 4759), True, 'import matplotlib.dates as mdates\n'), ((4882, 4919), 'pandas.Series', 
'pd.Series', (['[x.year for x in df.index]'], {}), '([x.year for x in df.index])\n', (4891, 4919), True, 'import pandas as pd\n'), ((4966, 4995), 'matplotlib.colors.XKCD_COLORS.keys', 'mlp.colors.XKCD_COLORS.keys', ([], {}), '()\n', (4993, 4995), True, 'import matplotlib as mlp\n'), ((5238, 5337), 'matplotlib.pyplot.plot', 'plt.plot', (['col_names[0]', 'col_names[1]'], {'data': 'df.loc[df.year == y, :]', 'color': 'mycolors[i]', 'label': 'y'}), '(col_names[0], col_names[1], data=df.loc[df.year == y, :], color=\n mycolors[i], label=y)\n', (5246, 5337), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5489), 'matplotlib.pyplot.text', 'plt.text', (['(df.loc[df.year == y, :].shape[0] - label_shift)', 'df.loc[df.year == y, col_names[1]][-1:].values[0]', 'y'], {'color': 'mycolors[i]', 'fontsize': '(12)'}), '(df.loc[df.year == y, :].shape[0] - label_shift, df.loc[df.year ==\n y, col_names[1]][-1:].values[0], y, color=mycolors[i], fontsize=12)\n', (5351, 5489), True, 'import matplotlib.pyplot as plt\n'), ((7201, 7233), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', (['result'], {'typ': '(2)'}), '(result, typ=2)\n', (7218, 7233), True, 'import statsmodels.api as sm\n'), ((7310, 7344), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 5)'}), '(1, 1, figsize=(8, 5))\n', (7322, 7344), True, 'import matplotlib.pyplot as plt\n'), ((8587, 8621), 'scipy.stats.kstest', 'stats.kstest', (['result.resid', '"""norm"""'], {}), "(result.resid, 'norm')\n", (8599, 8621), False, 'from scipy import stats\n'), ((14814, 14840), 'numpy.std', 'np.std', (["df['sales'].values"], {}), "(df['sales'].values)\n", (14820, 14840), True, 'import numpy as np\n'), ((16761, 16786), 'numpy.abs', 'np.abs', (['(forecast - actual)'], {}), '(forecast - actual)\n', (16767, 16786), True, 'import numpy as np\n'), ((16787, 16801), 'numpy.abs', 'np.abs', (['actual'], {}), '(actual)\n', (16793, 16801), True, 'import numpy as np\n'), ((7098, 7119), 'numpy.sqrt', 'np.sqrt', 
(['result.scale'], {}), '(result.scale)\n', (7105, 7119), True, 'import numpy as np\n'), ((8908, 8933), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['train'], {}), '(train)\n', (8926, 8933), False, 'from statsmodels.tsa.holtwinters import SimpleExpSmoothing\n'), ((9193, 9218), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['train'], {}), '(train)\n', (9211, 9218), False, 'from statsmodels.tsa.holtwinters import SimpleExpSmoothing\n'), ((15905, 15939), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(200, 1)'], {}), '(-1, 1, (200, 1))\n', (15922, 15939), True, 'import numpy as np\n'), ((20197, 20287), 'statsmodels.tsa.seasonal.seasonal_decompose', 'seasonal_decompose', (['df[col_name][-periods * 3:]'], {'model': 'model', 'extrapolate_trend': '"""freq"""'}), "(df[col_name][-periods * 3:], model=model,\n extrapolate_trend='freq')\n", (20215, 20287), False, 'from statsmodels.tsa.seasonal import seasonal_decompose\n'), ((21038, 21103), 'numpy.tile', 'np.tile', (['df.seasonal_index[:periods]', '(forecast_periods // periods)'], {}), '(df.seasonal_index[:periods], forecast_periods // periods)\n', (21045, 21103), True, 'import numpy as np\n'), ((4252, 4286), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(200, 1)'], {}), '(-1, 1, (200, 1))\n', (4269, 4286), True, 'import numpy as np\n'), ((15944, 15961), 'numpy.ones', 'np.ones', (['(200, 1)'], {}), '((200, 1))\n', (15951, 15961), True, 'import numpy as np\n'), ((4292, 4309), 'numpy.ones', 'np.ones', (['(200, 1)'], {}), '((200, 1))\n', (4299, 4309), True, 'import numpy as np\n'), ((4554, 4580), 'pandas.Timestamp', 'pd.Timestamp', (['df.index[-1]'], {}), '(df.index[-1])\n', (4566, 4580), True, 'import pandas as pd\n')] |
import unittest
import numpy as np
from spectralcluster import refinement
# Short module-level aliases for the refinement enum types used by the tests below.
ThresholdType = refinement.ThresholdType
SymmetrizeType = refinement.SymmetrizeType
class TestCropDiagonal(unittest.TestCase):
  """Tests for the CropDiagonal refinement operation."""

  def test_3by3_matrix(self):
    # Each diagonal entry is replaced by the max of the other entries in its row
    # (e.g. row [1, 2, 3] -> diagonal becomes max(2, 3) = 3).
    affinity = np.array([[1, 2, 3], [3, 4, 5], [4, 2, 1]])
    refined = refinement.CropDiagonal().refine(affinity)
    expected = np.array([[3, 2, 3], [3, 5, 5], [4, 2, 4]])
    self.assertTrue(np.array_equal(expected, refined))
class TestGaussianBlur(unittest.TestCase):
  """Tests for the GaussianBlur refinement operation."""

  def test_3by3_matrix(self):
    affinity = np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    blurred = refinement.GaussianBlur(sigma=1).refine(affinity)
    # Reference values; compared with a loose tolerance since blurring is float math.
    expected = np.array([[2.12, 2.61, 3.10], [2.76, 2.90, 3.06],
                         [3.16, 2.78, 2.46]])
    self.assertTrue(np.allclose(expected, blurred, atol=0.01))
class TestRowWiseThreshold(unittest.TestCase):
  """Tests for the RowWiseThreshold refinement operation.

  In all cases, entries below the per-row threshold are scaled by the soft
  multiplier (0.01 here); surviving entries are kept or binarized to 1.
  """

  def test_3by3_matrix_percentile(self):
    affinity = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    refiner = refinement.RowWiseThreshold(
        p_percentile=0.5,
        thresholding_soft_multiplier=0.01,
        thresholding_type=ThresholdType.Percentile)
    result = refiner.refine(affinity)
    # Sub-percentile entries are multiplied by 0.01 (0.5 -> 0.005, etc.).
    expected = np.array([[0.005, 2.0, 3.0], [0.03, 4.0, 5.0], [4.0, 2.0, 0.01]])
    self.assertTrue(np.allclose(expected, result, atol=0.001))

  def test_3by3_matrix_row_max(self):
    affinity = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    refiner = refinement.RowWiseThreshold(
        p_percentile=0.5,
        thresholding_soft_multiplier=0.01,
        thresholding_type=ThresholdType.RowMax)
    result = refiner.refine(affinity)
    expected = np.array([[0.005, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 0.01]])
    self.assertTrue(np.allclose(expected, result, atol=0.001))

  def test_3by3_matrix_binarization(self):
    affinity = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    refiner = refinement.RowWiseThreshold(
        p_percentile=0.5,
        thresholding_soft_multiplier=0.01,
        thresholding_type=ThresholdType.RowMax,
        thresholding_with_binarization=True)
    result = refiner.refine(affinity)
    # Entries above the threshold become exactly 1.0 when binarization is on.
    expected = np.array([[0.005, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 0.01]])
    self.assertTrue(np.allclose(expected, result, atol=0.001))

  def test_3by3_matrix_preserve_diagonal(self):
    affinity = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    refiner = refinement.RowWiseThreshold(
        p_percentile=0.5,
        thresholding_soft_multiplier=0.01,
        thresholding_type=ThresholdType.RowMax,
        thresholding_with_binarization=True,
        thresholding_preserve_diagonal=True)
    result = refiner.refine(affinity)
    expected = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
    self.assertTrue(np.allclose(expected, result, atol=0.001))
class TestSymmetrize(unittest.TestCase):
  """Tests for the Symmetrize refinement operation."""

  def test_3by3_matrix(self):
    # Default symmetrization keeps the elementwise max of A and A^T.
    affinity = np.array([[1, 2, 3], [3, 4, 5], [4, 2, 1]])
    symmetrized = refinement.Symmetrize().refine(affinity)
    expected = np.array([[1, 3, 4], [3, 4, 5], [4, 5, 1]])
    self.assertTrue(np.array_equal(expected, symmetrized))

  def test_3by3_matrix_symmetrize_average(self):
    # Average symmetrization replaces A with (A + A^T) / 2.
    affinity = np.array([[1, 2, 3], [3, 4, 5], [4, 2, 1]])
    refiner = refinement.Symmetrize(symmetrize_type=SymmetrizeType.Average)
    symmetrized = refiner.refine(affinity)
    expected = np.array([[1, 2.5, 3.5], [2.5, 4, 3.5], [3.5, 3.5, 1]])
    self.assertTrue(np.array_equal(expected, symmetrized))
class TestDiffuse(unittest.TestCase):
  """Tests for the Diffuse refinement operation."""

  def test_2by2_matrix(self):
    # The expected output equals A @ A^T for this input
    # ([[1, 2], [3, 4]] -> [[5, 11], [11, 25]]).
    affinity = np.array([[1, 2], [3, 4]])
    diffused = refinement.Diffuse().refine(affinity)
    expected = np.array([[5, 11], [11, 25]])
    self.assertTrue(np.array_equal(expected, diffused))
class TestRowWiseNormalize(unittest.TestCase):
  """Tests for the RowWiseNormalize refinement operation."""

  def test_3by3_matrix(self):
    # Each row is divided by its own maximum (e.g. [0.5, 2, 3] / 3).
    affinity = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    normalized = refinement.RowWiseNormalize().refine(affinity)
    expected = np.array([[0.167, 0.667, 1.0], [0.6, 0.8, 1.0], [1.0, 0.5,
                                                     0.25]])
    self.assertTrue(np.allclose(expected, normalized, atol=0.001))
# Allow running this test module directly (e.g. `python refinement_test.py`).
if __name__ == "__main__":
  unittest.main()
| [
"unittest.main",
"spectralcluster.refinement.RowWiseThreshold",
"spectralcluster.refinement.RowWiseNormalize",
"spectralcluster.refinement.CropDiagonal",
"numpy.allclose",
"numpy.array",
"spectralcluster.refinement.GaussianBlur",
"spectralcluster.refinement.Symmetrize",
"numpy.array_equal",
"spect... | [((4544, 4559), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4557, 4559), False, 'import unittest\n'), ((290, 333), 'numpy.array', 'np.array', (['[[1, 2, 3], [3, 4, 5], [4, 2, 1]]'], {}), '([[1, 2, 3], [3, 4, 5], [4, 2, 1]])\n', (298, 333), True, 'import numpy as np\n'), ((412, 455), 'numpy.array', 'np.array', (['[[3, 2, 3], [3, 5, 5], [4, 2, 4]]'], {}), '([[3, 2, 3], [3, 5, 5], [4, 2, 4]])\n', (420, 455), True, 'import numpy as np\n'), ((650, 711), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]]'], {}), '([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])\n', (658, 711), True, 'import numpy as np\n'), ((797, 865), 'numpy.array', 'np.array', (['[[2.12, 2.61, 3.1], [2.76, 2.9, 3.06], [3.16, 2.78, 2.46]]'], {}), '([[2.12, 2.61, 3.1], [2.76, 2.9, 3.06], [3.16, 2.78, 2.46]])\n', (805, 865), True, 'import numpy as np\n'), ((1114, 1175), 'numpy.array', 'np.array', (['[[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]]'], {}), '([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])\n', (1122, 1175), True, 'import numpy as np\n'), ((1378, 1443), 'numpy.array', 'np.array', (['[[0.005, 2.0, 3.0], [0.03, 4.0, 5.0], [4.0, 2.0, 0.01]]'], {}), '([[0.005, 2.0, 3.0], [0.03, 4.0, 5.0], [4.0, 2.0, 0.01]])\n', (1386, 1443), True, 'import numpy as np\n'), ((1568, 1629), 'numpy.array', 'np.array', (['[[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]]'], {}), '([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])\n', (1576, 1629), True, 'import numpy as np\n'), ((1828, 1892), 'numpy.array', 'np.array', (['[[0.005, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 0.01]]'], {}), '([[0.005, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 0.01]])\n', (1836, 1892), True, 'import numpy as np\n'), ((2022, 2083), 'numpy.array', 'np.array', (['[[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]]'], {}), '([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])\n', (2030, 2083), True, 'import numpy as np\n'), ((2327, 2391), 'numpy.array', 'np.array', (['[[0.005, 
1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 0.01]]'], {}), '([[0.005, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 0.01]])\n', (2335, 2391), True, 'import numpy as np\n'), ((2526, 2587), 'numpy.array', 'np.array', (['[[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]]'], {}), '([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])\n', (2534, 2587), True, 'import numpy as np\n'), ((2876, 2937), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])\n', (2884, 2937), True, 'import numpy as np\n'), ((3137, 3180), 'numpy.array', 'np.array', (['[[1, 2, 3], [3, 4, 5], [4, 2, 1]]'], {}), '([[1, 2, 3], [3, 4, 5], [4, 2, 1]])\n', (3145, 3180), True, 'import numpy as np\n'), ((3257, 3300), 'numpy.array', 'np.array', (['[[1, 3, 4], [3, 4, 5], [4, 5, 1]]'], {}), '([[1, 3, 4], [3, 4, 5], [4, 5, 1]])\n', (3265, 3300), True, 'import numpy as np\n'), ((3427, 3470), 'numpy.array', 'np.array', (['[[1, 2, 3], [3, 4, 5], [4, 2, 1]]'], {}), '([[1, 2, 3], [3, 4, 5], [4, 2, 1]])\n', (3435, 3470), True, 'import numpy as np\n'), ((3594, 3649), 'numpy.array', 'np.array', (['[[1, 2.5, 3.5], [2.5, 4, 3.5], [3.5, 3.5, 1]]'], {}), '([[1, 2.5, 3.5], [2.5, 4, 3.5], [3.5, 3.5, 1]])\n', (3602, 3649), True, 'import numpy as np\n'), ((3834, 3860), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3842, 3860), True, 'import numpy as np\n'), ((3934, 3963), 'numpy.array', 'np.array', (['[[5, 11], [11, 25]]'], {}), '([[5, 11], [11, 25]])\n', (3942, 3963), True, 'import numpy as np\n'), ((4166, 4227), 'numpy.array', 'np.array', (['[[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]]'], {}), '([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])\n', (4174, 4227), True, 'import numpy as np\n'), ((4310, 4376), 'numpy.array', 'np.array', (['[[0.167, 0.667, 1.0], [0.6, 0.8, 1.0], [1.0, 0.5, 0.25]]'], {}), '([[0.167, 0.667, 1.0], [0.6, 0.8, 1.0], [1.0, 0.5, 0.25]])\n', (4318, 4376), True, 'import numpy as 
np\n'), ((476, 517), 'numpy.array_equal', 'np.array_equal', (['expected', 'adjusted_matrix'], {}), '(expected, adjusted_matrix)\n', (490, 517), True, 'import numpy as np\n'), ((913, 962), 'numpy.allclose', 'np.allclose', (['expected', 'adjusted_matrix'], {'atol': '(0.01)'}), '(expected, adjusted_matrix, atol=0.01)\n', (924, 962), True, 'import numpy as np\n'), ((1464, 1514), 'numpy.allclose', 'np.allclose', (['expected', 'adjusted_matrix'], {'atol': '(0.001)'}), '(expected, adjusted_matrix, atol=0.001)\n', (1475, 1514), True, 'import numpy as np\n'), ((1913, 1963), 'numpy.allclose', 'np.allclose', (['expected', 'adjusted_matrix'], {'atol': '(0.001)'}), '(expected, adjusted_matrix, atol=0.001)\n', (1924, 1963), True, 'import numpy as np\n'), ((2412, 2462), 'numpy.allclose', 'np.allclose', (['expected', 'adjusted_matrix'], {'atol': '(0.001)'}), '(expected, adjusted_matrix, atol=0.001)\n', (2423, 2462), True, 'import numpy as np\n'), ((2958, 3008), 'numpy.allclose', 'np.allclose', (['expected', 'adjusted_matrix'], {'atol': '(0.001)'}), '(expected, adjusted_matrix, atol=0.001)\n', (2969, 3008), True, 'import numpy as np\n'), ((3321, 3362), 'numpy.array_equal', 'np.array_equal', (['expected', 'adjusted_matrix'], {}), '(expected, adjusted_matrix)\n', (3335, 3362), True, 'import numpy as np\n'), ((3670, 3711), 'numpy.array_equal', 'np.array_equal', (['expected', 'adjusted_matrix'], {}), '(expected, adjusted_matrix)\n', (3684, 3711), True, 'import numpy as np\n'), ((3984, 4025), 'numpy.array_equal', 'np.array_equal', (['expected', 'adjusted_matrix'], {}), '(expected, adjusted_matrix)\n', (3998, 4025), True, 'import numpy as np\n'), ((4461, 4511), 'numpy.allclose', 'np.allclose', (['expected', 'adjusted_matrix'], {'atol': '(0.001)'}), '(expected, adjusted_matrix, atol=0.001)\n', (4472, 4511), True, 'import numpy as np\n'), ((356, 381), 'spectralcluster.refinement.CropDiagonal', 'refinement.CropDiagonal', ([], {}), '()\n', (379, 381), False, 'from spectralcluster import 
refinement\n'), ((734, 766), 'spectralcluster.refinement.GaussianBlur', 'refinement.GaussianBlur', ([], {'sigma': '(1)'}), '(sigma=1)\n', (757, 766), False, 'from spectralcluster import refinement\n'), ((1198, 1327), 'spectralcluster.refinement.RowWiseThreshold', 'refinement.RowWiseThreshold', ([], {'p_percentile': '(0.5)', 'thresholding_soft_multiplier': '(0.01)', 'thresholding_type': 'ThresholdType.Percentile'}), '(p_percentile=0.5, thresholding_soft_multiplier=\n 0.01, thresholding_type=ThresholdType.Percentile)\n', (1225, 1327), False, 'from spectralcluster import refinement\n'), ((1652, 1777), 'spectralcluster.refinement.RowWiseThreshold', 'refinement.RowWiseThreshold', ([], {'p_percentile': '(0.5)', 'thresholding_soft_multiplier': '(0.01)', 'thresholding_type': 'ThresholdType.RowMax'}), '(p_percentile=0.5, thresholding_soft_multiplier=\n 0.01, thresholding_type=ThresholdType.RowMax)\n', (1679, 1777), False, 'from spectralcluster import refinement\n'), ((2106, 2272), 'spectralcluster.refinement.RowWiseThreshold', 'refinement.RowWiseThreshold', ([], {'p_percentile': '(0.5)', 'thresholding_soft_multiplier': '(0.01)', 'thresholding_type': 'ThresholdType.RowMax', 'thresholding_with_binarization': '(True)'}), '(p_percentile=0.5, thresholding_soft_multiplier=\n 0.01, thresholding_type=ThresholdType.RowMax,\n thresholding_with_binarization=True)\n', (2133, 2272), False, 'from spectralcluster import refinement\n'), ((2610, 2813), 'spectralcluster.refinement.RowWiseThreshold', 'refinement.RowWiseThreshold', ([], {'p_percentile': '(0.5)', 'thresholding_soft_multiplier': '(0.01)', 'thresholding_type': 'ThresholdType.RowMax', 'thresholding_with_binarization': '(True)', 'thresholding_preserve_diagonal': '(True)'}), '(p_percentile=0.5, thresholding_soft_multiplier=\n 0.01, thresholding_type=ThresholdType.RowMax,\n thresholding_with_binarization=True, thresholding_preserve_diagonal=True)\n', (2637, 2813), False, 'from spectralcluster import refinement\n'), ((3203, 3226), 
'spectralcluster.refinement.Symmetrize', 'refinement.Symmetrize', ([], {}), '()\n', (3224, 3226), False, 'from spectralcluster import refinement\n'), ((3493, 3554), 'spectralcluster.refinement.Symmetrize', 'refinement.Symmetrize', ([], {'symmetrize_type': 'SymmetrizeType.Average'}), '(symmetrize_type=SymmetrizeType.Average)\n', (3514, 3554), False, 'from spectralcluster import refinement\n'), ((3883, 3903), 'spectralcluster.refinement.Diffuse', 'refinement.Diffuse', ([], {}), '()\n', (3901, 3903), False, 'from spectralcluster import refinement\n'), ((4250, 4279), 'spectralcluster.refinement.RowWiseNormalize', 'refinement.RowWiseNormalize', ([], {}), '()\n', (4277, 4279), False, 'from spectralcluster import refinement\n')] |
import codecs
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from adjustText import adjust_text
from keras.models import model_from_json
from keras.utils.vis_utils import plot_model
from sklearn.utils import shuffle
FTRAIN = 'data/training.csv'
FTEST = 'data/test.csv'
FLOOKUP = 'data/IdLookupTable.csv'
def load(test=False, cols=None):
    """Loads data from FTEST if *test* is True, otherwise from FTRAIN.

    Pass a list of *cols* if you're only interested in a subset of the
    target columns. Returns (x, y) where y is None for the test set.
    """
    source = FTEST if test else FTRAIN
    frame = pd.read_csv(source)
    # Each image is stored as one space-separated string of pixel values.
    frame['Image'] = frame['Image'].apply(lambda im: np.fromstring(im, sep=' '))
    if cols:
        frame = frame[list(cols) + ['Image']]
    frame = frame.dropna()
    x = np.vstack(frame['Image'].values) / 255.
    x = x.astype(np.float32)
    if test:
        y = None
    else:
        y = frame[frame.columns[:-1]].values
        # Scale keypoint coordinates from pixel space to roughly [-1, 1].
        y = (y - 48) / 48
        x, y = shuffle(x, y, random_state=42)
        y = y.astype(np.float32)
    return x, y
def plot_sample(x, y, axis):
    """Draws a single 96x96 image with its keypoints overlaid on *axis*."""
    image = x.reshape(96, 96)
    axis.imshow(image, cmap='gray')
    # Keypoints alternate (x, y) pairs, scaled back from [-1, 1] to pixels.
    x_coords = y[0::2] * 48 + 48
    y_coords = y[1::2] * 48 + 48
    axis.scatter(x_coords, y_coords, marker='x', s=10)
def plot_loss(model, history):
    """Plots training/validation loss curves on a log scale and shows them.

    Annotates the plot with the final train/validation loss values.
    """
    train_loss = history['loss']
    valid_loss = history['val_loss']
    plt.plot(train_loss, linewidth=3, label='train')
    plt.plot(valid_loss, linewidth=3, label='valid')
    plt.grid()
    plt.legend()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.yscale('log')
    plt.title(model.name)
    final_score = (f"Loss: {round(train_loss[-1], 8)}"
                   f"\nVal_Loss: {round(valid_loss[-1], 8)}")
    # adjust_text nudges the annotation so it doesn't overlap the curve.
    annotation = plt.text(len(train_loss), np.mean(train_loss), final_score,
                          size='large')
    adjust_text([annotation], train_loss, np.arange(len(train_loss)))
    plt.show()
def show_examples(model):
    """Predicts keypoints on the test set and plots the first 16 samples."""
    samples, _ = load(test=True)
    # Convolutional models expect an explicit channel dimension.
    if "CNN_Model" in model.name:
        samples = samples.reshape((-1, 96, 96, 1))
    predictions = model.predict(samples)
    fig = plt.figure(figsize=(6, 6))
    fig.suptitle(model.name)
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05,
                        wspace=0.05)
    for index in range(16):
        axis = fig.add_subplot(4, 4, index + 1, xticks=[], yticks=[])
        plot_sample(samples[index], predictions[index], axis)
    plt.show()
def test_model(model, epochs=10, batch_size=32, save=True):
    """Trains *model* on the cached dataset and optionally saves it.

    Loads precomputed arrays from data/ instead of re-parsing the csv.
    Returns the trained model together with its fit-history dict.
    """
    # x, y = load(test=False)
    features = np.load('data/x.npy')
    targets = np.load('data/y.npy')
    if "CNN_Model" in model.name:
        features = features.reshape((-1, 96, 96, 1))
    fit_result = model.fit(features, targets, batch_size=batch_size,
                           epochs=epochs, validation_split=0.2)
    history = fit_result.history
    if save:
        save_model(model, history)
    return model, history
def save_model(model, history):
    """Writes the model architecture, weights and fit history under models/."""
    architecture = model.to_json()
    with open(f"models/{model.name}.json", "w") as json_file:
        json_file.write(architecture)
    model.save_weights(f"models/{model.name}.h5")
    with open(f"models/{model.name}_history.json", "w") as history_file:
        json.dump(history, history_file)
    print("Saved model to disk")
def load_model(model_name):
    """Reads a model's architecture, weights and fit history from models/.

    Returns (model, history) — the counterpart of save_model.
    """
    with open(f"models/{model_name}.json", 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(f"models/{model_name}.h5")
    with codecs.open(f"models/{model_name}_history.json", "r", "utf-8") as history_file:
        history = json.loads(history_file.read())
    print("Loaded model from disk")
    return loaded_model, history
def plot_model_to_file(model):
    """Renders the model architecture diagram to images/<name>_plot.png."""
    target_path = "images/" + model.name + "_plot.png"
    plot_model(model, to_file=target_path, show_shapes=True,
               show_layer_names=False)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"numpy.load",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.mean",
"codecs.open",
"keras.utils.vis_utils.plot_model",
"numpy.fromstring",
"json.dump",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabe... | [((595, 614), 'pandas.read_csv', 'pd.read_csv', (['f_name'], {}), '(f_name)\n', (606, 614), True, 'import pandas as pd\n'), ((1307, 1349), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'linewidth': '(3)', 'label': '"""train"""'}), "(loss, linewidth=3, label='train')\n", (1315, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1354, 1400), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], {'linewidth': '(3)', 'label': '"""valid"""'}), "(val_loss, linewidth=3, label='valid')\n", (1362, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1415), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1413, 1415), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1432), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1430, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1456), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1447, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (1471, 1479), True, 'import matplotlib.pyplot as plt\n'), ((1484, 1501), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1494, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1527), 'matplotlib.pyplot.title', 'plt.title', (['model.name'], {}), '(model.name)\n', (1515, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1853), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1851, 1853), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (2052, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2321, 2331), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2329, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2432, 2453), 'numpy.load', 'np.load', (['"""data/x.npy"""'], {}), "('data/x.npy')\n", (2439, 2453), True, 'import numpy as 
np\n'), ((2462, 2483), 'numpy.load', 'np.load', (['"""data/y.npy"""'], {}), "('data/y.npy')\n", (2469, 2483), True, 'import numpy as np\n'), ((3263, 3297), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (3278, 3297), False, 'from keras.models import model_from_json\n'), ((3604, 3714), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': "('images/' + model.name + '_plot.png')", 'show_shapes': '(True)', 'show_layer_names': '(False)'}), "(model, to_file='images/' + model.name + '_plot.png', show_shapes\n =True, show_layer_names=False)\n", (3614, 3714), False, 'from keras.utils.vis_utils import plot_model\n'), ((772, 801), 'numpy.vstack', 'np.vstack', (["df['Image'].values"], {}), "(df['Image'].values)\n", (781, 801), True, 'import numpy as np\n'), ((935, 965), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {'random_state': '(42)'}), '(x, y, random_state=42)\n', (942, 965), False, 'from sklearn.utils import shuffle\n'), ((3033, 3057), 'json.dump', 'json.dump', (['history', 'file'], {}), '(history, file)\n', (3042, 3057), False, 'import json\n'), ((3369, 3436), 'codecs.open', 'codecs.open', (["('models/' + model_name + '_history.json')", '"""r"""', '"""utf-8"""'], {}), "('models/' + model_name + '_history.json', 'r', 'utf-8')\n", (3380, 3436), False, 'import codecs\n'), ((662, 688), 'numpy.fromstring', 'np.fromstring', (['im'], {'sep': '""" """'}), "(im, sep=' ')\n", (675, 688), True, 'import numpy as np\n'), ((1718, 1742), 'numpy.mean', 'np.mean', (["history['loss']"], {}), "(history['loss'])\n", (1725, 1742), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import logging
def find_all_columns(csv_file, columns_to_exclude, range_fraction=0.1, separator=','):
    """
    Builds a Column object for every header in a csv file, so you don't have
    to list them all by hand. Headers in columns_to_exclude are skipped.
    Numeric columns become 'Range' columns whose acceptable range is
    range_fraction (default 10 percent) of the column mean; everything else
    becomes a plain 'Categorical' column.
    :param csv_file: Full path to csv file.
    :param columns_to_exclude: List of column headers you DO NOT want Column objects created for.
    :param range_fraction: How much numeric columns can vary by, as a fraction of the mean of the column
    :param separator: Delimiter used by pandas when reading the report. Allows for parsing of .tsv ('\t' delimiter) as
    well as .csv (',' delimiter) files. Default is ','
    :return: List of column objects to be used by a Validator
    """
    df = pd.read_csv(csv_file, sep=separator)
    column_list = list()
    for header in list(df.columns):
        if header in columns_to_exclude:
            continue
        if np.issubdtype(df[header].dtype, np.number):
            # Numeric column: accept values within range_fraction of the mean.
            mean_value = df[header].mean()
            tolerance = mean_value * range_fraction
            column_list.append(Column(name=header,
                                      column_type='Range',
                                      acceptable_range=tolerance))
        else:
            column_list.append(Column(name=header))
    return column_list
def percent_depth_columns(csv_file, columns_to_exclude, percent_range, depth_range, separator=','):
    """Creates a 'Percent_Depth' Column for every header not excluded.

    :param csv_file: Full path to csv file.
    :param columns_to_exclude: Headers to skip.
    :param percent_range: Allowed deviation of the percent value.
    :param depth_range: Allowed deviation of the depth value.
    :param separator: Delimiter passed to pandas (',' for csv, '\t' for tsv).
    :return: List of Column objects to be used by a Validator.
    """
    headers = list(pd.read_csv(csv_file, sep=separator).columns)
    return [Column(name=header,
                   column_type='Percent_Depth',
                   percent_range=percent_range,
                   depth_range=depth_range)
            for header in headers if header not in columns_to_exclude]
class Column(object):
    """Describes how a single report column should be compared by a Validator.

    :param name: Header of the column in the csv file.
    :param column_type: One of 'Categorical' (exact match), 'Range' (numeric
        match within acceptable_range) or 'Percent_Depth' (percent and depth
        values each matched within their own range).
    :param acceptable_range: Allowed absolute deviation for 'Range' columns.
    :param percent_range: Allowed deviation of the percent value for
        'Percent_Depth' columns.
    :param depth_range: Allowed deviation of the depth value for
        'Percent_Depth' columns.
    """
    def __init__(self, name, column_type='Categorical', acceptable_range=None, percent_range=None, depth_range=None):
        self.name = name
        self.column_type = column_type
        self.acceptable_range = acceptable_range
        self.percent_range = percent_range
        self.depth_range = depth_range

    def __repr__(self):
        # Debug-friendly representation, added for easier troubleshooting.
        return ('Column(name={!r}, column_type={!r}, acceptable_range={!r}, '
                'percent_range={!r}, depth_range={!r})'.format(
                    self.name, self.column_type, self.acceptable_range,
                    self.percent_range, self.depth_range))
class Validator(object):
    """Compares a test csv report against a reference csv report.

    The columns to compare (and how strictly) are described by the Column
    objects in column_list. Rows of the two reports are matched on the value
    of identifying_column.
    """
    def __init__(self, reference_csv, test_csv, column_list, identifying_column, separator=','):
        """
        :param reference_csv: Path to the csv file with the expected values.
        :param test_csv: Path to the csv file with the values to validate.
        :param column_list: List of Column objects describing columns to check.
        :param identifying_column: Header of the column identifying each sample.
        :param separator: Delimiter passed to pandas (',' for csv, tab for tsv).
        """
        self.identifying_column = identifying_column
        self.separator = separator
        self.reference_csv_df = pd.read_csv(reference_csv, sep=self.separator)
        self.test_csv_df = pd.read_csv(test_csv, sep=self.separator)
        self.column_list = column_list
        self.reference_headers = list(self.reference_csv_df.columns)
        self.test_headers = list(self.test_csv_df.columns)

    def remove_duplicate_header_rows(self):
        """
        Some genesippr reports (specifically mlst and rMLST) have multiple header-ish rows, which messes everything up.
        This will remove those rows from the df so that other methods can actually work.
        :return:
        """
        # A repeated header row has the header text itself in the identifying
        # column, so drop any row whose id equals the column header.
        self.reference_csv_df = self.reference_csv_df[~self.reference_csv_df[
            self.identifying_column].isin([self.identifying_column])]
        self.test_csv_df = self.test_csv_df[~self.test_csv_df[self.identifying_column].isin([self.identifying_column])]

    def same_columns_in_ref_and_test(self):
        """Returns True if reference and test reports have the same set of headers."""
        if set(self.reference_headers) != set(self.test_headers):
            return False
        else:
            return True

    def all_test_columns_in_ref_and_test(self):
        """Returns True if every Column in column_list exists in both reports."""
        all_columns_present = True
        for column in self.column_list:
            if column.name not in self.reference_headers:
                logging.warning('{} was not found in Reference CSV.'.format(column.name))
                all_columns_present = False
            if column.name not in self.test_headers:
                logging.warning('{} was not found in Test CSV.'.format(column.name))
                all_columns_present = False
        return all_columns_present

    def check_samples_present(self):
        """Returns True if both reports contain exactly the same sample ids."""
        samples_in_ref = set(self.reference_csv_df[self.identifying_column])
        samples_in_test = set(self.test_csv_df[self.identifying_column])
        if samples_in_ref != samples_in_test:
            logging.warning('Not all samples in Test set are found in Reference set.')
            logging.warning('Samples in Test but not Reference: {}'.format(samples_in_test.difference(samples_in_ref)))
            logging.warning('Samples in Reference but not Test: {}'.format(samples_in_ref.difference(samples_in_test)))
            return False
        else:
            return True

    def check_columns_match(self):
        """Row-by-row comparison of every Column for every matching sample.

        :return: True if all checked values agree within their tolerances,
            False otherwise (a warning is logged for each mismatch).
        """
        columns_match = True
        for testindex, testrow in self.test_csv_df.iterrows():
            for refindex, refrow in self.reference_csv_df.iterrows():
                if testrow[self.identifying_column] == refrow[self.identifying_column]:
                    for column in self.column_list:
                        if pd.isna(testrow[column.name]) and pd.isna(refrow[column.name]):
                            pass  # Equality doesn't work for na values in pandas, so have to check this first.
                        # Ensure that the value for the test and reference is not 'ND' before proceeding
                        elif testrow[column.name] == 'ND' and refrow[column.name] == 'ND':
                            pass
                        elif column.column_type == 'Categorical':
                            if testrow[column.name] != refrow[column.name]:
                                # Fix: removed leftover debug print that
                                # duplicated this warning on stdout.
                                logging.warning('Attribute {header} ({test}) does not match reference value ({ref}) '
                                                'for sample {sample}'
                                                .format(header=column.name,
                                                        test=testrow[column.name],
                                                        ref=refrow[column.name],
                                                        sample=testrow[self.identifying_column]))
                                columns_match = False
                        elif column.column_type == 'Range':
                            lower_bound = float(refrow[column.name]) - column.acceptable_range
                            upper_bound = float(refrow[column.name]) + column.acceptable_range
                            if not lower_bound <= float(testrow[column.name]) <= upper_bound:
                                logging.warning('Attribute {} is out of range for sample {}'
                                                .format(column.name,
                                                        testrow[self.identifying_column]))
                                columns_match = False
                        elif column.column_type == 'Percent_Depth':
                            # Values look like '98.5% (42.0)': percent first,
                            # then depth in parentheses.
                            test_percent = float(testrow[column.name].split()[0].replace('%', ''))
                            test_depth = float(testrow[column.name].split()[1].replace('(', ''))
                            ref_percent = float(refrow[column.name].split()[0].replace('%', ''))
                            ref_depth = float(refrow[column.name].split()[1].replace('(', ''))
                            upper_percent_bound = ref_percent + column.percent_range
                            lower_percent_bound = ref_percent - column.percent_range
                            upper_depth_bound = ref_depth + column.depth_range
                            lower_depth_bound = ref_depth - column.depth_range
                            if not lower_depth_bound <= test_depth <= upper_depth_bound:
                                logging.warning('Depth is out of range for column {} for sample {}'
                                                .format(column.name,
                                                        testrow[self.identifying_column]))
                                columns_match = False
                            if not lower_percent_bound <= test_percent <= upper_percent_bound:
                                logging.warning('Percent is out of range for column {} for sample {}'
                                                .format(column.name,
                                                        testrow[self.identifying_column]))
                                columns_match = False
        return columns_match

    def get_resfinderesque_dictionaries(self):
        """Groups rows by sample id for reports with several rows per sample.

        Rows whose identifying column is blank (NaN) are assumed to belong to
        the most recently seen sample id.

        :return: (test_row_dict, ref_row_dict), each mapping a sample id to a
            list of {column name: value} dicts, one per row.
        """
        current_id = '-999999999'
        # Test row dictionary is a dict, where key is an ID. Value for each ID is a list, with each index in the list as
        # a dictionary with column name as key and column value as value
        test_row_dict = dict()
        for testindex, testrow in self.test_csv_df.iterrows():
            # Check if current ID is none or equal to previous ID. If so, continue using that ID.
            # Otherwise, update the current ID to whatever it is.
            if testrow[self.identifying_column] == current_id:
                if testrow[self.identifying_column] not in test_row_dict:
                    test_row_dict[testrow[self.identifying_column]] = list()
            elif pd.isna(testrow[self.identifying_column]):
                testrow[self.identifying_column] = current_id
                if testrow[self.identifying_column] not in test_row_dict:
                    test_row_dict[testrow[self.identifying_column]] = list()
            else:
                current_id = testrow[self.identifying_column]
                if testrow[self.identifying_column] not in test_row_dict:
                    test_row_dict[testrow[self.identifying_column]] = list()
            # Now iterate through columns to create necessary dictionary.
            dict_to_append = dict()
            for column in self.column_list:
                dict_to_append[column.name] = testrow[column.name]
            test_row_dict[testrow[self.identifying_column]].append(dict_to_append)
        # Repeat process with reference info
        current_id = '-999999999'
        ref_row_dict = dict()
        for refindex, refrow in self.reference_csv_df.iterrows():
            # Bug fix: this loop previously checked/updated test_row_dict in
            # its first two branches (copy-paste from the loop above), which
            # polluted the test dictionary with empty reference-only entries.
            # It now consistently uses ref_row_dict, matching
            # get_one_to_one_resfinderesque_dictionaries.
            if refrow[self.identifying_column] == current_id:
                if refrow[self.identifying_column] not in ref_row_dict:
                    ref_row_dict[refrow[self.identifying_column]] = list()
            elif pd.isna(refrow[self.identifying_column]):
                refrow[self.identifying_column] = current_id
                if refrow[self.identifying_column] not in ref_row_dict:
                    ref_row_dict[refrow[self.identifying_column]] = list()
            else:
                current_id = refrow[self.identifying_column]
                if refrow[self.identifying_column] not in ref_row_dict:
                    ref_row_dict[refrow[self.identifying_column]] = list()
            # Now iterate through columns to create necessary dictionary.
            dict_to_append = dict()
            for column in self.column_list:
                dict_to_append[column.name] = refrow[column.name]
            ref_row_dict[refrow[self.identifying_column]].append(dict_to_append)
        return test_row_dict, ref_row_dict

    def get_one_to_one_resfinderesque_dictionaries(self):
        """Same row-grouping as get_resfinderesque_dictionaries.

        Kept as a separate method to preserve the public interface used by
        check_resfinderesque_output(one_to_one=True).

        :return: (test_row_dict, ref_row_dict), each mapping a sample id to a
            list of {column name: value} dicts, one per row.
        """
        current_id = '-999999999'
        # Test row dictionary is a dict, where key is an ID. Value for each ID is a list, with each index in the list as
        # a dictionary with column name as key and column value as value
        test_row_dict = dict()
        for testindex, testrow in self.test_csv_df.iterrows():
            # Check if current ID is none or equal to previous ID. If so, continue using that ID.
            # Otherwise, update the current ID to whatever it is.
            if testrow[self.identifying_column] == current_id:
                if testrow[self.identifying_column] not in test_row_dict:
                    test_row_dict[testrow[self.identifying_column]] = list()
            elif pd.isna(testrow[self.identifying_column]):
                testrow[self.identifying_column] = current_id
                if testrow[self.identifying_column] not in test_row_dict:
                    test_row_dict[testrow[self.identifying_column]] = list()
            else:
                current_id = testrow[self.identifying_column]
                if testrow[self.identifying_column] not in test_row_dict:
                    test_row_dict[testrow[self.identifying_column]] = list()
            # Now iterate through columns to create necessary dictionary.
            dict_to_append = dict()
            for column in self.column_list:
                dict_to_append[column.name] = testrow[column.name]
            test_row_dict[testrow[self.identifying_column]].append(dict_to_append)
        # Repeat process with reference info
        current_id = '-999999999'
        ref_row_dict = dict()
        for refindex, refrow in self.reference_csv_df.iterrows():
            # Check if current ID is none or equal to previous ID. If so, continue using that ID.
            # Otherwise, update the current ID to whatever it is.
            if refrow[self.identifying_column] == current_id:
                if refrow[self.identifying_column] not in ref_row_dict:
                    ref_row_dict[refrow[self.identifying_column]] = list()
            elif pd.isna(refrow[self.identifying_column]):
                refrow[self.identifying_column] = current_id
                if refrow[self.identifying_column] not in ref_row_dict:
                    ref_row_dict[refrow[self.identifying_column]] = list()
            else:
                current_id = refrow[self.identifying_column]
                if refrow[self.identifying_column] not in ref_row_dict:
                    ref_row_dict[refrow[self.identifying_column]] = list()
            # Now iterate through columns to create necessary dictionary.
            dict_to_append = dict()
            for column in self.column_list:
                dict_to_append[column.name] = refrow[column.name]
            ref_row_dict[refrow[self.identifying_column]].append(dict_to_append)
        return test_row_dict, ref_row_dict

    def check_resfinderesque_output(self, one_to_one=False, check_rows=True):
        """
        Genesippr's resfinder/virulence modules don't play nice with the standard column matching used in
        check_columns_match, which assumes that the identifying column only has one entry, whereas resfinder output
        has many genes per strain, and each gene gets its own row.
        To handle this, need to get all rows associated with each ID, and then 1) check that each ID has same number
        of entries in test and reference set and 2) sort the rows somehow in case they weren't already sorted, and then
        do row by row comparisons
        :return: True if all checks pass, False otherwise.
        """
        # First, get dictionaries. Each dict has identifying column names as keys, and then has a list as the value
        # for each. Each entry in each list is a dictionary where keys are column headers, and values are the column
        # values
        if one_to_one:
            test_row_dict, ref_row_dict = self.get_one_to_one_resfinderesque_dictionaries()
        else:
            test_row_dict, ref_row_dict = self.get_resfinderesque_dictionaries()
        checks_pass = True
        # Now check that all IDs are present in both test and reference.
        for identifier in test_row_dict:
            if identifier not in ref_row_dict:
                logging.warning('Identifier {} found in test but not reference.'.format(identifier))
                checks_pass = False
        for identifier in ref_row_dict:
            if identifier not in test_row_dict:
                logging.warning('Identifier {} found in reference but not test.'.format(identifier))
                checks_pass = False
        if checks_pass is False:
            return False
        # With that checked, check that each identifier has the same number of rows
        if check_rows:
            for identifier in test_row_dict:
                if len(test_row_dict[identifier]) != len(ref_row_dict[identifier]):
                    logging.warning('Found {} entries in test set, but {} entries in reference set for {}'
                                    .format(len(test_row_dict[identifier]),
                                            len(ref_row_dict[identifier]),
                                            identifier))
                    checks_pass = False
        if checks_pass is False:
            return False
        # Now, if all identifiers have been found and the same number of identifiers are present for both ref and test,
        # check that the values actually work out.
        for identifier in test_row_dict:
            for i in range(len(test_row_dict[identifier])):
                for column in self.column_list:
                    if pd.isna(test_row_dict[identifier][i][column.name]) and \
                            pd.isna(ref_row_dict[identifier][i][column.name]):
                        pass  # Equality doesn't work for na values in pandas, so have to check this first.
                    elif column.column_type == 'Categorical':
                        if test_row_dict[identifier][i][column.name] != ref_row_dict[identifier][i][column.name]:
                            logging.warning('Attribute {header} ({test}) does not match reference value ({ref}) '
                                            'for sample {sample}'.format(header=column.name,
                                                                         test=test_row_dict[identifier][i][column.name],
                                                                         ref=ref_row_dict[identifier][i][column.name],
                                                                         sample=identifier))
                            checks_pass = False
                    elif column.column_type == 'Range':
                        lower_bound = ref_row_dict[identifier][i][column.name] - column.acceptable_range
                        upper_bound = ref_row_dict[identifier][i][column.name] + column.acceptable_range
                        if not lower_bound <= test_row_dict[identifier][i][column.name] <= upper_bound:
                            logging.warning('Attribute {} is out of range for sample {}'
                                            .format(column.name,
                                                    identifier))
                            checks_pass = False
        return checks_pass
| [
"pandas.read_csv",
"logging.warning",
"pandas.isna",
"numpy.issubdtype"
] | [((1101, 1137), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'sep': 'separator'}), '(csv_file, sep=separator)\n', (1112, 1137), True, 'import pandas as pd\n'), ((2092, 2128), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'sep': 'separator'}), '(csv_file, sep=separator)\n', (2103, 2128), True, 'import pandas as pd\n'), ((3100, 3146), 'pandas.read_csv', 'pd.read_csv', (['reference_csv'], {'sep': 'self.separator'}), '(reference_csv, sep=self.separator)\n', (3111, 3146), True, 'import pandas as pd\n'), ((3174, 3215), 'pandas.read_csv', 'pd.read_csv', (['test_csv'], {'sep': 'self.separator'}), '(test_csv, sep=self.separator)\n', (3185, 3215), True, 'import pandas as pd\n'), ((1322, 1364), 'numpy.issubdtype', 'np.issubdtype', (['df[column].dtype', 'np.number'], {}), '(df[column].dtype, np.number)\n', (1335, 1364), True, 'import numpy as np\n'), ((4899, 4973), 'logging.warning', 'logging.warning', (['"""Not all samples in Test set are found in Reference set."""'], {}), "('Not all samples in Test set are found in Reference set.')\n", (4914, 4973), False, 'import logging\n'), ((9907, 9948), 'pandas.isna', 'pd.isna', (['testrow[self.identifying_column]'], {}), '(testrow[self.identifying_column])\n', (9914, 9948), True, 'import pandas as pd\n'), ((11266, 11306), 'pandas.isna', 'pd.isna', (['refrow[self.identifying_column]'], {}), '(refrow[self.identifying_column])\n', (11273, 11306), True, 'import pandas as pd\n'), ((12864, 12905), 'pandas.isna', 'pd.isna', (['testrow[self.identifying_column]'], {}), '(testrow[self.identifying_column])\n', (12871, 12905), True, 'import pandas as pd\n'), ((14221, 14261), 'pandas.isna', 'pd.isna', (['refrow[self.identifying_column]'], {}), '(refrow[self.identifying_column])\n', (14228, 14261), True, 'import pandas as pd\n'), ((17783, 17833), 'pandas.isna', 'pd.isna', (['test_row_dict[identifier][i][column.name]'], {}), '(test_row_dict[identifier][i][column.name])\n', (17790, 17833), True, 'import pandas as pd\n'), ((17868, 17917), 
'pandas.isna', 'pd.isna', (['ref_row_dict[identifier][i][column.name]'], {}), '(ref_row_dict[identifier][i][column.name])\n', (17875, 17917), True, 'import pandas as pd\n'), ((5642, 5671), 'pandas.isna', 'pd.isna', (['testrow[column.name]'], {}), '(testrow[column.name])\n', (5649, 5671), True, 'import pandas as pd\n'), ((5676, 5704), 'pandas.isna', 'pd.isna', (['refrow[column.name]'], {}), '(refrow[column.name])\n', (5683, 5704), True, 'import pandas as pd\n')] |
"""
Defines the data handler interface/ABC
"""
# standard
from abc import ABC, abstractmethod
from typing import TypeAlias
from json import loads, dumps
# 3rd party
from numpy import ndarray, asarray
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import UploadedFile
# local
from .csvtools import CsvParser, NumericCsvValidator
from .dataclasses import ValidationResult, CsvContent
from .named_id_manager import NamedIdObject
DataStorageType: TypeAlias = str
"""Format/type used to store the data in the database."""
class DataHandler(ABC, NamedIdObject):
    """Tooling to validate and transform measurement data.

    Abstract interface: each concrete handler knows how to validate one data
    format, read/write it from files, and convert it to model input/target
    arrays.
    """
    @property
    @abstractmethod
    def description(self) -> str:
        """Description of the handler and its source data, possibly referring to external docs"""
    @abstractmethod
    def validate(self, data: DataStorageType) -> list[ValidationResult]:
        """Validates the data, returning a list of ValidationResult objects"""
    @abstractmethod
    def load_from_file(self, file: UploadedFile) -> DataStorageType:
        """Tries to read data from file (without validation)."""
    @abstractmethod
    def to_file(self, data: DataStorageType) -> ContentFile:
        """Returns the data formatted to a ContentFile, to be served in a download"""
    @abstractmethod
    def to_json(self, data: DataStorageType, indent=None) -> str:
        """Returns the data formatted to json"""
    @abstractmethod
    def to_displaytext(self, data: DataStorageType) -> str:
        """Returns the data formatted as text to be displayed"""
    @abstractmethod
    def to_model_input(self, data: DataStorageType) -> ndarray:
        """Returns the model input part of the data as numpy array, suitable for scoring and training"""
    @abstractmethod
    def to_model_target(self, data: DataStorageType) -> ndarray:
        """Returns the model target (or 'label') of the data as numpy array, suitable for training"""
class NumericCsvHandler(DataHandler):
    """Simple data type for development and testing.\nThe target values are assumed to be within the first column"""
    # NOTE: the class docstring doubles as the user-facing description (see
    # the description property), so it must not be reworded casually.
    @property
    def id_(self) -> str:
        return "NumericCsv"
    @property
    def name(self) -> str:
        return "NumericCsv"
    @property
    def description(self) -> str:
        return self.__doc__
    def validate(self, data: DataStorageType) -> list[ValidationResult]:
        """Validate the data meets requirements"""
        return NumericCsvValidator.validate(CsvContent.from_json(data))
    def load_from_file(self, file: UploadedFile) -> DataStorageType:
        """Tries to read data from file (without validation)."""
        return CsvParser.read(file).to_json()
    def to_file(self, data: DataStorageType) -> ContentFile:
        """Returns the data formatted to a ContentFile, to be served in a download"""
        csv = CsvContent.from_json(data)
        # Build all lines and join once instead of concatenating strings in a
        # loop (avoids quadratic behavior on large reports).
        lines = [",".join(csv.headers)]
        lines.extend(",".join(row) for row in csv.rows)
        return ContentFile("\n".join(lines) + "\n")
    def to_json(self, data: DataStorageType, indent=None) -> str:
        """Returns the data formatted to json"""
        return dumps(loads(data), indent=indent)
    def to_displaytext(self, data: DataStorageType) -> str:
        """Returns the data formatted as text to be displayed"""
        # Delegate to to_json so the display format is defined in one place.
        return self.to_json(data, indent=2)
    def to_model_input(self, data: DataStorageType) -> ndarray:
        """Returns the model input part of the data as numpy array, suitable for scoring and training."""
        # The first column holds the target, so model input is everything after it.
        model_input = [[float(val) for val in row[1:]] for row in CsvContent.from_json(data).rows]
        return asarray(model_input)
    def to_model_target(self, data: DataStorageType) -> ndarray:
        """Returns the model target (or 'label') of the data as numpy array, suitable for training"""
        model_target = [float(row[0]) for row in CsvContent.from_json(data).rows]
        return asarray(model_target)
| [
"numpy.asarray",
"json.loads",
"django.core.files.base.ContentFile"
] | [((3015, 3041), 'django.core.files.base.ContentFile', 'ContentFile', (['contentstring'], {}), '(contentstring)\n', (3026, 3041), False, 'from django.core.files.base import ContentFile\n'), ((3662, 3682), 'numpy.asarray', 'asarray', (['model_input'], {}), '(model_input)\n', (3669, 3682), False, 'from numpy import ndarray, asarray\n'), ((3948, 3969), 'numpy.asarray', 'asarray', (['model_target'], {}), '(model_target)\n', (3955, 3969), False, 'from numpy import ndarray, asarray\n'), ((3179, 3190), 'json.loads', 'loads', (['data'], {}), '(data)\n', (3184, 3190), False, 'from json import loads, dumps\n'), ((3354, 3365), 'json.loads', 'loads', (['data'], {}), '(data)\n', (3359, 3365), False, 'from json import loads, dumps\n')] |
# coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for losses.py.
"""
import copy
from functools import partial # pylint: disable=g-importing-member
from absl.testing import absltest
from flax import nn
from init2winit.model_lib import models
from init2winit.optimizer_lib.hessian_free import gvp
from init2winit.optimizer_lib.hessian_free import hessian_free
from init2winit.optimizer_lib.hessian_free import mf_conjgrad_solver
from init2winit.optimizer_lib.hessian_free import relative_per_iteration_progress_test
from init2winit.optimizer_lib.hessian_free import residual_norm_test
import jax
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
import numpy as np
def get_pd_mat(mat):
  """Builds a positive-definite matrix from a square input matrix.

  Forms the (symmetric, positive semi-definite) Gram matrix mat @ mat.T,
  scales it down by n**2, and adds the identity so every eigenvalue is at
  least one.

  Args:
    mat: A square numpy array of shape (n, n).

  Returns:
    An (n, n) positive-definite numpy array.
  """
  dim = mat.shape[0]
  return mat @ (np.transpose(mat) / dim**2) + np.eye(dim)
class HessianFreeTest(absltest.TestCase):
  """Tests for hessian_free.py."""
  # NOTE(review): this class relies on names imported elsewhere in the module
  # (absltest, relative_per_iteration_progress_test, mf_conjgrad_solver,
  # hessian_free, gvp, models, nn, copy, partial) - only part of the import
  # block is visible in this chunk.
  def test_residual_norm_test(self):
    """Tests residual norm test."""
    # The residual norm is 1e-6: the test converges for tolerance 1e-2
    # (returns 1) but not for the tighter tolerance 1e-4 (returns 0).
    rs_norm = 1e-6
    self.assertEqual(residual_norm_test(0, rs_norm, 0., [], 1e-2), 1)
    self.assertEqual(residual_norm_test(0, rs_norm, 0., [], 1e-4), 0)
  def test_relative_per_iteration_progress_test(self):
    """Tests relative_per_iteration_progress_test."""
    # Objective stalled at -15 for 10 recorded steps while the current value
    # is -10, so the relative-progress criterion should report convergence.
    obj_value = -10
    obj_values = -15 * np.ones(10)
    tol = 1e-3
    step = 15
    convergd = relative_per_iteration_progress_test(step, 0, obj_value,
                                                    obj_values, tol)
    self.assertEqual(convergd, 1.0)
  def test_conjgrad(self):
    """Tests conjugate gradient method."""
    n = 5
    # Small dense positive-definite system; CG should solve it within
    # the 10-iteration budget.
    mat = get_pd_mat(
        np.array(
            [[2., 4., 5., 2., 8.],
             [0., 4., 3., 5., 3.],
             [-2., -2., 9., -2., -6.],
             [4., 1., -11., 1., 4.],
             [-5., 4., -9., 3., -2.]]))
    b = np.array([-3, 2, 0, 3, -4])
    x0 = np.ones(n)
    test_matmul_fn = lambda x: mat @ x
    x = mf_conjgrad_solver(test_matmul_fn, b, x0, n, 1e-6, 10, None,
                           'residual_norm_test')
    # The returned solution should satisfy mat @ x == b up to 1e-3.
    self.assertAlmostEqual(np.linalg.norm(test_matmul_fn(x) - b), 0, places=3)
  def test_conjgrad_preconditioning(self):
    """Tests conjugate gradient method with preconditioning."""
    n = 5
    mat = get_pd_mat(
        np.array(
            [[2., 4., 5., 2., 8.],
             [0., 4., 3., 5., 3.],
             [-2., -2., 9., -2., -6.],
             [4., 1., -11., 1., 4.],
             [-5., 4., -9., 3., -2.]]))
    # Arbitrary positive-definite preconditioner applied as a matmul.
    precond_mat = get_pd_mat(
        np.array(
            [[4., 2., 0., 2., 4.],
             [-2., 4., 4., 2., 6.],
             [4., 4., -8., -2., -4.],
             [-2., 2., 4., 0., -2.],
             [2., 2., -6., 4., 0.]]))
    b = np.array([-3, 2, 0, 3, -4])
    x0 = np.ones(n)
    test_matmul_fn = lambda x: mat @ x
    test_precond_fn = lambda x: precond_mat @ x
    x = mf_conjgrad_solver(test_matmul_fn, b, x0, n, 1e-6, 10, test_precond_fn,
                           'residual_norm_test')
    self.assertAlmostEqual(np.linalg.norm(test_matmul_fn(x) - b), 0, places=3)
  def test_conjgrad_martens_termination_criterion(self):
    """Tests conjugate gradient method with martens termination criterion."""
    # Larger (500 x 500) system solved with the relative-progress stopping
    # rule instead of the residual norm.
    n = 500
    mat = get_pd_mat(
        np.array([[((i + j) % n) for j in range(n)] for i in range(n)]))
    b = np.linspace(1, n, n) / n
    x0 = np.zeros(n)
    test_mvm_fn = lambda x: mat @ x
    x = mf_conjgrad_solver(test_mvm_fn, b, x0, n, 1e-6, 500, None,
                           'relative_per_iteration_progress_test')
    # Value of the CG quadratic objective 0.5 * x^T A x - b^T x at the
    # solution; the expected constant is a precomputed reference value.
    f_value = np.dot(x, test_mvm_fn(x) - 2 * b) / 2
    self.assertAlmostEqual(f_value, -0.223612323, places=8)
  def test_hessian_free_optimizer(self):
    """Tests the Hessian-free optimizer."""
    # Build a tiny two-layer autoencoder with fixed weights so that the
    # Gauss-Newton matrix can be computed densely and compared against the
    # optimizer's implicit matrix-vector products.
    model_str = 'autoencoder'
    model_cls = models.get_model(model_str)
    model_hps = models.get_model_hparams(model_str)
    loss = 'sigmoid_binary_cross_entropy'
    metrics = 'binary_autoencoder_metrics'
    input_shape = (2, 2, 1)
    output_shape = (4,)
    hps = copy.copy(model_hps)
    hps.update({
        'hid_sizes': [2],
        'activation_function': 'id',
        'input_shape': input_shape,
        'output_shape': output_shape
    })
    model = model_cls(hps, {}, loss, metrics)
    inputs = jnp.array([[[1, 0], [1, 1]], [[1, 0], [0, 1]]])
    targets = inputs.reshape(tuple([inputs.shape[0]] + list(output_shape)))
    batch = {'inputs': inputs, 'targets': targets}
    def forward_fn(params, inputs):
      return nn.base.Model(model.flax_module_def, params)(inputs)
    def opt_cost(params):
      return model.loss_fn(forward_fn(params, inputs), targets)
    optimizer = hessian_free(model.loss_fn)
    # Hand-picked deterministic parameters for both dense layers.
    params = {
        'Dense_0': {
            'kernel': jnp.array([[-1., 2.], [2., 0.], [-1., 3.], [-2., 2.]]),
            'bias': jnp.array([0., 0.])
        },
        'Dense_1': {
            'kernel': jnp.array([[4., 2., -2., 4.], [-3., 1., 2., -4.]]),
            'bias': jnp.array([0., 0., 0., 0.])
        }
    }
    grad_fn = jax.grad(opt_cost)
    grads = grad_fn(params)
    outputs = forward_fn(params, batch['inputs'])
    n = inputs.shape[0]
    m = outputs.shape[-1]
    d = ravel_pytree(params)[0].shape[0]
    v = np.ones(d)
    p0 = np.zeros(d)
    damping = 1
    state = optimizer.init(p0, damping)
    partial_forward_fn = partial(forward_fn, inputs=batch['inputs'])
    partial_loss_fn = partial(model.loss_fn, targets=batch['targets'])
    matmul_fn = partial(gvp, params, outputs, damping, partial_forward_fn,
                        partial_loss_fn)
    # Flatten the per-example Jacobian into an (examples, outputs, params)
    # tensor matching the ravel_pytree parameter ordering.
    jacobian = jax.jacfwd(partial_forward_fn)(params)
    jacobian_tensor = np.concatenate((
        jacobian['Dense_0']['bias'].reshape(n, m, -1),
        jacobian['Dense_0']['kernel'].reshape(n, m, -1),
        jacobian['Dense_1']['bias'].reshape(n, m, -1),
        jacobian['Dense_1']['kernel'].reshape(n, m, -1)), axis=2)
    # Dense generalized Gauss-Newton matrix: mean over examples of
    # J^T H J, plus damping * I.
    ggn_matrix = 0
    for i in range(n):
      jacobian_matrix = jacobian_tensor[i]
      hessian = jax.hessian(partial_loss_fn)(outputs[i])
      ggn_matrix += np.transpose(jacobian_matrix) @ hessian @ jacobian_matrix
    ggn_matrix /= n
    ggn_matrix += damping * np.identity(d)
    expected = ggn_matrix @ v
    # Test the gvp function
    self.assertAlmostEqual(
        jnp.linalg.norm(matmul_fn(v) - expected), 0, places=4)
    p, state = optimizer.update(grads, state, forward_fn, batch, params)
    # Test the damping parameter update
    self.assertEqual(state.damping, 3/2)
    # Test the search direction
    # The update should equal -GGN^{-1} @ grad up to the tolerance.
    self.assertAlmostEqual(
        jnp.linalg.norm(
            ravel_pytree(p)[0] +
            jnp.linalg.inv(ggn_matrix) @ ravel_pytree(grads)[0]),
        0,
        places=4)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| [
"absl.testing.absltest.main",
"init2winit.optimizer_lib.hessian_free.residual_norm_test",
"numpy.ones",
"flax.nn.base.Model",
"init2winit.optimizer_lib.hessian_free.hessian_free",
"jax.jacfwd",
"numpy.transpose",
"numpy.identity",
"init2winit.model_lib.models.get_model_hparams",
"numpy.linspace",
... | [((7244, 7259), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (7257, 7259), False, 'from absl.testing import absltest\n'), ((1374, 1383), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (1380, 1383), True, 'import numpy as np\n'), ((1906, 1979), 'init2winit.optimizer_lib.hessian_free.relative_per_iteration_progress_test', 'relative_per_iteration_progress_test', (['step', '(0)', 'obj_value', 'obj_values', 'tol'], {}), '(step, 0, obj_value, obj_values, tol)\n', (1942, 1979), False, 'from init2winit.optimizer_lib.hessian_free import relative_per_iteration_progress_test\n'), ((2383, 2410), 'numpy.array', 'np.array', (['[-3, 2, 0, 3, -4]'], {}), '([-3, 2, 0, 3, -4])\n', (2391, 2410), True, 'import numpy as np\n'), ((2420, 2430), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2427, 2430), True, 'import numpy as np\n'), ((2479, 2566), 'init2winit.optimizer_lib.hessian_free.mf_conjgrad_solver', 'mf_conjgrad_solver', (['test_matmul_fn', 'b', 'x0', 'n', '(1e-06)', '(10)', 'None', '"""residual_norm_test"""'], {}), "(test_matmul_fn, b, x0, n, 1e-06, 10, None,\n 'residual_norm_test')\n", (2497, 2566), False, 'from init2winit.optimizer_lib.hessian_free import mf_conjgrad_solver\n'), ((3252, 3279), 'numpy.array', 'np.array', (['[-3, 2, 0, 3, -4]'], {}), '([-3, 2, 0, 3, -4])\n', (3260, 3279), True, 'import numpy as np\n'), ((3289, 3299), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (3296, 3299), True, 'import numpy as np\n'), ((3396, 3494), 'init2winit.optimizer_lib.hessian_free.mf_conjgrad_solver', 'mf_conjgrad_solver', (['test_matmul_fn', 'b', 'x0', 'n', '(1e-06)', '(10)', 'test_precond_fn', '"""residual_norm_test"""'], {}), "(test_matmul_fn, b, x0, n, 1e-06, 10, test_precond_fn,\n 'residual_norm_test')\n", (3414, 3494), False, 'from init2winit.optimizer_lib.hessian_free import mf_conjgrad_solver\n'), ((3881, 3892), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3889, 3892), True, 'import numpy as np\n'), ((3939, 4042), 
'init2winit.optimizer_lib.hessian_free.mf_conjgrad_solver', 'mf_conjgrad_solver', (['test_mvm_fn', 'b', 'x0', 'n', '(1e-06)', '(500)', 'None', '"""relative_per_iteration_progress_test"""'], {}), "(test_mvm_fn, b, x0, n, 1e-06, 500, None,\n 'relative_per_iteration_progress_test')\n", (3957, 4042), False, 'from init2winit.optimizer_lib.hessian_free import mf_conjgrad_solver\n'), ((4310, 4337), 'init2winit.model_lib.models.get_model', 'models.get_model', (['model_str'], {}), '(model_str)\n', (4326, 4337), False, 'from init2winit.model_lib import models\n'), ((4354, 4389), 'init2winit.model_lib.models.get_model_hparams', 'models.get_model_hparams', (['model_str'], {}), '(model_str)\n', (4378, 4389), False, 'from init2winit.model_lib import models\n'), ((4540, 4560), 'copy.copy', 'copy.copy', (['model_hps'], {}), '(model_hps)\n', (4549, 4560), False, 'import copy\n'), ((4782, 4829), 'jax.numpy.array', 'jnp.array', (['[[[1, 0], [1, 1]], [[1, 0], [0, 1]]]'], {}), '([[[1, 0], [1, 1]], [[1, 0], [0, 1]]])\n', (4791, 4829), True, 'import jax.numpy as jnp\n'), ((5168, 5195), 'init2winit.optimizer_lib.hessian_free.hessian_free', 'hessian_free', (['model.loss_fn'], {}), '(model.loss_fn)\n', (5180, 5195), False, 'from init2winit.optimizer_lib.hessian_free import hessian_free\n'), ((5536, 5554), 'jax.grad', 'jax.grad', (['opt_cost'], {}), '(opt_cost)\n', (5544, 5554), False, 'import jax\n'), ((5735, 5745), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (5742, 5745), True, 'import numpy as np\n'), ((5756, 5767), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (5764, 5767), True, 'import numpy as np\n'), ((5850, 5893), 'functools.partial', 'partial', (['forward_fn'], {'inputs': "batch['inputs']"}), "(forward_fn, inputs=batch['inputs'])\n", (5857, 5893), False, 'from functools import partial\n'), ((5916, 5964), 'functools.partial', 'partial', (['model.loss_fn'], {'targets': "batch['targets']"}), "(model.loss_fn, targets=batch['targets'])\n", (5923, 5964), False, 'from functools 
import partial\n'), ((5982, 6057), 'functools.partial', 'partial', (['gvp', 'params', 'outputs', 'damping', 'partial_forward_fn', 'partial_loss_fn'], {}), '(gvp, params, outputs, damping, partial_forward_fn, partial_loss_fn)\n', (5989, 6057), False, 'from functools import partial\n'), ((1577, 1622), 'init2winit.optimizer_lib.hessian_free.residual_norm_test', 'residual_norm_test', (['(0)', 'rs_norm', '(0.0)', '[]', '(0.01)'], {}), '(0, rs_norm, 0.0, [], 0.01)\n', (1595, 1622), False, 'from init2winit.optimizer_lib.hessian_free import residual_norm_test\n'), ((1647, 1694), 'init2winit.optimizer_lib.hessian_free.residual_norm_test', 'residual_norm_test', (['(0)', 'rs_norm', '(0.0)', '[]', '(0.0001)'], {}), '(0, rs_norm, 0.0, [], 0.0001)\n', (1665, 1694), False, 'from init2winit.optimizer_lib.hessian_free import residual_norm_test\n'), ((1849, 1860), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1856, 1860), True, 'import numpy as np\n'), ((2179, 2342), 'numpy.array', 'np.array', (['[[2.0, 4.0, 5.0, 2.0, 8.0], [0.0, 4.0, 3.0, 5.0, 3.0], [-2.0, -2.0, 9.0, -\n 2.0, -6.0], [4.0, 1.0, -11.0, 1.0, 4.0], [-5.0, 4.0, -9.0, 3.0, -2.0]]'], {}), '([[2.0, 4.0, 5.0, 2.0, 8.0], [0.0, 4.0, 3.0, 5.0, 3.0], [-2.0, -2.0,\n 9.0, -2.0, -6.0], [4.0, 1.0, -11.0, 1.0, 4.0], [-5.0, 4.0, -9.0, 3.0, -\n 2.0]])\n', (2187, 2342), True, 'import numpy as np\n'), ((2816, 2979), 'numpy.array', 'np.array', (['[[2.0, 4.0, 5.0, 2.0, 8.0], [0.0, 4.0, 3.0, 5.0, 3.0], [-2.0, -2.0, 9.0, -\n 2.0, -6.0], [4.0, 1.0, -11.0, 1.0, 4.0], [-5.0, 4.0, -9.0, 3.0, -2.0]]'], {}), '([[2.0, 4.0, 5.0, 2.0, 8.0], [0.0, 4.0, 3.0, 5.0, 3.0], [-2.0, -2.0,\n 9.0, -2.0, -6.0], [4.0, 1.0, -11.0, 1.0, 4.0], [-5.0, 4.0, -9.0, 3.0, -\n 2.0]])\n', (2824, 2979), True, 'import numpy as np\n'), ((3050, 3211), 'numpy.array', 'np.array', (['[[4.0, 2.0, 0.0, 2.0, 4.0], [-2.0, 4.0, 4.0, 2.0, 6.0], [4.0, 4.0, -8.0, -\n 2.0, -4.0], [-2.0, 2.0, 4.0, 0.0, -2.0], [2.0, 2.0, -6.0, 4.0, 0.0]]'], {}), '([[4.0, 2.0, 0.0, 2.0, 4.0], [-2.0, 
4.0, 4.0, 2.0, 6.0], [4.0, 4.0,\n -8.0, -2.0, -4.0], [-2.0, 2.0, 4.0, 0.0, -2.0], [2.0, 2.0, -6.0, 4.0, 0.0]]\n )\n', (3058, 3211), True, 'import numpy as np\n'), ((3847, 3867), 'numpy.linspace', 'np.linspace', (['(1)', 'n', 'n'], {}), '(1, n, n)\n', (3858, 3867), True, 'import numpy as np\n'), ((6098, 6128), 'jax.jacfwd', 'jax.jacfwd', (['partial_forward_fn'], {}), '(partial_forward_fn)\n', (6108, 6128), False, 'import jax\n'), ((6678, 6692), 'numpy.identity', 'np.identity', (['d'], {}), '(d)\n', (6689, 6692), True, 'import numpy as np\n'), ((1347, 1364), 'numpy.transpose', 'np.transpose', (['mat'], {}), '(mat)\n', (1359, 1364), True, 'import numpy as np\n'), ((5007, 5051), 'flax.nn.base.Model', 'nn.base.Model', (['model.flax_module_def', 'params'], {}), '(model.flax_module_def, params)\n', (5020, 5051), False, 'from flax import nn\n'), ((5255, 5317), 'jax.numpy.array', 'jnp.array', (['[[-1.0, 2.0], [2.0, 0.0], [-1.0, 3.0], [-2.0, 2.0]]'], {}), '([[-1.0, 2.0], [2.0, 0.0], [-1.0, 3.0], [-2.0, 2.0]])\n', (5264, 5317), True, 'import jax.numpy as jnp\n'), ((5331, 5352), 'jax.numpy.array', 'jnp.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5340, 5352), True, 'import jax.numpy as jnp\n'), ((5405, 5463), 'jax.numpy.array', 'jnp.array', (['[[4.0, 2.0, -2.0, 4.0], [-3.0, 1.0, 2.0, -4.0]]'], {}), '([[4.0, 2.0, -2.0, 4.0], [-3.0, 1.0, 2.0, -4.0]])\n', (5414, 5463), True, 'import jax.numpy as jnp\n'), ((5477, 5508), 'jax.numpy.array', 'jnp.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (5486, 5508), True, 'import jax.numpy as jnp\n'), ((6511, 6539), 'jax.hessian', 'jax.hessian', (['partial_loss_fn'], {}), '(partial_loss_fn)\n', (6522, 6539), False, 'import jax\n'), ((5693, 5713), 'jax.flatten_util.ravel_pytree', 'ravel_pytree', (['params'], {}), '(params)\n', (5705, 5713), False, 'from jax.flatten_util import ravel_pytree\n'), ((6572, 6601), 'numpy.transpose', 'np.transpose', (['jacobian_matrix'], {}), '(jacobian_matrix)\n', (6584, 6601), True, 'import 
numpy as np\n'), ((7097, 7112), 'jax.flatten_util.ravel_pytree', 'ravel_pytree', (['p'], {}), '(p)\n', (7109, 7112), False, 'from jax.flatten_util import ravel_pytree\n'), ((7130, 7156), 'jax.numpy.linalg.inv', 'jnp.linalg.inv', (['ggn_matrix'], {}), '(ggn_matrix)\n', (7144, 7156), True, 'import jax.numpy as jnp\n'), ((7159, 7178), 'jax.flatten_util.ravel_pytree', 'ravel_pytree', (['grads'], {}), '(grads)\n', (7171, 7178), False, 'from jax.flatten_util import ravel_pytree\n')] |
from torch_utils.ops import upfirdn2d
import torch
import numpy as np
import torch.nn as nn
from .. import layers
from ..layers.stylegan2_layers import Conv2dLayer, DiscriminatorEpilogue, StyleGAN2Block
from ..build import DISCRIMINATOR_REGISTRY
@DISCRIMINATOR_REGISTRY.register_module
class FPNDiscriminator(layers.Module):
    """Discriminator with an FPN-style side branch.

    Builds a StyleGAN2-like downsampling trunk and a feature-pyramid branch
    that upsamples trunk features back up to produce a per-pixel
    ``segmentation`` / ``E`` output next to the realness ``score``.

    Parameters
    ----------
    cnum : int
        Base channel count; per-resolution widths are multiples of this.
    max_cnum_mul : int
        Upper bound on the channel multiplier (see ``get_chsize``).
    imsize : tuple of int
        Input spatial resolution (height, width).
    min_fmap_resolution : int
        Smallest feature-map side length of the trunk.
    image_channels : int
        Number of channels of the input image.
    input_condition : bool
        If True, a condition image and a mask are concatenated to the input.
    semantic_nc : int
        Number of semantic-mask channels (ignored when ``pred_only_cse``).
    semantic_input_mode : str
        Either ``"at_input"`` (concat semantic mask at input) or
        ``"progressive_input"`` (concat at every trunk level).
    conv_clamp : int
        Clamping value forwarded to the StyleGAN2 conv layers.
    input_cse : bool
        If True, a CSE embedding is concatenated to the input.
    cse_nc : int
        Number of CSE embedding channels (ignored when ``pred_only_semantic``).
    pred_only_cse, pred_only_semantic : bool
        Mutually exclusive switches that drop one of the two prediction heads
        from the returned dict.
    """

    def __init__(
            self,
            cnum: int,
            max_cnum_mul: int,
            imsize,
            min_fmap_resolution: int,
            image_channels: int,
            input_condition: bool,
            semantic_nc: int,
            semantic_input_mode: str,
            conv_clamp: int,
            input_cse: bool,
            cse_nc: int,
            pred_only_cse: bool = False,
            pred_only_semantic: bool = False,
            *args,
            **kwargs):
        super().__init__()
        # Only one of the two auxiliary outputs may be active; the inactive
        # one has its channel count zeroed out below.
        if pred_only_cse:
            semantic_nc = None
        if pred_only_semantic:
            cse_nc = None
        self.pred_only_cse = pred_only_cse
        self.pred_only_semantic = pred_only_semantic
        assert semantic_nc is None or cse_nc is None
        semantic_nc = 0 if semantic_nc is None else semantic_nc
        cse_nc = 0 if cse_nc is None else cse_nc
        # From here on ``semantic_nc`` is the combined auxiliary channel count.
        semantic_nc += cse_nc
        self._max_imsize = max(imsize)
        self._cnum = cnum
        self._max_cnum_mul = max_cnum_mul
        self._min_fmap_resolution = min_fmap_resolution
        self._input_condition = input_condition
        self.semantic_input_mode = semantic_input_mode
        self.input_cse = input_cse
        self.layers = nn.ModuleList()
        out_ch = self.get_chsize(self._max_imsize)
        # Input channels depend on which auxiliary inputs get concatenated in
        # forward(): condition image + mask, semantic mask, CSE embedding.
        self.from_rgb = StyleGAN2Block(
            image_channels + input_condition*(image_channels+1) +
            semantic_nc*(semantic_input_mode == "at_input") + input_cse*cse_nc,
            out_ch, imsize, None, architecture="orig", conv_clamp=conv_clamp
        )
        # Extra output channel when only semantics are predicted (cse_nc == 0).
        self.output_seg_layer = Conv2dLayer(
            semantic_nc, semantic_nc+1*(cse_nc==0), None, None, kernel_size=1, activation="linear")
        n_levels = int(np.log2(self._max_imsize) - np.log2(min_fmap_resolution))+1
        self.fpn_out = nn.ModuleList()
        self.fpn_up = nn.ModuleList()
        for i in range(n_levels):
            # Trunk resolution halves per level; channel width grows until
            # capped by max_cnum_mul (see get_chsize).
            resolution = [x//2**i for x in imsize]
            in_ch = self.get_chsize(max(resolution))
            out_ch = self.get_chsize(max(max(resolution)//2, min_fmap_resolution))
            if i != n_levels - 1:
                # Deepest up-layer takes raw trunk features; the others take
                # already-projected semantic_nc-channel maps.
                fpn_up_in_ = semantic_nc if i != n_levels - 2 else in_ch
                up = 2 if i != 0 else 1
                fpn_up = Conv2dLayer(fpn_up_in_, semantic_nc, None, resolution, kernel_size=1, activation="linear", up=up, conv_clamp=conv_clamp)
                self.fpn_up.append(fpn_up)
            # 1x1 lateral projection of trunk features for the FPN skips.
            fpn_conv = Conv2dLayer(in_ch, semantic_nc, None, resolution, kernel_size=1, activation="linear", conv_clamp=conv_clamp)
            self.fpn_out.append(fpn_conv)
            if semantic_input_mode == "progressive_input":
                # NOTE(review): "sematic" is a typo in the submodule name, but
                # renaming it would invalidate existing checkpoints.
                self.layers.add_module(f"sematic_input{'x'.join([str(_) for _ in resolution])}", layers.SemanticCat())
                in_ch += semantic_nc
            down = 2
            if i == 0:
                down = 1
            block = StyleGAN2Block(
                in_ch, out_ch, resolution=resolution, down=down, conv_clamp=conv_clamp
            )
            self.layers.append(block)
        self.output_layer = DiscriminatorEpilogue(
            out_ch, resolution, conv_clamp=conv_clamp)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1]))

    def forward(self, img, condition, mask, semantic_mask=None, embedding=None, **kwargs):
        """Return dict with realness ``score`` and segmentation/CSE outputs."""
        # Concatenate whichever auxiliary inputs this instance was built with.
        to_cat = [img]
        if self.semantic_input_mode == "at_input":
            to_cat.append(semantic_mask)
        if self._input_condition:
            to_cat.extend([condition, mask,])
        if self.input_cse:
            to_cat.extend([embedding])
        x = torch.cat(to_cat, dim=1)
        batch = {"x": x, "semantic_mask": semantic_mask, "mask": None}
        batch = self.from_rgb(batch)
        # Collect lateral FPN skips while running the downsampling trunk.
        fpn_skips = [self.fpn_out[0](batch["x"])]
        for i, layer in enumerate(self.layers):
            batch = layer(batch)
            if i < len(self.layers)-2:
                fpn_skips.append(
                    self.fpn_out[i+1](batch["x"], gain=np.sqrt(.5))
                )
            elif i == len(self.layers) - 2:
                # Deepest skip keeps the raw trunk features (matches the
                # in_ch fed to the last fpn_up layer in __init__).
                fpn_skips.append(batch["x"])
        fpn_skips.reverse()
        # Top-down pass: upsample and merge skips from coarse to fine.
        segmentation = fpn_skips[0]
        for i in range(len(self.fpn_up)):
            segmentation = self.fpn_up[-i-1](segmentation, gain=np.sqrt(0.5))
            segmentation = (segmentation + fpn_skips[i+1])
        batch = self.output_layer(batch)
        segmentation = self.output_seg_layer(segmentation)
        x = batch["x"]
        # ``segmentation`` and ``E`` alias the same tensor; one is dropped
        # below depending on the pred_only_* flags.
        out = dict(score=x, segmentation=segmentation, E=segmentation)
        if self.pred_only_cse:
            del out["segmentation"]
        if self.pred_only_semantic:
            del out["E"]
        return out

    def get_chsize(self, imsize):
        """Channel width for a feature map of side length ``imsize``."""
        # Doubles per halving of resolution, capped at max_cnum_mul.
        n = int(np.log2(self._max_imsize) - np.log2(imsize))
        mul = min(2 ** n, self._max_cnum_mul)
        ch = self._cnum * mul
        return int(ch)
| [
"torch_utils.ops.upfirdn2d.setup_filter",
"torch.nn.ModuleList",
"numpy.log2",
"torch.cat",
"numpy.sqrt"
] | [((1580, 1595), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1593, 1595), True, 'import torch.nn as nn\n'), ((2172, 2187), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2185, 2187), True, 'import torch.nn as nn\n'), ((2210, 2225), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2223, 2225), True, 'import torch.nn as nn\n'), ((3984, 4008), 'torch.cat', 'torch.cat', (['to_cat'], {'dim': '(1)'}), '(to_cat, dim=1)\n', (3993, 4008), False, 'import torch\n'), ((3581, 3617), 'torch_utils.ops.upfirdn2d.setup_filter', 'upfirdn2d.setup_filter', (['[1, 3, 3, 1]'], {}), '([1, 3, 3, 1])\n', (3603, 3617), False, 'from torch_utils.ops import upfirdn2d\n'), ((5133, 5158), 'numpy.log2', 'np.log2', (['self._max_imsize'], {}), '(self._max_imsize)\n', (5140, 5158), True, 'import numpy as np\n'), ((5161, 5176), 'numpy.log2', 'np.log2', (['imsize'], {}), '(imsize)\n', (5168, 5176), True, 'import numpy as np\n'), ((2089, 2114), 'numpy.log2', 'np.log2', (['self._max_imsize'], {}), '(self._max_imsize)\n', (2096, 2114), True, 'import numpy as np\n'), ((2117, 2145), 'numpy.log2', 'np.log2', (['min_fmap_resolution'], {}), '(min_fmap_resolution)\n', (2124, 2145), True, 'import numpy as np\n'), ((4668, 4680), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4675, 4680), True, 'import numpy as np\n'), ((4377, 4389), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4384, 4389), True, 'import numpy as np\n')] |
import os
from os.path import split
from functools import partial
import numpy as np
from scipy import sparse, signal
from scipy.io import loadmat
import mne
from mne.stats import permutation_cluster_test, ttest_1samp_no_p
import borsar
from borsar.utils import find_index
from borsar.cluster import Clusters, construct_adjacency_matrix
from . import utils
from .stats import ttest_ind_no_p, ttest_rel_no_p
# Location of channel data bundled with the package: <package>/data/chan
# holds the *_neighbours.mat files that ``get_neighbours`` looks up.
base_dir = split(__file__)[0]
chan_path = os.path.join(base_dir, 'data', 'chan')
# read channel connectivity
# consider renaming to read_neighbours
def get_neighbours(captype):
    '''Read the channel neighbourhood structure for a given cap type.

    Parameters
    ----------
    captype : str
        Either a path to a fieldtrip-style ``*_neighbours.mat`` file or the
        name of a cap type to look up in the bundled ``data/chan`` folder.

    Returns
    -------
    neighbours
        The ``neighbours`` variable loaded from the matlab file.
    '''
    assert isinstance(captype, str), 'captype must be a string.'
    if os.path.exists(captype):
        # a direct file path was passed
        file_name = captype
    else:
        # a cap type was passed - search the bundled neighbour files
        matching = [fname for fname in os.listdir(chan_path)
                    if fname.endswith('.mat') and '_neighbours' in fname
                    and captype in fname]
        if not matching:
            raise ValueError('Could not find specified cap type.')
        file_name = os.path.join(chan_path, matching[0])
    return loadmat(file_name, squeeze_me=True)['neighbours']
# - [ ] add edit option (runs in interactive mode only)
# - [ ] new lines should have one color
# - [x] 'random' is actually misleading - it follows colorcycle...
# another approach to random colors:
# plt.cm.viridis(np.linspace(0., 1., num=15) , alpha=0.5)
def plot_neighbours(inst, adj_matrix, color='gray', kind='3d'):
    '''Plot channel adjacency.

    Parameters
    ----------
    inst : mne Raw, Epochs or info
        mne-python data container
    adj_matrix : boolean numpy array
        Defines which channels are adjacent to each other.
    color : matplotlib color or 'random'
        Color to plot the web of adjacency relations with.
    kind : str
        Plot kind - ``'3d'`` for a 3d sensor plot or ``'2d'`` for a topomap.

    Returns
    -------
    fig : matplotlib figure
        Figure.
    '''
    tps = utils.mne_types()
    from .viz import set_3d_axes_equal
    from mne.viz import plot_sensors
    assert isinstance(inst, (tps['raw'], tps['epochs'], tps['info'],
                             mne.Evoked))
    info = utils.get_info(inst)
    if isinstance(adj_matrix, sparse.coo_matrix):
        adj_matrix = adj_matrix.toarray()
    # pick line width depending on whether adjacency is weighted (int)
    # or binary (bool / 0-1 float)
    if adj_matrix.dtype == 'int':
        max_lw = 5.
        max_conn = adj_matrix.max()
        def get_lw():
            # NOTE(review): reads ``ch`` and ``n`` from the enclosing scope at
            # call time; inside ``onpick`` these keep their last loop values.
            return adj_matrix[ch, n] / max_conn * max_lw
    elif adj_matrix.dtype == 'bool' or (np.unique(adj_matrix) ==
                                        np.array([0., 1.])).all():
        def get_lw():
            return 1.
    if kind == '3d':
        fig = plot_sensors(info, kind=kind, show=False)
        pos = np.array([x['loc'][:3] for x in info['chs']])
        set_3d_axes_equal(fig.axes[0])
    elif kind == '2d':
        import matplotlib as mpl
        fig = plot_sensors(info, kind='topomap', show=False)
        fig.axes[0].axis('equal')
        path_collection = fig.axes[0].findobj(mpl.collections.PathCollection)
        pos = path_collection[0].get_offsets()
        path_collection[0].set_zorder(10)
    lines = dict()
    for ch in range(adj_matrix.shape[0]):
        ngb = np.where(adj_matrix[ch, :])[0]
        for n in ngb:
            this_pos = pos[[ch, n], :]
            chan_pair = [ch, n]
            # BUG FIX: ``sorted(chan_pair)`` returned a new list that was
            # discarded - sort in place so dict keys are order-independent
            chan_pair.sort()
            this_color = color if not color == 'random' else np.random.random()
            if kind == '3d':
                lines[tuple(chan_pair)] = fig.axes[0].plot(
                    this_pos[:, 0], this_pos[:, 1], this_pos[:, 2],
                    color=this_color, lw=get_lw())[0]
            elif kind == '2d':
                lines[tuple(chan_pair)] = fig.axes[0].plot(
                    this_pos[:, 0], this_pos[:, 1],
                    color=this_color, lw=get_lw())[0]
    highlighted = list()
    highlighted_scatter = list()
    def onpick(event, axes=None, positions=None, highlighted=None,
               line_dict=None, highlighted_scatter=None, adj_matrix=None):
        # Interactive editing: first click highlights a node, second click
        # toggles the adjacency line between the two nodes.
        node_ind = event.ind[0]
        if node_ind in highlighted:
            # change node color back to normal
            highlighted_scatter[0].remove()
            highlighted_scatter.pop(0)
            highlighted.pop(0)
            fig.canvas.draw()
        else:
            if len(highlighted) == 0:
                # add current node
                highlighted.append(node_ind)
                if kind == '3d':
                    scatter = axes.scatter(positions[node_ind, 0],
                                           positions[node_ind, 1],
                                           positions[node_ind, 2],
                                           c='r', s=100, zorder=15)
                elif kind == '2d':
                    scatter = axes.scatter(positions[node_ind, 0],
                                           positions[node_ind, 1],
                                           c='r', s=100, zorder=15)
                highlighted_scatter.append(scatter)
                fig.canvas.draw()
            else:
                # add or remove line
                both_nodes = [highlighted[0], node_ind]
                # BUG FIX: ``sorted(both_nodes)`` was discarded before, so the
                # key lookup in ``line_dict`` could miss - sort in place
                both_nodes.sort()
                if tuple(both_nodes) in line_dict.keys():
                    # remove line
                    line_dict[tuple(both_nodes)].remove()
                    # remove line_dict entry
                    del line_dict[tuple(both_nodes)]
                    # clear adjacency matrix entry
                    adj_matrix[both_nodes[0], both_nodes[1]] = False
                    adj_matrix[both_nodes[1], both_nodes[0]] = False
                else:
                    # add line
                    selected_pos = positions[both_nodes, :]
                    if kind == '3d':
                        line = axes.plot(
                            selected_pos[:, 0], selected_pos[:, 1],
                            selected_pos[:, 2], lw=get_lw())[0]
                    elif kind == '2d':
                        line = axes.plot(selected_pos[:, 0],
                                         selected_pos[:, 1],
                                         lw=get_lw())[0]
                    # add line to line_dict
                    line_dict[tuple(both_nodes)] = line
                    # modify adjacency matrix
                    adj_matrix[both_nodes[0], both_nodes[1]] = True
                    adj_matrix[both_nodes[1], both_nodes[0]] = True
                # highlight new node, de-highlight previous
                highlighted.append(node_ind)
                if kind == '3d':
                    scatter = axes.scatter(positions[node_ind, 0],
                                           positions[node_ind, 1],
                                           positions[node_ind, 2],
                                           c='r', s=100, zorder=10)
                elif kind == '2d':
                    scatter = axes.scatter(positions[node_ind, 0],
                                           positions[node_ind, 1],
                                           c='r', s=100, zorder=10)
                highlighted_scatter.append(scatter)
                highlighted_scatter[0].remove()
                highlighted_scatter.pop(0)
                highlighted.pop(0)
                fig.canvas.draw()
    this_onpick = partial(onpick, axes=fig.axes[0], positions=pos,
                          highlighted=list(), line_dict=lines,
                          highlighted_scatter=list(), adj_matrix=adj_matrix)
    fig.canvas.mpl_connect('pick_event', this_onpick)
    return fig
def find_adjacency(inst, picks=None):
    '''Find channel adjacency matrix via Delaunay triangulation.

    Parameters
    ----------
    inst : mne object instance
        mne object with channel positions (Raw, Epochs, Evoked, ...).
    picks : list-like of int | None
        Channel indices to use. Defaults to ``None``, which uses all channels.

    Returns
    -------
    adjacency : 2d boolean numpy array
        Channel adjacency matrix.
    ch_names : list of str
        Names of the channels the adjacency matrix refers to.
    '''
    from scipy.spatial import Delaunay
    from mne.channels.layout import _find_topomap_coords
    # mne renamed "connectivity" to "adjacency" at some version;
    # BUG FIX: catch ImportError explicitly instead of a bare ``except``
    # that would also swallow unrelated errors.
    try:
        from mne.source_estimate import spatial_tris_connectivity as adjacency
    except ImportError:
        from mne.source_estimate import spatial_tris_adjacency as adjacency
    n_channels = len(inst.ch_names)
    picks = np.arange(n_channels) if picks is None else picks
    ch_names = [inst.info['ch_names'][pick] for pick in picks]
    xy = _find_topomap_coords(inst.info, picks)
    # triangulate twice with one axis stretched each time and take the union
    # of the resulting neighbourhoods (presumably to avoid dropping long
    # horizontal/vertical edges - TODO confirm)
    # first on 2x, y
    coords = xy.copy()
    coords[:, 0] *= 2
    tri = Delaunay(coords)
    neighbors1 = adjacency(tri.simplices)
    # then on x, 2y
    coords = xy.copy()
    coords[:, 1] *= 2
    tri = Delaunay(coords)
    neighbors2 = adjacency(tri.simplices)
    adjacency = neighbors1.toarray() | neighbors2.toarray()
    return adjacency, ch_names
def cluster(data, adjacency=None, min_adj_ch=0):
    '''Cluster ``data`` given channel adjacency.

    Delegates to the borsar clustering backend picked for this data /
    adjacency combination by ``_get_cluster_fun``.
    '''
    from borsar.cluster.label import _get_cluster_fun
    backend_fun = _get_cluster_fun(data, adjacency, min_adj_ch=min_adj_ch)
    return backend_fun(data, adjacency, min_adj_ch=min_adj_ch)
# TODO: do not convert to sparse if already sparse
def cluster_1d(data, connectivity=None):
    '''Find clusters in 1d data using mne's ``_find_clusters``.'''
    from mne.stats.cluster_level import _find_clusters
    # mne expects sparse connectivity
    conn = (sparse.coo_matrix(connectivity)
            if connectivity is not None else None)
    return _find_clusters(data, 0.5, connectivity=conn)
# TODO: this needs docs!
def cluster_spread(cluster, connectivity):
    '''Count how many cluster points neighbouring channels share.

    Parameters
    ----------
    cluster : boolean numpy array
        Cluster membership mask of shape ``(n_channels, ...)``.
    connectivity : boolean numpy array
        Channel adjacency matrix of shape ``(n_channels, n_channels)``.

    Returns
    -------
    spread : numpy array of int
        Symmetric ``(n_channels, n_channels)`` matrix where each entry is the
        number of cluster points shared by the corresponding pair of adjacent
        channels (zero for non-adjacent pairs).
    '''
    n_chan = connectivity.shape[0]
    spread = np.zeros((n_chan, n_chan), 'int')
    flat_masks = [cluster[idx, :].ravel() for idx in range(n_chan)]
    # visit each adjacent pair once via the lower triangle of connectivity
    for idx in range(n_chan - 1):
        this_mask = flat_masks[idx]
        ngb_indices = np.where(connectivity[idx + 1:, idx])[0] + idx + 1
        for other in ngb_indices:
            shared = (this_mask & flat_masks[other]).sum()
            spread[idx, other] = shared
            spread[other, idx] = shared
    return spread
# - [x] add min_channel_neighbours
# - [ ] add docs!
# - [ ] min_neighbours as a 0 - 1 float
# - [ ] include_channels (what was the idea here?)
def filter_clusters(mat, min_neighbours=4, min_channels=0, connectivity=None):
    '''Remove cluster points with too few neighbouring cluster points.

    Parameters
    ----------
    mat : 2d or 3d numpy array
        Cluster mask; 3d input is ``channels x dim1 x dim2``. A 2d input is
        promoted to 3d with a singleton channel axis (and returned that way).
    min_neighbours : int
        Minimum number of active 8-neighbourhood points required to keep
        a point.
    min_channels : int
        If > 0, additionally require each kept point to be present in at
        least this many adjacent channels (``connectivity`` must be given).
    connectivity : 2d numpy array | None
        Channel adjacency matrix, required when ``min_channels > 0``.

    Returns
    -------
    mat : 3d numpy array
        Filtered copy of the input.
    '''
    # 8-neighbourhood kernel (the centre itself is not counted)
    neighbour_kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    mat = mat.copy()
    if mat.ndim == 2:
        mat = mat[np.newaxis, :, :]
    n_chan = mat.shape[0]
    for ch_idx in range(n_chan):
        ngb_count = signal.convolve2d(mat[ch_idx], neighbour_kernel,
                                      mode='same')
        mat[ch_idx] &= ngb_count >= min_neighbours
    if min_channels > 0:
        assert connectivity is not None
        # note: channels are filtered sequentially, so later channels see
        # the already-filtered versions of earlier ones
        for ch_idx in range(n_chan):
            adjacent = np.where(connectivity[ch_idx, :])[0]
            mat[ch_idx] &= mat[adjacent].sum(axis=0) >= min_channels
    return mat
def remove_links(mat, min_pixels=5):
    '''Drop per-channel blobs smaller than ``min_pixels``.

    Remove clusters that are smaller than ``min_pixels`` within any given
    slice (channel) of the matrix. Such small blobs sometimes create weak
    links between otherwise strong clusters.

    Parameters
    ----------
    mat : 3d boolean numpy array
        Cluster mask of shape ``(n_channels, dim1, dim2)``.
    min_pixels : int
        Minimum number of pixels a blob must have to be kept.

    Returns
    -------
    mat : 3d boolean numpy array
        Copy of the input with too-small blobs zeroed out.
    '''
    from skimage.measure import label
    mat = mat.copy()
    # label connected blobs within every channel slice independently
    for ch_idx in range(mat.shape[0]):
        blob_labels = label(mat[ch_idx, :, :], connectivity=1,
                            background=False)
        for blob_id in range(1, blob_labels.max() + 1):
            blob_mask = blob_labels == blob_id
            if blob_mask.sum() < min_pixels:
                blob_labels[blob_mask] = 0
        mat[ch_idx, blob_labels == 0] = False
    return mat
def relabel_mat(mat, label_map):
    '''Change values in a matrix of integers such that mapping given
    in label_map dict is fulfilled.

    parameters
    ----------
    mat - numpy array of integers
    label_map - dictionary, how to remap integer labels

    returns
    -------
    mat_relab - relabeled numpy array
    '''
    # index into the *original* matrix so chained remappings
    # (e.g. {1: 2, 2: 3}) do not cascade
    relabelled = mat.copy()
    for old_label, new_label in label_map.items():
        relabelled[mat == old_label] = new_label
    return relabelled
def smooth(matrix, sd=2.):
    '''Smooth a matrix with a gaussian kernel.

    Parameters
    ----------
    matrix : numpy array
        2d array or 3d array (``channels x dim1 x dim2``). For 3d input every
        channel is smoothed independently, so no smoothing happens across the
        channel dimension.
    sd : float
        Standard deviation of the gaussian kernel.

    Returns
    -------
    matrix : numpy array
        Smoothed copy of the input.
    '''
    # FIX: ``scipy.ndimage.filters`` is a long-deprecated alias namespace -
    # import from ``scipy.ndimage`` directly
    from scipy.ndimage import gaussian_filter
    matrix = matrix.copy()
    if matrix.ndim > 2:
        # smooth each channel separately
        n_chan = matrix.shape[0]
        for ch in range(n_chan):
            matrix[ch, :] = gaussian_filter(matrix[ch, :], sd)
    else:
        matrix = gaussian_filter(matrix, sd)
    return matrix
def check_list_inst(data, inst):
    '''Check that ``data`` is a list of supported mne objects of one type.

    Parameters
    ----------
    data : list
        Objects to validate.
    inst : type | tuple of types
        Supported type(s); every element must be an instance of ``inst``.

    Raises
    ------
    TypeError
        If any element is not an instance of ``inst``, or if the elements are
        not all of the same concrete type.
    '''
    tps = list()
    for this_data in data:
        if not isinstance(this_data, inst):
            raise TypeError('One of the objects in data list does not '
                            'belong to supported mne objects (Evoked, '
                            'AverageTFR).')
        tps.append(type(this_data))
    # BUG FIX: previously a *list* of comparison results was tested for
    # truthiness - any non-empty list is truthy, so mixed-type input was
    # never detected. Use all() to actually evaluate the comparisons.
    all_same_type = all(tp is tps[0] for tp in tps[1:])
    if not all_same_type:
        raise TypeError('Not all objects in the data list are of the same'
                        ' mne object class.')
# - [ ] add TFR tests!
# - [ ] make sure min_adj_ch works with 2d
# - [ ] add Epochs to supported types if single_trial
# - [ ] one_sample is not passed to lower functions...
# - [ ] add 2-step tests?
def permutation_cluster_ttest(data1, data2, paired=False, n_permutations=1000,
                              threshold=None, p_threshold=0.05,
                              adjacency=None, tmin=None, tmax=None,
                              fmin=None, fmax=None, trial_level=False,
                              min_adj_ch=0):
    '''Perform cluster-based permutation test with t test as statistic.

    Parameters
    ----------
    data1 : list of mne objects
        List of objects (Evokeds, TFRs) belonging to condition one.
    data2 : list of mne objects | None
        List of objects (Evokeds, TFRs) belonging to condition two. If
        ``None`` a one-sample test against zero is performed on ``data1``.
    paired : bool
        Whether to perform a paired t test. Defaults to ``False``.
    n_permutations : int
        How many permutations to perform. Defaults to ``1000``.
    threshold : value
        Cluster entry threshold defined by the value of the statistic.
        Defaults to ``None`` which calculates threshold from p value (see
        ``p_threshold``).
    p_threshold : value
        Cluster entry threshold defined by the p value.
    adjacency : boolean array | sparse array
        Information about channel adjacency.
    tmin : float
        Start of the time window of interest (in seconds). Defaults to
        ``None`` which takes the earliest possible time.
    tmax : float
        End of the time window of interest (in seconds). Defaults to ``None``
        which takes the latest possible time.
    fmin : float
        Start of the frequency window of interest (in Hz). Defaults to
        ``None`` which takes the lowest possible frequency.
    fmax : float
        End of the frequency window of interest (in Hz). Defaults to ``None``
        which takes the highest possible frequency.
    trial_level : bool
        Whether the data are single-trial (EpochsTFR / epochs-level PSD)
        rather than subject-level averages.
    min_adj_ch: int
        Minimum number of adjacent in-cluster channels to retain a point in
        the cluster.

    Returns
    -------
    clst : borsar.cluster.Clusters
        Obtained clusters.
    '''
    # choose the statistic: two-sample (paired or independent) when two
    # condition lists are given, one-sample t test against zero otherwise
    if data2 is not None:
        one_sample = False
        stat_fun = ttest_rel_no_p if paired else ttest_ind_no_p
    else:
        one_sample = True
        stat_fun = lambda data: ttest_1samp_no_p(data[0])
    # mne renamed 'connectivity' to 'adjacency' - probe which keyword this
    # mne version uses (NOTE(review): bare except also swallows other errors)
    try:
        kwarg = 'connectivity'
        from mne.source_estimate import spatial_tris_connectivity
    except:
        kwarg = 'adjacency'
        from mne.source_estimate import spatial_tris_adjacency
    inst = data1[0]
    len1 = len(data1)
    len2 = len(data2) if data2 is not None else 0
    if paired:
        assert len1 == len2
    # ``_compute_threshold`` is defined elsewhere in this module; it derives
    # the cluster-entry threshold from p_threshold when threshold is None
    threshold = _compute_threshold([data1, data2], threshold, p_threshold,
                                   trial_level, paired, one_sample)
    # data1 and data2 have to be Evokeds or TFRs
    supported_types = (mne.Evoked, borsar.freq.PSD,
                       mne.time_frequency.AverageTFR,
                       mne.time_frequency.EpochsTFR)
    check_list_inst(data1, inst=supported_types)
    if data2 is not None:
        check_list_inst(data2, inst=supported_types)
    # find time and frequency ranges
    # ------------------------------
    # translate tmin/tmax and fmin/fmax into index slices for this data type
    if isinstance(inst, (mne.Evoked, mne.time_frequency.AverageTFR)):
        tmin = 0 if tmin is None else inst.time_as_index(tmin)[0]
        tmax = (len(inst.times) if tmax is None
                else inst.time_as_index(tmax)[0] + 1)
        time_slice = slice(tmin, tmax)
    if isinstance(inst, (borsar.freq.PSD, mne.time_frequency.AverageTFR)):
        fmin = 0 if fmin is None else find_index(data1[0].freqs, fmin)
        fmax = (len(inst.freqs) if fmax is None
                else find_index(data1[0].freqs, fmax))
        freq_slice = slice(fmin, fmax + 1)
    # handle object-specific data
    # ---------------------------
    # stack the per-object data into one array per condition; the 1d/2d
    # paths transpose so that channels end up in the last dimension
    if isinstance(inst, mne.time_frequency.AverageTFR):
        # + fmin, fmax
        assert not trial_level
        # data are in observations x channels x frequencies x time
        data1 = np.stack([tfr.data[:, freq_slice, time_slice]
                          for tfr in data1], axis=0)
        data2 = (np.stack([tfr.data[:, freq_slice, time_slice]
                           for tfr in data2], axis=0)
                 if data2 is not None else data2)
    elif isinstance(inst, mne.time_frequency.EpochsTFR):
        assert trial_level
        data1 = inst.data[..., freq_slice, time_slice]
        data2 = (data2[0].data[..., freq_slice, time_slice]
                 if data2 is not None else data2)
    elif isinstance(inst, borsar.freq.PSD):
        if not inst._has_epochs:
            assert not trial_level
            data1 = np.stack([psd.data[:, freq_slice].T for psd in data1],
                             axis=0)
            data2 = (np.stack([psd.data[:, freq_slice].T for psd in data2],
                              axis=0) if data2 is not None else data2)
        else:
            assert trial_level
            data1 = data1[0].data[..., freq_slice].transpose((0, 2, 1))
            data2 = (data2[0].data[..., freq_slice].transpose((0, 2, 1))
                     if data2 is not None else data2)
    else:
        data1 = np.stack([erp.data[:, time_slice].T for erp in data1], axis=0)
        data2 = (np.stack([erp.data[:, time_slice].T for erp in data2], axis=0)
                 if data2 is not None else data2)
    data_3d = data1.ndim > 3
    # mne's cluster test wants sparse adjacency for the 1d/2d path
    if (isinstance(adjacency, np.ndarray) and not sparse.issparse(adjacency)
            and not data_3d):
        adjacency = sparse.coo_matrix(adjacency)
    # perform cluster-based test
    # --------------------------
    # TODO: now our cluster-based works also for 1d and 2d etc.
    if not data_3d:
        # 1d/2d path: delegate to mne's permutation_cluster_test
        # NOTE(review): ``one_sample`` is not forwarded here - for
        # data2=None this still passes [data1, None]; verify against callers
        assert min_adj_ch == 0
        adj_param = {kwarg: adjacency}
        stat, clusters, cluster_p, _ = permutation_cluster_test(
            [data1, data2], stat_fun=stat_fun, threshold=threshold,
            n_permutations=n_permutations, out_type='mask', **adj_param)
        if isinstance(inst, mne.Evoked):
            dimcoords = [inst.ch_names, inst.times[time_slice]]
            dimnames = ['chan', 'time']
        elif isinstance(inst, borsar.freq.PSD):
            dimcoords = [inst.ch_names, inst.freqs[freq_slice]]
            dimnames = ['chan', 'freq']
        # transpose back so channels are the first dimension in Clusters
        return Clusters(stat.T, [c.T for c in clusters], cluster_p,
                        info=inst.info, dimnames=dimnames,
                        dimcoords=dimcoords)
    else:
        # 3d (chan x freq x time) path: use the in-house cluster test
        stat, clusters, cluster_p = permutation_cluster_test_array(
            [data1, data2], adjacency, stat_fun, threshold=threshold,
            n_permutations=n_permutations, one_sample=one_sample,
            paired=paired, min_adj_ch=min_adj_ch)
        # pack into Clusters object
        dimcoords = [inst.ch_names, inst.freqs, inst.times[tmin:tmax]]
        return Clusters(stat, clusters, cluster_p, info=inst.info,
                        dimnames=['chan', 'freq', 'time'], dimcoords=dimcoords)
# TODO: add condition order argument? This may require a large refactoring of
# the function to allow for 2-step tests (step 1 - within subjects,
# step 2 - across subjects)
# TODO: move `min_adj_ch` up and add `min_adj`
def permutation_cluster_test_array(data, adjacency, stat_fun=None,
                                   threshold=None, p_threshold=0.05,
                                   paired=False, one_sample=False, tail='both',
                                   n_permutations=1000, n_stat_permutations=0,
                                   progress=True, return_distribution=False,
                                   backend='auto', min_adj_ch=0):
    """Permutation cluster test on array data.

    Parameters
    ----------
    data : np.ndarray | list of np.ndarray
        An array where first two dimensions are ``conditions x observations``
        or list of arrays where each array has observations in the first
        dimension. If the data contains channels it should be in the dimension
        immediately after observations.
    adjacency : 2d boolean array | None
        Array that denotes adjacency between channels (or vertices). If
        ``None`` it is assumed that no channels/vertices are present.
    stat_fun : function | None
        Statistical function to use. It should allow as many arguments as
        conditions and should return one array of computed statistics.
    threshold : float | None
        Cluster entry threshold for the test statistic. If ``None`` (default)
        the ``p_threshold`` argument is used.
    p_threshold : float
        P value threshold to use in cluster entry threshold computation. For
        standard parametric tests (t test, ANOVA) it is computed from
        theoretical test distribution; if ``n_stat_permutations`` is above zero
        the threshold is obtained from percentile of permutation distribution.
    paired : bool
        Whether the permutations should be conducted for paired samples
        scenario (randomization of condition orders within observations).
        Also used to automatically pick a statistical test if ``stat_fun``
        is ``None``.
    one_sample : bool
        Whether the permutations should be conducted for a one sample scenario
        (sign flipping randomization). Also used to automatically pick a
        statistical test if ``stat_fun`` is ``None``.
    tail : str
        Which differences to test. ``'both'`` tests positive and negative
        effects, while ``'pos'`` - only positive. NEG is not implemented!
    n_permutations : int
        Number of cluster based permutations to perform. Defaults to ``1000``.
    n_stat_permutations : int
        If ``n_stat_permutations > 0`` then this many permutations will be
        used to compute statistical cluster-entry thresholds (instead of the
        parametric threshold derived from ``p_threshold``).
    progress : bool | str | tqdm progressbar
        Whether to show a progressbar (if boolean) or what kind of progressbar
        to show (``'notebook'`` or ``'text'``). Alternatively a progressbar
        can be passed that will be reset and set to a new maximum.
    return_distribution : bool
        Whether to also return the null distribution of max cluster statistics
        from permutations (as a ``dict(pos=..., neg=...)``).
    backend : str
        Clustering backend: ``'auto'``, ``'mne'``, ``'borsar'`` or
        ``'numpy'``. Defaults to ``'auto'``.
    min_adj_ch : int
        Minimum number of adjacent in-cluster channels to retain a point in
        the cluster.

    Returns
    -------
    stat : np.ndarray
        Statistic computed on the non-permuted data.
    clusters : list
        Found clusters (boolean masks), sorted by p value.
    cluster_p : np.ndarray
        Cluster p values (only when clusters were found; otherwise the raw
        cluster statistics are returned in this position).
    """
    from .utils import progressbar
    from borsar.cluster.label import _get_cluster_fun, find_clusters

    n_groups = len(data)
    if stat_fun is None:
        stat_fun = _find_stat_fun(n_groups, paired, tail)

    # exactly one of `one_sample` / `paired` has to be used
    if one_sample == paired:
        raise ValueError('Currently you have to use either one_sample=True or'
                         ' paired=True')

    n_obs = data[0].shape[0]
    signs_size = tuple([n_obs] + [1] * (data[0].ndim - 1))
    if one_sample:
        signs = np.array([-1, 1])

    pos_dist = np.zeros(n_permutations)
    # BUGFIX: always allocate `neg_dist` - it is referenced both in the
    # cluster p value computation (for negative clusters) and in the
    # `return_distribution` branch; previously this raised a NameError
    # whenever tail != 'both'. For tail != 'both' it simply stays all-zero.
    neg_dist = np.zeros(n_permutations)

    # test on non-permuted data
    stat = stat_fun(*data)

    # compute cluster-entry threshold: from a permutation distribution if
    # n_stat_permutations > 0, otherwise parametrically from p_threshold
    # FIXME: streamline/simplify permutation reshaping and transposing
    # FIXME: time and see whether a different solution (numba?) is better
    if n_stat_permutations > 0:
        threshold = _compute_threshold_via_permutations(
            data, paired, tail, stat_fun, p_threshold, n_stat_permutations)
    else:
        threshold = _compute_threshold(data, threshold, p_threshold,
                                       paired, one_sample)

    # find clusters in the observed statistic
    cluster_fun = _get_cluster_fun(stat, adjacency=adjacency,
                                   backend=backend, min_adj_ch=min_adj_ch)
    clusters, cluster_stats = find_clusters(
        stat, threshold, adjacency=adjacency, cluster_fun=cluster_fun,
        min_adj_ch=min_adj_ch)

    if not clusters:
        print('No clusters found, permutations are not performed.')
        return stat, clusters, cluster_stats
    else:
        msg = 'Found {} clusters, computing permutations.'
        print(msg.format(len(clusters)))

    if paired and n_groups > 2:
        # all cyclic shifts of the condition order
        orders = [np.arange(n_groups)]
        for _ in range(n_groups - 1):
            orders.append(np.roll(orders[-1], shift=-1))
        data_all = np.stack(data, axis=0)

    pbar = progressbar(progress, total=n_permutations)

    # compute permutations
    for perm in range(n_permutations):
        # permute data / predictors
        if one_sample:
            # one-sample sign-flip
            # (np.random.randint(0, 2) draws 0 or 1 - equivalent to the
            #  deprecated/removed np.random.random_integers(0, 1))
            idx = np.random.randint(0, 2, size=signs_size)
            perm_signs = signs[idx]
            perm_data = [data[0] * perm_signs]
        elif paired and n_groups == 2:
            # this is analogous to one-sample sign-flip but with paired data
            # (we could also perform one sample t test on condition
            #  differences with sign-flip in the permutation step)
            idx1 = np.random.randint(0, 2, size=signs_size)
            idx2 = 1 - idx1
            perm_data = list()
            perm_data.append(data[0] * idx1 + data[1] * idx2)
            perm_data.append(data[0] * idx2 + data[1] * idx1)
        elif paired and n_groups > 2:
            # pick a random (cyclic) condition order per observation
            ord_idx = np.random.randint(0, n_groups, size=n_obs)
            perm_data = data_all.copy()
            for obs_idx in range(n_obs):
                this_order = orders[ord_idx[obs_idx]]
                perm_data[:, obs_idx] = data_all[this_order, obs_idx]

        perm_stat = stat_fun(*perm_data)
        perm_clusters, perm_cluster_stats = find_clusters(
            perm_stat, threshold, adjacency=adjacency,
            cluster_fun=cluster_fun, min_adj_ch=min_adj_ch)

        # if any clusters were found - add max statistic
        if len(perm_cluster_stats) > 0:
            max_val = perm_cluster_stats.max()
            if max_val > 0:
                pos_dist[perm] = max_val
            if tail == 'both':
                min_val = perm_cluster_stats.min()
                if min_val < 0:
                    neg_dist[perm] = min_val

        # BUGFIX: the original tested `if progressbar:` - the truthiness of
        # the imported *function*, which is always True. Guard on the object
        # actually created from `progress` instead.
        if pbar is not None:
            pbar.update(1)

    # compute permutation probability
    cluster_p = np.array([(pos_dist > cluster_stat).mean() if cluster_stat > 0
                          else (neg_dist < cluster_stat).mean()
                          for cluster_stat in cluster_stats])
    if tail == 'both':
        cluster_p *= 2  # because we use two-tail
    cluster_p[cluster_p > 1.] = 1.  # probability has to be <= 1.

    # sort clusters by p value
    cluster_order = np.argsort(cluster_p)
    cluster_p = cluster_p[cluster_order]
    clusters = [clusters[i] for i in cluster_order]

    if return_distribution:
        return stat, clusters, cluster_p, dict(pos=pos_dist, neg=neg_dist)
    else:
        return stat, clusters, cluster_p
def _compute_threshold(data, threshold, p_threshold, paired,
one_sample):
if threshold is None:
from scipy.stats import distributions
n_groups = len(data)
lens = [len(d) for d in data]
if n_groups < 3:
len1 = len(data[0])
len2 = len(data[1]) if (len(data) > 1 and data[1] is not None) else 0
df = (len1 - 1 if paired or one_sample else len1 + len2 - 2)
threshold = np.abs(distributions.t.ppf(p_threshold / 2., df=df))
else:
# ANOVA F
n_obs = data[0].shape[0] if paired else sum(lens)
dfn = n_groups - 1
dfd = n_obs - n_groups
threshold = distributions.f.ppf(1. - p_threshold, dfn, dfd)
return threshold
def _find_stat_fun(n_groups, paired, tail):
'''Find relevant stat_fun given ``n_groups``, ``paired`` and ``tail``.'''
if n_groups > 2 and tail == 'both':
raise ValueError('Number of compared groups is > 2, but tail is set'
' to "both". If you want to use ANOVA, set tail to'
' "pos".')
if n_groups > 2 and not tail == 'both':
if paired:
# repeated measures ANOVA
return rm_anova_stat_fun
else:
from scipy.stats import f_oneway
def stat_fun(*args):
fval, _ = f_oneway(*args)
return fval
return stat_fun
else:
if paired:
from scipy.stats import ttest_rel
def stat_fun(*args):
tval, _ = ttest_rel(*args)
return tval
return stat_fun
else:
# TODO: always assume non-equal variance?
from mne.stats import ttest_ind_no_p
return ttest_ind_no_p
def rm_anova_stat_fun(*args):
    '''Stat fun that does one-way repeated measures ANOVA.'''
    from mne.stats import f_mway_rm

    # observations x conditions x ...
    stacked = np.stack(args, axis=1)
    n_levels = stacked.shape[1]
    f_value, _ = f_mway_rm(stacked, factor_levels=[n_levels],
                           return_pvals=False)

    # restore the trailing (spatial/spectral) dimensions - presumably
    # f_mway_rm returns them flattened here; TODO confirm against mne docs
    if stacked.ndim > 3:
        f_value = f_value.reshape(stacked.shape[2:])
    return f_value
# FIXME: streamline/simplify permutation reshaping and transposing
# FIXME: time and see whether a different solution (numba?) is better
# TODO: separate progressbar for threshold permutations
def _compute_threshold_via_permutations(data, paired, tail, stat_fun,
p_threshold, n_perm):
'''Assumes n_conditions x n_observations x ... data array.
Note that the permutations are implemented via shuffling of the condition
labels, not randomization of independent condition orders.'''
assert paired, "Unpaired permutations are not implemented."
# concatenate condition dimension if needed
if isinstance(data, list):
data = np.stack(data, axis=0)
dims = np.arange(data.ndim)
dims[:2] = [1, 0]
n_cond, n_obs = data.shape[:2]
data_unr = data.transpose(*dims).reshape(n_cond * n_obs,
*data.shape[2:])
stats = np.zeros(shape=(n_perm, *data.shape[2:]))
# compute permutations of the stat
for perm_idx in range(n_perm):
rnd = (np.random.random(size=(n_cond, n_obs))).argsort(axis=0)
idx = (rnd + np.arange(n_obs)[None, :] * n_cond).T.ravel()
this_data = data_unr[idx].reshape(
n_obs, n_cond, *data.shape[2:]).transpose(*dims)
stats[perm_idx] = stat_fun(*this_data)
# now check threshold
if tail == 'pos':
percentile = 100 - p_threshold * 100
threshold = np.percentile(stats, percentile, axis=0)
elif tail == 'neg':
percentile = p_threshold * 100
threshold = np.percentile(stats, percentile, axis=0)
elif tail == 'both':
percentile_neg = p_threshold / 2 * 100
percentile_pos = 100 - p_threshold / 2 * 100
threshold = [np.percentile(stats, perc, axis=0)
for perc in [percentile_pos, percentile_neg]]
else:
raise ValueError(f'Unrecognized tail "{tail}"')
return threshold
| [
"scipy.io.loadmat",
"scipy.sparse.issparse",
"scipy.stats.f_oneway",
"numpy.argsort",
"skimage.measure.label",
"scipy.stats.distributions.t.ppf",
"numpy.arange",
"numpy.random.randint",
"os.path.join",
"numpy.random.random_integers",
"numpy.unique",
"scipy.spatial.Delaunay",
"mne.stats.ttest... | [((455, 493), 'os.path.join', 'os.path.join', (['base_dir', '"""data"""', '"""chan"""'], {}), "(base_dir, 'data', 'chan')\n", (467, 493), False, 'import os\n'), ((424, 439), 'os.path.split', 'split', (['__file__'], {}), '(__file__)\n', (429, 439), False, 'from os.path import split\n'), ((664, 687), 'os.path.exists', 'os.path.exists', (['captype'], {}), '(captype)\n', (678, 687), False, 'import os\n'), ((8097, 8135), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['inst.info', 'picks'], {}), '(inst.info, picks)\n', (8117, 8135), False, 'from mne.channels.layout import _find_topomap_coords\n'), ((8213, 8229), 'scipy.spatial.Delaunay', 'Delaunay', (['coords'], {}), '(coords)\n', (8221, 8229), False, 'from scipy.spatial import Delaunay\n'), ((8247, 8271), 'mne.source_estimate.spatial_tris_adjacency', 'adjacency', (['tri.simplices'], {}), '(tri.simplices)\n', (8256, 8271), True, 'from mne.source_estimate import spatial_tris_adjacency as adjacency\n'), ((8348, 8364), 'scipy.spatial.Delaunay', 'Delaunay', (['coords'], {}), '(coords)\n', (8356, 8364), False, 'from scipy.spatial import Delaunay\n'), ((8382, 8406), 'mne.source_estimate.spatial_tris_adjacency', 'adjacency', (['tri.simplices'], {}), '(tri.simplices)\n', (8391, 8406), True, 'from mne.source_estimate import spatial_tris_adjacency as adjacency\n'), ((8619, 8675), 'borsar.cluster.label._get_cluster_fun', '_get_cluster_fun', (['data', 'adjacency'], {'min_adj_ch': 'min_adj_ch'}), '(data, adjacency, min_adj_ch=min_adj_ch)\n', (8635, 8675), False, 'from borsar.cluster.label import _get_cluster_fun, find_clusters\n'), ((8984, 9036), 'mne.stats.cluster_level._find_clusters', '_find_clusters', (['data', '(0.5)'], {'connectivity': 'connectivity'}), '(data, 0.5, connectivity=connectivity)\n', (8998, 9036), False, 'from mne.stats.cluster_level import _find_clusters\n'), ((9155, 9188), 'numpy.zeros', 'np.zeros', (['(n_chan, n_chan)', '"""int"""'], {}), "((n_chan, n_chan), 
'int')\n", (9163, 9188), True, 'import numpy as np\n'), ((9989, 10032), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n', (9997, 10032), True, 'import numpy as np\n'), ((24396, 24420), 'numpy.zeros', 'np.zeros', (['n_permutations'], {}), '(n_permutations)\n', (24404, 24420), True, 'import numpy as np\n'), ((25137, 25225), 'borsar.cluster.label._get_cluster_fun', '_get_cluster_fun', (['stat'], {'adjacency': 'adjacency', 'backend': 'backend', 'min_adj_ch': 'min_adj_ch'}), '(stat, adjacency=adjacency, backend=backend, min_adj_ch=\n min_adj_ch)\n', (25153, 25225), False, 'from borsar.cluster.label import _get_cluster_fun, find_clusters\n'), ((25287, 25390), 'borsar.cluster.label.find_clusters', 'find_clusters', (['stat', 'threshold'], {'adjacency': 'adjacency', 'cluster_fun': 'cluster_fun', 'min_adj_ch': 'min_adj_ch'}), '(stat, threshold, adjacency=adjacency, cluster_fun=cluster_fun,\n min_adj_ch=min_adj_ch)\n', (25300, 25390), False, 'from borsar.cluster.label import _get_cluster_fun, find_clusters\n'), ((28154, 28175), 'numpy.argsort', 'np.argsort', (['cluster_p'], {}), '(cluster_p)\n', (28164, 28175), True, 'import numpy as np\n'), ((30393, 30415), 'numpy.stack', 'np.stack', (['args'], {'axis': '(1)'}), '(args, axis=1)\n', (30401, 30415), True, 'import numpy as np\n'), ((30461, 30523), 'mne.stats.f_mway_rm', 'f_mway_rm', (['data'], {'factor_levels': '[n_factors]', 'return_pvals': '(False)'}), '(data, factor_levels=[n_factors], return_pvals=False)\n', (30470, 30523), False, 'from mne.stats import f_mway_rm\n'), ((31359, 31379), 'numpy.arange', 'np.arange', (['data.ndim'], {}), '(data.ndim)\n', (31368, 31379), True, 'import numpy as np\n'), ((31559, 31600), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_perm, *data.shape[2:])'}), '(shape=(n_perm, *data.shape[2:]))\n', (31567, 31600), True, 'import numpy as np\n'), ((1133, 1168), 'scipy.io.loadmat', 'loadmat', (['file_name'], {'squeeze_me': '(True)'}), 
'(file_name, squeeze_me=True)\n', (1140, 1168), False, 'from scipy.io import loadmat\n'), ((2634, 2675), 'mne.viz.plot_sensors', 'plot_sensors', (['info'], {'kind': 'kind', 'show': '(False)'}), '(info, kind=kind, show=False)\n', (2646, 2675), False, 'from mne.viz import plot_sensors\n'), ((2690, 2735), 'numpy.array', 'np.array', (["[x['loc'][:3] for x in info['chs']]"], {}), "([x['loc'][:3] for x in info['chs']])\n", (2698, 2735), True, 'import numpy as np\n'), ((7975, 7996), 'numpy.arange', 'np.arange', (['n_channels'], {}), '(n_channels)\n', (7984, 7996), True, 'import numpy as np\n'), ((8941, 8972), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['connectivity'], {}), '(connectivity)\n', (8958, 8972), False, 'from scipy import sparse, signal\n'), ((11033, 11087), 'skimage.measure.label', 'label', (['mat[ch, :, :]'], {'connectivity': '(1)', 'background': '(False)'}), '(mat[ch, :, :], connectivity=1, background=False)\n', (11038, 11087), False, 'from skimage.measure import label\n'), ((12064, 12091), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['matrix', 'sd'], {}), '(matrix, sd)\n', (12079, 12091), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((16800, 16872), 'numpy.stack', 'np.stack', (['[tfr.data[:, freq_slice, time_slice] for tfr in data1]'], {'axis': '(0)'}), '([tfr.data[:, freq_slice, time_slice] for tfr in data1], axis=0)\n', (16808, 16872), True, 'import numpy as np\n'), ((18302, 18330), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['adjacency'], {}), '(adjacency)\n', (18319, 18330), False, 'from scipy import sparse, signal\n'), ((18591, 18737), 'mne.stats.permutation_cluster_test', 'permutation_cluster_test', (['[data1, data2]'], {'stat_fun': 'stat_fun', 'threshold': 'threshold', 'n_permutations': 'n_permutations', 'out_type': '"""mask"""'}), "([data1, data2], stat_fun=stat_fun, threshold=\n threshold, n_permutations=n_permutations, out_type='mask', **adj_param)\n", (18615, 18737), False, 'from mne.stats import 
permutation_cluster_test, ttest_1samp_no_p\n'), ((19070, 19182), 'borsar.cluster.Clusters', 'Clusters', (['stat.T', '[c.T for c in clusters]', 'cluster_p'], {'info': 'inst.info', 'dimnames': 'dimnames', 'dimcoords': 'dimcoords'}), '(stat.T, [c.T for c in clusters], cluster_p, info=inst.info,\n dimnames=dimnames, dimcoords=dimcoords)\n', (19078, 19182), False, 'from borsar.cluster import Clusters, construct_adjacency_matrix\n'), ((19615, 19726), 'borsar.cluster.Clusters', 'Clusters', (['stat', 'clusters', 'cluster_p'], {'info': 'inst.info', 'dimnames': "['chan', 'freq', 'time']", 'dimcoords': 'dimcoords'}), "(stat, clusters, cluster_p, info=inst.info, dimnames=['chan',\n 'freq', 'time'], dimcoords=dimcoords)\n", (19623, 19726), False, 'from borsar.cluster import Clusters, construct_adjacency_matrix\n'), ((24362, 24379), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (24370, 24379), True, 'import numpy as np\n'), ((24463, 24487), 'numpy.zeros', 'np.zeros', (['n_permutations'], {}), '(n_permutations)\n', (24471, 24487), True, 'import numpy as np\n'), ((25835, 25857), 'numpy.stack', 'np.stack', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (25843, 25857), True, 'import numpy as np\n'), ((27158, 27267), 'borsar.cluster.label.find_clusters', 'find_clusters', (['perm_stat', 'threshold'], {'adjacency': 'adjacency', 'cluster_fun': 'cluster_fun', 'min_adj_ch': 'min_adj_ch'}), '(perm_stat, threshold, adjacency=adjacency, cluster_fun=\n cluster_fun, min_adj_ch=min_adj_ch)\n', (27171, 27267), False, 'from borsar.cluster.label import _get_cluster_fun, find_clusters\n'), ((31324, 31346), 'numpy.stack', 'np.stack', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (31332, 31346), True, 'import numpy as np\n'), ((32079, 32119), 'numpy.percentile', 'np.percentile', (['stats', 'percentile'], {'axis': '(0)'}), '(stats, percentile, axis=0)\n', (32092, 32119), True, 'import numpy as np\n'), ((1003, 1040), 'os.path.join', 'os.path.join', (['chan_path', 'good_file[0]'], 
{}), '(chan_path, good_file[0])\n', (1015, 1040), False, 'import os\n'), ((2845, 2891), 'mne.viz.plot_sensors', 'plot_sensors', (['info'], {'kind': '"""topomap"""', 'show': '(False)'}), "(info, kind='topomap', show=False)\n", (2857, 2891), False, 'from mne.viz import plot_sensors\n'), ((3169, 3196), 'numpy.where', 'np.where', (['adj_matrix[ch, :]'], {}), '(adj_matrix[ch, :])\n', (3177, 3196), True, 'import numpy as np\n'), ((9407, 9442), 'numpy.where', 'np.where', (['connectivity[ch + 1:, ch]'], {}), '(connectivity[ch + 1:, ch])\n', (9415, 9442), True, 'import numpy as np\n'), ((10211, 10264), 'scipy.signal.convolve2d', 'signal.convolve2d', (['mat[ch, :, :]', 'kernel'], {'mode': '"""same"""'}), "(mat[ch, :, :], kernel, mode='same')\n", (10228, 10264), False, 'from scipy import sparse, signal\n'), ((12002, 12036), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['matrix[ch, :]', 'sd'], {}), '(matrix[ch, :], sd)\n', (12017, 12036), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((15039, 15064), 'mne.stats.ttest_1samp_no_p', 'ttest_1samp_no_p', (['data[0]'], {}), '(data[0])\n', (15055, 15064), False, 'from mne.stats import permutation_cluster_test, ttest_1samp_no_p\n'), ((16359, 16391), 'borsar.utils.find_index', 'find_index', (['data1[0].freqs', 'fmin'], {}), '(data1[0].freqs, fmin)\n', (16369, 16391), False, 'from borsar.utils import find_index\n'), ((16461, 16493), 'borsar.utils.find_index', 'find_index', (['data1[0].freqs', 'fmax'], {}), '(data1[0].freqs, fmax)\n', (16471, 16493), False, 'from borsar.utils import find_index\n'), ((16916, 16988), 'numpy.stack', 'np.stack', (['[tfr.data[:, freq_slice, time_slice] for tfr in data2]'], {'axis': '(0)'}), '([tfr.data[:, freq_slice, time_slice] for tfr in data2], axis=0)\n', (16924, 16988), True, 'import numpy as np\n'), ((18229, 18255), 'scipy.sparse.issparse', 'sparse.issparse', (['adjacency'], {}), '(adjacency)\n', (18244, 18255), False, 'from scipy import sparse, signal\n'), ((25700, 
25719), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (25709, 25719), True, 'import numpy as np\n'), ((26129, 26177), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(1)'], {'size': 'signs_size'}), '(0, 1, size=signs_size)\n', (26154, 26177), True, 'import numpy as np\n'), ((29140, 29188), 'scipy.stats.distributions.f.ppf', 'distributions.f.ppf', (['(1.0 - p_threshold)', 'dfn', 'dfd'], {}), '(1.0 - p_threshold, dfn, dfd)\n', (29159, 29188), False, 'from scipy.stats import distributions\n'), ((32203, 32243), 'numpy.percentile', 'np.percentile', (['stats', 'percentile'], {'axis': '(0)'}), '(stats, percentile, axis=0)\n', (32216, 32243), True, 'import numpy as np\n'), ((812, 833), 'os.listdir', 'os.listdir', (['chan_path'], {}), '(chan_path)\n', (822, 833), False, 'import os\n'), ((3384, 3402), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3400, 3402), True, 'import numpy as np\n'), ((10474, 10503), 'numpy.where', 'np.where', (['connectivity[ch, :]'], {}), '(connectivity[ch, :])\n', (10482, 10503), True, 'import numpy as np\n'), ((17956, 18018), 'numpy.stack', 'np.stack', (['[erp.data[:, time_slice].T for erp in data1]'], {'axis': '(0)'}), '([erp.data[:, time_slice].T for erp in data1], axis=0)\n', (17964, 18018), True, 'import numpy as np\n'), ((25785, 25814), 'numpy.roll', 'np.roll', (['orders[-1]'], {'shift': '(-1)'}), '(orders[-1], shift=-1)\n', (25792, 25814), True, 'import numpy as np\n'), ((26531, 26579), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(1)'], {'size': 'signs_size'}), '(0, 1, size=signs_size)\n', (26556, 26579), True, 'import numpy as np\n'), ((28906, 28951), 'scipy.stats.distributions.t.ppf', 'distributions.t.ppf', (['(p_threshold / 2.0)'], {'df': 'df'}), '(p_threshold / 2.0, df=df)\n', (28925, 28951), False, 'from scipy.stats import distributions\n'), ((29820, 29835), 'scipy.stats.f_oneway', 'f_oneway', (['*args'], {}), '(*args)\n', (29828, 29835), False, 'from 
scipy.stats import f_oneway\n'), ((30027, 30043), 'scipy.stats.ttest_rel', 'ttest_rel', (['*args'], {}), '(*args)\n', (30036, 30043), False, 'from scipy.stats import ttest_rel\n'), ((31691, 31729), 'numpy.random.random', 'np.random.random', ([], {'size': '(n_cond, n_obs)'}), '(size=(n_cond, n_obs))\n', (31707, 31729), True, 'import numpy as np\n'), ((17447, 17509), 'numpy.stack', 'np.stack', (['[psd.data[:, freq_slice].T for psd in data1]'], {'axis': '(0)'}), '([psd.data[:, freq_slice].T for psd in data1], axis=0)\n', (17455, 17509), True, 'import numpy as np\n'), ((18036, 18098), 'numpy.stack', 'np.stack', (['[erp.data[:, time_slice].T for erp in data2]'], {'axis': '(0)'}), '([erp.data[:, time_slice].T for erp in data2], axis=0)\n', (18044, 18098), True, 'import numpy as np\n'), ((26823, 26865), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_groups'], {'size': 'n_obs'}), '(0, n_groups, size=n_obs)\n', (26840, 26865), True, 'import numpy as np\n'), ((32390, 32424), 'numpy.percentile', 'np.percentile', (['stats', 'perc'], {'axis': '(0)'}), '(stats, perc, axis=0)\n', (32403, 32424), True, 'import numpy as np\n'), ((2462, 2483), 'numpy.unique', 'np.unique', (['adj_matrix'], {}), '(adj_matrix)\n', (2471, 2483), True, 'import numpy as np\n'), ((2527, 2547), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2535, 2547), True, 'import numpy as np\n'), ((17560, 17622), 'numpy.stack', 'np.stack', (['[psd.data[:, freq_slice].T for psd in data2]'], {'axis': '(0)'}), '([psd.data[:, freq_slice].T for psd in data2], axis=0)\n', (17568, 17622), True, 'import numpy as np\n'), ((31768, 31784), 'numpy.arange', 'np.arange', (['n_obs'], {}), '(n_obs)\n', (31777, 31784), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT style dataset."""
import os
import time
import numpy as np
import torch
from megatron import mpu, print_rank_0
from megatron.data.blendable_dataset import BlendableDataset
from megatron.data.dataset_utils import get_datasets_weights_and_num_samples
from megatron.data.dataset_utils import get_train_valid_test_split_
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                    train_valid_test_num_samples,
                                    seq_length, seed, skip_warmup):
    """Build train, valid, and test datasets.

    For a single data prefix the datasets are built directly; for multiple
    prefixes each one is built separately and the non-empty per-split
    datasets are blended with ``BlendableDataset`` using the parsed weights.
    """
    # Single dataset.
    if len(data_prefix) == 1:
        return _build_train_valid_test_datasets(data_prefix[0],
                                                data_impl, splits_string,
                                                train_valid_test_num_samples,
                                                seq_length, seed, skip_warmup)

    # Blending dataset.
    # Parse the values.
    output = get_datasets_weights_and_num_samples(data_prefix,
                                                  train_valid_test_num_samples)
    prefixes, weights, datasets_train_valid_test_num_samples = output

    # Build individual datasets; iterate prefixes together with their
    # per-split sample counts instead of indexing via range(len(...)).
    train_datasets = []
    valid_datasets = []
    test_datasets = []
    for prefix, num_samples in zip(prefixes,
                                   datasets_train_valid_test_num_samples):
        train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
            prefix, data_impl, splits_string, num_samples,
            seq_length, seed, skip_warmup)
        if train_ds:
            train_datasets.append(train_ds)
        if valid_ds:
            valid_datasets.append(valid_ds)
        if test_ds:
            test_datasets.append(test_ds)

    # Blend (an empty list yields None for that split, as before).
    blending_train_dataset = (BlendableDataset(train_datasets, weights)
                              if train_datasets else None)
    blending_valid_dataset = (BlendableDataset(valid_datasets, weights)
                              if valid_datasets else None)
    blending_test_dataset = (BlendableDataset(test_datasets, weights)
                             if test_datasets else None)

    return (blending_train_dataset, blending_valid_dataset,
            blending_test_dataset)
def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                     train_valid_test_num_samples,
                                     seq_length, seed, skip_warmup):
    """Build train, valid, and test datasets for a single data prefix."""
    # Indexed dataset.
    indexed_dataset = get_indexed_dataset_(data_prefix, data_impl,
                                           skip_warmup)

    total_num_of_documents = indexed_dataset.sizes.shape[0]
    splits = get_train_valid_test_split_(splits_string,
                                         total_num_of_documents)

    # Print stats about the splits.
    print_rank_0(' > dataset split:')
    for split_name, split_index in (('train', 0), ('validation', 1),
                                    ('test', 2)):
        print_rank_0('    {}:'.format(split_name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(
                         splits[split_index], splits[split_index + 1],
                         splits[split_index + 1] - splits[split_index]))

    def _make_split_dataset(index, name):
        # an empty split (no documents assigned) yields None
        if splits[index + 1] <= splits[index]:
            return None
        documents = np.arange(start=splits[index], stop=splits[index + 1],
                              step=1, dtype=np.int32)
        return GPTDataset(name, data_prefix, documents, indexed_dataset,
                          train_valid_test_num_samples[index],
                          seq_length, seed)

    train_dataset = _make_split_dataset(0, 'train')
    valid_dataset = _make_split_dataset(1, 'valid')
    test_dataset = _make_split_dataset(2, 'test')

    return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
    """Build indexed dataset and report how long building it took."""
    print_rank_0(' > building dataset index ...')
    build_start = time.time()
    indexed_dataset = make_indexed_dataset(data_prefix, data_impl,
                                           skip_warmup)
    elapsed = time.time() - build_start
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(elapsed))
    print_rank_0('    number of documents: {}'.format(
        indexed_dataset.sizes.shape[0]))
    return indexed_dataset
class GPTDataset(torch.utils.data.Dataset):
    """Serves fixed-length token sequences from an indexed document dataset.

    Three index mappings describe how documents are concatenated and
    shuffled into samples: ``doc_idx`` (document order), ``sample_idx``
    (per-sample start document and in-document offset) and ``shuffle_idx``
    (random sample order).
    """

    def __init__(self, name, data_prefix, documents, indexed_dataset,
                 num_samples, seq_length, seed):
        self.name = name
        self.indexed_dataset = indexed_dataset

        # Checks: document ids must be valid indices into the dataset.
        assert np.min(documents) >= 0
        assert np.max(documents) < indexed_dataset.sizes.shape[0]

        # Build index mappings.
        self.doc_idx, self.sample_idx, self.shuffle_idx = \
            _build_index_mappings(
                self.name, data_prefix, documents, self.indexed_dataset.sizes,
                num_samples, seq_length, seed)

    def __len__(self):
        # -1 is due to data structure used to retieve the index:
        #    sample i --> [sample_idx[i], sample_idx[i+1])
        return self.sample_idx.shape[0] - 1

    def __getitem__(self, idx):
        # Map the requested index through the shuffle table.
        shuffled = self.shuffle_idx[idx]
        # First/last document positions and in-document offsets
        # delimiting this sample.
        first_doc, first_offset = self.sample_idx[shuffled]
        last_doc, last_offset = self.sample_idx[shuffled + 1]

        if first_doc == last_doc:
            # The whole sample lives inside a single document.
            sample = self.indexed_dataset.get(
                self.doc_idx[first_doc], offset=first_offset,
                length=last_offset - first_offset + 1)
        else:
            # Sample spans documents: tail of the first, all middle
            # documents in full, and the head of the last.
            pieces = [self.indexed_dataset.get(self.doc_idx[first_doc],
                                               offset=first_offset)]
            for doc in range(first_doc + 1, last_doc):
                pieces.append(self.indexed_dataset.get(self.doc_idx[doc]))
            pieces.append(self.indexed_dataset.get(self.doc_idx[last_doc],
                                                   length=last_offset + 1))
            sample = np.concatenate(pieces)

        return {'text': np.array(sample, dtype=np.int64)}
def _build_index_mappings(name, data_prefix, documents, sizes,
num_samples, seq_length, seed):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
_filename = data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_length)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0:
if (not os.path.isfile(doc_idx_filename)) or \
(not os.path.isfile(sample_idx_filename)) or \
(not os.path.isfile(shuffle_idx_filename)):
print_rank_0(' > WARNING: could not find index map files, building '
'the indices on rank 0 ...')
# For the last epoch, decide whether include the entire epoch
# in the global shuffle or not.
# If we need only one epoch, then separating last epoch does
# not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(' > only one epoch required, setting '
'separate_last_epoch to False', flush=True)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - 1) // seq_length
last_epoch_num_samples = num_samples - \
num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, \
'last epoch number of samples should be non-negative.'
num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
assert last_epoch_num_samples < (num_samples_per_epoch + 1), \
'last epoch number of samples exceeded max value.'
# If we have less than 80% of the samples for the last epoch,
# seperate out the epoch and treat it differently.
# Note: the 80% number is just based on common sense and can
# be adjusted if needed.
separate_last_epoch = (last_epoch_num_samples <
int(0.80 * num_samples_per_epoch))
if separate_last_epoch:
string = ' > last epoch number of samples ({}) is smaller '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to True'
else:
string = ' > last epoch number of samples ({}) is larger '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to False'
print(string.format(last_epoch_num_samples,
num_samples_per_epoch), flush=True)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
separate_last_epoch)
np.save(doc_idx_filename, doc_idx, allow_pickle=True)
print_rank_0(' > elasped time to build and save doc-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
from megatron.data import helpers
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch)
# sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
# num_epochs, tokens_per_epoch)
np.save(sample_idx_filename, sample_idx, allow_pickle=True)
print_rank_0(' > elasped time to build and save sample-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# shuffle-idx.
start_time = time.time()
# -1 is due to data structure used to retieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(num_samples_,
sample_idx.shape[0] - 1, np_rng)
np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
print_rank_0(' > elasped time to build and save shuffle-idx mapping'
' (seconds): {:4f}'.format(time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))
# Load mappings.
start_time = time.time()
print_rank_0(' > loading doc-idx mapping from {}'.format(
doc_idx_filename))
doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading sample-idx mapping from {}'.format(
sample_idx_filename))
sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading shuffle-idx mapping from {}'.format(
shuffle_idx_filename))
shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
sample_idx.shape[0]))
print_rank_0(' total number of epochs: {}'.format(num_epochs))
return doc_idx, sample_idx, shuffle_idx
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples):
"""Based on number of samples and sequence lenght, calculate how many
epochs will be needed."""
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - 1) // seq_length) >= num_samples:
return num_epochs
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
"""Build an array with length = number-of-epochs * number-of-dcuments.
Each index is mapped to a corresponding document."""
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
np_rng.shuffle(doc_idx)
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch):
"""Sample index mapping is a 2D array with sizes
[number-of-samples + 1, 2] where [..., 0] contains
the index into `doc_idx` and [..., 1] is the
starting offset in that document."""
# Total number of samples. For -1 see comments in `_num_epochs`.
num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length
sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
# Index into sample_idx.
sample_index = 0
# Index into doc_idx.
doc_idx_index = 0
# Begining offset for each document.
doc_offset = 0
# Start with first document and no offset.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
while sample_index <= num_samples:
# Start with a fresh sequence.
remaining_seq_length = seq_length + 1
while remaining_seq_length != 0:
# Get the document length.
doc_id = doc_idx[doc_idx_index]
doc_length = sizes[doc_id] - doc_offset
# And add it to the current sequence.
remaining_seq_length -= doc_length
# If we have more than a full sequence, adjust offset and set
# remaining length to zero so we return from the while loop.
# Note that -1 here is for the same reason we have -1 in
# `_num_epochs` calculations.
if remaining_seq_length <= 0:
doc_offset += (remaining_seq_length + doc_length - 1)
remaining_seq_length = 0
else:
# Otherwise, start from the begining of the next document.
doc_idx_index += 1
doc_offset = 0
# Record the sequence.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
return sample_idx
def _build_shuffle_idx(num_samples, total_size, np_rng):
"""Build the range [0, size) and shuffle."""
print(' > building shuffle index with split [0, {}) and [{}, {}) '
'...'.format(num_samples, num_samples, total_size), flush=True)
dtype_ = np.uint32
if total_size >= (np.iinfo(np.uint32).max - 1):
dtype_ = np.int64
shuffle_idx_first = np.arange(start=0, stop=num_samples,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_first)
if num_samples == total_size:
return shuffle_idx_first
shuffle_idx_last = np.arange(start=num_samples, stop=total_size,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_last)
return np.concatenate((shuffle_idx_first, shuffle_idx_last))
| [
"numpy.load",
"numpy.sum",
"megatron.data.helpers.build_sample_idx",
"megatron.mpu.get_pipeline_model_parallel_group",
"numpy.iinfo",
"os.path.isfile",
"numpy.arange",
"megatron.mpu.get_tensor_model_parallel_group",
"torch.distributed.get_world_size",
"megatron.mpu.get_data_parallel_group",
"meg... | [((1702, 1781), 'megatron.data.dataset_utils.get_datasets_weights_and_num_samples', 'get_datasets_weights_and_num_samples', (['data_prefix', 'train_valid_test_num_samples'], {}), '(data_prefix, train_valid_test_num_samples)\n', (1738, 1781), False, 'from megatron.data.dataset_utils import get_datasets_weights_and_num_samples\n'), ((3483, 3549), 'megatron.data.dataset_utils.get_train_valid_test_split_', 'get_train_valid_test_split_', (['splits_string', 'total_num_of_documents'], {}), '(splits_string, total_num_of_documents)\n', (3510, 3549), False, 'from megatron.data.dataset_utils import get_train_valid_test_split_\n'), ((3591, 3624), 'megatron.print_rank_0', 'print_rank_0', (['""" > dataset split:"""'], {}), "(' > dataset split:')\n", (3603, 3624), False, 'from megatron import mpu, print_rank_0\n'), ((4836, 4881), 'megatron.print_rank_0', 'print_rank_0', (['""" > building dataset index ..."""'], {}), "(' > building dataset index ...')\n", (4848, 4881), False, 'from megatron import mpu, print_rank_0\n'), ((4900, 4911), 'time.time', 'time.time', ([], {}), '()\n', (4909, 4911), False, 'import time\n'), ((4934, 4991), 'megatron.data.indexed_dataset.make_dataset', 'make_indexed_dataset', (['data_prefix', 'data_impl', 'skip_warmup'], {}), '(data_prefix, data_impl, skip_warmup)\n', (4954, 4991), True, 'from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset\n'), ((8177, 8209), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (8198, 8209), True, 'import numpy as np\n'), ((13305, 13331), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['[1]'], {}), '([1])\n', (13326, 13331), False, 'import torch\n'), ((13703, 13714), 'time.time', 'time.time', ([], {}), '()\n', (13712, 13714), False, 'import time\n'), ((13818, 13877), 'numpy.load', 'np.load', (['doc_idx_filename'], {'allow_pickle': '(True)', 'mmap_mode': '"""r"""'}), "(doc_idx_filename, allow_pickle=True, mmap_mode='r')\n", (13825, 13877), 
True, 'import numpy as np\n'), ((13990, 14052), 'numpy.load', 'np.load', (['sample_idx_filename'], {'allow_pickle': '(True)', 'mmap_mode': '"""r"""'}), "(sample_idx_filename, allow_pickle=True, mmap_mode='r')\n", (13997, 14052), True, 'import numpy as np\n'), ((14168, 14231), 'numpy.load', 'np.load', (['shuffle_idx_filename'], {'allow_pickle': '(True)', 'mmap_mode': '"""r"""'}), "(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')\n", (14175, 14231), True, 'import numpy as np\n'), ((14638, 14662), 'numpy.sum', 'np.sum', (['sizes[documents]'], {}), '(sizes[documents])\n', (14644, 14662), True, 'import numpy as np\n'), ((15873, 15918), 'numpy.concatenate', 'np.concatenate', (['(doc_idx_first, doc_idx_last)'], {}), '((doc_idx_first, doc_idx_last))\n', (15887, 15918), True, 'import numpy as np\n'), ((16377, 16423), 'numpy.zeros', 'np.zeros', (['[num_samples + 1, 2]'], {'dtype': 'np.int32'}), '([num_samples + 1, 2], dtype=np.int32)\n', (16385, 16423), True, 'import numpy as np\n'), ((18277, 18335), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'num_samples', 'step': '(1)', 'dtype': 'dtype_'}), '(start=0, stop=num_samples, step=1, dtype=dtype_)\n', (18286, 18335), True, 'import numpy as np\n'), ((18499, 18566), 'numpy.arange', 'np.arange', ([], {'start': 'num_samples', 'stop': 'total_size', 'step': '(1)', 'dtype': 'dtype_'}), '(start=num_samples, stop=total_size, step=1, dtype=dtype_)\n', (18508, 18566), True, 'import numpy as np\n'), ((18649, 18702), 'numpy.concatenate', 'np.concatenate', (['(shuffle_idx_first, shuffle_idx_last)'], {}), '((shuffle_idx_first, shuffle_idx_last))\n', (18663, 18702), True, 'import numpy as np\n'), ((2558, 2599), 'megatron.data.blendable_dataset.BlendableDataset', 'BlendableDataset', (['train_datasets', 'weights'], {}), '(train_datasets, weights)\n', (2574, 2599), False, 'from megatron.data.blendable_dataset import BlendableDataset\n'), ((2690, 2731), 'megatron.data.blendable_dataset.BlendableDataset', 'BlendableDataset', 
(['valid_datasets', 'weights'], {}), '(valid_datasets, weights)\n', (2706, 2731), False, 'from megatron.data.blendable_dataset import BlendableDataset\n'), ((2819, 2859), 'megatron.data.blendable_dataset.BlendableDataset', 'BlendableDataset', (['test_datasets', 'weights'], {}), '(test_datasets, weights)\n', (2835, 2859), False, 'from megatron.data.blendable_dataset import BlendableDataset\n'), ((8666, 8694), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (8692, 8694), False, 'import torch\n'), ((4167, 4245), 'numpy.arange', 'np.arange', ([], {'start': 'splits[index]', 'stop': 'splits[index + 1]', 'step': '(1)', 'dtype': 'np.int32'}), '(start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32)\n', (4176, 4245), True, 'import numpy as np\n'), ((5601, 5618), 'numpy.min', 'np.min', (['documents'], {}), '(documents)\n', (5607, 5618), True, 'import numpy as np\n'), ((5639, 5656), 'numpy.max', 'np.max', (['documents'], {}), '(documents)\n', (5645, 5656), True, 'import numpy as np\n'), ((7441, 7468), 'numpy.concatenate', 'np.concatenate', (['sample_list'], {}), '(sample_list)\n', (7455, 7468), True, 'import numpy as np\n'), ((7494, 7526), 'numpy.array', 'np.array', (['sample'], {'dtype': 'np.int64'}), '(sample, dtype=np.int64)\n', (7502, 7526), True, 'import numpy as np\n'), ((8882, 8986), 'megatron.print_rank_0', 'print_rank_0', (['""" > WARNING: could not find index map files, building the indices on rank 0 ..."""'], {}), "(\n ' > WARNING: could not find index map files, building the indices on rank 0 ...'\n )\n", (8894, 8986), False, 'from megatron import mpu, print_rank_0\n'), ((11232, 11243), 'time.time', 'time.time', ([], {}), '()\n', (11241, 11243), False, 'import time\n'), ((11382, 11435), 'numpy.save', 'np.save', (['doc_idx_filename', 'doc_idx'], {'allow_pickle': '(True)'}), '(doc_idx_filename, doc_idx, allow_pickle=True)\n', (11389, 11435), True, 'import numpy as np\n'), ((11643, 11654), 'time.time', 'time.time', ([], 
{}), '()\n', (11652, 11654), False, 'import time\n'), ((11907, 11993), 'megatron.data.helpers.build_sample_idx', 'helpers.build_sample_idx', (['sizes', 'doc_idx', 'seq_length', 'num_epochs', 'tokens_per_epoch'], {}), '(sizes, doc_idx, seq_length, num_epochs,\n tokens_per_epoch)\n', (11931, 11993), False, 'from megatron.data import helpers\n'), ((12199, 12258), 'numpy.save', 'np.save', (['sample_idx_filename', 'sample_idx'], {'allow_pickle': '(True)'}), '(sample_idx_filename, sample_idx, allow_pickle=True)\n', (12206, 12258), True, 'import numpy as np\n'), ((12470, 12481), 'time.time', 'time.time', ([], {}), '()\n', (12479, 12481), False, 'import time\n'), ((12937, 12998), 'numpy.save', 'np.save', (['shuffle_idx_filename', 'shuffle_idx'], {'allow_pickle': '(True)'}), '(shuffle_idx_filename, shuffle_idx, allow_pickle=True)\n', (12944, 12998), True, 'import numpy as np\n'), ((13379, 13408), 'megatron.mpu.get_data_parallel_group', 'mpu.get_data_parallel_group', ([], {}), '()\n', (13406, 13408), False, 'from megatron import mpu, print_rank_0\n'), ((13457, 13496), 'megatron.mpu.get_pipeline_model_parallel_group', 'mpu.get_pipeline_model_parallel_group', ([], {}), '()\n', (13494, 13496), False, 'from megatron import mpu, print_rank_0\n'), ((13539, 13573), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (13571, 13573), False, 'import torch\n'), ((5178, 5189), 'time.time', 'time.time', ([], {}), '()\n', (5187, 5189), False, 'import time\n'), ((8717, 8749), 'os.path.isfile', 'os.path.isfile', (['doc_idx_filename'], {}), '(doc_idx_filename)\n', (8731, 8749), False, 'import os\n'), ((8772, 8807), 'os.path.isfile', 'os.path.isfile', (['sample_idx_filename'], {}), '(sample_idx_filename)\n', (8786, 8807), False, 'import os\n'), ((8830, 8866), 'os.path.isfile', 'os.path.isfile', (['shuffle_idx_filename'], {}), '(shuffle_idx_filename)\n', (8844, 8866), False, 'import os\n'), ((14310, 14321), 'time.time', 'time.time', ([], {}), '()\n', 
(14319, 14321), False, 'import time\n'), ((18196, 18215), 'numpy.iinfo', 'np.iinfo', (['np.uint32'], {}), '(np.uint32)\n', (18204, 18215), True, 'import numpy as np\n'), ((13624, 13661), 'megatron.mpu.get_tensor_model_parallel_group', 'mpu.get_tensor_model_parallel_group', ([], {}), '()\n', (13659, 13661), False, 'from megatron import mpu, print_rank_0\n'), ((11565, 11576), 'time.time', 'time.time', ([], {}), '()\n', (11574, 11576), False, 'import time\n'), ((12391, 12402), 'time.time', 'time.time', ([], {}), '()\n', (12400, 12402), False, 'import time\n'), ((13132, 13143), 'time.time', 'time.time', ([], {}), '()\n', (13141, 13143), False, 'import time\n')] |
from sacred import Experiment
from Config import config_ingredient
import tensorflow as tf
import numpy as np
import os
import Datasets
from Input import Input as Input
from Input import batchgenerators as batchgen
import Utils
import Models.UnetSpectrogramSeparator
import Models.UnetAudioSeparator
import cPickle as pickle
import Test
import Evaluate
import functools
from tensorflow.contrib.signal.python.ops import window_ops
# Sacred experiment for the whole training run; `config_ingredient`
# supplies `model_config` to the @capture-decorated functions below.
ex = Experiment('Waveunet Training', ingredients=[config_ingredient])
@config_ingredient.capture
def train(model_config, experiment_id, sup_dataset, load_model=None):
    """Train the separator for one epoch on `sup_dataset`.

    Builds the TF1 graph (U-Net separator, per-source MSE loss, Adam),
    runs model_config["epoch_it"] iterations fed by a background batch
    generator, checkpoints the model, and tears the graph down.

    Args:
        model_config: injected by sacred's config ingredient.
        experiment_id: used for log/checkpoint directory names.
        sup_dataset: supervised training data fed to the batch generator.
        load_model: optional checkpoint path to resume from.

    Returns:
        Path of the checkpoint saved at the end of the epoch.
    """
    # Determine input and output shapes
    disc_input_shape = [model_config["batch_size"], model_config["num_frames"], 0] # Shape of input
    if model_config["network"] == "unet":
        separator_class = Models.UnetAudioSeparator.UnetAudioSeparator(model_config["num_layers"], model_config["num_initial_filters"],
                                                                   output_type=model_config["output_type"],
                                                                   context=model_config["context"],
                                                                   mono=model_config["mono_downmix"],
                                                                   upsampling=model_config["upsampling"],
                                                                   num_sources=model_config["num_sources"],
                                                                   filter_size=model_config["filter_size"],
                                                                   merge_filter_size=model_config["merge_filter_size"])
    elif model_config["network"] == "unet_spectrogram":
        separator_class = Models.UnetSpectrogramSeparator.UnetSpectrogramSeparator(model_config["num_layers"], model_config["num_initial_filters"],
                                                                   mono=model_config["mono_downmix"],
                                                                   num_sources=model_config["num_sources"])
    else:
        raise NotImplementedError
    # The model determines how much temporal context the input needs
    # relative to the output it produces.
    sep_input_shape, sep_output_shape = separator_class.get_padding(np.array(disc_input_shape))
    separator_func = separator_class.get_output
    # Creating the batch generators
    assert((sep_input_shape[1] - sep_output_shape[1]) % 2 == 0)
    pad_durations = np.array([float((sep_input_shape[1] - sep_output_shape[1])/2), 0, 0]) / float(model_config["expected_sr"]) # Input context that the input audio has to be padded ON EACH SIDE
    sup_batch_gen = batchgen.BatchGen_Paired(
        model_config,
        sup_dataset,
        sep_input_shape,
        sep_output_shape,
        pad_durations[0]
    )
    print("Starting worker")
    sup_batch_gen.start_workers()
    print("Started worker!")
    # Placeholders and input normalisation
    mix_context, sources = Input.get_multitrack_placeholders(sep_output_shape, model_config["num_sources"], sep_input_shape, "sup")
    #tf.summary.audio("mix", mix_context, 22050, collections=["sup"])
    mix = Utils.crop(mix_context, sep_output_shape)
    print("Training...")
    # BUILD MODELS
    # Separator
    separator_sources = separator_func(mix_context, True, not model_config["raw_audio_loss"], reuse=False) # Sources are output in order [acc, voice] for voice separation, [bass, drums, other, vocals] for multi-instrument separation
    # Supervised objective: MSE in log-normalized magnitude space
    separator_loss = 0
    for (real_source, sep_source) in zip(sources, separator_sources):
        if model_config["network"] == "unet_spectrogram" and not model_config["raw_audio_loss"]:
            # Spectrogram network without raw-audio loss: compare STFT
            # magnitudes of the target against the predicted magnitudes.
            window = functools.partial(window_ops.hann_window, periodic=True)
            stfts = tf.contrib.signal.stft(tf.squeeze(real_source, 2), frame_length=1024, frame_step=768,
                                   fft_length=1024, window_fn=window)
            real_mag = tf.abs(stfts)
            separator_loss += tf.reduce_mean(tf.abs(real_mag - sep_source))
        else:
            # Otherwise plain MSE in the output domain.
            separator_loss += tf.reduce_mean(tf.square(real_source - sep_source))
    separator_loss = separator_loss / float(len(sources)) # Normalise by number of sources
    # TRAINING CONTROL VARIABLES
    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False, dtype=tf.int64)
    increment_global_step = tf.assign(global_step, global_step + 1)
    # Set up optimizers
    separator_vars = Utils.getTrainableVariables("separator")
    print("Sep_Vars: " + str(Utils.getNumParams(separator_vars)))
    print("Num of variables" + str(len(tf.global_variables())))
    # Run batch-norm style update ops before each optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        with tf.variable_scope("separator_solver"):
            separator_solver = tf.train.AdamOptimizer(learning_rate=model_config["init_sup_sep_lr"]).minimize(separator_loss, var_list=separator_vars)
    # SUMMARIES
    tf.summary.scalar("sep_loss", separator_loss, collections=["sup"])
    sup_summaries = tf.summary.merge_all(key='sup')
    # Start session and queue input threads
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(model_config["log_dir"] + os.path.sep + str(experiment_id),graph=sess.graph)
    # CHECKPOINTING
    # Load pretrained model to continue training, if we are supposed to
    if load_model != None:
        restorer = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V2)
        print("Num of variables" + str(len(tf.global_variables())))
        restorer.restore(sess, load_model)
        print('Pre-trained model restored from file ' + load_model)
    saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V2)
    # Start training loop
    run = True
    _global_step = sess.run(global_step)
    _init_step = _global_step
    it = 0
    while run:
        # TRAIN SEPARATOR
        sup_batch = sup_batch_gen.get_batch()
        # sup_batch[0] is the mixture; the remainder are the source targets.
        feed = {i:d for i,d in zip(sources, sup_batch[1:])}
        feed.update({mix_context : sup_batch[0]})
        _, _sup_summaries = sess.run([separator_solver, sup_summaries], feed)
        writer.add_summary(_sup_summaries, global_step=_global_step)
        # Increment step counter, check if maximum iterations per epoch is achieved and stop in that case
        _global_step = sess.run(increment_global_step)
        if _global_step - _init_step > model_config["epoch_it"]:
            run = False
            print("Finished training phase, stopping batch generators")
            sup_batch_gen.stop_workers()
    # Epoch finished - Save model
    print("Finished epoch!")
    save_path = saver.save(sess, model_config["model_base_dir"] + os.path.sep + str(experiment_id) + os.path.sep + str(experiment_id), global_step=int(_global_step))
    # Close session, clear computational graph
    writer.flush()
    writer.close()
    sess.close()
    tf.reset_default_graph()
    return save_path
@config_ingredient.capture
def optimise(model_config, experiment_id, dataset):
    """Run the full training schedule with early stopping.

    Trains in two stages (a normal stage, then a fine-tuning stage with
    doubled batch/cache sizes and a much smaller learning rate). Each
    stage stops once the validation loss fails to improve for
    model_config["worse_epochs"] consecutive epochs. Returns the best
    model checkpoint path and its loss on the test set.
    """
    epoch_count = 0
    best_loss = 10000
    model_path = None
    best_model_path = None
    for stage in range(2):
        worse_epochs = 0
        if stage == 1:
            # Fine-tuning stage: larger batches/cache, tiny learning rate.
            print("Finished first round of training, now entering fine-tuning stage")
            for key in ("batch_size", "cache_size", "min_replacement_rate"):
                model_config[key] *= 2
            model_config["init_sup_sep_lr"] = 1e-5
        # Early stopping on validation set after a few epochs
        while worse_epochs < model_config["worse_epochs"]:
            print("EPOCH: " + str(epoch_count))
            model_path = train(sup_dataset=dataset["train"], load_model=model_path)
            curr_loss = Test.test(model_config, model_folder=str(experiment_id), audio_list=dataset["valid"], load_model=model_path)
            epoch_count += 1
            if curr_loss < best_loss:
                worse_epochs = 0
                print("Performance on validation set improved from " + str(best_loss) + " to " + str(curr_loss))
                best_model_path = model_path
                best_loss = curr_loss
            else:
                worse_epochs += 1
                print("Performance on validation set worsened to " + str(curr_loss))
    print("TRAINING FINISHED - TESTING WITH BEST MODEL " + best_model_path)
    test_loss = Test.test(model_config, model_folder=str(experiment_id), audio_list=dataset["test"], load_model=best_model_path)
    return best_model_path, test_loss
@ex.automain
def run(cfg):
    """Sacred entry point.

    Builds (or loads from a pickle cache) the dataset dictionary with
    "train"/"valid"/"test" keys, each a list of per-song stem tuples,
    trains the separator with early stopping via `optimise`, then writes
    MUSDB source estimates with the best model.
    """
    model_config = cfg["model_config"]
    print("SCRIPT START")
    # Create subfolders if they do not exist to save results
    for folder in [model_config["model_base_dir"], model_config["log_dir"]]:
        if not os.path.exists(folder):
            os.makedirs(folder)
    # Set up data input
    pickle_file = 'dataset_multi.pkl' if model_config["task"] == "multi_instrument" else "dataset_voice.pkl"
    if os.path.exists(pickle_file): # Check whether our dataset file is already there, then load it
        # FIX: the caches are written with mode 'wb' below, so they must be
        # read back in binary mode too ('r' breaks on Python 3 and on
        # Windows Python 2).
        with open(pickle_file, 'rb') as file:
            dataset = pickle.load(file)
        print("Loaded dataset from pickle!")
    else: # Otherwise create the dataset pickle
        # Check if MUSDB was prepared before
        if os.path.exists("dataset_musdb_allstems.pkl"):
            with open("dataset_musdb_allstems.pkl", 'rb') as file:
                dataset = pickle.load(file)
            print("Loaded MUSDB base dataset from pickle!")
        else: # We have to prepare the MUSDB dataset
            print("Preparing MUSDB dataset! This could take a while...")
            dsd_train, dsd_test = Datasets.getMUSDB(model_config["musdb_path"]) # List of (mix, acc, bass, drums, other, vocal) tuples
            # Pick 25 random songs for validation from MUSDB train set (this is always the same selection each time since we fix the random seed!)
            val_idx = np.random.choice(len(dsd_train), size=25, replace=False)
            train_idx = [i for i in range(len(dsd_train)) if i not in val_idx]
            print("Validation with MUSDB training songs no. " + str(train_idx))
            # Draw randomly from datasets
            dataset = dict()
            dataset["train"] = [dsd_train[i] for i in train_idx]
            dataset["valid"] = [dsd_train[i] for i in val_idx]
            dataset["test"] = dsd_test
            # Write full MUSDB dataset
            with open("dataset_musdb_allstems.pkl", 'wb') as file:
                pickle.dump(dataset, file)
            print("Wrote MUSDB base dataset!")
        # MUSDB base dataset loaded now, now create task-specific dataset based on that
        if model_config["task"] == "multi_instrument":
            # Write multi instrument dataset
            # Remove acc stem from MUSDB
            for subset in ["train", "valid", "test"]:
                for i in range(len(dataset[subset])):
                    dataset[subset][i] = (dataset[subset][i][0], dataset[subset][i][2], dataset[subset][i][3], dataset[subset][i][4], dataset[subset][i][5])
            with open("dataset_multi.pkl", 'wb') as file:
                pickle.dump(dataset,file)
            print("Wrote multi-instrument dataset!")
        else:
            assert(model_config["task"] == "voice")
            # Remove other instruments from base MUSDB
            for subset in ["train", "valid", "test"]:
                for i in range(len(dataset[subset])):
                    dataset[subset][i] = (dataset[subset][i][0], dataset[subset][i][1], dataset[subset][i][5])
            # Prepare CCMixter
            print("Preparing CCMixter dataset!")
            ccm = Datasets.getCCMixter("CCMixter.xml")
            dataset["train"].extend(ccm)
            # Save voice dataset
            with open("dataset_voice.pkl", 'wb') as file:
                pickle.dump(dataset, file)
            print("Wrote voice separation dataset!")
    print("LOADED DATASET")
    # The dataset structure is a dictionary with "train", "valid", "test" keys, whose entries are lists, where each element represents a song.
    # Each song is represented as a tuple of (mix, acc, vocal) or (mix, bass, drums, other, vocal) depending on the task.
    # Each stem is a Sample object (see Sample class). Custom datasets can be fed by converting it to this data structure, then calling optimise
    # Optimize in a supervised fashion until validation loss worsens
    sup_model_path, sup_loss = optimise(dataset=dataset)
    print("Supervised training finished! Saved model at " + sup_model_path + ". Performance: " + str(sup_loss))
    # Evaluate trained model on MUSDB
    Evaluate.produce_musdb_source_estimates(model_config, sup_model_path, model_config["musdb_path"], model_config["estimates_path"])
"Datasets.getCCMixter",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"tensorflow.constant_initializer",
"cPickle.load",
"Utils.getNumParams",
"Datasets.getMUSDB",
"tensorflow.ConfigProto",
"tensorflow.assign",
"tensorflow.global_variables",
"tensorflow.abs",
"Utils.crop",
"... | [((438, 502), 'sacred.Experiment', 'Experiment', (['"""Waveunet Training"""'], {'ingredients': '[config_ingredient]'}), "('Waveunet Training', ingredients=[config_ingredient])\n", (448, 502), False, 'from sacred import Experiment\n'), ((2599, 2707), 'Input.batchgenerators.BatchGen_Paired', 'batchgen.BatchGen_Paired', (['model_config', 'sup_dataset', 'sep_input_shape', 'sep_output_shape', 'pad_durations[0]'], {}), '(model_config, sup_dataset, sep_input_shape,\n sep_output_shape, pad_durations[0])\n', (2623, 2707), True, 'from Input import batchgenerators as batchgen\n'), ((2914, 3023), 'Input.Input.get_multitrack_placeholders', 'Input.get_multitrack_placeholders', (['sep_output_shape', "model_config['num_sources']", 'sep_input_shape', '"""sup"""'], {}), "(sep_output_shape, model_config[\n 'num_sources'], sep_input_shape, 'sup')\n", (2947, 3023), True, 'from Input import Input as Input\n'), ((3099, 3140), 'Utils.crop', 'Utils.crop', (['mix_context', 'sep_output_shape'], {}), '(mix_context, sep_output_shape)\n', (3109, 3140), False, 'import Utils\n'), ((4443, 4482), 'tensorflow.assign', 'tf.assign', (['global_step', '(global_step + 1)'], {}), '(global_step, global_step + 1)\n', (4452, 4482), True, 'import tensorflow as tf\n'), ((4529, 4569), 'Utils.getTrainableVariables', 'Utils.getTrainableVariables', (['"""separator"""'], {}), "('separator')\n", (4556, 4569), False, 'import Utils\n'), ((4718, 4760), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4735, 4760), True, 'import tensorflow as tf\n'), ((5031, 5097), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""sep_loss"""', 'separator_loss'], {'collections': "['sup']"}), "('sep_loss', separator_loss, collections=['sup'])\n", (5048, 5097), True, 'import tensorflow as tf\n'), ((5118, 5149), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""sup"""'}), "(key='sup')\n", (5138, 5149), True, 'import tensorflow as tf\n'), 
((5208, 5224), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5222, 5224), True, 'import tensorflow as tf\n'), ((5277, 5302), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (5287, 5302), True, 'import tensorflow as tf\n'), ((7112, 7136), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7134, 7136), True, 'import tensorflow as tf\n'), ((9144, 9171), 'os.path.exists', 'os.path.exists', (['pickle_file'], {}), '(pickle_file)\n', (9158, 9171), False, 'import os\n'), ((12850, 12983), 'Evaluate.produce_musdb_source_estimates', 'Evaluate.produce_musdb_source_estimates', (['model_config', 'sup_model_path', "model_config['musdb_path']", "model_config['estimates_path']"], {}), "(model_config, sup_model_path,\n model_config['musdb_path'], model_config['estimates_path'])\n", (12889, 12983), False, 'import Evaluate\n'), ((2207, 2233), 'numpy.array', 'np.array', (['disc_input_shape'], {}), '(disc_input_shape)\n', (2215, 2233), True, 'import numpy as np\n'), ((4770, 4805), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (4793, 4805), True, 'import tensorflow as tf\n'), ((5316, 5349), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5347, 5349), True, 'import tensorflow as tf\n'), ((5883, 5904), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5902, 5904), True, 'import tensorflow as tf\n'), ((9471, 9515), 'os.path.exists', 'os.path.exists', (['"""dataset_musdb_allstems.pkl"""'], {}), "('dataset_musdb_allstems.pkl')\n", (9485, 9515), False, 'import os\n'), ((3714, 3770), 'functools.partial', 'functools.partial', (['window_ops.hann_window'], {'periodic': '(True)'}), '(window_ops.hann_window, periodic=True)\n', (3731, 3770), False, 'import functools\n'), ((3978, 3991), 'tensorflow.abs', 'tf.abs', (['stfts'], {}), '(stfts)\n', (3984, 3991), True, 'import tensorflow as tf\n'), 
((4354, 4380), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (4377, 4380), True, 'import tensorflow as tf\n'), ((4820, 4857), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""separator_solver"""'], {}), "('separator_solver')\n", (4837, 4857), True, 'import tensorflow as tf\n'), ((5617, 5638), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5636, 5638), True, 'import tensorflow as tf\n'), ((8953, 8972), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (8967, 8972), False, 'import os\n'), ((8986, 9002), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (8997, 9002), False, 'import os\n'), ((9304, 9321), 'cPickle.load', 'pickle.load', (['file'], {}), '(file)\n', (9315, 9321), True, 'import cPickle as pickle\n'), ((9847, 9892), 'Datasets.getMUSDB', 'Datasets.getMUSDB', (["model_config['musdb_path']"], {}), "(model_config['musdb_path'])\n", (9864, 9892), False, 'import Datasets\n'), ((11858, 11894), 'Datasets.getCCMixter', 'Datasets.getCCMixter', (['"""CCMixter.xml"""'], {}), "('CCMixter.xml')\n", (11878, 11894), False, 'import Datasets\n'), ((3814, 3840), 'tensorflow.squeeze', 'tf.squeeze', (['real_source', '(2)'], {}), '(real_source, 2)\n', (3824, 3840), True, 'import tensorflow as tf\n'), ((4037, 4066), 'tensorflow.abs', 'tf.abs', (['(real_mag - sep_source)'], {}), '(real_mag - sep_source)\n', (4043, 4066), True, 'import tensorflow as tf\n'), ((4127, 4162), 'tensorflow.square', 'tf.square', (['(real_source - sep_source)'], {}), '(real_source - sep_source)\n', (4136, 4162), True, 'import tensorflow as tf\n'), ((4599, 4633), 'Utils.getNumParams', 'Utils.getNumParams', (['separator_vars'], {}), '(separator_vars)\n', (4617, 4633), False, 'import Utils\n'), ((9609, 9626), 'cPickle.load', 'pickle.load', (['file'], {}), '(file)\n', (9620, 9626), True, 'import cPickle as pickle\n'), ((10696, 10722), 'cPickle.dump', 'pickle.dump', (['dataset', 'file'], {}), '(dataset, file)\n', (10707, 
10722), True, 'import cPickle as pickle\n'), ((11339, 11365), 'cPickle.dump', 'pickle.dump', (['dataset', 'file'], {}), '(dataset, file)\n', (11350, 11365), True, 'import cPickle as pickle\n'), ((12044, 12070), 'cPickle.dump', 'pickle.dump', (['dataset', 'file'], {}), '(dataset, file)\n', (12055, 12070), True, 'import cPickle as pickle\n'), ((4675, 4696), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4694, 4696), True, 'import tensorflow as tf\n'), ((4890, 4959), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': "model_config['init_sup_sep_lr']"}), "(learning_rate=model_config['init_sup_sep_lr'])\n", (4912, 4959), True, 'import tensorflow as tf\n'), ((5719, 5740), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5738, 5740), True, 'import tensorflow as tf\n')] |
import os
import logging
import pathlib
from dataclasses import dataclass
from typing import List
from types import SimpleNamespace
import parse
import numpy as np
import skimage.measure
from dtoolbioimage import (
Image as dbiImage,
Image3D,
ImageDataSet,
scale_to_uint8
)
from fishtools.utils import extract_nuclei, crop_to_non_empty, select_near_colour
from fishtools.segment import scale_segmentation, cell_mask_from_fishimage
from fishtools.probes import find_probe_locations_3d
logger = logging.getLogger("fishtools")
@dataclass
class FISHImage(object):
    """Pairs the probe channel stacks of a FISH image with its nuclear stack."""
    probes: List[Image3D]
    nuclei: Image3D

    @classmethod
    def from_ids_im_sn(cls, ids, image_name, series_name, nuclear_channel_first):
        """Build a FISHImage from one series of an ImageDataSet.

        The nuclear stack is taken from the first channel when
        nuclear_channel_first is true, otherwise from the last; every
        remaining channel is loaded as a probe stack.
        """
        channel_keys = list(ids.planes_index[image_name][series_name][0].keys())
        total = len(channel_keys)
        if nuclear_channel_first:
            nuclei = ids.get_stack(image_name, series_name, 0, 0)
            probe_idxs = range(1, total)
        else:
            nuclei = ids.get_stack(image_name, series_name, 0, total - 1)
            probe_idxs = range(0, total - 1)
        probes = [
            ids.get_stack(image_name, series_name, 0, idx)
            for idx in probe_idxs
        ]
        return cls(probes, nuclei)

    @classmethod
    def from_stack_fpath(cls, fpath):
        """Load a single stack from disk and wrap it as the only probe."""
        return cls([Image3D.from_file(fpath)], None)
@dataclass
class ManualAnnotatedImage(object):
    """Wrapper around a hand-annotated image loaded from disk."""
    raw_image: dbiImage

    @classmethod
    def from_fpath(cls, fpath):
        """Read the annotated image at *fpath*."""
        return cls(dbiImage.from_file(fpath))

    @property
    def marked_cell_mask(self):
        """Mask of the nuclei marked in the annotation."""
        return extract_nuclei(self.raw_image)
@dataclass
class DataItem(object):
    """Bundle of a FISH image, its deconvolved stack and a manual annotation."""
    fishimage: FISHImage
    deconv_stack: Image3D
    annotation: dbiImage
    config: SimpleNamespace

    def __post_init__(self):
        # Trim a 10px border then crop away empty margins of the annotation.
        trimmed = self.annotation[10:-10, 10:-10]
        self.cropped_im = crop_to_non_empty(trimmed)

    @property
    def good_mask(self):
        """Pixels annotated with the configured 'good' colour."""
        return select_near_colour(self.cropped_im, self.config.good_col)

    @property
    def bad_mask(self):
        """Pixels annotated with the configured 'bad' colour."""
        return select_near_colour(self.cropped_im, self.config.bad_col)

    @property
    def all_mask(self):
        """Union of good and bad annotations (symmetric difference)."""
        return self.bad_mask ^ self.good_mask

    @property
    def maxproj(self):
        """Maximum-intensity projection of the deconvolved stack along z."""
        return np.max(self.deconv_stack, axis=2).view(dbiImage)

    @property
    def scaled_markers(self):
        """Annotation markers rescaled to the projection's coordinates."""
        return scale_segmentation(self.all_mask, self.maxproj)

    def cell_mask(self, params):
        """Segment cells from the FISH image with the given parameters."""
        return cell_mask_from_fishimage(self.fishimage, params).view(dbiImage)

    def probe_locs_2d(self, thresh=100):
        """Probe locations projected to (row, col), dropping the z coordinate."""
        located = find_probe_locations_3d(self.deconv_stack, thresh)
        return [(r, c) for r, c, z in located]
class DataLoader(object):
    """Builds DataItem objects from paths described by a config mapping."""

    def __init__(self, config):
        self.config = SimpleNamespace(**config)
        self.ids = ImageDataSet(self.config.ids_uri)

    def load_by_specifier(self, **kwargs):
        """Assemble the DataItem identified by the template fields in kwargs."""
        cfg = self.config
        deconv_fpath = os.path.join(
            cfg.deconv_dirpath,
            cfg.deconv_fname_template.format(**kwargs),
        )
        deconv_stack = Image3D.from_file(deconv_fpath)
        fishimage = FISHImage.from_ids_im_sn(
            self.ids,
            cfg.image_name_template.format(**kwargs),
            cfg.series_name_template.format(**kwargs),
            True,  # nuclear channel comes first in these datasets
        )
        annotation = dbiImage.from_file(
            os.path.join(
                cfg.annotation_dirpath,
                cfg.annotation_template.format(**kwargs),
            )
        )
        return DataItem(fishimage, deconv_stack, annotation, cfg)
def get_specs(config):
    """Parse annotation filenames into spec dicts.

    Every file in config.annotation_dirpath is matched against
    config.annotation_template; names that do not match are dropped.
    """
    logger.debug(f"Matching with {config.annotation_template}")
    matches = (
        parse.parse(config.annotation_template, fpath.name)
        for fpath in pathlib.Path(config.annotation_dirpath).iterdir()
    )
    return [match.named for match in matches if match is not None]
def get_slice(config, spec):
    """Return a numpy slice covering the opaque region of the slice image."""
    # FIXME - put this in config
    # template = "{expid}-{expid}.png"
    fpath = os.path.join(
        config.annotation_dirpath,
        config.slice_template.format(**spec),
    )
    im = dbiImage.from_file(fpath)
    # Fully-opaque alpha marks the annotated region of interest.
    opaque = (im[:, :, 3] == 255)
    region = skimage.measure.regionprops(skimage.measure.label(opaque))[0]
    rmin, cmin, rmax, cmax = region.bbox
    return np.s_[rmin:rmax, cmin:cmax]
def mask_from_template_and_spec(template, config, spec, sl):
    """Load an annotation layer and return its boolean mask cropped to *sl*.

    A pixel is in the mask when any of its RGB channels is non-zero.
    """
    fpath = os.path.join(config.annotation_dirpath, template.format(**spec))
    layer = dbiImage.from_file(fpath)
    flattened = np.max(layer[:, :, :3], axis=2)
    return (flattened > 0)[sl]
class MultiAnnotationDataLoader(DataLoader):
    def load_by_specifier(self, **kwargs):
        # NOTE(review): stub override -- this intentionally disables the
        # parent DataLoader.load_by_specifier and returns None; callers
        # appear to construct MultiAnnotationDataItem directly instead.
        pass
class MultiAnnotationDataItem(object):
    """Data item backed by separate good/bad/nuclei annotation layers.

    Loads the raw FISH image from an ImageDataSet, the matching
    deconvolved stack from disk, and three boolean annotation masks
    (good, bad, nuclei) cropped to the annotated region.
    """

    def __init__(self, config, spec, use_deconv=True):
        self.config = config
        self.ids = ImageDataSet(self.config.ids_uri)
        # Nuclear channel is taken as the first channel of the series.
        nuclear_channel_first = True
        image_name = self.config.image_name_template.format(**spec)
        series_name = self.config.series_name_template.format(**spec)
        self.fishimage = FISHImage.from_ids_im_sn(
            self.ids, image_name, series_name, nuclear_channel_first
        )
        fname = self.config.deconv_fname_template.format(**spec)
        fpath = os.path.join(self.config.deconv_dirpath, fname)
        self.deconv_stack = Image3D.from_file(fpath)
        # Clamp extreme deconvolution values into [0, 10000].
        self.deconv_stack = np.clip(self.deconv_stack, 0, 10000)
        if self.deconv_stack.shape != self.fishimage.nuclei.shape:
            # Deconvolved stack can be slightly larger; trim to the
            # nuclei stack's dimensions so the two stay aligned.
            logger.warning("Deconv stack doesn't match shape, trimming")
            rdim, cdim, zdim = self.fishimage.nuclei.shape
            self.deconv_stack = self.deconv_stack[:rdim,:cdim,:zdim]
        # Crop window covering the annotated region of the slice image.
        sl = get_slice(config, spec)
        self.good_mask = mask_from_template_and_spec(
            config.good_template,
            config,
            spec,
            sl
        )
        self.bad_mask = mask_from_template_and_spec(
            config.bad_template,
            config,
            spec,
            sl
        )
        self.nuc_mask = mask_from_template_and_spec(
            config.nuc_template,
            config,
            spec,
            sl
        )
        # Probe detection can run on either the deconvolved stack or the
        # first raw probe channel.
        if use_deconv:
            self.probe_stack = self.deconv_stack
        else:
            self.probe_stack = self.fishimage.probes[0]

    @property
    def all_mask(self):
        """Combined annotation mask (symmetric difference of bad and good)."""
        # return self.nuc_mask
        return self.bad_mask ^ self.good_mask

    @property
    def maxproj(self):
        """Maximum-intensity projection of the probe stack along z."""
        return np.max(self.probe_stack, axis=2).view(dbiImage)

    @property
    def scaled_markers(self):
        """Annotation markers rescaled to the projection's coordinates."""
        # print(f"Scaling to {self.maxproj.shape}")
        # print(f"Diag {self.deconv_stack.shape}, {self.fishimage.nuclei.shape}")
        scaled_markers = scale_segmentation(self.all_mask, self.maxproj)
        return scaled_markers

    def cell_mask(self, params):
        """Segment cells from the FISH image with the given parameters."""
        cell_mask = cell_mask_from_fishimage(
            self.fishimage, params
        ).view(dbiImage)
        return cell_mask

    def probe_locs_2d(self, thresh=100):
        """Probe locations projected to (row, col), dropping z."""
        probe_locs_3d = find_probe_locations_3d(self.probe_stack, thresh)
        probe_locs_2d = [(r, c) for r, c, z in probe_locs_3d]
        return probe_locs_2d
def load_multiannotation_di(config, spec, use_deconv=True):
    # Thin factory wrapper around MultiAnnotationDataItem.
    di = MultiAnnotationDataItem(config, spec, use_deconv)
    return di | [
"fishtools.utils.extract_nuclei",
"fishtools.segment.scale_segmentation",
"fishtools.probes.find_probe_locations_3d",
"dtoolbioimage.Image3D.from_file",
"fishtools.utils.crop_to_non_empty",
"dtoolbioimage.ImageDataSet",
"fishtools.utils.select_near_colour",
"fishtools.segment.cell_mask_from_fishimage"... | [((515, 545), 'logging.getLogger', 'logging.getLogger', (['"""fishtools"""'], {}), "('fishtools')\n", (532, 545), False, 'import logging\n'), ((4645, 4691), 'os.path.join', 'os.path.join', (['config.annotation_dirpath', 'fname'], {}), '(config.annotation_dirpath, fname)\n', (4657, 4691), False, 'import os\n'), ((4701, 4726), 'dtoolbioimage.Image.from_file', 'dbiImage.from_file', (['fpath'], {}), '(fpath)\n', (4719, 4726), True, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((5022, 5068), 'os.path.join', 'os.path.join', (['config.annotation_dirpath', 'fname'], {}), '(config.annotation_dirpath, fname)\n', (5034, 5068), False, 'import os\n'), ((5078, 5103), 'dtoolbioimage.Image.from_file', 'dbiImage.from_file', (['fpath'], {}), '(fpath)\n', (5096, 5103), True, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((5121, 5149), 'numpy.max', 'np.max', (['im[:, :, :3]'], {'axis': '(2)'}), '(im[:, :, :3], axis=2)\n', (5127, 5149), True, 'import numpy as np\n'), ((1599, 1624), 'dtoolbioimage.Image.from_file', 'dbiImage.from_file', (['fpath'], {}), '(fpath)\n', (1617, 1624), True, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((1711, 1741), 'fishtools.utils.extract_nuclei', 'extract_nuclei', (['self.raw_image'], {}), '(self.raw_image)\n', (1725, 1741), False, 'from fishtools.utils import extract_nuclei, crop_to_non_empty, select_near_colour\n'), ((1943, 2000), 'fishtools.utils.select_near_colour', 'select_near_colour', (['self.cropped_im', 'self.config.good_col'], {}), '(self.cropped_im, self.config.good_col)\n', (1961, 2000), False, 'from fishtools.utils import extract_nuclei, crop_to_non_empty, select_near_colour\n'), ((2088, 2144), 'fishtools.utils.select_near_colour', 'select_near_colour', (['self.cropped_im', 'self.config.bad_col'], {}), '(self.cropped_im, self.config.bad_col)\n', (2106, 2144), 
False, 'from fishtools.utils import extract_nuclei, crop_to_non_empty, select_near_colour\n'), ((2370, 2399), 'fishtools.utils.crop_to_non_empty', 'crop_to_non_empty', (['small_crop'], {}), '(small_crop)\n', (2387, 2399), False, 'from fishtools.utils import extract_nuclei, crop_to_non_empty, select_near_colour\n'), ((2580, 2627), 'fishtools.segment.scale_segmentation', 'scale_segmentation', (['self.all_mask', 'self.maxproj'], {}), '(self.all_mask, self.maxproj)\n', (2598, 2627), False, 'from fishtools.segment import scale_segmentation, cell_mask_from_fishimage\n'), ((2875, 2925), 'fishtools.probes.find_probe_locations_3d', 'find_probe_locations_3d', (['self.deconv_stack', 'thresh'], {}), '(self.deconv_stack, thresh)\n', (2898, 2925), False, 'from fishtools.probes import find_probe_locations_3d\n'), ((3117, 3142), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**config)\n', (3132, 3142), False, 'from types import SimpleNamespace\n'), ((3162, 3195), 'dtoolbioimage.ImageDataSet', 'ImageDataSet', (['self.config.ids_uri'], {}), '(self.config.ids_uri)\n', (3174, 3195), False, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((3331, 3378), 'os.path.join', 'os.path.join', (['self.config.deconv_dirpath', 'fname'], {}), '(self.config.deconv_dirpath, fname)\n', (3343, 3378), False, 'import os\n'), ((3402, 3426), 'dtoolbioimage.Image3D.from_file', 'Image3D.from_file', (['fpath'], {}), '(fpath)\n', (3419, 3426), False, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((3830, 3892), 'os.path.join', 'os.path.join', (['self.config.annotation_dirpath', 'annotation_fname'], {}), '(self.config.annotation_dirpath, annotation_fname)\n', (3842, 3892), False, 'import os\n'), ((3914, 3950), 'dtoolbioimage.Image.from_file', 'dbiImage.from_file', (['annotation_fpath'], {}), '(annotation_fpath)\n', (3932, 3950), True, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, 
scale_to_uint8\n'), ((4269, 4315), 'parse.parse', 'parse.parse', (['config.annotation_template', 'fname'], {}), '(config.annotation_template, fname)\n', (4280, 4315), False, 'import parse\n'), ((5431, 5464), 'dtoolbioimage.ImageDataSet', 'ImageDataSet', (['self.config.ids_uri'], {}), '(self.config.ids_uri)\n', (5443, 5464), False, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((5853, 5900), 'os.path.join', 'os.path.join', (['self.config.deconv_dirpath', 'fname'], {}), '(self.config.deconv_dirpath, fname)\n', (5865, 5900), False, 'import os\n'), ((5929, 5953), 'dtoolbioimage.Image3D.from_file', 'Image3D.from_file', (['fpath'], {}), '(fpath)\n', (5946, 5953), False, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((5983, 6019), 'numpy.clip', 'np.clip', (['self.deconv_stack', '(0)', '(10000)'], {}), '(self.deconv_stack, 0, 10000)\n', (5990, 6019), True, 'import numpy as np\n'), ((7341, 7388), 'fishtools.segment.scale_segmentation', 'scale_segmentation', (['self.all_mask', 'self.maxproj'], {}), '(self.all_mask, self.maxproj)\n', (7359, 7388), False, 'from fishtools.segment import scale_segmentation, cell_mask_from_fishimage\n'), ((7652, 7701), 'fishtools.probes.find_probe_locations_3d', 'find_probe_locations_3d', (['self.probe_stack', 'thresh'], {}), '(self.probe_stack, thresh)\n', (7675, 7701), False, 'from fishtools.probes import find_probe_locations_3d\n'), ((4077, 4116), 'pathlib.Path', 'pathlib.Path', (['config.annotation_dirpath'], {}), '(config.annotation_dirpath)\n', (4089, 4116), False, 'import pathlib\n'), ((1430, 1454), 'dtoolbioimage.Image3D.from_file', 'Image3D.from_file', (['fpath'], {}), '(fpath)\n', (1447, 1454), False, 'from dtoolbioimage import Image as dbiImage, Image3D, ImageDataSet, scale_to_uint8\n'), ((2461, 2494), 'numpy.max', 'np.max', (['self.deconv_stack'], {'axis': '(2)'}), '(self.deconv_stack, axis=2)\n', (2467, 2494), True, 'import numpy as np\n'), ((2716, 
2764), 'fishtools.segment.cell_mask_from_fishimage', 'cell_mask_from_fishimage', (['self.fishimage', 'params'], {}), '(self.fishimage, params)\n', (2740, 2764), False, 'from fishtools.segment import scale_segmentation, cell_mask_from_fishimage\n'), ((7089, 7121), 'numpy.max', 'np.max', (['self.probe_stack'], {'axis': '(2)'}), '(self.probe_stack, axis=2)\n', (7095, 7121), True, 'import numpy as np\n'), ((7473, 7521), 'fishtools.segment.cell_mask_from_fishimage', 'cell_mask_from_fishimage', (['self.fishimage', 'params'], {}), '(self.fishimage, params)\n', (7497, 7521), False, 'from fishtools.segment import scale_segmentation, cell_mask_from_fishimage\n')] |
import numpy as np # only run this test suite if numpy is installed
import pytest
from h3fake.api import (
basic_int,
numpy_int,
memview_int,
)
# todo: check when a copy is made, and when it isn't
def test_set():
    """compact() behaviour when the cells arrive as a Python set."""
    cells = {
        619056821839331327,
        619056821839593471,
        619056821839855615,
        619056821840117759,
        619056821840379903,
        619056821840642047,
        619056821840904191,
    }
    parent = 614553222213795839
    assert basic_int.compact(cells) == {parent}
    # numpy can't convert from a set
    with pytest.raises(TypeError):
        numpy_int.compact(cells)
    # a set isn't a memoryview
    with pytest.raises(TypeError):
        memview_int.compact(cells)
def test_list():
    """compact() behaviour when the cells arrive as a Python list."""
    cells = [
        619056821839331327,
        619056821839593471,
        619056821839855615,
        619056821840117759,
        619056821840379903,
        619056821840642047,
        619056821840904191,
    ]
    parent = 614553222213795839
    assert basic_int.compact(cells) == {parent}
    # numpy converts a list fine, and knows to use uint64
    assert numpy_int.compact(cells) == np.array([parent], dtype='uint64')
    # a little weird that numpy comparisons don't consider dtype
    assert numpy_int.compact(cells) == np.array([parent])
    assert not numpy_int.compact(cells).dtype == np.array([parent]).dtype
    # a list isn't a memoryview
    with pytest.raises(TypeError):
        memview_int.compact(cells)
def test_np_array():
    """All three interfaces accept a uint64 numpy array."""
    cells = np.array([
        619056821839331327,
        619056821839593471,
        619056821839855615,
        619056821840117759,
        619056821840379903,
        619056821840642047,
        619056821840904191,
    ], dtype='uint64')
    parent = 614553222213795839
    assert basic_int.compact(cells) == {parent}
    assert numpy_int.compact(cells) == np.array([parent], dtype='uint64')
    assert numpy_int.compact(cells).dtype == np.dtype('uint64')
    result = memview_int.compact(cells)
    assert len(result) == 1
    assert result[0] == parent
def test_list_to_array():
    """An array built without an explicit dtype defaults to signed ints."""
    raw = [
        619056821839331327,
        619056821839593471,
        619056821839855615,
        619056821840117759,
        619056821840379903,
        619056821840642047,
        619056821840904191,
    ]
    cells = np.array(raw)
    parent = 614553222213795839
    assert basic_int.compact(cells) == {parent}
    assert numpy_int.compact(cells) == np.array([parent], dtype='uint64')
    # Without the explicit dtype the array assumes *signed* integers,
    # and the memview_int interface requires an exact uint64 match.
    with pytest.raises(ValueError):
        memview_int.compact(cells)
def test_iterator():
    """Only basic_int can consume a plain iterator of cells."""
    def make_cells():
        return iter([
            619056821839331327,
            619056821839593471,
            619056821839855615,
            619056821840117759,
            619056821840379903,
            619056821840642047,
            619056821840904191,
        ])
    parent = 614553222213795839
    assert basic_int.compact(make_cells()) == {parent}
    # numpy can't create an array from an iterator
    with pytest.raises(TypeError):
        numpy_int.compact(make_cells())
    # memview requires a bytes-like input
    with pytest.raises(TypeError):
        memview_int.compact(make_cells())
| [
"h3fake.api.basic_int.compact",
"h3fake.api.memview_int.compact",
"h3fake.api.numpy_int.compact",
"numpy.dtype",
"pytest.raises",
"numpy.array"
] | [((1506, 1682), 'numpy.array', 'np.array', (['[619056821839331327, 619056821839593471, 619056821839855615, \n 619056821840117759, 619056821840379903, 619056821840642047, \n 619056821840904191]'], {'dtype': '"""uint64"""'}), "([619056821839331327, 619056821839593471, 619056821839855615, \n 619056821840117759, 619056821840379903, 619056821840642047, \n 619056821840904191], dtype='uint64')\n", (1514, 1682), True, 'import numpy as np\n'), ((1950, 1975), 'h3fake.api.memview_int.compact', 'memview_int.compact', (['ints'], {}), '(ints)\n', (1969, 1975), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((2280, 2295), 'numpy.array', 'np.array', (['ints0'], {}), '(ints0)\n', (2288, 2295), True, 'import numpy as np\n'), ((484, 507), 'h3fake.api.basic_int.compact', 'basic_int.compact', (['ints'], {}), '(ints)\n', (501, 507), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((525, 549), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (538, 549), False, 'import pytest\n'), ((600, 623), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (617, 623), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((634, 658), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (647, 658), False, 'import pytest\n'), ((701, 726), 'h3fake.api.memview_int.compact', 'memview_int.compact', (['ints'], {}), '(ints)\n', (720, 726), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1001, 1024), 'h3fake.api.basic_int.compact', 'basic_int.compact', (['ints'], {}), '(ints)\n', (1018, 1024), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1127, 1150), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (1144, 1150), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1154, 1183), 'numpy.array', 'np.array', (['[h]'], {'dtype': '"""uint64"""'}), "([h], dtype='uint64')\n", (1162, 
1183), True, 'import numpy as np\n'), ((1259, 1282), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (1276, 1282), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1286, 1299), 'numpy.array', 'np.array', (['[h]'], {}), '([h])\n', (1294, 1299), True, 'import numpy as np\n'), ((1378, 1402), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1391, 1402), False, 'import pytest\n'), ((1446, 1471), 'h3fake.api.memview_int.compact', 'memview_int.compact', (['ints'], {}), '(ints)\n', (1465, 1471), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1776, 1799), 'h3fake.api.basic_int.compact', 'basic_int.compact', (['ints'], {}), '(ints)\n', (1793, 1799), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1819, 1842), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (1836, 1842), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1846, 1875), 'numpy.array', 'np.array', (['[h]'], {'dtype': '"""uint64"""'}), "([h], dtype='uint64')\n", (1854, 1875), True, 'import numpy as np\n'), ((1920, 1938), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (1928, 1938), True, 'import numpy as np\n'), ((2336, 2359), 'h3fake.api.basic_int.compact', 'basic_int.compact', (['ints'], {}), '(ints)\n', (2353, 2359), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((2378, 2401), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (2395, 2401), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((2405, 2434), 'numpy.array', 'np.array', (['[h]'], {'dtype': '"""uint64"""'}), "([h], dtype='uint64')\n", (2413, 2434), True, 'import numpy as np\n'), ((2445, 2470), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2458, 2470), False, 'import pytest\n'), ((2667, 2692), 'h3fake.api.memview_int.compact', 'memview_int.compact', 
(['ints'], {}), '(ints)\n', (2686, 2692), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((3067, 3090), 'h3fake.api.basic_int.compact', 'basic_int.compact', (['ints'], {}), '(ints)\n', (3084, 3090), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((3125, 3149), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3138, 3149), False, 'import pytest\n'), ((3214, 3237), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (3231, 3237), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((3265, 3289), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3278, 3289), False, 'import pytest\n'), ((3337, 3362), 'h3fake.api.memview_int.compact', 'memview_int.compact', (['ints'], {}), '(ints)\n', (3356, 3362), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1887, 1910), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (1904, 1910), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1315, 1338), 'h3fake.api.numpy_int.compact', 'numpy_int.compact', (['ints'], {}), '(ints)\n', (1332, 1338), False, 'from h3fake.api import basic_int, numpy_int, memview_int\n'), ((1348, 1361), 'numpy.array', 'np.array', (['[h]'], {}), '([h])\n', (1356, 1361), True, 'import numpy as np\n')] |
import numpy as np
from scipy import special
import matplotlib.pyplot as plt
import quadpy
import math
#using a basis of l spherical harmonics
def matlab_legendre(n, X):
    """Return associated Legendre function values P_n^m(X) for m = 0..n.

    Mirrors MATLAB's ``legendre(n, X)``: row ``m`` of the result holds
    ``P_n^m`` (scipy's ``lpmv``, which includes the Condon-Shortley
    phase) evaluated at every element of ``X``.

    Parameters
    ----------
    n : int
        Degree of the Legendre function (non-negative).
    X : array_like
        Evaluation points, typically in [-1, 1].

    Returns
    -------
    numpy.ndarray
        Array of shape ``(n + 1,) + np.shape(X)``.
    """
    # One row per order m; np.array stacks them like MATLAB's output.
    return np.array([special.lpmv(m, n, X) for m in range(n + 1)])
# --- free-rotor spectrum in a basis of spherical harmonics up to l = degree ---
degree = 7

### parameters
# Rotational constant for a hydrogen molecule in wavenumber
B = 61.20979
# B = 1.0
# conversion factor from wavenumber to meV
w2meV = 0.1239842

### not using at the moment: rotational constant from first principles
hbar = 1.05457173E-34       # h/2pi
Hmass = 1.6737236E-27       # mass of a hydrogen atom
mu = Hmass * Hmass / (Hmass + Hmass)   # reduced mass of H2
# NOTE(review): 10E-10 is 1e-9; the H2 bond length 0.741 Angstrom is
# 0.741e-10 m -- confirm the intended exponent (BB is unused below).
R = 0.741 * 10E-10
I = mu * R ** 2
BB = hbar ** 2 / (2 * I) * (6.24150913 * 10E18) * 1000
### not using at the moment

# total number of basis functions: sum of (2l + 1) for l = 0..degree
NBas = 0
for i in range(0, degree + 1):
    NBas = NBas + (2 * i + 1)

# H holds the (initially zero) perturbation matrix; J[k] is the l value
# of basis function k.
H = np.zeros([NBas, NBas], dtype=complex)
J = np.array([])
for i in range(0, degree + 1):
    for j in range(0, 2 * i + 1):
        J = np.append(J, [int(i)])

# Lebedev quadrature sphere (146-point rule) and its angular coordinates
leb = quadpy.sphere.lebedev_019()
phi = np.arctan2(leb.points[:, 1], leb.points[:, 0])
theta = np.arccos(leb.points[:, 2])

# Potential sampled on the quadrature points (zero -> free rotor).
# BUGFIX: this was previously evaluated before `phi` was defined,
# which raised a NameError at import time.
v = np.zeros(phi.size)

# construct the spherical-harmonic basis over (theta, phi)
k = (degree + 1) ** 2
Y = np.zeros([leb.points[:, 0].size, k], dtype=complex)
for j in range(0, degree + 1):
    Pm = np.matrix.transpose(matlab_legendre(j, leb.points[:, 2]))
    lconstant = ((2 * j + 1) / (4 * np.pi)) ** 0.5
    # column index of Y_j0 within the flattened (j, m) ordering
    center = (j + 1) ** 2 - j
    Y[:, center - 1] = lconstant * Pm[:, 0]
    # fill in the m != 0 orders (if any) with their normalisation
    for m in range(1, j + 1):
        precoeff = lconstant * (math.factorial(j - m) / math.factorial(j + m)) ** 0.5
        if m % 2 == 1:
            Y[:, center + m - 1] = precoeff * Pm[:, m] * np.exp(1j * m * phi)
            Y[:, center - m - 1] = -precoeff * Pm[:, m] * np.exp(-1j * m * phi)
        else:
            Y[:, center + m - 1] = precoeff * Pm[:, m] * np.exp(1j * m * phi)
            Y[:, center - m - 1] = precoeff * Pm[:, m] * np.exp(-1j * m * phi)

# diagonal terms: rigid-rotor energies B*J(J+1) plus any diagonal of H
H_diag = np.zeros([NBas, NBas], dtype=complex)
for n in range(0, NBas):
    H_diag[n, n] = B * J[n] * (J[n] + 1) + H[n, n]

# strength of the in-plane rotational barrier (used only by the
# commented-out potentials below)
b = 0.0
with open('rawdata.csv', 'a') as f:
    # sweep the barrier strength a (the barrier itself is currently
    # disabled, so every iteration yields the free-rotor spectrum)
    for a in np.arange(0, 100, 1):
        # v = a*B*(np.sin(theta)**2)+(0.5*b)*(np.cos(2*phi))*(np.sin(theta)**2) # 2-D rotational barrier
        # v = a*B*(np.sin(theta)**2) # 1-D rotational barrier
        # v = a*(np.sin(theta)**2)
        H = H_diag + np.matmul(
            np.matrix.getH(Y),
            v.reshape(-1, 1) * (leb.weights * 4 * np.pi).reshape(-1, 1) * Y,
        )
        eigenvalues, eigenvectors = np.linalg.eig(H)
        # sort once and reuse for both the CSV row and the plot
        sorted_meV = np.sort(eigenvalues.real) * w2meV
        newline = np.append(np.array(a), sorted_meV)
        np.savetxt(f, newline, fmt='%1.3f', newline=", ")
        f.write("\n")
        # plot the nine lowest rotational levels against barrier strength
        for i in range(0, 9):
            plt.plot(a * w2meV, sorted_meV[i], 'bo')
plt.show()
| [
"numpy.arctan2",
"matplotlib.pyplot.show",
"scipy.special.lpmv",
"quadpy.sphere.lebedev_019",
"numpy.savetxt",
"numpy.zeros",
"numpy.linalg.eig",
"numpy.matrix.getH",
"numpy.sort",
"numpy.array",
"numpy.arange",
"numpy.exp",
"math.factorial",
"numpy.arccos"
] | [((1009, 1027), 'numpy.zeros', 'np.zeros', (['phi.size'], {}), '(phi.size)\n', (1017, 1027), True, 'import numpy as np\n'), ((1032, 1069), 'numpy.zeros', 'np.zeros', (['[NBas, NBas]'], {'dtype': 'complex'}), '([NBas, NBas], dtype=complex)\n', (1040, 1069), True, 'import numpy as np\n'), ((1073, 1085), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1081, 1085), True, 'import numpy as np\n'), ((1231, 1258), 'quadpy.sphere.lebedev_019', 'quadpy.sphere.lebedev_019', ([], {}), '()\n', (1256, 1258), False, 'import quadpy\n'), ((1265, 1311), 'numpy.arctan2', 'np.arctan2', (['leb.points[:, 1]', 'leb.points[:, 0]'], {}), '(leb.points[:, 1], leb.points[:, 0])\n', (1275, 1311), True, 'import numpy as np\n'), ((1317, 1344), 'numpy.arccos', 'np.arccos', (['leb.points[:, 2]'], {}), '(leb.points[:, 2])\n', (1326, 1344), True, 'import numpy as np\n'), ((1433, 1484), 'numpy.zeros', 'np.zeros', (['[leb.points[:, 0].size, k]'], {'dtype': 'complex'}), '([leb.points[:, 0].size, k], dtype=complex)\n', (1441, 1484), True, 'import numpy as np\n'), ((2309, 2346), 'numpy.zeros', 'np.zeros', (['[NBas, NBas]'], {'dtype': 'complex'}), '([NBas, NBas], dtype=complex)\n', (2317, 2346), True, 'import numpy as np\n'), ((2417, 2429), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2425, 2429), True, 'import numpy as np\n'), ((2546, 2566), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (2555, 2566), True, 'import numpy as np\n'), ((3210, 3220), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3218, 3220), True, 'import matplotlib.pyplot as plt\n'), ((269, 282), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (277, 282), True, 'import numpy as np\n'), ((2920, 2936), 'numpy.linalg.eig', 'np.linalg.eig', (['H'], {}), '(H)\n', (2933, 2936), True, 'import numpy as np\n'), ((3008, 3057), 'numpy.savetxt', 'np.savetxt', (['f', 'newline'], {'fmt': '"""%1.3f"""', 'newline': '""", """'}), "(f, newline, fmt='%1.3f', newline=', ')\n", (3018, 3057), True, 
'import numpy as np\n'), ((2959, 2970), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2967, 2970), True, 'import numpy as np\n'), ((2816, 2833), 'numpy.matrix.getH', 'np.matrix.getH', (['Y'], {}), '(Y)\n', (2830, 2833), True, 'import numpy as np\n'), ((2971, 2996), 'numpy.sort', 'np.sort', (['eigenvalues.real'], {}), '(eigenvalues.real)\n', (2978, 2996), True, 'import numpy as np\n'), ((236, 257), 'scipy.special.lpmv', 'special.lpmv', (['m', 'n', 'X'], {}), '(m, n, X)\n', (248, 257), False, 'from scipy import special\n'), ((1992, 2014), 'numpy.exp', 'np.exp', (['(1.0j * m * phi)'], {}), '(1.0j * m * phi)\n', (1998, 2014), True, 'import numpy as np\n'), ((2059, 2082), 'numpy.exp', 'np.exp', (['(-1.0j * m * phi)'], {}), '(-1.0j * m * phi)\n', (2065, 2082), True, 'import numpy as np\n'), ((2141, 2163), 'numpy.exp', 'np.exp', (['(1.0j * m * phi)'], {}), '(1.0j * m * phi)\n', (2147, 2163), True, 'import numpy as np\n'), ((2208, 2231), 'numpy.exp', 'np.exp', (['(-1.0j * m * phi)'], {}), '(-1.0j * m * phi)\n', (2214, 2231), True, 'import numpy as np\n'), ((1855, 1876), 'math.factorial', 'math.factorial', (['(j - m)'], {}), '(j - m)\n', (1869, 1876), False, 'import math\n'), ((1875, 1896), 'math.factorial', 'math.factorial', (['(j + m)'], {}), '(j + m)\n', (1889, 1896), False, 'import math\n'), ((3164, 3189), 'numpy.sort', 'np.sort', (['eigenvalues.real'], {}), '(eigenvalues.real)\n', (3171, 3189), True, 'import numpy as np\n')] |
import numpy as np
from generator import Generator, KerasGenerator
def test_generator():
    """Drive the plain Generator and print every yielded batch."""
    features = np.array([
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    ]).transpose()
    targets = np.array([
        [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    ]).transpose()
    gen = Generator(
        x_data=features,
        y_data=targets,
        x_num_steps=3,
        y_num_steps=2,
        f_step=2,
        skip_step=3,
    )
    for batch in gen.generate():
        print(batch)
def test_keras_generator():
    """Drive the KerasGenerator and print its length and first batch."""
    features = np.array([
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    ]).transpose()
    targets = np.array([
        [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    ]).transpose()
    gen = KerasGenerator(
        x_data=features,
        y_data=targets,
        x_num_steps=3,
        y_num_steps=2,
        f_step=2,
        skip_step=3,
        batch_size=2,
    )
    print(len(gen))
    print(gen[0])
test_keras_generator() | [
"generator.Generator",
"generator.KerasGenerator",
"numpy.array"
] | [((308, 404), 'generator.Generator', 'Generator', ([], {'x_data': 'x_data', 'y_data': 'y_data', 'x_num_steps': '(3)', 'y_num_steps': '(2)', 'f_step': '(2)', 'skip_step': '(3)'}), '(x_data=x_data, y_data=y_data, x_num_steps=3, y_num_steps=2,\n f_step=2, skip_step=3)\n', (317, 404), False, 'from generator import Generator, KerasGenerator\n'), ((796, 911), 'generator.KerasGenerator', 'KerasGenerator', ([], {'x_data': 'x_data', 'y_data': 'y_data', 'x_num_steps': '(3)', 'y_num_steps': '(2)', 'f_step': '(2)', 'skip_step': '(3)', 'batch_size': '(2)'}), '(x_data=x_data, y_data=y_data, x_num_steps=3, y_num_steps=2,\n f_step=2, skip_step=3, batch_size=2)\n', (810, 911), False, 'from generator import Generator, KerasGenerator\n'), ((104, 179), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]'], {}), '([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])\n', (112, 179), True, 'import numpy as np\n'), ((227, 271), 'numpy.array', 'np.array', (['[[2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]'], {}), '([[2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])\n', (235, 271), True, 'import numpy as np\n'), ((591, 666), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]'], {}), '([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])\n', (599, 666), True, 'import numpy as np\n'), ((714, 758), 'numpy.array', 'np.array', (['[[2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]'], {}), '([[2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])\n', (722, 758), True, 'import numpy as np\n')] |
# Implement the pose-detection demo from the open-cv website
import cv2
import numpy as np
import pickle
def draw(img, corners, imgpts):
imgpts = np.int32(imgpts).reshape(-1,2)
# draw ground floor in green
img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)
# draw pillars in blue color
for i,j in zip(range(4),range(4,8)):
img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)
# draw top layer in red color
img = cv2.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)
return img
# Load previously saved data
with open('./camera_data.pkl', 'rb') as file:
mtx, dist = pickle.load(file)
# Define search critera for chessboard, and the points the
# chessboard corners correspond to in it's relative space
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Define lines for drawing object (commented out axes lines)
# axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
axis = np.float32(
[[0,0,0], [0,3,0], [3,3,0], [3,0,0],
[0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3]]
)
# Load video stream
vs = cv2.VideoCapture(0)
# While there is an active stream (can also test vs.isOpened() until ready)
active = True
while active:
# Run image from stream through model and display
active, img = vs.read()
h, w, c = img.shape
img = cv2.resize(img, (int(w/2), int(h/2)))
img = cv2.GaussianBlur(img, (3, 3), 0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
if ret == True:
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
print(len(corners2))
# Find the rotation and translation vectors.
print("Find rotation and translation")
_, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist)
# project 3D points to image plane
print("Project Points")
imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
# Draw image
print("Draw image")
img = draw(img,corners2,imgpts)
# k = cv2.waitKey(0) & 0xff
cv2.imshow('img',img)
cv2.waitKey(1)
cv2.destroyAllWindows() | [
"cv2.GaussianBlur",
"cv2.findChessboardCorners",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.float32",
"numpy.zeros",
"cv2.imshow",
"cv2.cornerSubPix",
"cv2.solvePnPRansac",
"cv2.VideoCapture",
"cv2.projectPoints",
"pickle.load",
"numpy.int32",
"cv2.drawContours",
"cv2.destroyAllWindows"
] | [((848, 880), 'numpy.zeros', 'np.zeros', (['(6 * 7, 3)', 'np.float32'], {}), '((6 * 7, 3), np.float32)\n', (856, 880), True, 'import numpy as np\n'), ((1058, 1167), 'numpy.float32', 'np.float32', (['[[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0], [0, 0, -3], [0, 3, -3], [3, 3,\n -3], [3, 0, -3]]'], {}), '([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0], [0, 0, -3], [0, 3, \n -3], [3, 3, -3], [3, 0, -3]])\n', (1068, 1167), True, 'import numpy as np\n'), ((1180, 1199), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1196, 1199), False, 'import cv2\n'), ((2279, 2302), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2300, 2302), False, 'import cv2\n'), ((226, 282), 'cv2.drawContours', 'cv2.drawContours', (['img', '[imgpts[:4]]', '(-1)', '(0, 255, 0)', '(-3)'], {}), '(img, [imgpts[:4]], -1, (0, 255, 0), -3)\n', (242, 282), False, 'import cv2\n'), ((470, 525), 'cv2.drawContours', 'cv2.drawContours', (['img', '[imgpts[4:]]', '(-1)', '(0, 0, 255)', '(3)'], {}), '(img, [imgpts[4:]], -1, (0, 0, 255), 3)\n', (486, 525), False, 'import cv2\n'), ((629, 646), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (640, 646), False, 'import pickle\n'), ((1470, 1502), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(0)'], {}), '(img, (3, 3), 0)\n', (1486, 1502), False, 'import cv2\n'), ((1514, 1551), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1526, 1551), False, 'import cv2\n'), ((1607, 1652), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(7, 6)', 'None'], {}), '(gray, (7, 6), None)\n', (1632, 1652), False, 'import cv2\n'), ((2237, 2259), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (2247, 2259), False, 'import cv2\n'), ((2263, 2277), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2274, 2277), False, 'import cv2\n'), ((1691, 1752), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(11, 11)', '(-1, -1)', 
'criteria'], {}), '(gray, corners, (11, 11), (-1, -1), criteria)\n', (1707, 1752), False, 'import cv2\n'), ((1912, 1957), 'cv2.solvePnPRansac', 'cv2.solvePnPRansac', (['objp', 'corners2', 'mtx', 'dist'], {}), '(objp, corners2, mtx, dist)\n', (1930, 1957), False, 'import cv2\n'), ((2056, 2104), 'cv2.projectPoints', 'cv2.projectPoints', (['axis', 'rvecs', 'tvecs', 'mtx', 'dist'], {}), '(axis, rvecs, tvecs, mtx, dist)\n', (2073, 2104), False, 'import cv2\n'), ((151, 167), 'numpy.int32', 'np.int32', (['imgpts'], {}), '(imgpts)\n', (159, 167), True, 'import numpy as np\n')] |
from sklearn.linear_model import Lasso
import argparse
import os
import numpy as np
from sklearn.metrics import mean_squared_error
from math import sqrt
import joblib
from sklearn.model_selection import train_test_split
import pandas as pd
from azureml.core.run import Run
from azureml.core.dataset import Dataset
from azureml.data.dataset_factory import TabularDatasetFactory
def clean_data(data):
x_df = data.to_pandas_dataframe()
x_df = x_df.dropna()
#clean data
x_df['MSSubClass'] = x_df['MSSubClass'].astype(str)
x_df['YrSold'] = x_df['YrSold'].astype(str)
x_df['MoSold'] = x_df['MoSold'].astype(str)
x_df['YearBuilt'] = x_df['YearBuilt'].astype(str)
x_df['YearRemodAdd'] = x_df['YearRemodAdd'].astype(str)
# One hot encode data
features_to_encode =['MSSubClass','MSZoning','Street','LotShape',
'LandContour','Utilities','LotConfig','LandSlope','Neighborhood','Condition1','Condition2',
'BldgType','HouseStyle','YearBuilt','YearRemodAdd','RoofStyle','RoofMatl','Exterior1st',
'Exterior2nd','MasVnrType','ExterQual','ExterCond','Foundation','BsmtQual','BsmtCond',
'Heating','HeatingQC','CentralAir','Electrical','KitchenQual','Functional','GarageType',
'GarageYrBlt','GarageFinish','GarageQual','GarageCond','PavedDrive','MoSold','YrSold',
'SaleType','SaleCondition']
new_df = pd.get_dummies(x_df, columns=features_to_encode)
x_df = new_df
y_df = x_df.pop("SalePrice")
return x_df, y_df
def main():
# Add arguments to script
parser = argparse.ArgumentParser()
parser.add_argument('--alpha', type=float, default=1.0, help="")
parser.add_argument('--max_iter', type=int, default=1000, help="The maximum number of iterations")
args = parser.parse_args()
run = Run.get_context()
workspace = run.experiment.workspace
run.log("Alpha:", np.float(args.alpha))
run.log("Maximum Iteration:", np.int(args.max_iter))
#The dataset is registered using Python SDK in the notebook
dataset_name = 'Housing Dataset'
# Get a dataset by name
ds = Dataset.get_by_name(workspace=workspace, name=dataset_name)
x,y = clean_data(ds)
# TODO: Split data into train and test sets.
### YOUR CODE HERE ###
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size =0.2, random_state=223)
model = Lasso(alpha=args.alpha, max_iter=args.max_iter).fit(x_train, y_train)
y_predict = model.predict(x_test)
#calculate the root mean squared error
#y_actual = y_test.values.flatten().tolist()
rmse = sqrt(mean_squared_error(y_test,y_predict))
run.log("root_mean_squared_error", np.float(rmse))
#save the best model
os.makedirs('outputs', exist_ok = True)
import joblib
joblib.dump(value = model, filename= 'outputs/model.joblib')
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"azureml.core.dataset.Dataset.get_by_name",
"os.makedirs",
"pandas.get_dummies",
"sklearn.model_selection.train_test_split",
"azureml.core.run.Run.get_context",
"joblib.dump",
"numpy.float",
"numpy.int",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Lasso... | [((1367, 1415), 'pandas.get_dummies', 'pd.get_dummies', (['x_df'], {'columns': 'features_to_encode'}), '(x_df, columns=features_to_encode)\n', (1381, 1415), True, 'import pandas as pd\n'), ((1546, 1571), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1569, 1571), False, 'import argparse\n'), ((1787, 1804), 'azureml.core.run.Run.get_context', 'Run.get_context', ([], {}), '()\n', (1802, 1804), False, 'from azureml.core.run import Run\n'), ((2092, 2151), 'azureml.core.dataset.Dataset.get_by_name', 'Dataset.get_by_name', ([], {'workspace': 'workspace', 'name': 'dataset_name'}), '(workspace=workspace, name=dataset_name)\n', (2111, 2151), False, 'from azureml.core.dataset import Dataset\n'), ((2293, 2348), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(223)'}), '(x, y, test_size=0.2, random_state=223)\n', (2309, 2348), False, 'from sklearn.model_selection import train_test_split\n'), ((2702, 2739), 'os.makedirs', 'os.makedirs', (['"""outputs"""'], {'exist_ok': '(True)'}), "('outputs', exist_ok=True)\n", (2713, 2739), False, 'import os\n'), ((2769, 2826), 'joblib.dump', 'joblib.dump', ([], {'value': 'model', 'filename': '"""outputs/model.joblib"""'}), "(value=model, filename='outputs/model.joblib')\n", (2780, 2826), False, 'import joblib\n'), ((1868, 1888), 'numpy.float', 'np.float', (['args.alpha'], {}), '(args.alpha)\n', (1876, 1888), True, 'import numpy as np\n'), ((1924, 1945), 'numpy.int', 'np.int', (['args.max_iter'], {}), '(args.max_iter)\n', (1930, 1945), True, 'import numpy as np\n'), ((2579, 2616), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_predict'], {}), '(y_test, y_predict)\n', (2597, 2616), False, 'from sklearn.metrics import mean_squared_error\n'), ((2657, 2671), 'numpy.float', 'np.float', (['rmse'], {}), '(rmse)\n', (2665, 2671), True, 'import numpy as np\n'), ((2363, 2410), 
'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'args.alpha', 'max_iter': 'args.max_iter'}), '(alpha=args.alpha, max_iter=args.max_iter)\n', (2368, 2410), False, 'from sklearn.linear_model import Lasso\n')] |
# disable GPU acceleration
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import packages
import json
import numpy as np
import networkx as nx
import tensorflow as tf
from pathlib import Path
import scipy.sparse as sps
from collections import defaultdict
from statistics import mean, pvariance
# input/output directory
snapshot_prefix = '' # the input directory should point to the graph snapshots directory
output_dir = 'results/DVGAE/emb_size/'
# == parameters ==
# threshold for high bandwidth test edges
config_test_weight_threshold = 0.4
# first train snapshot
start_subgraph = 10
# last train snapshot
end_subgraph = 20
# test snapshot
test_subgraph = 299
num_snapshots = end_subgraph - start_subgraph
# options for intermediate embedding size
intermediate_size_options = [16, 32, 64, 128]
# number of train iterations
epochs = 100
# learning rate of training process
learning_rate = 0.001
# depth of training (window)
l_depth = 2
# number of times the experiment is repeated
repeat = 5
# deconstructs a sparse matrix to coordinates, values and shape
def sparse_to_tuple(sparse_mx):
sparse_mx = sps.triu(sparse_mx)
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
# calculates the normalized laplacian of the input matrix
def calc_normalized(adj_):
rowsum = np.array(adj_.sum(1))
degree_mat_inv_sqrt = sps.diags(np.power(rowsum, -0.5).flatten())
adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo().astype(np.float32)
return adj_normalized
# calculates the mean square error
def calc_mse(coords, labels, embeddings):
predictions = []
for src, dst in coords:
emb1 = embeddings[src]
emb2 = embeddings[dst]
pred = tf.sigmoid(tf.tensordot(emb1, emb2, 1)).numpy()
predictions.append(pred)
mse = tf.keras.losses.MSE(labels, predictions)
return mse
# calculates the mean absolute error
def calc_mae(coords, labels, embeddings):
predictions = []
for src, dst in coords:
emb1 = embeddings[src]
emb2 = embeddings[dst]
pred = tf.sigmoid(tf.tensordot(emb1, emb2, 1)).numpy().tolist()
predictions.append(pred)
predictions = tf.Variable(predictions)
mae = tf.reduce_mean(tf.abs(labels - predictions))
return mae
# removes nodes that do not have any high bandwidth connection
def clear_low_nodes(graph):
delete = []
for node in graph.nodes():
neighbors = dict(graph.adj[node])
vals = list(neighbors.values())
weights = [d['weight'] for d in vals]
filter = [weight > 0.5 for weight in weights]
if not any(filter):
delete.append(node)
for node in delete:
graph.remove_node(node)
# removes nodes that do not have any connections
def clear_empty_nodes(graph):
delete = []
for node in graph.nodes():
neighbors = dict(graph.adj[node])
vals = list(neighbors.values())
weights = sum([d['weight'] for d in vals])
if weights == 0:
delete.append(node)
for node in delete:
graph.remove_node(node)
# the following lists hold the results of preprocessing
# adjacency matrix of each snapshot
adj_snapshots = []
# normalized adjacency matrix of each snapshot
adj_norm_snapshots = []
# node features per snapshot
features_snapshots = []
# number of nodes each snapshot has
num_nodes_snapshot = []
# test coordinates for each snapshot
test_coords_snapshots = []
# test values for each snapshot
test_values_snapshots = []
# high bandwidth test coordinates for each snapshot
test_thres_coords_snapshots = []
# high bandwidth test values for each snapshot
test_thres_values_snapshots = []
# define path for test data
test_path = snapshot_prefix + str(test_subgraph) + '.csv'
# define test snapshot
graph_ground_truth = nx.read_weighted_edgelist(test_path, nodetype=int, delimiter=',')
# == preprocessing ==
for i in range(start_subgraph, end_subgraph):
train_path = snapshot_prefix + str(i) + '.csv'
# read adj
graph = nx.read_weighted_edgelist(train_path, nodetype=int, delimiter=',')
clear_low_nodes(graph)
adj = nx.adjacency_matrix(graph, nodelist=sorted(graph.nodes()))
adj.eliminate_zeros()
# prepare adj tensor (dense)
adj_train_with_diag = adj + sps.identity(adj.shape[0], dtype=np.float32)
adj_tensor = tf.Variable(adj_train_with_diag.todense(), dtype=tf.float32)
# prepare adj normalized tensor (sparse)
adj_norm = calc_normalized(adj_train_with_diag)
indices = np.mat([adj_norm.row, adj_norm.col]).transpose()
adj_norm_tensor = tf.SparseTensor(indices, adj_norm.data, adj_norm.shape)
# create feature matrix (identity matrix)
features = sps.identity(adj_norm.shape[0], dtype=np.float32, format='coo')
# prepare feature tensor (sparse)
indices = np.mat([features.row, features.col]).transpose()
features_tensor = tf.SparseTensor(indices, features.data, features.shape)
# load testset
adj_ground_truth = nx.adjacency_matrix(graph_ground_truth, nodelist=sorted(graph.nodes()))
adj_ground_truth.eliminate_zeros()
adj_ground_truth = adj_ground_truth.todense()
adj_coords, adj_values, _ = sparse_to_tuple(adj)
for coords in adj_coords:
adj_ground_truth[coords[0], coords[1]] = 0
adj_ground_truth[coords[1], coords[0]] = 0
adj_ground_truth = sps.csr_matrix(adj_ground_truth)
adj_ground_truth_thres = adj_ground_truth.copy()
adj_ground_truth_thres[adj_ground_truth_thres < config_test_weight_threshold] = 0
adj_ground_truth_thres.eliminate_zeros()
test_coords, test_values, _ = sparse_to_tuple(adj_ground_truth)
test_coords_thres, test_values_thres, _ = sparse_to_tuple(adj_ground_truth_thres)
# append everything to lists
adj_snapshots.append(adj_tensor)
adj_norm_snapshots.append(adj_norm_tensor)
features_snapshots.append(features_tensor)
num_nodes_snapshot.append(adj.shape[0])
test_coords_snapshots.append(test_coords)
test_values_snapshots.append(test_values)
test_thres_coords_snapshots.append(test_coords_thres)
test_thres_values_snapshots.append(test_values_thres)
# == model definition ==
class FirstLayer(tf.keras.layers.Layer):
def __init__(self, adj_norm, shared_w0):
super(FirstLayer, self).__init__()
self.adj_norm = adj_norm
self.w = shared_w0
def call(self, inputs, **kwargs):
xw = tf.sparse.sparse_dense_matmul(inputs, self.w)
axw = tf.sparse.sparse_dense_matmul(self.adj_norm, xw)
relu = tf.nn.relu(axw)
return relu
class SecondLayer(tf.keras.layers.Layer):
def __init__(self, units, adj_norm):
super(SecondLayer, self).__init__()
self.units = units
self.adj_norm = adj_norm
self.training = True
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer=tf.keras.initializers.glorot_uniform(),
trainable=True)
def call(self, inputs, **kwargs):
x = tf.matmul(inputs, self.w)
x = tf.sparse.sparse_dense_matmul(self.adj_norm, x)
return x
class Encoder(tf.keras.Model):
def __init__(self, adj_norm, embedding_size, shared_w0):
super(Encoder, self).__init__()
self.first_layer = FirstLayer(adj_norm, shared_w0)
self.mean_layer = SecondLayer(embedding_size, adj_norm)
self.std_layer = SecondLayer(embedding_size, adj_norm)
def call(self, input_features, **kwargs):
intermediate = self.first_layer(input_features)
means = self.mean_layer(intermediate)
stds = self.std_layer(intermediate)
z = means + (tf.random.normal(shape=means.shape) * tf.exp(stds))
return z, means, stds
class ThirdLayer(tf.keras.layers.Layer):
def __init__(self):
super(ThirdLayer, self).__init__()
def call(self, inputs, **kwargs):
matmul = tf.matmul(inputs, inputs, transpose_b=True)
flat = tf.reshape(matmul, [-1])
return flat
class Decoder(tf.keras.Model):
def __init__(self):
super(Decoder, self).__init__()
self.third_layer = ThirdLayer()
def call(self, input_features, **kwargs):
return self.third_layer(input_features)
class Autoencoder(tf.keras.Model):
def __init__(self, adj_norm, embedding_size, shared_w0):
super(Autoencoder, self).__init__()
self.encoder = Encoder(adj_norm, embedding_size, shared_w0)
self.decoder = Decoder()
def call(self, input_features, **kwargs):
z, means, stds = self.encoder(input_features)
reconstructed = self.decoder(z)
return reconstructed, means, stds
# == experiment ==
for experiment in intermediate_size_options:
# setup experiment parameters
intermediate_size = experiment
embedding_size = int(intermediate_size / 2)
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
glorot_initializer = tf.keras.initializers.glorot_uniform()
autoencoders = []
pos_weights = []
norms = []
labels = []
# calculate DVGAE weights
for i in np.arange(num_snapshots):
adj = adj_snapshots[i]
adj_sum = tf.reduce_sum(adj)
pos_weights.append(float((adj.shape[0] * adj.shape[0]) - adj_sum) / adj_sum)
norms.append(adj.shape[0] * adj.shape[0] / float(((adj.shape[0] * adj.shape[0]) - adj_sum) * 2))
labels.append(tf.reshape(adj, [-1]))
print("start training with size", experiment)
# lists that keep track of results
snapshot_history = defaultdict(list)
kl_loss_history = defaultdict(list)
reconstructed_loss_history = defaultdict(list)
mean_mse = []
mean_mse_thres = []
var_mse = []
var_mse_thres = []
mean_mae = []
mean_mae_thres = []
var_mae = []
var_mae_thres = []
autoencoders = defaultdict(list)
kl_losses = {}
# iterate over the training window
for i in range(num_snapshots):
mse_acc = []
thres_mse_acc = []
mae_acc = []
thres_mae_acc = []
# repeat every experiment
for rep in range(repeat):
print('snapshot', start_subgraph + i)
# prepare shared weights
if i > 0:
last_trained_ae = autoencoders[i - 1][rep]
prev_w0 = last_trained_ae.encoder.first_layer.w
num_new_nodes = num_nodes_snapshot[i] - num_nodes_snapshot[i - 1]
if num_new_nodes > 0:
glorot_weights = tf.Variable(
initial_value=glorot_initializer(shape=(num_new_nodes, intermediate_size), dtype=tf.float32),
trainable=True)
w0 = tf.concat([prev_w0, glorot_weights], axis=0)
else:
w0 = prev_w0
else:
w0 = tf.Variable(
initial_value=glorot_initializer(shape=(num_nodes_snapshot[0], intermediate_size),
dtype=tf.float32),
trainable=True)
# create autoencoder
autoenc = Autoencoder(adj_norm_snapshots[i], embedding_size, w0)
autoencoders[i].append(autoenc)
features = features_snapshots[i]
norm = norms[i]
pos_weight = pos_weights[i]
label = labels[i]
num_nodes = num_nodes_snapshot[i]
# train autoencoder
for epoch in range(epochs):
with tf.GradientTape() as tape:
# forward pass
reconstructed, means, stds = autoenc(features)
# compute train error
reconstruction_loss = norm * tf.reduce_mean(
tf.nn.weighted_cross_entropy_with_logits(logits=reconstructed, labels=label,
pos_weight=pos_weight))
kl_self_loss = tf.abs((0.5 / num_nodes_snapshot[i]) * tf.reduce_mean(
tf.reduce_sum(1 + 2 * stds - tf.square(means) - tf.square(tf.exp(stds)), 1)))
kl_loss = 0
if i == 0:
kl_loss += kl_self_loss
else:
for l in range(i - 1, max(-1, i - 1 - l_depth), -1):
prev_kl = kl_losses[l]
kl_loss += (kl_self_loss + prev_kl) / 2
kl_losses[i] = kl_loss
step_loss = reconstruction_loss + kl_loss
snapshot_history[i].append(step_loss)
kl_loss_history[i].append(kl_loss)
reconstructed_loss_history[i].append(reconstruction_loss)
# propagate gradients
gradients = tape.gradient(step_loss, autoenc.trainable_variables)
gradient_variables = zip(gradients, autoenc.trainable_variables)
opt.apply_gradients(gradient_variables)
# measure test error after training
reconstructed, embeddings, stds = autoenc(features)
mse_score = calc_mse(test_coords_snapshots[i], test_values_snapshots[i], embeddings).numpy().tolist()
mse_score_thres = calc_mse(test_thres_coords_snapshots[i], test_thres_values_snapshots[i],
embeddings).numpy().tolist()
mae_score = calc_mae(test_coords_snapshots[i], test_values_snapshots[i], embeddings).numpy().tolist()
mae_score_thres = calc_mae(test_thres_coords_snapshots[i], test_thres_values_snapshots[i],
embeddings).numpy().tolist()
mse_acc.append(mse_score)
thres_mse_acc.append(mse_score_thres)
mae_acc.append(mae_score)
thres_mae_acc.append(mae_score_thres)
mean_mse.append(mean(mse_acc))
var_mse.append(pvariance(mse_acc))
mean_mse_thres.append(mean(thres_mse_acc))
var_mse_thres.append(pvariance(thres_mse_acc))
mean_mae.append(mean(mae_acc))
var_mae.append(pvariance(mae_acc))
mean_mae_thres.append(mean(thres_mae_acc))
var_mae_thres.append(pvariance(thres_mae_acc))
# experiments are over, saving results into files
save_path = output_dir + str(experiment) + '/'
def save_data(list, filename):
with open(save_path + filename, 'w') as handle:
json.dump(list, handle)
Path(save_path).mkdir(parents=True, exist_ok=True)
save_data(mean_mse, 'mean mse.json')
save_data(var_mse, 'variance mse.json')
save_data(mean_mse_thres, 'mean mse thres.json')
save_data(var_mse_thres, 'variance mse thres.json')
save_data(mean_mae, 'mean mae.json')
save_data(var_mae, 'variance mae.json')
save_data(mean_mae_thres, 'mean mae thres.json')
save_data(var_mae_thres, 'variance mae thres.json')
| [
"tensorflow.reduce_sum",
"tensorflow.keras.losses.MSE",
"tensorflow.reshape",
"collections.defaultdict",
"tensorflow.matmul",
"pathlib.Path",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.keras.initializers.glorot_uniform",
"numpy.mat",
"tensorflow.abs",
"tensorflow.nn.relu",
"numpy.pow... | [((3924, 3989), 'networkx.read_weighted_edgelist', 'nx.read_weighted_edgelist', (['test_path'], {'nodetype': 'int', 'delimiter': '""","""'}), "(test_path, nodetype=int, delimiter=',')\n", (3949, 3989), True, 'import networkx as nx\n'), ((1119, 1138), 'scipy.sparse.triu', 'sps.triu', (['sparse_mx'], {}), '(sparse_mx)\n', (1127, 1138), True, 'import scipy.sparse as sps\n'), ((1926, 1966), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['labels', 'predictions'], {}), '(labels, predictions)\n', (1945, 1966), True, 'import tensorflow as tf\n'), ((2300, 2324), 'tensorflow.Variable', 'tf.Variable', (['predictions'], {}), '(predictions)\n', (2311, 2324), True, 'import tensorflow as tf\n'), ((4138, 4204), 'networkx.read_weighted_edgelist', 'nx.read_weighted_edgelist', (['train_path'], {'nodetype': 'int', 'delimiter': '""","""'}), "(train_path, nodetype=int, delimiter=',')\n", (4163, 4204), True, 'import networkx as nx\n'), ((4699, 4754), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'adj_norm.data', 'adj_norm.shape'], {}), '(indices, adj_norm.data, adj_norm.shape)\n', (4714, 4754), True, 'import tensorflow as tf\n'), ((4817, 4880), 'scipy.sparse.identity', 'sps.identity', (['adj_norm.shape[0]'], {'dtype': 'np.float32', 'format': '"""coo"""'}), "(adj_norm.shape[0], dtype=np.float32, format='coo')\n", (4829, 4880), True, 'import scipy.sparse as sps\n'), ((5005, 5060), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'features.data', 'features.shape'], {}), '(indices, features.data, features.shape)\n', (5020, 5060), True, 'import tensorflow as tf\n'), ((5473, 5505), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['adj_ground_truth'], {}), '(adj_ground_truth)\n', (5487, 5505), True, 'import scipy.sparse as sps\n'), ((9041, 9094), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (9065, 9094), True, 'import tensorflow as tf\n'), ((9120, 9158), 
'tensorflow.keras.initializers.glorot_uniform', 'tf.keras.initializers.glorot_uniform', ([], {}), '()\n', (9156, 9158), True, 'import tensorflow as tf\n'), ((9278, 9302), 'numpy.arange', 'np.arange', (['num_snapshots'], {}), '(num_snapshots)\n', (9287, 9302), True, 'import numpy as np\n'), ((9720, 9737), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9731, 9737), False, 'from collections import defaultdict\n'), ((9760, 9777), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9771, 9777), False, 'from collections import defaultdict\n'), ((9811, 9828), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9822, 9828), False, 'from collections import defaultdict\n'), ((10014, 10031), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10025, 10031), False, 'from collections import defaultdict\n'), ((2350, 2378), 'tensorflow.abs', 'tf.abs', (['(labels - predictions)'], {}), '(labels - predictions)\n', (2356, 2378), True, 'import tensorflow as tf\n'), ((4393, 4437), 'scipy.sparse.identity', 'sps.identity', (['adj.shape[0]'], {'dtype': 'np.float32'}), '(adj.shape[0], dtype=np.float32)\n', (4405, 4437), True, 'import scipy.sparse as sps\n'), ((6530, 6575), 'tensorflow.sparse.sparse_dense_matmul', 'tf.sparse.sparse_dense_matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (6559, 6575), True, 'import tensorflow as tf\n'), ((6590, 6638), 'tensorflow.sparse.sparse_dense_matmul', 'tf.sparse.sparse_dense_matmul', (['self.adj_norm', 'xw'], {}), '(self.adj_norm, xw)\n', (6619, 6638), True, 'import tensorflow as tf\n'), ((6654, 6669), 'tensorflow.nn.relu', 'tf.nn.relu', (['axw'], {}), '(axw)\n', (6664, 6669), True, 'import tensorflow as tf\n'), ((7199, 7224), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (7208, 7224), True, 'import tensorflow as tf\n'), ((7237, 7284), 'tensorflow.sparse.sparse_dense_matmul', 'tf.sparse.sparse_dense_matmul', 
(['self.adj_norm', 'x'], {}), '(self.adj_norm, x)\n', (7266, 7284), True, 'import tensorflow as tf\n'), ((8085, 8128), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'inputs'], {'transpose_b': '(True)'}), '(inputs, inputs, transpose_b=True)\n', (8094, 8128), True, 'import tensorflow as tf\n'), ((8144, 8168), 'tensorflow.reshape', 'tf.reshape', (['matmul', '[-1]'], {}), '(matmul, [-1])\n', (8154, 8168), True, 'import tensorflow as tf\n'), ((9353, 9371), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['adj'], {}), '(adj)\n', (9366, 9371), True, 'import tensorflow as tf\n'), ((1152, 1193), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (1161, 1193), True, 'import numpy as np\n'), ((4628, 4664), 'numpy.mat', 'np.mat', (['[adj_norm.row, adj_norm.col]'], {}), '([adj_norm.row, adj_norm.col])\n', (4634, 4664), True, 'import numpy as np\n'), ((4934, 4970), 'numpy.mat', 'np.mat', (['[features.row, features.col]'], {}), '([features.row, features.col])\n', (4940, 4970), True, 'import numpy as np\n'), ((9584, 9605), 'tensorflow.reshape', 'tf.reshape', (['adj', '[-1]'], {}), '(adj, [-1])\n', (9594, 9605), True, 'import tensorflow as tf\n'), ((14066, 14079), 'statistics.mean', 'mean', (['mse_acc'], {}), '(mse_acc)\n', (14070, 14079), False, 'from statistics import mean, pvariance\n'), ((14104, 14122), 'statistics.pvariance', 'pvariance', (['mse_acc'], {}), '(mse_acc)\n', (14113, 14122), False, 'from statistics import mean, pvariance\n'), ((14154, 14173), 'statistics.mean', 'mean', (['thres_mse_acc'], {}), '(thres_mse_acc)\n', (14158, 14173), False, 'from statistics import mean, pvariance\n'), ((14204, 14228), 'statistics.pvariance', 'pvariance', (['thres_mse_acc'], {}), '(thres_mse_acc)\n', (14213, 14228), False, 'from statistics import mean, pvariance\n'), ((14255, 14268), 'statistics.mean', 'mean', (['mae_acc'], {}), '(mae_acc)\n', (14259, 14268), False, 'from statistics import mean, pvariance\n'), ((14293, 14311), 
'statistics.pvariance', 'pvariance', (['mae_acc'], {}), '(mae_acc)\n', (14302, 14311), False, 'from statistics import mean, pvariance\n'), ((14343, 14362), 'statistics.mean', 'mean', (['thres_mae_acc'], {}), '(thres_mae_acc)\n', (14347, 14362), False, 'from statistics import mean, pvariance\n'), ((14393, 14417), 'statistics.pvariance', 'pvariance', (['thres_mae_acc'], {}), '(thres_mae_acc)\n', (14402, 14417), False, 'from statistics import mean, pvariance\n'), ((14630, 14653), 'json.dump', 'json.dump', (['list', 'handle'], {}), '(list, handle)\n', (14639, 14653), False, 'import json\n'), ((14660, 14675), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (14664, 14675), False, 'from pathlib import Path\n'), ((1453, 1475), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (1461, 1475), True, 'import numpy as np\n'), ((7059, 7097), 'tensorflow.keras.initializers.glorot_uniform', 'tf.keras.initializers.glorot_uniform', ([], {}), '()\n', (7095, 7097), True, 'import tensorflow as tf\n'), ((7836, 7871), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': 'means.shape'}), '(shape=means.shape)\n', (7852, 7871), True, 'import tensorflow as tf\n'), ((7874, 7886), 'tensorflow.exp', 'tf.exp', (['stds'], {}), '(stds)\n', (7880, 7886), True, 'import tensorflow as tf\n'), ((1845, 1872), 'tensorflow.tensordot', 'tf.tensordot', (['emb1', 'emb2', '(1)'], {}), '(emb1, emb2, 1)\n', (1857, 1872), True, 'import tensorflow as tf\n'), ((10875, 10919), 'tensorflow.concat', 'tf.concat', (['[prev_w0, glorot_weights]'], {'axis': '(0)'}), '([prev_w0, glorot_weights], axis=0)\n', (10884, 10919), True, 'import tensorflow as tf\n'), ((11676, 11693), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11691, 11693), True, 'import tensorflow as tf\n'), ((2202, 2229), 'tensorflow.tensordot', 'tf.tensordot', (['emb1', 'emb2', '(1)'], {}), '(emb1, emb2, 1)\n', (2214, 2229), True, 'import tensorflow as tf\n'), ((11936, 12039), 
'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', ([], {'logits': 'reconstructed', 'labels': 'label', 'pos_weight': 'pos_weight'}), '(logits=reconstructed, labels=label,\n pos_weight=pos_weight)\n', (11976, 12039), True, 'import tensorflow as tf\n'), ((12245, 12261), 'tensorflow.square', 'tf.square', (['means'], {}), '(means)\n', (12254, 12261), True, 'import tensorflow as tf\n'), ((12274, 12286), 'tensorflow.exp', 'tf.exp', (['stds'], {}), '(stds)\n', (12280, 12286), True, 'import tensorflow as tf\n')] |
import numpy as np
from sklearn import svm
class Support_Vector_Machine():
    """Homework experiments around soft-margin SVMs (linear, polynomial and
    RBF kernels) built on top of scikit-learn's SVC."""

    def file2matrix(self, filename, target):
        """Load a whitespace-separated dataset and binarise the label column.

        Each line holds ``digit x1 x2``.  The first column becomes +1 when it
        equals ``target`` and -1 otherwise (one-vs-rest labelling).

        Returns an (m, 3) ndarray whose first column is the +/-1 label and
        whose remaining two columns are the features.
        """
        # Context manager instead of a bare open(): no file-handle leak.
        with open(filename) as fr:
            lines = fr.readlines()
        dataSet = np.zeros((len(lines), 3))
        for index, line in enumerate(lines):
            listFromLine = [float(tok) for tok in line.strip().split()]
            # First column is the digit label; binarise it one-vs-rest.
            listFromLine[0] = 1 if listFromLine[0] == target else -1
            dataSet[index] = listFromLine
        return dataSet

    def linearKernel(self, trainingSet, C):
        """Fit a linear soft-margin SVM and return ||w|| of the hyperplane."""
        model = svm.SVC(C=C, kernel='linear')
        X = trainingSet[:, 1:]
        y = trainingSet[:, :1].flatten()
        print(y)  # kept: the original prints the label vector
        model.fit(X, y)
        w = model.coef_[0]
        return np.linalg.norm(w)

    def polynomialKernel(self, filename, C, Q):
        """One-vs-rest degree-Q polynomial SVMs for the even digits 0..8.

        Returns (digit reaching the lowest in-sample error, that lowest Ein,
        largest sum of |alpha_n| over all classifiers).

        Bug fix: the original returned the *last* classifier's Ein instead of
        the minimum it was tracking in ``min_Ein``.
        """
        min_Ein = 1
        tagMin = -1
        maxsumOfAn = -1
        for target in range(0, 10, 2):
            trainingSet = self.file2matrix(filename, target)
            model = svm.SVC(C=C, kernel='poly', degree=Q)
            X = trainingSet[:, 1:]
            y = trainingSet[:, :1].flatten()
            model.fit(X, y)
            y_ = model.predict(X)
            Ein = np.mean(y != y_)  # fraction of misclassified points
            if Ein < min_Ein:
                min_Ein = Ein
                tagMin = target
            # Sum of dual coefficients' magnitudes for this classifier.
            sumOfAn = np.sum(np.fabs(model.dual_coef_[0]))
            maxsumOfAn = max(maxsumOfAn, sumOfAn)
        return tagMin, min_Ein, maxsumOfAn

    def GaussianKernel(self, trainingSet, testingSet, C):
        """Sweep gamma over {1, 10, ..., 10000} for an RBF SVM.

        Returns (gamma reaching the lowest out-of-sample error, that error
        rate on ``testingSet``).
        """
        Gamma = 1
        minGamma = 0
        minEout = 8000  # sentinel larger than any possible error count
        while Gamma <= 10000:
            model = svm.SVC(C=C, kernel='rbf', gamma=Gamma)
            model.fit(trainingSet[:, 1:], trainingSet[:, :1].flatten())
            y_ = testingSet[:, :1].flatten()
            y = model.predict(testingSet[:, 1:])
            Eout = int(np.sum(y != y_))
            if Eout < minEout:
                minEout = Eout
                minGamma = Gamma
            Gamma *= 10
        return minGamma, minEout / len(testingSet)

    def crossValidation(self, trainingSet, C):
        """100-round validation (1000 held-out rows per round) over gamma in
        {1, 10, 100, 1000} for an RBF SVM.

        Returns (gamma reaching the lowest average validation error, that
        error rate).

        Bug fix: the original returned the *last* gamma's Eval instead of the
        tracked ``minEval``.
        """
        Gamma = 1
        minGamma = 0
        minEval = 8000  # sentinel larger than any possible error count
        while Gamma <= 1000:
            Eval = 0
            model = svm.SVC(C=C, kernel='rbf', gamma=Gamma)
            for _ in range(100):
                np.random.shuffle(trainingSet)
                model.fit(trainingSet[1000:, 1:], trainingSet[1000:, :1].flatten())
                Xval = trainingSet[:1000, 1:]
                Yval = trainingSet[:1000, :1].flatten()
                y_ = model.predict(Xval)
                Eval += int(np.sum(Yval != y_))
            Eval = Eval / 100  # average error count per round
            if Eval < minEval:
                minEval = Eval
                minGamma = Gamma
            Gamma *= 10
        return minGamma, minEval / 1000
def main():
    """Run every homework question and print the answers to stdout."""
    # Shared banner line used to frame each answer block.
    bar = "**************************************************************************"
    SVM = Support_Vector_Machine()
    trainingSet = SVM.file2matrix("features.train.dat", 0)

    # Q15: norm of the weight vector of a linear SVM.
    lengthOfW = SVM.linearKernel(trainingSet, 0.01)
    print(bar)
    print("第15题答案如下:")
    print('||W||:' + str(lengthOfW))
    print(bar)
    print()

    # Q16/Q17: polynomial-kernel one-vs-rest classifiers.
    target, Ein, numofSupportVector = SVM.polynomialKernel("features.train.dat", 0.01, 2)
    print(bar)
    print("第16题答案如下:")
    print('the SVM classifiers reaches the lowest Ein:' + str(target))
    print('the lowest Ein:' + str(Ein))
    print(bar)
    print()
    print(bar)
    print("第17题答案如下:")
    print('Sum of An:' + str(numofSupportVector))
    print(bar)
    print()

    # Q19: RBF kernel, gamma swept against the held-out test set.
    testingSet = SVM.file2matrix("features.test.dat", 0)
    Gamma, Eout = SVM.GaussianKernel(trainingSet, testingSet, 0.1)
    print(bar)
    print("第19题答案如下:")
    print('the Gamma reaches the lowest Eout:' + str(Gamma))
    print('the lowest Eout:' + str(Eout))
    print(bar)
    print()

    # Q20: RBF kernel, gamma chosen by cross-validation.
    Gamma, Eval = SVM.crossValidation(trainingSet, 0.1)
    print(bar)
    print("第20题答案如下:")
    print('the Gamma reaches the lowest Eval:' + str(Gamma))
    print('the lowest Eval:' + str(Eval))
    print(bar)
    print()
if __name__=="__main__":
main() | [
"numpy.square",
"numpy.zeros",
"numpy.fabs",
"sklearn.svm.SVC",
"numpy.random.shuffle"
] | [((192, 208), 'numpy.zeros', 'np.zeros', (['(m, 3)'], {}), '((m, 3))\n', (200, 208), True, 'import numpy as np\n'), ((728, 757), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'C', 'kernel': '"""linear"""'}), "(C=C, kernel='linear')\n", (735, 757), False, 'from sklearn import svm\n'), ((1083, 1120), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'C', 'kernel': '"""poly"""', 'degree': 'Q'}), "(C=C, kernel='poly', degree=Q)\n", (1090, 1120), False, 'from sklearn import svm\n'), ((1630, 1669), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'C', 'kernel': '"""rbf"""', 'gamma': 'Gamma'}), "(C=C, kernel='rbf', gamma=Gamma)\n", (1637, 1669), False, 'from sklearn import svm\n'), ((2131, 2170), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'C', 'kernel': '"""rbf"""', 'gamma': 'Gamma'}), "(C=C, kernel='rbf', gamma=Gamma)\n", (2138, 2170), False, 'from sklearn import svm\n'), ((888, 900), 'numpy.square', 'np.square', (['w'], {}), '(w)\n', (897, 900), True, 'import numpy as np\n'), ((1386, 1414), 'numpy.fabs', 'np.fabs', (['model.dual_coef_[0]'], {}), '(model.dual_coef_[0])\n', (1393, 1414), True, 'import numpy as np\n'), ((2199, 2229), 'numpy.random.shuffle', 'np.random.shuffle', (['trainingSet'], {}), '(trainingSet)\n', (2216, 2229), True, 'import numpy as np\n')] |
import json
import json
import random
import codecs
import numpy as np
import torch
import torch.utils.data
from torch.utils.data import DataLoader
import layers
from utils import load_wav_to_torch, load_filepaths_and_text
from text import text_to_sequence, poly_yinsu_to_sequence, poly_yinsu_to_mask
from transformers import BertTokenizer
from pytorch_pretrained_bert import BertModel
SPLIT_TOKEN = "▁"
def _is_erhua(pinyin):
"""
Decide whether pinyin (without tone number) is retroflex (Erhua)
"""
if len(pinyin) <= 1 or pinyin[:-1] == 'er':
return False
elif pinyin[-2] == 'r':
return True
else:
return False
def text_and_pinyin_norm(texts, pinyins):
pinyins = pinyins.split(' ')
assert len(texts) == len(pinyins)
texts_norm = []
pinyins_nrom = []
for (text, pinyin) in zip(texts, pinyins):
# print('CEHCK (text, pinyin):', text, pinyin)
# print('CEHCK _is_erhua(pinyin):', _is_erhua(pinyin))
if text != '儿' and _is_erhua(pinyin): # erhuayin
# print('CEHCK HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
texts_norm.append(text)
texts_norm.append('儿')
pinyin_norm = pinyin[:-2] + pinyin[-1]
pinyins_nrom.append(pinyin_norm)
pinyins_nrom.append('er5')
elif text == '儿' and pinyin[:-1] == 'rr':
# print('CEHCK HERE TOO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
texts_norm.append(text)
pinyins_nrom.append('er5')
else:
texts_norm.append(text)
pinyins_nrom.append(pinyin)
assert len(texts_norm) == len(pinyins_nrom)
return ''.join(texts_norm), ' '.join(pinyins_nrom)
class G2PDatasetMask(torch.utils.data.Dataset):
    """Dataset for polyphone grapheme-to-phoneme disambiguation.

    Each sentence marks its polyphonic character with SPLIT_TOKEN and the
    parallel label file holds its gold pinyin.  A sample yields the BERT
    input ids, the position of the polyphonic character, the class index of
    its gold pinyin and the list of admissible pinyin classes.
    """

    def __init__(self, sent_file, label_file, hparams, max_length=512):
        super(G2PDatasetMask, self).__init__()
        self.max_length = max_length
        self.sents = open(sent_file).readlines()
        self.labels = open(label_file).readlines()
        assert len(self.sents) == len(self.labels)
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        with codecs.open(hparams.class2idx, 'r', 'utf-8') as handle:
            self.class2idx = json.load(handle)
        self.num_classes = len(self.class2idx)
        self.total_size = len(self.labels)
        with codecs.open(hparams.merge_cedict, 'r', 'utf-8') as handle:
            self.merge_cedict = json.load(handle)
        # Unknown characters get no candidate pronunciations.
        self.merge_cedict['[UNK]'] = []

    def __len__(self):
        return self.total_size

    def __getitem__(self, index):
        cls_tok = "[CLS]"
        sep_tok = "[SEP]"
        sent = self.sents[index].strip().replace(SPLIT_TOKEN, cls_tok)
        label = self.labels[index].strip()
        tokens = self.tokenizer.tokenize(sent)
        # The marker token sits right before the polyphonic character; after
        # the marker is removed and [CLS] is prepended, the character keeps
        # this same index.
        poly_idx = tokens.index(cls_tok) + 1
        poly_character = tokens[poly_idx]
        tokens = [tok for tok in tokens if tok != cls_tok]
        tokens = [cls_tok] + tokens + [sep_tok]
        input_ids = torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens),
                                 dtype=torch.long)
        # Normalise legacy umlaut spellings and the bare retroflex tone.
        label = label.replace('lu:', 'lv').replace('nu:', 'nv')
        if label == 'r5':
            label = 'er5'
        label_id = self.class2idx[label]
        candidates = self.merge_cedict[poly_character]
        if len(candidates) >= 1:
            output_mask = [self.class2idx[item] for item in candidates]
            return input_ids, poly_idx, label_id, output_mask, self.num_classes
        # Characters without candidates yield None; collate filters them out.
def collate_fn_mask(data):
    """Collate G2PDatasetMask samples into padded batch tensors.

    Returns (input_ids, poly_positions, label_ids, candidate_mask) where the
    candidate mask is a (batch, num_classes) float tensor holding 1.0 at
    every admissible pinyin class.  Returns None when every sample in the
    batch was filtered out (i.e. was None).
    """
    def _pad(sequences):
        # Right-pad every id sequence with zeros up to the batch maximum.
        lengths = [len(s) for s in sequences]
        padded = torch.zeros(len(sequences), max(lengths)).long()
        for row, seq in enumerate(sequences):
            padded[row, :lengths[row]] = seq[:lengths[row]]
        return padded

    def _candidate_mask(batch_masks, num_classes):
        # One row per sample; 1.0 at each admissible class index.
        out = torch.FloatTensor(len(batch_masks), num_classes[0])
        out.fill_(0.0)
        for row, class_ids in enumerate(batch_masks):
            for class_id in class_ids:
                idx = torch.LongTensor([[row, class_id]])
                out.index_put_(tuple(idx.t()), torch.ones(idx.shape[0]))
        return out

    batch = [sample for sample in data if sample is not None]
    if not batch:
        return None
    input_ids, poly_ids, label_ids, output_mask, num_classes = zip(*batch)
    input_ids = _pad(input_ids)
    poly_ids = torch.tensor(poly_ids, dtype=torch.long)
    label_ids = torch.tensor(label_ids, dtype=torch.long)
    output_mask = _candidate_mask(output_mask, num_classes)
    return input_ids, poly_ids, label_ids, output_mask
def get_dataloader(use_output_mask, sent_file, label_file, hparams,
                   batch_size, max_length, shuffle=False):
    """Build a DataLoader over G2PDatasetMask.

    Args:
        use_output_mask: must be True; no dataset variant exists for False.
        sent_file / label_file: parallel sentence and pinyin-label files.
        hparams: hyper-parameter object providing class2idx / merge_cedict.
        batch_size, max_length, shuffle: usual DataLoader knobs.

    Raises:
        ValueError: if use_output_mask is False.  Bug fix: the original left
            ``dataset`` unbound in that case and crashed later with an
            opaque UnboundLocalError.
    """
    if not use_output_mask:
        raise ValueError("get_dataloader requires use_output_mask=True")
    dataset = G2PDatasetMask(sent_file, label_file, hparams, max_length)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      collate_fn=collate_fn_mask,
                      num_workers=4)
class polyTTS_G2PDatasetMask(torch.utils.data.Dataset):
    """Polyphone G2P dataset driven by a TTS filelist.

    Each filelist entry holds (audiopath, text, pinyin-string).  A sample
    yields the BERT input ids for the normalised text, the indices of the
    characters whose pronunciation must be predicted, the gold class index
    for each of those positions, a per-character list of admissible pinyin
    classes, and the total number of pinyin classes.
    """
    def __init__(self, audiopaths_and_text, hparams, max_length=512):
        super(polyTTS_G2PDatasetMask, self).__init__()
        self.max_length = max_length
        # Filelist rows: (audiopath, text, pinyin labels).
        self.sents_and_lables = load_filepaths_and_text(audiopaths_and_text)
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        # pinyin-with-tone -> class index mapping.
        with codecs.open(hparams.class2idx, 'r', 'utf-8') as usernames:
            self.class2idx = json.load(usernames)
        self.num_classes = len(self.class2idx)
        self.total_size = len(self.sents_and_lables)
        # character -> list of candidate pronunciations.
        with codecs.open(hparams.merge_cedict, 'r', 'utf-8') as usernames:
            self.merge_cedict = json.load(usernames)
        # Unknown characters get no candidate pronunciations.
        self.merge_cedict['[UNK]'] = []
    def __len__(self):
        return self.total_size
    def __getitem__(self, index):
        # cls/sep markers kept from the original implementation although the
        # tokenised sequence below is built without adding them.
        cls_tok = "[CLS]"
        sep_tok = "[SEP]"
        sent = self.sents_and_lables[index][1]
        label = self.sents_and_lables[index][2]
        # Align text and pinyin (Erhua splitting) before tokenisation.
        sent, label = text_and_pinyin_norm(sent, label)
        toks = self.tokenizer.tokenize(sent)
        pinyins = label.strip().split(' ')
        # Normalise the bare retroflex tone marker.
        pinyins = ['er5' if i == 'r5' else i for i in pinyins]
        input_ids = self.tokenizer.convert_tokens_to_ids(toks)
        input_ids = torch.tensor(input_ids, dtype=torch.long)
        label_idxs = []
        poly_idxs = []
        output_masks = []
        # NOTE(review): positions in `sent` are assumed to line up 1:1 with
        # BERT tokens and with `pinyins` -- confirm for multi-token chars.
        for idx, char in enumerate(sent):
            prons = self.merge_cedict[char]
            if len(prons) >= 1:
                # Any character with candidates is treated as a prediction
                # target; its gold class and candidate set are recorded.
                poly_idxs.append(idx)
                label_idxs.append(self.class2idx[pinyins[idx]])
                output_mask = []
                for output_mask_item in prons:
                    output_mask.append(self.class2idx[output_mask_item])
                output_masks.append(output_mask)
            else:
                # NOTE(review): this branch only runs when `prons` is empty
                # (e.g. '[UNK]'), so prons[0] would raise IndexError --
                # confirm every character in `sent` is covered by the dict.
                output_mask = []
                output_mask.append(self.class2idx[prons[0]])
                output_masks.append(output_mask)
        return input_ids, poly_idxs, label_idxs, output_masks, self.num_classes
def polyTTS_collate_fn_mask(data):
    """Collate polyTTS_G2PDatasetMask samples into padded batch tensors.

    Returns (input_ids, poly_flags, label_ids, candidate_mask):
      * input_ids      -- (B, L) zero-padded token ids
      * poly_flags     -- (B, L) bool tensor, True at polyphone positions
      * label_ids      -- (B, L) gold class at polyphone positions, else 0
      * candidate_mask -- (B, L, C) float tensor, 1.0 at admissible classes
    Returns None when every sample in the batch was filtered out.
    """
    def _pad(sequences):
        # Right-pad the id sequences; also report the batch maximum length.
        lengths = [len(s) for s in sequences]
        longest = max(lengths)
        padded = torch.zeros(len(sequences), longest).long()
        for row, seq in enumerate(sequences):
            padded[row, :lengths[row]] = seq[:lengths[row]]
        return padded, longest

    def _poly_flags_and_labels(poly_ids, label_ids, max_len):
        flags = torch.zeros(len(poly_ids), max_len).bool()
        labels = torch.LongTensor(len(label_ids), max_len)
        labels.zero_()
        for row in range(len(poly_ids)):
            for pos, cls in zip(poly_ids[row], label_ids[row]):
                idx = torch.LongTensor([[row, pos]])
                flags.index_put_(tuple(idx.t()),
                                 torch.ones(idx.shape[0]).bool())
                labels.index_put_(tuple(idx.t()), torch.LongTensor([cls]))
        return flags.type(torch.BoolTensor), labels

    def _candidate_mask(output_mask, max_len, num_classes):
        out = torch.FloatTensor(len(output_mask), max_len, num_classes[0])
        out.zero_()
        for row, per_position in enumerate(output_mask):
            for pos, class_ids in enumerate(per_position):
                for cls in class_ids:
                    idx = torch.LongTensor([[row, pos, cls]])
                    out.index_put_(tuple(idx.t()), torch.ones(idx.shape[0]))
        return out

    batch = [sample for sample in data if sample is not None]
    if not batch:
        return None
    input_ids, poly_ids, label_ids, output_mask, num_classes = zip(*batch)
    input_ids, max_input_len = _pad(input_ids)
    poly_ids, label_ids = _poly_flags_and_labels(poly_ids, label_ids, max_input_len)
    output_mask = _candidate_mask(output_mask, max_input_len, num_classes)
    return input_ids, poly_ids, label_ids, output_mask
def polyTTS_get_dataloader(use_output_mask, audiopaths_and_text, hparams,
                           batch_size, max_length, shuffle=False):
    """Build a DataLoader over polyTTS_G2PDatasetMask.

    Args:
        use_output_mask: must be True; no dataset variant exists for False.
        audiopaths_and_text: TTS filelist of (audiopath, text, pinyin) rows.
        hparams: hyper-parameter object providing class2idx / merge_cedict.
        batch_size, max_length, shuffle: usual DataLoader knobs.

    Raises:
        ValueError: if use_output_mask is False.  Bug fix: the original left
            ``dataset`` unbound in that case and crashed later with an
            opaque UnboundLocalError.
    """
    if not use_output_mask:
        raise ValueError("polyTTS_get_dataloader requires use_output_mask=True")
    dataset = polyTTS_G2PDatasetMask(audiopaths_and_text, hparams, max_length)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      collate_fn=polyTTS_collate_fn_mask,
                      num_workers=1)
class TextMelLoader(torch.utils.data.Dataset):  # inherits torch Dataset; ctor takes four args
    """
    1) loads audio,text pairs
    2) normalizes text and converts them to sequences of one-hot vectors
    3) computes mel-spectrograms from audio files.
    """
    def __init__(self, audiopaths_and_text, polyphone_dict_file, mask_dict_file, hparams):
        # polyphone_dict_file / mask_dict_file are accepted for interface
        # compatibility; the dictionaries actually used come from hparams.
        self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
        self.text_cleaners = hparams.text_cleaners
        self.max_wav_value = hparams.max_wav_value
        self.sampling_rate = hparams.sampling_rate
        self.load_mel_from_disk = hparams.load_mel_from_disk
        self.stft = layers.TacotronSTFT(
            hparams.filter_length, hparams.hop_length, hparams.win_length,
            hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
            hparams.mel_fmax)
        # pinyin-with-tone -> class index mapping.
        with codecs.open(hparams.class2idx, 'r', 'utf-8') as usernames:
            self.class2idx = json.load(usernames)
        print("num classes: {}".format(len(self.class2idx)))
        # character -> list of candidate pronunciations.
        with codecs.open(hparams.merge_cedict, 'r', 'utf-8') as usernames:
            self.merge_cedict = json.load(usernames)
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        random.seed(hparams.seed)
        random.shuffle(self.audiopaths_and_text)

    def get_mel_text_pair(self, audiopath_and_text):
        """Turn one filelist row into (ids, poly positions, labels, masks, mel)."""
        audiopath, text, poly_yinsu = audiopath_and_text[0], audiopath_and_text[1], audiopath_and_text[2]
        # Align text and pinyin (Erhua splitting) before tokenisation.
        text, poly_yinsu = text_and_pinyin_norm(text, poly_yinsu)
        input_ids, poly_idxs, label_idxs, output_masks = self.get_poly_label(text, poly_yinsu)
        mel = self.get_mel(audiopath)
        return (input_ids, poly_idxs, label_idxs, output_masks, mel)

    def get_mel(self, filename):
        """Load a mel spectrogram, either computed from a wav or from disk."""
        if not self.load_mel_from_disk:
            audio, sampling_rate = load_wav_to_torch(filename)
            if sampling_rate != self.stft.sampling_rate:
                # Bug fix: the original format string had three {} slots but
                # only two arguments, so raising it crashed with IndexError.
                raise ValueError("{} SR doesn't match target {} SR".format(
                    sampling_rate, self.stft.sampling_rate))
            audio_norm = audio / self.max_wav_value
            audio_norm = audio_norm.unsqueeze(0)
            audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
            melspec = self.stft.mel_spectrogram(audio_norm)
            melspec = torch.squeeze(melspec, 0)
        else:
            # Stored as (time, n_mels); transpose into (n_mels, time).
            melspec = torch.from_numpy(np.load(filename)).transpose(0, 1)
            assert melspec.size(0) == self.stft.n_mel_channels, (
                'Mel dimension mismatch: given {}, expected {}'.format(
                    melspec.size(0), self.stft.n_mel_channels))
        return melspec

    def get_poly_label(self, text, poly_yinsu):
        """Tokenise text and build per-character labels and candidate masks."""
        toks = self.tokenizer.tokenize(text)
        pinyins = poly_yinsu.strip().split(' ')
        # Normalise the bare retroflex tone marker.
        pinyins = ['er5' if i == 'r5' else i for i in pinyins]
        assert len(toks) == len(pinyins)
        input_ids = self.tokenizer.convert_tokens_to_ids(toks)
        input_ids = torch.tensor(input_ids, dtype=torch.long)
        label_idxs = []
        poly_idxs = []
        output_masks = []
        for idx, char in enumerate(text):
            prons = self.merge_cedict[char]
            if len(prons) >= 1:
                # Any character with candidates is treated as a prediction
                # target; its gold class and candidate set are recorded.
                poly_idxs.append(idx)
                label_idxs.append(self.class2idx[pinyins[idx]])
                output_mask = []
                for output_mask_item in prons:
                    output_mask.append(self.class2idx[output_mask_item])
                output_masks.append(output_mask)
            else:
                # NOTE(review): this branch only runs when `prons` is empty,
                # so prons[0] would raise IndexError -- confirm every
                # character in `text` is covered by merge_cedict.
                output_mask = []
                label_idxs.append(self.class2idx[pinyins[idx]])
                output_mask.append(self.class2idx[prons[0]])
                output_masks.append(output_mask)
        label_idxs = torch.tensor(label_idxs, dtype=torch.long)
        return input_ids, poly_idxs, label_idxs, output_masks

    def __len__(self):
        return len(self.audiopaths_and_text)

    def __getitem__(self, index):
        return self.get_mel_text_pair(self.audiopaths_and_text[index])
class TextMelCollate():
    """Zero-pads model inputs and targets based on number of frames per step."""

    def __init__(self, n_frames_per_step, n_pinyin_symbols):
        self.n_frames_per_step = n_frames_per_step
        self.n_pinyin_symbols = n_pinyin_symbols

    def __call__(self, batch):
        """Collate (input_ids, poly_idxs, label_idxs, output_masks, mel)
        samples, sorted by decreasing text length, into padded tensors.

        Returns (input_lengths, poly_input_lengths, inputs, poly_flags,
        labels, candidate_mask, mels, gates, output_lengths).
        """
        batch_size = len(batch)
        # Sort samples by token count, longest first.
        input_lengths, order = torch.sort(
            torch.LongTensor([len(sample[0]) for sample in batch]),
            dim=0, descending=True)
        max_input_len = int(input_lengths[0])

        # Token ids, right-padded with zeros.
        inputs_padded = torch.LongTensor(batch_size, max_input_len)
        inputs_padded.zero_()
        for row in range(len(order)):
            token_ids = batch[order[row]][0]
            inputs_padded[row, :token_ids.shape[0]] = token_ids

        # Boolean polyphone positions plus per-sample polyphone counts.
        poly_input_lengths = []
        polys_padded = torch.zeros(batch_size, max_input_len).bool()
        for row in range(len(order)):
            positions = batch[order[row]][1]
            for pos in positions:
                idx = torch.LongTensor([[row, pos]])
                polys_padded.index_put_(tuple(idx.t()),
                                        torch.ones(idx.shape[0]).bool())
            poly_input_lengths.append(len(positions))
        polys_padded = polys_padded.type(torch.BoolTensor)
        poly_input_lengths = torch.tensor(poly_input_lengths, dtype=torch.long)

        # Gold pinyin class indices, aligned with the polyphone positions.
        labels_padded = torch.LongTensor(batch_size, max_input_len)
        labels_padded.zero_()
        for row in range(len(order)):
            label_ids = batch[order[row]][2]
            labels_padded[row, :label_ids.shape[0]] = label_ids

        # (B, L, C) candidate mask: 1.0 at every admissible class.
        mask_padded = torch.FloatTensor(batch_size, max_input_len,
                                        self.n_pinyin_symbols)
        mask_padded.zero_()
        for row in range(len(order)):
            per_position = batch[order[row]][3]
            for pos in range(len(per_position)):
                for cls in per_position[pos]:
                    idx = torch.LongTensor([[row, pos, cls]])
                    mask_padded.index_put_(tuple(idx.t()),
                                           torch.ones(idx.shape[0]))

        # Mel spectrograms, right-padded to a multiple of n_frames_per_step.
        num_mels = batch[0][4].size(0)
        max_target_len = max(sample[4].size(1) for sample in batch)
        if max_target_len % self.n_frames_per_step != 0:
            max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
        assert max_target_len % self.n_frames_per_step == 0
        mel_padded = torch.FloatTensor(batch_size, num_mels, max_target_len)
        mel_padded.zero_()
        gate_padded = torch.FloatTensor(batch_size, max_target_len)
        gate_padded.zero_()
        output_lengths = torch.LongTensor(batch_size)
        for row in range(len(order)):
            mel = batch[order[row]][4]
            mel_padded[row, :, :mel.size(1)] = mel
            # Gate target: 1 from the last real frame onwards.
            gate_padded[row, mel.size(1) - 1:] = 1
            output_lengths[row] = mel.size(1)

        return input_lengths, poly_input_lengths, inputs_padded, polys_padded, \
            labels_padded, mask_padded, mel_padded, gate_padded, output_lengths
"torch.ones",
"numpy.load",
"json.load",
"codecs.open",
"torch.utils.data.DataLoader",
"utils.load_wav_to_torch",
"utils.load_filepaths_and_text",
"random.shuffle",
"torch.argmax",
"torch.autograd.Variable",
"torch.LongTensor",
"torch.squeeze",
"transformers.BertTokenizer.from_pretrained",
... | [((2178, 2228), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-chinese"""'], {}), "('bert-base-chinese')\n", (2207, 2228), False, 'from transformers import BertTokenizer\n'), ((3259, 3300), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (3271, 3300), False, 'import torch\n'), ((5565, 5605), 'torch.tensor', 'torch.tensor', (['poly_ids'], {'dtype': 'torch.long'}), '(poly_ids, dtype=torch.long)\n', (5577, 5605), False, 'import torch\n'), ((5626, 5667), 'torch.tensor', 'torch.tensor', (['label_ids'], {'dtype': 'torch.long'}), '(label_ids, dtype=torch.long)\n', (5638, 5667), False, 'import torch\n'), ((6051, 6158), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'collate_fn': 'collate_fn_mask', 'num_workers': '(4)'}), '(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=\n collate_fn_mask, num_workers=4)\n', (6061, 6158), False, 'from torch.utils.data import DataLoader\n'), ((6557, 6601), 'utils.load_filepaths_and_text', 'load_filepaths_and_text', (['audiopaths_and_text'], {}), '(audiopaths_and_text)\n', (6580, 6601), False, 'from utils import load_wav_to_torch, load_filepaths_and_text\n'), ((6784, 6834), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-chinese"""'], {}), "('bert-base-chinese')\n", (6813, 6834), False, 'from transformers import BertTokenizer\n'), ((8871, 8912), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (8883, 8912), False, 'import torch\n'), ((14079, 14194), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'collate_fn': 'polyTTS_collate_fn_mask', 'num_workers': '(1)'}), '(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=\n polyTTS_collate_fn_mask, num_workers=1)\n', (14089, 14194), False, 
'from torch.utils.data import DataLoader\n'), ((14741, 14785), 'utils.load_filepaths_and_text', 'load_filepaths_and_text', (['audiopaths_and_text'], {}), '(audiopaths_and_text)\n', (14764, 14785), False, 'from utils import load_wav_to_torch, load_filepaths_and_text\n'), ((15020, 15195), 'layers.TacotronSTFT', 'layers.TacotronSTFT', (['hparams.filter_length', 'hparams.hop_length', 'hparams.win_length', 'hparams.n_mel_channels', 'hparams.sampling_rate', 'hparams.mel_fmin', 'hparams.mel_fmax'], {}), '(hparams.filter_length, hparams.hop_length, hparams.\n win_length, hparams.n_mel_channels, hparams.sampling_rate, hparams.\n mel_fmin, hparams.mel_fmax)\n', (15039, 15195), False, 'import layers\n'), ((15859, 15909), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-chinese"""'], {}), "('bert-base-chinese')\n", (15888, 15909), False, 'from transformers import BertTokenizer\n'), ((15919, 15944), 'random.seed', 'random.seed', (['hparams.seed'], {}), '(hparams.seed)\n', (15930, 15944), False, 'import random\n'), ((15953, 15993), 'random.shuffle', 'random.shuffle', (['self.audiopaths_and_text'], {}), '(self.audiopaths_and_text)\n', (15967, 15993), False, 'import random\n'), ((18303, 18344), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (18315, 18344), False, 'import torch\n'), ((19363, 19405), 'torch.tensor', 'torch.tensor', (['label_idxs'], {'dtype': 'torch.long'}), '(label_idxs, dtype=torch.long)\n', (19375, 19405), False, 'import torch\n'), ((23548, 23598), 'torch.tensor', 'torch.tensor', (['poly_input_lengths'], {'dtype': 'torch.long'}), '(poly_input_lengths, dtype=torch.long)\n', (23560, 23598), False, 'import torch\n'), ((25317, 25345), 'torch.argmax', 'torch.argmax', (['mask_padded', '(2)'], {}), '(mask_padded, 2)\n', (25329, 25345), False, 'import torch\n'), ((2243, 2287), 'codecs.open', 'codecs.open', (['hparams.class2idx', '"""r"""', '"""utf-8"""'], {}), 
"(hparams.class2idx, 'r', 'utf-8')\n", (2254, 2287), False, 'import codecs\n'), ((2331, 2351), 'json.load', 'json.load', (['usernames'], {}), '(usernames)\n', (2340, 2351), False, 'import json\n'), ((2457, 2504), 'codecs.open', 'codecs.open', (['hparams.merge_cedict', '"""r"""', '"""utf-8"""'], {}), "(hparams.merge_cedict, 'r', 'utf-8')\n", (2468, 2504), False, 'import codecs\n'), ((2551, 2571), 'json.load', 'json.load', (['usernames'], {}), '(usernames)\n', (2560, 2571), False, 'import json\n'), ((6849, 6893), 'codecs.open', 'codecs.open', (['hparams.class2idx', '"""r"""', '"""utf-8"""'], {}), "(hparams.class2idx, 'r', 'utf-8')\n", (6860, 6893), False, 'import codecs\n'), ((6937, 6957), 'json.load', 'json.load', (['usernames'], {}), '(usernames)\n', (6946, 6957), False, 'import json\n'), ((7073, 7120), 'codecs.open', 'codecs.open', (['hparams.merge_cedict', '"""r"""', '"""utf-8"""'], {}), "(hparams.merge_cedict, 'r', 'utf-8')\n", (7084, 7120), False, 'import codecs\n'), ((7167, 7187), 'json.load', 'json.load', (['usernames'], {}), '(usernames)\n', (7176, 7187), False, 'import json\n'), ((15493, 15537), 'codecs.open', 'codecs.open', (['hparams.class2idx', '"""r"""', '"""utf-8"""'], {}), "(hparams.class2idx, 'r', 'utf-8')\n", (15504, 15537), False, 'import codecs\n'), ((15581, 15601), 'json.load', 'json.load', (['usernames'], {}), '(usernames)\n', (15590, 15601), False, 'import json\n'), ((15718, 15765), 'codecs.open', 'codecs.open', (['hparams.merge_cedict', '"""r"""', '"""utf-8"""'], {}), "(hparams.merge_cedict, 'r', 'utf-8')\n", (15729, 15765), False, 'import codecs\n'), ((15812, 15832), 'json.load', 'json.load', (['usernames'], {}), '(usernames)\n', (15821, 15832), False, 'import json\n'), ((17107, 17134), 'utils.load_wav_to_torch', 'load_wav_to_torch', (['filename'], {}), '(filename)\n', (17124, 17134), False, 'from utils import load_wav_to_torch, load_filepaths_and_text\n'), ((17458, 17514), 'torch.autograd.Variable', 'torch.autograd.Variable', 
(['audio_norm'], {'requires_grad': '(False)'}), '(audio_norm, requires_grad=False)\n', (17481, 17514), False, 'import torch\n'), ((17597, 17622), 'torch.squeeze', 'torch.squeeze', (['melspec', '(0)'], {}), '(melspec, 0)\n', (17610, 17622), False, 'import torch\n'), ((4915, 4954), 'torch.LongTensor', 'torch.LongTensor', (['[[i, mask_character]]'], {}), '([[i, mask_character]])\n', (4931, 4954), False, 'import torch\n'), ((4979, 5005), 'torch.ones', 'torch.ones', (['index.shape[0]'], {}), '(index.shape[0])\n', (4989, 5005), False, 'import torch\n'), ((10864, 10899), 'torch.LongTensor', 'torch.LongTensor', (['[[i, poly_id[j]]]'], {}), '([[i, poly_id[j]]])\n', (10880, 10899), False, 'import torch\n'), ((11131, 11162), 'torch.LongTensor', 'torch.LongTensor', (['[label_id[j]]'], {}), '([label_id[j]])\n', (11147, 11162), False, 'import torch\n'), ((22887, 22922), 'torch.LongTensor', 'torch.LongTensor', (['[[i, poly_id[j]]]'], {}), '([[i, poly_id[j]]])\n', (22903, 22922), False, 'import torch\n'), ((12616, 12661), 'torch.LongTensor', 'torch.LongTensor', (['[[i, j, mask_character[k]]]'], {}), '([[i, j, mask_character[k]]])\n', (12632, 12661), False, 'import torch\n'), ((12690, 12716), 'torch.ones', 'torch.ones', (['index.shape[0]'], {}), '(index.shape[0])\n', (12700, 12716), False, 'import torch\n'), ((24665, 24710), 'torch.LongTensor', 'torch.LongTensor', (['[[i, j, mask_character[k]]]'], {}), '([[i, j, mask_character[k]]])\n', (24681, 24710), False, 'import torch\n'), ((24739, 24765), 'torch.ones', 'torch.ones', (['index.shape[0]'], {}), '(index.shape[0])\n', (24749, 24765), False, 'import torch\n'), ((10930, 10956), 'torch.ones', 'torch.ones', (['index.shape[0]'], {}), '(index.shape[0])\n', (10940, 10956), False, 'import torch\n'), ((17714, 17731), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (17721, 17731), True, 'import numpy as np\n'), ((22953, 22979), 'torch.ones', 'torch.ones', (['index.shape[0]'], {}), '(index.shape[0])\n', (22963, 22979), False, 
'import torch\n')] |
import numpy as np
# Degree of the fitted polynomial (time + 1 coefficients).
time = 3
# aa: smoothing weight used while fitting during training.
aa = 0.8
# bb: smoothing weight used when updating an existing fit.
bb = 0.9
def polyamorphic(data, param, *args):
    """Fit a degree-`time` polynomial to `data` and blend it into `param`.

    The x-axis is len(data), ..., 1 (newest observation at x == 1).  When the
    first `time + 1` coefficients sum to zero (i.e. `param` looks
    uninitialized) the fresh fit is added in directly; otherwise it is
    exponentially smoothed with weight `bb` (update mode, any extra
    positional argument given) or `aa` (training mode, no extra arguments).

    Args:
        data: sequence of observed values, newest last.
        param: mutable sequence of at least `time + 1` coefficients in
            ascending order (param[i] multiplies x**i); updated in place.
        *args: any extra argument switches to update-mode smoothing (bb).

    Returns:
        The same `param` object, updated in place.
    """
    # polyfit accepts any sequence, so no intermediate list is needed.
    z1 = np.polyfit(range(len(data), 0, -1), data, time)
    p1 = np.poly1d(z1)  # p1[i] is the coefficient of x**i
    # A zero coefficient sum marks an uninitialized parameter vector.
    # (Generalized from the hard-coded param[0]+param[1]+param[2]+param[3].)
    uninitialized = sum(param[:time + 1]) == 0
    if uninitialized:
        for i in range(time + 1):
            param[i] = p1[i] + param[i]
    else:
        weight = bb if args else aa
        for i in range(time + 1):
            param[i] = (1 - weight) * p1[i] + weight * param[i]
    return param
def polyamorphic_calculation(polynomial, num_max):
    """Evaluate the cubic with ascending coefficients `polynomial` at
    x = num_max, num_max - 1, ..., 1 and return the values as a list."""
    c0, c1, c2, c3 = polynomial[0], polynomial[1], polynomial[2], polynomial[3]
    return [c3 * x ** 3 + c2 * x ** 2 + c1 * x + c0
            for x in range(num_max, 0, -1)]
def polyamorphic_update(result, data, param, flag):
    """Refit `param` in update mode when the tail of `result` is a run of
    `flag` consecutive integers.

    `result` is temporarily reversed so its last `flag` elements can be
    checked against result[-1], result[-1] + 1, ...; the original order is
    always restored before returning.  On success the polynomial is refitted
    via polyamorphic(..., 1) (update-mode smoothing) and a notice is printed.

    Args:
        result: mutable sequence of integers; checked, left unchanged.
        data: observation sequence forwarded to polyamorphic().
        param: coefficient list updated in place by polyamorphic().
        flag: length of the consecutive run required to trigger an update.
    """
    if len(result) < flag:
        return
    result.reverse()
    for i in range(result[0], result[0] + flag):
        # Any break in the consecutive run aborts the update.
        if i != result[i - result[0]]:
            result.reverse()
            return
    polyamorphic(data, param, 1)
    print('Polyamorphic has Updated!!')
    result.reverse()
    # A trailing no-op bare `result` expression was removed (dead code).
| [
"numpy.poly1d"
] | [((208, 221), 'numpy.poly1d', 'np.poly1d', (['z1'], {}), '(z1)\n', (217, 221), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys
import os
import gzip
import pandas as pd
import argparse
import time
import numpy as np
# --------------------------------
# index_hopping.py
# Created on: March 2019
# Author: <NAME> and Bioinformatics Services (TxGen Lab)
#
# Releases
#
# v05.2 - Accepts parameter "demuxed_reads" to compute stats directly
# Single function to show stats to screen and file
# Provides now complete stats on number of reads and index hopping rate
#
# v05.1 - Added Saving of files at the end
# v05.0 - Computes 0 and 1 mismatches - splits list of barcodes on 0m and 1m
#
# v04.0 - Removed parameter -m - Now it expects ALWAYS a undetermined.fastq.gz file from 0m demux
# It will always process up to 1 mismatch
#
# v03.4 - Added parameter -r (row) to indicate where the data starts in the Sample Sheet
# v03.2 - Added quick search for 0 mismatch using hash table with valid barcodes
# Split has tables on two, one for i5 and one for i7
# Added --max parameter
# Reduced time to 11 sec / 1M reads (0 mismatches) and 13 sec / 1M reads (1 mismatch)
#
# v03.1 - Added -m parameter
#
# v03 - Added saving list of compatible (but invalid) barcode pairs
#
# Hashing - From https://interactivepython.org/runestone/static/pythonds/SortSearch/Hashing.html
# Reading - From https://darrenjw.wordpress.com/tag/biopython/
# --------------------------------
def line_to_record(line=None):
    """Parse the barcode pair from a fastq header line.

    The header's last ':'-separated field holds "<i7>+<i5>"; both halves
    are returned with trailing whitespace stripped.
    """
    barcode_field = str(line.split(':')[-1])
    halves = barcode_field.split("+")
    return {"index1": halves[0].rstrip(), "index2": halves[1].rstrip()}
def print_results_both(time_start, linesProcessed, MismatchesMatrixNoValid, MismatchesMatrixValid, myStatus, args):
    """Emit the run statistics twice: once into <output>_log.txt, once to stdout."""
    log_name = args.output + "_log.txt"
    with open(log_name, "w") as log_file:
        print_results(time_start, linesProcessed, MismatchesMatrixNoValid,
                      MismatchesMatrixValid, log_file, myStatus, args)
    print_results(time_start, linesProcessed, MismatchesMatrixNoValid,
                  MismatchesMatrixValid, sys.stdout, myStatus, args)
def print_results(time_start,linesProcessed, MismatchesMatrixNoValid, MismatchesMatrixValid,f,myStatus,args):
    """Write the full statistics report (read counts, index-hopping rates and
    per-mismatch breakdowns) to the open text handle `f`.

    Args:
        time_start: epoch seconds when processing began (for throughput).
        linesProcessed: number of reads taken from the undetermined file.
        MismatchesMatrixNoValid: counts of compatible but invalid (swapped)
            barcode pairs, indexed by (i7 mismatches, i5 mismatches).
        MismatchesMatrixValid: counts of compatible valid pairs, same indexing.
        f: writable text handle (log file or sys.stdout).
        myStatus: short status string shown in the report header.
        args: parsed CLI arguments (demuxReads, ignorevalid, output, input).
    """
    time_end = time.time()
    # Seconds spent per million processed barcodes.
    time_million = 1000000 * (time_end - time_start) / linesProcessed
    demuxReads = int(args.demuxReads)
    ignoreValid = args.ignorevalid
    outputPrefix = args.output
    fastqFileName = args.input
    outputFileName = outputPrefix + "_reads.fastq"
    # NOTE(review): logFileName is computed but not used in this function.
    logFileName = outputPrefix + "_log.txt"
    if ignoreValid:
        saveValidStr = "No"
    else:
        saveValidStr = "Yes"
    #Counts the amount of valid compatible barcodes (no swap, but removed by demux because of 1 mismatch)
    MMShape = MismatchesMatrixValid.shape
    mySumValid = 0
    for i in range(0, MMShape[0]):
        for j in range(0, MMShape[1]):
            mySumValid = mySumValid + MismatchesMatrixValid[i, j]
    #Counts the amount of non-valid compatible barcodes (swap)
    MMShape = MismatchesMatrixNoValid.shape
    mySumNoValid = 0
    for i in range(0,MMShape[0]):
        for j in range(0,MMShape[1]):
            mySumNoValid = mySumNoValid + MismatchesMatrixNoValid[i,j]
    # Reads that are compatible and no valid (wrong pair), with 0 mismatches
    mySumNoValid0m = MismatchesMatrixNoValid[0,0]
    # Compatible Reads = Compatible Valid + Compatible Non Valid
    linesCompatible = mySumNoValid + mySumValid
    # --- Run configuration and throughput ---
    f.write("\n")
    f.write("Status ................................ : " + myStatus + "\n")
    f.write("Input File ............................ : " + fastqFileName + "\n")
    f.write("Output File ........................... : " + outputFileName + "\n")
    f.write("Save Compatible Valid Barcodes ........ : " + saveValidStr + "\n")
    f.write("Seconds (Per Million barcodes) ........ : %.2f" % (time_million) + "\n")
    f.write("Seconds (Total Processing Time) ....... : %.2f" % (time_end - time_start) + "\n")
    f.write("\n")
    # --- Overall barcode accounting (A/B/C/D partition) ---
    f.write("A = Compatible Valid Barcodes ......... : " + "{:>12.0f}".format(mySumValid) + "\n")
    f.write("B = Compatible Non Valid Barcodes ..... : " + "{:>12.0f}".format(mySumNoValid) + "\n")
    f.write("C = No Compatible Barcodes ............ : " + "{:>12.0f}".format(linesProcessed - linesCompatible) + "\n")
    f.write("Barcodes in Undetermined File = A+B+C . : " + "{:>12.0f}".format(linesProcessed) + "\n")
    f.write("Compatible Barcodes = A+B ............. : " + "{:>12.0f}".format(linesCompatible) + "\n")
    if demuxReads>0:
        f.write("D = Demuxed Valid Barcodes ............ : " + "{:>12.0f}".format(demuxReads) + "\n")
        f.write("Total Barcodes = A+B+C+D .............. : " + "{:>12.0f}".format(linesProcessed + demuxReads) + "\n")
    f.write("\n")
    # --- Reads recovered by allowing 1 mismatch at demux ---
    if demuxReads>0:
        f.write("Reads Demuxed with 0m ................. : " + "{:>12.0f}".format(demuxReads) + "\n")
        f.write("Reads Demuxed with 1m ................. : " + "{:>12.0f}".format(demuxReads+mySumValid) + "\n")
    f.write("Reads lost when using 0m at Demux ..... : " + "{:>12.0f}".format(mySumValid) + "\n")
    if demuxReads>0:
        f.write("Read lost rate when using 0m at Demux . : " + "{:>12.3f}".format( 100*mySumValid/(demuxReads+mySumValid)) + " %" + "\n")
    f.write("\n")
    # --- Index-hopping rate at 0 mismatches ---
    f.write("0m - Reads with Index Hopping ........ : " + "{:>12.0f}".format(mySumNoValid0m) + "\n")
    if demuxReads>0:
        f.write("0m - Reads Demuxed .................... : " + "{:>12.0f}".format(demuxReads) + "\n")
        f.write("0m - Reads Total ...................... : " + "{:>12.0f}".format(demuxReads + mySumNoValid0m) + "\n")
        f.write("0m - Index Hopping Rate ............... : " + "{:>12.3f}".format( 100*mySumNoValid0m/(demuxReads+mySumNoValid0m)) + " %" + "\n")
    f.write("\n")
    # --- Index-hopping rate at up to 1 mismatch ---
    f.write("1m - Reads with Index Hopping ......... : " + "{:>12.0f}".format(mySumNoValid) + "\n")
    if demuxReads>0:
        f.write("1m - Reads Demuxed .................... : " + "{:>12.0f}".format(demuxReads+mySumValid) + "\n")
        f.write("1m - Reads Total ...................... : " + "{:>12.0f}".format(demuxReads + mySumValid + mySumNoValid) + "\n")
        f.write("1m - Index Hopping Rate ............... : " + "{:>12.3f}".format( 100*mySumNoValid/(demuxReads + mySumValid + mySumNoValid) ) + " %" + "\n")
    f.write("\n")
    # --- Per-(i7,i5)-mismatch breakdown of compatible pairs ---
    MMShape = MismatchesMatrixValid.shape
    for i in range(0,MMShape[0]):
        for j in range(0,MMShape[1]):
            f.write("Compatible Valid (i7="+str(i)+"m,i5="+str(j)+"m) ........ : " + "{:>12.0f}".format(MismatchesMatrixValid[i,j]) + "\n")
    f.write("Compatible Valid Total ................ : " + "{:>12.0f}".format(mySumValid) + "\n")
    f.write("\n")
    MMShape = MismatchesMatrixNoValid.shape
    for i in range(0,MMShape[0]):
        for j in range(0,MMShape[1]):
            f.write("Compatible Non Valid (i7="+str(i)+"m,i5="+str(j)+"m) .... : " + "{:>12.0f}".format(MismatchesMatrixNoValid[i,j]) + "\n")
    f.write("Compatible Non Valid Total ............ : " + "{:>12.0f}".format(mySumNoValid) + "\n")
def save_results(outputPrefix, Prefix, ConfusionMatrix, index7list, index5list):
    """Persist one confusion matrix (Valid or NoValid) to CSV files.

    Writes four files, all named "<outputPrefix>_<Prefix>_...":
      *_confusion_list.csv   one row per (i7, i5) cell with sequences/counts
      *_confusion_matrix.csv the raw matrix; row = i7 index, column = i5 index
      *_summ_i7.csv          per-i7 totals and number of distinct i5 partners
      *_summ_i5.csv          per-i5 totals and number of distinct i7 partners

    Args:
        outputPrefix: path prefix for every output file.
        Prefix: e.g. "Valid" or "NoValid", inserted into each file name.
        ConfusionMatrix: 2-D array of pair counts (rows i7, columns i5).
        index7list: i7 barcode sequences, indexed like the matrix rows.
        index5list: i5 barcode sequences, indexed like the matrix columns.
    """
    # `with` closes each file on exit; the explicit f.close() calls that used
    # to follow every block were redundant and have been removed.
    cfFile = outputPrefix + "_" + Prefix + "_confusion_list.csv"
    with open(cfFile, 'w') as f:
        f.write("i7Id,i7Seq,i5SeqPair,i5Id,i5Seq,i7SeqPair,Count\n")
        MMShape = ConfusionMatrix.shape
        for i in range(0,MMShape[0]):
            for j in range(0,MMShape[1]):
                f.write(str(i) + "," + str(index7list[i]) + "," + str(index5list[i]) + "," + str(j) + "," + str(index5list[j]) + "," + str(index7list[j]) + "," + str(ConfusionMatrix[i,j]) + "\n")
    cfFile = outputPrefix + "_" + Prefix + "_confusion_matrix.csv"
    with open(cfFile, 'w') as f:
        MMShape = ConfusionMatrix.shape
        f.write("Index")
        for j in range(0, MMShape[1]):
            f.write(",")
            f.write(str(j))
        f.write("\n")
        for i in range(0,MMShape[0]):
            f.write(str(i))
            for j in range(0,MMShape[1]):
                f.write("," + str(ConfusionMatrix[i,j]))
            f.write("\n")
    summaryI7FileName = outputPrefix + "_" + Prefix + "_summ_i7.csv"
    with open(summaryI7FileName, 'w') as f:
        f.write("Index,Sequence,Hits,Different\n")
        MMShape = ConfusionMatrix.shape
        for i in range(0,MMShape[0]):
            mySum = 0
            nonZero = 0
            for j in range(0,MMShape[1]):
                mySum = mySum + ConfusionMatrix[i,j]
                if ConfusionMatrix[i,j] > 0:
                    nonZero = nonZero + 1
            f.write(str(i) + "," + index7list[i] + "," + str(mySum) + "," + str(nonZero) + "\n")
    summaryI5FileName = outputPrefix + "_" + Prefix + "_summ_i5.csv"
    with open(summaryI5FileName, 'w') as f:
        f.write("Index,Sequence,Hits,Different\n")
        MMShape = ConfusionMatrix.shape
        for j in range(0, MMShape[1]):
            mySum = 0
            nonZero = 0
            for i in range(0, MMShape[0]):
                mySum = mySum + ConfusionMatrix[i,j]
                if ConfusionMatrix[i, j] > 0:
                    nonZero = nonZero + 1
            f.write(str(j) + "," + index5list[j] + "," + str(mySum) + "," + str(nonZero) + "\n")
def readRead(s):
    """Return the next fastq record: a list of four raw lines from stream `s`."""
    return [s.readline() for _ in range(4)]
def writeRead(sread, s):
    """Write the four lines of fastq record `sread` to stream `s`."""
    for line_index in range(4):
        s.write(sread[line_index])
# Initializes Hash tables with compatible indices
def generateCompatibleTables0m(indexlist):
    """Map each barcode sequence to its position in `indexlist`.

    If a sequence appears more than once, the last position wins (same as
    repeatedly overwriting the entry while iterating in order).
    """
    return {barcode: position for position, barcode in enumerate(indexlist)}
# Initializes Hash tables with compatible indices
# It generates all barcodes
# that are one mismatch from the barcodes in the list
# The value stored in each entry indicates the position
# of the barcode in the original list
def generateCompatibleTables1m(indexlist):
    """Map every barcode exactly one substitution away from a listed barcode
    to that barcode's position in `indexlist`.

    For each character position of each barcode, the original base is
    replaced by each of A/T/C/G/N; variants identical to the source barcode
    are skipped.  Collisions keep the last matching list position, exactly as
    the previous implementation did.
    """
    table = {}
    for position, barcode in enumerate(indexlist):
        for i in range(len(barcode)):
            # One loop over the alphabet replaces five copy-pasted blocks.
            for base in 'ATCGN':
                variant = barcode[:i] + base + barcode[i + 1:]
                if variant != barcode:
                    table[variant] = position
    return table
def mainLoop():
    """Scan an undetermined fastq.gz for barcode pairs compatible with the
    sample sheet (up to 1 mismatch per index), accumulate valid/swapped
    counts, save compatible reads, and write CSV/stat reports.
    """
    # command line arguments
    parser = argparse.ArgumentParser(description='Determines the number and percentage of compatible but invalid barcode pairs')
    parser.add_argument('-i', '--input', required=True, help='Name of the fastq gzipped file with undemuxed reads - must be the result of demuxing with 0 mismatches')
    parser.add_argument('-s', '--samples', required=True, help='Name of the sample sheet CSV file with barcodes as used for demux. Must contain ONLY valid barcodes')
    parser.add_argument('-r', '--row', required=False, help='Row with columns header in sample sheet (Default = 17)', default=17)
    parser.add_argument('-dr', '--demuxReads', help="Number of reads demuxed with 0 mismatches. If provided will generate swap stats", default = 0)
    parser.add_argument('-o', '--output', required=False, help='prefix for output files. It may include folder name. No need to end it with "_"', default="")
    parser.add_argument('-n', '--printEach', required=False, help='Number of iters before printing', default=1000)
    parser.add_argument('-x', '--max', required=False, help='Max Number of reads to process (Default = 0 - All)', default=0)
    parser.add_argument('-iv', '--ignore-valid', help="Do not save valid pairs (undemuxed because of 1 mismatch) in output fastq file", dest='ignorevalid', action='store_true')
    parser.add_argument('-v', '--verbose', help="Makes verbose", dest='verbose', action='store_true')
    args = parser.parse_args()
    fastqFileName = args.input
    sampleSheetFileName = args.samples
    # NOTE(review): isVerbose is parsed but never used below.
    isVerbose = args.verbose
    printEach = int(args.printEach)
    maxReads = int(args.max)
    sampleSheetRow = int(args.row)
    ignoreValid = args.ignorevalid
    outputPrefix = args.output
    demuxReads = int(args.demuxReads)
    maxMismatchsAllowed = 1
    # Reads Barcodes List from Demux Sample Sheet (same used for demux)
    df = pd.read_csv(sampleSheetFileName, header = 0, skiprows=sampleSheetRow-1)
    index7list = df.loc[:,'index']
    index5list = df.loc[:,'index2']
    # Open undemuxed file
    myInput = gzip.open(fastqFileName, 'rt')
    # Open output file for saving
    outputFileName = outputPrefix + "_reads.fastq"
    # NOTE(review): outputFileName is always non-empty (it ends with
    # "_reads.fastq"), so the else branch below is effectively dead.
    if outputFileName:
        myOutput = open(outputFileName, 'w')
    else:
        myOutput = None
    numI7 = index7list.size
    numI5 = index5list.size
    # Pair-count matrices (rows i7 position, columns i5 position) and
    # 2x2 mismatch-distance tallies for valid vs swapped pairs.
    ConfusionMatrixValid = np.zeros( (numI7, numI5) )
    ConfusionMatrixNoValid = np.zeros( (numI7, numI5) )
    MismatchesMatrixNoValid = np.zeros( (maxMismatchsAllowed+1, maxMismatchsAllowed+1) )
    MismatchesMatrixValid = np.zeros( (maxMismatchsAllowed+1, maxMismatchsAllowed+1) )
    # Initializes tables with valid 17 and 15 indices
    myCollectionValidi70m = generateCompatibleTables0m(index7list)
    myCollectionValidi50m = generateCompatibleTables0m(index5list)
    myCollectionValidi71m = generateCompatibleTables1m(index7list)
    myCollectionValidi51m = generateCompatibleTables1m(index5list)
    linesProcessed = 0
    linesCompatible = 0
    time_start = time.time()
    with myInput:
        s = readRead(myInput)
        while s[0]:
            # Get record and barcodes
            record = line_to_record(s[0])
            index17 = record["index1"]
            index15 = record["index2"]
            isCompatible = False
            isValid = False
            saveOutput = False
            # Exact-match lookup first (0 mismatches), then the 1-mismatch table.
            if index17 in myCollectionValidi70m:
                myPosiI7 = myCollectionValidi70m[index17]
                i7dist = 0
            elif index17 in myCollectionValidi71m:
                myPosiI7 = myCollectionValidi71m[index17]
                i7dist = 1
            else:
                i7dist = maxMismatchsAllowed+1
            if index15 in myCollectionValidi50m:
                myPosiI5 = myCollectionValidi50m[index15]
                i5dist = 0
            elif index15 in myCollectionValidi51m:
                myPosiI5 = myCollectionValidi51m[index15]
                i5dist = 1
            else:
                i5dist = maxMismatchsAllowed+1
            if i7dist<=maxMismatchsAllowed and i5dist<=maxMismatchsAllowed:
                isCompatible = True
            if isCompatible:
                linesCompatible += 1
                # Matching positions mean the pair belongs to one sample
                # (valid); differing positions indicate index hopping.
                if myPosiI7 == myPosiI5:
                    MismatchesMatrixValid[i7dist, i5dist] += 1
                    ConfusionMatrixValid[myPosiI7,myPosiI5] = ConfusionMatrixValid[myPosiI7,myPosiI5] + 1
                    isValid = True
                else:
                    MismatchesMatrixNoValid[i7dist, i5dist] += 1
                    ConfusionMatrixNoValid[myPosiI7,myPosiI5] = ConfusionMatrixNoValid[myPosiI7,myPosiI5] + 1
            if isCompatible and not (isValid and ignoreValid):
                saveOutput = True
            if saveOutput and myOutput:
                writeRead(s,myOutput)
            linesProcessed = linesProcessed + 1
            if linesProcessed % printEach == 0:
                print_results_both(time_start, linesProcessed, MismatchesMatrixNoValid, MismatchesMatrixValid, "Processing", args)
            if maxReads>0 and linesProcessed >= maxReads:
                break
            s = readRead(myInput)
    save_results(outputPrefix, "NoValid", ConfusionMatrixNoValid, index7list, index5list)
    save_results(outputPrefix, "Valid", ConfusionMatrixValid, index7list, index5list)
    print_results_both(time_start, linesProcessed, MismatchesMatrixNoValid, MismatchesMatrixValid,"Finished",args)
# Script entry point: parse CLI arguments and run the index-hopping analysis.
if __name__ == '__main__':
    mainLoop()
| [
"gzip.open",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.zeros",
"time.time"
] | [((2330, 2341), 'time.time', 'time.time', ([], {}), '()\n', (2339, 2341), False, 'import time\n'), ((11458, 11583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Determines the number and percentage of compatible but invalid barcode pairs"""'}), "(description=\n 'Determines the number and percentage of compatible but invalid barcode pairs'\n )\n", (11481, 11583), False, 'import argparse\n'), ((13408, 13479), 'pandas.read_csv', 'pd.read_csv', (['sampleSheetFileName'], {'header': '(0)', 'skiprows': '(sampleSheetRow - 1)'}), '(sampleSheetFileName, header=0, skiprows=sampleSheetRow - 1)\n', (13419, 13479), True, 'import pandas as pd\n'), ((13597, 13627), 'gzip.open', 'gzip.open', (['fastqFileName', '"""rt"""'], {}), "(fastqFileName, 'rt')\n", (13606, 13627), False, 'import gzip\n'), ((13913, 13937), 'numpy.zeros', 'np.zeros', (['(numI7, numI5)'], {}), '((numI7, numI5))\n', (13921, 13937), True, 'import numpy as np\n'), ((13970, 13994), 'numpy.zeros', 'np.zeros', (['(numI7, numI5)'], {}), '((numI7, numI5))\n', (13978, 13994), True, 'import numpy as np\n'), ((14030, 14090), 'numpy.zeros', 'np.zeros', (['(maxMismatchsAllowed + 1, maxMismatchsAllowed + 1)'], {}), '((maxMismatchsAllowed + 1, maxMismatchsAllowed + 1))\n', (14038, 14090), True, 'import numpy as np\n'), ((14120, 14180), 'numpy.zeros', 'np.zeros', (['(maxMismatchsAllowed + 1, maxMismatchsAllowed + 1)'], {}), '((maxMismatchsAllowed + 1, maxMismatchsAllowed + 1))\n', (14128, 14180), True, 'import numpy as np\n'), ((14580, 14591), 'time.time', 'time.time', ([], {}), '()\n', (14589, 14591), False, 'import time\n')] |
import os
import time
import numpy as np
from openslide import OpenSlide
from multiprocessing import Pool
import cv2
import csv
import random
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
# Directory of .npy label maps walked for tumor regions.
path_list = "./data/npy/"
# Text file that accumulates the sampled patch coordinates.
file_path_txt = "./data/txt/x4.txt"
# Output root for the extracted patch images.
path_patch = "./data/patch_prognostic/x4/"
# Directory containing the whole-slide .svs images.
file_path_tif = "./data/svs/"
# Step (in label-map cells) between candidate window origins.
stride = 1
# Worker count for the multiprocessing pool.
num_process = 5
# Side length (pixels) of each region read from the slide.
patch_size = 640
# OpenSlide pyramid level the patches are read from.
patch_level = 1
def get_coordinates(np_path, file_name):
    """Scan a label map for 10x10 windows made entirely of tumor cells
    (label == 2), subsample to at most 150 windows, and append their
    coordinates (cell index * 256) to the shared coordinate file.

    Args:
        np_path: path to the .npy label map for one slide.
        file_name: slide identifier written into each coordinate row.
    """
    np_file = np.load(np_path)
    rows, cols = np_file.shape
    candidates = []
    for i in range(0, rows - 10, stride):
        for j in range(0, cols - 10, stride):
            if int(np_file[i, j]) != 2:
                continue
            # Keep the window only if every cell of the 10x10 block is tumor;
            # all() short-circuits instead of counting all 100 cells.
            if all(np_file[w, h] == 2
                   for w in range(i, i + 10)
                   for h in range(j, j + 10)):
                candidates.append([i, j])
    # Cap the number of patches per slide at 150, sampled at random.
    if len(candidates) > 150:
        chosen = random.sample(range(len(candidates)), 150)
        candidates = [candidates[k] for k in chosen]
    # Open the coordinate file only for the final write, and let `with`
    # guarantee it is closed even if an error occurs.
    with open(file_path_txt, 'a+') as txt:
        for x, y in candidates:
            # Cell indices are scaled by 256 to obtain pixel coordinates.
            txt.writelines([file_name, ',', str(x * 256), ',', str(y * 256), ',tumor', '\n'])
def cut(file_path_txt, file_path_tif, patch_path, patch_size, patch_level):
    """Read the coordinate file, create per-sample/per-class output folders,
    and extract every listed patch in parallel via `process`.

    Args:
        file_path_txt: file of "pid,x,y,kind" rows (one per patch).
        file_path_tif: directory holding the .svs whole-slide images.
        patch_path: root directory for the extracted patches.
        patch_size: side length of the region read from the slide.
        patch_level: OpenSlide level to read from.
    """
    opts_list = []
    # `with` closes the coordinate file even if a row fails to parse.
    with open(file_path_txt) as infile:
        for i, line in enumerate(infile):
            pid, x, y, kind = line.strip('\n').split(',')
            # First three dash-separated fields of the file name form the
            # per-sample directory name.
            slide_name = pid.split("/")[-1]
            pid_par = '-'.join(slide_name.split("-")[:3])
            pid_dir = os.path.join(patch_path, pid_par)
            # makedirs(exist_ok=True) avoids the check-then-create race.
            os.makedirs(pid_dir, exist_ok=True)
            class_dir = os.path.join(pid_dir, kind)
            os.makedirs(class_dir, exist_ok=True)
            opts_list.append((i, pid, x, y, file_path_tif, patch_path, patch_size, patch_level, class_dir))
    print(len(opts_list))
    # The context manager terminates the worker pool after map() completes,
    # instead of leaking it.
    with Pool(processes=num_process) as pool:
        pool.map(process, opts_list)
def process(opts):
    """Extract one patch from a whole-slide image and save it as a 256x256 PNG.

    Args:
        opts: tuple (i, pid, x, y, file_path_tif, patch_path, patch_size,
            patch_level, class_dir) as assembled by cut().
    """
    i, pid, x, y, file_path_tif, patch_path, patch_size, patch_level, class_dir = opts
    slide_file = pid.split("/")[-1]
    dir_name = '-'.join(slide_file.split("-")[:3])
    x = int(float(x))
    y = int(float(y))
    wsi_path = os.path.join(file_path_tif, slide_file + '.svs')
    slide = OpenSlide(wsi_path)
    try:
        img = slide.read_region(
            (x, y), patch_level,
            (patch_size, patch_size))
    finally:
        # Close the slide handle so worker processes do not leak file handles.
        slide.close()
    wsi_ary_lv_ = np.array(img)
    # RGBA from OpenSlide -> BGR for OpenCV, then downsample to 256x256.
    img = cv2.cvtColor(wsi_ary_lv_, cv2.COLOR_RGBA2BGR)
    img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(os.path.join(class_dir, dir_name + "_" + str(i) + '.png'), img)
# Script entry point: sample tumor-window coordinates from every label map,
# then cut all patches from the slides and report the elapsed time.
if __name__=='__main__':
    for root, dirs, files in os.walk(path_list):
        for file in files:
            file_name = file.split(".npy")[0]
            np_path = os.path.join(root,file)
            # file_path becomes "<last dir component>/<file stem>".
            file_path = root.split('/')[-1]
            file_path = file_path+'/'+file_name
            # NOTE(review): pid is computed here but never used.
            pid_split = file_name.split('-')[:3]
            pid = pid_split[0] + '-' + pid_split[1] + '-' + pid_split[2]
            get_coordinates(np_path, file_path)
    print('Making patch!')
    time_now = time.time()
    cut(file_path_txt, file_path_tif, path_patch, patch_size, patch_level)
    time_spent = (time.time() - time_now) / 60
    print('Making patch for %f min!' % time_spent)
| [
"openslide.OpenSlide",
"numpy.load",
"os.path.abspath",
"os.mkdir",
"cv2.cvtColor",
"os.walk",
"os.path.exists",
"time.time",
"numpy.array",
"multiprocessing.Pool",
"os.path.join",
"cv2.resize"
] | [((495, 511), 'numpy.load', 'np.load', (['np_path'], {}), '(np_path)\n', (502, 511), True, 'import numpy as np\n'), ((2410, 2437), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_process'}), '(processes=num_process)\n', (2414, 2437), False, 'from multiprocessing import Pool\n'), ((2773, 2814), 'os.path.join', 'os.path.join', (['file_path_tif', "(dir + '.svs')"], {}), "(file_path_tif, dir + '.svs')\n", (2785, 2814), False, 'import os\n'), ((2831, 2850), 'openslide.OpenSlide', 'OpenSlide', (['wsi_path'], {}), '(wsi_path)\n', (2840, 2850), False, 'from openslide import OpenSlide\n'), ((2973, 2986), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2981, 2986), True, 'import numpy as np\n'), ((2998, 3043), 'cv2.cvtColor', 'cv2.cvtColor', (['wsi_ary_lv_', 'cv2.COLOR_RGBA2BGR'], {}), '(wsi_ary_lv_, cv2.COLOR_RGBA2BGR)\n', (3010, 3043), False, 'import cv2\n'), ((3055, 3114), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (256, 256), interpolation=cv2.INTER_LINEAR)\n', (3065, 3114), False, 'import cv2\n'), ((3260, 3278), 'os.walk', 'os.walk', (['path_list'], {}), '(path_list)\n', (3267, 3278), False, 'import os\n'), ((3715, 3726), 'time.time', 'time.time', ([], {}), '()\n', (3724, 3726), False, 'import time\n'), ((1992, 2025), 'os.path.join', 'os.path.join', (['patch_path', 'pid_par'], {}), '(patch_path, pid_par)\n', (2004, 2025), False, 'import os\n'), ((2121, 2148), 'os.path.join', 'os.path.join', (['pid_dir', 'kind'], {}), '(pid_dir, kind)\n', (2133, 2148), False, 'import os\n'), ((194, 219), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (209, 219), False, 'import os\n'), ((2044, 2067), 'os.path.exists', 'os.path.exists', (['pid_dir'], {}), '(pid_dir)\n', (2058, 2067), False, 'import os\n'), ((2082, 2099), 'os.mkdir', 'os.mkdir', (['pid_dir'], {}), '(pid_dir)\n', (2090, 2099), False, 'import os\n'), ((2165, 2190), 'os.path.exists', 'os.path.exists', (['class_dir'], {}), 
'(class_dir)\n', (2179, 2190), False, 'import os\n'), ((2205, 2224), 'os.mkdir', 'os.mkdir', (['class_dir'], {}), '(class_dir)\n', (2213, 2224), False, 'import os\n'), ((3378, 3402), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3390, 3402), False, 'import os\n'), ((3824, 3835), 'time.time', 'time.time', ([], {}), '()\n', (3833, 3835), False, 'import time\n')] |
'''
@author: pkao
This code has three funtions:
1. Converting a combined lesion label to three individual lesion labels
2. Mapping the individual lesions to MNI152 space
3. Mergeing the individual lesion label to segmentation mask in MNI152 space
'''
from utils import Brats2018ValidationN4ITKFilePaths, AllSubjectID, FindOneElement, ReadImage
from utils import PredictedLesionMaskPath, Brats2018PredictedLesionsPaths, Brats2018PredictedLesionsProbMapMNI152Paths
import paths
import argparse
import os
import subprocess
from multiprocessing import Pool
import SimpleITK as sitk
import numpy as np
def Seg2Lesions(seg_path):
    '''Split a combined lesion label map into three binary lesion masks.

    Labels 1 (necrosis), 2 (edema) and 4 (enhancing tumor) each become a
    one-hot mask written to the subject's LesionLabels directory, preserving
    the source image geometry.
    '''
    subject_dir, seg_file = os.path.split(seg_path)
    lesion_dir = os.path.join(subject_dir, 'LesionLabels')
    gt_img = sitk.ReadImage(seg_path)
    gt_nda = sitk.GetArrayFromImage(gt_img)
    base_name = seg_file[:seg_file.index('.nii.gz')]

    def _write_lesion(label_value, suffix):
        # Binary mask for one label; CopyInformation keeps origin/spacing.
        mask_nda = np.zeros(gt_nda.shape, gt_nda.dtype)
        mask_nda[gt_nda == label_value] = 1
        mask_img = sitk.GetImageFromArray(mask_nda)
        mask_img.CopyInformation(gt_img)
        sitk.WriteImage(mask_img, os.path.join(lesion_dir, base_name + suffix))

    # One helper call per lesion replaces three copy-pasted blocks.
    _write_lesion(1, '_necrosis.nii.gz')
    _write_lesion(2, '_edema.nii.gz')
    _write_lesion(4, '_enhancing.nii.gz')
    print('Complete subject %s' % seg_file)
def Lesions2MNI152(necrosisInVol, edemaInVol, enhancingTumorInVol):
    '''Resample the three lesion volumes of one subject into MNI152 space.

    Finds the subject's precomputed FLIRT affine ("invol2refvol" .mat file)
    under `brats_path` and applies it to each lesion volume with
    `flirt -applyxfm`, writing "*_prob_MNI152_T1_1mm.nii.gz" outputs.
    '''
    # Suffix appended to each resampled probability map.
    new_name_append = "_prob_MNI152_T1_1mm.nii.gz"
    assert SubjectID(necrosisInVol) == SubjectID(edemaInVol) == SubjectID(enhancingTumorInVol)
    print('Working on %s' %os.path.split(necrosisInVol)[1])
    # Collect every "invol2refvol" transform under brats_path, then pick the
    # one whose path contains this subject's ID.
    omats = [os.path.join(root,name) for root, dirs, files in os.walk(brats_path) for name in files if "invol2refvol" in name and name.endswith(".mat")]
    omat_temp = [f for f in omats if SubjectID(necrosisInVol) in f]
    omat = omat_temp[0]
    assert SubjectID(necrosisInVol) == SubjectID(edemaInVol) == SubjectID(enhancingTumorInVol) == SubjectID(omat)
    # Apply the same affine to all three volumes against the reference volume.
    subprocess.call(["flirt", "-in", necrosisInVol, "-ref", refVol, "-out", necrosisInVol[:-7] + new_name_append, "-init", omat, "-applyxfm"])
    subprocess.call(["flirt", "-in", edemaInVol, "-ref", refVol, "-out", edemaInVol[:-7] + new_name_append, "-init", omat, "-applyxfm"])
    subprocess.call(["flirt", "-in", enhancingTumorInVol, "-ref", refVol, "-out", enhancingTumorInVol[:-7] + new_name_append, "-init", omat, "-applyxfm"])
    print('Finished %s' %os.path.split(necrosisInVol)[1])
def Lesions2MNI152_star(lesion_dirs):
    """Unpack a (necrosis, edema, enhancing) path tuple for Pool.map."""
    necrosis_path, edema_path, enhancing_path = lesion_dirs
    return Lesions2MNI152(necrosis_path, edema_path, enhancing_path)
def Lesions2SegMNI152(necrosis_mni_path, edema_mni_path, enhancing_tumor_path, subject_id):
    '''Merge the per-lesion probability maps in MNI152 space into one
    segmentation mask plus binary tumor-compartment masks.

    Writes "<subject_id>_seg_MNI152_1mm.nii.gz" and binary masks for whole
    tumor, tumor core and enhancing tumor into the subject's MNI152 folder.
    '''
    print(SubjectID(necrosis_mni_path), SubjectID(edema_mni_path), SubjectID(enhancing_tumor_path), subject_id)
    assert (SubjectID(necrosis_mni_path) == SubjectID(edema_mni_path) == SubjectID(enhancing_tumor_path) == subject_id), 'Subject Mismatch!!!'
    mni152_path = os.path.join(necrosis_mni_path[:FindOneElement(necrosis_mni_path, '/')[-2]], 'MNI152')
    # The necrosis image is kept as a SimpleITK image so its geometry can be
    # copied onto every output mask.
    necrosis_mni_img = sitk.ReadImage(necrosis_mni_path)
    necrosis_mask_nda = sitk.GetArrayFromImage(necrosis_mni_img)
    edema_mask_nda = ReadImage(edema_mni_path)
    enhancing_tumor_mask_nda = ReadImage(enhancing_tumor_path)
    # seg in MNI 152 space: channel index corresponds to the BraTS label
    # (1 = necrosis, 2 = edema, 4 = enhancing); argmax over channels resolves
    # overlapping probability maps, with channel 0 (background) winning
    # all-zero voxels.
    seg_mni = np.zeros((5, necrosis_mask_nda.shape[0], necrosis_mask_nda.shape[1], necrosis_mask_nda.shape[2]), dtype=necrosis_mask_nda.dtype)
    seg_mni[1, :] = necrosis_mask_nda
    seg_mni[2, :] = edema_mask_nda
    seg_mni[4, :] = enhancing_tumor_mask_nda
    seg_mask_mni = np.argmax(seg_mni, axis=0).astype(np.int16)
    seg_name = os.path.join(mni152_path, subject_id + '_seg_MNI152_1mm.nii.gz')
    print('Working on %s' % seg_name)
    seg_mask_mni_img = sitk.GetImageFromArray(seg_mask_mni)
    seg_mask_mni_img.CopyInformation(necrosis_mni_img)
    sitk.WriteImage(seg_mask_mni_img, seg_name)
    # Per-label binary masks derived from the merged segmentation.
    necrosis_mni_nda = np.zeros(seg_mask_mni.shape, seg_mask_mni.dtype)
    edema_mni_nda = np.zeros(seg_mask_mni.shape, seg_mask_mni.dtype)
    enhancing_mni_nda = np.zeros(seg_mask_mni.shape, seg_mask_mni.dtype)
    necrosis_mni_nda[seg_mask_mni == 1] = 1
    edema_mni_nda[seg_mask_mni == 2] = 1
    enhancing_mni_nda[seg_mask_mni == 4] = 1

    def _save_binary_mask(mask_nda, suffix, description):
        # Write one binary compartment mask, copying geometry from the
        # reference image and sanity-checking that the mask stays binary.
        mask_nda = mask_nda.astype(np.int16)
        mask_name = os.path.join(mni152_path, subject_id + suffix)
        mask_img = sitk.GetImageFromArray(mask_nda)
        mask_img.CopyInformation(necrosis_mni_img)
        assert np.amax(mask_nda) <= 1, 'Maximum of %s mask not equal to 1' % description
        sitk.WriteImage(mask_img, mask_name)

    # One helper call per compartment replaces three copy-pasted sequences.
    _save_binary_mask(necrosis_mni_nda + edema_mni_nda + enhancing_mni_nda,
                      '_whole_tumor_MNI152_1mm.nii.gz', 'whole tumor')
    _save_binary_mask(necrosis_mni_nda + enhancing_mni_nda,
                      '_tumor_core_MNI152_1mm.nii.gz', 'tumor core')
    _save_binary_mask(enhancing_mni_nda,
                      '_enhancing_tumor_MNI152_1mm.nii.gz', 'enhancing tumor')
def Lesions2SegMNI152_star(dirs):
    """Unpack a (necrosis, edema, enhancing, subject_id) tuple for Pool.map."""
    necrosis_path, edema_path, enhancing_path, subject = dirs
    return Lesions2SegMNI152(necrosis_path, edema_path, enhancing_path, subject)
def SubjectID(sub_dir):
    """Derive the subject identifier from a lesion/MNI152 file path.

    The file name is truncated at the first recognized suffix marker
    (checked in the same order as before); unknown names yield None.
    """
    subject_file = os.path.split(sub_dir)[1]
    for marker in ('_necrosis', '_edema', '_enhancing', '_MNI152'):
        if marker[1:] in subject_file:
            return subject_file[:subject_file.find(marker)]
    return None
# Command-line configuration: dataset split and worker-pool size.
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", help="can be train, valid or test", default='train', type=str)
parser.add_argument("-t", "--thread", help="the number of thread you want to use ", default=8, type=int)
args = parser.parse_args()
# Select the BraTS split directories that match the requested mode.
if args.mode == "train":
    brats_path = paths.brats2018_training_dir
    predicted_path = paths.brats2018_training_predicted_lesions_dir
elif args.mode == "valid":
    brats_path = paths.brats2018_validation_dir
    predicted_path = paths.brats2018_validation_predicted_lesions_dir
elif args.mode == "test":
    brats_path = paths.brats2018_testing_dir
    predicted_path = paths.brats2018_testing_predicted_lesions_dir
else:
    raise ValueError("Unknown value for --mode. Use \"train\", \"valid\" or \"test\"")
refVol = paths.mni152_1mm_path
pool = Pool(args.thread)
# The following lines work on spliting seg into individual lesion label
predicted_lesions_paths = PredictedLesionMaskPath(predicted_path=predicted_path)
# Expected cohort sizes: 285 (train), 66 (valid), 191 (test).
assert(len(predicted_lesions_paths)==191 or len(predicted_lesions_paths)==285 or len(predicted_lesions_paths)==66)
predicted_lesions_dir = os.path.split(predicted_lesions_paths[0])[0]
if not os.path.exists(os.path.join(predicted_lesions_dir, 'LesionLabels')):
    os.mkdir(os.path.join(predicted_lesions_dir, 'LesionLabels'))
pool.map(Seg2Lesions, predicted_lesions_paths)
# The following lines work on mapping these individual lesion label into probability maps in MNI152 space
necrosis_paths, edema_paths, enhancing_tumor_paths = Brats2018PredictedLesionsPaths(predicted_path=predicted_path)
assert(len(necrosis_paths) == len(edema_paths) == len(enhancing_tumor_paths))
pool.map(Lesions2MNI152_star, zip(necrosis_paths, edema_paths, enhancing_tumor_paths))
# The following lines merge the individual lesion label to segmentation mask in MNI152 space
# NOTE(review): enhancing_tumor_paths is rebound here to the MNI152 variant.
necrosis_mni_paths, edema_mni_paths, enhancing_tumor_paths = Brats2018PredictedLesionsProbMapMNI152Paths(predicted_path=predicted_path)
all_ids = AllSubjectID(brats_path)
assert(len(all_ids) == len(necrosis_mni_paths) == len(edema_mni_paths) == len(enhancing_tumor_paths)), 'brats_path and mode mismatch!!!'
if not os.path.exists(os.path.join(predicted_lesions_dir, 'MNI152')):
    os.mkdir(os.path.join(predicted_lesions_dir, 'MNI152'))
pool.map(Lesions2SegMNI152_star, zip(necrosis_mni_paths, edema_mni_paths, enhancing_tumor_paths, all_ids)) | [
"argparse.ArgumentParser",
"numpy.argmax",
"os.walk",
"os.path.join",
"utils.Brats2018PredictedLesionsPaths",
"SimpleITK.ReadImage",
"SimpleITK.GetArrayFromImage",
"utils.PredictedLesionMaskPath",
"subprocess.call",
"SimpleITK.WriteImage",
"multiprocessing.Pool",
"utils.Brats2018PredictedLesio... | [((6969, 6994), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6992, 6994), False, 'import argparse\n'), ((7782, 7799), 'multiprocessing.Pool', 'Pool', (['args.thread'], {}), '(args.thread)\n', (7786, 7799), False, 'from multiprocessing import Pool\n'), ((7899, 7953), 'utils.PredictedLesionMaskPath', 'PredictedLesionMaskPath', ([], {'predicted_path': 'predicted_path'}), '(predicted_path=predicted_path)\n', (7922, 7953), False, 'from utils import PredictedLesionMaskPath, Brats2018PredictedLesionsPaths, Brats2018PredictedLesionsProbMapMNI152Paths\n'), ((8486, 8547), 'utils.Brats2018PredictedLesionsPaths', 'Brats2018PredictedLesionsPaths', ([], {'predicted_path': 'predicted_path'}), '(predicted_path=predicted_path)\n', (8516, 8547), False, 'from utils import PredictedLesionMaskPath, Brats2018PredictedLesionsPaths, Brats2018PredictedLesionsProbMapMNI152Paths\n'), ((8869, 8943), 'utils.Brats2018PredictedLesionsProbMapMNI152Paths', 'Brats2018PredictedLesionsProbMapMNI152Paths', ([], {'predicted_path': 'predicted_path'}), '(predicted_path=predicted_path)\n', (8912, 8943), False, 'from utils import PredictedLesionMaskPath, Brats2018PredictedLesionsPaths, Brats2018PredictedLesionsProbMapMNI152Paths\n'), ((8954, 8978), 'utils.AllSubjectID', 'AllSubjectID', (['brats_path'], {}), '(brats_path)\n', (8966, 8978), False, 'from utils import Brats2018ValidationN4ITKFilePaths, AllSubjectID, FindOneElement, ReadImage\n'), ((740, 763), 'os.path.split', 'os.path.split', (['seg_path'], {}), '(seg_path)\n', (753, 763), False, 'import os\n'), ((779, 820), 'os.path.join', 'os.path.join', (['subject_dir', '"""LesionLabels"""'], {}), "(subject_dir, 'LesionLabels')\n", (791, 820), False, 'import os\n'), ((832, 856), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['seg_path'], {}), '(seg_path)\n', (846, 856), True, 'import SimpleITK as sitk\n'), ((867, 897), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['gt_img'], {}), 
'(gt_img)\n', (889, 897), True, 'import SimpleITK as sitk\n'), ((916, 952), 'numpy.zeros', 'np.zeros', (['gt_nda.shape', 'gt_nda.dtype'], {}), '(gt_nda.shape, gt_nda.dtype)\n', (924, 952), True, 'import numpy as np\n'), ((998, 1034), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['necrosis_nda'], {}), '(necrosis_nda)\n', (1020, 1034), True, 'import SimpleITK as sitk\n'), ((1234, 1270), 'numpy.zeros', 'np.zeros', (['gt_nda.shape', 'gt_nda.dtype'], {}), '(gt_nda.shape, gt_nda.dtype)\n', (1242, 1270), True, 'import numpy as np\n'), ((1310, 1343), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['edema_nda'], {}), '(edema_nda)\n', (1332, 1343), True, 'import SimpleITK as sitk\n'), ((1532, 1568), 'numpy.zeros', 'np.zeros', (['gt_nda.shape', 'gt_nda.dtype'], {}), '(gt_nda.shape, gt_nda.dtype)\n', (1540, 1568), True, 'import numpy as np\n'), ((1616, 1653), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['enhancing_nda'], {}), '(enhancing_nda)\n', (1638, 1653), True, 'import SimpleITK as sitk\n'), ((2561, 2704), 'subprocess.call', 'subprocess.call', (["['flirt', '-in', necrosisInVol, '-ref', refVol, '-out', necrosisInVol[:-7] +\n new_name_append, '-init', omat, '-applyxfm']"], {}), "(['flirt', '-in', necrosisInVol, '-ref', refVol, '-out', \n necrosisInVol[:-7] + new_name_append, '-init', omat, '-applyxfm'])\n", (2576, 2704), False, 'import subprocess\n'), ((2701, 2838), 'subprocess.call', 'subprocess.call', (["['flirt', '-in', edemaInVol, '-ref', refVol, '-out', edemaInVol[:-7] +\n new_name_append, '-init', omat, '-applyxfm']"], {}), "(['flirt', '-in', edemaInVol, '-ref', refVol, '-out', \n edemaInVol[:-7] + new_name_append, '-init', omat, '-applyxfm'])\n", (2716, 2838), False, 'import subprocess\n'), ((2835, 2993), 'subprocess.call', 'subprocess.call', (["['flirt', '-in', enhancingTumorInVol, '-ref', refVol, '-out', \n enhancingTumorInVol[:-7] + new_name_append, '-init', omat, '-applyxfm']"], {}), "(['flirt', '-in', enhancingTumorInVol, 
'-ref', refVol,\n '-out', enhancingTumorInVol[:-7] + new_name_append, '-init', omat,\n '-applyxfm'])\n", (2850, 2993), False, 'import subprocess\n'), ((3657, 3690), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['necrosis_mni_path'], {}), '(necrosis_mni_path)\n', (3671, 3690), True, 'import SimpleITK as sitk\n'), ((3712, 3752), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['necrosis_mni_img'], {}), '(necrosis_mni_img)\n', (3734, 3752), True, 'import SimpleITK as sitk\n'), ((3771, 3796), 'utils.ReadImage', 'ReadImage', (['edema_mni_path'], {}), '(edema_mni_path)\n', (3780, 3796), False, 'from utils import Brats2018ValidationN4ITKFilePaths, AllSubjectID, FindOneElement, ReadImage\n'), ((3825, 3856), 'utils.ReadImage', 'ReadImage', (['enhancing_tumor_path'], {}), '(enhancing_tumor_path)\n', (3834, 3856), False, 'from utils import Brats2018ValidationN4ITKFilePaths, AllSubjectID, FindOneElement, ReadImage\n'), ((3893, 4025), 'numpy.zeros', 'np.zeros', (['(5, necrosis_mask_nda.shape[0], necrosis_mask_nda.shape[1],\n necrosis_mask_nda.shape[2])'], {'dtype': 'necrosis_mask_nda.dtype'}), '((5, necrosis_mask_nda.shape[0], necrosis_mask_nda.shape[1],\n necrosis_mask_nda.shape[2]), dtype=necrosis_mask_nda.dtype)\n', (3901, 4025), True, 'import numpy as np\n'), ((4203, 4267), 'os.path.join', 'os.path.join', (['mni152_path', "(subject_id + '_seg_MNI152_1mm.nii.gz')"], {}), "(mni152_path, subject_id + '_seg_MNI152_1mm.nii.gz')\n", (4215, 4267), False, 'import os\n'), ((4320, 4356), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['seg_mask_mni'], {}), '(seg_mask_mni)\n', (4342, 4356), True, 'import SimpleITK as sitk\n'), ((4410, 4453), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['seg_mask_mni_img', 'seg_name'], {}), '(seg_mask_mni_img, seg_name)\n', (4425, 4453), True, 'import SimpleITK as sitk\n'), ((4475, 4523), 'numpy.zeros', 'np.zeros', (['seg_mask_mni.shape', 'seg_mask_mni.dtype'], {}), '(seg_mask_mni.shape, seg_mask_mni.dtype)\n', (4483, 4523), True, 
'import numpy as np\n'), ((4541, 4589), 'numpy.zeros', 'np.zeros', (['seg_mask_mni.shape', 'seg_mask_mni.dtype'], {}), '(seg_mask_mni.shape, seg_mask_mni.dtype)\n', (4549, 4589), True, 'import numpy as np\n'), ((4611, 4659), 'numpy.zeros', 'np.zeros', (['seg_mask_mni.shape', 'seg_mask_mni.dtype'], {}), '(seg_mask_mni.shape, seg_mask_mni.dtype)\n', (4619, 4659), True, 'import numpy as np\n'), ((4975, 5047), 'os.path.join', 'os.path.join', (['mni152_path', "(subject_id + '_whole_tumor_MNI152_1mm.nii.gz')"], {}), "(mni152_path, subject_id + '_whole_tumor_MNI152_1mm.nii.gz')\n", (4987, 5047), False, 'import os\n'), ((5074, 5122), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['whole_tumor_mask_mni_nda'], {}), '(whole_tumor_mask_mni_nda)\n', (5096, 5122), True, 'import SimpleITK as sitk\n'), ((5279, 5347), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['whole_tumor_mask_mni_img', 'whole_tumor_mask_mni_name'], {}), '(whole_tumor_mask_mni_img, whole_tumor_mask_mni_name)\n', (5294, 5347), True, 'import SimpleITK as sitk\n'), ((5527, 5598), 'os.path.join', 'os.path.join', (['mni152_path', "(subject_id + '_tumor_core_MNI152_1mm.nii.gz')"], {}), "(mni152_path, subject_id + '_tumor_core_MNI152_1mm.nii.gz')\n", (5539, 5598), False, 'import os\n'), ((5624, 5671), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['tumor_core_mask_mni_nda'], {}), '(tumor_core_mask_mni_nda)\n', (5646, 5671), True, 'import SimpleITK as sitk\n'), ((5825, 5891), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['tumor_core_mask_mni_img', 'tumor_core_mask_mni_name'], {}), '(tumor_core_mask_mni_img, tumor_core_mask_mni_name)\n', (5840, 5891), True, 'import SimpleITK as sitk\n'), ((6077, 6153), 'os.path.join', 'os.path.join', (['mni152_path', "(subject_id + '_enhancing_tumor_MNI152_1mm.nii.gz')"], {}), "(mni152_path, subject_id + '_enhancing_tumor_MNI152_1mm.nii.gz')\n", (6089, 6153), False, 'import os\n'), ((6184, 6236), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', 
(['enhancing_tumor_mask_mni_nda'], {}), '(enhancing_tumor_mask_mni_nda)\n', (6206, 6236), True, 'import SimpleITK as sitk\n'), ((6405, 6481), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['enhancing_tumor_mask_mni_img', 'enhancing_tumor_mask_mni_name'], {}), '(enhancing_tumor_mask_mni_img, enhancing_tumor_mask_mni_name)\n', (6420, 6481), True, 'import SimpleITK as sitk\n'), ((8093, 8134), 'os.path.split', 'os.path.split', (['predicted_lesions_paths[0]'], {}), '(predicted_lesions_paths[0])\n', (8106, 8134), False, 'import os\n'), ((1179, 1218), 'os.path.join', 'os.path.join', (['lesion_dir', 'necrosis_name'], {}), '(lesion_dir, necrosis_name)\n', (1191, 1218), False, 'import os\n'), ((1476, 1512), 'os.path.join', 'os.path.join', (['lesion_dir', 'edema_name'], {}), '(lesion_dir, edema_name)\n', (1488, 1512), False, 'import os\n'), ((1802, 1842), 'os.path.join', 'os.path.join', (['lesion_dir', 'enhancing_name'], {}), '(lesion_dir, enhancing_name)\n', (1814, 1842), False, 'import os\n'), ((2223, 2247), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (2235, 2247), False, 'import os\n'), ((5192, 5225), 'numpy.amax', 'np.amax', (['whole_tumor_mask_mni_nda'], {}), '(whole_tumor_mask_mni_nda)\n', (5199, 5225), True, 'import numpy as np\n'), ((5740, 5772), 'numpy.amax', 'np.amax', (['tumor_core_mask_mni_nda'], {}), '(tumor_core_mask_mni_nda)\n', (5747, 5772), True, 'import numpy as np\n'), ((6310, 6347), 'numpy.amax', 'np.amax', (['enhancing_tumor_mask_mni_nda'], {}), '(enhancing_tumor_mask_mni_nda)\n', (6317, 6347), True, 'import numpy as np\n'), ((6592, 6614), 'os.path.split', 'os.path.split', (['sub_dir'], {}), '(sub_dir)\n', (6605, 6614), False, 'import os\n'), ((8160, 8211), 'os.path.join', 'os.path.join', (['predicted_lesions_dir', '"""LesionLabels"""'], {}), "(predicted_lesions_dir, 'LesionLabels')\n", (8172, 8211), False, 'import os\n'), ((8224, 8275), 'os.path.join', 'os.path.join', (['predicted_lesions_dir', '"""LesionLabels"""'], {}), 
"(predicted_lesions_dir, 'LesionLabels')\n", (8236, 8275), False, 'import os\n'), ((9138, 9183), 'os.path.join', 'os.path.join', (['predicted_lesions_dir', '"""MNI152"""'], {}), "(predicted_lesions_dir, 'MNI152')\n", (9150, 9183), False, 'import os\n'), ((9196, 9241), 'os.path.join', 'os.path.join', (['predicted_lesions_dir', '"""MNI152"""'], {}), "(predicted_lesions_dir, 'MNI152')\n", (9208, 9241), False, 'import os\n'), ((2272, 2291), 'os.walk', 'os.walk', (['brats_path'], {}), '(brats_path)\n', (2279, 2291), False, 'import os\n'), ((4147, 4173), 'numpy.argmax', 'np.argmax', (['seg_mni'], {'axis': '(0)'}), '(seg_mni, axis=0)\n', (4156, 4173), True, 'import numpy as np\n'), ((2180, 2208), 'os.path.split', 'os.path.split', (['necrosisInVol'], {}), '(necrosisInVol)\n', (2193, 2208), False, 'import os\n'), ((3008, 3036), 'os.path.split', 'os.path.split', (['necrosisInVol'], {}), '(necrosisInVol)\n', (3021, 3036), False, 'import os\n'), ((3582, 3620), 'utils.FindOneElement', 'FindOneElement', (['necrosis_mni_path', '"""/"""'], {}), "(necrosis_mni_path, '/')\n", (3596, 3620), False, 'from utils import Brats2018ValidationN4ITKFilePaths, AllSubjectID, FindOneElement, ReadImage\n')] |
import glob
import os.path
import numpy as np
from scipy.interpolate import RectBivariateSpline
from netCDF4 import Dataset
from .geogrid import GeoGrid
from . import util
class SAR(GeoGrid):
    """A single SAR image on a lon/lat grid, stamped with an acquisition date."""
    def __init__(self, lons, lats, data, date):
        super().__init__(lons, lats, data)
        self.date = date

    @classmethod
    def interpolate(cls, sar_before, sar_after, idate):
        """Linearly time-interpolate between two SAR images at `idate`.

        Raises ValueError if the image shapes differ or `idate` does not lie
        between the two acquisition dates (inclusive). Returns `sar_before`
        unchanged when both inputs share the same date.
        """
        # Compare shapes, not just total sizes: two arrays can have equal
        # .size yet incompatible dimensions.
        if sar_before.data.shape != sar_after.data.shape:
            raise ValueError('Dimensions of SAR images do not match')
        if not sar_before.date <= idate <= sar_after.date:
            raise ValueError(('Interpolation date does not lie between known '
                              'dates'))

        # Avoid a division by zero in the weighting below.
        if sar_before.date == sar_after.date:
            return sar_before

        time_delta = (sar_after.date - sar_before.date).total_seconds()
        before_delta = (idate - sar_before.date).total_seconds()
        # Linear weight: 1 at sar_before.date, 0 at sar_after.date.
        before_factor = 1 - (before_delta / time_delta)
        new_data = before_factor * sar_before.data + (1 - before_factor) * sar_after.data

        return cls(sar_before.lons, sar_before.lats, new_data, idate)

    def zenith2slant(self, angle):
        """Returns a new SAR instance containing the slant delay calculated
        from a cosine mapping with `angle` (radians). `angle` can be a float
        or a matrix broadcastable against the data."""
        return SAR(self.lons, self.lats, self.data / np.cos(angle), self.date)
class InSAR(GeoGrid):
    """An unwrapped interferogram on a lon/lat grid with master/slave dates."""
    def __init__(self, lons, lats, data, master_date, slave_date):
        super().__init__(lons, lats, data)
        self.master_date = master_date
        self.slave_date = slave_date

    @classmethod
    def from_netcdf(cls, path):
        """Load an interferogram from a NetCDF file.

        Three variable layouts are supported: an ISCE/GDAL converted file
        (``lon``/``lat``/``Band1``), a pysarts generated file
        (``lon``/``lat``/``delay``) and a generic file (``x``/``y``/``z``).
        The master and slave dates are parsed from the file name.
        """
        master_date, slave_date = util.extract_timestamp_from_ifg_name(path)
        with Dataset(path) as df:
            if 'Band1' in df.variables:
                # Read ISCE GDAL converted NetCDF
                return cls(df.variables['lon'][:],
                           df.variables['lat'][:],
                           df.variables['Band1'][:, :],
                           master_date,
                           slave_date)
            elif 'delay' in df.variables:
                # Read a pysarts generated NetCDF
                return cls(df.variables['lon'][:],
                           df.variables['lat'][:],
                           df.variables['delay'][:, :],
                           master_date,
                           slave_date)
            else:
                # Try reading a generic NetCDF
                return cls(df.variables['x'][:],
                           df.variables['y'][:],
                           df.variables['z'][:, :].data,
                           master_date,
                           slave_date)

    def save_netcdf(self, path, history=None):
        """Write the interferogram to `path` as a compressed NetCDF4 file.

        `history` is an optional free-text provenance string stored as a
        global attribute.
        """
        with Dataset(path, 'w', format='NETCDF4') as df:
            # Dimensions (return values are unused; the dataset tracks them).
            df.createDimension('lat', self.lats.size)
            df.createDimension('lon', self.lons.size)

            # Create variables
            lats = df.createVariable('lat', 'f4', ('lat',), zlib=True)
            lons = df.createVariable('lon', 'f4', ('lon',), zlib=True)
            delays = df.createVariable('delay', 'f4', ('lat', 'lon'), zlib=True)

            # Set global attributes
            df.description = 'Unwrapped interferometric line of sight delays'
            if history is not None:
                df.history = history

            # Set attributes on variables
            lats.units = 'degrees north'
            lons.units = 'degrees east'
            delays.units = 'cm'
            delays.description = 'Line of sight delay.'

            # Assign to variables
            lats[:] = self.lats
            lons[:] = self.lons
            delays[:, :] = self.data
def find_ifgs_for_dates(ifg_dir, master_date, slc_dates=None):
    """Find all the interferograms for a set of SLC dates and a given master
    date.

    Arguments
    ---------
    ifg_dir : str
        The directory to search for interferograms. Interferograms should be
        named as SLAVE_MASTER.nc where SLAVE and MASTER are datestamps in the
        format YYYYMMDD.
    master_date : date
        The master date.
    slc_dates : list(date), opt
        SLC dates to consider when selecting interferograms. A value of `None`
        (default) means use all the files in ifg_dir. The list passed by the
        caller is not modified.

    Returns
    -------
    A list of files that are made up of images from `master_date` or
    `slc_dates`.
    """
    ifg_files = glob.glob(os.path.join(ifg_dir, '**/*.nc'), recursive=True)
    if not slc_dates:
        return ifg_files

    # Work on a set copy so the caller's list is not mutated and membership
    # tests are O(1) instead of O(len(slc_dates)) per file.
    wanted_dates = set(slc_dates)
    wanted_dates.add(master_date)

    accepted_files = []
    for file in ifg_files:
        ifg_master_date, ifg_slave_date = util.extract_timestamp_from_ifg_name(file)
        if ifg_master_date in wanted_dates and ifg_slave_date in wanted_dates:
            accepted_files.append(file)

    return accepted_files
| [
"netCDF4.Dataset",
"numpy.cos"
] | [((1928, 1941), 'netCDF4.Dataset', 'Dataset', (['path'], {}), '(path)\n', (1935, 1941), False, 'from netCDF4 import Dataset\n'), ((2993, 3029), 'netCDF4.Dataset', 'Dataset', (['path', '"""w"""'], {'format': '"""NETCDF4"""'}), "(path, 'w', format='NETCDF4')\n", (3000, 3029), False, 'from netCDF4 import Dataset\n'), ((1504, 1517), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1510, 1517), True, 'import numpy as np\n')] |
"""
This script reads all the bootstrap performance result files, plots histograms, and calculates averages.
t-tests are done to compute p-values and confidence intervals are computed
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
from scipy import stats
# Small global font so the 6x3 histogram panels stay readable.
matplotlib.rcParams.update({'font.size': 8})
# well_list = ["043", "125", "129", "153", "155", "170", "175"]
well_list = ["125"]
for well in well_list: # loop through all wells
    # specify folder locations
    out_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/rnn_lstm_comparison_results/mmps" + well
    rnn_full_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
                              "_results_full_bootstrap_rnn/"
    lstm_full_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
                               "_results_full_bootstrap_lstm/"
    rnn_storms_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
                                "_results_storm_bootstrap_rnn/"
    lstm_storms_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
                                 "_results_storm_bootstrap_lstm/"
    folder_list = [rnn_full_results_folder, lstm_full_results_folder, rnn_storms_results_folder,
                   lstm_storms_results_folder]
    rmse_df_list = []
    nse_df_list = []
    mae_df_list = []
    rmse_storms_df_list = []
    nse_storms_df_list = []
    mae_storms_df_list = []
    for folder in folder_list:
        # Folder naming convention: .../mmpsNNN_results_<full|storm>_bootstrap_<rnn|lstm>/
        # folder_name1 is "full" or "storm"; folder_name2 is "rnn" or "lstm".
        folder_name1 = folder.split("/")[6].split("_")[2]
        folder_name2 = folder.split("/")[6].split("_")[4]
        folder_name = folder_name1 + "_" + folder_name2
        print(folder_name)
        rmse_t1_list, rmse_t9_list, rmse_t18_list = [], [], []
        nse_t1_list, nse_t9_list, nse_t18_list = [], [], []
        mae_t1_list, mae_t9_list, mae_t18_list = [], [], []
        rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list = [], [], []
        nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list = [], [], []
        mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list = [], [], []
        count = 0
        for file in os.listdir(folder): # extract forecast data
            # Progress indicator every 100 directory entries (count increments
            # once per entry, not per matched file).
            if count % 100 == 0:
                print(folder, "count is", count)
            data = folder + file
            # Full-record metric files: rows 0/8/17 are the t+1/t+9/t+18 forecasts.
            if file.endswith("_RMSE.csv"):
                # print(file)
                rmse_df = pd.read_csv(data)
                rmse_t1, rmse_t9, rmse_t18 = rmse_df[["0"]].iloc[0], rmse_df[["0"]].iloc[8], rmse_df[["0"]].iloc[17]
                rmse_t1_list.append(rmse_t1[0])
                rmse_t9_list.append(rmse_t9[0])
                rmse_t18_list.append(rmse_t18[0])
            if file.endswith("_NSE.csv"):
                nse_df = pd.read_csv(data)
                nse_t1, nse_t9, nse_t18 = nse_df[["0"]].iloc[0], nse_df[["0"]].iloc[8], nse_df[["0"]].iloc[17]
                nse_t1_list.append(nse_t1[0])
                nse_t9_list.append(nse_t9[0])
                nse_t18_list.append(nse_t18[0])
            if file.endswith("_MAE.csv"):
                mae_df = pd.read_csv(data)
                mae_t1, mae_t9, mae_t18 = mae_df[["0"]].iloc[0], mae_df[["0"]].iloc[8], mae_df[["0"]].iloc[17]
                mae_t1_list.append(mae_t1[0])
                mae_t9_list.append(mae_t9[0])
                mae_t18_list.append(mae_t18[0])
            # Storm-only metric files: rows 0/1/2 hold t+1/t+9/t+18.
            if file.endswith("_RMSE_storms.csv"):
                # print(file)
                rmse_df = pd.read_csv(data)
                rmse_storms_t1, rmse_storms_t9, rmse_storms_t18 = rmse_df[["0"]].iloc[0], rmse_df[["0"]].iloc[1],\
                                                                  rmse_df[["0"]].iloc[2]
                rmse_storms_t1_list.append(rmse_storms_t1[0])
                rmse_storms_t9_list.append(rmse_storms_t9[0])
                rmse_storms_t18_list.append(rmse_storms_t18[0])
            if file.endswith("_NSE_storms.csv"):
                nse_df = pd.read_csv(data)
                nse_storms_t1, nse_storms_t9, nse_storms_t18 = nse_df[["0"]].iloc[0], nse_df[["0"]].iloc[1],\
                                                               nse_df[["0"]].iloc[2]
                nse_storms_t1_list.append(nse_storms_t1[0])
                nse_storms_t9_list.append(nse_storms_t9[0])
                nse_storms_t18_list.append(nse_storms_t18[0])
            if file.endswith("_MAE_storms.csv"):
                mae_df = pd.read_csv(data)
                mae_storms_t1, mae_storms_t9, mae_storms_t18 = mae_df[["0"]].iloc[0], mae_df[["0"]].iloc[1],\
                                                               mae_df[["0"]].iloc[2]
                mae_storms_t1_list.append(mae_storms_t1[0])
                mae_storms_t9_list.append(mae_storms_t9[0])
                mae_storms_t18_list.append(mae_storms_t18[0])
            count += 1
        # write extracted data to data frames
        folder_RMSE_df = pd.DataFrame([rmse_t1_list, rmse_t9_list, rmse_t18_list]).transpose()
        folder_RMSE_df.columns = [(folder_name + "_t+1"), (folder_name + "_t+9"), (folder_name + "_t+18")]
        # print("folder rmse df", folder_RMSE_df.head())
        folder_NSE_df = pd.DataFrame([nse_t1_list, nse_t9_list, nse_t18_list]).transpose()
        folder_NSE_df.columns = [(folder_name + "_t+1"), (folder_name + "_t+9"), (folder_name + "_t+18")]
        folder_MAE_df = pd.DataFrame([mae_t1_list, mae_t9_list, mae_t18_list]).transpose()
        folder_MAE_df.columns = [(folder_name + "_t+1"), (folder_name + "_t+9"), (folder_name + "_t+18")]
        # Storm-subset columns only exist for the "full" bootstrap folders.
        if folder_name1 == "full":
            folder_storms_RMSE_df = pd.DataFrame([rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list])\
                .transpose()
            folder_storms_RMSE_df.columns = [(folder_name + "storms_t+1"), (folder_name + "storms_t+9"),
                                             (folder_name + "storms_t+18")]
            # print("folder rmse df", folder_RMSE_df.head())
            folder_storms_NSE_df = pd.DataFrame([nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list])\
                .transpose()
            folder_storms_NSE_df.columns = [(folder_name + "storms_t+1"), (folder_name + "storms_t+9"),
                                            (folder_name + "storms_t+18")]
            folder_storms_MAE_df = pd.DataFrame([mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list])\
                .transpose()
            folder_storms_MAE_df.columns = [(folder_name + "storms_t+1"), (folder_name + "storms_t+9"),
                                            (folder_name + "storms_t+18")]
        # append folder dataframes to lists
        rmse_df_list.append(folder_RMSE_df)
        nse_df_list.append(folder_NSE_df)
        mae_df_list.append(folder_MAE_df)
        if folder_name1 == "full":
            rmse_df_list.append(folder_storms_RMSE_df)
            nse_df_list.append(folder_storms_NSE_df)
            mae_df_list.append(folder_storms_MAE_df)
    # concat data to well dfs
    rmse_df = pd.concat(rmse_df_list, axis=1)
    # Truncate to a common number of bootstrap replicates across folders.
    rmse_df = rmse_df[:948]
    nse_df = pd.concat(nse_df_list, axis=1)
    nse_df = nse_df[:1000]
    mae_df = pd.concat(mae_df_list, axis=1)
    mae_df = mae_df[:1000]
    # rmse_storms_df = pd.concat(rmse_storms_df_list, axis=1)
    # nse_storms_df = pd.concat(nse_storms_df_list, axis=1)
    # mae_storms_df = pd.concat(mae_storms_df_list, axis=1)
    # save well dfs
    rmse_df.to_csv(os.path.join(out_folder, "rmse_df.csv"), index=False)
    nse_df.to_csv(os.path.join(out_folder, "nse_df.csv"), index=False)
    mae_df.to_csv(os.path.join(out_folder, "mae_df.csv"), index=False)
    # rmse_storms_df.to_csv(os.path.join(out_folder, "rmse_storms_df.csv"), index=False)
    # nse_storms_df.to_csv(os.path.join(out_folder, "nse_storms_df.csv"), index=False)
    # mae_storms_df.to_csv(os.path.join(out_folder, "mae_storms_df.csv"), index=False)
    # plot histograms of RMSE for individual well
    col_list = rmse_df.columns
    plt.figure(1, figsize=(6, 9))
    for i in range(0, len(col_list), 1):
        ax = plt.subplot(6, 3, i+1)
        rmse_df.hist(ax=ax, column=col_list[i], bins=15)
        # Column names look like "<bs_type>_<model>_t+<h>", e.g. "full_rnn_t+1".
        bs_type = col_list[i].split("_")[0]
        model_type = col_list[i].split("_")[1]
        ax.set_title("")
        # Left column of panels: label rows with bootstrap/model type.
        if i % 3 == 0:
            ax.set_ylabel(bs_type + " " + model_type)
        # Top row of panels: label columns with forecast horizon.
        if i < 3:
            ax.set_title(col_list[i].split("_")[2])
    plt.tight_layout()
    plt.gcf().text(0.5, 0.05, "RMSE (m)")
    plt.subplots_adjust(bottom=0.1)
    # plt.show()
    plt.savefig(os.path.join(out_folder, "rmse_hists.png"), dpi=300)
    plt.close()
    # perform t-tests
    # Two-sample independent t-tests comparing bootstrap RMSE distributions
    # between model/dataset pairings at each forecast horizon.
    rnn_full_storm_tvalues, rnn_full_storm_pvalues = [], []
    lstm_full_storm_tvalues, lstm_full_storm_pvalues = [], []
    rnn_lstm_full_tvalues, rnn_lstm_full_pvalues = [], []
    rnn_lstm_storm_tvalues, rnn_lstm_storm_pvalues = [], []
    fullRNNStorms_vs_stormRNN_tvalues, fullRNNStorms_vs_stormRNN_pvalues = [], []
    fullLSTMStorms_vs_stormLSTM_tvalues, fullLSTMStorms_vs_stormLSTM_pvalues = [], []
    rnn_full_storm_t1_t, rnn_full_storm_t1_p = stats.ttest_ind(rmse_df["full_rnn_t+1"], rmse_df["storm_rnn_t+1"])
    rnn_full_storm_t9_t, rnn_full_storm_t9_p = stats.ttest_ind(rmse_df["full_rnn_t+9"], rmse_df["storm_rnn_t+9"])
    rnn_full_storm_t18_t, rnn_full_storm_t18_p = stats.ttest_ind(rmse_df["full_rnn_t+18"], rmse_df["storm_rnn_t+18"])
    rnn_full_storm_tvalues.append([rnn_full_storm_t1_t, rnn_full_storm_t9_t, rnn_full_storm_t18_t])
    rnn_full_storm_pvalues.append([rnn_full_storm_t1_p, rnn_full_storm_t9_p, rnn_full_storm_t18_p])
    lstm_full_storm_t1_t, lstm_full_storm_t1_p = stats.ttest_ind(rmse_df["full_lstm_t+1"], rmse_df["storm_lstm_t+1"])
    lstm_full_storm_t9_t, lstm_full_storm_t9_p = stats.ttest_ind(rmse_df["full_lstm_t+9"], rmse_df["storm_lstm_t+9"])
    lstm_full_storm_t18_t, lstm_full_storm_t18_p = stats.ttest_ind(rmse_df["full_lstm_t+18"],rmse_df["storm_lstm_t+18"])
    lstm_full_storm_tvalues.append([lstm_full_storm_t1_t, lstm_full_storm_t9_t, lstm_full_storm_t18_t])
    lstm_full_storm_pvalues.append([lstm_full_storm_t1_p, lstm_full_storm_t9_p, lstm_full_storm_t18_p])
    rnn_lstm_full_t1_t, rnn_lstm_full_t1_p = stats.ttest_ind(rmse_df["full_rnn_t+1"], rmse_df["full_lstm_t+1"])
    rnn_lstm_full_t9_t, rnn_lstm_full_t9_p = stats.ttest_ind(rmse_df["full_rnn_t+9"], rmse_df["full_lstm_t+9"])
    rnn_lstm_full_t18_t, rnn_lstm_full_t18_p = stats.ttest_ind(rmse_df["full_rnn_t+18"], rmse_df["full_lstm_t+18"])
    rnn_lstm_full_tvalues.append([rnn_lstm_full_t1_t, rnn_lstm_full_t9_t, rnn_lstm_full_t18_t])
    rnn_lstm_full_pvalues.append([rnn_lstm_full_t1_p, rnn_lstm_full_t9_p, rnn_lstm_full_t18_p])
    rnn_lstm_storm_t1_t, rnn_lstm_storm_t1_p = stats.ttest_ind(rmse_df["storm_rnn_t+1"], rmse_df["storm_lstm_t+1"])
    rnn_lstm_storm_t9_t, rnn_lstm_storm_t9_p = stats.ttest_ind(rmse_df["storm_rnn_t+9"], rmse_df["storm_lstm_t+9"])
    rnn_lstm_storm_t18_t, rnn_lstm_storm_t18_p = stats.ttest_ind(rmse_df["storm_rnn_t+18"], rmse_df["storm_lstm_t+18"])
    rnn_lstm_storm_tvalues.append([rnn_lstm_storm_t1_t, rnn_lstm_storm_t9_t, rnn_lstm_storm_t18_t])
    rnn_lstm_storm_pvalues.append([rnn_lstm_storm_t1_p, rnn_lstm_storm_t9_p, rnn_lstm_storm_t18_p])
    fullRNNStorms_vs_stormRNN_t1_t, fullRNNStorms_vs_stormRNN_t1_p = stats.ttest_ind(rmse_df["full_rnnstorms_t+1"],
                                                                                     rmse_df["storm_rnn_t+1"])
    fullRNNStorms_vs_stormRNN_t9_t, fullRNNStorms_vs_stormRNN_t9_p = stats.ttest_ind(rmse_df["full_rnnstorms_t+9"],
                                                                                     rmse_df["storm_rnn_t+9"])
    fullRNNStorms_vs_stormRNN_t18_t, fullRNNStorms_vs_stormRNN_t18_p = stats.ttest_ind(rmse_df["full_rnnstorms_t+18"],
                                                                                       rmse_df["storm_rnn_t+18"])
    fullRNNStorms_vs_stormRNN_tvalues.append([fullRNNStorms_vs_stormRNN_t1_t, fullRNNStorms_vs_stormRNN_t9_t,
                                              fullRNNStorms_vs_stormRNN_t18_t])
    fullRNNStorms_vs_stormRNN_pvalues.append([fullRNNStorms_vs_stormRNN_t1_p, fullRNNStorms_vs_stormRNN_t9_p,
                                              fullRNNStorms_vs_stormRNN_t18_p])
    fullLSTMStorms_vs_stormLSTM_t1_t, fullLSTMStorms_vs_stormLSTM_t1_p = stats.ttest_ind(rmse_df["full_lstmstorms_t+1"],
                                                                                         rmse_df["storm_lstm_t+1"])
    fullLSTMStorms_vs_stormLSTM_t9_t, fullLSTMStorms_vs_stormLSTM_t9_p = stats.ttest_ind(rmse_df["full_lstmstorms_t+9"],
                                                                                         rmse_df["storm_lstm_t+9"])
    fullLSTMStorms_vs_stormLSTM_t18_t, fullLSTMStorms_vs_stormLSTM_t18_p = stats.ttest_ind(rmse_df["full_lstmstorms_t+18"],
                                                                                           rmse_df["storm_lstm_t+18"])
    fullLSTMStorms_vs_stormLSTM_tvalues.append([fullLSTMStorms_vs_stormLSTM_t1_t, fullLSTMStorms_vs_stormLSTM_t9_t,
                                                fullLSTMStorms_vs_stormLSTM_t18_t])
    fullLSTMStorms_vs_stormLSTM_pvalues.append([fullLSTMStorms_vs_stormLSTM_t1_p, fullLSTMStorms_vs_stormLSTM_t9_p,
                                                fullLSTMStorms_vs_stormLSTM_t18_p])
    # save t-test results to dataframe
    ttest_cols = ["rnn_full_storm_t", "rnn_full_storm_p", "lstm_full_storm_t", "lstm_full_storm_p",
                  "rnn_lstm_full_t", "rnn_lstm_full_p", "rnn_lstm_storm_t", "rnn_lstm_storm_p",
                  "fullRNNStorms_vs_stormRNN_t", "fullRNNStorms_vs_stormRNN_p", "fullLSTMStorms_vs_stormLSTM_t",
                  "fullLSTMStorms_vs_stormLSTM_p"]
    ttest_df = pd.DataFrame([rnn_full_storm_tvalues[0], rnn_full_storm_pvalues[0],
                             lstm_full_storm_tvalues[0], lstm_full_storm_pvalues[0],
                             rnn_lstm_full_tvalues[0], rnn_lstm_full_pvalues[0],
                             rnn_lstm_storm_tvalues[0], rnn_lstm_storm_pvalues[0],
                             fullRNNStorms_vs_stormRNN_tvalues[0], fullRNNStorms_vs_stormRNN_pvalues[0],
                             fullLSTMStorms_vs_stormLSTM_tvalues[0], fullLSTMStorms_vs_stormLSTM_pvalues[0]])\
        .transpose()
    ttest_df.columns = ttest_cols
    ttest_df["forecast"] = ["t+1", "t+9", "t+18"]
    ttest_df = ttest_df.set_index("forecast")
    ttest_df.to_csv(os.path.join(out_folder, "ttest.csv"))
    # calculate means
    mean_list = []
    for i in col_list:
        col_mean = rmse_df[i].mean()
        mean_list.append(col_mean)
    mean_df = pd.DataFrame(mean_list).transpose()
    mean_df.columns = col_list
    mean_df.to_csv(os.path.join(out_folder, "means.csv"), index=False)
    # calculate confidence intervals
    # 95% CI on the mean of each bootstrap RMSE column (Student's t, n-1 dof).
    upper_ci_list = []
    lower_ci_list = []
    for i in col_list:
        col_ci = stats.t.interval(0.95, len(rmse_df[i]) - 1, loc=np.mean(rmse_df[i]), scale=stats.sem(rmse_df[i]))
        upper_ci_list.append(col_ci[1])
        lower_ci_list.append(col_ci[0])
    # calculate error
    # rnn_rmse_t1_err = rnn_rmse_t1_mean - rnn_rmse_t1_ci[0]
    # save CIs to df
    ci_df = pd.DataFrame([lower_ci_list, upper_ci_list], columns=col_list, index=["lower", "upper"])
    ci_df.to_csv(os.path.join(out_folder, "CIs.csv"))
| [
"pandas.DataFrame",
"os.listdir",
"matplotlib.pyplot.subplot",
"os.path.join",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"scipy.stats.ttest_ind",
"matplotlib.pyplot.figure",
"numpy.mean",
"scipy.stats.sem",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.py... | [((325, 369), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (351, 369), False, 'import matplotlib\n'), ((7305, 7336), 'pandas.concat', 'pd.concat', (['rmse_df_list'], {'axis': '(1)'}), '(rmse_df_list, axis=1)\n', (7314, 7336), True, 'import pandas as pd\n'), ((7380, 7410), 'pandas.concat', 'pd.concat', (['nse_df_list'], {'axis': '(1)'}), '(nse_df_list, axis=1)\n', (7389, 7410), True, 'import pandas as pd\n'), ((7453, 7483), 'pandas.concat', 'pd.concat', (['mae_df_list'], {'axis': '(1)'}), '(mae_df_list, axis=1)\n', (7462, 7483), True, 'import pandas as pd\n'), ((8296, 8325), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6, 9)'}), '(1, figsize=(6, 9))\n', (8306, 8325), True, 'import matplotlib.pyplot as plt\n'), ((8738, 8756), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8754, 8756), True, 'import matplotlib.pyplot as plt\n'), ((8805, 8836), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.1)'}), '(bottom=0.1)\n', (8824, 8836), True, 'import matplotlib.pyplot as plt\n'), ((8930, 8941), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8939, 8941), True, 'import matplotlib.pyplot as plt\n'), ((9431, 9497), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnn_t+1']", "rmse_df['storm_rnn_t+1']"], {}), "(rmse_df['full_rnn_t+1'], rmse_df['storm_rnn_t+1'])\n", (9446, 9497), False, 'from scipy import stats\n'), ((9546, 9612), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnn_t+9']", "rmse_df['storm_rnn_t+9']"], {}), "(rmse_df['full_rnn_t+9'], rmse_df['storm_rnn_t+9'])\n", (9561, 9612), False, 'from scipy import stats\n'), ((9663, 9731), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnn_t+18']", "rmse_df['storm_rnn_t+18']"], {}), "(rmse_df['full_rnn_t+18'], rmse_df['storm_rnn_t+18'])\n", (9678, 9731), False, 'from scipy import stats\n'), ((9986, 10054), 
'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_lstm_t+1']", "rmse_df['storm_lstm_t+1']"], {}), "(rmse_df['full_lstm_t+1'], rmse_df['storm_lstm_t+1'])\n", (10001, 10054), False, 'from scipy import stats\n'), ((10105, 10173), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_lstm_t+9']", "rmse_df['storm_lstm_t+9']"], {}), "(rmse_df['full_lstm_t+9'], rmse_df['storm_lstm_t+9'])\n", (10120, 10173), False, 'from scipy import stats\n'), ((10226, 10296), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_lstm_t+18']", "rmse_df['storm_lstm_t+18']"], {}), "(rmse_df['full_lstm_t+18'], rmse_df['storm_lstm_t+18'])\n", (10241, 10296), False, 'from scipy import stats\n'), ((10554, 10620), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnn_t+1']", "rmse_df['full_lstm_t+1']"], {}), "(rmse_df['full_rnn_t+1'], rmse_df['full_lstm_t+1'])\n", (10569, 10620), False, 'from scipy import stats\n'), ((10667, 10733), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnn_t+9']", "rmse_df['full_lstm_t+9']"], {}), "(rmse_df['full_rnn_t+9'], rmse_df['full_lstm_t+9'])\n", (10682, 10733), False, 'from scipy import stats\n'), ((10782, 10850), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnn_t+18']", "rmse_df['full_lstm_t+18']"], {}), "(rmse_df['full_rnn_t+18'], rmse_df['full_lstm_t+18'])\n", (10797, 10850), False, 'from scipy import stats\n'), ((11095, 11163), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['storm_rnn_t+1']", "rmse_df['storm_lstm_t+1']"], {}), "(rmse_df['storm_rnn_t+1'], rmse_df['storm_lstm_t+1'])\n", (11110, 11163), False, 'from scipy import stats\n'), ((11212, 11280), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['storm_rnn_t+9']", "rmse_df['storm_lstm_t+9']"], {}), "(rmse_df['storm_rnn_t+9'], rmse_df['storm_lstm_t+9'])\n", (11227, 11280), False, 'from scipy import stats\n'), ((11331, 11401), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['storm_rnn_t+18']", 
"rmse_df['storm_lstm_t+18']"], {}), "(rmse_df['storm_rnn_t+18'], rmse_df['storm_lstm_t+18'])\n", (11346, 11401), False, 'from scipy import stats\n'), ((11676, 11748), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnnstorms_t+1']", "rmse_df['storm_rnn_t+1']"], {}), "(rmse_df['full_rnnstorms_t+1'], rmse_df['storm_rnn_t+1'])\n", (11691, 11748), False, 'from scipy import stats\n'), ((11905, 11977), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnnstorms_t+9']", "rmse_df['storm_rnn_t+9']"], {}), "(rmse_df['full_rnnstorms_t+9'], rmse_df['storm_rnn_t+9'])\n", (11920, 11977), False, 'from scipy import stats\n'), ((12136, 12210), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_rnnstorms_t+18']", "rmse_df['storm_rnn_t+18']"], {}), "(rmse_df['full_rnnstorms_t+18'], rmse_df['storm_rnn_t+18'])\n", (12151, 12210), False, 'from scipy import stats\n'), ((12759, 12833), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_lstmstorms_t+1']", "rmse_df['storm_lstm_t+1']"], {}), "(rmse_df['full_lstmstorms_t+1'], rmse_df['storm_lstm_t+1'])\n", (12774, 12833), False, 'from scipy import stats\n'), ((12998, 13072), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_lstmstorms_t+9']", "rmse_df['storm_lstm_t+9']"], {}), "(rmse_df['full_lstmstorms_t+9'], rmse_df['storm_lstm_t+9'])\n", (13013, 13072), False, 'from scipy import stats\n'), ((13239, 13315), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["rmse_df['full_lstmstorms_t+18']", "rmse_df['storm_lstm_t+18']"], {}), "(rmse_df['full_lstmstorms_t+18'], rmse_df['storm_lstm_t+18'])\n", (13254, 13315), False, 'from scipy import stats\n'), ((15727, 15820), 'pandas.DataFrame', 'pd.DataFrame', (['[lower_ci_list, upper_ci_list]'], {'columns': 'col_list', 'index': "['lower', 'upper']"}), "([lower_ci_list, upper_ci_list], columns=col_list, index=[\n 'lower', 'upper'])\n", (15739, 15820), True, 'import pandas as pd\n'), ((2362, 2380), 'os.listdir', 'os.listdir', (['folder'], {}), 
'(folder)\n', (2372, 2380), False, 'import os\n'), ((7740, 7779), 'os.path.join', 'os.path.join', (['out_folder', '"""rmse_df.csv"""'], {}), "(out_folder, 'rmse_df.csv')\n", (7752, 7779), False, 'import os\n'), ((7813, 7851), 'os.path.join', 'os.path.join', (['out_folder', '"""nse_df.csv"""'], {}), "(out_folder, 'nse_df.csv')\n", (7825, 7851), False, 'import os\n'), ((7885, 7923), 'os.path.join', 'os.path.join', (['out_folder', '"""mae_df.csv"""'], {}), "(out_folder, 'mae_df.csv')\n", (7897, 7923), False, 'import os\n'), ((8382, 8406), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(3)', '(i + 1)'], {}), '(6, 3, i + 1)\n', (8393, 8406), True, 'import matplotlib.pyplot as plt\n'), ((8872, 8914), 'os.path.join', 'os.path.join', (['out_folder', '"""rmse_hists.png"""'], {}), "(out_folder, 'rmse_hists.png')\n", (8884, 8914), False, 'import os\n'), ((14952, 14989), 'os.path.join', 'os.path.join', (['out_folder', '"""ttest.csv"""'], {}), "(out_folder, 'ttest.csv')\n", (14964, 14989), False, 'import os\n'), ((15241, 15278), 'os.path.join', 'os.path.join', (['out_folder', '"""means.csv"""'], {}), "(out_folder, 'means.csv')\n", (15253, 15278), False, 'import os\n'), ((15834, 15869), 'os.path.join', 'os.path.join', (['out_folder', '"""CIs.csv"""'], {}), "(out_folder, 'CIs.csv')\n", (15846, 15869), False, 'import os\n'), ((8762, 8771), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8769, 8771), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14647), 'pandas.DataFrame', 'pd.DataFrame', (['[rnn_full_storm_tvalues[0], rnn_full_storm_pvalues[0],\n lstm_full_storm_tvalues[0], lstm_full_storm_pvalues[0],\n rnn_lstm_full_tvalues[0], rnn_lstm_full_pvalues[0],\n rnn_lstm_storm_tvalues[0], rnn_lstm_storm_pvalues[0],\n fullRNNStorms_vs_stormRNN_tvalues[0], fullRNNStorms_vs_stormRNN_pvalues\n [0], fullLSTMStorms_vs_stormLSTM_tvalues[0],\n fullLSTMStorms_vs_stormLSTM_pvalues[0]]'], {}), '([rnn_full_storm_tvalues[0], rnn_full_storm_pvalues[0],\n 
lstm_full_storm_tvalues[0], lstm_full_storm_pvalues[0],\n rnn_lstm_full_tvalues[0], rnn_lstm_full_pvalues[0],\n rnn_lstm_storm_tvalues[0], rnn_lstm_storm_pvalues[0],\n fullRNNStorms_vs_stormRNN_tvalues[0], fullRNNStorms_vs_stormRNN_pvalues\n [0], fullLSTMStorms_vs_stormLSTM_tvalues[0],\n fullLSTMStorms_vs_stormLSTM_pvalues[0]])\n', (14248, 14647), True, 'import pandas as pd\n'), ((15151, 15174), 'pandas.DataFrame', 'pd.DataFrame', (['mean_list'], {}), '(mean_list)\n', (15163, 15174), True, 'import pandas as pd\n'), ((2627, 2644), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (2638, 2644), True, 'import pandas as pd\n'), ((2981, 2998), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (2992, 2998), True, 'import pandas as pd\n'), ((3323, 3340), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (3334, 3340), True, 'import pandas as pd\n'), ((3705, 3722), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (3716, 3722), True, 'import pandas as pd\n'), ((4196, 4213), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (4207, 4213), True, 'import pandas as pd\n'), ((4672, 4689), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (4683, 4689), True, 'import pandas as pd\n'), ((5169, 5226), 'pandas.DataFrame', 'pd.DataFrame', (['[rmse_t1_list, rmse_t9_list, rmse_t18_list]'], {}), '([rmse_t1_list, rmse_t9_list, rmse_t18_list])\n', (5181, 5226), True, 'import pandas as pd\n'), ((5430, 5484), 'pandas.DataFrame', 'pd.DataFrame', (['[nse_t1_list, nse_t9_list, nse_t18_list]'], {}), '([nse_t1_list, nse_t9_list, nse_t18_list])\n', (5442, 5484), True, 'import pandas as pd\n'), ((5629, 5683), 'pandas.DataFrame', 'pd.DataFrame', (['[mae_t1_list, mae_t9_list, mae_t18_list]'], {}), '([mae_t1_list, mae_t9_list, mae_t18_list])\n', (5641, 5683), True, 'import pandas as pd\n'), ((15471, 15490), 'numpy.mean', 'np.mean', (['rmse_df[i]'], {}), '(rmse_df[i])\n', (15478, 15490), True, 'import numpy as np\n'), ((15498, 
15519), 'scipy.stats.sem', 'stats.sem', (['rmse_df[i]'], {}), '(rmse_df[i])\n', (15507, 15519), False, 'from scipy import stats\n'), ((5876, 5954), 'pandas.DataFrame', 'pd.DataFrame', (['[rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list]'], {}), '([rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list])\n', (5888, 5954), True, 'import pandas as pd\n'), ((6267, 6342), 'pandas.DataFrame', 'pd.DataFrame', (['[nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list]'], {}), '([nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list])\n', (6279, 6342), True, 'import pandas as pd\n'), ((6591, 6666), 'pandas.DataFrame', 'pd.DataFrame', (['[mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list]'], {}), '([mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list])\n', (6603, 6666), True, 'import pandas as pd\n')] |
import numpy as np
from mmhuman3d.data.datasets import HumanImageDataset
def test_human_image_dataset():
    """Smoke-test HumanImageDataset loading and evaluation.

    First iterates a training-mode dataset and checks every sample
    exposes the expected keys, then runs ``evaluate`` for three keypoint
    conventions (previously three copy-pasted blocks) and checks the
    MPJPE metrics are present and positive.
    """
    train_dataset = HumanImageDataset(
        data_prefix='tests/data',
        pipeline=[],
        dataset_name='h36m',
        ann_file='sample_3dpw_test.npz')
    data_keys = [
        'img_prefix', 'image_path', 'dataset_name', 'sample_idx', 'bbox_xywh',
        'center', 'scale', 'keypoints2d', 'keypoints3d', 'has_smpl',
        'smpl_body_pose', 'smpl_global_orient', 'smpl_betas', 'smpl_transl'
    ]
    for data in train_dataset:
        for key in data_keys:
            assert key in data

    num_data = 1
    # Evaluate under several keypoint conventions; each pairs a
    # keypoint_dst name with the number of keypoints it produces.
    for keypoint_dst, num_keypoints in (('h36m', 17), ('smpl_24', 24),
                                        ('smpl_49', 49)):
        test_dataset = HumanImageDataset(
            data_prefix='tests/data',
            pipeline=[],
            dataset_name='pw3d',
            body_model=dict(
                type='SMPL',
                keypoint_src='smpl_45',
                keypoint_dst=keypoint_dst,
                model_path='data/body_models/smpl'),
            ann_file='sample_3dpw_test.npz')
        test_dataset.num_data = num_data
        outputs = [{
            'keypoints_3d': np.random.rand(num_data, num_keypoints, 3),
            'image_idx': np.arange(num_data)
        }]
        res = test_dataset.evaluate(outputs, res_folder='tests/data')
        assert 'MPJPE' in res
        assert 'MPJPE-PA' in res
        assert res['MPJPE'] > 0
        assert res['MPJPE-PA'] > 0
def test_human_image_dataset_smc():
    """Smoke-test loading a HuMMan (smc-derived) annotation file.

    Only checks that every sample produced by the dataset carries the
    full set of expected annotation keys.
    """
    dataset = HumanImageDataset(
        data_prefix='tests/data',
        pipeline=[],
        dataset_name='humman',
        ann_file='sample_humman_test_iphone_ds10.npz')
    expected_keys = (
        'img_prefix', 'image_path', 'image_id', 'dataset_name', 'sample_idx',
        'bbox_xywh', 'center', 'scale', 'keypoints2d', 'keypoints3d',
        'has_smpl', 'smpl_body_pose', 'smpl_global_orient', 'smpl_betas',
        'smpl_transl')
    for sample in dataset:
        for expected in expected_keys:
            assert expected in sample
| [
"numpy.random.rand",
"mmhuman3d.data.datasets.HumanImageDataset",
"numpy.arange"
] | [((128, 243), 'mmhuman3d.data.datasets.HumanImageDataset', 'HumanImageDataset', ([], {'data_prefix': '"""tests/data"""', 'pipeline': '[]', 'dataset_name': '"""h36m"""', 'ann_file': '"""sample_3dpw_test.npz"""'}), "(data_prefix='tests/data', pipeline=[], dataset_name=\n 'h36m', ann_file='sample_3dpw_test.npz')\n", (145, 243), False, 'from mmhuman3d.data.datasets import HumanImageDataset\n'), ((2722, 2853), 'mmhuman3d.data.datasets.HumanImageDataset', 'HumanImageDataset', ([], {'data_prefix': '"""tests/data"""', 'pipeline': '[]', 'dataset_name': '"""humman"""', 'ann_file': '"""sample_humman_test_iphone_ds10.npz"""'}), "(data_prefix='tests/data', pipeline=[], dataset_name=\n 'humman', ann_file='sample_humman_test_iphone_ds10.npz')\n", (2739, 2853), False, 'from mmhuman3d.data.datasets import HumanImageDataset\n'), ((1046, 1077), 'numpy.random.rand', 'np.random.rand', (['num_data', '(17)', '(3)'], {}), '(num_data, 17, 3)\n', (1060, 1077), True, 'import numpy as np\n'), ((1100, 1119), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (1109, 1119), True, 'import numpy as np\n'), ((1713, 1744), 'numpy.random.rand', 'np.random.rand', (['num_data', '(24)', '(3)'], {}), '(num_data, 24, 3)\n', (1727, 1744), True, 'import numpy as np\n'), ((1767, 1786), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (1776, 1786), True, 'import numpy as np\n'), ((2380, 2411), 'numpy.random.rand', 'np.random.rand', (['num_data', '(49)', '(3)'], {}), '(num_data, 49, 3)\n', (2394, 2411), True, 'import numpy as np\n'), ((2434, 2453), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (2443, 2453), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# File name : client.py
# Description : client
# Website : www.adeept.com
# Author : William
# Date : 2019/08/28
from socket import *
import sys
import time
import threading as thread
import tkinter as tk
import math
# The FPV video features need OpenCV, pyzmq, base64 and numpy; the GUI
# still starts without them, but video streaming will be unavailable.
try:
    import cv2
    import zmq
    import base64
    import numpy as np
except:
    print("Couldn't import OpenCV, you need to install it first.")
# Latest OSD servo-error values, updated from 'OSD x y' server messages
# (see connection_thread) and consumed by advanced_OSD_add().
OSD_X = 0#1
OSD_Y = 0
# Non-zero while the artificial-horizon overlay is enabled (function_4).
advanced_OSD = 0
def global_init():
    """Initialise the shared UI state flags and colour palette globals."""
    global DS_stu, TS_stu, color_bg, color_text, color_btn, color_line, color_can, color_oval, target_color
    global speed, ip_stu, Switch_3, Switch_2, Switch_1, servo_stu, function_stu
    # Motion / servo / mode state flags, 0 = idle or off.
    DS_stu = 0
    TS_stu = 0
    servo_stu = 0
    function_stu = 0
    Switch_1 = 0
    Switch_2 = 0
    Switch_3 = 0
    # Colour palette for the tkinter UI.
    color_bg = '#000000'       # background
    color_text = '#E1F5FE'     # text
    color_btn = '#0277BD'      # buttons
    color_line = '#01579B'     # lines
    color_can = '#212121'      # canvas
    color_oval = '#2196F3'     # ovals
    target_color = '#FF6D00'
    speed = 1                  # default speed value
    ip_stu = 1                 # 1 = not connected yet

global_init()
########>>>>>VIDEO<<<<<########
def RGB_to_Hex(r, g, b):
    """Convert an RGB triple of 0-255 ints to an uppercase '#RRGGBB' string.

    The previous implementation sliced the output of ``hex()`` and patched
    the result with ``str.replace`` — fragile and dependent on hex()
    formatting details.  Zero-padded %02X formatting is exact for the
    intended 0-255 range and produces identical output.
    """
    return '#%02X%02X%02X' % (r, g, b)
def rgb2hsv(r, g, b):
    """Convert 0-255 RGB values to the 'H S V' string the server expects.

    H is scaled to 0-255 (OpenCV hue convention), S and V to 0-100;
    all three are truncated to ints and joined with single spaces.
    """
    rf, gf, bf = r / 255.0, g / 255.0, b / 255.0
    v_max = max(rf, gf, bf)
    v_min = min(rf, gf, bf)
    delta = v_max - v_min
    if v_max == v_min:
        hue = 0
    elif v_max == rf:
        hue = (60 * ((gf - bf) / delta) + 360) % 360
    elif v_max == gf:
        hue = (60 * ((bf - rf) / delta) + 120) % 360
    else:  # v_max == bf
        hue = (60 * ((rf - gf) / delta) + 240) % 360
    sat = 0 if v_max == 0 else (delta / v_max) * 100
    val = v_max * 100
    hue = hue / 360 * 255
    return '%d %d %d' % (int(hue), int(sat), int(val))
def video_thread():
    """Open the ZMQ SUB socket that receives the FPV video stream.

    Binds tcp://*:5555 and subscribes to every message (empty topic
    prefix), then initialises the font and FPS counters shared with
    opencv_r() and get_FPS().
    """
    global footage_socket, font, frame_num, fps
    context = zmq.Context()
    footage_socket = context.socket(zmq.SUB)
    footage_socket.bind('tcp://*:5555')
    # Fix: np.unicode was deprecated in NumPy 1.20 and removed in 2.0;
    # setsockopt_string takes a plain str, and '' subscribes to all topics.
    footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')
    font = cv2.FONT_HERSHEY_SIMPLEX
    frame_num = 0
    fps = 0
def getposBgr(event, x, y, flags, param):
    """OpenCV mouse callback for the BGR 'Stream' window.

    On left-click: copies the clicked pixel into the R/G/B tk variables,
    previews the colour on canvas_show, logs the BGR and HSV values, and
    sends the HSV triple to the server as a colour-tracking target.
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    bgr_pix = source[y, x]
    # OpenCV stores pixels as B, G, R.
    var_R.set(bgr_pix[2])
    var_G.set(bgr_pix[1])
    var_B.set(bgr_pix[0])
    canvas_show.config(bg=RGB_to_Hex(int(var_R.get()), int(var_G.get()), int(var_B.get())))
    print("BGR is", bgr_pix)
    print("HSV is", HSVimg[y, x])
    tcpClicSock.send(('FCSET %s %s %s' % (HSVimg[y, x][0], HSVimg[y, x][1], HSVimg[y, x][2])).encode())
def getposHsv(event, x, y, flags, param):
    """OpenCV mouse callback for the 'StreamHSV' window.

    On left-click: logs and sends the clicked HSV pixel to the server as
    a colour-tracking target, then mirrors the matching BGR pixel into
    the R/G/B tk variables and the preview canvas.
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    hsv_pix = HSVimg[y, x]
    print("HSV is", hsv_pix)
    tcpClicSock.send(('FCSET %s %s %s' % (hsv_pix[0], hsv_pix[1], hsv_pix[2])).encode())
    bgr_pix = source[y, x]
    var_R.set(bgr_pix[2])
    var_G.set(bgr_pix[1])
    var_B.set(bgr_pix[0])
    canvas_show.config(bg=RGB_to_Hex(int(var_R.get()), int(var_G.get()), int(var_B.get())))
def get_FPS():
    """Once per second, latch the frame counter into fps and reset it.

    Runs forever on a daemon thread; frame_num is incremented by
    opencv_r() for every decoded frame.
    """
    global frame_num, fps
    while True:
        try:
            time.sleep(1)
        except:
            # Sleep was interrupted; back off and try again.
            time.sleep(1)
            continue
        fps = frame_num
        frame_num = 0
def advanced_OSD_add(draw_on, X, Y):
    """Overlay an artificial-horizon style OSD on the video frame.

    Args:
        draw_on: BGR image (numpy array) drawn on in place.
        X: horizontal servo error; shifts the horizon vertically (x10, x3 px).
        Y: vertical servo error; tilts the horizon (scaled x6 - 2 degrees).

    Improvements over the original: the identical math.radians/cos/sin
    values were recomputed eight times and the eight tick lines were
    copy-pasted; both are now computed once / looped.
    """
    error_X = X * 10
    error_Y = Y * 6 - 2
    cos_y = math.cos(math.radians(error_Y))
    sin_y = math.sin(math.radians(error_Y))
    # Main horizon line plus 'Up'/'Down' reference lines 270 px away.
    X_s = int(200 + 120 - 120 * cos_y)
    Y_s = int(240 + 120 * sin_y - error_X * 3)
    X_e = int(320 + 120 * cos_y)
    Y_e = int(240 - 120 * sin_y - error_X * 3)
    cv2.line(draw_on, (X_s, Y_s), (X_e, Y_e), (0, 255, 0), 2)
    cv2.putText(draw_on, ('horizontal line'), (X_e + 10, Y_e), font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.line(draw_on, (X_s, Y_s + 270), (X_e, Y_e + 270), (0, 255, 0), 2)
    cv2.putText(draw_on, ('Down'), (X_e + 10, Y_e + 270), font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.line(draw_on, (X_s, Y_s - 270), (X_e, Y_e - 270), (0, 255, 0), 2)
    cv2.putText(draw_on, ('Up'), (X_e + 10, Y_e - 270), font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
    # Shorter pitch-ladder tick lines at +-90/180/360/450 px offsets.
    X_s_short = int(260 + 60 - 60 * cos_y)
    Y_s_short = int(240 + 60 * sin_y - error_X * 3)
    X_e_short = int(320 + 60 * cos_y)
    Y_e_short = int(240 - 60 * sin_y - error_X * 3)
    for off in (90, 180, 360, 450):
        cv2.line(draw_on, (X_s_short, Y_s_short + off), (X_e_short, Y_e_short + off), (0, 255, 0))
    for off in (90, 180, 360, 450):
        cv2.line(draw_on, (X_s_short, Y_s_short - off), (X_e_short, Y_e_short - off), (0, 255, 0))
def opencv_r():
    """Receive, decorate and display the FPV video stream.

    Runs forever on a daemon thread (started by socket_connect).  Each
    loop: receives one base64 frame from footage_socket, decodes it,
    overlays FPS / telemetry / steering-bar OSD, optionally the
    artificial horizon, then shows both the BGR and HSV views with
    their colour-picking mouse callbacks.  Any receive/decode error
    terminates the loop after a short pause.
    """
    global frame_num, source, HSVimg
    while True:
        try:
            frame = footage_socket.recv_string()
            # Stream frames arrive base64-encoded JPEG; decode to BGR.
            img = base64.b64decode(frame)
            npimg = np.frombuffer(img, dtype=np.uint8)
            source = cv2.imdecode(npimg, 1)
            cv2.putText(source,('PC FPS: %s'%fps),(40,20), font, 0.5,(255,255,255),1,cv2.LINE_AA)
            # Telemetry globals are filled by Info_receive(); before the
            # first packet arrives they are undefined, hence the guard.
            try:
                cv2.putText(source,('CPU Temperature: %s'%CPU_TEP),(370,350), font, 0.5,(128,255,128),1,cv2.LINE_AA)
                cv2.putText(source,('CPU Usage: %s'%CPU_USE),(370,380), font, 0.5,(128,255,128),1,cv2.LINE_AA)
                cv2.putText(source,('RAM Usage: %s'%RAM_USE),(370,410), font, 0.5,(128,255,128),1,cv2.LINE_AA)
                # Steering indicator: outline plus a filled bar whose side
                # and length follow the sign/magnitude of CAR_DIR.
                cv2.rectangle(source, (167, 320), (473, 330), (255,255,255))
                DIR_show = int(CAR_DIR)
                if DIR_show > 0:
                    cv2.rectangle(source, ((320-DIR_show), 323), (320, 327), (255,255,255))
                elif DIR_show < 0:
                    cv2.rectangle(source, (320, 323), ((320-DIR_show), 327), (255,255,255))
                #cv2.line(source,(320,240),(260,300),(255,255,255),1)
                #cv2.line(source,(210,300),(260,300),(255,255,255),1)
                #cv2.putText(source,('%sm'%ultra_data),(210,290), font, 0.5,(255,255,255),1,cv2.LINE_AA)
            except:
                pass
            # Artificial-horizon overlay, toggled by server messages.
            if advanced_OSD:#1
                advanced_OSD_add(source, OSD_X, OSD_Y)
            #cv2.putText(source,('%sm'%ultra_data),(210,290), font, 0.5,(255,255,255),1,cv2.LINE_AA)
            cv2.imshow("Stream", source)
            cv2.setMouseCallback("Stream", getposBgr)
            HSVimg = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
            cv2.imshow("StreamHSV", HSVimg)
            cv2.setMouseCallback("StreamHSV", getposHsv)
            frame_num += 1
            cv2.waitKey(1)
        except:
            time.sleep(0.5)
            break
fps_threading=thread.Thread(target=get_FPS)          # Background thread latching the per-second frame count
fps_threading.setDaemon(True)                        # Daemon: terminates automatically when the mainloop exits
fps_threading.start()                                # Start counting immediately
video_threading=thread.Thread(target=video_thread)   # Background thread that opens the ZMQ video SUB socket
video_threading.setDaemon(True)                      # Daemon: terminates automatically when the mainloop exits
video_threading.start()                              # Start listening for the FPV stream
########>>>>>VIDEO<<<<<########
def replace_num(initial, new_num):
    """Replace the value stored after *initial* (e.g. 'IP:') in ip.txt.

    Bug fix: the old code rebuilt the matched line without its trailing
    newline, which merged it with the following line whenever ip.txt
    contained more than one entry.

    Args:
        initial: line prefix identifying the entry to rewrite.
        new_num: new value (converted with str()) stored after the prefix.
    """
    new_contents = ""
    str_num = str(new_num)
    with open("ip.txt", "r") as f:
        for line in f.readlines():
            if line.find(initial) == 0:
                # Preserve the line terminator so later lines survive.
                ending = "\n" if line.endswith("\n") else ""
                line = initial + str_num + ending
            new_contents += line
    with open("ip.txt", "w") as f:
        f.writelines(new_contents)
def num_import(initial):
    """Return the value stored after *initial* (e.g. 'IP:') in ip.txt.

    Scans every line and keeps the last one starting with the prefix;
    the returned string keeps any trailing newline the line carried.
    Raises UnboundLocalError when no line matches (as before).
    """
    with open("ip.txt") as f:
        for record in f.readlines():
            if record.startswith(initial):
                matched = record
    n = matched[len(initial):]
    return n
def connection_thread():
    """Receive status messages from the server and mirror them in the UI.

    Runs forever on a daemon thread (started by socket_connect).  Each
    message toggles a state flag and recolours the matching button:
    green ('#4CAF50') for on, the default button colour for off.
    'U:' messages redraw the ultrasonic radar view, 'OSD x y' messages
    update the artificial-horizon angles.
    """
    global Switch_3, Switch_2, Switch_1, function_stu, OSD_X, OSD_Y, OSD_info, advanced_OSD
    while 1:
        car_info = (tcpClicSock.recv(BUFSIZ)).decode()
        if not car_info:
            continue
        # --- switch-port confirmations ---
        elif 'Switch_3_on' in car_info:
            Switch_3 = 1
            Btn_Switch_3.config(bg='#4CAF50')
        elif 'Switch_2_on' in car_info:
            Switch_2 = 1
            Btn_Switch_2.config(bg='#4CAF50')
        elif 'Switch_1_on' in car_info:
            Switch_1 = 1
            Btn_Switch_1.config(bg='#4CAF50')
        elif 'Switch_3_off' in car_info:
            Switch_3 = 0
            Btn_Switch_3.config(bg=color_btn)
        elif 'Switch_2_off' in car_info:
            Switch_2 = 0
            Btn_Switch_2.config(bg=color_btn)
        elif 'Switch_1_off' in car_info:
            Switch_1 = 0
            Btn_Switch_1.config(bg=color_btn)
        # --- ultrasonic radar scan data ---
        elif 'U:' in car_info:
            print('ultrasonic radar')
            new_number2view(30,290,car_info)
        # --- function-mode confirmations ---
        elif 'function_1_on' in car_info:
            function_stu = 1
            Btn_function_1.config(bg='#4CAF50')
        elif 'function_2_on' in car_info:
            function_stu = 1
            Btn_function_2.config(bg='#4CAF50')
        elif 'function_3_on' in car_info:
            function_stu = 1
            Btn_function_3.config(bg='#4CAF50')
        elif 'function_4_on' in car_info:
            function_stu = 1
            Btn_function_4.config(bg='#4CAF50')
            # function_4 also enables the artificial-horizon overlay.
            advanced_OSD = 1
        elif 'function_5_on' in car_info:
            function_stu = 1
            Btn_function_5.config(bg='#4CAF50')
        elif 'function_6_on' in car_info:
            function_stu = 1
            Btn_function_6.config(bg='#4CAF50')
            # advanced_OSD = 1  # disabled: enabling the overlay here caused a bug
        elif 'function_1_off' in car_info:
            function_stu = 0
            Btn_function_1.config(bg=color_btn)
        elif 'function_2_off' in car_info:
            function_stu = 0
            Btn_function_2.config(bg=color_btn)
        elif 'function_3_off' in car_info:
            function_stu = 0
            Btn_function_3.config(bg=color_btn)
        elif 'function_4_off' in car_info:
            function_stu = 0
            Btn_function_4.config(bg=color_btn)
            advanced_OSD = 0
        elif 'function_5_off' in car_info:
            function_stu = 0
            Btn_function_5.config(bg=color_btn)
        elif 'function_6_off' in car_info:
            function_stu = 0
            Btn_function_6.config(bg=color_btn)
            # advanced_OSD = 0  # disabled together with the matching 'on' branch
        # --- OpenCV follow-line mode ---
        elif 'CVFL_on' in car_info:
            function_stu = 1
            Btn_CVFL.config(bg='#4CAF50')
        elif 'CVFL_off' in car_info:
            function_stu = 0
            Btn_CVFL.config(bg='#212121')
        # --- artificial-horizon angles: 'OSD <x> <y>' ---
        elif 'OSD' in car_info:
            OSD_info = car_info.split()
            try:
                OSD_X = float(OSD_info[1])
                OSD_Y = float(OSD_info[2])
            except:
                # Malformed OSD packet; keep the previous angles.
                pass
def Info_receive():
    """Serve the robot's telemetry link and refresh the status labels.

    Listens on port 2256, accepts a single client, then loops forever
    parsing whitespace-separated packets of
    'cpu_temp cpu_usage ram_usage car_dir' into the matching globals
    and tkinter labels.  Runs on a daemon thread; malformed packets
    are silently ignored so a bad frame never kills telemetry.
    """
    global CPU_TEP,CPU_USE,RAM_USE,CAR_DIR
    HOST = ''
    INFO_PORT = 2256  # Telemetry port (separate from the command port).
    ADDR = (HOST, INFO_PORT)
    InfoSock = socket(AF_INET, SOCK_STREAM)
    InfoSock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
    InfoSock.bind(ADDR)
    InfoSock.listen(5)  # Start server, wait for the robot to connect.
    # Replace the listening socket with the accepted connection.
    InfoSock, addr = InfoSock.accept()
    print('Info connected')
    while 1:
        try:
            info_data = ''
            info_data = str(InfoSock.recv(BUFSIZ).decode())
            info_get = info_data.split()
            # Packet layout: temperature, CPU usage, RAM usage, direction.
            CPU_TEP,CPU_USE,RAM_USE,CAR_DIR= info_get
            CPU_TEP_lab.config(text='CPU Temp: %s℃'%CPU_TEP)
            CPU_USE_lab.config(text='CPU Usage: %s'%CPU_USE)
            RAM_lab.config(text='RAM Usage: %s'%RAM_USE)
        except:
            # Best-effort: drop malformed/partial packets and keep going.
            pass
def socket_connect():
    """Connect to the robot's command server and start the worker threads.

    Reads the target IP from the entry widget (falling back to the last
    address saved in ip.txt), then tries up to 5 times to connect to
    port 10223.  On success it saves the IP, disables the entry/button,
    clears ip_stu (0 = connected) and spawns the status, telemetry and
    video daemon threads.  On failure the status label shows
    'Disconnected'.
    """
    global ADDR,tcpClicSock,BUFSIZ,ip_stu,ipaddr
    ip_adr=E1.get()  # IP address typed into the Entry widget.
    if ip_adr == '':  # Nothing typed: fall back to the saved default IP.
        ip_adr=num_import('IP:')
        l_ip_4.config(text='Connecting')
        l_ip_4.config(bg='#FF8F00')
        l_ip_5.config(text='Default:%s'%ip_adr)
        pass
    SERVER_IP = ip_adr
    SERVER_PORT = 10223  # Command-channel port on the robot.
    BUFSIZ = 1024  # Receive buffer size, shared with the reader threads.
    ADDR = (SERVER_IP, SERVER_PORT)
    tcpClicSock = socket(AF_INET, SOCK_STREAM)  # Client command socket.
    for i in range (1,6):  # Up to 5 connection attempts.
        #try:
        if ip_stu == 1:
            print("Connecting to server @ %s:%d..." %(SERVER_IP, SERVER_PORT))
            print("Connecting")
            tcpClicSock.connect(ADDR)  # Blocking connect to the server.
            print("Connected")
            l_ip_5.config(text='IP:%s'%ip_adr)
            l_ip_4.config(text='Connected')
            l_ip_4.config(bg='#558B2F')
            replace_num('IP:',ip_adr)  # Remember this IP for next launch.
            E1.config(state='disabled')  # Lock the entry while connected.
            Btn14.config(state='disabled')  # Lock the connect button too.
            ip_stu=0  # 0 means connected.
            # Status-message reader (daemon: dies with the mainloop).
            connection_threading=thread.Thread(target=connection_thread)
            connection_threading.setDaemon(True)
            connection_threading.start()
            # Telemetry server/reader thread.
            info_threading=thread.Thread(target=Info_receive)
            info_threading.setDaemon(True)
            info_threading.start()
            # Video decode/display thread.
            video_threading=thread.Thread(target=opencv_r)
            video_threading.setDaemon(True)
            video_threading.start()
            break
        else:
            print("Cannot connecting to server,try it latter!")
            l_ip_4.config(text='Try %d/5 time(s)'%i)
            l_ip_4.config(bg='#EF6C00')
            print('Try %d/5 time(s)'%i)
            ip_stu=1
            time.sleep(1)
            continue
    if ip_stu == 1:
        # Still not connected after all attempts.
        l_ip_4.config(text='Disconnected')
        l_ip_4.config(bg='#F44336')
def connect(event):
    """Bound to <Return> and the Connect button: start connecting once."""
    if ip_stu != 1:
        return  # Already connected (or a connection is in progress).
    worker = thread.Thread(target=socket_connect)
    worker.setDaemon(True)  # Daemon thread: dies with the tkinter mainloop.
    worker.start()
def scale_send(event):
    """Slider callback: push the selected speed to the server as 'wsB N'."""
    time.sleep(0.03)  # Small delay so rapid drags don't flood the socket.
    message = 'wsB %s' % var_Speed.get()
    tcpClicSock.send(message.encode())
def servo_buttons(x,y):
    """Create the servo-control button grid and its keyboard bindings.

    Every press handler follows the same protocol: while servo_stu is
    idle (0) send its command once and latch the flag to 1; any release
    sends 'stop' and clears the latch.  The original had eight
    copy-pasted call_* handlers differing only in the command string
    (and a dead, never-bound call_home) — replaced by one factory plus
    a layout table.

    Args:
        x, y: anchor of the 'Grab' button; the grid is placed relative to it.
    """
    def make_press(command):
        # Factory producing one press handler per servo command.
        def on_press(event):
            global servo_stu
            if servo_stu == 0:
                tcpClicSock.send(command.encode())
                servo_stu = 1
        return on_press

    def call_stop(event):
        # Any button/key release stops the servos and re-arms the handlers.
        global servo_stu
        tcpClicSock.send(('stop').encode())
        servo_stu = 0

    # (button text, x offset, y offset, servo command, keyboard key)
    layout = (
        ('Left',   0,   35,  'lookleft',  'j'),
        ('Up',     70,  0,   'up',        'i'),
        ('Down',   70,  35,  'down',      'k'),
        ('Right',  140, 35,  'lookright', 'l'),
        ('Grab',   0,   0,   'grab',      'u'),
        ('Loose',  140, 0,   'loose',     'o'),
        ('L_Down', 0,   -55, 'lookdown',  'h'),
        ('L_Up',   0,   -90, 'lookup',    'y'),
    )
    for text, dx, dy, command, key in layout:
        press = make_press(command)
        button = tk.Button(root, width=8, text=text, fg=color_text, bg=color_btn, relief='ridge')
        button.place(x=x+dx, y=y+dy)
        button.bind('<ButtonPress-1>', press)
        button.bind('<ButtonRelease-1>', call_stop)
        root.bind('<KeyPress-%s>' % key, press)
        root.bind('<KeyRelease-%s>' % key, call_stop)
def motor_buttons(x,y):
    """Create the drive/hand control buttons and keyboard bindings.

    Three independent 'armed' flags gate the command streams so each
    command is sent only once per press:
      * DS_stu    - forward/backward drive, released with 'DS'
      * TS_stu    - left/right turning, released with 'TS'
      * servo_stu - hand up/down servos, released with 'stop'
    The six copy-pasted press handlers of the original are replaced by
    one factory parameterised by command and flag name.

    Args:
        x, y: anchor of the 'HandUp' button; the grid is relative to it.
    """
    def make_press(command, flag):
        # Send once while the module-level flag is 0, then latch it to 1.
        def on_press(event):
            if globals()[flag] == 0:
                tcpClicSock.send(command.encode())
                globals()[flag] = 1
        return on_press

    def call_DS(event):
        # Release drive: stop forward/backward and re-arm.
        global DS_stu
        tcpClicSock.send(('DS').encode())
        DS_stu = 0

    def call_TS(event):
        # Release turn: stop left/right and re-arm.
        global TS_stu
        tcpClicSock.send(('TS').encode())
        TS_stu = 0

    def servoStop(event):
        # Release hand servos: re-arm first, then send 'stop'.
        global servo_stu
        servo_stu = 0
        tcpClicSock.send(('stop').encode())

    # (text, x offset, y offset, command, state-flag name, key, release handler)
    layout = (
        ('HandUp',   0,   0,  'handup',   'servo_stu', 'q', servoStop),
        ('HandDown', 140, 0,  'handdown', 'servo_stu', 'e', servoStop),
        ('Left',     0,   35, 'left',     'TS_stu',    'a', call_TS),
        ('Forward',  70,  0,  'forward',  'DS_stu',    'w', call_DS),
        ('Backward', 70,  35, 'backward', 'DS_stu',    's', call_DS),
        ('Right',    140, 35, 'right',    'TS_stu',    'd', call_TS),
    )
    for text, dx, dy, command, flag, key, release in layout:
        press = make_press(command, flag)
        button = tk.Button(root, width=8, text=text, fg=color_text, bg=color_btn, relief='ridge')
        button.place(x=x+dx, y=y+dy)
        button.bind('<ButtonPress-1>', press)
        button.bind('<ButtonRelease-1>', release)
        root.bind('<KeyPress-%s>' % key, press)
        root.bind('<KeyRelease-%s>' % key, release)
def information_screen(x,y):
global CPU_TEP_lab, CPU_USE_lab, RAM_lab, l_ip_4, l_ip_5
CPU_TEP_lab=tk.Label(root,width=18,text='CPU Temp:',fg=color_text,bg='#212121')
CPU_TEP_lab.place(x=x,y=y) #Define a Label and put it in position
CPU_USE_lab=tk.Label(root,width=18,text='CPU Usage:',fg=color_text,bg='#212121')
CPU_USE_lab.place(x=x,y=y+30) #Define a Label and put it in position
RAM_lab=tk.Label(root,width=18,text='RAM Usage:',fg=color_text,bg='#212121')
RAM_lab.place(x=x,y=y+60) #Define a Label and put it in position
l_ip_4=tk.Label(root,width=18,text='Disconnected',fg=color_text,bg='#F44336')
l_ip_4.place(x=x,y=y+95) #Define a Label and put it in position
l_ip_5=tk.Label(root,width=18,text='Use default IP',fg=color_text,bg=color_btn)
l_ip_5.place(x=x,y=y+130) #Define a Label and put it in position
def connent_input(x,y):
    """Create the IP-address entry, its label and the Connect button.

    Pressing <Return> anywhere in the window, or clicking the button,
    triggers connect().

    Args:
        x, y: position of the label; entry and button are placed relative.
    """
    global E1, Btn14
    E1 = tk.Entry(root, show=None, width=16, bg="#37474F", fg='#eceff1')
    E1.place(x=x + 5, y=y + 25)
    ip_label = tk.Label(root, width=10, text='IP Address:', fg=color_text, bg='#000000')
    ip_label.place(x=x, y=y)
    Btn14 = tk.Button(root, width=8, height=2, text='Connect', fg=color_text, bg=color_btn, relief='ridge')
    Btn14.place(x=x + 130, y=y)
    root.bind('<Return>', connect)
    Btn14.bind('<ButtonPress-1>', connect)
def switch_button(x,y):
    """Create the three switch-port toggle buttons.

    Each button sends 'Switch_N_on' when the port's flag is currently 0
    and 'Switch_N_off' otherwise; the flags themselves are updated by
    connection_thread() when the server confirms.  The three
    copy-pasted call_Switch_N handlers of the original are replaced by
    one factory.

    Args:
        x, y: position of the first button; the others sit 70 px apart.
    """
    global Btn_Switch_1, Btn_Switch_2, Btn_Switch_3

    def make_toggle(port):
        flag = 'Switch_%d' % port
        def on_press(event):
            # Read the module-level flag at click time, like the original.
            if globals()[flag] == 0:
                tcpClicSock.send(('%s_on' % flag).encode())
            else:
                tcpClicSock.send(('%s_off' % flag).encode())
        return on_press

    buttons = []
    for port in (1, 2, 3):
        button = tk.Button(root, width=8, text='Port %d' % port, fg=color_text, bg=color_btn, relief='ridge')
        button.place(x=x + 70 * (port - 1), y=y)
        button.bind('<ButtonPress-1>', make_toggle(port))
        buttons.append(button)
    Btn_Switch_1, Btn_Switch_2, Btn_Switch_3 = buttons
def scale(x,y,w):
    """Create the speed slider (60-100 in steps of 10) plus a cover strip.

    The slider value lives in the global var_Speed and is pushed to the
    server through scale_send() whenever it changes.

    Args:
        x, y: slider position; w: slider length in pixels.
    """
    global var_Speed
    var_Speed = tk.StringVar()
    var_Speed.set(100)
    speed_slider = tk.Scale(
        root, label=None, from_=60, to=100, orient=tk.HORIZONTAL, length=w,
        showvalue=1, tickinterval=None, resolution=10, variable=var_Speed,
        troughcolor='#448AFF', command=scale_send, fg=color_text,
        bg=color_bg, highlightthickness=0)
    speed_slider.place(x=x, y=y)
    # Background-coloured strip below the slider hides its extra chrome.
    cover = tk.Canvas(root, bg=color_bg, height=30, width=510, highlightthickness=0)
    cover.place(x=x, y=y + 30)
def ultrasonic_radar(x, y):
    """Draw the empty 320x250 radar grid canvas at (x, y).

    Three horizontal and three vertical gridlines plus distance labels at
    1/4, 1/2 and 3/4 of the 2 m measuring range.
    """
    x_range = 2                       # maximum distance shown, in metres
    grid = tk.Canvas(root, bg=color_can, height=250, width=320,
                     highlightthickness=0)
    grid.place(x=x, y=y)
    for gy in (62, 124, 186):         # horizontal gridlines
        grid.create_line(0, gy, 320, gy, fill='darkgray')
    for gx in (160, 80, 240):         # vertical gridlines
        grid.create_line(gx, 0, gx, 250, fill='darkgray')
    # Distance labels along the left edge.
    for gy, dist in ((178, x_range / 4), (116, x_range / 2), (54, x_range * 0.75)):
        grid.create_text((27, gy), text='%sm' % round(dist, 2), fill='#aeea00')
def new_number2view(x, y, info):
    """Render one radar sweep from a whitespace-separated distance string.

    *info* is a string whose first token is skipped (presumably a tag/header
    — TODO confirm against the sender) and whose remaining tokens are float
    distances in metres, one per sweep angle. Draws a fresh 320x250 canvas at
    (x, y) with gridlines, a ray + dot per positive reading, and range labels.

    Fixes over the original: removed unused locals (dis_list, f_list, x0, x1,
    x0_l) and dead line/point_scan bindings; math.radians(pos_ra) is computed
    once per reading instead of five times.
    """
    print(info)                       # debug: raw message as received
    x_range = 2                       # maximum distance shown, in metres
    readings = info.split()[1:]       # drop the leading token
    total_number = len(readings)
    print(total_number)               # debug: number of samples in the sweep
    can_scan_1 = tk.Canvas(root, bg=color_can, height=250, width=320,
                           highlightthickness=0)
    can_scan_1.place(x=x, y=y)
    for gy in (62, 124, 186):         # horizontal gridlines
        can_scan_1.create_line(0, gy, 320, gy, fill='darkgray')
    for gx in (160, 80, 240):         # vertical gridlines
        can_scan_1.create_line(gx, 0, gx, 250, fill='darkgray')
    for i in range(0, total_number):  # scale each reading onto the canvas
        dis_info_get = float(readings[i])
        if dis_info_get > 0:
            len_dis_1 = int((dis_info_get / x_range) * 250)  # 250 = canvas height
            pos = int((i / total_number) * 320)              # 320 = canvas width
            pos_ra = int(((i / total_number) * 140) + 20)    # angle mapped to 20-160 deg
            rad = math.radians(pos_ra)                       # hoisted: reused below
            len_dis = int(len_dis_1 * math.sin(rad))         # ray height
            y0_l = y1_l = 250 - len_dis                      # ray start/end y
            x1_l = pos
            y0 = 250 - len_dis + 3                           # dot bounding box
            y1 = 250 - len_dis - 3
            if pos <= 160:            # fold the picture into a sector shape
                pos = 160 - abs(int(len_dis_1 * math.cos(rad)))
                x1_l = x1_l - math.cos(rad) * 130
            else:
                pos = abs(int(len_dis_1 * math.cos(rad))) + 160
                x1_l = x1_l + abs(math.cos(rad) * 130)
            y1_l = y1_l - abs(math.sin(rad) * 130)           # tilt of the ray
            can_scan_1.create_line(pos, y0_l, x1_l, y1_l, fill=color_line)
            can_scan_1.create_oval(pos + 3, y0, pos - 3, y1,
                                   fill=color_oval, outline=color_oval)
    # Distance labels along the left edge.
    can_scan_1.create_text((27, 178), text='%sm' % round(x_range / 4, 2), fill='#aeea00')
    can_scan_1.create_text((27, 116), text='%sm' % round(x_range / 2, 2), fill='#aeea00')
    can_scan_1.create_text((27, 54), text='%sm' % round(x_range * 0.75, 2), fill='#aeea00')
def scale_FL(x, y, w):
    """Build the follow-line tuning sliders and control buttons at (x, y).

    Three sliders (lip1, lip2, err) forward their values over the TCP
    command socket; Render / CV FL / LineColorSwitch buttons send one-shot
    commands. The CV FL button is published as the global Btn_CVFL.
    """
    global Btn_CVFL

    def lip1_send(event):
        time.sleep(0.03)              # throttle the update rate
        tcpClicSock.send(('lip1 %s' % var_lip1.get()).encode())

    def lip2_send(event):
        time.sleep(0.03)
        tcpClicSock.send(('lip2 %s' % var_lip2.get()).encode())

    def err_send(event):
        time.sleep(0.03)
        tcpClicSock.send(('err %s' % var_err.get()).encode())

    def call_Render(event):
        tcpClicSock.send('Render'.encode())

    def call_CVFL(event):
        tcpClicSock.send('CVFL'.encode())

    def call_WB(event):
        tcpClicSock.send('WBswitch'.encode())

    # (variable, upper bound, handler, vertical offset) per slider.
    slider_specs = (
        (var_lip1, 480, lip1_send, 0),
        (var_lip2, 480, lip2_send, 30),
        (var_err, 200, err_send, 60),
    )
    for variable, upper, handler, dy in slider_specs:
        slider = tk.Scale(
            root, label=None, from_=0, to=upper, orient=tk.HORIZONTAL,
            length=w, showvalue=1, tickinterval=None, resolution=1,
            variable=variable, troughcolor='#212121', command=handler,
            fg=color_text, bg=color_bg, highlightthickness=0)
        slider.place(x=x, y=y + dy)
    cover = tk.Canvas(root, bg=color_bg, height=30, width=510,
                      highlightthickness=0)
    cover.place(x=x, y=y + 90)
    render_btn = tk.Button(root, width=10, text='Render',
                           fg=color_text, bg='#212121', relief='ridge')
    render_btn.place(x=x + w + 111, y=y + 20)
    render_btn.bind('<ButtonPress-1>', call_Render)
    Btn_CVFL = tk.Button(root, width=10, text='CV FL',
                         fg=color_text, bg='#212121', relief='ridge')
    Btn_CVFL.place(x=x + w + 21, y=y + 20)
    Btn_CVFL.bind('<ButtonPress-1>', call_CVFL)
    wb_btn = tk.Button(root, width=23, text='LineColorSwitch',
                       fg=color_text, bg='#212121', relief='ridge')
    wb_btn.place(x=x + w + 21, y=y + 60)
    wb_btn.bind('<ButtonPress-1>', call_WB)
def scale_FC(x, y, w):
    """RGB colour-picker: three sliders, a live preview canvas, a Set button.

    Moving any slider repaints the preview canvas (global canvas_show) with
    the combined RGB colour; 'Color Set' sends the colour as HSV over the
    TCP command socket. The per-channel send commands were already commented
    out in the original, so one shared preview handler serves all sliders.
    """
    global canvas_show

    def _update_preview(event):
        # Repaint the preview from the current R/G/B slider values.
        canvas_show.config(bg=RGB_to_Hex(int(var_R.get()),
                                         int(var_G.get()),
                                         int(var_B.get())))
        time.sleep(0.03)              # throttle the update rate

    def call_SET(event):
        tcpClicSock.send(('FCSET %s' % rgb2hsv(int(var_R.get()),
                                               int(var_G.get()),
                                               int(var_B.get()))).encode())

    # (variable, trough colour, vertical offset) per channel slider.
    channel_specs = ((var_R, '#FF1744', 0),
                     (var_G, '#00E676', 30),
                     (var_B, '#2979FF', 60))
    for variable, trough, dy in channel_specs:
        slider = tk.Scale(
            root, label=None, from_=0, to=255, orient=tk.HORIZONTAL,
            length=w, showvalue=1, tickinterval=None, resolution=1,
            variable=variable, troughcolor=trough, command=_update_preview,
            fg=color_text, bg=color_bg, highlightthickness=0)
        slider.place(x=x, y=y + dy)
    cover = tk.Canvas(root, bg=color_bg, height=30, width=510,
                      highlightthickness=0)
    cover.place(x=x, y=y + 90)
    canvas_show = tk.Canvas(
        root,
        bg=RGB_to_Hex(int(var_R.get()), int(var_G.get()), int(var_B.get())),
        height=35, width=170, highlightthickness=0)
    canvas_show.place(x=w + x + 21, y=y + 15)
    set_btn = tk.Button(root, width=23, text='Color Set',
                        fg=color_text, bg='#212121', relief='ridge')
    set_btn.place(x=x + w + 21, y=y + 60)
    set_btn.bind('<ButtonPress-1>', call_SET)
def scale_ExpCom(x, y, w):
    """Exposure-compensation slider (-25..25) with a reset-to-default button.

    Slider changes send 'setEC <value>'; the button resets var_ec to 0 and
    sends 'defEC'.
    """
    def EC_send(event):
        tcpClicSock.send(('setEC %s' % var_ec.get()).encode())
        time.sleep(0.03)              # throttle the update rate

    def EC_default(event):
        var_ec.set(0)
        tcpClicSock.send('defEC'.encode())

    ec_slider = tk.Scale(
        root, label='Exposure Compensation Level',
        from_=-25, to=25, orient=tk.HORIZONTAL, length=w,
        showvalue=1, tickinterval=None, resolution=1, variable=var_ec,
        troughcolor='#212121', command=EC_send,
        fg=color_text, bg=color_bg, highlightthickness=0)
    ec_slider.place(x=x, y=y)
    cover = tk.Canvas(root, bg=color_bg, height=30, width=510,
                      highlightthickness=0)
    cover.place(x=x, y=y + 50)
    default_btn = tk.Button(root, width=23, height=2,
                            text='Set Default Exposure\nCompensation Level',
                            fg=color_text, bg='#212121', relief='ridge')
    default_btn.place(x=x + w + 21, y=y + 3)
    default_btn.bind('<ButtonPress-1>', EC_default)
def function_buttons(x, y):
    """Create the function-mode buttons in a column at (x, y) and bind them.

    Each bound button sends function_<n>_on or function_<n>_off depending on
    the global function_stu flag. Button 1 (RadarScan) is created but — as in
    the original layout — left unplaced and unbound.
    """
    global function_stu, Btn_function_1, Btn_function_2, Btn_function_3, Btn_function_4, Btn_function_5, Btn_function_6, Btn_function_7

    def _toggle_sender(index):
        # Factory: builds a handler with *index* bound at creation time.
        def _handler(event):
            if function_stu == 0:
                tcpClicSock.send(('function_%d_on' % index).encode())
            else:
                tcpClicSock.send(('function_%d_off' % index).encode())
        return _handler

    labels = ('RadarScan', 'FindColor', 'MotionGet', 'OSDscreen',
              'Automatic', 'SteadyCam', 'Instruction')
    buttons = [tk.Button(root, width=8, text=label, fg=color_text,
                         bg=color_btn, relief='ridge') for label in labels]
    (Btn_function_1, Btn_function_2, Btn_function_3, Btn_function_4,
     Btn_function_5, Btn_function_6, Btn_function_7) = buttons
    # Buttons 2-7 only; note the irregular final gap (221, not 210).
    offsets = (35, 70, 105, 140, 175, 221)
    for btn, dy, index in zip(buttons[1:], offsets, range(2, 8)):
        btn.place(x=x, y=y + dy)
        btn.bind('<ButtonPress-1>', _toggle_sender(index))
def loop():
    """Build the main Tk window, create every widget group, run the event loop.

    Publishes the window and all slider StringVars as module globals so the
    widget-builder functions and network callbacks can reach them.

    Fix over the original: the logo-loading guard used a bare ``except:``,
    which also swallows SystemExit/KeyboardInterrupt; it now catches only
    ``Exception`` while keeping the best-effort behaviour (missing logo.png
    is not fatal).
    """
    global root, var_lip1, var_lip2, var_err, var_R, var_G, var_B, var_ec
    root = tk.Tk()
    root.title('Rasptank[RRO] GUI')
    root.geometry('495x860')
    root.config(bg=color_bg)

    def _string_var(initial):
        # Helper: a tk.StringVar pre-set to *initial*.
        var = tk.StringVar()
        var.set(initial)
        return var

    var_lip1 = _string_var(440)   # follow-line upper scan row
    var_lip2 = _string_var(380)   # follow-line lower scan row
    var_err = _string_var(20)     # follow-line error tolerance
    var_R = _string_var(80)
    var_G = _string_var(80)
    var_B = _string_var(80)
    var_ec = _string_var(0)       # exposure compensation
    try:
        logo = tk.PhotoImage(file='logo.png')
        logo_label = tk.Label(root, image=logo, bg=color_bg)
        logo_label.place(x=30, y=13)
    except Exception:
        # Best effort: a missing or unreadable logo must not stop the GUI.
        pass
    motor_buttons(30, 105)
    information_screen(330, 15)
    connent_input(125, 15)
    switch_button(30, 195)
    servo_buttons(255, 195)
    scale(30, 230, 203)
    ultrasonic_radar(30, 290)
    function_buttons(395, 290)
    scale_FL(30, 550, 238)
    scale_FC(30, 650, 238)
    scale_ExpCom(30, 770, 238)
    root.mainloop()
if __name__ == '__main__':
    # Entry point: build the GUI and start the Tk event loop.
    loop()
| [
"tkinter.StringVar",
"cv2.imdecode",
"base64.b64decode",
"cv2.rectangle",
"cv2.imshow",
"tkinter.Label",
"zmq.Context",
"cv2.line",
"tkinter.PhotoImage",
"tkinter.Button",
"cv2.cvtColor",
"math.radians",
"tkinter.Entry",
"cv2.setMouseCallback",
"tkinter.Tk",
"threading.Thread",
"nump... | [((6541, 6570), 'threading.Thread', 'thread.Thread', ([], {'target': 'get_FPS'}), '(target=get_FPS)\n', (6554, 6570), True, 'import threading as thread\n'), ((6790, 6824), 'threading.Thread', 'thread.Thread', ([], {'target': 'video_thread'}), '(target=video_thread)\n', (6803, 6824), True, 'import threading as thread\n'), ((1772, 1785), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (1783, 1785), False, 'import zmq\n'), ((3520, 3577), 'cv2.line', 'cv2.line', (['draw_on', '(X_s, Y_s)', '(X_e, Y_e)', '(0, 255, 0)', '(2)'], {}), '(draw_on, (X_s, Y_s), (X_e, Y_e), (0, 255, 0), 2)\n', (3528, 3577), False, 'import cv2\n'), ((3572, 3672), 'cv2.putText', 'cv2.putText', (['draw_on', '"""horizontal line"""', '(X_e + 10, Y_e)', 'font', '(0.5)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), "(draw_on, 'horizontal line', (X_e + 10, Y_e), font, 0.5, (0, 255,\n 0), 1, cv2.LINE_AA)\n", (3583, 3672), False, 'import cv2\n'), ((3665, 3734), 'cv2.line', 'cv2.line', (['draw_on', '(X_s, Y_s + 270)', '(X_e, Y_e + 270)', '(0, 255, 0)', '(2)'], {}), '(draw_on, (X_s, Y_s + 270), (X_e, Y_e + 270), (0, 255, 0), 2)\n', (3673, 3734), False, 'import cv2\n'), ((3725, 3820), 'cv2.putText', 'cv2.putText', (['draw_on', '"""Down"""', '(X_e + 10, Y_e + 270)', 'font', '(0.5)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), "(draw_on, 'Down', (X_e + 10, Y_e + 270), font, 0.5, (0, 255, 0),\n 1, cv2.LINE_AA)\n", (3736, 3820), False, 'import cv2\n'), ((3811, 3880), 'cv2.line', 'cv2.line', (['draw_on', '(X_s, Y_s - 270)', '(X_e, Y_e - 270)', '(0, 255, 0)', '(2)'], {}), '(draw_on, (X_s, Y_s - 270), (X_e, Y_e - 270), (0, 255, 0), 2)\n', (3819, 3880), False, 'import cv2\n'), ((3871, 3964), 'cv2.putText', 'cv2.putText', (['draw_on', '"""Up"""', '(X_e + 10, Y_e - 270)', 'font', '(0.5)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), "(draw_on, 'Up', (X_e + 10, Y_e - 270), font, 0.5, (0, 255, 0), 1,\n cv2.LINE_AA)\n", (3882, 3964), False, 'import cv2\n'), ((4214, 4306), 'cv2.line', 'cv2.line', (['draw_on', 
'(X_s_short, Y_s_short + 90)', '(X_e_short, Y_e_short + 90)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short + 90), (X_e_short, Y_e_short + 90),\n (0, 255, 0))\n', (4222, 4306), False, 'import cv2\n'), ((4294, 4389), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short + 180)', '(X_e_short, Y_e_short + 180)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short + 180), (X_e_short, Y_e_short + 180\n ), (0, 255, 0))\n', (4302, 4389), False, 'import cv2\n'), ((4376, 4471), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short + 360)', '(X_e_short, Y_e_short + 360)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short + 360), (X_e_short, Y_e_short + 360\n ), (0, 255, 0))\n', (4384, 4471), False, 'import cv2\n'), ((4458, 4553), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short + 450)', '(X_e_short, Y_e_short + 450)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short + 450), (X_e_short, Y_e_short + 450\n ), (0, 255, 0))\n', (4466, 4553), False, 'import cv2\n'), ((4542, 4634), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short - 90)', '(X_e_short, Y_e_short - 90)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short - 90), (X_e_short, Y_e_short - 90),\n (0, 255, 0))\n', (4550, 4634), False, 'import cv2\n'), ((4622, 4717), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short - 180)', '(X_e_short, Y_e_short - 180)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short - 180), (X_e_short, Y_e_short - 180\n ), (0, 255, 0))\n', (4630, 4717), False, 'import cv2\n'), ((4704, 4799), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short - 360)', '(X_e_short, Y_e_short - 360)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short - 360), (X_e_short, Y_e_short - 360\n ), (0, 255, 0))\n', (4712, 4799), False, 'import cv2\n'), ((4786, 4881), 'cv2.line', 'cv2.line', (['draw_on', '(X_s_short, Y_s_short - 450)', '(X_e_short, Y_e_short - 450)', '(0, 255, 0)'], {}), '(draw_on, (X_s_short, Y_s_short - 450), (X_e_short, Y_e_short 
- 450\n ), (0, 255, 0))\n', (4794, 4881), False, 'import cv2\n'), ((13592, 13608), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (13602, 13608), False, 'import time\n'), ((14927, 15014), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Left"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Left', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (14936, 15014), True, 'import tkinter as tk\n'), ((15221, 15306), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Up"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Up', fg=color_text, bg=color_btn, relief='ridge'\n )\n", (15230, 15306), True, 'import tkinter as tk\n'), ((15502, 15589), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Down"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Down', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (15511, 15589), True, 'import tkinter as tk\n'), ((15791, 15879), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Right"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Right', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (15800, 15879), True, 'import tkinter as tk\n'), ((16093, 16180), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Grab"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Grab', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (16102, 16180), True, 'import tkinter as tk\n'), ((16378, 16466), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Loose"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Loose', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (16387, 16466), True, 'import tkinter as tk\n'), ((16669, 16758), 'tkinter.Button', 'tk.Button', 
(['root'], {'width': '(8)', 'text': '"""L_Down"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='L_Down', fg=color_text, bg=color_btn, relief\n ='ridge')\n", (16678, 16758), True, 'import tkinter as tk\n'), ((16966, 17053), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""L_Up"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='L_Up', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (16975, 17053), True, 'import tkinter as tk\n'), ((18354, 18443), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""HandUp"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='HandUp', fg=color_text, bg=color_btn, relief\n ='ridge')\n", (18363, 18443), True, 'import tkinter as tk\n'), ((18637, 18727), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""HandDown"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='HandDown', fg=color_text, bg=color_btn,\n relief='ridge')\n", (18646, 18727), True, 'import tkinter as tk\n'), ((18929, 19016), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Left"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Left', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (18938, 19016), True, 'import tkinter as tk\n'), ((19211, 19300), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Forward"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Forward', fg=color_text, bg=color_btn,\n relief='ridge')\n", (19220, 19300), True, 'import tkinter as tk\n'), ((19503, 19593), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Backward"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Backward', fg=color_text, bg=color_btn,\n relief='ridge')\n", 
(19512, 19593), True, 'import tkinter as tk\n'), ((19800, 19888), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Right"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Right', fg=color_text, bg=color_btn, relief=\n 'ridge')\n", (19809, 19888), True, 'import tkinter as tk\n'), ((20186, 20257), 'tkinter.Label', 'tk.Label', (['root'], {'width': '(18)', 'text': '"""CPU Temp:"""', 'fg': 'color_text', 'bg': '"""#212121"""'}), "(root, width=18, text='CPU Temp:', fg=color_text, bg='#212121')\n", (20194, 20257), True, 'import tkinter as tk\n'), ((20344, 20416), 'tkinter.Label', 'tk.Label', (['root'], {'width': '(18)', 'text': '"""CPU Usage:"""', 'fg': 'color_text', 'bg': '"""#212121"""'}), "(root, width=18, text='CPU Usage:', fg=color_text, bg='#212121')\n", (20352, 20416), True, 'import tkinter as tk\n'), ((20502, 20574), 'tkinter.Label', 'tk.Label', (['root'], {'width': '(18)', 'text': '"""RAM Usage:"""', 'fg': 'color_text', 'bg': '"""#212121"""'}), "(root, width=18, text='RAM Usage:', fg=color_text, bg='#212121')\n", (20510, 20574), True, 'import tkinter as tk\n'), ((20655, 20729), 'tkinter.Label', 'tk.Label', (['root'], {'width': '(18)', 'text': '"""Disconnected"""', 'fg': 'color_text', 'bg': '"""#F44336"""'}), "(root, width=18, text='Disconnected', fg=color_text, bg='#F44336')\n", (20663, 20729), True, 'import tkinter as tk\n'), ((20809, 20885), 'tkinter.Label', 'tk.Label', (['root'], {'width': '(18)', 'text': '"""Use default IP"""', 'fg': 'color_text', 'bg': 'color_btn'}), "(root, width=18, text='Use default IP', fg=color_text, bg=color_btn)\n", (20817, 20885), True, 'import tkinter as tk\n'), ((21010, 21073), 'tkinter.Entry', 'tk.Entry', (['root'], {'show': 'None', 'width': '(16)', 'bg': '"""#37474F"""', 'fg': '"""#eceff1"""'}), "(root, show=None, width=16, bg='#37474F', fg='#eceff1')\n", (21018, 21073), True, 'import tkinter as tk\n'), ((21152, 21225), 'tkinter.Label', 'tk.Label', (['root'], 
{'width': '(10)', 'text': '"""IP Address:"""', 'fg': 'color_text', 'bg': '"""#000000"""'}), "(root, width=10, text='IP Address:', fg=color_text, bg='#000000')\n", (21160, 21225), True, 'import tkinter as tk\n'), ((21302, 21402), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'height': '(2)', 'text': '"""Connect"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, height=2, text='Connect', fg=color_text, bg=\n color_btn, relief='ridge')\n", (21311, 21402), True, 'import tkinter as tk\n'), ((22109, 22198), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Port 1"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Port 1', fg=color_text, bg=color_btn, relief\n ='ridge')\n", (22118, 22198), True, 'import tkinter as tk\n'), ((22208, 22297), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Port 2"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Port 2', fg=color_text, bg=color_btn, relief\n ='ridge')\n", (22217, 22297), True, 'import tkinter as tk\n'), ((22307, 22396), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Port 3"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Port 3', fg=color_text, bg=color_btn, relief\n ='ridge')\n", (22316, 22396), True, 'import tkinter as tk\n'), ((22708, 22722), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (22720, 22722), True, 'import tkinter as tk\n'), ((22758, 23006), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(60)', 'to': '(100)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(10)', 'variable': 'var_Speed', 'troughcolor': '"""#448AFF"""', 'command': 'scale_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label=None, from_=60, to=100, orient=tk.HORIZONTAL, length=w,\n 
showvalue=1, tickinterval=None, resolution=10, variable=var_Speed,\n troughcolor='#448AFF', command=scale_send, fg=color_text, bg=color_bg,\n highlightthickness=0)\n", (22766, 23006), True, 'import tkinter as tk\n'), ((23074, 23146), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'bg': 'color_bg', 'height': '(30)', 'width': '(510)', 'highlightthickness': '(0)'}), '(root, bg=color_bg, height=30, width=510, highlightthickness=0)\n', (23083, 23146), True, 'import tkinter as tk\n'), ((23235, 23309), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'bg': 'color_can', 'height': '(250)', 'width': '(320)', 'highlightthickness': '(0)'}), '(root, bg=color_can, height=250, width=320, highlightthickness=0)\n', (23244, 23309), True, 'import tkinter as tk\n'), ((24426, 24500), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'bg': 'color_can', 'height': '(250)', 'width': '(320)', 'highlightthickness': '(0)'}), '(root, bg=color_can, height=250, width=320, highlightthickness=0)\n', (24435, 24500), True, 'import tkinter as tk\n'), ((27335, 27579), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(0)', 'to': '(480)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_lip1', 'troughcolor': '"""#212121"""', 'command': 'lip1_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label=None, from_=0, to=480, orient=tk.HORIZONTAL, length=w,\n showvalue=1, tickinterval=None, resolution=1, variable=var_lip1,\n troughcolor='#212121', command=lip1_send, fg=color_text, bg=color_bg,\n highlightthickness=0)\n", (27343, 27579), True, 'import tkinter as tk\n'), ((27650, 27894), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(0)', 'to': '(480)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_lip2', 'troughcolor': '"""#212121"""', 'command': 'lip2_send', 'fg': 'color_text', 'bg': 'color_bg', 
'highlightthickness': '(0)'}), "(root, label=None, from_=0, to=480, orient=tk.HORIZONTAL, length=w,\n showvalue=1, tickinterval=None, resolution=1, variable=var_lip2,\n troughcolor='#212121', command=lip2_send, fg=color_text, bg=color_bg,\n highlightthickness=0)\n", (27658, 27894), True, 'import tkinter as tk\n'), ((27967, 28209), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(0)', 'to': '(200)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_err', 'troughcolor': '"""#212121"""', 'command': 'err_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label=None, from_=0, to=200, orient=tk.HORIZONTAL, length=w,\n showvalue=1, tickinterval=None, resolution=1, variable=var_err,\n troughcolor='#212121', command=err_send, fg=color_text, bg=color_bg,\n highlightthickness=0)\n", (27975, 28209), True, 'import tkinter as tk\n'), ((28282, 28354), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'bg': 'color_bg', 'height': '(30)', 'width': '(510)', 'highlightthickness': '(0)'}), '(root, bg=color_bg, height=30, width=510, highlightthickness=0)\n', (28291, 28354), True, 'import tkinter as tk\n'), ((28401, 28490), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(10)', 'text': '"""Render"""', 'fg': 'color_text', 'bg': '"""#212121"""', 'relief': '"""ridge"""'}), "(root, width=10, text='Render', fg=color_text, bg='#212121',\n relief='ridge')\n", (28410, 28490), True, 'import tkinter as tk\n'), ((28586, 28675), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(10)', 'text': '"""CV FL"""', 'fg': 'color_text', 'bg': '"""#212121"""', 'relief': '"""ridge"""'}), "(root, width=10, text='CV FL', fg=color_text, bg='#212121', relief\n ='ridge')\n", (28595, 28675), True, 'import tkinter as tk\n'), ((28761, 28860), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(23)', 'text': '"""LineColorSwitch"""', 'fg': 'color_text', 'bg': '"""#212121"""', 'relief': 
'"""ridge"""'}), "(root, width=23, text='LineColorSwitch', fg=color_text, bg=\n '#212121', relief='ridge')\n", (28770, 28860), True, 'import tkinter as tk\n'), ((29692, 29930), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(0)', 'to': '(255)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_R', 'troughcolor': '"""#FF1744"""', 'command': 'R_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label=None, from_=0, to=255, orient=tk.HORIZONTAL, length=w,\n showvalue=1, tickinterval=None, resolution=1, variable=var_R,\n troughcolor='#FF1744', command=R_send, fg=color_text, bg=color_bg,\n highlightthickness=0)\n", (29700, 29930), True, 'import tkinter as tk\n'), ((29995, 30233), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(0)', 'to': '(255)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_G', 'troughcolor': '"""#00E676"""', 'command': 'G_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label=None, from_=0, to=255, orient=tk.HORIZONTAL, length=w,\n showvalue=1, tickinterval=None, resolution=1, variable=var_G,\n troughcolor='#00E676', command=G_send, fg=color_text, bg=color_bg,\n highlightthickness=0)\n", (30003, 30233), True, 'import tkinter as tk\n'), ((30301, 30539), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': 'None', 'from_': '(0)', 'to': '(255)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_B', 'troughcolor': '"""#2979FF"""', 'command': 'B_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label=None, from_=0, to=255, orient=tk.HORIZONTAL, length=w,\n showvalue=1, tickinterval=None, resolution=1, variable=var_B,\n troughcolor='#2979FF', command=B_send, fg=color_text, bg=color_bg,\n 
highlightthickness=0)\n", (30309, 30539), True, 'import tkinter as tk\n'), ((30610, 30682), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'bg': 'color_bg', 'height': '(30)', 'width': '(510)', 'highlightthickness': '(0)'}), '(root, bg=color_bg, height=30, width=510, highlightthickness=0)\n', (30619, 30682), True, 'import tkinter as tk\n'), ((30903, 30995), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(23)', 'text': '"""Color Set"""', 'fg': 'color_text', 'bg': '"""#212121"""', 'relief': '"""ridge"""'}), "(root, width=23, text='Color Set', fg=color_text, bg='#212121',\n relief='ridge')\n", (30912, 30995), True, 'import tkinter as tk\n'), ((31299, 31565), 'tkinter.Scale', 'tk.Scale', (['root'], {'label': '"""Exposure Compensation Level"""', 'from_': '(-25)', 'to': '(25)', 'orient': 'tk.HORIZONTAL', 'length': 'w', 'showvalue': '(1)', 'tickinterval': 'None', 'resolution': '(1)', 'variable': 'var_ec', 'troughcolor': '"""#212121"""', 'command': 'EC_send', 'fg': 'color_text', 'bg': 'color_bg', 'highlightthickness': '(0)'}), "(root, label='Exposure Compensation Level', from_=-25, to=25,\n orient=tk.HORIZONTAL, length=w, showvalue=1, tickinterval=None,\n resolution=1, variable=var_ec, troughcolor='#212121', command=EC_send,\n fg=color_text, bg=color_bg, highlightthickness=0)\n", (31307, 31565), True, 'import tkinter as tk\n'), ((31638, 31710), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'bg': 'color_bg', 'height': '(30)', 'width': '(510)', 'highlightthickness': '(0)'}), '(root, bg=color_bg, height=30, width=510, highlightthickness=0)\n', (31647, 31710), True, 'import tkinter as tk\n'), ((31754, 31896), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(23)', 'height': '(2)', 'text': '"""Set Default Exposure\nCompensation Level"""', 'fg': 'color_text', 'bg': '"""#212121"""', 'relief': '"""ridge"""'}), '(root, width=23, height=2, text=\n """Set Default Exposure\nCompensation Level""", fg=color_text, bg=\n \'#212121\', relief=\'ridge\')\n', (31763, 31896), True, 'import 
tkinter as tk\n'), ((33298, 33389), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""RadarScan"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='RadarScan', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33307, 33389), True, 'import tkinter as tk\n'), ((33402, 33493), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""FindColor"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='FindColor', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33411, 33493), True, 'import tkinter as tk\n'), ((33506, 33597), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""MotionGet"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='MotionGet', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33515, 33597), True, 'import tkinter as tk\n'), ((33610, 33701), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""OSDscreen"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='OSDscreen', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33619, 33701), True, 'import tkinter as tk\n'), ((33714, 33805), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Automatic"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='Automatic', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33723, 33805), True, 'import tkinter as tk\n'), ((33818, 33909), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""SteadyCam"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': '"""ridge"""'}), "(root, width=8, text='SteadyCam', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33827, 33909), True, 'import tkinter as tk\n'), ((33922, 34015), 'tkinter.Button', 'tk.Button', (['root'], {'width': '(8)', 'text': '"""Instruction"""', 'fg': 'color_text', 'bg': 'color_btn', 'relief': 
'"""ridge"""'}), "(root, width=8, text='Instruction', fg=color_text, bg=color_btn,\n relief='ridge')\n", (33931, 34015), True, 'import tkinter as tk\n'), ((34767, 34774), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (34772, 34774), True, 'import tkinter as tk\n'), ((34890, 34904), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (34902, 34904), True, 'import tkinter as tk\n'), ((34938, 34952), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (34950, 34952), True, 'import tkinter as tk\n'), ((34985, 34999), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (34997, 34999), True, 'import tkinter as tk\n'), ((35030, 35044), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (35042, 35044), True, 'import tkinter as tk\n'), ((35071, 35085), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (35083, 35085), True, 'import tkinter as tk\n'), ((35112, 35126), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (35124, 35126), True, 'import tkinter as tk\n'), ((35156, 35170), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (35168, 35170), True, 'import tkinter as tk\n'), ((1917, 1931), 'numpy.unicode', 'np.unicode', (['""""""'], {}), "('')\n", (1927, 1931), True, 'import numpy as np\n'), ((13351, 13387), 'threading.Thread', 'thread.Thread', ([], {'target': 'socket_connect'}), '(target=socket_connect)\n', (13364, 13387), True, 'import threading as thread\n'), ((14898, 14914), 'time.sleep', 'time.sleep', (['(0.15)'], {}), '(0.15)\n', (14908, 14914), False, 'import time\n'), ((26840, 26856), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (26850, 26856), False, 'import time\n'), ((26943, 26959), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (26953, 26959), False, 'import time\n'), ((27045, 27061), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (27055, 27061), False, 'import time\n'), ((29091, 29107), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (29101, 29107), False, 'import time\n'), ((29283, 29299), 
'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (29293, 29299), False, 'import time\n'), ((29475, 29491), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (29485, 29491), False, 'import time\n'), ((31177, 31193), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (31187, 31193), False, 'import time\n'), ((35213, 35243), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'file': '"""logo.png"""'}), "(file='logo.png')\n", (35226, 35243), True, 'import tkinter as tk\n'), ((35256, 35295), 'tkinter.Label', 'tk.Label', (['root'], {'image': 'logo', 'bg': 'color_bg'}), '(root, image=logo, bg=color_bg)\n', (35264, 35295), True, 'import tkinter as tk\n'), ((3101, 3114), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3111, 3114), False, 'import time\n'), ((4995, 5018), 'base64.b64decode', 'base64.b64decode', (['frame'], {}), '(frame)\n', (5011, 5018), False, 'import base64\n'), ((5031, 5065), 'numpy.frombuffer', 'np.frombuffer', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (5044, 5065), True, 'import numpy as np\n'), ((5079, 5101), 'cv2.imdecode', 'cv2.imdecode', (['npimg', '(1)'], {}), '(npimg, 1)\n', (5091, 5101), False, 'import cv2\n'), ((5106, 5204), 'cv2.putText', 'cv2.putText', (['source', "('PC FPS: %s' % fps)", '(40, 20)', 'font', '(0.5)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(source, 'PC FPS: %s' % fps, (40, 20), font, 0.5, (255, 255, 255\n ), 1, cv2.LINE_AA)\n", (5117, 5204), False, 'import cv2\n'), ((6226, 6254), 'cv2.imshow', 'cv2.imshow', (['"""Stream"""', 'source'], {}), "('Stream', source)\n", (6236, 6254), False, 'import cv2\n'), ((6259, 6300), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Stream"""', 'getposBgr'], {}), "('Stream', getposBgr)\n", (6279, 6300), False, 'import cv2\n'), ((6316, 6355), 'cv2.cvtColor', 'cv2.cvtColor', (['source', 'cv2.COLOR_BGR2HSV'], {}), '(source, cv2.COLOR_BGR2HSV)\n', (6328, 6355), False, 'import cv2\n'), ((6360, 6391), 'cv2.imshow', 'cv2.imshow', (['"""StreamHSV"""', 
'HSVimg'], {}), "('StreamHSV', HSVimg)\n", (6370, 6391), False, 'import cv2\n'), ((6396, 6440), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""StreamHSV"""', 'getposHsv'], {}), "('StreamHSV', getposHsv)\n", (6416, 6440), False, 'import cv2\n'), ((6466, 6480), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6477, 6480), False, 'import cv2\n'), ((12146, 12185), 'threading.Thread', 'thread.Thread', ([], {'target': 'connection_thread'}), '(target=connection_thread)\n', (12159, 12185), True, 'import threading as thread\n'), ((12427, 12461), 'threading.Thread', 'thread.Thread', ([], {'target': 'Info_receive'}), '(target=Info_receive)\n', (12440, 12461), True, 'import threading as thread\n'), ((12692, 12722), 'threading.Thread', 'thread.Thread', ([], {'target': 'opencv_r'}), '(target=opencv_r)\n', (12705, 12722), True, 'import threading as thread\n'), ((13136, 13149), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13146, 13149), False, 'import time\n'), ((3168, 3181), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3178, 3181), False, 'import time\n'), ((5213, 5325), 'cv2.putText', 'cv2.putText', (['source', "('CPU Temperature: %s' % CPU_TEP)", '(370, 350)', 'font', '(0.5)', '(128, 255, 128)', '(1)', 'cv2.LINE_AA'], {}), "(source, 'CPU Temperature: %s' % CPU_TEP, (370, 350), font, 0.5,\n (128, 255, 128), 1, cv2.LINE_AA)\n", (5224, 5325), False, 'import cv2\n'), ((5319, 5425), 'cv2.putText', 'cv2.putText', (['source', "('CPU Usage: %s' % CPU_USE)", '(370, 380)', 'font', '(0.5)', '(128, 255, 128)', '(1)', 'cv2.LINE_AA'], {}), "(source, 'CPU Usage: %s' % CPU_USE, (370, 380), font, 0.5, (128,\n 255, 128), 1, cv2.LINE_AA)\n", (5330, 5425), False, 'import cv2\n'), ((5419, 5525), 'cv2.putText', 'cv2.putText', (['source', "('RAM Usage: %s' % RAM_USE)", '(370, 410)', 'font', '(0.5)', '(128, 255, 128)', '(1)', 'cv2.LINE_AA'], {}), "(source, 'RAM Usage: %s' % RAM_USE, (370, 410), font, 0.5, (128,\n 255, 128), 1, cv2.LINE_AA)\n", (5430, 5525), False, 'import 
cv2\n'), ((5521, 5583), 'cv2.rectangle', 'cv2.rectangle', (['source', '(167, 320)', '(473, 330)', '(255, 255, 255)'], {}), '(source, (167, 320), (473, 330), (255, 255, 255))\n', (5534, 5583), False, 'import cv2\n'), ((6498, 6513), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6508, 6513), False, 'import time\n'), ((3313, 3334), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (3325, 3334), False, 'import math\n'), ((3431, 3452), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (3443, 3452), False, 'import math\n'), ((3990, 4011), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (4002, 4011), False, 'import math\n'), ((4118, 4139), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (4130, 4139), False, 'import math\n'), ((5641, 5714), 'cv2.rectangle', 'cv2.rectangle', (['source', '(320 - DIR_show, 323)', '(320, 327)', '(255, 255, 255)'], {}), '(source, (320 - DIR_show, 323), (320, 327), (255, 255, 255))\n', (5654, 5714), False, 'import cv2\n'), ((3366, 3387), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (3378, 3387), False, 'import math\n'), ((3484, 3505), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (3496, 3505), False, 'import math\n'), ((4048, 4069), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (4060, 4069), False, 'import math\n'), ((4176, 4197), 'math.radians', 'math.radians', (['error_Y'], {}), '(error_Y)\n', (4188, 4197), False, 'import math\n'), ((5743, 5816), 'cv2.rectangle', 'cv2.rectangle', (['source', '(320, 323)', '(320 - DIR_show, 327)', '(255, 255, 255)'], {}), '(source, (320, 323), (320 - DIR_show, 327), (255, 255, 255))\n', (5756, 5816), False, 'import cv2\n'), ((25529, 25549), 'math.radians', 'math.radians', (['pos_ra'], {}), '(pos_ra)\n', (25541, 25549), False, 'import math\n'), ((25941, 25961), 'math.radians', 'math.radians', (['pos_ra'], {}), '(pos_ra)\n', (25953, 25961), False, 'import math\n'), ((26133, 
26153), 'math.radians', 'math.radians', (['pos_ra'], {}), '(pos_ra)\n', (26145, 26153), False, 'import math\n'), ((26075, 26095), 'math.radians', 'math.radians', (['pos_ra'], {}), '(pos_ra)\n', (26087, 26095), False, 'import math\n'), ((25890, 25910), 'math.radians', 'math.radians', (['pos_ra'], {}), '(pos_ra)\n', (25902, 25910), False, 'import math\n'), ((26017, 26037), 'math.radians', 'math.radians', (['pos_ra'], {}), '(pos_ra)\n', (26029, 26037), False, 'import math\n')] |
"""
Supplies MultiDimensionalMapping and NdMapping which are multi-dimensional
map types. The former class only allows indexing whereas the latter
also enables slicing over multiple dimension ranges.
"""
from itertools import cycle
from operator import itemgetter
import numpy as np
import param
from . import util
from .dimension import OrderedDict, Dimension, Dimensioned, ViewableElement, asdim
from .util import (unique_iterator, sanitize_identifier, dimension_sort,
basestring, wrap_tuple, process_ellipses, get_ndmapping_label)
class item_check(object):
    """
    Context manager that toggles item checking on NdMapping types.

    Constructing NdMapping instances without the usual item checks
    provides significant speedups when there are many items. Only use
    this when both keys and values are guaranteed to be of the right
    type, as is the case for many internal operations.
    """
    def __init__(self, enabled):
        self.enabled = enabled
    def __enter__(self):
        # Save the current setting so __exit__ can restore it.
        self._previous = MultiDimensionalMapping._check_items
        MultiDimensionalMapping._check_items = self.enabled
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the original checking behavior, even on exception.
        MultiDimensionalMapping._check_items = self._previous
class sorted_context(object):
    """
    Context manager that temporarily overrides sorting on NdMapping
    types. Retaining the current sort order can be a useful
    optimization on NdMapping instances where sort=True but the items
    are already known to be sorted.
    """
    def __init__(self, enabled):
        self.enabled = enabled
    def __enter__(self):
        # Save the current class-level sort flag so it can be restored.
        self._previous = MultiDimensionalMapping.sort
        MultiDimensionalMapping.sort = self.enabled
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the original sort behavior, even on exception.
        MultiDimensionalMapping.sort = self._previous
class MultiDimensionalMapping(Dimensioned):
    """
    An MultiDimensionalMapping is a Dimensioned mapping (like a
    dictionary or array) that uses fixed-length multidimensional
    keys. This behaves like a sparse N-dimensional array that does not
    require a dense sampling over the multidimensional space.
    If the underlying value for each (key,value) pair also supports
    indexing (such as a dictionary, array, or list), fully qualified
    (deep) indexing may be used from the top level, with the first N
    dimensions of the index selecting a particular Dimensioned object
    and the remaining dimensions indexing into that object.
    For instance, for a MultiDimensionalMapping with dimensions "Year"
    and "Month" and underlying values that are 2D floating-point
    arrays indexed by (r,c), a 2D array may be indexed with x[2000,3]
    and a single floating-point number may be indexed as
    x[2000,3,1,9].
    In practice, this class is typically only used as an abstract base
    class, because the NdMapping subclass extends it with a range of
    useful slicing methods for selecting subsets of the data. Even so,
    keeping the slicing support separate from the indexing and data
    storage methods helps make both classes easier to understand.
    """
    group = param.String(default='MultiDimensionalMapping', constant=True)
    kdims = param.List(default=[Dimension("Default")], constant=True)
    vdims = param.List(default=[], bounds=(0, 0), constant=True)
    sort = param.Boolean(default=True, doc="""
        Whether the items should be sorted in the constructor.""")
    data_type = None  # Optional type (or tuple of types) used to validate elements
    _deep_indexable = False  # Whether extra indices are forwarded to the elements
    _check_items = True      # Toggled by the item_check context manager
    def __init__(self, initial_items=None, kdims=None, **params):
        if isinstance(initial_items, MultiDimensionalMapping):
            # Inherit parameter values from the mapping being copied.
            params = dict(util.get_param_values(initial_items),
                          **dict({'sort': self.sort}, **params))
        if kdims is not None:
            params['kdims'] = kdims
        super(MultiDimensionalMapping, self).__init__(OrderedDict(), **dict(params))
        if type(initial_items) is dict and not self.sort:
            raise ValueError('If sort=False the data must define a fixed '
                             'ordering, please supply a list of items or '
                             'an OrderedDict, not a regular dictionary.')
        self._next_ind = 0
        self._check_key_type = True
        if initial_items is None: initial_items = []
        if isinstance(initial_items, tuple):
            # A single (key, value) pair.
            self._add_item(initial_items[0], initial_items[1])
        elif not self._check_items:
            # Fast path: items are trusted, so insert them without validation.
            if isinstance(initial_items, dict):
                initial_items = initial_items.items()
            elif isinstance(initial_items, MultiDimensionalMapping):
                initial_items = initial_items.data.items()
            self.data = OrderedDict((k if isinstance(k, tuple) else (k,), v)
                                    for k, v in initial_items)
            if self.sort:
                self._resort()
        elif initial_items is not None:
            self.update(OrderedDict(initial_items))
    def _item_check(self, dim_vals, data):
        """
        Applies optional checks to individual data elements before
        they are inserted ensuring that they are of a certain
        type. Subclassed may implement further element restrictions.
        """
        if self.data_type is not None and not isinstance(data, self.data_type):
            if isinstance(self.data_type, tuple):
                data_type = tuple(dt.__name__ for dt in self.data_type)
            else:
                data_type = self.data_type.__name__
            raise TypeError('{slf} does not accept {data} type, data elements have '
                            'to be a {restr}.'.format(slf=type(self).__name__,
                                                      data=type(data).__name__,
                                                      restr=data_type))
        elif not len(dim_vals) == self.ndims:
            raise KeyError('The data contains keys of length %d, but the kdims '
                           'only declare %d dimensions. Ensure that the number '
                           'of kdims match the length of the keys in your data.'
                           % (len(dim_vals), self.ndims))
    def _add_item(self, dim_vals, data, sort=True, update=True):
        """
        Adds item to the data, applying dimension types and ensuring
        key conforms to Dimension type and values.
        """
        sort = sort and self.sort
        if not isinstance(dim_vals, tuple):
            dim_vals = (dim_vals,)
        self._item_check(dim_vals, data)
        # Apply dimension types
        dim_types = zip([kd.type for kd in self.kdims], dim_vals)
        dim_vals = tuple(v if None in [t, v] else t(v) for t, v in dim_types)
        valid_vals = zip(self.kdims, dim_vals)
        for dim, val in valid_vals:
            # Enforce any explicit value list declared on the dimension.
            if dim.values and val is not None and val not in dim.values:
                raise KeyError('%s dimension value %s not in'
                               ' specified dimension values.' % (dim, repr(val)))
        # Updates nested data structures rather than simply overriding them.
        if (update and (dim_vals in self.data)
            and isinstance(self.data[dim_vals], (MultiDimensionalMapping, OrderedDict))):
            self.data[dim_vals].update(data)
        else:
            self.data[dim_vals] = data
        if sort:
            self._resort()
    def _apply_key_type(self, keys):
        """
        If a type is specified by the corresponding key dimension,
        this method applies the type to the supplied key.
        """
        typed_key = ()
        for dim, key in zip(self.kdims, keys):
            key_type = dim.type
            if key_type is None:
                typed_key += (key,)
            elif isinstance(key, slice):
                # Cast the slice bounds, preserving None endpoints.
                sl_vals = [key.start, key.stop, key.step]
                typed_key += (slice(*[key_type(el) if el is not None else None
                                      for el in sl_vals]),)
            elif key is Ellipsis:
                typed_key += (key,)
            elif isinstance(key, list):
                typed_key += ([key_type(k) for k in key],)
            else:
                typed_key += (key_type(key),)
        return typed_key
    def _split_index(self, key):
        """
        Partitions key into key and deep dimension groups. If only key
        indices are supplied, the data is indexed with an empty tuple.
        Keys with fewer indices than there are dimensions will be padded.
        """
        if not isinstance(key, tuple):
            key = (key,)
        elif key == ():
            return (), ()
        if key[0] is Ellipsis:
            # Leading Ellipsis expands to slice(None) for missing dimensions.
            num_pad = self.ndims - len(key) + 1
            key = (slice(None),) * num_pad + key[1:]
        elif len(key) < self.ndims:
            num_pad = self.ndims - len(key)
            key = key + (slice(None),) * num_pad
        map_slice = key[:self.ndims]
        if self._check_key_type:
            map_slice = self._apply_key_type(map_slice)
        if len(key) == self.ndims:
            return map_slice, ()
        else:
            # Remaining indices are forwarded to the deep-indexable elements.
            return map_slice, key[self.ndims:]
    def _dataslice(self, data, indices):
        """
        Returns slice of data element if the item is deep
        indexable. Warns if attempting to slice an object that has not
        been declared deep indexable.
        """
        if self._deep_indexable and isinstance(data, Dimensioned) and indices:
            return data[indices]
        elif len(indices) > 0:
            self.warning('Cannot index into data element, extra data'
                         ' indices ignored.')
        return data
    def _resort(self):
        # Re-sort the underlying OrderedDict along all key dimensions.
        self.data = OrderedDict(dimension_sort(self.data, self.kdims, self.vdims,
                                               range(self.ndims)))
    def clone(self, data=None, shared_data=True, *args, **overrides):
        """
        Overrides Dimensioned clone to avoid checking items if data
        is unchanged.
        """
        with item_check(not shared_data and self._check_items):
            return super(MultiDimensionalMapping, self).clone(data, shared_data,
                                                              *args, **overrides)
    def groupby(self, dimensions, container_type=None, group_type=None, **kwargs):
        """
        Splits the mapping into groups by key dimension which are then
        returned together in a mapping of class container_type. The
        individual groups are of the same type as the original map.
        This operation will always sort the groups and the items in
        each group.
        """
        if self.ndims == 1:
            self.warning('Cannot split Map with only one dimension.')
            return self
        container_type = container_type if container_type else type(self)
        group_type = group_type if group_type else type(self)
        dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
        with item_check(False):
            return util.ndmapping_groupby(self, dimensions, container_type,
                                          group_type, sort=True, **kwargs)
    def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):
        """
        Create a new object with an additional key dimensions.
        Requires the dimension name or object, the desired position
        in the key dimensions and a key value scalar or sequence of
        the same length as the existing keys.
        """
        dimension = asdim(dimension)
        if dimension in self.dimensions():
            raise Exception('{dim} dimension already defined'.format(dim=dimension.name))
        if vdim and self._deep_indexable:
            raise Exception('Cannot add value dimension to object that is deep indexable')
        if vdim:
            dims = self.vdims[:]
            dims.insert(dim_pos, dimension)
            dimensions = dict(vdims=dims)
            # Value dimensions are indexed after all key dimensions.
            dim_pos += self.ndims
        else:
            dims = self.kdims[:]
            dims.insert(dim_pos, dimension)
            dimensions = dict(kdims=dims)
        if isinstance(dim_val, basestring) or not hasattr(dim_val, '__iter__'):
            # A scalar value is repeated for every existing key.
            dim_val = cycle([dim_val])
        else:
            if not len(dim_val) == len(self):
                # NOTE(review): message is missing a space between 'length' and
                # 'as' due to implicit string concatenation.
                raise ValueError("Added dimension values must be same length"
                                 "as existing keys.")
        items = OrderedDict()
        for dval, (key, val) in zip(dim_val, self.data.items()):
            if vdim:
                new_val = list(val)
                new_val.insert(dim_pos, dval)
                items[key] = tuple(new_val)
            else:
                new_key = list(key)
                new_key.insert(dim_pos, dval)
                items[tuple(new_key)] = val
        return self.clone(items, **dict(dimensions, **kwargs))
    def drop_dimension(self, dimensions):
        """
        Returns a new mapping with the named dimension(s) removed.
        """
        dimensions = [dimensions] if np.isscalar(dimensions) else dimensions
        dims = [d for d in self.kdims if d not in dimensions]
        dim_inds = [self.get_dimension_index(d) for d in dims]
        key_getter = itemgetter(*dim_inds)
        return self.clone([(key_getter(k), v) for k, v in self.data.items()],
                          kdims=dims)
    def dimension_values(self, dimension, expanded=True, flat=True):
        "Returns the values along the specified dimension."
        dimension = self.get_dimension(dimension, strict=True)
        if dimension in self.kdims:
            return np.array([k[self.get_dimension_index(dimension)] for k in self.data.keys()])
        if dimension in self.dimensions():
            # Gather values from each contained element declaring the dimension.
            values = [el.dimension_values(dimension) for el in self
                      if dimension in el.dimensions()]
            vals = np.concatenate(values)
            return vals if expanded else util.unique_array(vals)
        else:
            return super(MultiDimensionalMapping, self).dimension_values(dimension, expanded, flat)
    def reindex(self, kdims=[], force=False):
        """
        Create a new object with a re-ordered or reduced set of key
        dimensions.
        Reducing the number of key dimensions will discard information
        from the keys. All data values are accessible in the newly
        created object as the new labels must be sufficient to address
        each value uniquely.

        NOTE(review): kdims has a mutable default; it is never mutated
        here so this is harmless, but kdims=None would be safer.
        """
        old_kdims = [d.name for d in self.kdims]
        if not len(kdims):
            # Default: keep only dimensions with more than one unique value.
            kdims = [d for d in old_kdims
                     if not len(set(self.dimension_values(d))) == 1]
        indices = [self.get_dimension_index(el) for el in kdims]
        keys = [tuple(k[i] for i in indices) for k in self.data.keys()]
        reindexed_items = OrderedDict(
            (k, v) for (k, v) in zip(keys, self.data.values()))
        reduced_dims = set([d.name for d in self.kdims]).difference(kdims)
        dimensions = [self.get_dimension(d) for d in kdims
                      if d not in reduced_dims]
        if len(set(keys)) != len(keys) and not force:
            raise Exception("Given dimension labels not sufficient"
                            "to address all values uniquely")
        if len(keys):
            # Dropped dimensions become constant dimensions on the clone.
            cdims = {self.get_dimension(d): self.dimension_values(d)[0] for d in reduced_dims}
        else:
            cdims = {}
        with item_check(indices == sorted(indices)):
            return self.clone(reindexed_items, kdims=dimensions,
                              cdims=cdims)
    @property
    def last(self):
        "Returns the item highest data item along the map dimensions."
        return list(self.data.values())[-1] if len(self) else None
    @property
    def last_key(self):
        "Returns the last key value."
        return list(self.keys())[-1] if len(self) else None
    @property
    def info(self):
        """
        Prints information about the Dimensioned object, including the
        number and type of objects contained within it and information
        about its dimensions.
        """
        if (len(self.values()) > 0):
            info_str = self.__class__.__name__ +\
                       " containing %d items of type %s\n" % (len(self.keys()),
                                                              type(self.values()[0]).__name__)
        else:
            info_str = self.__class__.__name__ + " containing no items\n"
        info_str += ('-' * (len(info_str)-1)) + "\n\n"
        aliases = {v: k for k, v in self._dim_aliases.items()}
        for group in self._dim_groups:
            dimensions = getattr(self, group)
            if dimensions:
                group = aliases[group].split('_')[0]
                info_str += '%s Dimensions: \n' % group.capitalize()
            for d in dimensions:
                dmin, dmax = self.range(d.name)
                if d.value_format:
                    dmin, dmax = d.value_format(dmin), d.value_format(dmax)
                info_str += '\t %s: %s...%s \n' % (d.pprint_label, dmin, dmax)
        print(info_str)
    def table(self, datatype=None, **kwargs):
        "Creates a table from the stored keys and data."
        from .data.interface import Interface
        from ..element.tabular import Table
        new_data = [(key, value.table(datatype=datatype, **kwargs))
                    for key, value in self.data.items()]
        tables = self.clone(new_data)
        return Interface.concatenate(tables, new_type=Table)
    def dframe(self):
        "Creates a pandas DataFrame from the stored keys and data."
        try:
            import pandas
        except ImportError:
            raise Exception("Cannot build a DataFrame without the pandas library.")
        labels = self.dimensions('key', True) + [self.group]
        return pandas.DataFrame(
            [dict(zip(labels, k + (v,))) for (k, v) in self.data.items()])
    def update(self, other):
        """
        Updates the current mapping with some other mapping or
        OrderedDict instance, making sure that they are indexed along
        the same set of dimensions. The order of key dimensions remains
        unchanged after the update.
        """
        if isinstance(other, NdMapping):
            dims = [d for d in other.kdims if d not in self.kdims]
            if len(dims) == other.ndims:
                raise KeyError("Cannot update with NdMapping that has"
                               " a different set of key dimensions.")
            elif dims:
                # Discard any extra dimensions the other mapping declares.
                other = other.drop_dimension(dims)
            other = other.data
        for key, data in other.items():
            self._add_item(key, data, sort=False)
        if self.sort:
            # Sort once at the end rather than after every insertion.
            self._resort()
    def keys(self):
        "Returns the keys of all the elements."
        if self.ndims == 1:
            # Unwrap the 1-tuples used for single-dimension keys.
            return [k[0] for k in self.data.keys()]
        else:
            return list(self.data.keys())
    def values(self):
        "Returns the values of all the elements."
        return list(self.data.values())
    def items(self):
        "Returns all elements as a list in (key,value) format."
        return list(zip(list(self.keys()), list(self.values())))
    def get(self, key, default=None):
        "Standard get semantics for all mapping types"
        try:
            if key is None:
                return None
            return self[key]
        except KeyError:
            return default
    def pop(self, key, default=None):
        "Standard pop semantics for all mapping types"
        if not isinstance(key, tuple): key = (key,)
        return self.data.pop(key, default)
    def __getitem__(self, key):
        """
        Allows multi-dimensional indexing in the order of the
        specified key dimensions, passing any additional indices to
        the data elements.
        """
        if key in [Ellipsis, ()]:
            return self
        map_slice, data_slice = self._split_index(key)
        return self._dataslice(self.data[map_slice], data_slice)
    def __setitem__(self, key, value):
        "Adds an item overwriting any existing entry for the key."
        self._add_item(key, value, update=False)
    def __str__(self):
        return repr(self)
    def __iter__(self):
        "Iterates over the values (not the keys) of the mapping."
        return iter(self.values())
    def __contains__(self, key):
        if self.ndims == 1:
            return key in self.data.keys()
        else:
            return key in self.keys()
    def __len__(self):
        return len(self.data)
class NdMapping(MultiDimensionalMapping):
    """
    NdMapping supports the same indexing semantics as
    MultiDimensionalMapping but also supports slicing semantics.
    Slicing semantics on an NdMapping is dependent on the ordering
    semantics of the keys. As MultiDimensionalMapping sort the keys, a
    slice on an NdMapping is effectively a way of filtering out the
    keys that are outside the slice range.
    """
    group = param.String(default='NdMapping', constant=True)
    def __getitem__(self, indexslice):
        """
        Allows slicing operations along the key and data
        dimensions. If no data slice is supplied it will return all
        data elements, otherwise it will return the requested slice of
        the data.
        """
        # Boolean array indexing selects items by position.
        if isinstance(indexslice, np.ndarray) and indexslice.dtype.kind == 'b':
            if not len(indexslice) == len(self):
                raise IndexError("Boolean index must match length of sliced object")
            selection = zip(indexslice, self.data.items())
            return self.clone([item for c, item in selection if c])
        elif indexslice == () and not self.kdims:
            return self.data[()]
        elif indexslice in [Ellipsis, ()]:
            return self
        elif Ellipsis in wrap_tuple(indexslice):
            indexslice = process_ellipses(self, indexslice)
        map_slice, data_slice = self._split_index(indexslice)
        map_slice = self._transform_indices(map_slice)
        map_slice = self._expand_slice(map_slice)
        # If every index is a scalar, look the key up directly.
        if all(not (isinstance(el, (slice, set, list, tuple)) or callable(el))
               for el in map_slice):
            return self._dataslice(self.data[map_slice], data_slice)
        else:
            # Otherwise filter the items with per-dimension conditions.
            conditions = self._generate_conditions(map_slice)
            items = self.data.items()
            for cidx, (condition, dim) in enumerate(zip(conditions, self.kdims)):
                values = dim.values
                items = [(k, v) for k, v in items
                         if condition(values.index(k[cidx])
                                      if values else k[cidx])]
            sliced_items = []
            for k, v in items:
                val_slice = self._dataslice(v, data_slice)
                if val_slice or isinstance(val_slice, tuple):
                    sliced_items.append((k, val_slice))
            if len(sliced_items) == 0:
                raise KeyError('No items within specified slice.')
            with item_check(False):
                return self.clone(sliced_items)
    def _expand_slice(self, indices):
        """
        Expands slices containing steps into a list.
        """
        keys = list(self.data.keys())
        expanded = []
        for idx, ind in enumerate(indices):
            if isinstance(ind, slice) and ind.step is not None:
                dim_ind = slice(ind.start, ind.stop)
                if dim_ind == slice(None):
                    condition = self._all_condition()
                elif dim_ind.start is None:
                    condition = self._upto_condition(dim_ind)
                elif dim_ind.stop is None:
                    condition = self._from_condition(dim_ind)
                else:
                    condition = self._range_condition(dim_ind)
                # Enumerate matching key values, then apply the step.
                dim_vals = unique_iterator(k[idx] for k in keys)
                expanded.append(set([k for k in dim_vals if condition(k)][::int(ind.step)]))
            else:
                expanded.append(ind)
        return tuple(expanded)
    def _transform_indices(self, indices):
        """
        Identity function here but subclasses can implement transforms
        of the dimension indices from one coordinate system to another.
        """
        return indices
    def _generate_conditions(self, map_slice):
        """
        Generates filter conditions used for slicing the data structure.
        """
        conditions = []
        for dim, dim_slice in zip(self.kdims, map_slice):
            if isinstance(dim_slice, slice):
                start, stop = dim_slice.start, dim_slice.stop
                if dim.values:
                    # Translate values into positions in the declared value list.
                    values = dim.values
                    dim_slice = slice(None if start is None else values.index(start),
                                      None if stop is None else values.index(stop))
                if dim_slice == slice(None):
                    conditions.append(self._all_condition())
                elif start is None:
                    conditions.append(self._upto_condition(dim_slice))
                elif stop is None:
                    conditions.append(self._from_condition(dim_slice))
                else:
                    conditions.append(self._range_condition(dim_slice))
            elif isinstance(dim_slice, (set, list)):
                if dim.values:
                    dim_slice = [dim.values.index(dim_val)
                                 for dim_val in dim_slice]
                conditions.append(self._values_condition(dim_slice))
            elif dim_slice is Ellipsis:
                conditions.append(self._all_condition())
            elif callable(dim_slice):
                conditions.append(dim_slice)
            elif isinstance(dim_slice, (tuple)):
                raise IndexError("Keys may only be selected with sets or lists, not tuples.")
            else:
                if dim.values:
                    dim_slice = dim.values.index(dim_slice)
                conditions.append(self._value_condition(dim_slice))
        return conditions
    def _value_condition(self, value):
        "Condition matching a single scalar key value."
        return lambda x: x == value
    def _values_condition(self, values):
        "Condition matching any value in the supplied collection."
        return lambda x: x in values
    def _range_condition(self, slice):
        "Condition for a bounded slice, optionally aligned to a step."
        if slice.step is None:
            lmbd = lambda x: slice.start <= x < slice.stop
        else:
            lmbd = lambda x: slice.start <= x < slice.stop and not (
                (x-slice.start) % slice.step)
        return lmbd
    def _upto_condition(self, slice):
        "Condition for a slice open at the start, e.g. [:stop:step]."
        if slice.step is None:
            lmbd = lambda x: x < slice.stop
        else:
            lmbd = lambda x: x < slice.stop and not (x % slice.step)
        return lmbd
    def _from_condition(self, slice):
        "Condition for a slice open at the stop end, e.g. [start::step]."
        if slice.step is None:
            lmbd = lambda x: x >= slice.start
        else:
            # Keep values aligned with the step offset from start, mirroring
            # _range_condition. (Previously the 'not' was missing, which
            # inverted the step filter and even excluded start itself.)
            lmbd = lambda x: x >= slice.start and not (
                (x-slice.start) % slice.step)
        return lmbd
    def _all_condition(self):
        "Condition matching every value (no filtering)."
        return lambda x: True
class UniformNdMapping(NdMapping):
    """
    A UniformNdMapping is a map of Dimensioned objects and is itself
    indexed over a number of specified dimensions. The dimension may
    be a spatial dimension (i.e., a ZStack), time (specifying a frame
    sequence) or any other combination of Dimensions.
    UniformNdMapping objects can be sliced, sampled, reduced, overlaid
    and split along both their own dimensions and the dimensions of
    the Views they contain. Subclasses should implement the
    appropriate slicing, sampling and reduction methods for their
    Dimensioned type.
    """
    data_type = (ViewableElement, NdMapping)  # All elements must share one concrete type
    __abstract = True  # Not intended for direct instantiation
    _deep_indexable = True  # Extra indices are forwarded to the contained elements
    _auxiliary_component = False
def __init__(self, initial_items=None, kdims=None, group=None, label=None, **params):
self._type = None
self._group_check, self.group = None, group
self._label_check, self.label = None, label
super(UniformNdMapping, self).__init__(initial_items, kdims=kdims, **params)
    def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
        """
        Returns a clone of the object with matching parameter values
        containing the specified args and kwargs.
        If shared_data is set to True and no data explicitly supplied,
        the clone will share data with the original.
        """
        settings = dict(self.get_param_values())
        # Drop group/label when they were inferred rather than set explicitly.
        if settings.get('group', None) != self._group:
            settings.pop('group')
        if settings.get('label', None) != self._label:
            settings.pop('label')
        if new_type is None:
            clone_type = self.__class__
        else:
            clone_type = new_type
            # Keep only settings the target type actually declares.
            new_params = new_type.params()
            settings = {k: v for k, v in settings.items()
                        if k in new_params}
        settings = dict(settings, **overrides)
        # Preserve the id only when cloning to the same type.
        if 'id' not in settings and new_type in [type(self), None]:
            settings['id'] = self.id
        if data is None and shared_data:
            data = self.data
            settings['plot_id'] = self._plot_id
        # Apply name mangling for __ attribute
        pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
        with item_check(not shared_data and self._check_items):
            return clone_type(data, *args, **{k:v for k,v in settings.items()
                                              if k not in pos_args})
@property
def group(self):
if self._group:
return self._group
group = get_ndmapping_label(self, 'group') if len(self) else None
if group is None:
return type(self).__name__
return group
@group.setter
def group(self, group):
if group is not None and not sanitize_identifier.allowable(group):
raise ValueError("Supplied group %s contains invalid "
"characters." % self.group)
self._group = group
@property
def label(self):
if self._label:
return self._label
else:
if len(self):
label = get_ndmapping_label(self, 'label')
return '' if label is None else label
else:
return ''
@label.setter
def label(self, label):
if label is not None and not sanitize_identifier.allowable(label):
raise ValueError("Supplied group %s contains invalid "
"characters." % self.group)
self._label = label
@property
def type(self):
"""
The type of elements stored in the map.
"""
if self._type is None and len(self):
self._type = self.values()[0].__class__
return self._type
@property
def empty_element(self):
return self.type(None)
def _item_check(self, dim_vals, data):
if self.type is not None and (type(data) != self.type):
raise AssertionError("%s must only contain one type of object, not both %s and %s." %
(self.__class__.__name__, type(data).__name__, self.type.__name__))
super(UniformNdMapping, self)._item_check(dim_vals, data)
def dframe(self):
"""
Gets a dframe for each Element in the HoloMap, appends the
dimensions of the HoloMap as series and concatenates the
dframes.
"""
import pandas
dframes = []
for key, view in self.data.items():
view_frame = view.dframe()
key_dims = reversed(list(zip(key, self.dimensions('key', True))))
for val, dim in key_dims:
dimn = 1
while dim in view_frame:
dim = dim+'_%d' % dimn
if dim in view_frame:
dimn += 1
view_frame.insert(0, dim, val)
dframes.append(view_frame)
return pandas.concat(dframes)
def __mul__(self, other, reverse=False):
from .overlay import Overlay
if isinstance(other, type(self)):
if self.kdims != other.kdims:
raise KeyError("Can only overlay two %ss with "
"non-matching key dimensions."
% type(self).__name__)
items = []
self_keys = list(self.data.keys())
other_keys = list(other.data.keys())
for key in util.unique_iterator(self_keys+other_keys):
self_el = self.data.get(key)
other_el = other.data.get(key)
if self_el is None:
item = [other_el]
elif other_el is None:
item = [self_el]
elif reverse:
item = [other_el, self_el]
else:
item = [self_el, other_el]
items.append((key, Overlay(item)))
return self.clone(items)
overlayed_items = [(k, other * el if reverse else el * other)
for k, el in self.items()]
return self.clone(overlayed_items)
def __rmul__(self, other):
return self.__mul__(other, reverse=True)
| [
"param.List",
"numpy.isscalar",
"param.Boolean",
"itertools.cycle",
"operator.itemgetter",
"param.String",
"pandas.concat",
"numpy.concatenate"
] | [((3126, 3188), 'param.String', 'param.String', ([], {'default': '"""MultiDimensionalMapping"""', 'constant': '(True)'}), "(default='MultiDimensionalMapping', constant=True)\n", (3138, 3188), False, 'import param\n'), ((3273, 3325), 'param.List', 'param.List', ([], {'default': '[]', 'bounds': '(0, 0)', 'constant': '(True)'}), '(default=[], bounds=(0, 0), constant=True)\n', (3283, 3325), False, 'import param\n'), ((3338, 3445), 'param.Boolean', 'param.Boolean', ([], {'default': '(True)', 'doc': '"""\n Whether the items should be sorted in the constructor."""'}), '(default=True, doc=\n """\n Whether the items should be sorted in the constructor.""")\n', (3351, 3445), False, 'import param\n'), ((21021, 21069), 'param.String', 'param.String', ([], {'default': '"""NdMapping"""', 'constant': '(True)'}), "(default='NdMapping', constant=True)\n", (21033, 21069), False, 'import param\n'), ((13298, 13319), 'operator.itemgetter', 'itemgetter', (['*dim_inds'], {}), '(*dim_inds)\n', (13308, 13319), False, 'from operator import itemgetter\n'), ((32030, 32052), 'pandas.concat', 'pandas.concat', (['dframes'], {}), '(dframes)\n', (32043, 32052), False, 'import pandas\n'), ((12280, 12296), 'itertools.cycle', 'cycle', (['[dim_val]'], {}), '([dim_val])\n', (12285, 12296), False, 'from itertools import cycle\n'), ((13112, 13135), 'numpy.isscalar', 'np.isscalar', (['dimensions'], {}), '(dimensions)\n', (13123, 13135), True, 'import numpy as np\n'), ((13947, 13969), 'numpy.concatenate', 'np.concatenate', (['values'], {}), '(values)\n', (13961, 13969), True, 'import numpy as np\n')] |
import legwork as lw
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm
plt.rc('font', family='serif')
plt.rcParams['text.usetex'] = False
fs = 24
# update various fontsizes to match
params = {'figure.figsize': (12, 8),
'legend.fontsize': fs,
'axes.labelsize': fs,
'xtick.labelsize': 0.9 * fs,
'ytick.labelsize': 0.9 * fs,
'axes.linewidth': 1.1,
'xtick.major.size': 7,
'xtick.minor.size': 4,
'ytick.major.size': 7,
'ytick.minor.size': 4}
plt.rcParams.update(params)
# spread out some frequencies and eccentricities
f_orb_s = np.logspace(-4, -1, 200)
ecc_s = np.linspace(0, 0.9, 150)
# turn them into a grid
F, E = np.meshgrid(f_orb_s, ecc_s)
# flatten the grid
F_flat, E_flat = F.flatten(), E.flatten()
# put all of the sources at the same distance with the same mass
m_1 = np.repeat(10, len(F_flat)) * u.Msun
m_2 = np.repeat(10, len(F_flat)) * u.Msun
dist = np.repeat(8, len(F_flat)) * u.kpc
# define a set of sources
sources = lw.source.Source(m_1=m_1, m_2=m_2, f_orb=F_flat * u.Hz, ecc=E_flat, dist=dist, gw_lum_tol=1e-3)
sources.get_merger_time()
# compute the LISA SNR
LISA_snr = sources.get_snr(verbose=True, which_sources=sources.t_merge > 0.1 * u.yr)
# compute the TianQin SNR
sources.update_sc_params({"instrument": "TianQin", "L": np.sqrt(3) * 1e5 * u.km})
TQ_snr = sources.get_snr(verbose=True, which_sources=sources.t_merge > 0.1 * u.yr)
# create a figure
fig, ax = plt.subplots(figsize=(14, 12))
ax.set_xscale("log")
ax.set_xlabel(r"Orbital Frequency, $f_{\rm orb} \, [{\rm Hz}]$")
ax.set_ylabel(r"Eccentricity, $e$")
ratio = np.zeros_like(LISA_snr)
nonzero = np.logical_and(LISA_snr > 0, TQ_snr > 0)
ratio[nonzero] = LISA_snr[nonzero] / TQ_snr[nonzero]
ratio = ratio.reshape(F.shape)
# make contours of the ratio of SNR
ratio_cont = ax.contourf(F, E, ratio, cmap="PRGn_r", norm=TwoSlopeNorm(vcenter=1.0),
levels=np.arange(0, 3.75 + 0.2, 0.2))
for c in ratio_cont.collections:
c.set_edgecolor("face")
# add a line when the SNRs are equal
ax.contour(F, E, ratio, levels=[1.0], colors="grey", linewidths=2.0, linestyles="--")
# add a colourbar
cbar = fig.colorbar(ratio_cont, fraction=2/14, pad=0.02,
label=r"$\rho_{\rm LISA} / \rho_{\rm TianQin}$",
ticks=np.arange(0, 3.5 + 0.5, 0.5))
# annotate which regions suit each detector
ax.annotate("LISA stronger", xy=(0.1, 0.53), xycoords="axes fraction", fontsize=0.7 * fs,
color=plt.get_cmap("PRGn_r")(1.0),
bbox=dict(boxstyle="round", facecolor="white", edgecolor="white", alpha=0.5, pad=0.4))
ax.annotate("TianQin stronger", xy=(0.6, 0.73), xycoords="axes fraction", fontsize=0.7 * fs,
color=plt.get_cmap("PRGn_r")(0.0),
bbox=dict(boxstyle="round", facecolor="white", edgecolor="white", alpha=0.5, pad=0.4))
# annotate with source details
source_string = r"$m_1 = {{{}}} \, {{ \rm M_{{\odot}}}}$".format(m_1[0].value)
source_string += "\n"
source_string += r"$m_2 = {{{}}} \, {{ \rm M_{{\odot}}}}$".format(m_1[0].value)
source_string += "\n"
source_string += r"$D_L = {{{}}} \, {{ \rm kpc}}$".format(dist[0].value)
ax.annotate(source_string, xy=(0.98, 0.03), xycoords="axes fraction", ha="right", fontsize=0.75*fs,
bbox=dict(boxstyle="round", facecolor="white", edgecolor="white", alpha=0.5, pad=0.4))
ax.set_rasterization_zorder(10000)
plt.savefig("detector_snr_ratio.pdf", format="pdf", bbox_inches="tight") | [
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.pyplot.get_cmap",
"numpy.logical_and",
"numpy.logspace",
"legwork.source.Source",
"matplotlib.pyplot.rcParams.update",
"numpy.arange",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.colors.TwoSlopeNorm",
"... | [((142, 172), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (148, 172), True, 'import matplotlib.pyplot as plt\n'), ((599, 626), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (618, 626), True, 'import matplotlib.pyplot as plt\n'), ((687, 711), 'numpy.logspace', 'np.logspace', (['(-4)', '(-1)', '(200)'], {}), '(-4, -1, 200)\n', (698, 711), True, 'import numpy as np\n'), ((720, 744), 'numpy.linspace', 'np.linspace', (['(0)', '(0.9)', '(150)'], {}), '(0, 0.9, 150)\n', (731, 744), True, 'import numpy as np\n'), ((777, 804), 'numpy.meshgrid', 'np.meshgrid', (['f_orb_s', 'ecc_s'], {}), '(f_orb_s, ecc_s)\n', (788, 804), True, 'import numpy as np\n'), ((1095, 1196), 'legwork.source.Source', 'lw.source.Source', ([], {'m_1': 'm_1', 'm_2': 'm_2', 'f_orb': '(F_flat * u.Hz)', 'ecc': 'E_flat', 'dist': 'dist', 'gw_lum_tol': '(0.001)'}), '(m_1=m_1, m_2=m_2, f_orb=F_flat * u.Hz, ecc=E_flat, dist=\n dist, gw_lum_tol=0.001)\n', (1111, 1196), True, 'import legwork as lw\n'), ((1547, 1577), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 12)'}), '(figsize=(14, 12))\n', (1559, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1732), 'numpy.zeros_like', 'np.zeros_like', (['LISA_snr'], {}), '(LISA_snr)\n', (1722, 1732), True, 'import numpy as np\n'), ((1743, 1783), 'numpy.logical_and', 'np.logical_and', (['(LISA_snr > 0)', '(TQ_snr > 0)'], {}), '(LISA_snr > 0, TQ_snr > 0)\n', (1757, 1783), True, 'import numpy as np\n'), ((3504, 3576), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""detector_snr_ratio.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('detector_snr_ratio.pdf', format='pdf', bbox_inches='tight')\n", (3515, 3576), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1988), 'matplotlib.colors.TwoSlopeNorm', 'TwoSlopeNorm', ([], {'vcenter': '(1.0)'}), '(vcenter=1.0)\n', (1975, 1988), False, 'from matplotlib.colors 
import TwoSlopeNorm\n'), ((2022, 2051), 'numpy.arange', 'np.arange', (['(0)', '(3.75 + 0.2)', '(0.2)'], {}), '(0, 3.75 + 0.2, 0.2)\n', (2031, 2051), True, 'import numpy as np\n'), ((2410, 2438), 'numpy.arange', 'np.arange', (['(0)', '(3.5 + 0.5)', '(0.5)'], {}), '(0, 3.5 + 0.5, 0.5)\n', (2419, 2438), True, 'import numpy as np\n'), ((2593, 2615), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""PRGn_r"""'], {}), "('PRGn_r')\n", (2605, 2615), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2854), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""PRGn_r"""'], {}), "('PRGn_r')\n", (2844, 2854), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1419), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1416, 1419), True, 'import numpy as np\n')] |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
DEFAULT_CAMERA_CONFIG = {
'distance': 4.0,
}
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='ant.xml',
ctrl_cost_weight=0.5,
contact_cost_weight=5e-4,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_z_range=(0.2, 1.0),
contact_force_range=(-1.0, 1.0),
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True):
utils.EzPickle.__init__(**locals())
self._ctrl_cost_weight = ctrl_cost_weight
self._contact_cost_weight = contact_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._contact_force_range = contact_force_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
@property
def healthy_reward(self):
return float(
self.is_healthy
or self._terminate_when_unhealthy
) * self._healthy_reward
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def contact_forces(self):
raw_contact_forces = self.sim.data.cfrc_ext
min_value, max_value = self._contact_force_range
contact_forces = np.clip(raw_contact_forces, min_value, max_value)
return contact_forces
@property
def contact_cost(self):
contact_cost = self._contact_cost_weight * np.sum(
np.square(self.contact_forces))
return contact_cost
@property
def is_healthy(self):
state = self.state_vector()
min_z, max_z = self._healthy_z_range
is_healthy = (np.isfinite(state).all() and min_z <= state[2] <= max_z)
return is_healthy
@property
def done(self):
done = (not self.is_healthy
if self._terminate_when_unhealthy
else False)
return done
def step(self, action):
action[4:]=0
xy_position_before = self.get_body_com("torso")[:2].copy()
self.do_simulation(action, self.frame_skip)
xy_position_after = self.get_body_com("torso")[:2].copy()
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
ctrl_cost = self.control_cost(action)
contact_cost = self.contact_cost
forward_reward = x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost + contact_cost
reward = rewards - costs
done = self.done
observation = self._get_obs()
info = {
'reward_forward': forward_reward,
'reward_ctrl': -ctrl_cost,
'reward_contact': -contact_cost,
'reward_survive': healthy_reward,
'x_position': xy_position_after[0],
'y_position': xy_position_after[1],
'distance_from_origin': np.linalg.norm(xy_position_after, ord=2),
'x_velocity': x_velocity,
'y_velocity': y_velocity,
'forward_reward': forward_reward,
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
contact_force = self.contact_forces.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[2:]
observations = np.concatenate((position, velocity, contact_force))
return observations
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| [
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.square",
"numpy.isfinite",
"numpy.clip",
"numpy.linalg.norm",
"numpy.concatenate"
] | [((1163, 1211), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'xml_file', '(5)'], {}), '(self, xml_file, 5)\n', (1192, 1211), False, 'from gym.envs.mujoco import mujoco_env\n'), ((1704, 1753), 'numpy.clip', 'np.clip', (['raw_contact_forces', 'min_value', 'max_value'], {}), '(raw_contact_forces, min_value, max_value)\n', (1711, 1753), True, 'import numpy as np\n'), ((3919, 3970), 'numpy.concatenate', 'np.concatenate', (['(position, velocity, contact_force)'], {}), '((position, velocity, contact_force))\n', (3933, 3970), True, 'import numpy as np\n'), ((3393, 3433), 'numpy.linalg.norm', 'np.linalg.norm', (['xy_position_after'], {'ord': '(2)'}), '(xy_position_after, ord=2)\n', (3407, 3433), True, 'import numpy as np\n'), ((1478, 1495), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1487, 1495), True, 'import numpy as np\n'), ((1898, 1928), 'numpy.square', 'np.square', (['self.contact_forces'], {}), '(self.contact_forces)\n', (1907, 1928), True, 'import numpy as np\n'), ((2102, 2120), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (2113, 2120), True, 'import numpy as np\n')] |
import os
import pickle
import torch
import csv
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
class VirefDataset(Dataset):
def __init__(self, args, refexp_csv, max_refexp_len=25, video_name_restriction=None):
self.refexp_list = []
self.video_names = set([])
self.word_embed = {}
self.word2idx = {}
self.max_refexp_len = max_refexp_len
self.video_name_restriction = video_name_restriction
self.object_features = {}
self.object_vgg_features = {}
self.pair_features = {}
self.pair_features_blackened = {}
features_dir = args.features_dir
dataset_split_dir = args.dataset_split_dir
data_dir = args.data_dir
with open(refexp_csv, newline='') as csv_file:
reader = csv.reader(csv_file)
for video_name, obj1, obj2, refexp in reader:
if len(refexp.split()) > self.max_refexp_len:
continue
if self.video_name_restriction is not None and self.video_name_restriction != video_name:
continue
self.refexp_list.append([video_name, int(obj1), int(obj2), refexp])
for video_name, obj1, obj2, refexp in self.refexp_list:
self.video_names.add(video_name)
for video_name, obj1, obj2, refexp in self.refexp_list:
obj1, obj2 = sorted([obj1, obj2])
obj1_feature_path = os.path.join(features_dir, 'object_blackened', '{:s}__{:d}'.format(video_name, obj1)) #c3d blackened features
obj2_feature_path = os.path.join(features_dir, 'object_blackened', '{:s}__{:d}'.format(video_name, obj2))
obj1_vgg_feature_path = os.path.join(features_dir, 'object', '{:s}__{:d}'.format(video_name, obj1))
obj2_vgg_feature_path = os.path.join(features_dir, 'object', '{:s}__{:d}'.format(video_name, obj2))
pair_feature_path = os.path.join(features_dir, 'pair', '{:s}__{:d}_{:d}'.format(video_name, obj1, obj2))
pair_feature_blackened_path = os.path.join(features_dir, 'pair_blackened', '{:s}__{:d}_{:d}'.format(video_name, obj1, obj2))
if (video_name, obj1) not in self.object_features:
with open(obj1_feature_path, 'rb') as pickle_file:
self.object_features[(video_name, obj1)] = pickle.load(pickle_file)
if (video_name, obj2) not in self.object_features:
with open(obj2_feature_path, 'rb') as pickle_file:
self.object_features[(video_name, obj2)] = pickle.load(pickle_file)
if (video_name, obj1) not in self.object_vgg_features:
with open(obj1_vgg_feature_path, 'rb') as pickle_file:
self.object_vgg_features[(video_name, obj1)] = pickle.load(pickle_file)
if (video_name, obj2) not in self.object_vgg_features:
with open(obj2_vgg_feature_path, 'rb') as pickle_file:
self.object_vgg_features[(video_name, obj2)] = pickle.load(pickle_file)
if (video_name, obj1, obj2) not in self.pair_features:
with open(pair_feature_path, "rb") as pickle_file:
self.pair_features[(video_name, obj1, obj2)] = pickle.load(pickle_file)
if (video_name, obj1, obj2) not in self.pair_features_blackened:
with open(pair_feature_blackened_path, "rb") as pickle_file:
self.pair_features_blackened[(video_name, obj1, obj2)] = pickle.load(pickle_file)
self.word_list = []
with open(os.path.join(data_dir, 'dict.txt'), 'r') as dict_file:
for line in dict_file:
line = line.strip()
if line == '':
continue
self.word_list.append(line)
word2vec = {}
with open(os.path.join(data_dir, 'glove.6B.50d.txt'), 'r') as word2vec_file:
for line in word2vec_file:
line = line.strip()
if line == '':
continue
word, *vector = line.split()
vector = np.asarray(list(map(float, vector))).astype(np.float32)
word2vec[word] = vector
self.word_embed['<nil>'] = np.zeros((50, )).astype(np.float32)
self.word_embed['<unk>'] = np.ones((50, )).astype(np.float32)
self.word_embed['<start>'] = np.zeros((50, )).astype(np.float32)
self.word_embed['<end>'] = np.zeros((50, )).astype(np.float32)
self.word_embed['<start>'][0] = 1
self.word_embed['<end>'][1] = 1
self.word2idx['<nil>'] = 0
self.word2idx['<unk>'] = 1
self.word2idx['<start>'] = 2
self.word2idx['<end>'] = 3
for word_idx, word in enumerate(self.word_list):
if word not in word2vec:
raise NameError("Dictionary word {:s} is not in word2vec".format(word))
self.word2idx[word] = word_idx+4
self.word_embed[word] = word2vec[word]
self.word_list = ['<nil>', '<unk>', '<start>', '<end>'] + self.word_list
def __len__(self):
return len(self.refexp_list)
def __getitem__(self, index):
video_name, obj1, obj2, refexp = self.refexp_list[index]
sorted_obj1, sorted_obj2 = sorted([obj1, obj2])
obj1_feature = self.object_features[(video_name, obj1)]
obj2_feature = self.object_features[(video_name, obj2)]
obj1_vgg_feature = self.object_vgg_features[(video_name, obj1)]
obj2_vgg_feature = self.object_vgg_features[(video_name, obj2)]
pair_feature = self.pair_features[(video_name, sorted_obj1, sorted_obj2)]
pair_feature_blackened = self.pair_features_blackened[(video_name, sorted_obj1, sorted_obj2)]
decoder_input = np.zeros((self.max_refexp_len+1, 50))
decoder_output = np.zeros((self.max_refexp_len+1, )).astype(int)
exps = refexp.split()
exps = ["<start>"] + exps + ["<end>"]
exps_embed = []
for exp_idx, exp in enumerate(exps):
if exp not in self.word_embed:
exp = "<unk>"
exps[exp_idx] = "<unk>"
exps_embed.append(self.word_embed[exp])
t_decoder = 0
while t_decoder < len(exps_embed)-1:
decoder_input[t_decoder] = exps_embed[t_decoder]
decoder_output[t_decoder] = self.word2idx[exps[t_decoder+1]]
t_decoder += 1
sample = {"obj1_feature":obj1_feature,
"obj2_feature":obj2_feature,
"obj1_vgg_feature":obj1_vgg_feature,
"obj2_vgg_feature":obj2_vgg_feature,
"pair_feature":pair_feature,
"pair_feature_blackened":pair_feature_blackened,
"decoder_input":decoder_input,
"decoder_output":decoder_output,
"video_name":video_name,
"obj1":obj1,
"obj2":obj2,
"refexp":refexp}
return sample
| [
"csv.reader",
"numpy.zeros",
"numpy.ones",
"pickle.load",
"os.path.join"
] | [((5076, 5115), 'numpy.zeros', 'np.zeros', (['(self.max_refexp_len + 1, 50)'], {}), '((self.max_refexp_len + 1, 50))\n', (5084, 5115), True, 'import numpy as np\n'), ((770, 790), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (780, 790), False, 'import csv\n'), ((3175, 3209), 'os.path.join', 'os.path.join', (['data_dir', '"""dict.txt"""'], {}), "(data_dir, 'dict.txt')\n", (3187, 3209), False, 'import os\n'), ((3376, 3418), 'os.path.join', 'os.path.join', (['data_dir', '"""glove.6B.50d.txt"""'], {}), "(data_dir, 'glove.6B.50d.txt')\n", (3388, 3418), False, 'import os\n'), ((3689, 3704), 'numpy.zeros', 'np.zeros', (['(50,)'], {}), '((50,))\n', (3697, 3704), True, 'import numpy as np\n'), ((3754, 3768), 'numpy.ones', 'np.ones', (['(50,)'], {}), '((50,))\n', (3761, 3768), True, 'import numpy as np\n'), ((3820, 3835), 'numpy.zeros', 'np.zeros', (['(50,)'], {}), '((50,))\n', (3828, 3835), True, 'import numpy as np\n'), ((3885, 3900), 'numpy.zeros', 'np.zeros', (['(50,)'], {}), '((50,))\n', (3893, 3900), True, 'import numpy as np\n'), ((5133, 5169), 'numpy.zeros', 'np.zeros', (['(self.max_refexp_len + 1,)'], {}), '((self.max_refexp_len + 1,))\n', (5141, 5169), True, 'import numpy as np\n'), ((2123, 2147), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (2134, 2147), False, 'import pickle\n'), ((2306, 2330), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (2317, 2330), False, 'import pickle\n'), ((2501, 2525), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (2512, 2525), False, 'import pickle\n'), ((2696, 2720), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (2707, 2720), False, 'import pickle\n'), ((2892, 2916), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (2903, 2916), False, 'import pickle\n'), ((3114, 3138), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (3125, 3138), False, 'import pickle\n')] |
# -*- coding: utf-8 -*-
#
# Convert NORDIF DAT-file with Kikuchi diffraction patterns to HyperSpy HDF5
# format.
# Created by <NAME> (<EMAIL>)
# 2018-11-20
import hyperspy.api as hs
import numpy as np
import os
import re
import warnings
import argparse
# Parse input parameters
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('file', help='Full path of original file')
parser.add_argument('--lazy', dest='lazy', default=False, action='store_true',
help='Whether to read/write lazy or not')
arguments = parser.parse_args()
# Set data directory, filename and file extension
datadir, fname = os.path.split(arguments.file)
fname, ext = os.path.splitext(fname)
# Get grid dimensions and pattern size from Setting.txt
settings = open(os.path.join(datadir,'Setting.txt'), 'rb')
for i, line in enumerate(settings):
# Pattern size
if i == 47:
match = re.search(b'Resolution\t(.*)\tpx', line).group(1).split(b'x')
SX, SY = [int(i) for i in match]
# Grid dimensions
if i == 79:
match = re.search(b'Number of samples\t(.*)\t#',
line).group(1).split(b'x')
NX, NY = [int(i) for i in match]
settings.close()
# Get data size
DAT_SZ = NX * NY * SX * SY
# Open in correct mode
if arguments.lazy:
patterns = open(arguments.file, 'r+b')
else:
patterns = open(arguments.file, 'rb')
# Read data from file
if not arguments.lazy:
patterns.seek(-DAT_SZ, 2)
data = np.fromfile(patterns, dtype='uint8')
else:
data = np.memmap(patterns, mode='r')
# Reshape data
try:
data = data.reshape((NX, NY, SX, SY), order='C').squeeze()
except ValueError:
warnings.warn('Setting.txt dimensions larger than file size!')
# Create HyperSpy signal
if not arguments.lazy:
s = hs.signals.Signal2D(data)
else:
s = hs.signals.Signal2D(data).as_lazy()
# Write signal to file
s.save(os.path.join(datadir, fname + '.hdf5')) | [
"argparse.ArgumentParser",
"numpy.memmap",
"numpy.fromfile",
"os.path.splitext",
"re.search",
"warnings.warn",
"os.path.split",
"os.path.join",
"hyperspy.api.signals.Signal2D"
] | [((291, 335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (314, 335), False, 'import argparse\n'), ((640, 669), 'os.path.split', 'os.path.split', (['arguments.file'], {}), '(arguments.file)\n', (653, 669), False, 'import os\n'), ((683, 706), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (699, 706), False, 'import os\n'), ((780, 816), 'os.path.join', 'os.path.join', (['datadir', '"""Setting.txt"""'], {}), "(datadir, 'Setting.txt')\n", (792, 816), False, 'import os\n'), ((1484, 1520), 'numpy.fromfile', 'np.fromfile', (['patterns'], {'dtype': '"""uint8"""'}), "(patterns, dtype='uint8')\n", (1495, 1520), True, 'import numpy as np\n'), ((1538, 1567), 'numpy.memmap', 'np.memmap', (['patterns'], {'mode': '"""r"""'}), "(patterns, mode='r')\n", (1547, 1567), True, 'import numpy as np\n'), ((1795, 1820), 'hyperspy.api.signals.Signal2D', 'hs.signals.Signal2D', (['data'], {}), '(data)\n', (1814, 1820), True, 'import hyperspy.api as hs\n'), ((1902, 1940), 'os.path.join', 'os.path.join', (['datadir', "(fname + '.hdf5')"], {}), "(datadir, fname + '.hdf5')\n", (1914, 1940), False, 'import os\n'), ((1675, 1737), 'warnings.warn', 'warnings.warn', (['"""Setting.txt dimensions larger than file size!"""'], {}), "('Setting.txt dimensions larger than file size!')\n", (1688, 1737), False, 'import warnings\n'), ((1835, 1860), 'hyperspy.api.signals.Signal2D', 'hs.signals.Signal2D', (['data'], {}), '(data)\n', (1854, 1860), True, 'import hyperspy.api as hs\n'), ((910, 950), 're.search', 're.search', (["b'Resolution\\t(.*)\\tpx'", 'line'], {}), "(b'Resolution\\t(.*)\\tpx', line)\n", (919, 950), False, 'import re\n'), ((1067, 1113), 're.search', 're.search', (["b'Number of samples\\t(.*)\\t#'", 'line'], {}), "(b'Number of samples\\t(.*)\\t#', line)\n", (1076, 1113), False, 'import re\n')] |
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import networkx as nx
import numpy as np
colors = {'susceptible':'g',
'exposed':'orange',
'infectious':'red',
'recovered':'gray',
'quarantined':'blue',
'testable':'k'}
def get_pos(G, model):
units = list(set([model.G.nodes[ID]['unit'] for ID in model.G.nodes]))
num_residents = len([a for a in model.schedule.agents if \
(a.type == 'resident' and a.unit == 'Q1')])
fixed = ['r{}'.format(i * num_residents + 1) for i in range(len(units))]
if len(units) == 4:
coords = [[-3, -3], [-3, 3], [3, 3], [3, -3]]
elif len(units) == 3:
coords = [[0, -3], [-3, 3], [3, 3]]
elif len(units) == 2:
coords = [[-3, 0], [3, 0]]
else:
coords = [[0, 0]]
fixed_pos = {f:c for f, c in zip(fixed, coords)}
pos = nx.drawing.layout.spring_layout(G, k=1.5, dim=2, weight='weight',
fixed=fixed, pos=fixed_pos, scale=1, iterations=100)
return pos
def draw_states(model, step, pos, pat_ax, emp_ax, leg_ax):
units = list(set([model.G.nodes[ID]['unit'] for ID in model.G.nodes]))
units.sort()
G = model.G
## draw residents
residents = [a.unique_id for a in model.schedule.agents if a.type == 'resident']
resident_states = model.datacollector.get_agent_vars_dataframe()
resident_states = resident_states.iloc[resident_states.index.isin(residents, level=1)]
resident_states['color'] = resident_states['infection_state'].replace(colors)
color_list = resident_states.loc[step].sort_index()['color']
quarantine_states = resident_states.loc[step].sort_index()['quarantine_state']
x_max = np.asarray([a[0] for a in pos.values()]).max()
x_min = np.asarray([a[0] for a in pos.values()]).min()
x_extent = x_max + np.abs(x_min)
y_min = np.asarray([a[1] for a in pos.values()]).min()
y_max = np.asarray([a[1] for a in pos.values()]).max()
y_step = (y_max + np.abs(y_min)) / 10
pat_ax.set_ylim(y_min - y_step/2, y_max + y_step)
pat_ax.text(x_max - x_extent / 2 - 0.1, y_max + y_step / 2, 'residents', fontsize=14)
resident_edges = [(x, y) for x,y, z in G.edges(data=True) \
if (G.nodes[x]['type'] == 'resident' and G.nodes[y]['type'] == 'resident')]
for u, v in resident_edges:
weight = G[u][v]['weight']**2 / 5
try:
pat_ax.plot([pos[u][0], pos[v][0]], [pos[u][1], pos[v][1]], \
color='k', linewidth=weight, zorder=1)
except KeyError:
print('warning: edge ({}, {}) not found in position map'.format(u, v))
resident_handles = {}
for n in residents:
if quarantine_states[n]:
handle = pat_ax.scatter(pos[n][0], pos[n][1], color=color_list[n], s=150, zorder=2,
edgecolors='k', linewidth=3)
resident_handles.update({n:handle})
else:
handle = pat_ax.scatter(pos[n][0], pos[n][1], color=color_list[n], s=150, zorder=2)
resident_handles.update({n:handle})
## draw employees
employees = [a.unique_id for a in model.schedule.agents if a.type == 'employee']
employee_states = model.datacollector.get_agent_vars_dataframe()
employee_states = employee_states.iloc[employee_states.index.isin(employees, level=1)]
employee_states['color'] = employee_states['infection_state'].replace(colors)
color_list = employee_states.loc[step].sort_index()['color']
quarantine_states = employee_states.loc[step].sort_index()['quarantine_state']
unit_list = [y['unit'] for x,y in G.nodes(data=True) if y['type'] == 'employee']
N_employee = len(unit_list) / len(set(unit_list))
employee_handles = {}
emp_ax.set_xlim(-0.5, len(units) - 1 + 0.5)
emp_ax.set_ylim(-1, N_employee)
emp_ax.text(0 - 0.25, N_employee - 0.45, 'employees', fontsize=14)
for j, unit in enumerate(units):
employees = [a.unique_id for a in model.schedule.agents if \
(a.type == 'employee' and a.unit == unit)]
#emp_ax.text(j - 0.065, -0.8, unit, fontsize=14)
for i, e in enumerate(employees):
ypos = i
if quarantine_states[e]:
handle = emp_ax.scatter(j, i, color=color_list[e], \
s=100, edgecolors='k', linewidth=3)
employee_handles.update({e:handle})
else:
handle = emp_ax.scatter(j, i, color=color_list[e], s=150)
employee_handles.update({e:handle})
for ax in [pat_ax, emp_ax, leg_ax]:
ax.set_xticks([])
ax.set_yticks([])
ax.set_frame_on(False)
handles, labels = pat_ax.get_legend_handles_labels()
S_handle = plt.Line2D((0,1),(0,0), color=colors['susceptible'],
marker='o', linestyle='', markersize=15)
E_handle = plt.Line2D((0,1),(0,0), color=colors['exposed'],
marker='o', linestyle='', markersize=15)
I_handle = plt.Line2D((0,1),(0,0), color=colors['infectious'],
marker='o', linestyle='', markersize=15)
R_handle = plt.Line2D((0,1),(0,0), color=colors['recovered'],
marker='o', linestyle='', markersize=15)
X_handle = plt.Line2D((0,1),(0,0), color='k',marker='o',
linestyle='', markersize=15, mfc='none', mew=3)
#Create legend from custom artist/label lists
legend = leg_ax.legend([S_handle, E_handle, I_handle, R_handle, X_handle],
['susceptible', 'exposed', 'infected', 'recovered', 'quarantined'],
fontsize=14, loc=2)
step_text_handle = leg_ax.text(0.32, 0.7, 'day {}'.format(step), fontsize=14)
return legend, employee_handles, resident_handles, step_text_handle
def draw_infection_timeline(model, agent_type, ax):
    """Plot the SEIRX epidemic curves for a single agent type on ``ax``.

    Draws one curve per compartment (S, E, symptomatic I, asymptomatic I,
    R, quarantined X) as a percentage of the agent-type population, overlays
    translucent vertical lines on steps where a screen took place, and
    builds a combined legend with custom artists for the screen lines.

    Parameters
    ----------
    model : mesa-style model exposing ``datacollector`` with per-step
        compartment counts named ``'<compartment>_<agent_type>'``.
    agent_type : str
        Agent type whose columns are plotted (e.g. 'resident', 'employee').
    ax : matplotlib.axes.Axes
        Axis the curves are drawn on (modified in place).
    """
    lw = 3
    counts = model.datacollector.get_model_vars_dataframe()
    # Population size: at step 0 every agent is either susceptible or exposed.
    N = counts.loc[0, f"S_{agent_type}"] + counts.loc[0, f"E_{agent_type}"]
    # Susceptible counts are not collected directly; reconstruct them as the
    # remainder of the (constant) population.
    counts[f'S_{agent_type}'] = (N
                                 - counts[f'E_{agent_type}']
                                 - counts[f'I_{agent_type}']
                                 - counts[f'R_{agent_type}'])
    # (column prefix, legend label, color, alpha) — plot order fixes legend order.
    curve_specs = [
        ('S', 'S', colors['susceptible'], None),
        ('E', 'E', colors['exposed'], None),
        ('I_symptomatic', '$I_1$', colors['infectious'], None),
        ('I_asymptomatic', '$I_2$', colors['infectious'], 0.3),
        ('R', 'R', colors['recovered'], None),
        ('X', 'X', colors['quarantined'], None),
    ]
    for prefix, label, color, alpha in curve_specs:
        ax.plot(counts[f'{prefix}_{agent_type}'] / N * 100,
                label=label, color=color, alpha=alpha,
                linewidth=lw, zorder=1)
    # Vertical screen lines: one translucent bar per step on which a screen
    # of the given type happened.
    screen_colours = {'reactive': 'grey', 'follow_up': 'blue',
                      'preventive': 'green'}
    screen_types = ['reactive', 'follow_up', 'preventive']
    for screen_type in screen_types:
        for day, screened in enumerate(
                counts[f'screen_{agent_type}s_{screen_type}']):
            if screened:
                ax.plot([day, day], [0, 100],
                        color=screen_colours[screen_type], alpha=0.3,
                        linewidth=4, zorder=2)
    # Legend: curve handles plus custom artists standing in for screen lines.
    handles, labels = ax.get_legend_handles_labels()
    screen_handles = [plt.Line2D((0, 1), (0, 0), color=screen_colours[st],
                                 alpha=0.3, linewidth=4)
                      for st in screen_types]
    screen_labels = ['{} {} screen'.format(st.replace('_', '-'),
                                           agent_type.replace('_', ' '))
                     for st in screen_types]
    ax.legend(list(handles) + screen_handles,
              list(labels) + screen_labels,
              ncol=2, loc=6, fontsize=14, bbox_to_anchor=[0, 0.55])
    ax.set_xlabel('steps', fontsize=20)
    ax.set_ylabel('% of population', fontsize=20)
    ax.xaxis.set_major_locator(MultipleLocator(10))
    ax.xaxis.set_minor_locator(MultipleLocator(1))
    ax.tick_params(axis='both', which='major', labelsize=14)
    ax.set_title('{}s (N={})'.format(agent_type.replace('_', ' '), N),
                 fontsize=20)
def draw_combined_infection_timeline(model, agent_groups, ax):
    """Plot aggregated SEIRX epidemic curves over several agent groups.

    Sums the compartment counts (S, E, I_symptomatic, I_asymptomatic, R, X)
    of every agent type in ``agent_groups`` and plots each sum as a
    percentage of the combined population on ``ax``.

    Parameters
    ----------
    model : mesa-style model exposing ``datacollector`` plus the graphs
        ``MG`` and ``G`` whose nodes carry a ``'type'`` attribute.
    agent_groups : iterable of str
        Agent type names to aggregate (e.g. ['resident', 'employee']).
    ax : matplotlib.axes.Axes
        Axis the curves are drawn on (modified in place).
    """
    linewidth = 3
    pop_numbers = model.datacollector.get_model_vars_dataframe()
    # NOTE(review): the total population is taken from model.MG while the
    # per-type counts below use model.G — confirm both graphs contain the
    # same agent nodes.
    N_total = len({x for x, y in model.MG.nodes(data=True)
                   if y['type'] in agent_groups})
    compartments = ['S', 'E', 'I_symptomatic', 'I_asymptomatic', 'R', 'X']
    for c in compartments:
        pop_numbers[f'{c}_combined'] = 0
    for agent_type in agent_groups:
        N_agent = len([x for x, y in model.G.nodes(data=True)
                       if y['type'] == agent_type])
        # Susceptible counts are not collected directly; reconstruct them as
        # the remainder of the (constant) per-type population.
        pop_numbers[f'S_{agent_type}'] = (N_agent
                                          - pop_numbers[f'E_{agent_type}']
                                          - pop_numbers[f'I_{agent_type}']
                                          - pop_numbers[f'R_{agent_type}'])
        for c in compartments:
            pop_numbers[f'{c}_combined'] += pop_numbers[f'{c}_{agent_type}']
    # (combined column prefix, legend label, color, alpha) — plot order fixes
    # legend order.
    curve_specs = [
        ('S', 'S', colors['susceptible'], None),
        ('E', 'E', colors['exposed'], None),
        ('I_symptomatic', '$I_1$', colors['infectious'], None),
        ('I_asymptomatic', '$I_2$', colors['infectious'], 0.3),
        ('R', 'R', colors['recovered'], None),
        ('X', 'X', colors['quarantined'], None),
    ]
    for prefix, label, color, alpha in curve_specs:
        ax.plot(pop_numbers[f'{prefix}_combined'] / N_total * 100,
                label=label, color=color, alpha=alpha,
                linewidth=linewidth, zorder=1)
    # The handles/labels are passed straight through (no custom artists here);
    # the former identity comprehensions over enumerate() were redundant.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels, ncol=2, loc=6,
              fontsize=14, bbox_to_anchor=[0, 0.55])
    ax.set_xlabel('steps', fontsize=20)
    ax.set_ylabel('% of population', fontsize=20)
    ax.set_ylim(-1, 100)
    ax.xaxis.set_major_locator(MultipleLocator(10))
    ax.xaxis.set_minor_locator(MultipleLocator(1))
    ax.tick_params(axis='both', which='major', labelsize=14)
    ax.set_title('combined (N={})'.format(N_total), fontsize=20)
| [
"networkx.drawing.layout.spring_layout",
"matplotlib.ticker.MultipleLocator",
"numpy.abs",
"matplotlib.pyplot.Line2D"
] | [((826, 949), 'networkx.drawing.layout.spring_layout', 'nx.drawing.layout.spring_layout', (['G'], {'k': '(1.5)', 'dim': '(2)', 'weight': '"""weight"""', 'fixed': 'fixed', 'pos': 'fixed_pos', 'scale': '(1)', 'iterations': '(100)'}), "(G, k=1.5, dim=2, weight='weight', fixed=\n fixed, pos=fixed_pos, scale=1, iterations=100)\n", (857, 949), True, 'import networkx as nx\n'), ((4381, 4481), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "colors['susceptible']", 'marker': '"""o"""', 'linestyle': '""""""', 'markersize': '(15)'}), "((0, 1), (0, 0), color=colors['susceptible'], marker='o',\n linestyle='', markersize=15)\n", (4391, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4490, 4587), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "colors['exposed']", 'marker': '"""o"""', 'linestyle': '""""""', 'markersize': '(15)'}), "((0, 1), (0, 0), color=colors['exposed'], marker='o', linestyle=\n '', markersize=15)\n", (4500, 4587), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4694), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "colors['infectious']", 'marker': '"""o"""', 'linestyle': '""""""', 'markersize': '(15)'}), "((0, 1), (0, 0), color=colors['infectious'], marker='o',\n linestyle='', markersize=15)\n", (4605, 4694), True, 'import matplotlib.pyplot as plt\n'), ((4703, 4802), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "colors['recovered']", 'marker': '"""o"""', 'linestyle': '""""""', 'markersize': '(15)'}), "((0, 1), (0, 0), color=colors['recovered'], marker='o', linestyle\n ='', markersize=15)\n", (4713, 4802), True, 'import matplotlib.pyplot as plt\n'), ((4810, 4912), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': '"""k"""', 'marker': '"""o"""', 'linestyle': '""""""', 'markersize': '(15)', 'mfc': '"""none"""', 'mew': '(3)'}), "((0, 1), (0, 0), color='k', marker='o', linestyle='', markersize=\n 15, mfc='none', 
mew=3)\n", (4820, 4912), True, 'import matplotlib.pyplot as plt\n'), ((7077, 7165), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "screen_colours['reactive']", 'alpha': '(0.3)', 'linewidth': '(4)'}), "((0, 1), (0, 0), color=screen_colours['reactive'], alpha=0.3,\n linewidth=4)\n", (7087, 7165), True, 'import matplotlib.pyplot as plt\n'), ((7189, 7278), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "screen_colours['follow_up']", 'alpha': '(0.3)', 'linewidth': '(4)'}), "((0, 1), (0, 0), color=screen_colours['follow_up'], alpha=0.3,\n linewidth=4)\n", (7199, 7278), True, 'import matplotlib.pyplot as plt\n'), ((7303, 7393), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': "screen_colours['preventive']", 'alpha': '(0.3)', 'linewidth': '(4)'}), "((0, 1), (0, 0), color=screen_colours['preventive'], alpha=0.3,\n linewidth=4)\n", (7313, 7393), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1748), 'numpy.abs', 'np.abs', (['x_min'], {}), '(x_min)\n', (1741, 1748), True, 'import numpy as np\n'), ((7983, 8002), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (7998, 8002), False, 'from matplotlib.ticker import MultipleLocator\n'), ((8032, 8050), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (8047, 8050), False, 'from matplotlib.ticker import MultipleLocator\n'), ((10736, 10755), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (10751, 10755), False, 'from matplotlib.ticker import MultipleLocator\n'), ((10785, 10803), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (10800, 10803), False, 'from matplotlib.ticker import MultipleLocator\n'), ((1881, 1894), 'numpy.abs', 'np.abs', (['y_min'], {}), '(y_min)\n', (1887, 1894), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.