code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegressionCV
from sklearn.decomposition import DictionaryLearning, NMF, PCA
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import scale, normalize
from sklearn.model_selection import GridSearchCV
from ENWrapper import ENSim
from matplotlib import pyplot as plt
import json
class ResidualType:
    """Enumeration of strategies for turning a (reference, measured)
    pair of sensor readings into a residual feature vector.

    See ``FaultClassifier.residual_func`` for the implementations.
    """
    ABSOLUTE = 0      # ref - measured
    RELATIVE = 1      # (ref - measured) / ref
    STANDARDIZED = 2  # not implemented yet in residual_func
    NORMALIZED = 3    # currently computed the same as ABSOLUTE
    SCALED = 4        # not implemented yet in residual_func
# Default JSON dataset locations used by the __main__ demo below.
TRAIN_PATH = 'train_set.json'
TEST_PATH = 'test_set.json'
TEST2_PATH = 'test2_set.json'
class FaultClassifier(object):
    """Pipeline that trains an sklearn estimator to localize faulty
    (emitter) nodes in a water network from pressure residuals.

    The training/testing data are JSON dumps of EPANET simulation runs;
    each record carries per-node time series plus an ``EMITTER_NODE``
    label identifying the simulated fault location.
    """

    # Candidate emitter-node indices considered by the classifier.
    REGION = list(range(2, 25))

    def __init__(self, estimator, train_dataset_path=None, test_dataset_path=None, network_file='data/hanoi.inp',
                 feature_extraction=ResidualType.ABSOLUTE):
        """
        :param estimator: an sklearn estimator object
        :param train_dataset_path: path to the training JSON dataset
        :param test_dataset_path: path to the testing JSON dataset
        :param network_file: path to the EPANET network file, or None to
            skip creating a simulation engine
        :param feature_extraction: one of the ResidualType constants
        """
        self.estimator = estimator
        self.train_dataset_path = train_dataset_path
        self.test_dataset_path = test_dataset_path
        self.network_file = network_file
        self.feature_extraction = feature_extraction
        if self.network_file is not None:
            self.sim_eng = ENSim(self.network_file)
        # Lazily populated by build_dataset().
        self.dataset = None

    def build_dataset(self, element='NODE_VALUES', feature='EN_PRESSURE', method=ResidualType.ABSOLUTE):
        """Load train/test JSON files and cache the extracted feature
        matrices in ``self.dataset``.

        :param element: top-level JSON key holding the per-run records
        :param feature: per-node measurement to use (e.g. pressure)
        :param method: ResidualType constant for residual computation
        """
        print("Building dataset")
        with open(self.train_dataset_path) as f:
            train_data = json.loads(f.read())
        # First record is the fault-free reference run.
        ref = np.array(train_data[element][0][feature])
        print("ref size is {}".format(np.shape(ref)))
        with open(self.test_dataset_path) as f:
            test_data = json.loads(f.read())
        X_train, y_train = self.get_features(train_data, element, feature, method)
        X_test, y_test = self.get_features(test_data, element, feature, method)
        self.dataset = {
            "X_train": X_train,
            "y_train": y_train,
            "X_test": X_test,
            "y_test": y_test,
        }

    def train_model(self, element='NODE_VALUES', feature='EN_PRESSURE', method=ResidualType.ABSOLUTE):
        """
        method used to train the estimator object provided
        :return:
        """
        # BUG FIX: was `self.dataset == None`; identity test is correct here.
        if self.dataset is None:
            self.build_dataset(element, feature, method)
        X = self.dataset["X_train"]
        y = self.dataset["y_train"]
        self.estimator.fit(X, y)

    def get_scores(self):
        """Print a classification report and test-set accuracy for the
        fitted estimator. Requires build_dataset()/train_model() first."""
        X_test = self.dataset["X_test"]
        y_test = self.dataset["y_test"]
        y_pred = self.estimator.predict(X_test)
        print(classification_report(y_test, y_pred, labels=None))
        print("Model accuracy is", self.estimator.score(X_test, y_test))

    def get_features(self, data, element='NODES', feature='EN_PRESSURE', method=ResidualType.ABSOLUTE):
        """Extract a shuffled (X, y) pair of residual features and
        emitter-node labels from a parsed JSON dataset.

        :param data: parsed JSON dataset
        :param element: key holding the list of simulation records
        :param feature: per-node measurement to use
        :param method: ResidualType constant for residual computation
        :return: tuple (X, y) of numpy arrays
        """
        # Reference (fault-free) measurement is the first record.
        ref = data[element][0][feature]
        X = []
        y = []
        for vals in data[element]:
            # Node 1 is the reference record, not a fault sample.
            if vals["EMITTER_NODE"] == 1:
                continue
            residual = FaultClassifier.residual_func(ref, vals[feature], method=method)
            residual = normalize(residual)
            # Average over time steps 35..64 (presumably the steady-state
            # window of the simulation -- TODO confirm against simulator).
            residual = np.mean(residual[35:65], axis=0)
            X.append(residual)
            y.append(vals["EMITTER_NODE"])
        X = np.array(X)
        y = np.array(y)
        # BUG FIX: was list(range(1, len(y))), which silently dropped
        # sample 0 and returned arrays one element short.
        shuffle = np.random.permutation(len(y))
        X = X[shuffle]
        y = y[shuffle]
        y = np.squeeze(y)
        X = np.squeeze(X)
        return X, y

    def grid_search(self, param_grid=None):
        """Run an exhaustive hyper-parameter search for the estimator on
        the training split.

        :param param_grid: grid passed to GridSearchCV; defaults to a
            linear/RBF SVM grid
        :return: the fitted GridSearchCV object
        """
        if param_grid is None:
            param_grid = [
                {
                    'C': [1, 10, 100, 1000],
                    'kernel': ['linear']
                },
                {
                    'C': [1, 10, 100, 1000],
                    'gamma': [0.001, 0.0001],
                    'kernel': ['rbf']
                }
            ]
        if self.dataset is None:
            self.build_dataset()
        # Finding the optimal parameter combination for the desired model.
        grid_clf = GridSearchCV(self.estimator, param_grid)
        # BUG FIX: fit() was called without any data, which raises at runtime.
        grid_clf.fit(self.dataset["X_train"], self.dataset["y_train"])
        return grid_clf

    @staticmethod
    def residual_func(ref, measured, method=ResidualType.ABSOLUTE):
        """
        utility function used to compute the residual of the network
        :param ref: reference vector/matrix
        :param measured: measured vector/matrix
        :param method: method of computing
        :return: calculated residual
        """
        residuals = []
        ref = np.array(ref)
        measured = np.array(measured)
        if method == ResidualType.ABSOLUTE:
            residuals = ref - measured
        elif method == ResidualType.RELATIVE:
            residuals = (ref - measured) / ref
        elif method == ResidualType.NORMALIZED:
            # NOTE: currently identical to ABSOLUTE.
            residuals = ref - measured
        elif method == ResidualType.SCALED:
            pass  # TODO: not implemented; returns empty list
        elif method == ResidualType.STANDARDIZED:
            pass  # TODO: not implemented; returns empty list
        return residuals
if __name__ == '__main__':
    # Train a linear SVM fault classifier and report test-set accuracy.
    estimator = SVC(kernel='linear', C=10, verbose=False, class_weight='balanced', )
    classifier = FaultClassifier(estimator, TRAIN_PATH, TEST_PATH,
                                 feature_extraction=ResidualType.ABSOLUTE)
    classifier.train_model()
    classifier.get_scores()
| [
"sklearn.model_selection.GridSearchCV",
"json.loads",
"sklearn.svm.SVC",
"numpy.mean",
"numpy.shape",
"sklearn.metrics.classification_report",
"numpy.squeeze",
"numpy.array",
"sklearn.preprocessing.normalize",
"ENWrapper.ENSim",
"numpy.random.shuffle"
] | [((5401, 5467), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'C': '(10)', 'verbose': '(False)', 'class_weight': '"""balanced"""'}), "(kernel='linear', C=10, verbose=False, class_weight='balanced')\n", (5404, 5467), False, 'from sklearn.svm import SVC\n'), ((1643, 1663), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (1653, 1663), False, 'import json\n'), ((1679, 1720), 'numpy.array', 'np.array', (['train_data[element][0][feature]'], {}), '(train_data[element][0][feature])\n', (1687, 1720), True, 'import numpy as np\n'), ((1877, 1897), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (1887, 1897), False, 'import json\n'), ((3662, 3673), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3670, 3673), True, 'import numpy as np\n'), ((3686, 3697), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3694, 3697), True, 'import numpy as np\n'), ((3747, 3773), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle'], {}), '(shuffle)\n', (3764, 3773), True, 'import numpy as np\n'), ((3832, 3845), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (3842, 3845), True, 'import numpy as np\n'), ((3858, 3871), 'numpy.squeeze', 'np.squeeze', (['X'], {}), '(X)\n', (3868, 3871), True, 'import numpy as np\n'), ((4394, 4434), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['self.estimator', 'param_grid'], {}), '(self.estimator, param_grid)\n', (4406, 4434), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4849, 4862), 'numpy.array', 'np.array', (['ref'], {}), '(ref)\n', (4857, 4862), True, 'import numpy as np\n'), ((4882, 4900), 'numpy.array', 'np.array', (['measured'], {}), '(measured)\n', (4890, 4900), True, 'import numpy as np\n'), ((1346, 1370), 'ENWrapper.ENSim', 'ENSim', (['self.network_file'], {}), '(self.network_file)\n', (1351, 1370), False, 'from ENWrapper import ENSim\n'), ((2846, 2898), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'labels': 'labels'}), 
'(y_test, y_pred, labels=labels)\n', (2867, 2898), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((3499, 3518), 'sklearn.preprocessing.normalize', 'normalize', (['residual'], {}), '(residual)\n', (3508, 3518), False, 'from sklearn.preprocessing import scale, normalize\n'), ((3542, 3574), 'numpy.mean', 'np.mean', (['residual[35:65]'], {'axis': '(0)'}), '(residual[35:65], axis=0)\n', (3549, 3574), True, 'import numpy as np\n'), ((1759, 1772), 'numpy.shape', 'np.shape', (['ref'], {}), '(ref)\n', (1767, 1772), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
convert compas dataset into S, X1, X2, y quadraple
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import time
import datetime
import sys
import os
import copy
import itertools
from sklearn import svm
from sklearn import tree
from sklearn import ensemble
from sklearn import linear_model
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import pandas as pd
#from statsmodels.discrete import discrete_model
import math
import random
from io import open
import conf
def strip(text):
    """Strip surrounding whitespace from *text* when it supports it;
    values without a ``strip`` method (e.g. NaN, numbers) pass through
    unchanged."""
    try:
        stripper = text.strip
    except AttributeError:
        return text
    return stripper()
def read_compas(filename = os.path.join(conf.datadir, "compas-analysis/compas-scores-two-years.csv"), smlfeatures=False, return_all=False, single_S=False): #read compas dataset file (numeric ver)
    """Load the COMPAS two-year recidivism dataset and return the
    (S, X1, X2, y) quadruple used by the fairness experiments.

    :param filename: path to the COMPAS two-year CSV file. NOTE: the file
        is rewritten in place with rows containing "?" removed.
    :param smlfeatures: if True, drop several high-cardinality/date
        features from X1
    :param return_all: if True, return the preprocessed DataFrame instead
    :param single_S: if True, use only sex as the sensitive attribute
    :return: (S, X1, X2, y) numpy arrays, where S holds the sensitive
        attributes, X2 the intercept column, X1 the remaining features and
        y the +/-1 recidivism labels
    """
    # BUG FIX: both file handles were previously leaked (the read handle
    # was never closed and the write handle only on the happy path).
    with open(filename, "r") as fi:
        lines = [line for line in fi.readlines() if line.find("?") == -1]
    with open(filename, "w") as fo:
        fo.writelines(lines)
    data = pd.read_csv(filename, sep=',')
    int_values = ["age","juv_fel_count","decile_score","juv_misd_count","juv_other_count","v_decile_score","priors_count"] #,"is_recid"
    string_values = ["sex","two_year_recid","type_of_assessment","v_type_of_assessment"]#,"r_charge_desc"]
    date_values=["c_jail_in","c_jail_out","c_offense_date","screening_date","in_custody","out_custody"]
    my_attrs = []
    for int_val in int_values:
        my_attrs.append(data[int_val])
    for string_val in string_values:
        # One-hot encode; drop_first avoids a redundant column.
        my_attrs.append( pd.get_dummies(data[string_val], prefix=string_val, drop_first=True) )
    for date_val in date_values:
        # Min-max scale dates into [0, 1].
        temp = pd.to_datetime(data[date_val])
        t_min, t_max = min(temp), max(temp)
        my_attrs.append( (temp-t_min)/(t_max-t_min) )
    new_data = pd.concat(my_attrs, axis=1)
    new_data["African-American"] = (data["race"] == "African-American")
    new_data = new_data.dropna()
    if return_all:
        return new_data
    new_data.insert(0, "intercept", 1)
    # Correlations with the label (kept for inspection/debugging).
    corr_akey = []
    for akey in new_data.keys():
        corr_akey.append((np.corrcoef(new_data[akey], new_data["two_year_recid_1"])[0,1], akey))
    if single_S:
        S_keys = ["sex_Male"]
    else:
        S_keys = ["sex_Male", "African-American"]
    S = np.transpose([list(new_data[i]) for i in S_keys])
    # Map the {0, 1} label to {-1.0, +1.0}.
    y = [v*2.0-1.0 for v in new_data["two_year_recid_1"]]
    X_keys = set(new_data.keys()).difference([]+S_keys)
    # Drop all race-derived columns from the feature set.
    X_keys_nonrace = set()
    for akey in X_keys:
        if akey.find("race") != 0:
            X_keys_nonrace.add(akey)
    X_keys = X_keys_nonrace
    print("X_keys=",len(X_keys),X_keys)
    # X2 carries only the intercept column.
    X2_keys = set(["intercept"]).intersection(X_keys)
    print("X2 keys=",X2_keys)
    X2 = np.transpose([list(new_data[i]) for i in X2_keys])
    X2 = np.array(X2).reshape([len(new_data),len(X2_keys)])
    X1_keys = X_keys.difference(X2_keys.union(set(["two_year_recid_1"])))
    if smlfeatures:
        X1_keys = X1_keys.difference(set(["out_custody","decile_score","in_custody","c_jail_out","c_jail_in","screening_date","v_decile_score"]))
    X1 = np.transpose([list(new_data[i]) for i in X1_keys])
    print("X1 keys=",X1_keys)
    return np.array(S), np.array(X1), np.array(X2), np.array(y)
if __name__ == '__main__':
    # Smoke test: load the COMPAS dataset and build the (S, X1, X2, y) arrays.
    read_compas()
| [
"pandas.read_csv",
"numpy.corrcoef",
"os.path.join",
"io.open",
"numpy.array",
"pandas.get_dummies",
"pandas.concat",
"pandas.to_datetime"
] | [((697, 770), 'os.path.join', 'os.path.join', (['conf.datadir', '"""compas-analysis/compas-scores-two-years.csv"""'], {}), "(conf.datadir, 'compas-analysis/compas-scores-two-years.csv')\n", (709, 770), False, 'import os\n'), ((956, 975), 'io.open', 'open', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (960, 975), False, 'from io import open\n'), ((1143, 1173), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""","""'}), "(filename, sep=',')\n", (1154, 1173), True, 'import pandas as pd\n'), ((1985, 2012), 'pandas.concat', 'pd.concat', (['my_attrs'], {'axis': '(1)'}), '(my_attrs, axis=1)\n', (1994, 2012), True, 'import pandas as pd\n'), ((1851, 1881), 'pandas.to_datetime', 'pd.to_datetime', (['data[date_val]'], {}), '(data[date_val])\n', (1865, 1881), True, 'import pandas as pd\n'), ((3551, 3562), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (3559, 3562), True, 'import numpy as np\n'), ((3564, 3576), 'numpy.array', 'np.array', (['X1'], {}), '(X1)\n', (3572, 3576), True, 'import numpy as np\n'), ((3578, 3590), 'numpy.array', 'np.array', (['X2'], {}), '(X2)\n', (3586, 3590), True, 'import numpy as np\n'), ((3592, 3603), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3600, 3603), True, 'import numpy as np\n'), ((1738, 1806), 'pandas.get_dummies', 'pd.get_dummies', (['data[string_val]'], {'prefix': 'string_val', 'drop_first': '(True)'}), '(data[string_val], prefix=string_val, drop_first=True)\n', (1752, 1806), True, 'import pandas as pd\n'), ((3094, 3106), 'numpy.array', 'np.array', (['X2'], {}), '(X2)\n', (3102, 3106), True, 'import numpy as np\n'), ((894, 913), 'io.open', 'open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (898, 913), False, 'from io import open\n'), ((2259, 2316), 'numpy.corrcoef', 'np.corrcoef', (['new_data[akey]', "new_data['two_year_recid_1']"], {}), "(new_data[akey], new_data['two_year_recid_1'])\n", (2270, 2316), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hamiltonian Monte Carlo, a gradient-based MCMC algorithm.
@@sample_chain
@@sample_annealed_importance_chain
@@kernel
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl as gradients_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import util as distributions_util
# Public API of this module.
__all__ = [
    "sample_chain",
    "sample_annealed_importance_chain",
    "kernel",
]
# Bundle of per-step HMC kernel outputs. "current_*" fields reflect the
# accepted state after the Metropolis step; "proposed_*" fields reflect the
# leapfrog proposal (kept so callers can chain steps without recomputing
# log-probs and gradients).
KernelResults = collections.namedtuple(
    "KernelResults",
    [
        "log_accept_ratio",
        "current_grads_target_log_prob",  # "Current result" means "accepted".
        "current_target_log_prob",  # "Current result" means "accepted".
        "is_accepted",
        "proposed_grads_target_log_prob",
        "proposed_state",
        "proposed_target_log_prob",
    ])
def _make_dummy_kernel_results(
    dummy_state,
    dummy_target_log_prob,
    dummy_grads_target_log_prob):
  """Builds a placeholder `KernelResults` used to seed a sampling loop."""
  # Mark everything as "accepted" so the first real step starts cleanly.
  all_accepted = array_ops.ones_like(dummy_target_log_prob, dtypes.bool)
  return KernelResults(
      log_accept_ratio=dummy_target_log_prob,
      current_grads_target_log_prob=dummy_grads_target_log_prob,
      current_target_log_prob=dummy_target_log_prob,
      is_accepted=all_accepted,
      proposed_grads_target_log_prob=dummy_grads_target_log_prob,
      proposed_state=dummy_state,
      proposed_target_log_prob=dummy_target_log_prob)
def sample_chain(
    num_results,
    target_log_prob_fn,
    current_state,
    step_size,
    num_leapfrog_steps,
    num_burnin_steps=0,
    num_steps_between_results=0,
    seed=None,
    current_target_log_prob=None,
    current_grads_target_log_prob=None,
    name=None):
  """Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.

  Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm
  that takes a series of gradient-informed steps to produce a Metropolis
  proposal. This function samples from an HMC Markov chain at `current_state`
  and whose stationary distribution has log-unnormalized-density
  `target_log_prob_fn()`.

  This function samples from multiple chains in parallel. It assumes that the
  leftmost dimensions of (each) `current_state` (part) index an independent
  chain. The function `target_log_prob_fn()` sums log-probabilities across
  event dimensions (i.e., current state (part) rightmost dimensions). Each
  element of the output of `target_log_prob_fn()` represents the (possibly
  unnormalized) log-probability of the joint distribution over (all) the
  current state (parts).

  The `current_state` can be represented as a single `Tensor` or a `list` of
  `Tensors` which collectively represent the current state. When specifying a
  `list`, one must also specify a list of `step_size`s.

  Note: `target_log_prob_fn` is called exactly twice.

  Since HMC states are correlated, it is sometimes desirable to produce
  additional intermediate states, and then discard them, ending up with a set
  of states with decreased autocorrelation. See [1]. Such "thinning" is made
  possible by setting `num_steps_between_results > 0`. The chain then takes
  `num_steps_between_results` extra steps between the steps that make it into
  the results. The extra steps are never materialized (in calls to
  `sess.run`), and thus do not increase memory requirements.

  [1]: "Statistically efficient thinning of a Markov chain sampler."
       Art B. Owen. April 2017.
       http://statweb.stanford.edu/~owen/reports/bestthinning.pdf

  #### Examples:

  ##### Sample from a diagonal-variance Gaussian.

  ```python
  tfd = tf.contrib.distributions

  def make_likelihood(true_variances):
    return tfd.MultivariateNormalDiag(
        scale_diag=tf.sqrt(true_variances))

  dims = 10
  dtype = np.float32
  true_variances = tf.linspace(dtype(1), dtype(3), dims)
  likelihood = make_likelihood(true_variances)

  states, kernel_results = hmc.sample_chain(
      num_results=1000,
      target_log_prob_fn=likelihood.log_prob,
      current_state=tf.zeros(dims),
      step_size=0.5,
      num_leapfrog_steps=2,
      num_burnin_steps=500)

  # Compute sample stats.
  sample_mean = tf.reduce_mean(states, axis=0)
  sample_var = tf.reduce_mean(
      tf.squared_difference(states, sample_mean),
      axis=0)
  ```

  ##### Sampling from factor-analysis posteriors with known factors.

  I.e.,

  ```none
  for i=1..n:
    w[i] ~ Normal(0, eye(d))            # prior
    x[i] ~ Normal(loc=matmul(w[i], F))  # likelihood
  ```

  where `F` denotes factors.

  ```python
  tfd = tf.contrib.distributions

  def make_prior(dims, dtype):
    return tfd.MultivariateNormalDiag(
        loc=tf.zeros(dims, dtype))

  def make_likelihood(weights, factors):
    return tfd.MultivariateNormalDiag(
        loc=tf.tensordot(weights, factors, axes=[[0], [-1]]))

  # Setup data.
  num_weights = 10
  num_factors = 4
  num_chains = 100
  dtype = np.float32

  prior = make_prior(num_weights, dtype)
  weights = prior.sample(num_chains)
  factors = np.random.randn(num_factors, num_weights).astype(dtype)
  x = make_likelihood(weights, factors).sample(num_chains)

  def target_log_prob(w):
    # Target joint is: `f(w) = p(w, x | factors)`.
    return prior.log_prob(w) + make_likelihood(w, factors).log_prob(x)

  # Get `num_results` samples from `num_chains` independent chains.
  chains_states, kernels_results = hmc.sample_chain(
      num_results=1000,
      target_log_prob_fn=target_log_prob,
      current_state=tf.zeros([num_chains, dims], dtype),
      step_size=0.1,
      num_leapfrog_steps=2,
      num_burnin_steps=500)

  # Compute sample stats.
  sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
  sample_var = tf.reduce_mean(
      tf.squared_difference(chains_states, sample_mean),
      axis=[0, 1])
  ```

  Args:
    num_results: Integer number of Markov chain draws.
    target_log_prob_fn: Python callable which takes an argument like
      `current_state` (or `*current_state` if it's a list) and returns its
      (possibly unnormalized) log-density under the target distribution.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s). The first `r` dimensions index
      independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
    step_size: `Tensor` or Python `list` of `Tensor`s representing the step
      size for the leapfrog integrator. Must broadcast with the shape of
      `current_state`. Larger step sizes lead to faster progress, but
      too-large step sizes make rejection exponentially more likely. When
      possible, it's often helpful to match per-variable step sizes to the
      standard deviations of the target distribution in each variable.
    num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
      for. Total progress per HMC step is roughly proportional to `step_size *
      num_leapfrog_steps`.
    num_burnin_steps: Integer number of chain steps to take before starting to
      collect results.
      Default value: 0 (i.e., no burn-in).
    num_steps_between_results: Integer number of chain steps between collecting
      a result. Only one out of every `num_steps_between_samples + 1` steps is
      included in the returned results. The number of returned chain states is
      still equal to `num_results`. Default value: 0 (i.e., no thinning).
    seed: Python integer to seed the random number generator.
    current_target_log_prob: (Optional) `Tensor` representing the value of
      `target_log_prob_fn` at the `current_state`. The only reason to specify
      this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    current_grads_target_log_prob: (Optional) Python list of `Tensor`s
      representing gradient of `target_log_prob` at the `current_state` and
      wrt the `current_state`. Must have same shape as `current_state`. The
      only reason to specify this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "hmc_sample_chain").

  Returns:
    next_states: Tensor or Python list of `Tensor`s representing the
      state(s) of the Markov chain(s) at each result step. Has same shape as
      input `current_state` but with a prepended `num_results`-size dimension.
    kernel_results: `collections.namedtuple` of internal calculations used to
      advance the chain.
  """
  with ops.name_scope(
      name, "hmc_sample_chain",
      [num_results, current_state, step_size, num_leapfrog_steps,
       num_burnin_steps, num_steps_between_results, seed,
       current_target_log_prob, current_grads_target_log_prob]):
    with ops.name_scope("initialize"):
      [
          current_state,
          step_size,
          current_target_log_prob,
          current_grads_target_log_prob,
      ] = _prepare_args(
          target_log_prob_fn,
          current_state,
          step_size,
          current_target_log_prob,
          current_grads_target_log_prob)
      num_results = ops.convert_to_tensor(
          num_results,
          dtype=dtypes.int32,
          name="num_results")
      num_leapfrog_steps = ops.convert_to_tensor(
          num_leapfrog_steps,
          dtype=dtypes.int32,
          name="num_leapfrog_steps")
      num_burnin_steps = ops.convert_to_tensor(
          num_burnin_steps,
          dtype=dtypes.int32,
          name="num_burnin_steps")
      num_steps_between_results = ops.convert_to_tensor(
          num_steps_between_results,
          dtype=dtypes.int32,
          name="num_steps_between_results")

    def _run_chain(num_steps, current_state, kernel_results):
      """Runs the chain(s) for `num_steps`."""
      def _loop_body(iter_, current_state, kernel_results):
        return [iter_ + 1] + list(kernel(
            target_log_prob_fn,
            current_state,
            step_size,
            num_leapfrog_steps,
            seed,
            kernel_results.current_target_log_prob,
            kernel_results.current_grads_target_log_prob))
      while_loop_kwargs = dict(
          cond=lambda iter_, *args: iter_ < num_steps,
          body=_loop_body,
          loop_vars=[
              np.int32(0),
              current_state,
              kernel_results,
          ],
      )
      if seed is not None:
        # Sequential iterations are required for reproducible seeded draws.
        while_loop_kwargs["parallel_iterations"] = 1
      return control_flow_ops.while_loop(
          **while_loop_kwargs)[1:]  # Lop-off "iter_".

    def _scan_body(args_list, iter_):
      """Closure which implements `tf.scan` body."""
      current_state, kernel_results = args_list
      return _run_chain(
          # The first result is preceded by the burn-in steps; every later
          # result is preceded by `num_steps_between_results` thinning steps.
          1 + array_ops.where(math_ops.equal(iter_, 0),
                              num_burnin_steps,
                              num_steps_between_results),
          current_state,
          kernel_results)

    scan_kwargs = dict(
        fn=_scan_body,
        elems=math_ops.range(num_results),  # iter_: used to choose burnin.
        initializer=[
            current_state,
            _make_dummy_kernel_results(
                current_state,
                current_target_log_prob,
                current_grads_target_log_prob),
        ])
    if seed is not None:
      scan_kwargs["parallel_iterations"] = 1
    return functional_ops.scan(**scan_kwargs)
def sample_annealed_importance_chain(
    proposal_log_prob_fn,
    num_steps,
    target_log_prob_fn,
    current_state,
    step_size,
    num_leapfrog_steps,
    seed=None,
    name=None):
  """Runs annealed importance sampling (AIS) to estimate normalizing constants.

  This function uses Hamiltonian Monte Carlo to sample from a series of
  distributions that slowly interpolates between an initial "proposal"
  distribution:

  `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`

  and the target distribution:

  `exp(target_log_prob_fn(x) - target_log_normalizer)`,

  accumulating importance weights along the way. The product of these
  importance weights gives an unbiased estimate of the ratio of the
  normalizing constants of the initial distribution and the target
  distribution:

  `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.

  Note: `proposal_log_prob_fn` and `target_log_prob_fn` are called exactly
  three times (although this may be reduced to two times, in the future).

  #### Examples:

  ##### Estimate the normalizing constant of a log-gamma distribution.

  ```python
  tfd = tf.contrib.distributions

  # Run 100 AIS chains in parallel
  num_chains = 100
  dims = 20
  dtype = np.float32

  proposal = tfd.MultivariateNormalDiag(
     loc=tf.zeros([dims], dtype=dtype))

  target = tfd.TransformedDistribution(
    distribution=tfd.Gamma(concentration=dtype(2),
                           rate=dtype(3)),
    bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()),
    event_shape=[dims])

  chains_state, ais_weights, kernels_results = (
      hmc.sample_annealed_importance_chain(
          proposal_log_prob_fn=proposal.log_prob,
          num_steps=1000,
          target_log_prob_fn=target.log_prob,
          step_size=0.2,
          current_state=proposal.sample(num_chains),
          num_leapfrog_steps=2))

  log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
                              - np.log(num_chains))
  log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
  ```

  ##### Estimate marginal likelihood of a Bayesian regression model.

  ```python
  tfd = tf.contrib.distributions

  def make_prior(dims, dtype):
    return tfd.MultivariateNormalDiag(
        loc=tf.zeros(dims, dtype))

  def make_likelihood(weights, x):
    return tfd.MultivariateNormalDiag(
        loc=tf.tensordot(weights, x, axes=[[0], [-1]]))

  # Run 100 AIS chains in parallel
  num_chains = 100
  dims = 10
  dtype = np.float32

  # Make training data.
  x = np.random.randn(num_chains, dims).astype(dtype)
  true_weights = np.random.randn(dims).astype(dtype)
  y = np.dot(x, true_weights) + np.random.randn(num_chains)

  # Setup model.
  prior = make_prior(dims, dtype)
  def target_log_prob_fn(weights):
    return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)

  proposal = tfd.MultivariateNormalDiag(
      loc=tf.zeros(dims, dtype))

  weight_samples, ais_weights, kernel_results = (
      hmc.sample_annealed_importance_chain(
          num_steps=1000,
          proposal_log_prob_fn=proposal.log_prob,
          target_log_prob_fn=target_log_prob_fn,
          current_state=tf.zeros([num_chains, dims], dtype),
          step_size=0.1,
          num_leapfrog_steps=2))

  log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
                             - np.log(num_chains))
  ```

  Args:
    proposal_log_prob_fn: Python callable that returns the log density of the
      initial distribution.
    num_steps: Integer number of Markov chain updates to run. More
      iterations means more expense, but smoother annealing between q
      and p, which in turn means exponentially lower variance for the
      normalizing constant estimator.
    target_log_prob_fn: Python callable which takes an argument like
      `current_state` (or `*current_state` if it's a list) and returns its
      (possibly unnormalized) log-density under the target distribution.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s). The first `r` dimensions index
      independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
    step_size: `Tensor` or Python `list` of `Tensor`s representing the step
      size for the leapfrog integrator. Must broadcast with the shape of
      `current_state`. Larger step sizes lead to faster progress, but
      too-large step sizes make rejection exponentially more likely. When
      possible, it's often helpful to match per-variable step sizes to the
      standard deviations of the target distribution in each variable.
    num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
      for. Total progress per HMC step is roughly proportional to `step_size *
      num_leapfrog_steps`.
    seed: Python integer to seed the random number generator.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "hmc_sample_annealed_importance_chain").

  Returns:
    next_state: `Tensor` or Python list of `Tensor`s representing the
      state(s) of the Markov chain(s) at the final iteration. Has same shape
      as input `current_state`.
    ais_weights: Tensor with the estimated weight(s). Has shape matching
      `target_log_prob_fn(current_state)`.
    kernel_results: `collections.namedtuple` of internal calculations used to
      advance the chain.
  """
  def make_convex_combined_log_prob_fn(iter_):
    # Log-density of the annealing distribution at step `iter_`:
    # a convex combination of proposal and target log-probs.
    def _fn(*args):
      p = proposal_log_prob_fn(*args)
      t = target_log_prob_fn(*args)
      dtype = p.dtype.base_dtype
      beta = (math_ops.cast(iter_ + 1, dtype)
              / math_ops.cast(num_steps, dtype))
      return (1. - beta) * p + beta * t
    return _fn

  with ops.name_scope(
      name, "hmc_sample_annealed_importance_chain",
      [num_steps, current_state, step_size, num_leapfrog_steps, seed]):
    with ops.name_scope("initialize"):
      [
          current_state,
          step_size,
          current_log_prob,
          current_grads_log_prob,
      ] = _prepare_args(
          make_convex_combined_log_prob_fn(iter_=0),
          current_state,
          step_size,
          description="convex_combined_log_prob")
      num_steps = ops.convert_to_tensor(
          num_steps,
          dtype=dtypes.int32,
          name="num_steps")
      num_leapfrog_steps = ops.convert_to_tensor(
          num_leapfrog_steps,
          dtype=dtypes.int32,
          name="num_leapfrog_steps")
    def _loop_body(iter_, ais_weights, current_state, kernel_results):
      """Closure which implements `tf.while_loop` body."""
      current_state_parts = (list(current_state)
                             if _is_list_like(current_state)
                             else [current_state])
      # TODO(b/72994218): Consider refactoring things to avoid this
      # unnecessary call.
      ais_weights += ((target_log_prob_fn(*current_state_parts)
                       - proposal_log_prob_fn(*current_state_parts))
                      / math_ops.cast(num_steps, ais_weights.dtype))
      return [iter_ + 1, ais_weights] + list(kernel(
          make_convex_combined_log_prob_fn(iter_),
          current_state,
          step_size,
          num_leapfrog_steps,
          seed,
          kernel_results.current_target_log_prob,
          kernel_results.current_grads_target_log_prob))

    while_loop_kwargs = dict(
        cond=lambda iter_, *args: iter_ < num_steps,
        body=_loop_body,
        loop_vars=[
            np.int32(0),  # iter_
            array_ops.zeros_like(current_log_prob),  # ais_weights
            current_state,
            _make_dummy_kernel_results(current_state,
                                       current_log_prob,
                                       current_grads_log_prob),
        ])
    if seed is not None:
      # Sequential iterations are required for reproducible seeded draws.
      while_loop_kwargs["parallel_iterations"] = 1
    [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop(
        **while_loop_kwargs)[1:]  # Lop-off "iter_".

  return [current_state, ais_weights, kernel_results]
def kernel(target_log_prob_fn,
           current_state,
           step_size,
           num_leapfrog_steps,
           seed=None,
           current_target_log_prob=None,
           current_grads_target_log_prob=None,
           name=None):
  """Runs one iteration of Hamiltonian Monte Carlo.
  Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)
  algorithm that takes a series of gradient-informed steps to produce
  a Metropolis proposal. This function applies one step of HMC to
  randomly update the variable `x`.
  This function can update multiple chains in parallel. It assumes that all
  leftmost dimensions of `current_state` index independent chain states (and are
  therefore updated independently). The output of `target_log_prob_fn()` should
  sum log-probabilities across all event dimensions. Slices along the rightmost
  dimensions may have different target distributions; for example,
  `current_state[0, :]` could have a different target distribution from
  `current_state[1, :]`. This is up to `target_log_prob_fn()`. (The number of
  independent chains is `tf.size(target_log_prob_fn(*current_state))`.)
  #### Examples:
  ##### Simple chain with warm-up.
  ```python
  tfd = tf.contrib.distributions
  # Tuning acceptance rates:
  dtype = np.float32
  target_accept_rate = 0.631
  num_warmup_iter = 500
  num_chain_iter = 500
  x = tf.get_variable(name="x", initializer=dtype(1))
  step_size = tf.get_variable(name="step_size", initializer=dtype(1))
  target = tfd.Normal(loc=dtype(0), scale=dtype(1))
  next_x, other_results = hmc.kernel(
      target_log_prob_fn=target.log_prob,
      current_state=x,
      step_size=step_size,
      num_leapfrog_steps=3)[:4]
  x_update = x.assign(next_x)
  step_size_update = step_size.assign_add(
      step_size * tf.where(
          tf.exp(tf.minimum(other_results.log_accept_ratio, 0.)) >
              target_accept_rate,
          0.01, -0.01))
  warmup = tf.group([x_update, step_size_update])
  tf.global_variables_initializer().run()
  sess.graph.finalize()  # No more graph building.
  # Warm up the sampler and adapt the step size
  for _ in xrange(num_warmup_iter):
    sess.run(warmup)
  # Collect samples without adapting step size
  samples = np.zeros([num_chain_iter])
  for i in xrange(num_chain_iter):
    _, x_, target_log_prob_, grad_ = sess.run([
        x_update,
        x,
        other_results.current_target_log_prob,
        other_results.current_grads_target_log_prob])
    samples[i] = x_
  print(samples.mean(), samples.std())
  ```
  ##### Sample from more complicated posterior.
  I.e.,
  ```none
    W ~ MVN(loc=0, scale=sigma * eye(dims))
    for i=1...num_samples:
        X[i] ~ MVN(loc=0, scale=eye(dims))
      eps[i] ~ Normal(loc=0, scale=1)
        Y[i] = X[i].T * W + eps[i]
  ```
  ```python
  tfd = tf.contrib.distributions
  def make_training_data(num_samples, dims, sigma):
    dt = np.asarray(sigma).dtype
    zeros = tf.zeros(dims, dtype=dt)
    x = tfd.MultivariateNormalDiag(
        loc=zeros).sample(num_samples, seed=1)
    w = tfd.MultivariateNormalDiag(
        loc=zeros,
        scale_identity_multiplier=sigma).sample(seed=2)
    noise = tfd.Normal(
        loc=dt(0),
        scale=dt(1)).sample(num_samples, seed=3)
    y = tf.tensordot(x, w, axes=[[1], [0]]) + noise
    return y, x, w
  def make_prior(sigma, dims):
    # p(w | sigma)
    return tfd.MultivariateNormalDiag(
        loc=tf.zeros([dims], dtype=sigma.dtype),
        scale_identity_multiplier=sigma)
  def make_likelihood(x, w):
    # p(y | x, w)
    return tfd.MultivariateNormalDiag(
        loc=tf.tensordot(x, w, axes=[[1], [0]]))
  # Setup assumptions.
  dtype = np.float32
  num_samples = 150
  dims = 10
  num_iters = int(5e3)
  true_sigma = dtype(0.5)
  y, x, true_weights = make_training_data(num_samples, dims, true_sigma)
  # Estimate of `log(true_sigma)`.
  log_sigma = tf.get_variable(name="log_sigma", initializer=dtype(0))
  sigma = tf.exp(log_sigma)
  # State of the Markov chain.
  weights = tf.get_variable(
      name="weights",
      initializer=np.random.randn(dims).astype(dtype))
  prior = make_prior(sigma, dims)
  def joint_log_prob_fn(w):
    # f(w) = log p(w, y | x)
    return prior.log_prob(w) + make_likelihood(x, w).log_prob(y)
  weights_update = weights.assign(
      hmc.kernel(target_log_prob_fn=joint_log_prob_fn,
                 current_state=weights,
                 step_size=0.1,
                 num_leapfrog_steps=5)[0])
  with tf.control_dependencies([weights_update]):
    loss = -prior.log_prob(weights)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
  log_sigma_update = optimizer.minimize(loss, var_list=[log_sigma])
  sess.graph.finalize()  # No more graph building.
  tf.global_variables_initializer().run()
  sigma_history = np.zeros(num_iters, dtype)
  weights_history = np.zeros([num_iters, dims], dtype)
  for i in xrange(num_iters):
    _, sigma_, weights_ = sess.run([log_sigma_update, sigma, weights])
    weights_history[i, :] = weights_
    sigma_history[i] = sigma_
  true_weights_ = sess.run(true_weights)
  # Should converge to something close to true_sigma.
  plt.plot(sigma_history);
  plt.ylabel("sigma");
  plt.xlabel("iteration");
  ```
  Args:
    target_log_prob_fn: Python callable which takes an argument like
      `current_state` (or `*current_state` if it's a list) and returns its
      (possibly unnormalized) log-density under the target distribution.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s). The first `r` dimensions index
      independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
    step_size: `Tensor` or Python `list` of `Tensor`s representing the step size
      for the leapfrog integrator. Must broadcast with the shape of
      `current_state`. Larger step sizes lead to faster progress, but too-large
      step sizes make rejection exponentially more likely. When possible, it's
      often helpful to match per-variable step sizes to the standard deviations
      of the target distribution in each variable.
    num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
      for. Total progress per HMC step is roughly proportional to `step_size *
      num_leapfrog_steps`.
    seed: Python integer to seed the random number generator.
    current_target_log_prob: (Optional) `Tensor` representing the value of
      `target_log_prob_fn` at the `current_state`. The only reason to
      specify this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    current_grads_target_log_prob: (Optional) Python list of `Tensor`s
      representing gradient of `current_target_log_prob` at the `current_state`
      and wrt the `current_state`. Must have same shape as `current_state`. The
      only reason to specify this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "hmc_kernel").
  Returns:
    next_state: Tensor or Python list of `Tensor`s representing the state(s)
      of the Markov chain(s) at each result step. Has same shape as
      `current_state`.
    kernel_results: `collections.namedtuple` of internal calculations used to
      advance the chain.
  Raises:
    ValueError: if there isn't one `step_size` or a list with same length as
      `current_state`.
  """
  with ops.name_scope(
      name, "hmc_kernel",
      [current_state, step_size, num_leapfrog_steps, seed,
       current_target_log_prob, current_grads_target_log_prob]):
    with ops.name_scope("initialize"):
      # Normalize state/step-size into parallel lists and fill in the log-prob
      # and its gradients if the caller didn't supply them.
      [current_state_parts, step_sizes, current_target_log_prob,
       current_grads_target_log_prob] = _prepare_args(
           target_log_prob_fn, current_state, step_size,
           current_target_log_prob, current_grads_target_log_prob,
           maybe_expand=True)
      independent_chain_ndims = distributions_util.prefer_static_rank(
          current_target_log_prob)
      # Draw an independent Gaussian momentum for each state part, advancing
      # the seed per part so the draws are not identical.
      current_momentums = []
      for s in current_state_parts:
        current_momentums.append(random_ops.random_normal(
            shape=array_ops.shape(s),
            dtype=s.dtype.base_dtype,
            seed=seed))
        seed = distributions_util.gen_new_seed(
            seed, salt="hmc_kernel_momentums")
      num_leapfrog_steps = ops.convert_to_tensor(
          num_leapfrog_steps,
          dtype=dtypes.int32,
          name="num_leapfrog_steps")
    # Simulate Hamiltonian dynamics to obtain the proposal.
    [
        proposed_momentums,
        proposed_state_parts,
        proposed_target_log_prob,
        proposed_grads_target_log_prob,
    ] = _leapfrog_integrator(current_momentums,
                             target_log_prob_fn,
                             current_state_parts,
                             step_sizes,
                             num_leapfrog_steps,
                             current_target_log_prob,
                             current_grads_target_log_prob)
    # Metropolis-Hastings accept/reject on the Hamiltonian (energy) change.
    energy_change = _compute_energy_change(current_target_log_prob,
                                           current_momentums,
                                           proposed_target_log_prob,
                                           proposed_momentums,
                                           independent_chain_ndims)
    log_accept_ratio = -energy_change
    # u < exp(log_accept_ratio), where u~Uniform[0,1)
    # ==> log(u) < log_accept_ratio
    random_value = random_ops.random_uniform(
        shape=array_ops.shape(energy_change),
        dtype=energy_change.dtype,
        seed=seed)
    random_negative = math_ops.log(random_value)
    is_accepted = random_negative < log_accept_ratio
    accepted_target_log_prob = array_ops.where(is_accepted,
                                               proposed_target_log_prob,
                                               current_target_log_prob)
    # Per-chain selection between proposed and current values.
    next_state_parts = [_choose(is_accepted,
                                proposed_state_part,
                                current_state_part,
                                independent_chain_ndims)
                        for current_state_part, proposed_state_part
                        in zip(current_state_parts, proposed_state_parts)]
    accepted_grads_target_log_prob = [
        _choose(is_accepted,
                proposed_grad,
                grad,
                independent_chain_ndims)
        for proposed_grad, grad
        in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)]
    maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0]
    return [
        maybe_flatten(next_state_parts),
        KernelResults(
            log_accept_ratio=log_accept_ratio,
            current_grads_target_log_prob=accepted_grads_target_log_prob,
            current_target_log_prob=accepted_target_log_prob,
            is_accepted=is_accepted,
            proposed_grads_target_log_prob=proposed_grads_target_log_prob,
            proposed_state=maybe_flatten(proposed_state_parts),
            proposed_target_log_prob=proposed_target_log_prob,
        ),
    ]
def _leapfrog_integrator(current_momentums,
                         target_log_prob_fn,
                         current_state_parts,
                         step_sizes,
                         num_leapfrog_steps,
                         current_target_log_prob=None,
                         current_grads_target_log_prob=None,
                         name=None):
  """Applies `num_leapfrog_steps` of the leapfrog integrator.
  Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
  #### Examples:
  ##### Simple quadratic potential.
  ```python
  tfd = tf.contrib.distributions
  dims = 10
  num_iter = int(1e3)
  dtype = np.float32
  position = tf.placeholder(np.float32)
  momentum = tf.placeholder(np.float32)
  [
    next_momentums,
    next_positions,
  ] = hmc._leapfrog_integrator(
      current_momentums=[momentum],
      target_log_prob_fn=tfd.MultivariateNormalDiag(
          loc=tf.zeros(dims, dtype)).log_prob,
      current_state_parts=[position],
      step_sizes=0.1,
      num_leapfrog_steps=3)[:2]
  sess.graph.finalize()  # No more graph building.
  momentum_ = np.random.randn(dims).astype(dtype)
  position_ = np.random.randn(dims).astype(dtype)
  positions = np.zeros([num_iter, dims], dtype)
  for i in xrange(num_iter):
    position_, momentum_ = sess.run(
        [next_momentums[0], next_positions[0]],
        feed_dict={position: position_, momentum: momentum_})
    positions[i] = position_
  plt.plot(positions[:, 0]);  # Sinusoidal.
  ```
  Args:
    current_momentums: Tensor containing the value(s) of the momentum
      variable(s) to update.
    target_log_prob_fn: Python callable which takes an argument like
      `*current_state_parts` and returns its (possibly unnormalized) log-density
      under the target distribution.
    current_state_parts: Python `list` of `Tensor`s representing the current
      state(s) of the Markov chain(s). The first `independent_chain_ndims` of
      the `Tensor`(s) index different chains.
    step_sizes: Python `list` of `Tensor`s representing the step size for the
      leapfrog integrator. Must broadcast with the shape of
      `current_state_parts`. Larger step sizes lead to faster progress, but
      too-large step sizes make rejection exponentially more likely. When
      possible, it's often helpful to match per-variable step sizes to the
      standard deviations of the target distribution in each variable.
    num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
      for. Total progress per HMC step is roughly proportional to `step_size *
      num_leapfrog_steps`.
    current_target_log_prob: (Optional) `Tensor` representing the value of
      `target_log_prob_fn(*current_state_parts)`. The only reason to specify
      this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    current_grads_target_log_prob: (Optional) Python list of `Tensor`s
      representing gradient of `target_log_prob_fn(*current_state_parts`) wrt
      `current_state_parts`. Must have same shape as `current_state_parts`. The
      only reason to specify this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "hmc_leapfrog_integrator").
  Returns:
    proposed_momentums: Updated value of the momentum.
    proposed_state_parts: Tensor or Python list of `Tensor`s representing the
      state(s) of the Markov chain(s) at each result step. Has same shape as
      input `current_state_parts`.
    proposed_target_log_prob: `Tensor` representing the value of
      `target_log_prob_fn` at `next_state`.
    proposed_grads_target_log_prob: Gradient of `proposed_target_log_prob` wrt
      `next_state`.
  Raises:
    ValueError: if `len(momentums) != len(state_parts)`.
    ValueError: if `len(state_parts) != len(step_sizes)`.
    ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
    TypeError: if `not target_log_prob.dtype.is_floating`.
  """
  def _loop_body(step,
                 current_momentums,
                 current_state_parts,
                 ignore_current_target_log_prob,  # pylint: disable=unused-argument
                 current_grads_target_log_prob):
    # One leapfrog step per `tf.while_loop` iteration; the stale log-prob slot
    # is ignored and replaced by `_leapfrog_step`'s fresh value.
    return [step + 1] + list(_leapfrog_step(current_momentums,
                                            target_log_prob_fn,
                                            current_state_parts,
                                            step_sizes,
                                            current_grads_target_log_prob))
  with ops.name_scope(
      name, "hmc_leapfrog_integrator",
      [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,
       current_target_log_prob, current_grads_target_log_prob]):
    if len(current_momentums) != len(current_state_parts):
      raise ValueError("`momentums` must be in one-to-one correspondence "
                       "with `state_parts`")
    num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps,
                                               name="num_leapfrog_steps")
    # Fill in log-prob/gradients if the caller did not provide them.
    current_target_log_prob, current_grads_target_log_prob = (
        _maybe_call_fn_and_grads(
            target_log_prob_fn,
            current_state_parts,
            current_target_log_prob,
            current_grads_target_log_prob))
    # back_prop=False: the sampler never differentiates through the integrator.
    return control_flow_ops.while_loop(
        cond=lambda iter_, *args: iter_ < num_leapfrog_steps,
        body=_loop_body,
        loop_vars=[
            np.int32(0),  # iter_
            current_momentums,
            current_state_parts,
            current_target_log_prob,
            current_grads_target_log_prob,
        ],
        back_prop=False)[1:]  # Lop-off "iter_".
def _leapfrog_step(current_momentums,
                   target_log_prob_fn,
                   current_state_parts,
                   step_sizes,
                   current_grads_target_log_prob,
                   name=None):
  """Applies one step of the leapfrog integrator."""
  with ops.name_scope(
      name, "_leapfrog_step",
      [current_momentums, current_state_parts, step_sizes,
       current_grads_target_log_prob]):
    # First half-kick: advance each momentum half a step along the gradient.
    half_kicked_momentums = []
    for momentum, size, grad in zip(current_momentums,
                                    step_sizes,
                                    current_grads_target_log_prob):
      half_kicked_momentums.append(momentum + 0.5 * size * grad)
    # Drift: move each state part a full step along the updated momentum.
    proposed_state_parts = []
    for state_part, size, momentum in zip(current_state_parts,
                                          step_sizes,
                                          half_kicked_momentums):
      proposed_state_parts.append(state_part + size * momentum)
    proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts)
    if not proposed_target_log_prob.dtype.is_floating:
      raise TypeError("`target_log_prob_fn` must produce a `Tensor` "
                      "with `float` `dtype`.")
    proposed_grads_target_log_prob = gradients_ops.gradients(
        proposed_target_log_prob, proposed_state_parts)
    if any(g is None for g in proposed_grads_target_log_prob):
      raise ValueError(
          "Encountered `None` gradient. Does your target `target_log_prob_fn` "
          "access all `tf.Variable`s via `tf.get_variable`?\n"
          "  current_state_parts: {}\n"
          "  proposed_state_parts: {}\n"
          "  proposed_grads_target_log_prob: {}".format(
              current_state_parts,
              proposed_state_parts,
              proposed_grads_target_log_prob))
    # Second half-kick: use the gradient evaluated at the new state.
    proposed_momentums = []
    for momentum, size, grad in zip(half_kicked_momentums,
                                    step_sizes,
                                    proposed_grads_target_log_prob):
      proposed_momentums.append(momentum + 0.5 * size * grad)
    return [
        proposed_momentums,
        proposed_state_parts,
        proposed_target_log_prob,
        proposed_grads_target_log_prob,
    ]
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change.

  Energy = potential + kinetic, with potential = -target_log_prob and kinetic
  = 0.5 * sum(momentum**2); returns proposed energy minus current energy, one
  value per independent chain. Rows whose sum would be indeterminate (NaN, or
  mixed +Inf/-Inf) are forced to +Inf so the proposal is rejected.
  """
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since they're a mouthful and lets us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      # Sum of squares is taken over the event dims (everything after the
      # chain dims), computed in log-space for numerical stability.
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))
    # log(0.5 * sum-over-parts of sum(m**2)), i.e. log kinetic energy.
    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),
                                                  axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),
                                                  axis=-1)
    lp0 = -current_target_log_prob   # potential
    lp1 = -proposed_target_log_prob  # proposed_potential
    # x sums (over the last axis) to (lp1 + ke1) - (lp0 + ke0).
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)
    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    # Broadcast the per-row flag across the 4 summands so tf.where lines up.
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))
    return math_ops.reduce_sum(x, axis=-1)
def _choose(is_accepted,
            accepted,
            rejected,
            independent_chain_ndims,
            name=None):
  """Helper to `kernel` which expand_dims `is_accepted` to apply tf.where."""
  def _broadcast_mask_like(x):
    """Reshapes and tiles `is_accepted` to be elementwise-usable with `x`."""
    with ops.name_scope("_choose"):
      # Pad `is_accepted` with trailing singleton dims up to the rank of `x`.
      padded_shape = array_ops.concat([
          array_ops.shape(is_accepted),
          array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)],
                         dtype=dtypes.int32),
      ], axis=0)
      # Tile the padded mask across the non-chain dims of `x`.
      tile_multiples = array_ops.concat([
          array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32),
          array_ops.shape(x)[independent_chain_ndims:],
      ], axis=0)
      mask = array_ops.tile(array_ops.reshape(is_accepted, padded_shape),
                            tile_multiples)
      mask.set_shape(x.shape)
      return mask
  with ops.name_scope(name, "_choose", values=[
      is_accepted, accepted, rejected, independent_chain_ndims]):
    return array_ops.where(_broadcast_mask_like(accepted),
                           accepted,
                           rejected)
def _maybe_call_fn_and_grads(fn,
                             fn_arg_list,
                             fn_result=None,
                             grads_fn_result=None,
                             description="target_log_prob"):
  """Helper which computes `fn_result` and `grads` if needed."""
  # Normalize the argument(s) to a list.
  if _is_list_like(fn_arg_list):
    fn_arg_list = list(fn_arg_list)
  else:
    fn_arg_list = [fn_arg_list]
  # Evaluate the function only when the caller didn't supply its value.
  if fn_result is None:
    fn_result = fn(*fn_arg_list)
  if not fn_result.dtype.is_floating:
    raise TypeError("`{}` must be a `Tensor` with `float` `dtype`.".format(
        description))
  # Likewise for the gradients.
  if grads_fn_result is None:
    grads_fn_result = gradients_ops.gradients(
        fn_result, fn_arg_list)
  if len(fn_arg_list) != len(grads_fn_result):
    raise ValueError("`{}` must be in one-to-one correspondence with "
                     "`grads_{}`".format(*[description]*2))
  for g in grads_fn_result:
    if g is None:
      raise ValueError("Encountered `None` gradient.")
  return fn_result, grads_fn_result
def _prepare_args(target_log_prob_fn, state, step_size,
                  target_log_prob=None, grads_target_log_prob=None,
                  maybe_expand=False, description="target_log_prob"):
  """Helper which processes input args to meet list-like assumptions."""
  # Normalize `state` into a list of tensors.
  if _is_list_like(state):
    state_parts = list(state)
  else:
    state_parts = [state]
  state_parts = [ops.convert_to_tensor(s, name="state") for s in state_parts]
  target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads(
      target_log_prob_fn,
      state_parts,
      target_log_prob,
      grads_target_log_prob,
      description)
  # Normalize `step_size` the same way, matching the log-prob dtype.
  if _is_list_like(step_size):
    step_sizes = list(step_size)
  else:
    step_sizes = [step_size]
  step_sizes = [
      ops.convert_to_tensor(s, name="step_size", dtype=target_log_prob.dtype)
      for s in step_sizes]
  # A single step size is shared by every state part.
  if len(step_sizes) == 1:
    step_sizes *= len(state_parts)
  if len(state_parts) != len(step_sizes):
    raise ValueError("There should be exactly one `step_size` or it should "
                     "have same length as `current_state`.")
  def maybe_flatten(x):
    # Unwrap singleton lists unless the caller wants list outputs.
    if maybe_expand or _is_list_like(state):
      return x
    return x[0]
  return [
      maybe_flatten(state_parts),
      maybe_flatten(step_sizes),
      target_log_prob,
      grads_target_log_prob,
  ]
def _is_list_like(x):
"""Helper which returns `True` if input is `list`-like."""
return isinstance(x, (tuple, list))
def _log_sum_sq(x, axis=None):
  """Computes log(sum(x**2))."""
  # log(x**2) == 2*log(|x|); logsumexp then yields log(sum(x**2)) stably.
  log_abs_x = math_ops.log(math_ops.abs(x))
  return math_ops.reduce_logsumexp(2. * log_abs_x, axis)
| [
"tensorflow.python.ops.distributions.util.gen_new_seed",
"numpy.log",
"numpy.int32",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.ones_like",
"... | [((1535, 1763), 'collections.namedtuple', 'collections.namedtuple', (['"""KernelResults"""', "['log_accept_ratio', 'current_grads_target_log_prob',\n 'current_target_log_prob', 'is_accepted',\n 'proposed_grads_target_log_prob', 'proposed_state',\n 'proposed_target_log_prob']"], {}), "('KernelResults', ['log_accept_ratio',\n 'current_grads_target_log_prob', 'current_target_log_prob',\n 'is_accepted', 'proposed_grads_target_log_prob', 'proposed_state',\n 'proposed_target_log_prob'])\n", (1557, 1763), False, 'import collections\n'), ((9566, 9786), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""hmc_sample_chain"""', '[num_results, current_state, step_size, num_leapfrog_steps,\n num_burnin_steps, num_steps_between_results, seed,\n current_target_log_prob, current_grads_target_log_prob]'], {}), "(name, 'hmc_sample_chain', [num_results, current_state,\n step_size, num_leapfrog_steps, num_burnin_steps,\n num_steps_between_results, seed, current_target_log_prob,\n current_grads_target_log_prob])\n", (9580, 9786), False, 'from tensorflow.python.framework import ops\n'), ((12409, 12443), 'tensorflow.python.ops.functional_ops.scan', 'functional_ops.scan', ([], {}), '(**scan_kwargs)\n', (12428, 12443), False, 'from tensorflow.python.ops import functional_ops\n'), ((18210, 18339), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""hmc_sample_annealed_importance_chain"""', '[num_steps, current_state, step_size, num_leapfrog_steps, seed]'], {}), "(name, 'hmc_sample_annealed_importance_chain', [num_steps,\n current_state, step_size, num_leapfrog_steps, seed])\n", (18224, 18339), False, 'from tensorflow.python.framework import ops\n'), ((28080, 28232), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""hmc_kernel"""', '[current_state, step_size, num_leapfrog_steps, seed,\n current_target_log_prob, current_grads_target_log_prob]'], {}), "(name, 'hmc_kernel', [current_state, step_size,\n 
num_leapfrog_steps, seed, current_target_log_prob,\n current_grads_target_log_prob])\n", (28094, 28232), False, 'from tensorflow.python.framework import ops\n'), ((30250, 30276), 'tensorflow.python.ops.math_ops.log', 'math_ops.log', (['random_value'], {}), '(random_value)\n', (30262, 30276), False, 'from tensorflow.python.ops import math_ops\n'), ((30362, 30441), 'tensorflow.python.ops.array_ops.where', 'array_ops.where', (['is_accepted', 'proposed_target_log_prob', 'current_target_log_prob'], {}), '(is_accepted, proposed_target_log_prob, current_target_log_prob)\n', (30377, 30441), False, 'from tensorflow.python.ops import array_ops\n'), ((36414, 36599), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""hmc_leapfrog_integrator"""', '[current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,\n current_target_log_prob, current_grads_target_log_prob]'], {}), "(name, 'hmc_leapfrog_integrator', [current_momentums,\n current_state_parts, step_sizes, num_leapfrog_steps,\n current_target_log_prob, current_grads_target_log_prob])\n", (36428, 36599), False, 'from tensorflow.python.framework import ops\n'), ((36817, 36885), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_leapfrog_steps'], {'name': '"""num_leapfrog_steps"""'}), "(num_leapfrog_steps, name='num_leapfrog_steps')\n", (36838, 36885), False, 'from tensorflow.python.framework import ops\n'), ((37852, 37979), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""_leapfrog_step"""', '[current_momentums, current_state_parts, step_sizes,\n current_grads_target_log_prob]'], {}), "(name, '_leapfrog_step', [current_momentums,\n current_state_parts, step_sizes, current_grads_target_log_prob])\n", (37866, 37979), False, 'from tensorflow.python.framework import ops\n'), ((38708, 38779), 'tensorflow.python.ops.gradients_impl.gradients', 'gradients_ops.gradients', (['proposed_target_log_prob', 'proposed_state_parts'], {}), 
'(proposed_target_log_prob, proposed_state_parts)\n', (38731, 38779), True, 'from tensorflow.python.ops import gradients_impl as gradients_ops\n'), ((40005, 40177), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""compute_energy_change"""', '([current_target_log_prob, proposed_target_log_prob,\n independent_chain_ndims] + current_momentums + proposed_momentums)'], {}), "(name, 'compute_energy_change', [current_target_log_prob,\n proposed_target_log_prob, independent_chain_ndims] + current_momentums +\n proposed_momentums)\n", (40019, 40177), False, 'from tensorflow.python.framework import ops\n'), ((42117, 42148), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (42136, 42148), False, 'from tensorflow.python.ops import math_ops\n'), ((42992, 43094), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""_choose"""'], {'values': '[is_accepted, accepted, rejected, independent_chain_ndims]'}), "(name, '_choose', values=[is_accepted, accepted, rejected,\n independent_chain_ndims])\n", (43006, 43094), False, 'from tensorflow.python.framework import ops\n'), ((43882, 43929), 'tensorflow.python.ops.gradients_impl.gradients', 'gradients_ops.gradients', (['fn_result', 'fn_arg_list'], {}), '(fn_result, fn_arg_list)\n', (43905, 43929), True, 'from tensorflow.python.ops import gradients_impl as gradients_ops\n'), ((44603, 44641), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['s'], {'name': '"""state"""'}), "(s, name='state')\n", (44624, 44641), False, 'from tensorflow.python.framework import ops\n'), ((44965, 45036), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['s'], {'name': '"""step_size"""', 'dtype': 'target_log_prob.dtype'}), "(s, name='step_size', dtype=target_log_prob.dtype)\n", (44986, 45036), False, 'from tensorflow.python.framework import ops\n'), ((2218, 2273), 
'tensorflow.python.ops.array_ops.ones_like', 'array_ops.ones_like', (['dummy_target_log_prob', 'dtypes.bool'], {}), '(dummy_target_log_prob, dtypes.bool)\n', (2237, 2273), False, 'from tensorflow.python.ops import array_ops\n'), ((9812, 9840), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""initialize"""'], {}), "('initialize')\n", (9826, 9840), False, 'from tensorflow.python.framework import ops\n'), ((10169, 10243), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_results'], {'dtype': 'dtypes.int32', 'name': '"""num_results"""'}), "(num_results, dtype=dtypes.int32, name='num_results')\n", (10190, 10243), False, 'from tensorflow.python.framework import ops\n'), ((10302, 10395), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_leapfrog_steps'], {'dtype': 'dtypes.int32', 'name': '"""num_leapfrog_steps"""'}), "(num_leapfrog_steps, dtype=dtypes.int32, name=\n 'num_leapfrog_steps')\n", (10323, 10395), False, 'from tensorflow.python.framework import ops\n'), ((10447, 10536), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_burnin_steps'], {'dtype': 'dtypes.int32', 'name': '"""num_burnin_steps"""'}), "(num_burnin_steps, dtype=dtypes.int32, name=\n 'num_burnin_steps')\n", (10468, 10536), False, 'from tensorflow.python.framework import ops\n'), ((10597, 10704), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_steps_between_results'], {'dtype': 'dtypes.int32', 'name': '"""num_steps_between_results"""'}), "(num_steps_between_results, dtype=dtypes.int32, name=\n 'num_steps_between_results')\n", (10618, 10704), False, 'from tensorflow.python.framework import ops\n'), ((18359, 18387), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""initialize"""'], {}), "('initialize')\n", (18373, 18387), False, 'from tensorflow.python.framework import ops\n'), ((18697, 18767), 
'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_steps'], {'dtype': 'dtypes.int32', 'name': '"""num_steps"""'}), "(num_steps, dtype=dtypes.int32, name='num_steps')\n", (18718, 18767), False, 'from tensorflow.python.framework import ops\n'), ((18826, 18919), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_leapfrog_steps'], {'dtype': 'dtypes.int32', 'name': '"""num_leapfrog_steps"""'}), "(num_leapfrog_steps, dtype=dtypes.int32, name=\n 'num_leapfrog_steps')\n", (18847, 18919), False, 'from tensorflow.python.framework import ops\n'), ((20406, 20454), 'tensorflow.python.ops.control_flow_ops.while_loop', 'control_flow_ops.while_loop', ([], {}), '(**while_loop_kwargs)\n', (20433, 20454), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((28255, 28283), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""initialize"""'], {}), "('initialize')\n", (28269, 28283), False, 'from tensorflow.python.framework import ops\n'), ((28591, 28653), 'tensorflow.python.ops.distributions.util.prefer_static_rank', 'distributions_util.prefer_static_rank', (['current_target_log_prob'], {}), '(current_target_log_prob)\n', (28628, 28653), True, 'from tensorflow.python.ops.distributions import util as distributions_util\n'), ((29012, 29105), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['num_leapfrog_steps'], {'dtype': 'dtypes.int32', 'name': '"""num_leapfrog_steps"""'}), "(num_leapfrog_steps, dtype=dtypes.int32, name=\n 'num_leapfrog_steps')\n", (29033, 29105), False, 'from tensorflow.python.framework import ops\n'), ((42403, 42428), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""_choose"""'], {}), "('_choose')\n", (42417, 42428), False, 'from tensorflow.python.framework import ops\n'), ((11522, 11570), 'tensorflow.python.ops.control_flow_ops.while_loop', 'control_flow_ops.while_loop', ([], {}), '(**while_loop_kwargs)\n', (11549, 
11570), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((12046, 12073), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['num_results'], {}), '(num_results)\n', (12060, 12073), False, 'from tensorflow.python.ops import math_ops\n'), ((18066, 18097), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(iter_ + 1)', 'dtype'], {}), '(iter_ + 1, dtype)\n', (18079, 18097), False, 'from tensorflow.python.ops import math_ops\n'), ((18114, 18145), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['num_steps', 'dtype'], {}), '(num_steps, dtype)\n', (18127, 18145), False, 'from tensorflow.python.ops import math_ops\n'), ((19487, 19530), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['num_steps', 'ais_weights.dtype'], {}), '(num_steps, ais_weights.dtype)\n', (19500, 19530), False, 'from tensorflow.python.ops import math_ops\n'), ((28904, 28970), 'tensorflow.python.ops.distributions.util.gen_new_seed', 'distributions_util.gen_new_seed', (['seed'], {'salt': '"""hmc_kernel_momentums"""'}), "(seed, salt='hmc_kernel_momentums')\n", (28935, 28970), True, 'from tensorflow.python.ops.distributions import util as distributions_util\n'), ((30142, 30172), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['energy_change'], {}), '(energy_change)\n', (30157, 30172), False, 'from tensorflow.python.ops import array_ops\n'), ((40578, 40610), 'tensorflow.python.ops.array_ops.rank', 'array_ops.rank', (['current_momentum'], {}), '(current_momentum)\n', (40592, 40610), False, 'from tensorflow.python.ops import array_ops\n'), ((40733, 40744), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (40739, 40744), True, 'import numpy as np\n'), ((40772, 40801), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['lk0'], {'axis': '(-1)'}), '(lk0, axis=-1)\n', (40787, 40801), False, 'from tensorflow.python.ops import array_ops\n'), ((40873, 40884), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (40879, 40884), True, 'import 
numpy as np\n'), ((40912, 40941), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['lk1'], {'axis': '(-1)'}), '(lk1, axis=-1)\n', (40927, 40941), False, 'from tensorflow.python.ops import array_ops\n'), ((41139, 41156), 'tensorflow.python.ops.math_ops.exp', 'math_ops.exp', (['lk1'], {}), '(lk1)\n', (41151, 41156), False, 'from tensorflow.python.ops import math_ops\n'), ((42007, 42025), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['x'], {}), '(x)\n', (42022, 42025), False, 'from tensorflow.python.ops import array_ops\n'), ((42861, 42905), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['is_accepted', 'expand_shape'], {}), '(is_accepted, expand_shape)\n', (42878, 42905), False, 'from tensorflow.python.ops import array_ops\n'), ((45773, 45788), 'tensorflow.python.ops.math_ops.abs', 'math_ops.abs', (['x'], {}), '(x)\n', (45785, 45788), False, 'from tensorflow.python.ops import math_ops\n'), ((19976, 19987), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (19984, 19987), True, 'import numpy as np\n'), ((20010, 20048), 'tensorflow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', (['current_log_prob'], {}), '(current_log_prob)\n', (20030, 20048), False, 'from tensorflow.python.ops import array_ops\n'), ((41165, 41182), 'tensorflow.python.ops.math_ops.exp', 'math_ops.exp', (['lk0'], {}), '(lk0)\n', (41177, 41182), False, 'from tensorflow.python.ops import math_ops\n'), ((41500, 41521), 'tensorflow.python.ops.math_ops.is_finite', 'math_ops.is_finite', (['x'], {}), '(x)\n', (41518, 41521), False, 'from tensorflow.python.ops import math_ops\n'), ((41574, 41595), 'tensorflow.python.ops.math_ops.is_finite', 'math_ops.is_finite', (['x'], {}), '(x)\n', (41592, 41595), False, 'from tensorflow.python.ops import math_ops\n'), ((42480, 42508), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['is_accepted'], {}), '(is_accepted)\n', (42495, 42508), False, 'from tensorflow.python.ops import array_ops\n'), ((11336, 
11347), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (11344, 11347), True, 'import numpy as np\n'), ((11801, 11825), 'tensorflow.python.ops.math_ops.equal', 'math_ops.equal', (['iter_', '(0)'], {}), '(iter_, 0)\n', (11815, 11825), False, 'from tensorflow.python.ops import math_ops\n'), ((37335, 37346), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (37343, 37346), True, 'import numpy as np\n'), ((42773, 42791), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['x'], {}), '(x)\n', (42788, 42791), False, 'from tensorflow.python.ops import array_ops\n'), ((28807, 28825), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['s'], {}), '(s)\n', (28822, 28825), False, 'from tensorflow.python.ops import array_ops\n'), ((41776, 41810), 'tensorflow.python.ops.array_ops.rank', 'array_ops.rank', (['is_sum_determinate'], {}), '(is_sum_determinate)\n', (41790, 41810), False, 'from tensorflow.python.ops import array_ops\n'), ((42712, 42739), 'tensorflow.python.ops.array_ops.rank', 'array_ops.rank', (['is_accepted'], {}), '(is_accepted)\n', (42726, 42739), False, 'from tensorflow.python.ops import array_ops\n'), ((42536, 42553), 'tensorflow.python.ops.array_ops.rank', 'array_ops.rank', (['x'], {}), '(x)\n', (42550, 42553), False, 'from tensorflow.python.ops import array_ops\n'), ((42556, 42583), 'tensorflow.python.ops.array_ops.rank', 'array_ops.rank', (['is_accepted'], {}), '(is_accepted)\n', (42570, 42583), False, 'from tensorflow.python.ops import array_ops\n')] |
import argparse
import logging
import os
import json
import pickle
import random
import time
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
from transformers import AdamW, get_linear_schedule_with_warmup, set_seed
from utils import set_logger, set_seed
from transformers import BertConfig, BertTokenizer
from transformers import AutoModelForSequenceClassification, AutoConfig, AutoTokenizer, AutoModel
from net.bert_base import BertForSequenceClassification
from net.bert_attention import BertForSequenceClassificationAttention
from net.bert_lstm import BertForSequenceClassificationLSTM
from net.bert_lstm_attention import BertForSequenceClassificationLSTMAttenion
from processor import sentiment_processors as processors
from processor import sentiment_convert_examples_to_features, SentimentDataset
from train_and_eval import train, test, predict, _predict
# Module-level logger for this training script.
logger = logging.getLogger(__name__)
# Maps a --model_name CLI value to its (config class, model class, tokenizer class)
# triple. All variants share BertConfig/BertTokenizer and differ only in the
# classification head stacked on top of BERT.
MODEL_CLASSES = {
    "bert_base": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "bert_attention": (BertConfig, BertForSequenceClassificationAttention, BertTokenizer),
    "bert_lstm": (BertConfig, BertForSequenceClassificationLSTM, BertTokenizer),
    "bert_lstm_attention": (BertConfig, BertForSequenceClassificationLSTMAttenion, BertTokenizer),
}
def main():
    """Entry point: parse CLI arguments, set up logging/seeding, then run
    training and/or evaluation of a BERT-based sentiment classifier.

    Fixes relative to the previous version:
    - removed the duplicated ``test_dataset = np.array(test_dataset)`` line;
    - logs consistently through the module-level ``logger`` instead of mixing
      it with the root ``logging`` module.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--log_dir", default="log", type=str, required=True, help="设置日志的输出目录")
    parser.add_argument(
        "--dataset",
        choices=["ISEAR", "TEC", "IECE", "SMP2020"],
        default="ISEAR",
        type=str,
        help="应用的数据集,ISEAR, TEC, IECE, SMP2020中4选1",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--pre_train_path",
        default=None,
        type=str,
        required=True,
        help="预训练模型所在的路径,包括 pytorch_model.bin, vocab.txt, bert_config.json",
    )
    parser.add_argument(
        "--output_dir",
        default="output",
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--max_seq_length", default=256, type=int, help="输入到bert的最大长度,通常不应该超过512")
    parser.add_argument("--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.")
    parser.add_argument("--num_train_epochs", default=20, type=int, help="epoch 数目")
    parser.add_argument("--train_batch_size", default=8, type=int, help="训练集的batch_size")
    parser.add_argument("--eval_batch_size", default=512, type=int, help="验证集的batch_size")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1, help="梯度累计更新的步骤,用来弥补GPU过小的情况")
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="学习率")
    parser.add_argument("--weight_decay", default=0.01, type=float, help="权重衰减")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="最大的梯度更新")
    parser.add_argument("--seed", type=int, default=233, help="random seed for initialization")
    parser.add_argument(
        "--warmup_rate", default=0.00, type=float, help="让学习增加到1的步数,在warmup_steps后,再衰减到0,这里设置一个小数,在总训练步数*rate步时开始增加到1"
    )
    args = parser.parse_args()

    # Per-run output directory: <output_dir>/<dataset>_<model_name>.
    args.output_dir = os.path.join(args.output_dir, args.dataset + "_" + args.model_name)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    assert os.path.exists(os.path.join("data", args.dataset))
    assert os.path.exists(args.pre_train_path)
    assert os.path.exists(args.output_dir)

    # Single-device setup (multi-GPU is intentionally not supported yet).
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    set_seed(args.seed)

    # Timestamped log file, e.g. log/ISEAR_bert_base2021_01_01_12_00_00.log
    log_dir = os.path.join(
        args.log_dir,
        args.dataset + "_" + args.model_name + time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time())) + ".log",
    )
    set_logger(log_dir)

    data_dir = os.path.join("data", args.dataset)
    processor = processors[args.dataset](args, data_dir)
    label_list = processor.get_labels()
    num_labels = len(label_list)

    args.model_name = args.model_name.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_name]
    if args.do_train:
        logger.info("loading pretrained model... ...")
        config = config_class.from_pretrained(args.pre_train_path, num_labels=num_labels)
        tokenizer = tokenizer_class.from_pretrained(args.pre_train_path, do_lower_case=args.do_lower_case)
        # Save config/vocab next to the fine-tuned weights so evaluation can reload them.
        config.save_pretrained(args.output_dir)
        tokenizer.save_vocabulary(args.output_dir)
        model = model_class.from_pretrained(args.pre_train_path, config=config, args=args)
        model.to(args.device)
        logger.info("load pretrained model end... ...")
        logger.info("Training parameters %s", args)

    def convert_to_dataset(examples):
        # NOTE: closes over `tokenizer`, which is (re)bound in the do_train /
        # do_eval branches above/below before this helper is ever called.
        features = sentiment_convert_examples_to_features(
            examples=examples, tokenizer=tokenizer, max_length=args.max_seq_length, label_list=label_list
        )
        return SentimentDataset(features)

    # Training
    if args.do_train:
        logger.info("loading dataset... ...")
        train_examples = processor.get_train_examples()
        train_dataset = convert_to_dataset(train_examples)
        dev_examples = processor.get_dev_examples()
        dev_dataset = convert_to_dataset(dev_examples)
        logger.info("dataset loaded...")
        train_dataset = np.array(train_dataset)
        dev_dataset = np.array(dev_dataset)
        logger.info("start training... ...")
        train(args, train_dataset, dev_dataset, model)
        logger.info("train end...")

    if args.do_eval:
        logger.info("loading trained model... ...")
        # Reload config/tokenizer/weights saved (or updated) in output_dir.
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        config = config_class.from_pretrained(args.output_dir, num_labels=num_labels)
        model = model_class.from_pretrained(args.output_dir, config=config, args=args)
        model.to(args.device)
        logger.info("load trained model end... ...")
        logger.info("Evaluation parameters %s", args)

    # Evaluation
    if args.do_eval:
        logger.info("loading dataset... ...")
        test_examples = processor.get_test_examples()
        test_dataset = convert_to_dataset(test_examples)
        logger.info("dataset loaded...")
        test_dataset = np.array(test_dataset)  # was duplicated before; once is enough
        logger.info("start evaluating... ...")
        test_probs = test(args, model, test_dataset)
        logger.info("evaluate end...")
# Script entry point: all configuration comes from the CLI flags parsed in main().
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"os.path.exists",
"utils.set_logger",
"argparse.ArgumentParser",
"os.makedirs",
"train_and_eval.train",
"os.path.join",
"processor.sentiment_convert_examples_to_features",
"numpy.array",
"torch.cuda.is_available",
"time.time",
"processor.SentimentDataset",
"logging.info"... | [((956, 983), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (973, 983), False, 'import logging\n'), ((1381, 1406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1404, 1406), False, 'import argparse\n'), ((3947, 4014), 'os.path.join', 'os.path.join', (['args.output_dir', "(args.dataset + '_' + args.model_name)"], {}), "(args.output_dir, args.dataset + '_' + args.model_name)\n", (3959, 4014), False, 'import os\n'), ((4170, 4205), 'os.path.exists', 'os.path.exists', (['args.pre_train_path'], {}), '(args.pre_train_path)\n', (4184, 4205), False, 'import os\n'), ((4217, 4248), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (4231, 4248), False, 'import os\n'), ((4348, 4367), 'utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (4356, 4367), False, 'from utils import set_logger, set_seed\n'), ((4550, 4569), 'utils.set_logger', 'set_logger', (['log_dir'], {}), '(log_dir)\n', (4560, 4569), False, 'from utils import set_logger, set_seed\n'), ((4586, 4620), 'os.path.join', 'os.path.join', (['"""data"""', 'args.dataset'], {}), "('data', args.dataset)\n", (4598, 4620), False, 'import os\n'), ((4026, 4057), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (4040, 4057), False, 'import os\n'), ((4067, 4095), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (4078, 4095), False, 'import os\n'), ((4123, 4157), 'os.path.join', 'os.path.join', (['"""data"""', 'args.dataset'], {}), "('data', args.dataset)\n", (4135, 4157), False, 'import os\n'), ((4909, 4956), 'logging.info', 'logging.info', (['"""loading pretrained model... ..."""'], {}), "('loading pretrained model... ...')\n", (4921, 4956), False, 'import logging\n'), ((5382, 5430), 'logging.info', 'logging.info', (['"""load pretrained model end... ..."""'], {}), "('load pretrained model end... 
...')\n", (5394, 5430), False, 'import logging\n'), ((5541, 5679), 'processor.sentiment_convert_examples_to_features', 'sentiment_convert_examples_to_features', ([], {'examples': 'examples', 'tokenizer': 'tokenizer', 'max_length': 'args.max_seq_length', 'label_list': 'label_list'}), '(examples=examples, tokenizer=\n tokenizer, max_length=args.max_seq_length, label_list=label_list)\n', (5579, 5679), False, 'from processor import sentiment_convert_examples_to_features, SentimentDataset\n'), ((5712, 5738), 'processor.SentimentDataset', 'SentimentDataset', (['features'], {}), '(features)\n', (5728, 5738), False, 'from processor import sentiment_convert_examples_to_features, SentimentDataset\n'), ((5785, 5823), 'logging.info', 'logging.info', (['"""loading dataset... ..."""'], {}), "('loading dataset... ...')\n", (5797, 5823), False, 'import logging\n'), ((6054, 6087), 'logging.info', 'logging.info', (['"""dataset loaded..."""'], {}), "('dataset loaded...')\n", (6066, 6087), False, 'import logging\n'), ((6113, 6136), 'numpy.array', 'np.array', (['train_dataset'], {}), '(train_dataset)\n', (6121, 6136), True, 'import numpy as np\n'), ((6159, 6180), 'numpy.array', 'np.array', (['dev_dataset'], {}), '(dev_dataset)\n', (6167, 6180), True, 'import numpy as np\n'), ((6190, 6227), 'logging.info', 'logging.info', (['"""start training... ..."""'], {}), "('start training... ...')\n", (6202, 6227), False, 'import logging\n'), ((6236, 6282), 'train_and_eval.train', 'train', (['args', 'train_dataset', 'dev_dataset', 'model'], {}), '(args, train_dataset, dev_dataset, model)\n', (6241, 6282), False, 'from train_and_eval import train, test, predict, _predict\n'), ((6291, 6319), 'logging.info', 'logging.info', (['"""train end..."""'], {}), "('train end...')\n", (6303, 6319), False, 'import logging\n'), ((6350, 6394), 'logging.info', 'logging.info', (['"""loading trained model... ..."""'], {}), "('loading trained model... 
...')\n", (6362, 6394), False, 'import logging\n'), ((6709, 6754), 'logging.info', 'logging.info', (['"""load trained model end... ..."""'], {}), "('load trained model end... ...')\n", (6721, 6754), False, 'import logging\n'), ((6856, 6894), 'logging.info', 'logging.info', (['"""loading dataset... ..."""'], {}), "('loading dataset... ...')\n", (6868, 6894), False, 'import logging\n'), ((7014, 7047), 'logging.info', 'logging.info', (['"""dataset loaded..."""'], {}), "('dataset loaded...')\n", (7026, 7047), False, 'import logging\n'), ((7072, 7094), 'numpy.array', 'np.array', (['test_dataset'], {}), '(test_dataset)\n', (7080, 7094), True, 'import numpy as np\n'), ((7118, 7140), 'numpy.array', 'np.array', (['test_dataset'], {}), '(test_dataset)\n', (7126, 7140), True, 'import numpy as np\n'), ((7150, 7189), 'logging.info', 'logging.info', (['"""start evaluating... ..."""'], {}), "('start evaluating... ...')\n", (7162, 7189), False, 'import logging\n'), ((7211, 7242), 'train_and_eval.test', 'test', (['args', 'model', 'test_dataset'], {}), '(args, model, test_dataset)\n', (7215, 7242), False, 'from train_and_eval import train, test, predict, _predict\n'), ((7251, 7282), 'logging.info', 'logging.info', (['"""evaluate end..."""'], {}), "('evaluate end...')\n", (7263, 7282), False, 'import logging\n'), ((4306, 4331), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4329, 4331), False, 'import torch\n'), ((4516, 4527), 'time.time', 'time.time', ([], {}), '()\n', (4525, 4527), False, 'import time\n')] |
"""The classic exhibitor of chaos, consisting of 3 coupled ODEs.
The ODEs are derived by modelling, with many simplifications,
the fluid convection between horizontal plates with different temperatures.
Its phase-plot (with typical param settings) looks like a butterfly.
See demo.py for more info.
"""
import numpy as np
import dapper.mods as modelling
from .extras import LPs, d2x_dtdx, dstep_dx
# Constants
# The classic Lorenz-63 parameter choice (sigma=10, rho=28, beta=8/3).
sig = 10.0
rho = 28.0
beta = 8.0/3
# Suggested values
x0 = np.array([1.509, -1.531, 25.46])  # suggested initial state
Tplot = 4.0  # suggested plotting horizon
@modelling.ens_compatible
def dxdt(x):
    """Right-hand side of the Lorenz-63 ODE system for state ``x = (x, y, z)``."""
    u, v, w = x
    return np.array([
        sig * (v - u),        # dx/dt
        rho * u - v - u * w,  # dy/dt
        u * v - beta * w,     # dz/dt
    ])
# Time stepper: 4th-order Runge-Kutta; the system is autonomous
# (dxdt does not depend on time explicitly).
step = modelling.with_rk4(dxdt, autonom=True)
| [
"numpy.array",
"dapper.mods.with_rk4"
] | [((478, 510), 'numpy.array', 'np.array', (['[1.509, -1.531, 25.46]'], {}), '([1.509, -1.531, 25.46])\n', (486, 510), True, 'import numpy as np\n'), ((760, 798), 'dapper.mods.with_rk4', 'modelling.with_rk4', (['dxdt'], {'autonom': '(True)'}), '(dxdt, autonom=True)\n', (778, 798), True, 'import dapper.mods as modelling\n'), ((728, 750), 'numpy.array', 'np.array', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (736, 750), True, 'import numpy as np\n')] |
import matplotlib
#matplotlib.use('TkAgg')
from config import *
from plot_utils import *
from shared_utils import *
import pickle as pkl
import numpy as np
from collections import OrderedDict
from matplotlib import pyplot as plt
from pymc3.stats import quantiles
import os
import pandas as pd
from pathlib import Path
# def curves(use_interactions=True, use_report_delay=True, prediction_day=30, save_plot=False):
# Load only one county
def curves(start, county, n_weeks=3, model_i=35, save_plot=False):
    """Plot model case-count predictions (mean plus quantile bands) against
    reported data for one county, and record the trend probability in the
    day's metadata.csv.

    Parameters
    ----------
    start : int or str
        Offset in days from 2020-01-28 at which the training window starts.
    county : str
        County name; key into the dict returned by make_county_dict().
    n_weeks : int
        Length of the training window in weeks.
    model_i : int
        Index of the stored model/prediction window to load.
    save_plot : bool
        If True, save the figure under ../figures/<year>_<month>_<day>/.

    Returns
    -------
    matplotlib figure with the county curve plot.
    """
    # County id/name metadata pickled by the preprocessing pipeline.
    with open('../data/counties/counties.pkl', "rb") as f:
        counties = pkl.load(f)
    # CLI callers pass strings; coerce once up front.
    start = int(start)
    n_weeks = int(n_weeks)
    model_i = int(model_i)
    # with open('../data/comparison.pkl', "rb") as f:
    #     best_model = pkl.load(f)
    # update to day and new limits!
    xlim = (5.5, 15.5)
    ylim = (47, 56) # <- 10 weeks
    #countyByName = OrderedDict(
    #    [('Düsseldorf', '05111'), ('Leipzig', '14713'), ('Nürnberg', '09564'), ('München', '09162')])
    countyByName = make_county_dict()
    # Feed the requested county in here.
    plot_county_names = {"covid19": [county]}
    # Reference date of the data set; `start` counts days from here.
    start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
    year = str(start_day)[:4]
    month = str(start_day)[5:7]
    day = str(start_day)[8:10]
    # if os.path.exists("../figures/{}_{}_{}/curve_trend_{}.png".format(year, month, day,countyByName[county])):
    #    return
    day_folder_path = "../figures/{}_{}_{}".format(year, month, day)
    Path(day_folder_path).mkdir(parents=True, exist_ok=True)
    # check for metadata file: create one row per county with an empty probText
    # column if it does not exist yet for this day.
    if not os.path.isfile("../figures/{}_{}_{}/metadata.csv".format(year, month, day)):
        ids = []
        for key in counties:
            ids.append(int(key))
        df = pd.DataFrame(data=ids, columns=["countyID"])
        df["probText"] = ""
        df.to_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day))
    # colors for curves
    #red
    C1 = "#D55E00"
    C2 = "#E69F00"
    #C3 = "#0073CF"
    #green
    C4 = "#188500"
    C5 = "#29c706"
    #C6 = "#0073CF"
    # quantiles we want to plot
    qs = [0.25, 0.50, 0.75]
    fig = plt.figure(figsize=(12, 6))
    grid = plt.GridSpec(
        1,
        1,
        top=0.9,
        bottom=0.2,
        left=0.07,
        right=0.97,
        hspace=0.25,
        wspace=0.15,
    )
    # for i, disease in enumerate(diseases):
    i = 0
    disease = "covid19"
    prediction_region = "germany"
    data = load_daily_data_n_weeks(start, n_weeks, disease, prediction_region, counties)
    start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
    i_start_day = 0
    # day_0: end of the training window; +/-5-day margins around it.
    day_0 = start_day + pd.Timedelta(days=n_weeks*7+5)
    day_m5 = day_0 - pd.Timedelta(days=5)
    day_p5 = day_0 + pd.Timedelta(days=5)
    _, target, _, _ = split_data(
        data,
        train_start=start_day,
        test_start=day_0,
        post_test=day_p5)
    county_ids = target.columns
    county_id = countyByName[county]
    ### SELECTION CRITERION ###
    #if np.count_non_zero(target[county_id]) < 7: #???
    #    stdd = 10
    #    gaussian = lambda x: np.exp( (-(x)**2) / (2* stdd**2) )
    # Load our prediction samples
    res = load_pred_model_window(model_i, start, n_weeks)
    res_trend = load_pred_model_window(model_i, start, n_weeks, trend=True)
    n_days = (day_p5 - start_day).days
    # Reshape flat samples to (sample, day, county); 412 = number of counties
    # (assumed fixed by the model; confirm against load_pred_model_window).
    prediction_samples = np.reshape(res['y'], (res['y'].shape[0], -1, 412))
    prediction_samples_trend = np.reshape(res_trend['μ'], (res_trend['μ'].shape[0], -1, 412))
    prediction_samples = prediction_samples[:,i_start_day:i_start_day+n_days,:]
    prediction_samples_trend = prediction_samples_trend[:,i_start_day:i_start_day+n_days,:]
    # Extend the date index past the observed data up to day_p5 - 1.
    ext_index = pd.DatetimeIndex([d for d in target.index] + \
            [d for d in pd.date_range(target.index[-1]+timedelta(1),day_p5-timedelta(1))])
    # TODO: figure out where quantiles comes from and if its pymc3, how to replace it
    prediction_quantiles = quantiles(prediction_samples, (5, 25, 75, 95))
    # Per-county time series of the sample mean and the 5/25/75/95 quantiles.
    prediction_mean = pd.DataFrame(
        data=np.mean(
            prediction_samples,
            axis=0),
        index=ext_index,
        columns=target.columns)
    prediction_q25 = pd.DataFrame(
        data=prediction_quantiles[25],
        index=ext_index,
        columns=target.columns)
    prediction_q75 = pd.DataFrame(
        data=prediction_quantiles[75],
        index=ext_index,
        columns=target.columns)
    prediction_q5 = pd.DataFrame(
        data=prediction_quantiles[5],
        index=ext_index,
        columns=target.columns)
    prediction_q95 = pd.DataFrame(
        data=prediction_quantiles[95],
        index=ext_index,
        columns=target.columns)
    prediction_mean_trend = pd.DataFrame(
        data=np.mean(
            prediction_samples_trend,
            axis=0),
        index=ext_index,
        columns=target.columns)
    # Unnecessary for-loop: plot_county_names[disease] always holds exactly
    # one county here, so this runs once.
    for j, name in enumerate(plot_county_names[disease]):
        ax = fig.add_subplot(grid[j, i])
        county_id = countyByName[name]
        dates = [pd.Timestamp(day) for day in ext_index]
        days = [ (day - min(dates)).days for day in dates]
        # plot our predictions w/ quartiles
        p_pred = ax.plot_date(
            dates,
            prediction_mean[county_id],
            "-",
            color=C1,
            linewidth=2.0,
            zorder=4)
        # plot our predictions w/ quartiles
        p_quant = ax.fill_between(
            dates,
            prediction_q25[county_id],
            prediction_q75[county_id],
            facecolor=C2,
            alpha=0.5,
            zorder=1)
        ax.plot_date(
            dates,
            prediction_q25[county_id],
            ":",
            color=C2,
            linewidth=2.0,
            zorder=3)
        ax.plot_date(
            dates,
            prediction_q75[county_id],
            ":",
            color=C2,
            linewidth=2.0,
            zorder=3)
        # plot ground truth
        p_real = ax.plot_date(dates[:-5], target[county_id], "k.")
        print(dates[-5]-pd.Timedelta(12, unit='h'))
        # Vertical markers: solid line at the nowcast/forecast boundary
        # (last 5 days), dashed line 5 days earlier; shifted by 12h so they
        # fall between day ticks.
        ax.axvline(dates[-5]-pd.Timedelta(12,unit='h'),ls='-', lw=2, c='dodgerblue')
        ax.axvline(dates[-10]-pd.Timedelta(12,unit='h'),ls='--', lw=2, c='lightskyblue')
        ax.set_ylabel("Fallzahlen/Tag nach Meldedatum", fontsize=16)
        ax.tick_params(axis="both", direction='out',
                       size=6, labelsize=16, length=6
                       )
        # Tick every 5 days, labelled DD.MM.YYYY.
        ticks = [start_day+pd.Timedelta(days=i) for i in [0,5,10,15,20,25,30,35,40]]
        labels = ["{}.{}.{}".format(str(d)[8:10], str(d)[5:7], str(d)[:4]) for d in ticks]
        plt.xticks(ticks,labels)
        #new_ticks = plt.get_xtickslabels()
        plt.setp(ax.get_xticklabels()[-4], color="red")
        plt.setp(ax.get_xticklabels(), rotation=45)
        ax.autoscale(True)
        p_quant2 = ax.fill_between(
            dates,
            prediction_q5[county_id],
            prediction_q95[county_id],
            facecolor=C2,
            alpha=0.25,
            zorder=0)
        ax.plot_date(dates, prediction_q5[county_id], ":",
                     color=C2, alpha=0.5, linewidth=2.0, zorder=1)
        ax.plot_date(dates, prediction_q95[county_id], ":",
                     color=C2, alpha=0.5, linewidth=2.0, zorder=1)
        # Plot the trend.
        '''
        p_pred_trend = ax.plot_date(
            dates,
            prediction_mean_trend[county_id],
            "-",
            color="green",
            linewidth=2.0,
            zorder=4)
        '''
        # Compute probability of increase/decrease from the posterior trend
        # weight W_t_t: prob2 = share of samples with a positive slope.
        i_county = county_ids.get_loc(county_id)
        trace = load_trace_window(disease, model_i, start, n_weeks)
        trend_params = pm.trace_to_dataframe(trace, varnames=["W_t_t"]).values
        # (1000 samples, 412 counties, 2 weights); take weight 1 for this county.
        trend_w2 = np.reshape(trend_params, newshape=(1000,412,2))[:,i_county,1]
        prob2 = np.mean(trend_w2>0)
        # Set axis limits.
        ylimmax = max(3*(target[county_id]).max(),10)
        ax.set_ylim([-(1/30)*ylimmax,ylimmax])
        ax.set_xlim([start_day,day_p5-pd.Timedelta(days=1)])
        if (i == 0) & (j == 0):
            ax.legend([p_real[0], p_pred[0], p_quant, p_quant2],
                      ["Daten RKI", "Modell",
                       "25\%-75\%-Quantil", "5\%-95\%-Quantil"],
                      fontsize=16, loc="upper left")
        # Not perfectly positioned.
        print("uheufbhwio")
        print(ax.get_xticks()[-5])
        print(ax.get_ylim()[1])
        pos1 = tuple(ax.transData.transform((ax.get_xticks()[-3], ax.get_ylim()[1])))
        pos1 = (ax.get_xticks()[-5], ax.get_ylim()[1])
        print(pos1)
        fontsize_bluebox = 18
        # Arrow-shaped "Nowcast"/"Forecast" labels, anchored in data coordinates.
        fig.text(ax.get_xticks()[-5]+0.65, ax.get_ylim()[1],"Nowcast",ha="left",va="top",fontsize=fontsize_bluebox,bbox=dict(facecolor='lightskyblue', boxstyle='rarrow'), transform=ax.transData)
        # fig.text(pos1[0]/1200, pos1[1]/600,"Nowcast",fontsize=fontsize_bluebox,bbox=dict(facecolor='cornflowerblue'))
        fig.text(ax.get_xticks()[-4]+0.65, ax.get_ylim()[1],"Forecast",ha="left", va="top",fontsize=fontsize_bluebox,bbox=dict(facecolor='dodgerblue', boxstyle='rarrow'), transform=ax.transData)
        '''
        fig.text(0,
                1 + 0.025,
                r"$\textbf{" + plot_county_names["covid19"][j]+ r"}$",
                fontsize=22,
                transform=ax.transAxes)
        '''
        #plt.yticks(ax.get_yticks()[:-1], ax.get_yticklabels()[:-1])
        # Store text in csv.
        #fontsize_probtext = 14
        if prob2 >=0.5:
            #fig.text(0.865, 0.685, "Die Fallzahlen \n werden mit einer \n Wahrscheinlichkeit \n von {:2.1f}\% steigen.".format(prob2*100), fontsize=fontsize_probtext,bbox=dict(facecolor='white'))
            probText = "Die Fallzahlen werden mit einer Wahrscheinlichkeit von {:2.1f}\% steigen.".format(prob2*100)
        else:
            probText = "Die Fallzahlen werden mit einer Wahrscheinlichkeit von {:2.1f}\% fallen.".format(100-prob2*100)
        #fig.text(0.865, 0.685, "Die Fallzahlen \n werden mit einer \n Wahrscheinlichkeit \n von {:2.1f}\% fallen.".format(100-prob2*100), fontsize=fontsize_probtext ,bbox=dict(facecolor='white'))
        print(county_id)
        # Persist the signed trend probability for this county into the
        # per-day metadata.csv (positive = rising, negative = falling).
        df = pd.read_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day), index_col=0)
        county_ix = df["countyID"][df["countyID"]==int(county_id)].index[0]
        if prob2 >=0.5:
            probVal = prob2*100
        else:
            probVal = -(100-prob2*100)
        df.iloc[county_ix, 1] = probVal#= probText
        df.to_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day))
        print(probVal)
    plt.tight_layout()
    if save_plot:
        year = str(start_day)[:4]
        month = str(start_day)[5:7]
        day = str(start_day)[8:10]
        day_folder_path = "../figures/{}_{}_{}".format(year, month, day)
        Path(day_folder_path).mkdir(parents=True, exist_ok=True)
        plt.savefig("../figures/{}_{}_{}/curve_{}.png".format(year, month, day,countyByName[county]), dpi=200)
    plt.close()
    return fig
if __name__ == "__main__":
    import sys
    # Positional CLI layout: argv[2] = start offset (days since 2020-01-28),
    # argv[4] = county name; argv[1]/argv[3] are presumably flag names and
    # are ignored — TODO confirm against the calling script.
    start = sys.argv[2]
    county = sys.argv[4]
    _ = curves(start, county ,save_plot=True)
| [
"numpy.mean",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.xticks",
"pathlib.Path",
"pandas.Timedelta",
"pickle.load",
"matplotlib.pyplot.close",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pymc3.stats.quantiles",
"pandas.Timestamp"... | [((2132, 2159), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2142, 2159), True, 'from matplotlib import pyplot as plt\n'), ((2171, 2263), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(1)', '(1)'], {'top': '(0.9)', 'bottom': '(0.2)', 'left': '(0.07)', 'right': '(0.97)', 'hspace': '(0.25)', 'wspace': '(0.15)'}), '(1, 1, top=0.9, bottom=0.2, left=0.07, right=0.97, hspace=0.25,\n wspace=0.15)\n', (2183, 2263), True, 'from matplotlib import pyplot as plt\n'), ((3385, 3435), 'numpy.reshape', 'np.reshape', (["res['y']", "(res['y'].shape[0], -1, 412)"], {}), "(res['y'], (res['y'].shape[0], -1, 412))\n", (3395, 3435), True, 'import numpy as np\n'), ((3468, 3530), 'numpy.reshape', 'np.reshape', (["res_trend['μ']", "(res_trend['μ'].shape[0], -1, 412)"], {}), "(res_trend['μ'], (res_trend['μ'].shape[0], -1, 412))\n", (3478, 3530), True, 'import numpy as np\n'), ((3972, 4018), 'pymc3.stats.quantiles', 'quantiles', (['prediction_samples', '(5, 25, 75, 95)'], {}), '(prediction_samples, (5, 25, 75, 95))\n', (3981, 4018), False, 'from pymc3.stats import quantiles\n'), ((4210, 4299), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'prediction_quantiles[25]', 'index': 'ext_index', 'columns': 'target.columns'}), '(data=prediction_quantiles[25], index=ext_index, columns=target\n .columns)\n', (4222, 4299), True, 'import pandas as pd\n'), ((4341, 4430), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'prediction_quantiles[75]', 'index': 'ext_index', 'columns': 'target.columns'}), '(data=prediction_quantiles[75], index=ext_index, columns=target\n .columns)\n', (4353, 4430), True, 'import pandas as pd\n'), ((4471, 4559), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'prediction_quantiles[5]', 'index': 'ext_index', 'columns': 'target.columns'}), '(data=prediction_quantiles[5], index=ext_index, columns=target.\n columns)\n', (4483, 4559), True, 'import pandas as pd\n'), ((4601, 4690), 'pandas.DataFrame', 
'pd.DataFrame', ([], {'data': 'prediction_quantiles[95]', 'index': 'ext_index', 'columns': 'target.columns'}), '(data=prediction_quantiles[95], index=ext_index, columns=target\n .columns)\n', (4613, 4690), True, 'import pandas as pd\n'), ((11301, 11312), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11310, 11312), True, 'from matplotlib import pyplot as plt\n'), ((585, 596), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (593, 596), True, 'import pickle as pkl\n'), ((1126, 1152), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-28"""'], {}), "('2020-01-28')\n", (1138, 1152), True, 'import pandas as pd\n'), ((1155, 1179), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'start'}), '(days=start)\n', (1167, 1179), True, 'import pandas as pd\n'), ((1745, 1789), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ids', 'columns': "['countyID']"}), "(data=ids, columns=['countyID'])\n", (1757, 1789), True, 'import pandas as pd\n'), ((2555, 2581), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-28"""'], {}), "('2020-01-28')\n", (2567, 2581), True, 'import pandas as pd\n'), ((2584, 2608), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'start'}), '(days=start)\n', (2596, 2608), True, 'import pandas as pd\n'), ((2653, 2687), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(n_weeks * 7 + 5)'}), '(days=n_weeks * 7 + 5)\n', (2665, 2687), True, 'import pandas as pd\n'), ((2705, 2725), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(5)'}), '(days=5)\n', (2717, 2725), True, 'import pandas as pd\n'), ((2747, 2767), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(5)'}), '(days=5)\n', (2759, 2767), True, 'import pandas as pd\n'), ((6738, 6763), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks', 'labels'], {}), '(ticks, labels)\n', (6748, 6763), True, 'from matplotlib import pyplot as plt\n'), ((8076, 8097), 'numpy.mean', 'np.mean', (['(trend_w2 > 0)'], {}), '(trend_w2 > 0)\n', (8083, 8097), True, 'import numpy as np\n'), ((10898, 10916), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10914, 10916), True, 'from matplotlib import pyplot as plt\n'), ((1475, 1496), 'pathlib.Path', 'Path', (['day_folder_path'], {}), '(day_folder_path)\n', (1479, 1496), False, 'from pathlib import Path\n'), ((4070, 4105), 'numpy.mean', 'np.mean', (['prediction_samples'], {'axis': '(0)'}), '(prediction_samples, axis=0)\n', (4077, 4105), True, 'import numpy as np\n'), ((4768, 4809), 'numpy.mean', 'np.mean', (['prediction_samples_trend'], {'axis': '(0)'}), '(prediction_samples_trend, axis=0)\n', (4775, 4809), True, 'import numpy as np\n'), ((5077, 5094), 'pandas.Timestamp', 'pd.Timestamp', (['day'], {}), '(day)\n', (5089, 5094), True, 'import pandas as pd\n'), ((7998, 8047), 'numpy.reshape', 'np.reshape', (['trend_params'], {'newshape': '(1000, 412, 2)'}), '(trend_params, newshape=(1000, 412, 2))\n', (8008, 8047), True, 'import numpy as np\n'), ((6112, 6138), 'pandas.Timedelta', 'pd.Timedelta', (['(12)'], {'unit': '"""h"""'}), "(12, unit='h')\n", (6124, 6138), True, 'import pandas as pd\n'), ((6198, 6224), 'pandas.Timedelta', 'pd.Timedelta', (['(12)'], {'unit': '"""h"""'}), "(12, unit='h')\n", (6210, 6224), True, 'import pandas as pd\n'), ((6284, 6310), 'pandas.Timedelta', 'pd.Timedelta', (['(12)'], {'unit': '"""h"""'}), "(12, unit='h')\n", (6296, 6310), True, 'import pandas as pd\n'), ((6572, 6592), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'i'}), '(days=i)\n', (6584, 6592), True, 'import pandas as pd\n'), ((11121, 11142), 'pathlib.Path', 'Path', (['day_folder_path'], {}), '(day_folder_path)\n', (11125, 11142), False, 'from pathlib import Path\n'), ((8271, 8291), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8283, 8291), True, 'import pandas as pd\n')] |
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from .img_util import avg_spectra
def normalize(data):
    """Center the data cloud by shifting its origin to the per-feature mean."""
    return data - np.mean(data, axis=0)
def get_PC(im, show_plots=True, top_n=3, PC_n=1, top_load_n=1, figsize=(8,9)):
    """
    get_PC(im)
    Run PCA over the pixels of an image cube.

    Each pixel is treated as an independent data point whose features are the
    spectral bands; the data is mean-centered before fitting.

    Parameters
    ----------
    im : numpy.ndarray
        Image passed as a numpy array of shape (rows, cols, bands).
    show_plots, top_n, PC_n, top_load_n, figsize :
        Unused here; kept for backward compatibility (plotting lives in
        `plot_PCs`).

    Returns
    -------
    out : tuple
        (loading_scores, scree_values): rows of `loading_scores` are the
        loading scores of each PC (columns are features); `scree_values` are
        the explained-variance ratios rounded to 5 decimals.
        NOTE: fixed the old docstring, which wrongly claimed the original
        data mean was also returned — it is not.
    """
    # For PCA, each row should be a data point, columns are features:
    # flatten the spatial dimensions so every pixel is an independent sample.
    data = np.reshape(im, (im.shape[0]*im.shape[1], im.shape[2]))
    data = normalize(data)
    pca = PCA()  # define PCA object
    pca.fit(data)
    scree_values = np.round(pca.explained_variance_ratio_, decimals=5)  # variance share per PC
    loading_scores = pca.components_  # loading scores for each feature and each PC
    return (loading_scores, scree_values)
def plot_PCs(ax, loading_scores, scree_values, top_n=3, PC_n=1, top_load_n=1):
    """
    Updates plt.fig.axes() objects with PCA plots: a scree bar chart on ax[0]
    and the absolute loading scores of one chosen PC on ax[1].
    ax must be an array of 2 axes objects.
    Parameters
    -----------------
    ax : array of 2 plt.fig.axes() objects
    loading_scores : An array of loading scores (rows are loading scores of each PC and columns are PCs)
    scree_values : 1-D array of explained-variance shares, one per PC
    top_n : Number of top PCs to plot in the scree chart
    PC_n : nth PC (1-based) to show the loading scores for
    top_load_n : Top loading scores of PC_n th PC to be shown in analysis
    Returns
    ----------------
    out : array
        Updated array of axes
    """
    # feature axis starts at 750 — presumably the first spectral band's
    # wavelength/index; TODO confirm against the data source
    feat_arr = np.arange(750, 750+scree_values.shape[0], 1) #array of features
    #Getting top top_load_n number of features in PC_n (argsort ascending, take tail)
    top_inds = np.argsort(loading_scores[PC_n - 1])[-top_load_n:]
    top_feat, top_scores = feat_arr[top_inds], loading_scores[PC_n - 1, top_inds]
    #For plots : labelling PCs
    PC_names = ["PC-"+ str(i) for i in np.arange(1,scree_values.shape[0]+1,1)]
    #SCREE PLOT : Explained Var./Unexplained Var. ------------------------------------------
    ax[0].bar(np.arange(1,top_n+1,1), scree_values[:top_n])
    ax[0].set_xticks(np.arange(1,top_n+1,1))
    ax[0].set_xticklabels(PC_names[:top_n])
    ax[0].set_title('Variance/Total Explained Variance')
    txt = [str(i) for i in scree_values[:top_n]] #Annotate bar plots
    for i, txt_i in enumerate(txt):
        # 0.85 offset centers the label roughly over bar i (bars start at x=1)
        ax[0].text(i+0.85, float(txt_i), txt_i, fontsize = 10, color = 'black')
    #Single PC analysis : plotting loading scores ------------------------------------------
    #Change PC_n to get the corresponding PCs loading scores plots.
    ax[1].set_title('Abs(Loading scores) in PC-{}'.format(PC_n))
    ax[1].plot(feat_arr, np.abs(loading_scores[PC_n - 1]))
    ax[1].grid(color='gray')
    for x,y in zip(top_feat, top_scores):
        # dashed stem plus highlighted marker for each top-loading feature
        ax[1].plot([x,x], [0,y], linestyle='dashed')
        ax[1].scatter(x,y, marker='o', c='yellow', s=200, edgecolors='red')
    for i, txt_i in enumerate(top_scores): #Annotate the top features
        ax[1].text(top_feat[i], txt_i, str(round(txt_i,1)), fontsize = 8.5, color = 'black')
    return ax
def make_PC_images(im_x, loading_scores, PC_num=[1]):
    """
    Project each pixel spectrum of `im_x` onto the selected principal components.

    Parameters
    ----------
    im_x : numpy.ndarray
        Image passed as a numpy array.
    loading_scores : numpy.ndarray
        Row i holds the loading scores of the (i+1)-th PC.
    PC_num : list of int
        1-based indices of the PCs whose loading scores are used to build the
        new features.

    Returns
    -------
    out : ndarray
        Array of shape (rows, cols, len(PC_num)) with one PC-score channel per
        requested component.
    """
    mean_spectra = avg_spectra(im_x)
    centered = im_x - mean_spectra
    # one (N, 1) column of projections per requested PC
    columns = [np.reshape(np.dot(centered, loading_scores[pc - 1]), (-1, 1))
               for pc in PC_num]
    stacked = np.hstack(columns)
    return np.reshape(stacked, (im_x.shape[0], im_x.shape[1], len(PC_num)))
"numpy.mean",
"numpy.abs",
"numpy.reshape",
"numpy.arange",
"sklearn.decomposition.PCA",
"numpy.argsort",
"numpy.dot",
"numpy.round"
] | [((215, 236), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (222, 236), True, 'import numpy as np\n'), ((999, 1055), 'numpy.reshape', 'np.reshape', (['im', '(im.shape[0] * im.shape[1], im.shape[2])'], {}), '(im, (im.shape[0] * im.shape[1], im.shape[2]))\n', (1009, 1055), True, 'import numpy as np\n'), ((1130, 1135), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (1133, 1135), False, 'from sklearn.decomposition import PCA\n'), ((1206, 1257), 'numpy.round', 'np.round', (['pca.explained_variance_ratio_'], {'decimals': '(5)'}), '(pca.explained_variance_ratio_, decimals=5)\n', (1214, 1257), True, 'import numpy as np\n'), ((2074, 2120), 'numpy.arange', 'np.arange', (['(750)', '(750 + scree_values.shape[0])', '(1)'], {}), '(750, 750 + scree_values.shape[0], 1)\n', (2083, 2120), True, 'import numpy as np\n'), ((2213, 2249), 'numpy.argsort', 'np.argsort', (['loading_scores[PC_n - 1]'], {}), '(loading_scores[PC_n - 1])\n', (2223, 2249), True, 'import numpy as np\n'), ((2564, 2590), 'numpy.arange', 'np.arange', (['(1)', '(top_n + 1)', '(1)'], {}), '(1, top_n + 1, 1)\n', (2573, 2590), True, 'import numpy as np\n'), ((2631, 2657), 'numpy.arange', 'np.arange', (['(1)', '(top_n + 1)', '(1)'], {}), '(1, top_n + 1, 1)\n', (2640, 2657), True, 'import numpy as np\n'), ((3194, 3226), 'numpy.abs', 'np.abs', (['loading_scores[PC_n - 1]'], {}), '(loading_scores[PC_n - 1])\n', (3200, 3226), True, 'import numpy as np\n'), ((4219, 4277), 'numpy.dot', 'np.dot', (['(im_x - mean_spectra)', 'loading_scores[PC_num[0] - 1]'], {}), '(im_x - mean_spectra, loading_scores[PC_num[0] - 1])\n', (4225, 4277), True, 'import numpy as np\n'), ((2416, 2458), 'numpy.arange', 'np.arange', (['(1)', '(scree_values.shape[0] + 1)', '(1)'], {}), '(1, scree_values.shape[0] + 1, 1)\n', (2425, 2458), True, 'import numpy as np\n'), ((4389, 4440), 'numpy.dot', 'np.dot', (['(im_x - mean_spectra)', 'loading_scores[PC - 1]'], {}), '(im_x - mean_spectra, loading_scores[PC - 1])\n', 
(4395, 4440), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 15:37:07 2020
@author: aoust
"""
import numpy as np
import math
class QuadraticPolynomial():
    """Sparse quadratic form: sum_k coefs[k] * x[i_k] * x[j_k] over index pairs.

    Pairs are stored as (i, j) with i <= j.  An index of -1 marks "no variable"
    in a pair (used for constant / linear terms elsewhere in the code base).
    """

    def __init__(self, n, tuples, coefs):
        # n: number of variables; tuples/coefs must be parallel lists
        self.n = n
        assert(len(tuples)==len(coefs))
        self.tuples = tuples
        self.coefs = coefs
        for (i, j) in tuples:
            assert(i<=j)

    def check(self):
        """Re-validate pair ordering and coerce list coefficients to ndarray."""
        for (i, j) in self.tuples:
            assert(i<=j)
        if type(self.coefs)==list:
            self.coefs = np.array(self.coefs)

    def vpairs(self):
        """Yield the (i, j) pairs coupling two distinct real variables."""
        for (i, j) in self.tuples:
            if (i >= 0) and (i < j):
                yield i, j

    def scale_variables(self, tab):
        """Multiply each coefficient by tab[i] * tab[j], skipping index -1."""
        for k, (i, j) in enumerate(self.tuples):
            factor = 1
            if i != -1:
                factor *= tab[i]
            if j != -1:
                factor *= tab[j]
            self.coefs[k] = self.coefs[k] * factor

    def scale_coefs(self):
        """Normalize the coefficient vector to unit Euclidean norm."""
        self.coefs = self.coefs / (np.linalg.norm(self.coefs, 2))

    def scale_coefs2(self):
        """Rescale coefficients by a power of ten; return the scaling factor."""
        power = int(math.log10(np.linalg.norm(self.coefs, 2)))
        factor = 10 ** (power - 1)
        self.coefs = self.coefs / factor
        return factor

    def enumerate_triples(self):
        """Yield (i, j, coefficient) for every stored term, in order."""
        for (i, j), c in zip(self.tuples, self.coefs):
            yield i, j, c

    def variables_list(self):
        """Return the sorted list of variable indices actually used (>= 0 only
        if no -1 sentinel appears; -1 is excluded by the i != -1 guards)."""
        used = set()
        for (i, j) in self.tuples:
            if i != -1:
                used.add(i)
            if j != -1:
                used.add(j)
        return sorted(used)

    def evaluation(self, x):
        """Evaluate the quadratic form at the point x."""
        total = 0
        for (i, j), c in zip(self.tuples, self.coefs):
            total += x[i] * x[j] * c
        return total
"numpy.array",
"numpy.linalg.norm"
] | [((544, 564), 'numpy.array', 'np.array', (['self.coefs'], {}), '(self.coefs)\n', (552, 564), True, 'import numpy as np\n'), ((1071, 1100), 'numpy.linalg.norm', 'np.linalg.norm', (['self.coefs', '(2)'], {}), '(self.coefs, 2)\n', (1085, 1100), True, 'import numpy as np\n'), ((1174, 1203), 'numpy.linalg.norm', 'np.linalg.norm', (['self.coefs', '(2)'], {}), '(self.coefs, 2)\n', (1188, 1203), True, 'import numpy as np\n')] |
from mwa_pb import config
from mwa_pb.beam_full_EE import ApertureArray
from mwa_pb.beam_full_EE import Beam
from pyrem.radiotelescope import ideal_gaussian_beam
import numpy as np
from scipy.constants import c
def mwa_fee_model(theta, phi, nu = 150e6):
    """Return the normalized MWA Fully Embedded Element (FEE) power beam.

    Parameters
    ----------
    theta, phi : array_like
        Zenith angle and azimuth at which to evaluate the beam.
    nu : float
        Frequency in Hz (default 150 MHz).

    Returns
    -------
    Power beam normalized so that its peak equals 1.

    Notes
    -----
    Removed the unused locals ``my_Astro_Az`` / ``my_ZA`` from the original.
    """
    h5filepath = config.h5file  # recent version was MWA_embedded_element_pattern_V02.h5
    tile = ApertureArray(h5filepath, nu)
    delays = np.zeros([2, 16])  # Dual-pol, all-zero delays (zenith pointing)
    amps = np.ones([2, 16])
    tile_beam = Beam(tile, delays, amps=amps)
    jones = tile_beam.get_response(phi, theta)
    # power from the first-row Jones terms: |J00|^2 + |J01|^2
    # (presumably the X-polarization response — verify against mwa_pb docs)
    power = jones[0, 0] * jones[0, 0].conjugate() + jones[0, 1] * jones[0, 1].conjugate()
    return power/power.max()
def simple_mwa_tile(l, m, frequency=150e6, weights=1, normalisation_only=False,
                    dipole_sep=1.1):
    """Analytic MWA tile beam: 4x4 dipole array factor times a Gaussian dipole beam.

    Parameters
    ----------
    l, m : array_like
        Direction cosines at which to evaluate the beam.
    frequency : float
        Observing frequency in Hz.
    weights : scalar or array_like
        Per-dipole weights (a scalar is broadcast over the 16 dipoles).
    normalisation_only : bool
        If True, return only the normalization value (response at the first
        sample) instead of the normalized beam.
    dipole_sep : float
        Dipole separation in meters.

    Notes
    -----
    Bug fix: the original passed ``frequency`` as the 6th *positional*
    argument to ``get_array_factor``, where it landed in the pointing
    parameter ``l0`` instead of ``frequency``.  It is now passed by keyword.
    The unused ``z_offsets`` array was removed.
    """
    # 4x4 grid of dipole positions in meters
    x_offsets = np.array([-1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5,
                          -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5], dtype=np.float32) * dipole_sep
    y_offsets = np.array([1.5, 1.5, 1.5, 1.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5,
                          -0.5, -0.5, -1.5, -1.5, -1.5, -1.5], dtype=np.float32) * dipole_sep

    # broadcast scalar weights to one weight per dipole
    weights = weights + np.zeros(x_offsets.shape)

    dipole_jones_matrix = ideal_gaussian_beam(l, 0, nu=frequency, diameter=1)
    array_factor = get_array_factor(x_offsets, y_offsets, weights, l, m,
                                    frequency=frequency)

    tile_response = array_factor * dipole_jones_matrix
    # normalize so the first sample (presumably zenith — confirm with callers)
    # has unit response
    normalisation = tile_response[0]
    tile_response /= normalisation

    return normalisation if normalisation_only else tile_response
def get_array_factor(x, y, weights, l, m, l0=0, m0=0, frequency=150e6):
    """Compute the complex array factor of an antenna array.

    Parameters
    ----------
    x, y : array_like
        Antenna positions in meters.
    weights : array_like
        Per-antenna (possibly complex) weights.
    l, m : numpy.ndarray
        Direction cosines at which to evaluate the array factor.
    l0, m0 : float
        Pointing-direction cosines (phase center), default zenith.
    frequency : float
        Frequency in Hz.

    Returns
    -------
    numpy.ndarray
        Array factor of shape ``l.shape``, normalized by the sum of the
        weights, with NaN entries zeroed.

    Notes
    -----
    The original per-antenna Python loop (flagged with a "look into
    optimisation through vectorisation" comment) is replaced by a single
    broadcasted numpy expression.
    """
    wavelength = c / frequency
    k_x = (2. * np.pi / wavelength) * l
    k_y = (2. * np.pi / wavelength) * m
    k_x0 = (2. * np.pi / wavelength) * l0
    k_y0 = (2. * np.pi / wavelength) * m0

    # Vectorized over antennas: phase has shape l.shape + (n_antennas,)
    phase = ((k_x - k_x0)[..., np.newaxis] * np.asarray(x)
             + (k_y - k_y0)[..., np.newaxis] * np.asarray(y))
    array_factor_map = np.sum(np.asarray(weights) * np.exp(-1j * phase), axis=-1)

    # filter all NaN
    array_factor_map[np.isnan(array_factor_map)] = 0
    array_factor_map = array_factor_map / np.sum(weights)
    return array_factor_map
"numpy.ones",
"mwa_pb.beam_full_EE.ApertureArray",
"mwa_pb.beam_full_EE.Beam",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isnan",
"pyrem.radiotelescope.ideal_gaussian_beam"
] | [((356, 385), 'mwa_pb.beam_full_EE.ApertureArray', 'ApertureArray', (['h5filepath', 'nu'], {}), '(h5filepath, nu)\n', (369, 385), False, 'from mwa_pb.beam_full_EE import ApertureArray\n'), ((433, 450), 'numpy.zeros', 'np.zeros', (['[2, 16]'], {}), '([2, 16])\n', (441, 450), True, 'import numpy as np\n'), ((475, 491), 'numpy.ones', 'np.ones', (['[2, 16]'], {}), '([2, 16])\n', (482, 491), True, 'import numpy as np\n'), ((509, 538), 'mwa_pb.beam_full_EE.Beam', 'Beam', (['tile', 'delays'], {'amps': 'amps'}), '(tile, delays, amps=amps)\n', (513, 538), False, 'from mwa_pb.beam_full_EE import Beam\n'), ((1203, 1228), 'numpy.zeros', 'np.zeros', (['x_offsets.shape'], {}), '(x_offsets.shape)\n', (1211, 1228), True, 'import numpy as np\n'), ((1245, 1270), 'numpy.zeros', 'np.zeros', (['x_offsets.shape'], {}), '(x_offsets.shape)\n', (1253, 1270), True, 'import numpy as np\n'), ((1298, 1349), 'pyrem.radiotelescope.ideal_gaussian_beam', 'ideal_gaussian_beam', (['l', '(0)'], {'nu': 'frequency', 'diameter': '(1)'}), '(l, 0, nu=frequency, diameter=1)\n', (1317, 1349), False, 'from pyrem.radiotelescope import ideal_gaussian_beam\n'), ((2025, 2057), 'numpy.zeros', 'np.zeros', (['l.shape'], {'dtype': 'complex'}), '(l.shape, dtype=complex)\n', (2033, 2057), True, 'import numpy as np\n'), ((853, 973), 'numpy.array', 'np.array', (['[-1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -\n 0.5, 0.5, 1.5]'], {'dtype': 'np.float32'}), '([-1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5,\n -1.5, -0.5, 0.5, 1.5], dtype=np.float32)\n', (861, 973), True, 'import numpy as np\n'), ((1028, 1149), 'numpy.array', 'np.array', (['[1.5, 1.5, 1.5, 1.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5, -1.5, -1.5,\n -1.5, -1.5]'], {'dtype': 'np.float32'}), '([1.5, 1.5, 1.5, 1.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5, -\n 1.5, -1.5, -1.5, -1.5], dtype=np.float32)\n', (1036, 1149), True, 'import numpy as np\n'), ((2422, 2448), 'numpy.isnan', 'np.isnan', 
(['array_factor_map'], {}), '(array_factor_map)\n', (2430, 2448), True, 'import numpy as np\n'), ((2496, 2511), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (2502, 2511), True, 'import numpy as np\n'), ((2311, 2335), 'numpy.exp', 'np.exp', (['complex_exponent'], {}), '(complex_exponent)\n', (2317, 2335), True, 'import numpy as np\n')] |
import warnings
import cv2
import imageio
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
import model
import opt
import train
import pdb
def set_deterministic():
    """Seed the Python, NumPy and PyTorch RNGs (seed 0) and disable cudnn
    benchmarking so runs are reproducible."""
    import random

    import numpy
    import torch

    random.seed(0)
    numpy.random.seed(0)
    torch.manual_seed(0)
    # benchmark mode auto-selects conv algorithms; keep it off for determinism
    torch.backends.cudnn.benchmark = False
def adjust_jupyter_argv():
    """Trim sys.argv down to just the program name.

    Useful inside notebooks, where the kernel presumably injects extra
    arguments that confuse argparse — verify against your environment.
    """
    import sys

    sys.argv = sys.argv[:1]
def write_mp4(name, frames, fps=10):
    """Write the given frames to ``<name>.mp4`` at the requested frame rate."""
    imageio.mimwrite("{}.mp4".format(name), frames, "mp4", fps=fps)
def overlay_image(im, im_overlay, coord=(100, 70)):
    """Alpha-blend a 4-channel RGBA overlay onto a 3-channel image in place.

    `coord` is the (row, col) of the overlay's top-left corner.  Returns `im`
    (the same array, mutated).
    """
    row0, col0 = coord
    n_rows = im_overlay.shape[0]
    n_cols = im_overlay.shape[1]
    # keep a trailing singleton dim so alpha broadcasts over the color channels
    alpha = im_overlay[:, :, 3:4]
    region = im[row0 : row0 + n_rows, col0 : col0 + n_cols, :]
    im[row0 : row0 + n_rows, col0 : col0 + n_cols, :] = (
        (1 - alpha) * region + alpha * im_overlay[:, :, :3]
    )
    return im
def get_parameters(models):
    """Get all model parameters recursively.

    Accepts a single pytorch module, a list of them, or a dict whose values
    are (possibly nested) modules/lists/dicts; returns a flat parameter list.
    """
    if isinstance(models, list):
        return [p for entry in models for p in get_parameters(entry)]
    if isinstance(models, dict):
        return [p for entry in models.values() for p in get_parameters(entry)]
    # base case: a single pytorch model
    return list(models.parameters())
def visualize_depth(depth, cmap=cv2.COLORMAP_JET):
    """Convert a depth tensor into a color-mapped (3, H, W) tensor for display."""
    arr = np.nan_to_num(depth.cpu().numpy())  # NaNs become 0
    lo = np.min(arr)  # minimum depth
    hi = np.max(arr)
    scaled = (arr - lo) / (hi - lo + 1e-8)  # normalize to 0~1
    img8 = (255 * scaled).astype(np.uint8)
    colored = Image.fromarray(cv2.applyColorMap(img8, cmap))
    return torchvision.transforms.ToTensor()(colored)  # (3, H, W)
def assign_appearance(ids_train, ids_unassigned):
    """Map each unassigned embedding id to its nearest neighbor in the combined
    sorted id list.

    Described in experiments, (3) NeRF-W: test/val appearance embeddings are
    re-assigned to the closest train embedding by frame index.
    NOTE(review): neighbors are looked up in the *combined* list, so an
    unassigned id could in principle map to another unassigned id if two are
    adjacent — confirm the callers never produce that case.
    """
    all_ids = sorted(ids_train + ids_unassigned)
    last = len(all_ids) - 1
    mapping = {}
    for uid in ids_unassigned:
        pos = all_ids.index(uid)
        if pos == 0:
            # first entry: only the next id is available
            nearest = all_ids[1]
        elif pos == last:
            # last entry: only the previous id is available
            nearest = all_ids[pos - 1]
        else:
            prev_id = all_ids[pos - 1]
            next_id = all_ids[pos + 1]
            # pick whichever neighbor is closer by frame index
            nearest = min(
                (abs(uid - prev_id), prev_id), (abs(uid - next_id), next_id)
            )[1]
        mapping[uid] = nearest
    return mapping
def init_model(ckpt_path, dataset):
    """Build a NeuralDiffSystem from a Lightning checkpoint and the dataset.

    Loads the checkpoint on CPU, restores the hyper-parameters saved in it on
    top of the defaults for ``dataset.vid``, moves the model to CUDA, loads
    the weights, and then copies train appearance embeddings into the
    test/val embedding slots (NeRF-W style reassignment via
    ``assign_appearance``).  Requires a CUDA device.
    """
    ckpt = torch.load(ckpt_path, map_location="cpu")
    opt_hp = opt.get_opts(dataset.vid)
    # overwrite default opts with the hyper-parameters stored in the checkpoint
    for j in ckpt["hyper_parameters"]:
        setattr(opt_hp, j, ckpt["hyper_parameters"][j])
    model = train.NeuralDiffSystem(
        opt_hp, train_dataset=dataset, val_dataset=dataset
    ).cuda()
    model.load_state_dict(ckpt["state_dict"])

    # map each test/val image id to its closest train image id
    g_test = assign_appearance(dataset.img_ids_train, dataset.img_ids_test)
    g_val = assign_appearance(dataset.img_ids_train, dataset.img_ids_val)
    # copy the train appearance embedding into each reassigned slot
    for g in [g_test, g_val]:
        for i, i_train in g.items():
            model.embedding_a.weight.data[i] = model.embedding_a.weight.data[
                i_train
            ]
    return model
| [
"cv2.applyColorMap",
"torch.manual_seed",
"opt.get_opts",
"torch.load",
"imageio.mimwrite",
"random.seed",
"numpy.max",
"train.NeuralDiffSystem",
"numpy.random.seed",
"numpy.min",
"model.load_state_dict",
"torchvision.transforms.ToTensor",
"numpy.nan_to_num"
] | [((283, 303), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (300, 303), False, 'import torch\n'), ((308, 322), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (319, 322), False, 'import random\n'), ((327, 347), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (344, 347), False, 'import numpy\n'), ((507, 562), 'imageio.mimwrite', 'imageio.mimwrite', (["(name + '.mp4')", 'frames', '"""mp4"""'], {'fps': 'fps'}), "(name + '.mp4', frames, 'mp4', fps=fps)\n", (523, 562), False, 'import imageio\n'), ((1600, 1616), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {}), '(x)\n', (1613, 1616), True, 'import numpy as np\n'), ((1645, 1654), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1651, 1654), True, 'import numpy as np\n'), ((1685, 1694), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1691, 1694), True, 'import numpy as np\n'), ((2819, 2860), 'torch.load', 'torch.load', (['ckpt_path'], {'map_location': '"""cpu"""'}), "(ckpt_path, map_location='cpu')\n", (2829, 2860), False, 'import torch\n'), ((2874, 2899), 'opt.get_opts', 'opt.get_opts', (['dataset.vid'], {}), '(dataset.vid)\n', (2886, 2899), False, 'import opt\n'), ((3107, 3148), 'model.load_state_dict', 'model.load_state_dict', (["ckpt['state_dict']"], {}), "(ckpt['state_dict'])\n", (3128, 3148), False, 'import model\n'), ((1811, 1837), 'cv2.applyColorMap', 'cv2.applyColorMap', (['x', 'cmap'], {}), '(x, cmap)\n', (1828, 1837), False, 'import cv2\n'), ((1848, 1881), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1879, 1881), False, 'import torchvision\n'), ((3007, 3081), 'train.NeuralDiffSystem', 'train.NeuralDiffSystem', (['opt_hp'], {'train_dataset': 'dataset', 'val_dataset': 'dataset'}), '(opt_hp, train_dataset=dataset, val_dataset=dataset)\n', (3029, 3081), False, 'import train\n')] |
import tensorflow as tf
import os
import zipfile
from os import path, getcwd, chdir
import os
# Directories holding the two training classes.
train_happy_dir = os.path.join('/Users/seanjudelyons/Downloads/happy-or-sad/happy/')  # the zip file had the folders called horses and humans
train_sad_dir = os.path.join('/Users/seanjudelyons/Downloads/happy-or-sad/sad/')

train_happy_names = os.listdir(train_happy_dir)
print(train_happy_names[:10])

train_sad_names = os.listdir(train_sad_dir)
print(train_sad_names[:10])

print('total training happy images:', len(os.listdir(train_happy_dir)))
# bug fix: the sad count previously listed train_happy_dir
print('total training sad images:', len(os.listdir(train_sad_dir)))
# GRADED FUNCTION: train_happy_sad_model
class myCallback(tf.keras.callbacks.Callback):
    """Keras callback that stops training once training accuracy passes 99.9%."""

    def on_epoch_end(self, epoch, logs={}):
        # logs.get('acc', 0): default 0 guards against a missing metric key
        # (the old code compared None > 0.999 and raised TypeError when the
        # metric was registered under another name such as 'accuracy')
        if (logs.get('acc', 0) > 0.999):
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True
def train_happy_sad_model():
    """Train a small CNN to classify happy vs sad images.

    Builds a 3-conv-block binary classifier on 150x150 RGB images, trains it
    from a directory generator (hard-coded local path), runs a single-image
    prediction, and returns the final training accuracy.  Training stops
    early via `myCallback` once accuracy exceeds 99.9%.
    """
    callbacks = myCallback()
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The second convolution
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The third convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Flatten the results to feed into a DNN
        tf.keras.layers.Flatten(),
        # 512 neuron hidden layer
        tf.keras.layers.Dense(512, activation='relu'),
        # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')
        tf.keras.layers.Dense(1, activation='sigmoid')])
    model.summary()
    from tensorflow.keras.optimizers import RMSprop
    # NOTE(review): `lr` is the legacy kwarg name; newer Keras uses
    # `learning_rate` — confirm against the installed TF version
    model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    train_datagen = ImageDataGenerator(rescale=1 / 255)
    # class labels are inferred from the happy/ and sad/ sub-directories
    train_generator = train_datagen.flow_from_directory('/Users/seanjudelyons/Downloads/happy-or-sad/', target_size=(150, 150), batch_size=20,
                                                        class_mode='binary')
    history = model.fit(train_generator, steps_per_epoch=4, epochs=20, verbose=1, callbacks=[callbacks])
    from keras.preprocessing import image
    import numpy as np
    # single-image sanity check on a held-out local file
    path = os.path.join('/Users/seanjudelyons/Downloads/happyorsadtest.png')
    img = image.load_img(path, target_size=(150, 150))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(classes[0])
    # sigmoid output > 0.5 maps to the second alphabetical class ('sad')
    if classes[0] > 0.5:
        print("The picture is Sad")
    else:
        print("The picture is Happy")
    return history.history['acc'][-1]


train_happy_sad_model()
| [
"keras.preprocessing.image.img_to_array",
"os.listdir",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Dense",
"numpy.vstack",
"nump... | [((114, 180), 'os.path.join', 'os.path.join', (['"""/Users/seanjudelyons/Downloads/happy-or-sad/happy/"""'], {}), "('/Users/seanjudelyons/Downloads/happy-or-sad/happy/')\n", (126, 180), False, 'import os\n'), ((255, 319), 'os.path.join', 'os.path.join', (['"""/Users/seanjudelyons/Downloads/happy-or-sad/sad/"""'], {}), "('/Users/seanjudelyons/Downloads/happy-or-sad/sad/')\n", (267, 319), False, 'import os\n'), ((341, 368), 'os.listdir', 'os.listdir', (['train_happy_dir'], {}), '(train_happy_dir)\n', (351, 368), False, 'import os\n'), ((418, 443), 'os.listdir', 'os.listdir', (['train_sad_dir'], {}), '(train_sad_dir)\n', (428, 443), False, 'import os\n'), ((2033, 2068), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255)'}), '(rescale=1 / 255)\n', (2051, 2068), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2474, 2539), 'os.path.join', 'os.path.join', (['"""/Users/seanjudelyons/Downloads/happyorsadtest.png"""'], {}), "('/Users/seanjudelyons/Downloads/happyorsadtest.png')\n", (2486, 2539), False, 'import os\n'), ((2550, 2594), 'keras.preprocessing.image.load_img', 'image.load_img', (['path'], {'target_size': '(150, 150)'}), '(path, target_size=(150, 150))\n', (2564, 2594), False, 'from keras.preprocessing import image\n'), ((2603, 2626), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2621, 2626), False, 'from keras.preprocessing import image\n'), ((2635, 2660), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2649, 2660), True, 'import numpy as np\n'), ((2675, 2689), 'numpy.vstack', 'np.vstack', (['[x]'], {}), '([x])\n', (2684, 2689), True, 'import numpy as np\n'), ((515, 542), 'os.listdir', 'os.listdir', (['train_happy_dir'], {}), '(train_happy_dir)\n', (525, 542), False, 'import os\n'), ((585, 612), 'os.listdir', 'os.listdir', (['train_happy_dir'], {}), '(train_happy_dir)\n', (595, 612), 
False, 'import os\n'), ((1010, 1095), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(150, 150, 3)'}), "(16, (3, 3), activation='relu', input_shape=(150, 150, 3)\n )\n", (1032, 1095), True, 'import tensorflow as tf\n'), ((1100, 1134), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1128, 1134), True, 'import tensorflow as tf\n'), ((1177, 1230), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (1199, 1230), True, 'import tensorflow as tf\n'), ((1240, 1274), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1268, 1274), True, 'import tensorflow as tf\n'), ((1316, 1369), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1338, 1369), True, 'import tensorflow as tf\n'), ((1379, 1413), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1407, 1413), True, 'import tensorflow as tf\n'), ((1472, 1497), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (1495, 1497), True, 'import tensorflow as tf\n'), ((1541, 1586), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (1562, 1586), True, 'import tensorflow as tf\n'), ((1723, 1769), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1744, 1769), True, 'import tensorflow as tf\n'), ((1903, 1920), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1910, 1920), False, 'from tensorflow.keras.optimizers import RMSprop\n')] |
import numpy as np
import time
import math
# from cassie_env import CassieEnv
from cassiemujoco import *
from trajectory.trajectory import CassieTrajectory
import matplotlib.pyplot as plt
from matplotlib import style
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
def visualise_sim_graph(file_path, freq_of_sim):
    """Render the logged pelvis height of a Cassie simulation as an mp4.

    Loads a saved .npz replay, plots qpos z-height vs. time, and writes an
    animated line plot to 'lines.mp4' via the ffmpeg writer.

    Parameters
    ----------
    file_path : str
        Path to the .npz log (must contain `time` and `qpos_replay` arrays).
    freq_of_sim : float
        Simulation frequency in Hz, used to pace the animation frames.
    """
    traj = np.load(file_path)
    # env = CassieEnv("walking")
    # csim = CassieSim("./cassie/cassiemujoco/cassie.xml")
    # vis = CassieVis(csim, "./cassie/cassiemujoco/cassie.xml")
    u = pd_in_t()
    # pelvisXYZ = traj.f.qpos_replay[:, 0:3]
    # render_state = vis.draw(csim)
    # saved_time = traj.f.time[:]
    #################Graphing###########
    log_time = traj.f.time[:]
    y_val = traj.f.qpos_replay[:,2] #z - height
    x_data= log_time
    y_data = y_val
    delt_x = (x_data[1] - x_data[0]) * 1000 #convert seconds to ms
    # 10 samples per animation frame (see the slicing in animate below)
    num_frames = math.ceil(len(x_data) / 10)
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
    output = plt.plot([])
    plt.close()
    print(output[0])
    x = np.linspace(0,2*np.pi, 100)
    fig = plt.figure()
    lines = plt.plot([])
    line = lines[0]
    #other setup //set x and y lims
    plt.xlim(x_data.min(), x_data.max())
    plt.ylim(y_data.min(), y_data.max())
    def animate(frame):
        #update: reveal 10 more samples per frame
        x = x_data[:frame*10]
        y = y_data[:frame*10]
        # y = np.sin(x + 2*np.pi * frame/100)
        line.set_data((x,y))
    anim = FuncAnimation(fig, animate, frames=num_frames, interval=(1/freq_of_sim * 1000 + (10 * delt_x))) #20 is 50 fps
    anim.save('lines.mp4', writer=writer)
    # html = display.HTML(video)
    # display.display(html)
    plt.close()
visualise_sim_graph("./outfile8.npz", 30) | [
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.load"
] | [((437, 455), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (444, 455), True, 'import numpy as np\n'), ((1148, 1160), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {}), '([])\n', (1156, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1176), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1174, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1237), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (1218, 1237), True, 'import numpy as np\n'), ((1246, 1258), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1256, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1284), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {}), '([])\n', (1280, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1709), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'animate'], {'frames': 'num_frames', 'interval': '(1 / freq_of_sim * 1000 + 10 * delt_x)'}), '(fig, animate, frames=num_frames, interval=1 / freq_of_sim * \n 1000 + 10 * delt_x)\n', (1624, 1709), False, 'from matplotlib.animation import FuncAnimation\n'), ((1830, 1841), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1839, 1841), True, 'import matplotlib.pyplot as plt\n')] |
from deepspeech import Model
import gradio as gr
import numpy as np
# DeepSpeech 0.8.2 acoustic model and external scorer (expected as local files)
model_file_path = "deepspeech-0.8.2-models.pbmm"
lm_file_path = "deepspeech-0.8.2-models.scorer"
# decoder hyper-parameters — presumably the release defaults; verify before tuning
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18

# module-level model shared by transcribe() below
model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)
def reformat_freq(sr, y):
    """Return (sr, y) at 16 kHz; only 16 kHz and 48 kHz inputs are accepted.

    48 kHz audio is peak-normalized into the int16 range and decimated 3:1
    by averaging consecutive sample triples (Deepspeech only supports 16k).
    """
    if sr == 16000:
        return sr, y
    if sr != 48000:
        raise ValueError("Unsupported rate", sr)
    peak = max(np.max(y), 1)
    scaled = (y / peak) * 32767
    y = scaled.reshape((-1, 3)).mean(axis=1).astype("int16")
    return 16000, y
def transcribe(speech, stream):
    """Feed one (sample_rate, samples) chunk into a DeepSpeech stream.

    Returns the running transcript and the (possibly newly created) stream
    state, matching the Gradio state-passing convention.
    """
    _, audio = reformat_freq(*speech)
    if stream is None:
        # first chunk: open a fresh streaming-inference session
        stream = model.createStream()
    stream.feedAudioContent(audio)
    return stream.intermediateDecode(), stream
# live interface: streams microphone chunks through transcribe(); the "state"
# slot carries the DeepSpeech stream between calls
gr.Interface(transcribe, ["microphone", "state"], ["text", "state"], live=True).launch()
| [
"gradio.Interface",
"deepspeech.Model",
"numpy.max"
] | [((223, 245), 'deepspeech.Model', 'Model', (['model_file_path'], {}), '(model_file_path)\n', (228, 245), False, 'from deepspeech import Model\n'), ((987, 1066), 'gradio.Interface', 'gr.Interface', (['transcribe', "['microphone', 'state']", "['text', 'state']"], {'live': '(True)'}), "(transcribe, ['microphone', 'state'], ['text', 'state'], live=True)\n", (999, 1066), True, 'import gradio as gr\n'), ((608, 617), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (614, 617), True, 'import numpy as np\n')] |
#!/usr/bin/python
import sys
from os.path import join,exists,dirname
import numpy as np
from numpy.random import randint
from sklearn.datasets import load_svmlight_file
from torch.autograd import Function, Variable
import torch.nn as nn
import torch.optim as optim
import torch
from torch import FloatTensor
from uda_common import read_feature_groups, read_feature_lookup
# the concepts here come from: https://github.com/fungtion/DANN/blob/master/models/model.py
class ReverseLayerF(Function):
    """Gradient-reversal layer (DANN): identity on the forward pass, gradient
    multiplied by -alpha on the backward pass."""

    @staticmethod
    def forward(ctx, x, alpha):
        # stash alpha for backward; forward is the identity
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # reverse and scale the gradient; alpha itself gets no gradient
        return -grad_output * ctx.alpha, None
class TwoOutputModel(nn.Module):
    """Domain-adversarial network (DANN): a shared feature extractor feeds a
    task classifier and a gradient-reversed domain classifier.

    Args:
        input_features: size of the input feature vector.
        hidden_nodes: width of the shared hidden representation.
        num_outputs: number of task classes; > 2 uses a log-softmax head,
            otherwise a single sigmoid unit.
    """

    def __init__(self, input_features, hidden_nodes, num_outputs):
        super(TwoOutputModel, self).__init__()
        # feature: input -> shared "representation"
        self.feature = nn.Sequential()
        self.feature.add_module('input_layer', nn.Linear(input_features, hidden_nodes))
        self.feature.add_module('relu', nn.ReLU(True))

        # task_classifier maps the representation to a task prediction
        self.task_classifier = nn.Sequential()
        if num_outputs > 2:
            self.task_classifier.add_module('task_linear', nn.Linear(hidden_nodes, num_outputs))
            # dim=1 normalizes over the class axis of (batch, num_outputs);
            # explicit dim avoids nn.LogSoftmax's deprecated implicit-dim mode
            self.task_classifier.add_module('task_softmax', nn.LogSoftmax(dim=1))
        else:
            self.task_classifier.add_module('task_binary', nn.Linear(hidden_nodes, 1))
            self.task_classifier.add_module('task_sigmoid', nn.Sigmoid())

        # domain_classifier maps the representation to a binary domain prediction
        self.domain_classifier = nn.Sequential()
        self.domain_classifier.add_module('domain_linear', nn.Linear(hidden_nodes, 1))
        self.domain_classifier.add_module('domain_sigmoid', nn.Sigmoid())

    def forward(self, input_data, alpha):
        """Return (task_prediction, domain_prediction).

        `alpha` scales the reversed gradient that flows from the domain head
        back into the feature extractor.
        """
        feature = self.feature(input_data)
        task_prediction = self.task_classifier(feature)
        # gradient reversal: the domain head trains normally while the feature
        # extractor is pushed toward domain-invariant representations
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        domain_prediction = self.domain_classifier(reverse_feature)
        return task_prediction, domain_prediction
def main(args):
    """Train a DANN-style domain-adversarial model on svmlight-format data.

    The model jointly minimizes a task loss on the source domain and an
    adversarial domain-classification loss (via gradient reversal) so the
    learned features transfer to the target domain.

    :param args: [data_file, backward_flag] where backward_flag ('true'/'1')
        swaps which domain is treated as source vs. target.
    """
    if len(args) < 1:
        sys.stderr.write("Required arguments: <data file> [backward True|False]\n")
        sys.exit(-1)
    # FIX: `cuda` was previously assigned only inside the availability check,
    # which raised a NameError later on CPU-only machines.
    cuda = torch.cuda.is_available()
    if len(args) > 1:
        # FIX: bool(args[1]) is True for any non-empty string (including
        # "False"); parse the flag text instead.
        backward = args[1].lower() in ('true', '1')
        print("Direction is %s based on args=%s" % ("backward" if backward else "forward", args[1]))
    else:
        backward = False
        print("Direction is forward by default")
    # Read the data:
    domain_weight = 0.5
    sys.stderr.write("Reading source data from %s\n" % (args[0]))
    all_X, all_y = load_svmlight_file(args[0])
    # y is 1,2 by default, map to 0,1 for sigmoid training
    all_y -= 1  # 0/1
    num_instances, num_feats = all_X.shape
    domain_map = read_feature_groups(join(dirname(args[0]), 'reduced-feature-groups.txt'))
    domain_inds = domain_map['Domain']
    feature_map = read_feature_lookup(join(dirname(args[0]), 'reduced-features-lookup.txt'))
    # Configure global network params:
    lr = 0.01
    num_hidden_nodes = 1000
    epochs = 100
    direction = 1 if backward else 0
    sys.stderr.write("using domain %s as source, %s as target\n" %
                     (feature_map[domain_inds[direction]], feature_map[domain_inds[1 - direction]]))
    # Select the source-domain instances, then zero out both domain-indicator
    # features so the model cannot trivially read the domain from the input.
    source_instance_inds = np.where(all_X[:, domain_inds[direction]].toarray() > 0)[0]
    X_source = all_X[source_instance_inds, :]
    X_source[:, domain_inds[direction]] = 0
    X_source[:, domain_inds[1 - direction]] = 0
    y_source = all_y[source_instance_inds]
    # 80/20 train/validation split of the source domain:
    num_train_instances = int(X_source.shape[0] * 0.8)
    X_task_train = X_source[:num_train_instances, :]
    y_task_train = y_source[:num_train_instances]
    X_task_valid = X_source[num_train_instances:, :]
    y_task_valid = y_source[num_train_instances:]
    # Same selection/split for the target domain (labels unused here:
    # unsupervised domain adaptation).
    target_instance_inds = np.where(all_X[:, domain_inds[1 - direction]].toarray() > 0)[0]
    X_target = all_X[target_instance_inds, :]
    X_target[:, domain_inds[direction]] = 0
    X_target[:, domain_inds[1 - direction]] = 0
    num_target_train = int(X_target.shape[0] * 0.8)
    X_target_train = X_target[:num_target_train, :]
    X_target_valid = X_target[num_target_train:, :]
    num_target_instances = X_target_train.shape[0]
    model = TwoOutputModel(num_feats, num_hidden_nodes, 2)
    task_loss_fn = nn.BCELoss()
    domain_loss_fn = nn.BCELoss()
    optimizer = optim.SGD(model.parameters(), lr=lr)
    if cuda:
        model.cuda()
        task_loss_fn.cuda()
        domain_loss_fn.cuda()
    model.train()
    for epoch in range(epochs):
        epoch_loss = 0
        selected_source_inds = []
        # Do a training epoch:
        for ind in range(num_train_instances):
            model.zero_grad()
            # Gradually increase the importance of the regularization term
            p = float(ind + epoch * num_train_instances * 2) / (epochs * num_train_instances * 2)
            alpha = 2. / (1. + np.exp(-10 * p)) - 1
            # Randomly select a training instance:
            source_ind = randint(num_train_instances)
            selected_source_inds.append(source_ind)
            source_batch = Variable(FloatTensor(X_task_train[source_ind, :].toarray()))  # read input
            source_task_labels = Variable(FloatTensor([y_task_train[source_ind], ]))  # read task labels
            source_domain_labels = Variable(FloatTensor([0., ]))  # set to 0
            if cuda:
                source_batch = source_batch.cuda()
                source_task_labels = source_task_labels.cuda()
                source_domain_labels = source_domain_labels.cuda()
            # Get the task loss and domain loss for the source instance:
            task_out, source_domain_out = model.forward(source_batch, alpha)
            task_loss = task_loss_fn(task_out, source_task_labels)
            domain_loss = domain_loss_fn(source_domain_out, source_domain_labels)
            # Randomly select a target instance:
            target_ind = randint(num_target_instances)
            target_batch = Variable(FloatTensor(X_target_train[target_ind, :].toarray()))  # read input
            target_domain_labels = Variable(FloatTensor([1., ]))  # set to 1
            if cuda:
                target_batch = target_batch.cuda()
                target_domain_labels = target_domain_labels.cuda()
            # Get the domain loss for the target instances:
            _, target_domain_out = model.forward(target_batch, alpha)
            target_domain_loss = domain_loss_fn(target_domain_out, target_domain_labels)
            # Total loss = task loss + weighted domain-adversarial terms:
            total_loss = task_loss + domain_weight * (domain_loss + target_domain_loss)
            epoch_loss += total_loss
            total_loss.backward()
            optimizer.step()
        # Validation on the held-out source split:
        source_eval_X = X_task_valid
        source_eval_y = y_task_valid
        source_eval_batch = Variable(FloatTensor(source_eval_X.toarray()))
        if cuda:
            # FIX: .cuda() was previously called unconditionally here,
            # crashing on CPU-only machines.
            source_eval_batch = source_eval_batch.cuda()
        source_task_out, source_domain_out = model.forward(source_eval_batch, alpha=0.)
        # source domain is 0, count up predictions where 1 - prediction = 1
        source_domain_preds = np.round(source_domain_out.cpu().data.numpy())
        source_predicted_count = np.sum(1 - source_domain_preds)
        target_eval_X = X_target_valid
        target_eval_batch = Variable(FloatTensor(target_eval_X.toarray()))
        if cuda:
            target_eval_batch = target_eval_batch.cuda()
        _, target_domain_out = model.forward(target_eval_batch, alpha=0.)
        target_domain_preds = np.round(target_domain_out.cpu().data.numpy())
        target_predicted_count = np.sum(target_domain_preds)
        # Domain accuracy over both validation sets combined:
        domain_acc = (source_predicted_count + target_predicted_count) / (source_eval_X.shape[0] + target_eval_X.shape[0])
        source_y_pred = np.round(source_task_out.cpu().data.numpy()[:, 0])
        # predictions of 1 are the positive class: tps are where prediction and gold are 1
        tps = np.sum(source_y_pred * source_eval_y)
        true_preds = source_y_pred.sum()
        true_labels = source_eval_y.sum()
        recall = tps / true_labels
        prec = 1 if tps == 0 else tps / true_preds
        f1 = 2 * recall * prec / (recall + prec)
        print("[Source] Epoch %d: loss=%f\tnum_insts=%d\tdom_acc=%f\tP=%f\tR=%f\tF=%f" % (epoch, epoch_loss, len(source_eval_y), domain_acc, prec, recall, f1))
if __name__ == "__main__":
    main(sys.argv[1:])
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"sklearn.datasets.load_svmlight_file",
"torch.nn.Sequential",
"numpy.exp",
"sys.stderr.write",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"numpy.sum",
"os.path.dirname",
"torch.nn.Linear",
"sys.exit",
"numpy.random.randint",
"torch.nn.LogSoftmax",
... | [((2921, 2946), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2944, 2946), False, 'import torch\n'), ((3248, 3307), 'sys.stderr.write', 'sys.stderr.write', (["('Reading source data from %s\\n' % args[0])"], {}), "('Reading source data from %s\\n' % args[0])\n", (3264, 3307), False, 'import sys\n'), ((3329, 3356), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['args[0]'], {}), '(args[0])\n', (3347, 3356), False, 'from sklearn.datasets import load_svmlight_file\n'), ((3907, 4054), 'sys.stderr.write', 'sys.stderr.write', (["('using domain %s as source, %s as target\\n' % (feature_map[domain_inds[\n direction]], feature_map[domain_inds[1 - direction]]))"], {}), "('using domain %s as source, %s as target\\n' % (feature_map\n [domain_inds[direction]], feature_map[domain_inds[1 - direction]]))\n", (3923, 4054), False, 'import sys\n'), ((5290, 5302), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5300, 5302), True, 'import torch.nn as nn\n'), ((5324, 5336), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5334, 5336), True, 'import torch.nn as nn\n'), ((967, 982), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (980, 982), True, 'import torch.nn as nn\n'), ((1387, 1402), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1400, 1402), True, 'import torch.nn as nn\n'), ((2070, 2085), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2083, 2085), True, 'import torch.nn as nn\n'), ((2812, 2887), 'sys.stderr.write', 'sys.stderr.write', (['"""Required arguments: <data file> [backward True|False]\n"""'], {}), "('Required arguments: <data file> [backward True|False]\\n')\n", (2828, 2887), False, 'import sys\n'), ((2896, 2908), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2904, 2908), False, 'import sys\n'), ((8626, 8657), 'numpy.sum', 'np.sum', (['(1 - source_domain_preds)'], {}), '(1 - source_domain_preds)\n', (8632, 8657), True, 'import numpy as np\n'), ((8993, 9020), 'numpy.sum', 
'np.sum', (['target_domain_preds'], {}), '(target_domain_preds)\n', (8999, 9020), True, 'import numpy as np\n'), ((9333, 9370), 'numpy.sum', 'np.sum', (['(source_y_pred * source_eval_y)'], {}), '(source_y_pred * source_eval_y)\n', (9339, 9370), True, 'import numpy as np\n'), ((1030, 1069), 'torch.nn.Linear', 'nn.Linear', (['input_features', 'hidden_nodes'], {}), '(input_features, hidden_nodes)\n', (1039, 1069), True, 'import torch.nn as nn\n'), ((1111, 1124), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1118, 1124), True, 'import torch.nn as nn\n'), ((2145, 2171), 'torch.nn.Linear', 'nn.Linear', (['hidden_nodes', '(1)'], {}), '(hidden_nodes, 1)\n', (2154, 2171), True, 'import torch.nn as nn\n'), ((2316, 2328), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2326, 2328), True, 'import torch.nn as nn\n'), ((3583, 3599), 'os.path.dirname', 'dirname', (['args[0]'], {}), '(args[0])\n', (3590, 3599), False, 'from os.path import join, exists, dirname\n'), ((3715, 3731), 'os.path.dirname', 'dirname', (['args[0]'], {}), '(args[0])\n', (3722, 3731), False, 'from os.path import join, exists, dirname\n'), ((6062, 6090), 'numpy.random.randint', 'randint', (['num_train_instances'], {}), '(num_train_instances)\n', (6069, 6090), False, 'from numpy.random import randint\n'), ((7091, 7120), 'numpy.random.randint', 'randint', (['num_target_instances'], {}), '(num_target_instances)\n', (7098, 7120), False, 'from numpy.random import randint\n'), ((1652, 1688), 'torch.nn.Linear', 'nn.Linear', (['hidden_nodes', 'num_outputs'], {}), '(hidden_nodes, num_outputs)\n', (1661, 1688), True, 'import torch.nn as nn\n'), ((1750, 1765), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {}), '()\n', (1763, 1765), True, 'import torch.nn as nn\n'), ((1840, 1866), 'torch.nn.Linear', 'nn.Linear', (['hidden_nodes', '(1)'], {}), '(hidden_nodes, 1)\n', (1849, 1866), True, 'import torch.nn as nn\n'), ((1928, 1940), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1938, 1940), True, 'import 
torch.nn as nn\n'), ((6366, 6405), 'torch.FloatTensor', 'FloatTensor', (['[y_task_train[source_ind]]'], {}), '([y_task_train[source_ind]])\n', (6377, 6405), False, 'from torch import FloatTensor\n'), ((6470, 6488), 'torch.FloatTensor', 'FloatTensor', (['[0.0]'], {}), '([0.0])\n', (6481, 6488), False, 'from torch import FloatTensor\n'), ((7350, 7368), 'torch.FloatTensor', 'FloatTensor', (['[1.0]'], {}), '([1.0])\n', (7361, 7368), False, 'from torch import FloatTensor\n'), ((5963, 5978), 'numpy.exp', 'np.exp', (['(-10 * p)'], {}), '(-10 * p)\n', (5969, 5978), True, 'import numpy as np\n')] |
import numpy as np
def dist(x, y, norm=2):
    """Pairwise distances between the rows of x (N x D) and y (M x D).

    Returns an (N, M) array: *squared* Euclidean distances for norm=2,
    Manhattan (L1) distances for norm=1.
    """
    assert x.shape[1] == y.shape[1]
    # Broadcast (N,1,D) against (1,M,D) to form every pairwise difference.
    diff = x[:, np.newaxis, :] - y[np.newaxis, :, :]
    if norm == 2:
        return np.power(diff, norm).sum(-1)  # (n,m,d)->(n,m)
    if norm == 1:
        return np.abs(diff).sum(-1)
    raise ValueError(f"Arg '{norm}' only supporting L2 & L1 norm temporarily, either 1 or 2.")
# One-Hot
def to_categorical(y, nb_classes=None):
    """Convert a vector of integer class labels to a binary class matrix
    (one-hot encoding), e.g. for use with categorical_crossentropy.

    # Arguments
        y: integer labels (from 0 to nb_classes - 1).
        nb_classes: total number of classes; inferred from y when omitted.
    # Returns
        A (len(y), nb_classes) binary matrix representation of the input.
    """
    labels = np.array(y, dtype='int').ravel()
    if not nb_classes:
        # Infer the class count from the largest label seen.
        nb_classes = np.max(labels) + 1
    num_samples = labels.shape[0]
    onehot = np.zeros((num_samples, nb_classes))
    onehot[np.arange(num_samples), labels] = 1
    return onehot
# convert probability to classes
def probas_to_classes(y_pred):
    """Collapse predicted probabilities to class indices: argmax over the
    class axis for multi-class output, a 0.5 threshold otherwise."""
    is_multiclass = len(y_pred.shape) > 1 and y_pred.shape[1] > 1
    if is_multiclass:
        return y_pred.argmax(axis=1)
    return np.array([1 if p > 0.5 else 0 for p in y_pred])
| [
"numpy.abs",
"numpy.power",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.expand_dims",
"numpy.arange"
] | [((167, 192), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (181, 192), True, 'import numpy as np\n'), ((219, 244), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (233, 244), True, 'import numpy as np\n'), ((1145, 1170), 'numpy.zeros', 'np.zeros', (['(n, nb_classes)'], {}), '((n, nb_classes))\n', (1153, 1170), True, 'import numpy as np\n'), ((1419, 1468), 'numpy.array', 'np.array', (['[(1 if p > 0.5 else 0) for p in y_pred]'], {}), '([(1 if p > 0.5 else 0) for p in y_pred])\n', (1427, 1468), True, 'import numpy as np\n'), ((1382, 1407), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (1391, 1407), True, 'import numpy as np\n'), ((1017, 1041), 'numpy.array', 'np.array', (['y'], {'dtype': '"""int"""'}), "(y, dtype='int')\n", (1025, 1041), True, 'import numpy as np\n'), ((1094, 1103), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1100, 1103), True, 'import numpy as np\n'), ((1187, 1199), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1196, 1199), True, 'import numpy as np\n'), ((465, 485), 'numpy.power', 'np.power', (['temp', 'norm'], {}), '(temp, norm)\n', (473, 485), True, 'import numpy as np\n'), ((547, 559), 'numpy.abs', 'np.abs', (['temp'], {}), '(temp)\n', (553, 559), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from .Qt import QtCore, QtGui
from .Vector import Vector
from .SRTTransform import SRTTransform
import pyqtgraph as pg
import numpy as np
import scipy.linalg
class SRTTransform3D(pg.Transform3D):
    """4x4 Transform matrix that can always be represented as a combination of 3 matrices: scale * rotate * translate
    This transform has no shear; angles are always preserved.
    """
    def __init__(self, init=None):
        """Build the transform from *init*: None (identity), a saved-state
        dict, an SRTTransform / SRTTransform3D, a QTransform, or a
        QMatrix4x4 (decomposed via setFromMatrix)."""
        pg.Transform3D.__init__(self)
        self.reset()
        if init is None:
            return
        if init.__class__ is QtGui.QTransform:
            # promote a 2D QTransform to an SRTTransform first
            init = SRTTransform(init)
        if isinstance(init, dict):
            self.restoreState(init)
        elif isinstance(init, SRTTransform3D):
            self._state = {
                'pos': Vector(init._state['pos']),
                'scale': Vector(init._state['scale']),
                'angle': init._state['angle'],
                'axis': Vector(init._state['axis']),
            }
            self.update()
        elif isinstance(init, SRTTransform):
            # 2D transform: lift into 3D with a z-axis rotation and unit z-scale.
            self._state = {
                'pos': Vector(init._state['pos']),
                'scale': Vector(init._state['scale']),
                'angle': init._state['angle'],
                'axis': Vector(0, 0, 1),
            }
            self._state['scale'][2] = 1.0
            self.update()
        elif isinstance(init, QtGui.QMatrix4x4):
            self.setFromMatrix(init)
        else:
            raise Exception("Cannot build SRTTransform3D from argument type:", type(init))
    def getScale(self):
        """Return the scale component as a Vector."""
        return pg.Vector(self._state['scale'])
    def getRotation(self):
        """Return (angle, axis) of rotation"""
        return self._state['angle'], pg.Vector(self._state['axis'])
    def getTranslation(self):
        """Return the translation component as a Vector."""
        return pg.Vector(self._state['pos'])
    def reset(self):
        """Reset to the identity transform."""
        self._state = {
            'pos': Vector(0,0,0),
            'scale': Vector(1,1,1),
            'angle': 0.0,   ## in degrees
            'axis': (0, 0, 1)
        }
        self.update()
    def translate(self, *args):
        """Adjust the translation of this transform"""
        t = Vector(*args)
        self.setTranslate(self._state['pos']+t)
    def setTranslate(self, *args):
        """Set the translation of this transform"""
        self._state['pos'] = Vector(*args)
        self.update()
    def scale(self, *args):
        """adjust the scale of this transform"""
        ## try to prevent accidentally setting 0 scale on z axis
        if len(args) == 1 and hasattr(args[0], '__len__'):
            args = args[0]
        if len(args) == 2:
            args = args + (1,)
        s = Vector(*args)
        self.setScale(self._state['scale'] * s)
    def setScale(self, *args):
        """Set the scale of this transform"""
        if len(args) == 1 and hasattr(args[0], '__len__'):
            args = args[0]
        if len(args) == 2:
            ## a 2-vector implies unit z-scale
            args = args + (1,)
        self._state['scale'] = Vector(*args)
        self.update()
    def rotate(self, angle, axis=(0,0,1)):
        """Adjust the rotation of this transform"""
        origAxis = self._state['axis']
        if axis[0] == origAxis[0] and axis[1] == origAxis[1] and axis[2] == origAxis[2]:
            ## same axis: angles simply add
            self.setRotate(self._state['angle'] + angle)
        else:
            ## different axis: compose matrices and re-decompose the result
            m = QtGui.QMatrix4x4()
            m.translate(*self._state['pos'])
            m.rotate(self._state['angle'], *self._state['axis'])
            m.rotate(angle, *axis)
            m.scale(*self._state['scale'])
            self.setFromMatrix(m)
    def setRotate(self, angle, axis=(0,0,1)):
        """Set the transformation rotation to angle (in degrees)"""
        self._state['angle'] = angle
        self._state['axis'] = Vector(axis)
        self.update()
    def setFromMatrix(self, m):
        """
        Set this transform based on the elements of *m*
        The input matrix must be affine AND have no shear,
        otherwise the conversion will most likely fail.
        """
        for i in range(4):
            self.setRow(i, m.row(i))
        m = self.matrix().reshape(4,4)
        ## translation is 4th column
        self._state['pos'] = m[:3,3]
        ## scale is vector-length of first three columns
        scale = (m[:3,:3]**2).sum(axis=0)**0.5
        ## see whether there is an inversion
        z = np.cross(m[0, :3], m[1, :3])
        if np.dot(z, m[2, :3]) < 0:
            scale[1] *= -1  ## doesn't really matter which axis we invert
        self._state['scale'] = scale
        ## rotation axis is the eigenvector with eigenvalue=1
        r = m[:3, :3] / scale[:, np.newaxis]
        try:
            evals, evecs = scipy.linalg.eig(r)
        except:
            # NOTE(review): bare except intentionally dumps diagnostics and
            # re-raises; consider narrowing to Exception.
            print("Rotation matrix: %s" % str(r))
            print("Scale: %s" % str(scale))
            print("Original matrix: %s" % str(m))
            raise
        eigIndex = np.argwhere(np.abs(evals-1) < 1e-6)
        if len(eigIndex) < 1:
            print("eigenvalues: %s" % str(evals))
            print("eigenvectors: %s" % str(evecs))
            print("index: %s, %s" % (str(eigIndex), str(evals-1)))
            raise Exception("Could not determine rotation axis.")
        axis = evecs[:,eigIndex[0,0]].real
        axis /= ((axis**2).sum())**0.5
        self._state['axis'] = axis
        ## trace(r) == 2 cos(angle) + 1, so:
        cos = (r.trace()-1)*0.5  ## this only gets us abs(angle)
        ## The off-diagonal values can be used to correct the angle ambiguity,
        ## but we need to figure out which element to use:
        axisInd = np.argmax(np.abs(axis))
        rInd,sign = [((1,2), -1), ((0,2), 1), ((0,1), -1)][axisInd]
        ## Then we have r-r.T = sin(angle) * 2 * sign * axis[axisInd];
        ## solve for sin(angle)
        sin = (r-r.T)[rInd] / (2. * sign * axis[axisInd])
        ## finally, we get the complete angle from arctan(sin/cos)
        self._state['angle'] = np.arctan2(sin, cos) * 180 / np.pi
        if self._state['angle'] == 0:
            ## zero rotation: normalize the axis to the canonical z-axis
            self._state['axis'] = (0,0,1)
    def as2D(self):
        """Return a QTransform representing the x,y portion of this transform (if possible)"""
        return pg.SRTTransform(self)
    #def __div__(self, t):
        #"""A / B == B^-1 * A"""
        #dt = t.inverted()[0] * self
        #return SRTTransform(dt)
    #def __mul__(self, t):
        #return SRTTransform(QtGui.QTransform.__mul__(self, t))
    def saveState(self):
        """Return a serializable dict of the transform's pos/scale/angle/axis."""
        p = self._state['pos']
        s = self._state['scale']
        ax = self._state['axis']
        #if s[0] == 0:
            #raise Exception('Invalid scale: %s' % str(s))
        return {
            'pos': (p[0], p[1], p[2]),
            'scale': (s[0], s[1], s[2]),
            'angle': self._state['angle'],
            'axis': (ax[0], ax[1], ax[2])
        }
    def restoreState(self, state):
        """Restore from a dict produced by saveState(); missing keys fall
        back to identity defaults."""
        self._state['pos'] = Vector(state.get('pos', (0.,0.,0.)))
        ## scale tuple is padded with 1.0 up to length 3
        scale = state.get('scale', (1.,1.,1.))
        scale = tuple(scale) + (1.,) * (3-len(scale))
        self._state['scale'] = Vector(scale)
        self._state['angle'] = state.get('angle', 0.)
        self._state['axis'] = state.get('axis', (0, 0, 1))
        self.update()
    def update(self):
        """Rebuild the underlying 4x4 matrix from the current state."""
        pg.Transform3D.setToIdentity(self)
        ## modifications to the transform are multiplied on the right, so we need to reverse order here.
        pg.Transform3D.translate(self, *self._state['pos'])
        pg.Transform3D.rotate(self, self._state['angle'], *self._state['axis'])
        pg.Transform3D.scale(self, *self._state['scale'])
    def __repr__(self):
        return str(self.saveState())
    def matrix(self, nd=3):
        """Return the transform as a numpy array: the full 4x4 matrix for
        nd=3, or the equivalent 3x3 2D matrix for nd=2."""
        if nd == 3:
            return np.array(self.copyDataTo()).reshape(4,4)
        elif nd == 2:
            m = np.array(self.copyDataTo()).reshape(4,4)
            m[2] = m[3]
            m[:,2] = m[:,3]
            return m[:3,:3]
        else:
            raise Exception("Argument 'nd' must be 2 or 3")
if __name__ == '__main__':
    # Interactive demo: visualize SRTTransform behavior by attaching a
    # draggable ROI's global transform to a marker item in a Qt scene.
    import widgets
    import GraphicsView
    from functions import *
    app = QtGui.QApplication([])
    win = QtGui.QMainWindow()
    win.show()
    cw = GraphicsView.GraphicsView()
    #cw.enableMouse()
    win.setCentralWidget(cw)
    s = QtGui.QGraphicsScene()
    cw.setScene(s)
    win.resize(600,600)
    cw.enableMouse()
    cw.setRange(QtCore.QRectF(-100., -100., 200., 200.))
    class Item(QtGui.QGraphicsItem):
        # Simple crosshair + labelled box marker used to show the applied transform.
        def __init__(self):
            QtGui.QGraphicsItem.__init__(self)
            self.b = QtGui.QGraphicsRectItem(20, 20, 20, 20, self)
            self.b.setPen(QtGui.QPen(mkPen('y')))
            self.t1 = QtGui.QGraphicsTextItem(self)
            self.t1.setHtml('<span style="color: #F00">R</span>')
            self.t1.translate(20, 20)
            self.l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0, self)
            self.l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10, self)
            self.l1.setPen(QtGui.QPen(mkPen('y')))
            self.l2.setPen(QtGui.QPen(mkPen('y')))
        def boundingRect(self):
            # empty rect: children draw themselves
            return QtCore.QRectF()
        def paint(self, *args):
            pass
    #s.addItem(b)
    #s.addItem(t1)
    item = Item()
    s.addItem(item)
    # fixed red crosshair marking the scene origin
    l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0)
    l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10)
    l1.setPen(QtGui.QPen(mkPen('r')))
    l2.setPen(QtGui.QPen(mkPen('r')))
    s.addItem(l1)
    s.addItem(l2)
    # console checks of transform composition / conversion
    tr1 = SRTTransform()
    tr2 = SRTTransform()
    tr3 = QtGui.QTransform()
    tr3.translate(20, 0)
    tr3.rotate(45)
    print("QTransform -> Transform: %s" % str(SRTTransform(tr3)))
    print("tr1: %s" % str(tr1))
    tr2.translate(20, 0)
    tr2.rotate(45)
    print("tr2: %s" % str(tr2))
    dt = tr2/tr1
    print("tr2 / tr1 = %s" % str(dt))
    print("tr2 * tr1 = %s" % str(tr2*tr1))
    tr4 = SRTTransform()
    tr4.scale(-1, 1)
    tr4.rotate(30)
    print("tr1 * tr4 = %s" % str(tr1*tr4))
    w1 = widgets.TestROI((19,19), (22, 22), invertible=True)
    #w2 = widgets.TestROI((0,0), (150, 150))
    w1.setZValue(10)
    s.addItem(w1)
    #s.addItem(w2)
    w1Base = w1.getState()
    #w2Base = w2.getState()
    def update():
        # re-apply the ROI's global transform to the marker on each change
        tr1 = w1.getGlobalTransform(w1Base)
        #tr2 = w2.getGlobalTransform(w2Base)
        item.setTransform(tr1)
    #def update2():
        #tr1 = w1.getGlobalTransform(w1Base)
        #tr2 = w2.getGlobalTransform(w2Base)
        #t1.setTransform(tr1)
        #w1.setState(w1Base)
        #w1.applyGlobalTransform(tr2)
    w1.sigRegionChanged.connect(update)
    #w2.sigRegionChanged.connect(update2)
| [
"numpy.abs",
"pyqtgraph.Transform3D.__init__",
"GraphicsView.GraphicsView",
"numpy.cross",
"pyqtgraph.Transform3D.translate",
"pyqtgraph.Transform3D.setToIdentity",
"pyqtgraph.Vector",
"widgets.TestROI",
"pyqtgraph.Transform3D.scale",
"numpy.dot",
"pyqtgraph.Transform3D.rotate",
"numpy.arctan2... | [((8374, 8401), 'GraphicsView.GraphicsView', 'GraphicsView.GraphicsView', ([], {}), '()\n', (8399, 8401), False, 'import GraphicsView\n'), ((10192, 10244), 'widgets.TestROI', 'widgets.TestROI', (['(19, 19)', '(22, 22)'], {'invertible': '(True)'}), '((19, 19), (22, 22), invertible=True)\n', (10207, 10244), False, 'import widgets\n'), ((452, 481), 'pyqtgraph.Transform3D.__init__', 'pg.Transform3D.__init__', (['self'], {}), '(self)\n', (475, 481), True, 'import pyqtgraph as pg\n'), ((1622, 1653), 'pyqtgraph.Vector', 'pg.Vector', (["self._state['scale']"], {}), "(self._state['scale'])\n", (1631, 1653), True, 'import pyqtgraph as pg\n'), ((1859, 1888), 'pyqtgraph.Vector', 'pg.Vector', (["self._state['pos']"], {}), "(self._state['pos'])\n", (1868, 1888), True, 'import pyqtgraph as pg\n'), ((4487, 4515), 'numpy.cross', 'np.cross', (['m[0, :3]', 'm[1, :3]'], {}), '(m[0, :3], m[1, :3])\n', (4495, 4515), True, 'import numpy as np\n'), ((6361, 6382), 'pyqtgraph.SRTTransform', 'pg.SRTTransform', (['self'], {}), '(self)\n', (6376, 6382), True, 'import pyqtgraph as pg\n'), ((7430, 7464), 'pyqtgraph.Transform3D.setToIdentity', 'pg.Transform3D.setToIdentity', (['self'], {}), '(self)\n', (7458, 7464), True, 'import pyqtgraph as pg\n'), ((7578, 7629), 'pyqtgraph.Transform3D.translate', 'pg.Transform3D.translate', (['self', "*self._state['pos']"], {}), "(self, *self._state['pos'])\n", (7602, 7629), True, 'import pyqtgraph as pg\n'), ((7638, 7709), 'pyqtgraph.Transform3D.rotate', 'pg.Transform3D.rotate', (['self', "self._state['angle']", "*self._state['axis']"], {}), "(self, self._state['angle'], *self._state['axis'])\n", (7659, 7709), True, 'import pyqtgraph as pg\n'), ((7718, 7767), 'pyqtgraph.Transform3D.scale', 'pg.Transform3D.scale', (['self', "*self._state['scale']"], {}), "(self, *self._state['scale'])\n", (7738, 7767), True, 'import pyqtgraph as pg\n'), ((1774, 1804), 'pyqtgraph.Vector', 'pg.Vector', (["self._state['axis']"], {}), "(self._state['axis'])\n", 
(1783, 1804), True, 'import pyqtgraph as pg\n'), ((4527, 4546), 'numpy.dot', 'np.dot', (['z', 'm[2, :3]'], {}), '(z, m[2, :3])\n', (4533, 4546), True, 'import numpy as np\n'), ((5748, 5760), 'numpy.abs', 'np.abs', (['axis'], {}), '(axis)\n', (5754, 5760), True, 'import numpy as np\n'), ((5048, 5065), 'numpy.abs', 'np.abs', (['(evals - 1)'], {}), '(evals - 1)\n', (5054, 5065), True, 'import numpy as np\n'), ((6107, 6127), 'numpy.arctan2', 'np.arctan2', (['sin', 'cos'], {}), '(sin, cos)\n', (6117, 6127), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 12:22:38 2020
@author: emc1977
"""
import math as math
def function ( x ):
    """Objective for the global minimizer: f(x) = x**2 / 10 - 2*sin(x).

    The sine term is negated so that minimising this function is
    equivalent to maximising 2*sin(x) - x**2/10.
    """
    return -2*math.sin(x)+(x**2)/10
def glomin ( a, b, c, m, machep, e, t, f ):
  """Estimate the global minimum of f on [a, b] (Brent's GLOMIN algorithm).

  Parameters:
    a, b   - interval endpoints (requires a < b)
    c      - initial guess for the minimizer
    m      - bound on the magnitude of the second derivative of f over [a, b]
    machep - machine epsilon
    e, t   - absolute error tolerances
    f      - the objective function of one variable

  Returns:
    x, fx  - the estimated minimizer and its function value
  """
  import numpy as np
  a0 = b
  x = a0
  a2 = a
  y0 = f ( b )
  yb = y0
  y2 = f ( a )
  y = y2
  # start from the better of the two endpoints
  if ( y0 < y ):
    y = y0
  else:
    x = a
  # degenerate interval or non-positive curvature bound: nothing to search
  if ( m <= 0.0 or b <= a ):
    fx = y
    return x, fx
  # slightly inflated curvature bound used in the acceptance tests below
  m2 = 0.5 * ( 1.0 + 16.0 * machep ) * m
  if ( c <= a or b <= c ):
    sc = 0.5 * ( a + b )
  else:
    sc = c
  y1 = f ( sc )
  k = 3
  d0 = a2 - sc
  h = 9.0 / 11.0
  if ( y1 < y ):
    x = sc
    y = y1
  # main loop: each pass advances the left edge a2 toward b while tracking
  # the best point (x, y) seen so far
  while ( True ):
    d1 = a2 - a0
    d2 = sc - a0
    z2 = b - a2
    z0 = y2 - y1
    z1 = y2 - y0
    r = d1 * d1 * z0 - d0 * d0 * z1
    p = r
    qs = 2.0 * ( d0 * z1 - d1 * z0 )
    q = qs
    # probe pseudo-randomly spaced trial points inside [a2, b]
    if ( k < 1000000 or y2 <= y ):
      while ( True ):
        if ( q * ( r * ( yb - y2 ) + z2 * q * ( ( y2 - y ) + t ) ) < \
          z2 * m2 * r * ( z2 * q - r ) ):
          a3 = a2 + r / q
          y3 = f ( a3 )
          if ( y3 < y ):
            x = a3
            y = y3
        k = ( ( 1611 * k ) % 1048576 )
        q = 1.0
        r = ( b - a ) * 0.00001 * float ( k )
        if ( z2 <= r ):
          break
    else:
      k = ( ( 1611 * k ) % 1048576 )
      q = 1.0
      r = ( b - a ) * 0.00001 * float ( k )
      while ( r < z2 ):
        if ( q * ( r * ( yb - y2 ) + z2 * q * ( ( y2 - y ) + t ) ) < \
          z2 * m2 * r * ( z2 * q - r ) ):
          a3 = a2 + r / q
          y3 = f ( a3 )
          if ( y3 < y ):
            x = a3
            y = y3
        k = ( ( 1611 * k ) % 1048576 )
        q = 1.0
        r = ( b - a ) * 0.00001 * float ( k )
    # parabolic step based on the three retained points
    r = m2 * d0 * d1 * d2
    s = np.sqrt ( ( ( y2 - y ) + t ) / m2 )
    h = 0.5 * ( 1.0 + h )
    p = h * ( p + 2.0 * r * s )
    q = q + 0.5 * qs
    r = - 0.5 * ( d0 + ( z0 + 2.01 * e ) / ( d0 * m2 ) )
    if ( r < s or d0 < 0.0 ):
      r = a2 + s
    else:
      r = a2 + r
    if ( 0.0 < p * q ):
      a3 = a2 + p / q
    else:
      a3 = r
    # accept or shrink the step until the exclusion tests pass
    while ( True ):
      a3 = max ( a3, r )
      if ( b <= a3 ):
        a3 = b
        y3 = yb
      else:
        y3 = f ( a3 )
      if ( y3 < y ):
        x = a3
        y = y3
      d0 = a3 - a2
      if ( a3 <= r ):
        break
      p = 2.0 * ( y2 - y3 ) / ( m * d0 )
      if ( ( 1.0 + 9.0 * machep ) * d0 <= abs ( p ) ):
        break
      if ( 0.5 * m2 * ( d0 * d0 + p * p ) <= ( y2 - y ) + ( y3 - y ) + 2.0 * t ):
        break
      a3 = 0.5 * ( a2 + a3 )
      h = 0.9 * h
    if ( b <= a3 ):
      break
    # shift the retained points and continue
    a0 = sc
    sc = a2
    a2 = a3
    y0 = y1
    y1 = y2
    y2 = y3
  fx = y
  print(x,fx)
  return x, fx
def glomin_test ( ):
  """Demonstrate glomin() on the test objective over [0.1, 2]."""
  import numpy as np
  # Tolerances derived from machine epsilon.
  eps = 2.220446049250313E-016
  abs_err = np.sqrt ( eps )
  tol = np.sqrt ( eps )
  # Search interval, midpoint guess, and second-derivative bound.
  lo = 0.1
  hi = 2
  guess = ( lo + hi ) / 2.0
  curvature = 3
  example_test ( lo, hi, guess, curvature, eps, abs_err, tol, function, '.' )
  # Terminate.
  print ( '' )
  print ( 'GLOMIN_TEST' )
  print ( '  Normal end of execution.' )
  return
def example_test ( a, b, c, m, machep, e, t, f, title ):
  """Run glomin() on f over [a, b] and print a small report comparing
  the endpoint values with the value at the located minimizer."""
  best_x, best_fx = glomin ( a, b, c, m, machep, e, t, f )
  # Endpoint values for comparison.
  f_at_a = f ( a )
  f_at_b = f ( b )
  print ( '' )
  print ( '  %s' % ( title ) )
  print ( '' )
  print ( '      A                 X             B' )
  print ( '    F(A)              F(X)          F(B)' )
  print ( '' )
  print ( '  %14f  %14f  %14f' % ( a, best_x, b ) )
  print ( '  %14e  %14e  %14e' % ( f_at_a, best_fx, f_at_b ) )
  return
def timestamp ( ):
  """Print the current wall-clock date and time; returns None."""
  import time
  now = time.time ( )
  print ( time.ctime ( now ) )
  return None
def timestamp_test ( ):
  """Exercise timestamp() and report the Python version in use."""
  import platform
  banner = 'TIMESTAMP_TEST:'
  print ( '' )
  print ( banner )
  print ( '  Python version: %s' % ( platform.python_version ( ) ) )
  print ( '  TIMESTAMP prints a timestamp of the current date and time.' )
  print ( '' )
  timestamp ( )
  # Terminate
  print ( '' )
  print ( banner )
  print ( '  Normal end of execution.' )
  return
if ( __name__ == '__main__' ):
  # Run the demo: timestamps bracket the global-minimization test.
  timestamp ( )
  glomin_test ( )
  timestamp ( )
| [
"time.ctime",
"numpy.sqrt",
"math.sin",
"time.time",
"platform.python_version"
] | [((3104, 3119), 'numpy.sqrt', 'np.sqrt', (['machep'], {}), '(machep)\n', (3111, 3119), True, 'import numpy as np\n'), ((3130, 3145), 'numpy.sqrt', 'np.sqrt', (['machep'], {}), '(machep)\n', (3137, 3145), True, 'import numpy as np\n'), ((3881, 3892), 'time.time', 'time.time', ([], {}), '()\n', (3890, 3892), False, 'import time\n'), ((2017, 2043), 'numpy.sqrt', 'np.sqrt', (['((y2 - y + t) / m2)'], {}), '((y2 - y + t) / m2)\n', (2024, 2043), True, 'import numpy as np\n'), ((3906, 3919), 'time.ctime', 'time.ctime', (['t'], {}), '(t)\n', (3916, 3919), False, 'import time\n'), ((347, 358), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (355, 358), True, 'import math as math\n'), ((4077, 4102), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (4100, 4102), False, 'import platform\n')] |
import json
import numpy
import time
import pyspark
from azureml.core.model import Model
from pyspark.ml import PipelineModel
from azureml.monitoring import ModelDataCollector
from mmlspark import LightGBMRegressor
from mmlspark import LightGBMRegressionModel
def init():
    """One-time service start-up: create the Spark session, load the trained
    pipeline model, and set up data collectors for inputs and predictions.

    On failure the exception is stored in the global ``trainedModel`` so
    that run() can report it instead of crashing the scoring service.
    """
    try:
        global trainedModel, spark, inputs_dc, prediction_dc
        model_name = "{model_name}"  # placeholder, interpolated at deploy time
        # Spark session hosting the pipeline model.
        spark = (pyspark.sql.SparkSession.builder
                 .appName("AML Production Model")
                 .getOrCreate())
        # Collectors that persist raw inputs and model outputs to blob storage.
        inputs_dc = ModelDataCollector(model_name, identifier="inputs",
                                       feature_names=["json_input_data"])
        prediction_dc = ModelDataCollector(model_name, identifier="predictions",
                                           feature_names=["predictions"])
        trainedModel = PipelineModel.load(Model.get_model_path(model_name))
    except Exception as e:
        # Defer the failure: run() will surface it on the first request.
        trainedModel = e
def run(input_json):
    """Score a JSON batch of records with the loaded pipeline model.

    :param input_json: JSON string encoding a list of input records
    :return: JSON string: {"result": "<comma-separated predictions>"} on
        success, {"trainedModel": "<error>"} if init() failed, or the
        scoring error text otherwise.
    """
    if isinstance(trainedModel, Exception):
        # init() failed; report the stored error instead of scoring.
        return json.dumps({"trainedModel": str(trainedModel)})
    try:
        sc = spark.sparkContext
        input_list = json.loads(input_json)
        input_rdd = sc.parallelize(input_list)
        input_df = spark.read.json(input_rdd)
        # Compute predictions for the whole batch
        prediction = trainedModel.transform(input_df)
        predictions = prediction.collect()
        # Get each scored result
        preds = [str(x['prediction']) for x in predictions]
        result = ",".join(preds)
        # FIX: reuse the already-parsed payload instead of re-parsing input_json
        data = numpy.array(input_list)
        print("saving input data" + time.strftime("%H:%M:%S"))
        # this call is saving our input data into our blob
        inputs_dc.collect(data)
        # this call is saving our prediction data into our blob
        prediction_dc.collect(predictions)
    except Exception as e:
        result = str(e)
    return json.dumps({"result": result})
| [
"json.loads",
"pyspark.ml.PipelineModel.load",
"azureml.monitoring.ModelDataCollector",
"json.dumps",
"time.strftime",
"azureml.core.model.Model.get_model_path",
"numpy.array",
"pyspark.sql.SparkSession.builder.appName"
] | [((2187, 2217), 'json.dumps', 'json.dumps', (["{'result': result}"], {}), "({'result': result})\n", (2197, 2217), False, 'import json\n'), ((512, 603), 'azureml.monitoring.ModelDataCollector', 'ModelDataCollector', (['model_name'], {'identifier': '"""inputs"""', 'feature_names': "['json_input_data']"}), "(model_name, identifier='inputs', feature_names=[\n 'json_input_data'])\n", (530, 603), False, 'from azureml.monitoring import ModelDataCollector\n'), ((701, 793), 'azureml.monitoring.ModelDataCollector', 'ModelDataCollector', (['model_name'], {'identifier': '"""predictions"""', 'feature_names': "['predictions']"}), "(model_name, identifier='predictions', feature_names=[\n 'predictions'])\n", (719, 793), False, 'from azureml.monitoring import ModelDataCollector\n'), ((1005, 1037), 'azureml.core.model.Model.get_model_path', 'Model.get_model_path', (['model_name'], {}), '(model_name)\n', (1025, 1037), False, 'from azureml.core.model import Model\n'), ((1061, 1091), 'pyspark.ml.PipelineModel.load', 'PipelineModel.load', (['model_path'], {}), '(model_path)\n', (1079, 1091), False, 'from pyspark.ml import PipelineModel\n'), ((1337, 1359), 'json.loads', 'json.loads', (['input_json'], {}), '(input_json)\n', (1347, 1359), False, 'import json\n'), ((1808, 1830), 'json.loads', 'json.loads', (['input_json'], {}), '(input_json)\n', (1818, 1830), False, 'import json\n'), ((1846, 1863), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (1857, 1863), False, 'import numpy\n'), ((892, 956), 'pyspark.sql.SparkSession.builder.appName', 'pyspark.sql.SparkSession.builder.appName', (['"""AML Production Model"""'], {}), "('AML Production Model')\n", (932, 956), False, 'import pyspark\n'), ((1900, 1925), 'time.strftime', 'time.strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (1913, 1925), False, 'import time\n')] |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from distutils.version import LooseVersion
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
class TestLogSoftmax(CommonTFLayerTest):
    """Layer test: TF LogSoftmax against the MO/IR reference decomposition."""

    def create_log_softmax_net(self, shape, reduction_axis, ir_version, use_new_frontend):
        """
        Tensorflow net IR net
        Input->LogSoftmax => Input->Softmax->Log
        """
        #
        # Create Tensorflow model
        #
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        # Create the graph and model
        with tf.compat.v1.Session() as sess:
            # Convert the requested (framework-order) shape to the layout the
            # frontend expects; helper comes from common.utils.tf_utils.
            tf_x_shape = shape.copy()
            tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
            input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
            # NOTE(review): both branches pass the same keyword arguments, just
            # in a different order -- the TF1/TF2 split looks vestigial.
            if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
                tf.nn.log_softmax(input, name='Operation', axis=reduction_axis)
            else:
                tf.nn.log_softmax(input, axis=reduction_axis, name='Operation')
            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def
        ref_net = None
        reduce_sum_shape = np.copy(shape)
        rank = len(shape)
        if rank in {4, 5}:
            # Normalize a negative axis, then remap it from framework layout
            # to the IR layout (channels moved to position 1) -- presumably
            # NHWC/NDHWC -> NCHW/NCDHW; verify against the conversion rules.
            reduction_axis = reduction_axis if reduction_axis >= 0 else rank + reduction_axis
            if rank == 4:
                reduction_axis = {0: 0, 1: 2, 2: 3, 3: 1}[reduction_axis]
            else:
                reduction_axis = {0: 0, 1: 2, 2: 3, 3: 4, 4: 1}[reduction_axis]
        # The reduced dimension collapses to 1 (keep_dims=True in the ref net).
        reduce_sum_shape[reduction_axis] = 1
        converted_shape = shape if rank != 1 else shape[0]
        if check_ir_version(10, None, ir_version) and not use_new_frontend:
            # Reference graph implements the numerically stable decomposition
            #   log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x))))
            # as ReduceMax -> Subtract -> Exp -> ReduceSum -> Log -> Subtract.
            ref_nodes_attributes = {
                'input': {'kind': 'op', 'type': 'Parameter', 'shape': converted_shape},
                'input_data': {'shape': shape, 'kind': 'data', 'value': None},
                'reduce_max_axis_val': {'shape': int64_array([reduction_axis]).shape,
                                        'kind': 'data',
                                        'value': int64_array([reduction_axis])},
                'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
                'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
                'reduce_max': {'type': 'ReduceMax', 'kind': 'op', 'keep_dims': True},
                'reduce_max_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
                'sub_first': {'type': 'Subtract', 'kind': 'op'},
                'sub_first_data': {'shape': shape, 'kind': 'data', 'value': None},
                'reduce_sum_axis_val': {'shape': int64_array([reduction_axis]).shape,
                                        'kind': 'data',
                                        'value': int64_array([reduction_axis])},
                'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
                'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
                'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'keep_dims': True},
                'reduce_sum_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
                'exp': {'type': 'Exp', 'kind': 'op'},
                'exp_data': {'shape': shape, 'kind': 'data', 'value': None},
                'log': {'type': 'Log', 'kind': 'op'},
                'log_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
                'sub_second': {'type': 'Subtract', 'kind': 'op'},
                'sub_second_data': {'shape': shape, 'kind': 'data', 'value': None},
                'result': {'kind': 'op', 'type': 'Result'},
            }
            ref_edges = [
                ('input', 'input_data'),
                ('reduce_max_axis_val', 'reduce_max_axis'),
                ('reduce_max_axis', 'reduce_max_axis_data'),
                ('reduce_max_axis_data', 'reduce_max', {'in': 1}),
                ('reduce_max', 'reduce_max_data'),
                ('input_data', 'reduce_max', {'out': 0, 'in': 0}),
                ('input_data', 'sub_first', {'out': 0, 'in': 0}),
                ('reduce_max_data', 'sub_first', {'in': 1}),
                ('sub_first', 'sub_first_data'),
                ('reduce_sum_axis_val', 'reduce_sum_axis'),
                ('reduce_sum_axis', 'reduce_sum_axis_data'),
                ('reduce_sum_axis_data', 'reduce_sum', {'in': 1}),
                ('reduce_sum', 'reduce_sum_data'),
                ('sub_first_data', 'exp'),
                ('exp', 'exp_data'),
                ('exp_data', 'reduce_sum', {'in': 0}),
                ('reduce_sum_data', 'log'),
                ('log', 'log_data'),
                ('log_data', 'sub_second', {'in': 1}),
                ('sub_second', 'sub_second_data'),
                ('sub_first_data', 'sub_second', {'out': 0, 'in': 0}),
                ('sub_second_data', 'result'),
            ]
            ref_net = build_graph(ref_nodes_attributes, ref_edges)
        return tf_net, ref_net
    # Precommit case is currently disabled via pytest.mark.skip.
    test_data_precommit = [
        pytest.param(dict(shape=[3, 2, 3, 7, 6], reduction_axis=-1),
                     marks=pytest.mark.skip(reason="Skipped until fixed"))
    ]
    @pytest.mark.parametrize("params", test_data_precommit)
    @pytest.mark.precommit
    def test_log_softmax_precommit(self, params, ie_device, precision, ir_version, temp_dir,
                                   use_new_frontend, api_2):
        """Precommit run of the LogSoftmax layer test."""
        self._test(*self.create_log_softmax_net(**params, ir_version=ir_version,
                                                use_new_frontend=use_new_frontend),
                   ie_device, precision, ir_version, temp_dir=temp_dir,
                   use_new_frontend=use_new_frontend, api_2=api_2)
    # Nightly cases: ranks 1, 2, 4 and 5, all reducing over the last axis.
    test_data = [dict(shape=[1], reduction_axis=-1),
                 dict(shape=[2, 5], reduction_axis=-1),
                 dict(shape=[5, 3, 7, 4], reduction_axis=-1),
                 dict(shape=[3, 2, 3, 7, 6], reduction_axis=-1)]
    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.nightly
    def test_log_softmax(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
                         api_2):
        """Nightly run of the LogSoftmax layer test."""
        self._test(*self.create_log_softmax_net(**params, ir_version=ir_version,
                                                use_new_frontend=use_new_frontend),
                   ie_device, precision, ir_version, temp_dir=temp_dir,
                   use_new_frontend=use_new_frontend, api_2=api_2)
| [
"tensorflow.compat.v1.placeholder",
"numpy.copy",
"common.layer_test_class.check_ir_version",
"distutils.version.LooseVersion",
"pytest.mark.skip",
"tensorflow.nn.log_softmax",
"pytest.mark.parametrize",
"openvino.tools.mo.front.common.partial_infer.utils.int64_array",
"unit_tests.utils.graph.build_... | [((5612, 5666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data_precommit'], {}), "('params', test_data_precommit)\n", (5635, 5666), False, 'import pytest\n'), ((6395, 6439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data'], {}), "('params', test_data)\n", (6418, 6439), False, 'import pytest\n'), ((822, 856), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (854, 856), True, 'import tensorflow as tf\n'), ((1525, 1539), 'numpy.copy', 'np.copy', (['shape'], {}), '(shape)\n', (1532, 1539), True, 'import numpy as np\n'), ((908, 930), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (928, 930), True, 'import tensorflow as tf\n'), ((1004, 1054), 'common.utils.tf_utils.permute_nchw_to_nhwc', 'permute_nchw_to_nhwc', (['tf_x_shape', 'use_new_frontend'], {}), '(tf_x_shape, use_new_frontend)\n', (1024, 1054), False, 'from common.utils.tf_utils import permute_nchw_to_nhwc\n'), ((1075, 1132), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', 'tf_x_shape', '"""Input"""'], {}), "(tf.float32, tf_x_shape, 'Input')\n", (1099, 1132), True, 'import tensorflow as tf\n'), ((1393, 1436), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (1434, 1436), True, 'import tensorflow as tf\n'), ((2002, 2040), 'common.layer_test_class.check_ir_version', 'check_ir_version', (['(10)', 'None', 'ir_version'], {}), '(10, None, ir_version)\n', (2018, 2040), False, 'from common.layer_test_class import check_ir_version\n'), ((5350, 5394), 'unit_tests.utils.graph.build_graph', 'build_graph', (['ref_nodes_attributes', 'ref_edges'], {}), '(ref_nodes_attributes, ref_edges)\n', (5361, 5394), False, 'from unit_tests.utils.graph import build_graph\n'), ((1148, 1176), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], 
{}), '(tf.__version__)\n', (1160, 1176), False, 'from distutils.version import LooseVersion\n'), ((1179, 1200), 'distutils.version.LooseVersion', 'LooseVersion', (['"""2.0.0"""'], {}), "('2.0.0')\n", (1191, 1200), False, 'from distutils.version import LooseVersion\n'), ((1218, 1281), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['input'], {'name': '"""Operation"""', 'axis': 'reduction_axis'}), "(input, name='Operation', axis=reduction_axis)\n", (1235, 1281), True, 'import tensorflow as tf\n'), ((1316, 1379), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['input'], {'axis': 'reduction_axis', 'name': '"""Operation"""'}), "(input, axis=reduction_axis, name='Operation')\n", (1333, 1379), True, 'import tensorflow as tf\n'), ((5552, 5598), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skipped until fixed"""'}), "(reason='Skipped until fixed')\n", (5568, 5598), False, 'import pytest\n'), ((2462, 2491), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[reduction_axis]'], {}), '([reduction_axis])\n', (2473, 2491), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((2624, 2640), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[1]'], {}), '([1])\n', (2635, 2640), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((3194, 3223), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[reduction_axis]'], {}), '([reduction_axis])\n', (3205, 3223), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((3356, 3372), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[1]'], {}), '([1])\n', (3367, 3372), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((2320, 2349), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[reduction_axis]'], {}), 
'([reduction_axis])\n', (2331, 2349), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((3052, 3081), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[reduction_axis]'], {}), '([reduction_axis])\n', (3063, 3081), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n')] |
#! /usr/bin/env python
# Author: <NAME> (srinivas . zinka [at] gmail . com)
# Copyright (c) 2014 <NAME>
# License: New BSD License.
import numpy as np
# from mayavi import mlab
from scipy import integrate
from scipy.special import sph_harm
# adjusting "matplotlib" label fonts
from matplotlib import rc
# Render all matplotlib text labels with LaTeX.
rc('text', usetex=True)
def SH(fun_str_re, fun_str_im='0', T0=2 * np.pi, m_start=-5, m_stop=5, err_lim=1e-8):
    r"""Compute Fourier series coefficients of a periodic function.

    The function is supplied as Python expression strings in the variable
    ``x`` (evaluated with ``eval`` -- only pass trusted input), with real
    part ``fun_str_re`` and optional imaginary part ``fun_str_im``.  For
    each integer order ``m`` in ``[m_start, m_stop]`` the coefficient

        c_m = (1 / T0) * int_0^T0 f(x) * exp(-1j * m * w0 * x) dx,
        w0 = 2 * pi / T0

    is evaluated numerically with ``scipy.integrate.quad``.

    Parameters
    ----------
    fun_str_re, fun_str_im : str
        Expressions in ``x`` for the real and imaginary parts of f.
    T0 : float
        Period of the function (default ``2 * pi``).
    m_start, m_stop : int
        First and last harmonic order (inclusive).
    err_lim : float
        Maximum accepted combined absolute quadrature error; coefficients
        whose error estimate exceeds this are left at zero and a warning
        is printed.

    Returns
    -------
    m_index : numpy array, shape (N, 1)
        Angular frequencies ``m * w0`` of the computed coefficients.
    FS : numpy array, shape (N, 1), complex
        The coefficients ``c_m``.
    """
    N = m_stop - m_start + 1
    FS = np.zeros((N, 1), dtype='complex')
    m_index = list(range(m_start, m_stop + 1))
    w0 = 2 * np.pi / T0
    for m in m_index:
        # Real and imaginary parts of f(x) * exp(-1j * m * w0 * x).
        fun_re = lambda x: (eval(fun_str_re)) * np.cos(m * w0 * x) + (eval(fun_str_im)) * np.sin(m * w0 * x)
        fun_img = lambda x: -(eval(fun_str_re)) * np.sin(m * w0 * x) + (eval(fun_str_im)) * np.cos(m * w0 * x)
        # Bug fix: integrate over one full period [0, T0] rather than the
        # hard-coded [0, 2*pi], which was only correct for the default T0.
        FS_re = integrate.quad(fun_re, 0, T0)
        FS_img = integrate.quad(fun_img, 0, T0)
        if (FS_re[1] + FS_img[1]) < err_lim:
            FS[m - m_start] = (1 / T0) * (FS_re[0] + 1j * FS_img[0])
        else:
            # Bug fix: report the actual limit instead of a stale "1e-10".
            print("Absolute error of the integration is not less than {} "
                  "while calculating Fourier series".format(err_lim))
            print("error(FS_re): ", FS_re[1])
            print("error(FS_img): ", FS_img[1])
    m_index = np.array(m_index) * w0
    m_index = np.reshape(m_index, (m_index.size, -1))
    return m_index, FS
if __name__ == '__main__':
    # Demo: build the (x, y, z) mesh of a sphere on which a real spherical
    # harmonic Y_n^m could be visualized.
    m = 2;
    n = 5
    # Create a sphere
    r = 0.3
    pi = np.pi
    cos = np.cos
    sin = np.sin
    # tht and phi definitions are interchanged in scipy
    phi, theta = np.mgrid[0:pi:101j, 0:2 * pi:101j]
    # tht and phi definitions are interchanged in scipy
    x = r * sin(phi) * cos(theta)
    y = r * sin(phi) * sin(theta)
    z = r * cos(phi)
    # NOTE(review): the mayavi-based rendering below is disabled, matching
    # the commented-out "from mayavi import mlab" import at the top.
    # mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
    # mlab.clf()
    #
    # s = sph_harm(m, n, theta, phi).real
    # mlab.mesh(x, y, z, scalars=s)
    # mlab.axes(xlabel="X", ylabel="Y", zlabel="Z")
    # mlab.show()
| [
"numpy.reshape",
"scipy.integrate.quad",
"numpy.array",
"numpy.zeros",
"matplotlib.rc",
"numpy.cos",
"numpy.sin"
] | [((308, 331), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (310, 331), False, 'from matplotlib import rc\n'), ((619, 652), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {'dtype': '"""complex"""'}), "((N, 1), dtype='complex')\n", (627, 652), True, 'import numpy as np\n'), ((983, 1019), 'scipy.integrate.quad', 'integrate.quad', (['fun_re', '(0)', '(2 * np.pi)'], {}), '(fun_re, 0, 2 * np.pi)\n', (997, 1019), False, 'from scipy import integrate\n'), ((1037, 1074), 'scipy.integrate.quad', 'integrate.quad', (['fun_img', '(0)', '(2 * np.pi)'], {}), '(fun_img, 0, 2 * np.pi)\n', (1051, 1074), False, 'from scipy import integrate\n'), ((1483, 1522), 'numpy.reshape', 'np.reshape', (['m_index', '(m_index.size, -1)'], {}), '(m_index, (m_index.size, -1))\n', (1493, 1522), True, 'import numpy as np\n'), ((1428, 1445), 'numpy.array', 'np.array', (['m_index'], {}), '(m_index)\n', (1436, 1445), True, 'import numpy as np\n'), ((795, 813), 'numpy.cos', 'np.cos', (['(m * w0 * x)'], {}), '(m * w0 * x)\n', (801, 813), True, 'import numpy as np\n'), ((837, 855), 'numpy.sin', 'np.sin', (['(m * w0 * x)'], {}), '(m * w0 * x)\n', (843, 855), True, 'import numpy as np\n'), ((906, 924), 'numpy.sin', 'np.sin', (['(m * w0 * x)'], {}), '(m * w0 * x)\n', (912, 924), True, 'import numpy as np\n'), ((948, 966), 'numpy.cos', 'np.cos', (['(m * w0 * x)'], {}), '(m * w0 * x)\n', (954, 966), True, 'import numpy as np\n')] |
import gym
from gym import core, spaces
from .gol import utils
import argparse
import itertools
import cv2
import numpy as np
import torch
from torch import ByteTensor, Tensor
from torch.nn import Conv2d, Parameter
from torch.nn.init import zeros_
from .world import World
class GameOfLifeEnv(core.Env):
    """Gym environment wrapping a Conway's Game of Life board.

    The single action type brings one cell to life at a chosen (x, y)
    position; after each action the world advances one tick.  The
    observation is the raw binary board state of shape (1, size, size)
    and the per-step reward is the number of live cells divided by the
    episode length.
    """

    def __init__(self):
        # Only one tool is available: bring a cell to life.
        self.num_tools = 1

    def configure(self, render=False, map_width=16):
        """Set up spaces, the world, and (optionally) an OpenCV window.

        render -- if True, display the board in an OpenCV window each step
        map_width -- side length of the square board
        """
        self.size = size = map_width
        self.observation_space = spaces.Box(low=np.zeros((1, size, size)),
                                            high=np.ones((1, size, size)), dtype=int)
        self.action_space = spaces.Discrete(self.num_tools * size * size)
        self.step_count = 0
        self.prob = 0.5
        self.record_entropy = True
        self.world = World(self.size, self.size)
        self.state = None
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.render = render
        self.max_step = 100
        self.entropies = []
        if self.render:
            cv2.namedWindow("Game of Life", cv2.WINDOW_NORMAL)
        # Map flat action ints to (tool, x, y) coordinates and back, unrolled
        # in the same order as the pytorch model's forward pass.
        self.intsToActions = [[] for _ in range(self.num_tools * self.size ** 2)]
        self.actionsToInts = np.zeros((self.num_tools, self.size, self.size))
        i = 0
        for z in range(self.num_tools):
            for x in range(self.size):
                for y in range(self.size):
                    self.intsToActions[i] = [z, x, y]
                    self.actionsToInts[z, x, y] = i
                    i += 1

    def reset(self):
        """Re-randomize the board and return the initial observation."""
        self.step_count = 0
        self.world.repopulate_cells()
        self.world.prepopulate_neighbours()
        return self.world.state

    def step(self, a):
        """Apply flat action *a*, tick the world, and return
        (observation, reward, terminal, info)."""
        z, act_x, act_y = self.intsToActions[a]
        self.world.build_cell(act_x, act_y, alive=True)
        if self.render:
            # Show the board after the agent's edit but before the tick.
            self._draw()
        self.world._tick()
        terminal = self.step_count == self.max_step
        self.step_count += 1
        if self.render:
            self._draw()
        reward = self.world.state.sum() / self.max_step
        infos = {}
        return (self.world.state, reward, terminal, infos)

    def _draw(self):
        """Display the current board state in the OpenCV window."""
        cv2.imshow("Game of Life",
                   np.array(self.world.state.transpose(1, 2, 0) * 255, dtype=np.uint8))
        cv2.waitKey(1)

    def close(self):
        """Release OpenCV windows (standard gym.Env cleanup hook).

        Bug fix: cv2.destroyAllWindows() previously sat directly in the
        class body and therefore executed once at class-definition
        (import) time instead of at shutdown.
        """
        cv2.destroyAllWindows()
| [
"numpy.ones",
"gym.spaces.Discrete",
"numpy.zeros",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.namedWindow"
] | [((3262, 3285), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3283, 3285), False, 'import cv2\n'), ((636, 681), 'gym.spaces.Discrete', 'spaces.Discrete', (['(self.num_tools * size * size)'], {}), '(self.num_tools * size * size)\n', (651, 681), False, 'from gym import core, spaces\n'), ((1779, 1827), 'numpy.zeros', 'np.zeros', (['(self.num_tools, self.size, self.size)'], {}), '((self.num_tools, self.size, self.size))\n', (1787, 1827), True, 'import numpy as np\n'), ((1135, 1185), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Game of Life"""', 'cv2.WINDOW_NORMAL'], {}), "('Game of Life', cv2.WINDOW_NORMAL)\n", (1150, 1185), False, 'import cv2\n'), ((2800, 2814), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2811, 2814), False, 'import cv2\n'), ((3106, 3120), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3117, 3120), False, 'import cv2\n'), ((523, 548), 'numpy.zeros', 'np.zeros', (['(1, size, size)'], {}), '((1, size, size))\n', (531, 548), True, 'import numpy as np\n'), ((571, 595), 'numpy.ones', 'np.ones', (['(1, size, size)'], {}), '((1, size, size))\n', (578, 595), True, 'import numpy as np\n'), ((975, 1000), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (998, 1000), False, 'import torch\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse as _argparse
import json as _json
import numpy as _numpy
import os as _os
import plano as _plano
import resource as _resource
import shlex as _shlex
import subprocess as _subprocess
import time as _time
from .common import *
from .common import __version__
from .common import _epilog_address_urls
from .common import _epilog_arrow_impls
from .common import _epilog_count_and_duration_formats
from .common import _urlparse
_description = """
Send or receive a set number of messages as fast as possible using a
single connection.
'quiver-arrow' is one of the Quiver tools for testing the performance
of message servers and APIs.
"""
_epilog = """
operations:
send Send messages
receive Receive messages
{_epilog_address_urls}
{_epilog_count_and_duration_formats}
{_epilog_arrow_impls}
server and passive modes:
By default quiver-arrow operates in client and active modes, meaning
that it creates an outbound connection to a server and actively
initiates creation of the protocol entities (sessions and links)
required for communication. The --server option tells quiver-arrow
to instead listen for and accept incoming connections. The
--passive option tells it to receive and confirm incoming requests
for new protocol entities but not to create them itself.
example usage:
$ qdrouterd & # Start a message server
$ quiver-arrow receive q0 & # Start receiving
$ quiver-arrow send q0 # Start sending
""".format(**globals())
class QuiverArrowCommand(Command):
    """The quiver-arrow command: run one sender or receiver implementation
    as a subprocess, stream its transfer records to disk, watch it for
    stalls, and summarize throughput and latency when it finishes."""

    def __init__(self, home_dir):
        """Build the argument parser; actual parsing happens in init()."""
        super(QuiverArrowCommand, self).__init__(home_dir)
        self.parser.description = _description.lstrip()
        self.parser.epilog = _epilog.lstrip()
        self.parser.add_argument("operation", metavar="OPERATION",
                                 choices=["send", "receive"],
                                 help="Either 'send' or 'receive'")
        self.parser.add_argument("url", metavar="ADDRESS-URL",
                                 help="The location of a message source or target")
        self.parser.add_argument("--output", metavar="DIR",
                                 help="Save output files to DIR")
        self.parser.add_argument("--impl", metavar="NAME",
                                 help="Use NAME implementation",
                                 default=DEFAULT_ARROW_IMPL)
        self.parser.add_argument("--info", action="store_true",
                                 help="Print implementation details and exit")
        # Hidden backward-compatible alias for --info.
        self.parser.add_argument("--impl-info", action="store_true", dest="info",
                                 help=_argparse.SUPPRESS)
        self.parser.add_argument("--id", metavar="ID",
                                 help="Use ID as the client or server identity")
        self.parser.add_argument("--server", action="store_true",
                                 help="Operate in server mode")
        self.parser.add_argument("--passive", action="store_true",
                                 help="Operate in passive mode")
        self.parser.add_argument("--prelude", metavar="PRELUDE", default="",
                                 help="Commands to precede the implementation invocation")
        self.parser.add_argument("--cert", metavar="CERT.PEM",
                                 help="Certificate filename - used for client authentication")
        self.parser.add_argument("--key", metavar="PRIVATE-KEY.PEM",
                                 help="Private key filename - used for client authentication")
        self.add_common_test_arguments()
        self.add_common_tool_arguments()

    def init(self):
        """Parse arguments and derive all run-time attributes."""
        self.intercept_info_request(DEFAULT_ARROW_IMPL)
        super(QuiverArrowCommand, self).init()
        self.operation = self.args.operation
        self.impl = require_impl(self.args.impl)
        self.id_ = self.args.id
        self.connection_mode = "client"
        self.channel_mode = "active"
        self.prelude = _shlex.split(self.args.prelude)
        if self.operation == "send":
            self.role = "sender"
            self.transfers_parse_func = _parse_send
        elif self.operation == "receive":
            self.role = "receiver"
            self.transfers_parse_func = _parse_receive
        else:
            raise Exception()
        if self.id_ is None:
            self.id_ = "quiver-{}-{}".format(self.role, _plano.unique_id(4))
        if self.args.server:
            self.connection_mode = "server"
        if self.args.passive:
            self.channel_mode = "passive"
        self.cert = self.args.cert
        self.key = self.args.key
        self.init_url_attributes()
        self.init_common_test_attributes()
        self.init_common_tool_attributes()
        self.init_output_dir()
        # JMS-based impls default to the ActiveMQ wire port when the URL
        # carries no explicit port.
        if _urlparse(self.url).port is None:
            if self.impl.name in ("activemq-jms", "activemq-artemis-jms"):
                self.port = "61616"
        # XXX Drop the flags stuff
        flags = list()
        if self.durable:
            flags.append("durable")
        self.flags = ",".join(flags)
        self.snapshots_file = _join(self.output_dir, "{}-snapshots.csv".format(self.role))
        self.summary_file = _join(self.output_dir, "{}-summary.json".format(self.role))
        self.transfers_file = _join(self.output_dir, "{}-transfers.csv".format(self.role))
        # Result fields, filled in by monitor_subprocess()/compute_results().
        self.start_time = None
        self.timeout_checkpoint = None
        self.first_send_time = None
        self.last_send_time = None
        self.first_receive_time = None
        self.last_receive_time = None
        self.message_count = None
        self.message_rate = None
        self.latency_average = None
        self.latency_quartiles = None
        self.latency_nines = None

    def run(self):
        """Spawn the implementation, monitor it, then post-process results.

        The child's stdout (one transfer record per line) is streamed to
        the transfers file, which is xz-compressed at the end.
        """
        args = self.prelude + [
            self.impl.file,
            "connection-mode={}".format(self.connection_mode),
            "channel-mode={}".format(self.channel_mode),
            "operation={}".format(self.operation),
            "id={}".format(self.id_),
            "scheme={}".format(self.scheme),
            "host={}".format(self.host),
            "port={}".format(self.port),
            "path={}".format(self.path),
            "duration={}".format(self.duration),
            "count={}".format(self.count),
            "body-size={}".format(self.body_size),
            "credit-window={}".format(self.credit_window),
            "transaction-size={}".format(self.transaction_size),
            "durable={}".format(1 if self.durable else 0),
        ]
        if self.username:
            args.append("username={}".format(self.username))
        if self.password:
            args.append("password={}".format(self.password))
        if self.args.cert and self.args.key:
            args.append("key={}".format(self.key))
            args.append("cert={}".format(self.cert))
        with open(self.transfers_file, "wb") as fout:
            env = _plano.ENV
            if self.verbose:
                env["QUIVER_VERBOSE"] = "1"
            proc = _plano.start_process(args, stdout=fout, env=env)
            try:
                self.monitor_subprocess(proc)
            except:
                # Intentionally broad (including KeyboardInterrupt): make
                # sure the child is stopped, then re-raise.
                _plano.stop_process(proc)
                raise
            if proc.returncode != 0:
                raise CommandError("{} exited with code {}", self.role, proc.returncode)
        if _plano.file_size(self.transfers_file) == 0:
            raise CommandError("No transfers")
        self.compute_results()
        self.save_summary()
        if _plano.exists("{}.xz".format(self.transfers_file)):
            _plano.remove("{}.xz".format(self.transfers_file))
        _plano.call("xz --compress -0 --threads 0 {}", self.transfers_file)

    def monitor_subprocess(self, proc):
        """Poll the child roughly every two seconds, appending a status
        snapshot per period and checking for stalls."""
        snap = _StatusSnapshot(self, None)
        snap.timestamp = now()
        self.start_time = snap.timestamp
        self.timeout_checkpoint = snap
        sleep = 2.0
        with open(self.transfers_file, "rb") as fin:
            with open(self.snapshots_file, "ab") as fsnaps:
                while proc.poll() is None:
                    _time.sleep(sleep)
                    period_start = _time.time()
                    # Drop the back-reference so old snapshots can be GC'd.
                    snap.previous = None
                    snap = _StatusSnapshot(self, snap)
                    snap.capture(fin, proc)
                    fsnaps.write(snap.marshal())
                    fsnaps.flush()
                    self.check_timeout(snap)
                    # Subtract the capture overhead to keep ~2s periods,
                    # but never sleep less than 1s.
                    period = _time.time() - period_start
                    sleep = max(1.0, 2.0 - period)

    def check_timeout(self, snap):
        """Raise CommandError if no transfers arrived for self.timeout
        seconds; otherwise advance the checkpoint on progress.

        Timestamps appear to be in milliseconds (hence the /1000 before
        comparing against the timeout in seconds) -- based on the division
        here; confirm against now() in .common.
        """
        checkpoint = self.timeout_checkpoint
        since = (snap.timestamp - checkpoint.timestamp) / 1000
        #print("check_timeout", snap.count, "==", checkpoint.count, "and", since, ">", self.timeout)
        if snap.count == checkpoint.count and since > self.timeout:
            raise CommandError("{} timed out", self.role)
        if snap.count > checkpoint.count:
            self.timeout_checkpoint = snap

    def compute_results(self):
        """Parse the transfers file and fill in count, duration-derived
        rate, and (for receivers) latency statistics.  Unparseable lines
        are logged and skipped."""
        transfers = list()
        with open(self.transfers_file, "rb") as f:
            for line in f:
                try:
                    transfer = self.transfers_parse_func(line)
                except Exception as e:
                    _plano.error("Failed to parse line '{}': {}", line, e)
                    continue
                transfers.append(transfer)
        self.message_count = len(transfers)
        if self.message_count == 0:
            return
        if self.operation == "send":
            self.first_send_time = transfers[0][1]
            self.last_send_time = transfers[-1][1]
            duration = (self.last_send_time - self.first_send_time) / 1000
        elif self.operation == "receive":
            self.first_receive_time = transfers[0][2]
            self.last_receive_time = transfers[-1][2]
            duration = (self.last_receive_time - self.first_receive_time) / 1000
            self.compute_latencies(transfers)
        else:
            raise Exception()
        if duration > 0:
            self.message_rate = int(round(self.message_count / duration))

    def compute_latencies(self, transfers):
        """Compute per-message latency (receive_time - send_time) stats:
        mean, the five quartile points, and the high percentiles."""
        latencies = list()
        for id_, send_time, receive_time in transfers:
            latency = receive_time - send_time
            latencies.append(latency)
        latencies = _numpy.array(latencies, _numpy.int32)
        # First five entries are the quartiles (0/25/50/75/100), the rest
        # are the "nines" (90/99/99.9/99.99/99.999) -- order matters for
        # the slicing below.
        q = 0, 25, 50, 75, 100, 90, 99, 99.9, 99.99, 99.999
        percentiles = _numpy.percentile(latencies, q)
        percentiles = [int(x) for x in percentiles]
        self.latency_average = _numpy.mean(latencies)
        self.latency_quartiles = percentiles[:5]
        self.latency_nines = percentiles[5:]

    def save_summary(self):
        """Write the run configuration and computed results as JSON."""
        props = {
            "config": {
                "impl": self.impl.name,
                "url": self.url,
                "output_dir": self.output_dir,
                "timeout": self.timeout,
                "connection_mode": self.connection_mode,
                "channel_mode": self.channel_mode,
                "operation": self.operation,
                "id": self.id_,
                "host": self.host,
                "port": self.port,
                "path": self.path,
                "duration": self.duration,
                "count": self.count,
                "body_size": self.body_size,
                "credit_window": self.credit_window,
                "transaction_size": self.transaction_size,
                "durable": self.durable,
            },
            "results": {
                "first_send_time": self.first_send_time,
                "last_send_time": self.last_send_time,
                "first_receive_time": self.first_receive_time,
                "last_receive_time": self.last_receive_time,
                "message_count": self.message_count,
                "message_rate": self.message_rate,
                "latency_average": self.latency_average,
                "latency_quartiles": self.latency_quartiles,
                "latency_nines": self.latency_nines,
            },
        }
        with open(self.summary_file, "w") as f:
            _json.dump(props, f, indent=2)
class _StatusSnapshot:
    """One periodic status sample of the running arrow subprocess:
    transfer counts, latency, and process cpu/rss, with per-period
    deltas relative to the previous snapshot."""

    def __init__(self, command, previous):
        self.command = command
        self.previous = previous
        self.timestamp = 0
        self.period = 0
        self.count = 0
        self.period_count = 0
        self.latency = 0
        self.cpu_time = 0
        self.period_cpu_time = 0
        self.rss = 0

    def capture(self, transfers_file, proc):
        """Fill this snapshot from the transfers file and /proc."""
        self.timestamp = now()
        self.period = self.timestamp - self.command.start_time
        if self.previous is not None:
            self.period = self.timestamp - self.previous.timestamp
        self.capture_transfers(transfers_file)
        self.capture_proc_info(proc)

    def capture_proc_info(self, proc):
        """Read cpu time and rss from /proc/<pid>/stat; best-effort --
        silently returns if the process is already gone."""
        proc_file = _join("/", "proc", str(proc.pid), "stat")
        try:
            with open(proc_file, "r") as f:
                line = f.read()
        except IOError:
            return
        fields = line.split()
        # Fields 14-17 (utime, stime, cutime, cstime per proc(5)), summed
        # and converted from clock ticks to milliseconds.
        self.cpu_time = int(sum(map(int, fields[13:17])) / _ticks_per_ms)
        self.period_cpu_time = self.cpu_time
        if self.previous is not None:
            self.period_cpu_time = self.cpu_time - self.previous.cpu_time
        # Field 24 is rss in pages; convert to bytes.
        self.rss = int(fields[23]) * _page_size

    def capture_transfers(self, transfers_file):
        """Consume the newly completed lines of the transfers file and
        update the count and (for receivers) the mean period latency."""
        transfers = list()
        for line in _read_lines(transfers_file):
            try:
                record = self.command.transfers_parse_func(line)
            except Exception as e:
                _plano.error("Failed to parse line '{}': {}", line, e)
                continue
            transfers.append(record)
        self.period_count = len(transfers)
        self.count = self.previous.count + self.period_count
        if self.period_count > 0 and self.command.operation == "receive":
            latencies = list()
            for id_, send_time, receive_time in transfers:
                latency = receive_time - send_time
                latencies.append(latency)
            self.latency = int(_numpy.mean(latencies))

    def marshal(self):
        """Encode this snapshot as one ascii CSV line (bytes)."""
        fields = (self.timestamp,
                  self.period,
                  self.count,
                  self.period_count,
                  self.latency,
                  self.cpu_time,
                  self.period_cpu_time,
                  self.rss)
        fields = map(str, fields)
        line = "{}\n".format(",".join(fields))
        return line.encode("ascii")

    def unmarshal(self, line):
        """Decode a CSV line produced by marshal() into this snapshot."""
        line = line.decode("ascii")
        fields = [int(x) for x in line.split(",")]
        (self.timestamp,
         self.period,
         self.count,
         self.period_count,
         self.latency,
         self.cpu_time,
         self.period_cpu_time,
         self.rss) = fields
def _read_lines(file_):
while True:
fpos = file_.tell()
line = file_.readline()
if line == b"":
break
if not line.endswith(b"\n"):
file_.seek(fpos)
break
yield line[:-1]
def _parse_send(line):
message_id, send_time = line.split(b",", 1)
send_time = int(send_time)
return message_id, send_time
def _parse_receive(line):
message_id, send_time, receive_time = line.split(b",", 2)
send_time = int(send_time)
receive_time = int(receive_time)
return message_id, send_time, receive_time
# Module-level shorthand and system constants.
_join = _plano.join
# Kernel scheduler ticks per millisecond, for converting /proc cpu times.
_ticks_per_ms = _os.sysconf(_os.sysconf_names["SC_CLK_TCK"]) / 1000
# Bytes per memory page, for converting the /proc rss field to bytes.
_page_size = _resource.getpagesize()
| [
"numpy.mean",
"plano.error",
"plano.stop_process",
"shlex.split",
"plano.unique_id",
"time.sleep",
"resource.getpagesize",
"numpy.array",
"plano.call",
"plano.file_size",
"os.sysconf",
"plano.start_process",
"numpy.percentile",
"time.time",
"json.dump"
] | [((16552, 16575), 'resource.getpagesize', '_resource.getpagesize', ([], {}), '()\n', (16573, 16575), True, 'import resource as _resource\n'), ((16487, 16531), 'os.sysconf', '_os.sysconf', (["_os.sysconf_names['SC_CLK_TCK']"], {}), "(_os.sysconf_names['SC_CLK_TCK'])\n", (16498, 16531), True, 'import os as _os\n'), ((4819, 4850), 'shlex.split', '_shlex.split', (['self.args.prelude'], {}), '(self.args.prelude)\n', (4831, 4850), True, 'import shlex as _shlex\n'), ((8529, 8596), 'plano.call', '_plano.call', (['"""xz --compress -0 --threads 0 {}"""', 'self.transfers_file'], {}), "('xz --compress -0 --threads 0 {}', self.transfers_file)\n", (8540, 8596), True, 'import plano as _plano\n'), ((11282, 11319), 'numpy.array', '_numpy.array', (['latencies', '_numpy.int32'], {}), '(latencies, _numpy.int32)\n', (11294, 11319), True, 'import numpy as _numpy\n'), ((11403, 11434), 'numpy.percentile', '_numpy.percentile', (['latencies', 'q'], {}), '(latencies, q)\n', (11420, 11434), True, 'import numpy as _numpy\n'), ((11519, 11541), 'numpy.mean', '_numpy.mean', (['latencies'], {}), '(latencies)\n', (11530, 11541), True, 'import numpy as _numpy\n'), ((7906, 7954), 'plano.start_process', '_plano.start_process', (['args'], {'stdout': 'fout', 'env': 'env'}), '(args, stdout=fout, env=env)\n', (7926, 7954), True, 'import plano as _plano\n'), ((8242, 8279), 'plano.file_size', '_plano.file_size', (['self.transfers_file'], {}), '(self.transfers_file)\n', (8258, 8279), True, 'import plano as _plano\n'), ((13073, 13103), 'json.dump', '_json.dump', (['props', 'f'], {'indent': '(2)'}), '(props, f, indent=2)\n', (13083, 13103), True, 'import json as _json\n'), ((5236, 5255), 'plano.unique_id', '_plano.unique_id', (['(4)'], {}), '(4)\n', (5252, 5255), True, 'import plano as _plano\n'), ((15100, 15122), 'numpy.mean', '_numpy.mean', (['latencies'], {}), '(latencies)\n', (15111, 15122), True, 'import numpy as _numpy\n'), ((8055, 8080), 'plano.stop_process', '_plano.stop_process', (['proc'], {}), 
'(proc)\n', (8074, 8080), True, 'import plano as _plano\n'), ((8991, 9009), 'time.sleep', '_time.sleep', (['sleep'], {}), '(sleep)\n', (9002, 9009), True, 'import time as _time\n'), ((9046, 9058), 'time.time', '_time.time', ([], {}), '()\n', (9056, 9058), True, 'import time as _time\n'), ((14586, 14640), 'plano.error', '_plano.error', (['"""Failed to parse line \'{}\': {}"""', 'line', 'e'], {}), '("Failed to parse line \'{}\': {}", line, e)\n', (14598, 14640), True, 'import plano as _plano\n'), ((9361, 9373), 'time.time', '_time.time', ([], {}), '()\n', (9371, 9373), True, 'import time as _time\n'), ((10180, 10234), 'plano.error', '_plano.error', (['"""Failed to parse line \'{}\': {}"""', 'line', 'e'], {}), '("Failed to parse line \'{}\': {}", line, e)\n', (10192, 10234), True, 'import plano as _plano\n')] |
import logging
import numpy as np
from numpy.linalg import norm
from scipy.stats import moment
from scipy.special import cbrt
def common_usr(molecule, ctd=None, cst=None, fct=None, ftf=None, atoms_type=None):
    """Shared helper for the USR and USRCAT descriptors.

    Derives the four USR reference points (unless they are supplied) and
    returns, for the atom distances to each point, the mean, variance and
    third central moment.

    Parameters
    ----------
    molecule : oddt.toolkit.Molecule
        Molecule to compute the USR shape descriptor for.
    ctd, cst, fct, ftf : numpy array or None (default = None)
        Reference points: molecular centroid, closest atom to the
        centroid, farthest atom from the centroid and farthest atom from
        that one. Any point given as None is computed from the atoms.
    atoms_type : str or None (default = None)
        Name of an atom_dict flag used to select a subset of atoms.
        'ishydrophobe' additionally selects halogens and sulphur atoms.
        If None, all atoms are used.

    Returns
    -------
    shape_descriptor : numpy array, shape = (12)
        Twelve statistical moments describing the shape of the molecule.
    points : tuple
        The four reference points (ctd, cst, fct, ftf).
    """
    if atoms_type is None:
        coords = molecule.atom_dict['coords']
    else:
        if atoms_type == 'ishydrophobe':
            selection = (molecule.atom_dict['ishalogen'] |
                         molecule.atom_dict['ishydrophobe'] |
                         (molecule.atom_dict['atomicnum'] == 16))
        else:
            selection = molecule.atom_dict[atoms_type]
        coords = molecule.atom_dict[selection]['coords']
    if len(coords) == 0:
        # no atoms selected -> zero descriptor and dummy reference points
        return np.zeros(12), ((0., 0., 0.),) * 4

    def dist_from(point):
        # Euclidean distance of every selected atom to `point`
        return norm(coords - point, axis=1)

    if ctd is None:
        ctd = coords.mean(0)
    distances_ctd = dist_from(ctd)
    if cst is None:
        cst = coords[distances_ctd.argmin()]
    distances_cst = dist_from(cst)
    if fct is None:
        fct = coords[distances_ctd.argmax()]
    distances_fct = dist_from(fct)
    if ftf is None:
        ftf = coords[distances_fct.argmax()]
    distances_ftf = dist_from(ftf)

    shape_descriptor = np.zeros(12)
    distance_sets = (distances_ctd, distances_cst, distances_fct, distances_ftf)
    for index, distances in enumerate(distance_sets):
        base = 3 * index
        shape_descriptor[base] = np.mean(distances)
        shape_descriptor[base + 1] = np.var(distances)
        shape_descriptor[base + 2] = moment(distances, moment=3)
    return shape_descriptor, (ctd, cst, fct, ftf)
def usr(molecule):
    """Compute the USR (Ultrafast Shape Recognition) descriptor.

    Based on Ballester & Richards (2007), "Ultrafast shape recognition to
    search compound databases for similar molecular shapes", Journal of
    Computational Chemistry 28(10):1711-23.
    http://dx.doi.org/10.1002/jcc.20681

    Parameters
    ----------
    molecule : oddt.toolkit.Molecule
        Molecule to compute the USR shape descriptor for.

    Returns
    -------
    shape_descriptor : numpy array, shape = (12)
        Array describing the shape of the molecule.
    """
    shape_descriptor, _ = common_usr(molecule)
    return shape_descriptor
def usr_cat(molecule):
    """Compute the USRCAT shape descriptor.

    Based on Schreyer & Blundell (2012), "USRCAT: real-time ultrafast
    shape recognition with pharmacophoric constraints", Journal of
    Cheminformatics 4:27. http://dx.doi.org/10.1186/1758-2946-4-27

    Parameters
    ----------
    molecule : oddt.toolkit.Molecule
        Molecule to compute the USRCAT shape descriptor for.

    Returns
    -------
    shape_descriptor : numpy array, shape = (60)
        Array describing the shape of the molecule: the all-atom USR
        moments followed by the moments of four pharmacophoric subsets.
    """
    all_atoms_shape, (ctd, cst, fct, ftf) = common_usr(molecule)
    # Reuse the reference points of the full molecule for every subset.
    subset_flags = ('ishydrophobe', 'isaromatic', 'isacceptor', 'isdonor')
    subset_shapes = [common_usr(molecule, ctd, cst, fct, ftf, flag)[0]
                     for flag in subset_flags]
    cat_shape = np.hstack([all_atoms_shape] + subset_shapes)
    return np.nan_to_num(cat_shape)
def electroshape(mol):
    """Compute the ElectroShape descriptor.

    Based on Armstrong et al. (2010), "ElectroShape: fast molecular
    similarity calculations incorporating shape, chirality and
    electrostatics", J Comput Aided Mol Des 24:789-801.
    http://dx.doi.org/doi:10.1007/s10822-010-9374-0

    Besides the spatial coordinates, each atom's partial charge is used
    as a fourth dimension describing the shape of the molecule.

    Parameters
    ----------
    mol : oddt.toolkit.Molecule
        Molecule to compute the ElectroShape descriptor for.

    Returns
    -------
    shape_descriptor : numpy array, shape = (15)
        Array describing the shape of the molecule.
    """
    if (mol.atom_dict['coords'] == 0).all():
        raise Exception('Molecule needs 3D coordinates')
    if (mol.atom_dict['charge'] == 0).all():
        logging.warning('All partial charges are zero. ElectroShape strongly relies on them.')
    if np.isnan(mol.atom_dict['charge']).any():
        logging.warning('Nan values in charge values of molecule ' + mol.title)

    charge = np.nan_to_num(mol.atom_dict['charge'])
    mi = 25  # scaling factor converting electron charges to Angstroms
    four_dimensions = np.column_stack((mol.atom_dict['coords'], charge * mi))

    c1 = four_dimensions.mean(0)                 # geometric centre of the molecule
    distances_c1 = norm(four_dimensions - c1, axis=1)
    c2 = four_dimensions[distances_c1.argmax()]  # point furthest from c1
    distances_c2 = norm(four_dimensions - c2, axis=1)
    c3 = four_dimensions[distances_c2.argmax()]  # point furthest from c2
    distances_c3 = norm(four_dimensions - c3, axis=1)

    vector_a = c2 - c1
    vector_b = c3 - c1
    # spatial (first three) components of the vectors above
    vector_as = vector_a[:3]
    vector_bs = vector_b[:3]
    cross_ab = np.cross(vector_as, vector_bs)
    vector_c = (norm(vector_a) / (2 * norm(cross_ab))) * cross_ab
    vector_c1s = c1[:3]
    max_charge = np.array(np.amax(charge) * mi)
    min_charge = np.array(np.amin(charge) * mi)
    # two off-plane reference points carrying the extreme charges
    c4 = np.append(vector_c1s + vector_c, max_charge)
    c5 = np.append(vector_c1s + vector_c, min_charge)
    distances_c4 = norm(four_dimensions - c4, axis=1)
    distances_c5 = norm(four_dimensions - c5, axis=1)

    shape_descriptor = np.zeros(15)
    reference_distances = (distances_c1, distances_c2, distances_c3,
                           distances_c4, distances_c5)
    for slot, distances in enumerate(reference_distances):
        base = 3 * slot
        mean = np.mean(distances)
        shape_descriptor[base] = mean
        shape_descriptor[base + 1] = np.std(distances)
        shape_descriptor[base + 2] = cbrt(np.sum(((distances - mean) ** 3) / distances.size))
    return shape_descriptor
def usr_similarity(mol1_shape, mol2_shape, ow=1., hw=1., rw=1., aw=1., dw=1.):
    """Compute the similarity between two shape descriptors.

    Parameters
    ----------
    mol1_shape : numpy array
        USR shape descriptor of the first molecule.
    mol2_shape : numpy array
        USR shape descriptor of the second molecule.
    ow, hw, rw, aw, dw : float (default = 1.)
        Scaling factors for all atoms, hydrophobic atoms, aromatic
        atoms, acceptors and donors respectively. Used only for USRCAT
        (length-60) descriptors; ignored otherwise.

    Returns
    -------
    similarity : float from 0 to 1
        Similarity between the shapes of the molecules; 1 indicates
        identical molecules.
    """
    length1 = mol1_shape.shape[0]
    length2 = mol2_shape.shape[0]
    if length1 == 12 and length2 == 12:
        # plain USR: normalized Manhattan distance
        manhattan = np.sum(np.fabs(mol1_shape - mol2_shape))
        similarity = 1. / (1. + (1. / 12) * manhattan)
    elif length1 == 60 and length2 == 60:
        # USRCAT: weight each 12-moment block, then normalize the weights
        weights = np.array([ow, hw, rw, aw, dw])
        weights = weights / weights.sum()
        block_diff = np.abs(mol1_shape - mol2_shape).reshape(-1, 12)
        similarity = 1. / (1 + (weights * (1. / 12) * block_diff.sum(axis=1)).sum())
    elif length1 == 15 and length2 == 15:
        # ElectroShape: normalized Manhattan distance over 15 moments
        similarity = 1. / (1 + (1. / 15) * np.sum(np.fabs(mol1_shape - mol2_shape)))
    else:
        raise Exception('Given vectors are not valid USR shape descriptors '
                        'or come from different methods. Correct vector lengths'
                        'are: 12 for USR, 60 for USRCAT, 15 for Electroshape')
    return similarity
| [
"numpy.hstack",
"numpy.column_stack",
"numpy.array",
"numpy.linalg.norm",
"numpy.mean",
"numpy.cross",
"numpy.abs",
"numpy.amin",
"numpy.nan_to_num",
"scipy.stats.moment",
"logging.warning",
"numpy.isnan",
"numpy.std",
"numpy.fabs",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"nump... | [((1859, 1884), 'numpy.linalg.norm', 'norm', (['(atoms - ctd)'], {'axis': '(1)'}), '(atoms - ctd, axis=1)\n', (1863, 1884), False, 'from numpy.linalg import norm\n'), ((1970, 1995), 'numpy.linalg.norm', 'norm', (['(atoms - cst)'], {'axis': '(1)'}), '(atoms - cst, axis=1)\n', (1974, 1995), False, 'from numpy.linalg import norm\n'), ((2081, 2106), 'numpy.linalg.norm', 'norm', (['(atoms - fct)'], {'axis': '(1)'}), '(atoms - fct, axis=1)\n', (2085, 2106), False, 'from numpy.linalg import norm\n'), ((2192, 2217), 'numpy.linalg.norm', 'norm', (['(atoms - ftf)'], {'axis': '(1)'}), '(atoms - ftf, axis=1)\n', (2196, 2217), False, 'from numpy.linalg import norm\n'), ((2325, 2337), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2333, 2337), True, 'import numpy as np\n'), ((4119, 4215), 'numpy.hstack', 'np.hstack', (['(all_atoms_shape, hydrophobic_shape, aromatic_shape, acceptor_shape,\n donor_shape)'], {}), '((all_atoms_shape, hydrophobic_shape, aromatic_shape,\n acceptor_shape, donor_shape))\n', (4128, 4215), True, 'import numpy as np\n'), ((4251, 4275), 'numpy.nan_to_num', 'np.nan_to_num', (['cat_shape'], {}), '(cat_shape)\n', (4264, 4275), True, 'import numpy as np\n'), ((5335, 5373), 'numpy.nan_to_num', 'np.nan_to_num', (["mol.atom_dict['charge']"], {}), "(mol.atom_dict['charge'])\n", (5348, 5373), True, 'import numpy as np\n'), ((5469, 5524), 'numpy.column_stack', 'np.column_stack', (["(mol.atom_dict['coords'], charge * mi)"], {}), "((mol.atom_dict['coords'], charge * mi))\n", (5484, 5524), True, 'import numpy as np\n'), ((5614, 5648), 'numpy.linalg.norm', 'norm', (['(four_dimensions - c1)'], {'axis': '(1)'}), '(four_dimensions - c1, axis=1)\n', (5618, 5648), False, 'from numpy.linalg import norm\n'), ((5751, 5785), 'numpy.linalg.norm', 'norm', (['(four_dimensions - c2)'], {'axis': '(1)'}), '(four_dimensions - c2, axis=1)\n', (5755, 5785), False, 'from numpy.linalg import norm\n'), ((5888, 5922), 'numpy.linalg.norm', 'norm', (['(four_dimensions - c3)'], 
{'axis': '(1)'}), '(four_dimensions - c3, axis=1)\n', (5892, 5922), False, 'from numpy.linalg import norm\n'), ((6375, 6419), 'numpy.append', 'np.append', (['(vector_c1s + vector_c)', 'max_charge'], {}), '(vector_c1s + vector_c, max_charge)\n', (6384, 6419), True, 'import numpy as np\n'), ((6429, 6473), 'numpy.append', 'np.append', (['(vector_c1s + vector_c)', 'min_charge'], {}), '(vector_c1s + vector_c, min_charge)\n', (6438, 6473), True, 'import numpy as np\n'), ((6494, 6528), 'numpy.linalg.norm', 'norm', (['(four_dimensions - c4)'], {'axis': '(1)'}), '(four_dimensions - c4, axis=1)\n', (6498, 6528), False, 'from numpy.linalg import norm\n'), ((6548, 6582), 'numpy.linalg.norm', 'norm', (['(four_dimensions - c5)'], {'axis': '(1)'}), '(four_dimensions - c5, axis=1)\n', (6552, 6582), False, 'from numpy.linalg import norm\n'), ((6722, 6734), 'numpy.zeros', 'np.zeros', (['(15)'], {}), '(15)\n', (6730, 6734), True, 'import numpy as np\n'), ((2428, 2446), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (2435, 2446), True, 'import numpy as np\n'), ((2485, 2502), 'numpy.var', 'np.var', (['distances'], {}), '(distances)\n', (2491, 2502), True, 'import numpy as np\n'), ((2541, 2568), 'scipy.stats.moment', 'moment', (['distances'], {'moment': '(3)'}), '(distances, moment=3)\n', (2547, 2568), False, 'from scipy.stats import moment\n'), ((5105, 5196), 'logging.warning', 'logging.warning', (['"""All partial charges are zero. ElectroShape strongly relies on them."""'], {}), "(\n 'All partial charges are zero. 
ElectroShape strongly relies on them.')\n", (5120, 5196), False, 'import logging\n'), ((5249, 5320), 'logging.warning', 'logging.warning', (["('Nan values in charge values of molecule ' + mol.title)"], {}), "('Nan values in charge values of molecule ' + mol.title)\n", (5264, 5320), False, 'import logging\n'), ((6211, 6241), 'numpy.cross', 'np.cross', (['vector_as', 'vector_bs'], {}), '(vector_as, vector_bs)\n', (6219, 6241), True, 'import numpy as np\n'), ((6798, 6816), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (6805, 6816), True, 'import numpy as np\n'), ((6890, 6907), 'numpy.std', 'np.std', (['distances'], {}), '(distances)\n', (6896, 6907), True, 'import numpy as np\n'), ((1756, 1768), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (1764, 1768), True, 'import numpy as np\n'), ((5200, 5233), 'numpy.isnan', 'np.isnan', (["mol.atom_dict['charge']"], {}), "(mol.atom_dict['charge'])\n", (5208, 5233), True, 'import numpy as np\n'), ((6116, 6130), 'numpy.linalg.norm', 'norm', (['vector_a'], {}), '(vector_a)\n', (6120, 6130), False, 'from numpy.linalg import norm\n'), ((6295, 6310), 'numpy.amax', 'np.amax', (['charge'], {}), '(charge)\n', (6302, 6310), True, 'import numpy as np\n'), ((6343, 6358), 'numpy.amin', 'np.amin', (['charge'], {}), '(charge)\n', (6350, 6358), True, 'import numpy as np\n'), ((6947, 6995), 'numpy.sum', 'np.sum', (['((distances - mean) ** 3 / distances.size)'], {}), '((distances - mean) ** 3 / distances.size)\n', (6953, 6995), True, 'import numpy as np\n'), ((8314, 8344), 'numpy.array', 'np.array', (['[ow, hw, rw, aw, dw]'], {}), '([ow, hw, rw, aw, dw])\n', (8322, 8344), True, 'import numpy as np\n'), ((6159, 6189), 'numpy.cross', 'np.cross', (['vector_as', 'vector_bs'], {}), '(vector_as, vector_bs)\n', (6167, 6189), True, 'import numpy as np\n'), ((8418, 8449), 'numpy.abs', 'np.abs', (['(mol1_shape - mol2_shape)'], {}), '(mol1_shape - mol2_shape)\n', (8424, 8449), True, 'import numpy as np\n'), ((8201, 8233), 
'numpy.fabs', 'np.fabs', (['(mol1_shape - mol2_shape)'], {}), '(mol1_shape - mol2_shape)\n', (8208, 8233), True, 'import numpy as np\n'), ((8647, 8679), 'numpy.fabs', 'np.fabs', (['(mol1_shape - mol2_shape)'], {}), '(mol1_shape - mol2_shape)\n', (8654, 8679), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 13:24:45 2021
@author: admin
"""
import numpy as np
import struct
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
# Test-set image file (MNIST idx3-ubyte format)
test_images_file = 'MNIST/t10k-images.idx3-ubyte'
# Test-set label file (MNIST idx1-ubyte format)
test_labels_file = 'MNIST/t10k-labels.idx1-ubyte'
def load_images_file(filename):
    """
    Parse an MNIST idx3-ubyte image file.

    :param filename: path to the idx3-ubyte image file
    :return: ndarray of shape (num_images, num_rows, num_cols) with the
             pixel values of every image
    """
    # Read the raw binary data. BUG FIX: the original used
    # open(filename, 'rb').read(), which leaks the file handle; a context
    # manager guarantees the file is closed.
    with open(filename, 'rb') as f:
        bin_data = f.read()
    # Header: magic number, image count, rows, cols — four big-endian int32.
    offset = 0
    fmt_header = '>iiii'
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张, 图片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))
    # Parse the image payload.
    image_size = num_rows * num_cols
    # Skip past the 16-byte header; offset now points at the first pixel.
    offset += struct.calcsize(fmt_header)
    print(offset)
    # One image = image_size unsigned bytes ('B'); the count prefix makes
    # unpack_from read a whole image at once instead of a single pixel.
    fmt_image = '>' + str(image_size) + 'B'
    print(fmt_image, offset, struct.calcsize(fmt_image))
    images = np.empty((num_images, num_rows, num_cols))
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            # progress report every 10000 images
            print('已解析 %d' % (i + 1) + '张')
            print(offset)
        images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
        offset += struct.calcsize(fmt_image)
    return images
def load_labels_file(filename):
    """
    Parse an MNIST idx1-ubyte label file.

    :param filename: path to the idx1-ubyte label file
    :return: ndarray of shape (num_images,) holding one label per image
    """
    # Read the raw binary data. BUG FIX: the original used
    # open(filename, 'rb').read(), which leaks the file handle; a context
    # manager guarantees the file is closed.
    with open(filename, 'rb') as f:
        bin_data = f.read()
    # Header: magic number and label count — two big-endian int32.
    offset = 0
    fmt_header = '>ii'
    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))
    # Parse the label payload: one unsigned byte per label.
    offset += struct.calcsize(fmt_header)
    fmt_image = '>B'
    labels = np.empty(num_images)
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            # progress report every 10000 labels
            print('已解析 %d' % (i + 1) + '张')
        labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
        offset += struct.calcsize(fmt_image)
    return labels
# A simple convolutional network built on nn.Module.
class ConvNet(torch.nn.Module):
    """CNN for 28x28 single-channel (MNIST) images.

    Parameters
    ----------
    CH_1 : int
        Number of input channels (1 for MNIST grayscale images).
    CH_2, CH_3, CH_4 : int
        Channel counts of the three convolutional stages.
    """

    def __init__(self, CH_1, CH_2, CH_3, CH_4):
        super(ConvNet, self).__init__()
        # Feature extractor: three conv/ReLU stages, the first two each
        # followed by 2x2 max-pooling (28x28 -> 14x14 -> 7x7).
        self.features = nn.Sequential(
            # 28*28*CH_1
            nn.Conv2d(CH_1, CH_2, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # 28*28*CH_2
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 14*14*CH_2
            nn.Conv2d(CH_2, CH_3, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # 14*14*CH_3
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 7*7*CH_3
            nn.Conv2d(CH_3, CH_4, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # 7*7*CH_4
        )
        # Classifier head: three fully connected layers with dropout.
        self.classifier = nn.Sequential(
            nn.Linear(CH_4 * 7 * 7, 1024),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(1024, 128),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(128, 10),
        )

    def forward(self, x):
        """Run a forward pass and return class logits of shape (N, 10)."""
        x = self.features(x)
        # flatten the feature maps before the fully connected layers
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def FeatureExtractor(self, x):
        """Return the convolutional feature maps, shape (N, CH_4, 7, 7).

        BUG FIX: the original computed the features but had no return
        statement, so the method always returned None.
        """
        x = self.features(x)
        return x
if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"]="3"
    # Load the test images and labels.
    test_images = load_images_file(test_images_file)
    test_labels = load_labels_file(test_labels_file)
    # One-hot encode the labels (multi-class).
    test_label_oh = np.eye(10)[test_labels.astype(int)]
    # Convert numpy arrays to torch tensors (images gain a channel dim).
    test_feature_t = torch.from_numpy(test_images).unsqueeze(1).float()
    test_label_t = torch.from_numpy(test_label_oh).float()
    # Loading a whole pickled model would look like:
    # model = torch.load('./SaveModel/MNIST_model.pt', map_location='cpu')
    # Here only the parameters are loaded, matching
    # torch.save(model.state_dict(), './SaveModel/MNIST_model_dict.pt')
    CH_1, CH_2, CH_3, CH_4 = 1, 5, 10, 15
    model = ConvNet(CH_1, CH_2, CH_3, CH_4)
    model.load_state_dict(torch.load('./SaveModel/MNIST_model_dict.pt', map_location='cpu'))
    ####################### evaluation #########################
    model.eval() # keep BN and dropout layers fixed during evaluation
    cnt = 0 # counter of correct predictions
    # Run the whole test set through the model in one batch.
    test_out = model(test_feature_t)
    # test_out = test_out.cpu()
    _, test_out_np= torch.max(test_out,1) # one-hot decode: keep only the argmax class index (max value unused)
    # Print prediction vs. ground truth for each test sample
    # (the original comment said 922 samples, but the loop covers 10000).
    for test_i in range(10000):
        print("No.",test_i,"\npre:",test_out_np.numpy()[test_i],"\nGT:",test_labels[test_i])
        print("****************")
        if test_out_np.numpy()[test_i] == test_labels[test_i]:
            #print("correct")
            cnt += 1
    # Accuracy over the 10000 test samples.
    correct_rate = cnt/10000.0
print("correct_rate:",correct_rate) | [
"struct.calcsize",
"numpy.eye",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.load",
"torch.max",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"numpy.empty",
"torch.nn.Linear",
"struct.unpack_from"
] | [((727, 775), 'struct.unpack_from', 'struct.unpack_from', (['fmt_header', 'bin_data', 'offset'], {}), '(fmt_header, bin_data, offset)\n', (745, 775), False, 'import struct\n'), ((932, 959), 'struct.calcsize', 'struct.calcsize', (['fmt_header'], {}), '(fmt_header)\n', (947, 959), False, 'import struct\n'), ((1253, 1295), 'numpy.empty', 'np.empty', (['(num_images, num_rows, num_cols)'], {}), '((num_images, num_rows, num_cols))\n', (1261, 1295), True, 'import numpy as np\n'), ((2021, 2069), 'struct.unpack_from', 'struct.unpack_from', (['fmt_header', 'bin_data', 'offset'], {}), '(fmt_header, bin_data, offset)\n', (2039, 2069), False, 'import struct\n'), ((2156, 2183), 'struct.calcsize', 'struct.calcsize', (['fmt_header'], {}), '(fmt_header)\n', (2171, 2183), False, 'import struct\n'), ((2218, 2238), 'numpy.empty', 'np.empty', (['num_images'], {}), '(num_images)\n', (2226, 2238), True, 'import numpy as np\n'), ((4851, 4873), 'torch.max', 'torch.max', (['test_out', '(1)'], {}), '(test_out, 1)\n', (4860, 4873), False, 'import torch\n'), ((1212, 1238), 'struct.calcsize', 'struct.calcsize', (['fmt_image'], {}), '(fmt_image)\n', (1227, 1238), False, 'import struct\n'), ((1601, 1627), 'struct.calcsize', 'struct.calcsize', (['fmt_image'], {}), '(fmt_image)\n', (1616, 1627), False, 'import struct\n'), ((2438, 2464), 'struct.calcsize', 'struct.calcsize', (['fmt_image'], {}), '(fmt_image)\n', (2453, 2464), False, 'import struct\n'), ((4051, 4061), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (4057, 4061), True, 'import numpy as np\n'), ((4534, 4599), 'torch.load', 'torch.load', (['"""./SaveModel/MNIST_model_dict.pt"""'], {'map_location': '"""cpu"""'}), "('./SaveModel/MNIST_model_dict.pt', map_location='cpu')\n", (4544, 4599), False, 'import torch\n'), ((2369, 2416), 'struct.unpack_from', 'struct.unpack_from', (['fmt_image', 'bin_data', 'offset'], {}), '(fmt_image, bin_data, offset)\n', (2387, 2416), False, 'import struct\n'), ((2739, 2786), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['CH_1', 'CH_2'], {'kernel_size': '(3)', 'padding': '(1)'}), '(CH_1, CH_2, kernel_size=3, padding=1)\n', (2748, 2786), True, 'import torch.nn as nn\n'), ((2800, 2821), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2807, 2821), True, 'import torch.nn as nn\n'), ((2860, 2897), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (2872, 2897), True, 'import torch.nn as nn\n'), ((2936, 2983), 'torch.nn.Conv2d', 'nn.Conv2d', (['CH_2', 'CH_3'], {'kernel_size': '(3)', 'padding': '(1)'}), '(CH_2, CH_3, kernel_size=3, padding=1)\n', (2945, 2983), True, 'import torch.nn as nn\n'), ((2997, 3018), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3004, 3018), True, 'import torch.nn as nn\n'), ((3057, 3094), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (3069, 3094), True, 'import torch.nn as nn\n'), ((3131, 3178), 'torch.nn.Conv2d', 'nn.Conv2d', (['CH_3', 'CH_4'], {'kernel_size': '(3)', 'padding': '(1)'}), '(CH_3, CH_4, kernel_size=3, padding=1)\n', (3140, 3178), True, 'import torch.nn as nn\n'), ((3192, 3213), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3199, 3213), True, 'import torch.nn as nn\n'), ((3310, 3339), 'torch.nn.Linear', 'nn.Linear', (['(CH_4 * 7 * 7)', '(1024)'], {}), '(CH_4 * 7 * 7, 1024)\n', (3319, 3339), True, 'import torch.nn as nn\n'), ((3353, 3366), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3360, 3366), True, 'import torch.nn as nn\n'), ((3380, 3392), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (3390, 3392), True, 'import torch.nn as nn\n'), ((3406, 3426), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(128)'], {}), '(1024, 128)\n', (3415, 3426), True, 'import torch.nn as nn\n'), ((3440, 3453), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3447, 3453), True, 'import torch.nn as nn\n'), ((3467, 
3479), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (3477, 3479), True, 'import torch.nn as nn\n'), ((3493, 3511), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(10)'], {}), '(128, 10)\n', (3502, 3511), True, 'import torch.nn as nn\n'), ((4203, 4234), 'torch.from_numpy', 'torch.from_numpy', (['test_label_oh'], {}), '(test_label_oh)\n', (4219, 4234), False, 'import torch\n'), ((1478, 1525), 'struct.unpack_from', 'struct.unpack_from', (['fmt_image', 'bin_data', 'offset'], {}), '(fmt_image, bin_data, offset)\n', (1496, 1525), False, 'import struct\n'), ((4133, 4162), 'torch.from_numpy', 'torch.from_numpy', (['test_images'], {}), '(test_images)\n', (4149, 4162), False, 'import torch\n')] |
import math
import hydrostats as hs
import hydrostats.data as hd
import numpy as np
import pandas as pd
def solve_gumbel1(std, xbar, rp):
    """
    Evaluate the Gumbel Type I estimate (pdf = exp(-exp(-b)), b the
    reduced variate) for a given return period.

    Parameters
    ----------
    std : standard deviation of the annual-maximum series
    xbar : mean of the annual-maximum series
    rp : return period in years
    """
    # reduced variate for the non-exceedance probability 1 - 1/rp
    reduced_variate = -math.log(-math.log(1 - (1 / rp)))
    return reduced_variate * std * .7797 + xbar - (.45 * std)
def statistics_tables(corrected: pd.DataFrame, simulated: pd.DataFrame, observed: pd.DataFrame) -> pd.DataFrame:
    """Build a side-by-side error-metric table comparing the original and
    the corrected simulation against the observed series.

    Returns a DataFrame with one column per series (original, corrected)
    and one row per metric.
    """
    metrics = ['ME', 'RMSE', 'NRMSE (Mean)', 'MAPE', 'NSE', 'KGE (2009)', 'KGE (2012)']
    # Pair each simulated series with the observations, then score it.
    sim_scores = hs.make_table(
        merged_dataframe=hd.merge_data(sim_df=simulated, obs_df=observed),
        metrics=metrics)
    cor_scores = hs.make_table(
        merged_dataframe=hd.merge_data(sim_df=corrected, obs_df=observed),
        metrics=metrics)
    sim_scores = sim_scores.rename(index={'Full Time Series': 'Original Full Time Series'}).transpose()
    cor_scores = cor_scores.rename(index={'Full Time Series': 'Corrected Full Time Series'}).transpose()
    return pd.merge(sim_scores, cor_scores, right_index=True, left_index=True)
def compute_fdc(flows: np.array, steps: int = 500, exceed: bool = True, col_name: str = 'flow'):
    """Compute a flow duration curve from a series of flows.

    Evaluates `steps + 1` evenly spaced percentiles (NaNs ignored) and
    returns them as a single-column DataFrame indexed by percentile;
    with `exceed` the index runs from 100 down to 0 (exceedance form).
    """
    pcts = [round((1 / steps) * idx * 100, 5) for idx in range(steps + 1)]
    values = np.nanpercentile(flows, pcts)
    index = list(reversed(pcts)) if exceed else pcts
    return pd.DataFrame(values, columns=[col_name, ], index=index)
def compute_scalar_fdc(first_fdc, second_fdc):
    """Compute the ratio between two flow duration curves.

    Both inputs are raw flow series; each is turned into an FDC and the
    element-wise ratio (first / second) is returned next to the first
    curve's values, with infinite/NaN rows dropped.
    """
    fdc_a = compute_fdc(first_fdc)
    fdc_b = compute_fdc(second_fdc)
    ratios = np.divide(fdc_a['flow'].values.flatten(), fdc_b['flow'].values.flatten())
    scalars_df = pd.DataFrame(
        np.transpose([fdc_a.values[:, 0], ratios]),
        columns=(fdc_a.columns[0], 'Scalars'))
    # drop rows where the second curve was zero (ratio -> inf) or NaN
    scalars_df = scalars_df.replace(np.inf, np.nan).dropna()
    return scalars_df
| [
"numpy.nanpercentile",
"pandas.merge",
"math.log",
"hydrostats.make_table",
"hydrostats.data.merge_data",
"pandas.DataFrame",
"numpy.transpose"
] | [((595, 643), 'hydrostats.data.merge_data', 'hd.merge_data', ([], {'sim_df': 'simulated', 'obs_df': 'observed'}), '(sim_df=simulated, obs_df=observed)\n', (608, 643), True, 'import hydrostats.data as hd\n'), ((665, 713), 'hydrostats.data.merge_data', 'hd.merge_data', ([], {'sim_df': 'corrected', 'obs_df': 'observed'}), '(sim_df=corrected, obs_df=observed)\n', (678, 713), True, 'import hydrostats.data as hd\n'), ((833, 896), 'hydrostats.make_table', 'hs.make_table', ([], {'merged_dataframe': 'merged_sim_obs', 'metrics': 'metrics'}), '(merged_dataframe=merged_sim_obs, metrics=metrics)\n', (846, 896), True, 'import hydrostats as hs\n'), ((910, 973), 'hydrostats.make_table', 'hs.make_table', ([], {'merged_dataframe': 'merged_cor_obs', 'metrics': 'metrics'}), '(merged_dataframe=merged_cor_obs, metrics=metrics)\n', (923, 973), True, 'import hydrostats as hs\n'), ((1220, 1279), 'pandas.merge', 'pd.merge', (['table1', 'table2'], {'right_index': '(True)', 'left_index': '(True)'}), '(table1, table2, right_index=True, left_index=True)\n', (1228, 1279), True, 'import pandas as pd\n'), ((1469, 1505), 'numpy.nanpercentile', 'np.nanpercentile', (['flows', 'percentiles'], {}), '(flows, percentiles)\n', (1485, 1505), True, 'import numpy as np\n'), ((1562, 1620), 'pandas.DataFrame', 'pd.DataFrame', (['flows'], {'columns': '[col_name]', 'index': 'percentiles'}), '(flows, columns=[col_name], index=percentiles)\n', (1574, 1620), True, 'import pandas as pd\n'), ((1926, 1972), 'numpy.transpose', 'np.transpose', (['[first_fdc.values[:, 0], ratios]'], {}), '([first_fdc.values[:, 0], ratios])\n', (1938, 1972), True, 'import numpy as np\n'), ((366, 386), 'math.log', 'math.log', (['(1 - 1 / rp)'], {}), '(1 - 1 / rp)\n', (374, 386), False, 'import math\n')] |
import numpy as np
from typing import Callable
from ..problem import Problem
def rescale(points, lb: np.ndarray, ub: np.ndarray) -> np.ndarray:
    """
    Linearly map points from the unit cube [0, 1] onto the box [lb, ub].

    Parameters
    ----------
    points: ndarray, shape=(n_starts, dim)
        Points in the unit cube.
    lb, ub: ndarray, shape=(1, dim)
        Lower and upper bounds; all components must be finite.
    """
    box_width = ub - lb
    return points * box_width + lb
def assign_startpoints(
        n_starts: int,
        startpoint_method: Callable,
        problem: "Problem",
        startpoint_resample: bool,
) -> np.ndarray:
    """
    Assign start points for multi-start optimization.

    Parameters
    ----------
    n_starts:
        Number of start points to return.
    startpoint_method:
        Callable drawing start points, or ``False`` to return an
        (n_starts, problem.dim) array of NaN dummies.
    problem:
        Problem providing bounds, guesses and the objective.
    startpoint_resample:
        Whether to resample start points with non-finite objective value
        and sort the points by objective value.

    Returns
    -------
    ndarray, shape=(n_starts, problem.dim)
    """
    # check if startpoints needed
    if startpoint_method is False:
        # fill with dummies
        # BUG FIX: the original called np.zeros(n_starts, problem.dim),
        # which passes problem.dim as the dtype argument and raises a
        # TypeError; the shape must be a single tuple.
        startpoints = np.zeros((n_starts, problem.dim))
        startpoints[:] = np.nan
        return startpoints

    x_guesses = problem.x_guesses
    dim = problem.lb.size

    # number of required startpoints
    n_guessed_points = x_guesses.shape[0]
    n_required_points = n_starts - n_guessed_points

    if n_required_points <= 0:
        # guesses alone already cover all starts
        return x_guesses[:n_starts, :]

    # apply startpoint method
    x_sampled = startpoint_method(
        n_starts=n_required_points,
        lb=problem.lb_init, ub=problem.ub_init,
        x_guesses=problem.x_guesses,
        objective=problem.objective
    )

    # put together: user guesses first, then sampled points
    startpoints = np.zeros((n_starts, dim))
    startpoints[0:n_guessed_points, :] = x_guesses
    startpoints[n_guessed_points:n_starts, :] = x_sampled

    # resample startpoints
    if startpoint_resample:
        startpoints = resample_startpoints(
            startpoints=startpoints,
            problem=problem,
            method=startpoint_method
        )

    return startpoints
def resample_startpoints(startpoints, problem, method):
    """
    Resample start points having a non-finite objective value and order
    the result by objective value (ascending).

    Parameters
    ----------
    startpoints: ndarray, shape=(n_starts, dim)
        Candidate start points.
    problem:
        Problem providing bounds, guesses and the objective.
    method:
        Startpoint method used to draw replacement points.

    Returns
    -------
    ndarray, shape=(n_starts, dim), sorted by objective value.
    """
    n_starts = startpoints.shape[0]
    resampled_startpoints = np.zeros_like(startpoints)
    lb = problem.lb_init
    ub = problem.ub_init
    x_guesses = problem.x_guesses
    fvals = np.empty((n_starts,))
    # iterate over startpoints
    for j in range(0, n_starts):
        startpoint = startpoints[j, :]
        # apply method until a point with a usable objective value is found
        fvals[j] = problem.objective(startpoint)
        # BUG FIX: the original tested `fvals[j] == np.nan`, which is
        # always False (NaN never compares equal); np.isnan is required
        # to actually detect NaN objective values.
        while fvals[j] == np.inf or np.isnan(fvals[j]):
            startpoint = method(
                n_starts=1,
                lb=lb,
                ub=ub,
                x_guesses=x_guesses
            )[0, :]
            fvals[j] = problem.objective(startpoint)
        # assign startpoint
        resampled_startpoints[j, :] = startpoint
    startpoint_order = np.argsort(fvals)
    return resampled_startpoints[startpoint_order, :]
| [
"numpy.argsort",
"numpy.zeros",
"numpy.zeros_like",
"numpy.empty"
] | [((1425, 1450), 'numpy.zeros', 'np.zeros', (['(n_starts, dim)'], {}), '((n_starts, dim))\n', (1433, 1450), True, 'import numpy as np\n'), ((2119, 2145), 'numpy.zeros_like', 'np.zeros_like', (['startpoints'], {}), '(startpoints)\n', (2132, 2145), True, 'import numpy as np\n'), ((2243, 2264), 'numpy.empty', 'np.empty', (['(n_starts,)'], {}), '((n_starts,))\n', (2251, 2264), True, 'import numpy as np\n'), ((2836, 2853), 'numpy.argsort', 'np.argsort', (['fvals'], {}), '(fvals)\n', (2846, 2853), True, 'import numpy as np\n'), ((803, 834), 'numpy.zeros', 'np.zeros', (['n_starts', 'problem.dim'], {}), '(n_starts, problem.dim)\n', (811, 834), True, 'import numpy as np\n')] |
import logging
import numpy as np
from luigi.util import requires
from netCDF4 import Dataset, Group, Variable
from iasi.file import CopyNetcdfFile, MoveVariables
from iasi.quadrant import Quadrant
from iasi.util import root_group_of
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class CompositionException(Exception):
    """Raised when a netCDF group does not hold a known decomposition."""
class Composition:
    """Base class for reconstructing matrices stored as decompositions.

    Subclasses implement :meth:`reconstruct` for a specific
    decomposition (SVD or eigendecomposition).
    """

    @staticmethod
    def factory(group: Group):
        """Return the composition matching the variables found in ``group``.

        Raises CompositionException if no known decomposition is present.
        """
        variable_names = group.variables.keys()
        if 'U' in variable_names:
            return SingularValueComposition(group)
        if 'Q' in variable_names:
            return EigenComposition(group)
        raise CompositionException(
            'Group {} cannot be composed'.format(group.name))

    def __init__(self, group: Group):
        # netCDF group holding the decomposition variables
        self.group = group

    def reconstruct(self, nol: np.ma.MaskedArray, target: Dataset = None):
        raise NotImplementedError

    def _export_reconstruction(self, target: Dataset, array: np.ma.MaskedArray, quadrant: Quadrant):
        """Write ``array`` into ``target``, copying missing dimensions
        from the source's root group first."""
        root = root_group_of(self.group)
        present = target.dimensions.keys()
        for name, dim in root.dimensions.items():
            if name in present:
                continue
            target.createDimension(
                name, len(dim) if not dim.isunlimited() else None)
        variable = quadrant.create_variable(target, self.group.path)
        variable[:] = array[:]
class SingularValueComposition(Composition):
    """Reconstructs matrices stored as a truncated SVD (U, s, Vh, k)."""

    def __init__(self, group: Group):
        super().__init__(group)
        variable_names = group.variables.keys()
        assert 'U' in variable_names and 's' in variable_names and 'Vh' in variable_names and 'k' in variable_names
        self.U = group['U']
        self.s = group['s']
        self.Vh = group['Vh']
        self.k = group['k']
        self.quadrant = Quadrant.for_disassembly(
            group.parent.name, group.name, self.U)

    def reconstruct(self, nol: np.ma.MaskedArray, target: Dataset = None) -> np.ma.MaskedArray:
        """Rebuild U * diag(s) * Vh per event; optionally export to ``target``."""
        result = np.ma.masked_all(
            self.quadrant.transformed_shape(), dtype=np.float32)
        # read all variables once to avoid repeated netCDF access
        ranks = self.k[...]
        left = self.U[...]
        singular_values = self.s[...]
        right = self.Vh[...]
        for event in range(self.Vh.shape[0]):
            # skip events without a valid level count or rank
            if np.ma.is_masked(nol[event]) or nol.data[event] > 29:
                continue
            if np.ma.is_masked(ranks[event]) or ranks.data[event] <= 0:
                continue
            level = int(nol.data[event])
            rank = ranks.data[event]
            u = left.data[event, :, :rank]
            sv = singular_values.data[event, :rank]
            vh = right.data[event, :rank, :]
            reconstruction = (u * sv).dot(vh)
            self.quadrant.assign_disassembly(
                reconstruction, result[event], level)
        if target:
            self._export_reconstruction(target, result, self.quadrant)
        return result
class EigenComposition(Composition):
    """Rebuilds a symmetric matrix from its eigendecomposition (Q, s)."""

    def __init__(self, group: Group):
        super().__init__(group)
        names = group.variables.keys()
        assert 'Q' in names and 's' in names and 'k' in names
        self.Q = group['Q']
        self.s = group['s']
        self.k = group['k']
        self.quadrant = Quadrant.for_disassembly(
            group.parent.name, group.name, self.Q)

    def reconstruct(self, nol: np.ma.MaskedArray, target: Dataset = None) -> np.ma.MaskedArray:
        """Recompose Q * s @ Q.T per event; masked/invalid events stay masked."""
        result = np.ma.masked_all(
            self.quadrant.transformed_shape(), dtype=np.float32)
        # Materialize each variable once instead of slicing netCDF per event.
        Q_all = self.Q[...]
        s_all = self.s[...]
        k_all = self.k[...]
        for event in range(self.Q.shape[0]):
            # Skip events with a masked or out-of-range level count.
            if np.ma.is_masked(nol[event]) or nol.data[event] > 29:
                continue
            # Skip events with a masked or non-positive rank.
            if np.ma.is_masked(k_all[event]) or k_all.data[event] <= 0:
                continue
            level = int(nol.data[event])
            rank = k_all.data[event]
            Q = Q_all.data[event, :, :rank]
            s = s_all.data[event, :rank]
            rebuilt = (Q * s).dot(Q.T)
            self.quadrant.assign_disassembly(rebuilt, result[event], level)
        if target:
            self._export_reconstruction(target, result, self.quadrant)
        return result
| [
"logging.getLogger",
"numpy.ma.is_masked",
"iasi.quadrant.Quadrant.for_disassembly",
"iasi.util.root_group_of"
] | [((247, 274), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (264, 274), False, 'import logging\n'), ((964, 989), 'iasi.util.root_group_of', 'root_group_of', (['self.group'], {}), '(self.group)\n', (977, 989), False, 'from iasi.util import root_group_of\n'), ((1727, 1790), 'iasi.quadrant.Quadrant.for_disassembly', 'Quadrant.for_disassembly', (['group.parent.name', 'group.name', 'self.U'], {}), '(group.parent.name, group.name, self.U)\n', (1751, 1790), False, 'from iasi.quadrant import Quadrant\n'), ((3192, 3255), 'iasi.quadrant.Quadrant.for_disassembly', 'Quadrant.for_disassembly', (['group.parent.name', 'group.name', 'self.Q'], {}), '(group.parent.name, group.name, self.Q)\n', (3216, 3255), False, 'from iasi.quadrant import Quadrant\n'), ((2176, 2203), 'numpy.ma.is_masked', 'np.ma.is_masked', (['nol[event]'], {}), '(nol[event])\n', (2191, 2203), True, 'import numpy as np\n'), ((2269, 2298), 'numpy.ma.is_masked', 'np.ma.is_masked', (['k_all[event]'], {}), '(k_all[event])\n', (2284, 2298), True, 'import numpy as np\n'), ((3610, 3637), 'numpy.ma.is_masked', 'np.ma.is_masked', (['nol[event]'], {}), '(nol[event])\n', (3625, 3637), True, 'import numpy as np\n'), ((3703, 3732), 'numpy.ma.is_masked', 'np.ma.is_masked', (['k_all[event]'], {}), '(k_all[event])\n', (3718, 3732), True, 'import numpy as np\n')] |
import os
import cv2
import sys
import math
import pyprind
import torch
import numpy as np
import torch.tensor as Tensor
# import torch.nn.functional as F
import torchvision.transforms as transforms
import dl_modules.dataset as ds
epsilon = 0.008
def enhance_images(folder: str, denoise_strength: int,
                   window_size: int, contrast: int, kernel_size: int) -> None:
    """Run the enhance() pipeline over every image in a dataset folder and
    write the results to '<folder>/enhanced/<name>_e.png'.

    `folder` is resolved relative to ds.SAVE_DIR + 'data/'.
    """
    folder = ds.SAVE_DIR + 'data/' + folder
    if not os.path.isdir(folder):
        print('Folder "' + folder + '" does not exist!')
        return
    transform = transforms.Compose([
        transforms.ToTensor()
    ])
    # Deterministic pass over the dataset: no shuffling, no downscaling.
    dataset = ds.Dataset(folder, scale=ds.scale,
                         normalization=transform, downscaling='none')
    loader = torch.utils.data.DataLoader(dataset, batch_size=ds.valid_batch_size,
                                         shuffle=False, num_workers=0)
    total = len(loader)
    if not os.path.isdir(folder + '/enhanced'):
        os.makedirs(folder + '/enhanced')
    iter_bar = pyprind.ProgBar(total, title="Enhance", stream=sys.stdout)
    i = 0
    with torch.no_grad():
        for data in loader:
            downscaled, source = data
            # Tensor (C,H,W) in [0,1] -> HWC BGR in [0,255] for OpenCV.
            noisy = np.transpose(source.squeeze(0).cpu().numpy(), (1, 2, 0)) * 255
            noisy = cv2.cvtColor(noisy, cv2.COLOR_RGB2BGR)
            enhanced = enhance(noisy, denoise_strength, window_size, contrast, kernel_size)
            cv2.imwrite(folder + '/enhanced/' + dataset.ids[i][:-4] + '_e.png', enhanced.astype(np.uint8))
            iter_bar.update()
            i += 1
    # NOTE(review): the loop already updated once per batch; this final
    # update() pushes the bar one step past `total` -- confirm intended.
    iter_bar.update()
def correct_colors(image: Tensor, source: Tensor) -> Tensor:
    """Match each channel's mean/std of `image` to the corresponding channel
    of `source`, clamping the result to [-1, 1]."""
    # Drop a leading batch dimension if present.
    if len(image.shape) == 4:
        image = image.squeeze(0)
    if len(source.shape) == 4:
        source = source.squeeze(0)
    corrected = []
    for c in range(image.shape[0]):
        img_c = image[c, :, :]
        src_c = source[c, :, :]
        img_std = img_c.std()
        # Near-constant channels keep their scale to avoid dividing by ~0.
        alpha = 1.0 if torch.abs(img_std) < epsilon else src_c.std() / img_std
        beta = src_c.mean() - alpha * img_c.mean()
        corrected.append(alpha * img_c + beta)
    return torch.clamp(torch.stack(corrected, dim=0), min=-1.0, max=1.0)
def enhance(image, denoise_strength: int=2, window_size: int=5, contrast: int=0, kernel_size: int=5):
    """Full enhancement pipeline: denoise, auto-contrast, then dither."""
    result = gentle_denoise(image, denoise_strength, window_size, kernel_size)
    result = auto_contrast(result, strength=contrast)
    return dither(result)
def dither(image, dither_strength: int=1):
    """Add uniform dither noise in [-0.5, 0.5] * `dither_strength`, then
    round and clip the result to the valid 8-bit range [0, 255].

    Fix: ``np.float`` was removed in NumPy 1.24 and raised AttributeError;
    the builtin ``float`` (an alias for float64 in astype) is used instead.
    """
    noise = (np.random.rand(*image.shape) - 0.5) * dither_strength
    return np.clip(np.round(image.astype(float) + noise), a_min=0, a_max=255)
def gentle_denoise(noisy, denoise_strength, window_size, kernel_size):
    """Remove only the high-frequency part of the noise: denoise, extract the
    residual, box-blur it, and subtract the blurred residual from the input."""
    base = cv2.fastNlMeansDenoisingColored(
        noisy.astype(np.uint8), None, denoise_strength, denoise_strength, window_size, window_size * 3
    )
    noisy = noisy.astype(np.float32)
    residual = noisy - base.astype(np.float32)
    if kernel_size > 0:
        # Smoothing the residual keeps low-frequency detail in the output.
        box = np.ones((kernel_size, kernel_size), np.float32) / (kernel_size ** 2)
        residual -= cv2.filter2D(residual, -1, box)
    return noisy - residual
def auto_contrast(image, clip_hist_percent: int=1,
                  strength: int=5, saturation: int=64):
    """Histogram-based automatic brightness/contrast correction.

    Clips `clip_hist_percent` percent of pixels from each end of the
    grayscale histogram, derives a contrast gain and brightness offset from
    the remaining range, soft-limits both via saturate(), and applies them.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Calculate grayscale histogram
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_size = len(hist)
    # Calculate cumulative distribution from the histogram
    accumulator = [float(hist[0])]
    for index in range(1, hist_size):
        accumulator.append(accumulator[index - 1] + float(hist[index]))
    # Locate points to clip
    maximum = accumulator[-1]
    clip_hist_percent *= (maximum / 100.0)
    clip_hist_percent /= 2.0
    # Locate left cut
    minimum_gray = 0
    while accumulator[minimum_gray] < clip_hist_percent:
        minimum_gray += 1
    # Locate right cut
    maximum_gray = hist_size - 1
    while accumulator[maximum_gray] >= (maximum - clip_hist_percent):
        maximum_gray -= 1
    # Guard against degenerate histograms where the cuts cross.
    if maximum_gray <= minimum_gray:
        maximum_gray = minimum_gray + 1
    # Calculate values
    alpha = 255 / (maximum_gray - minimum_gray)
    average_gray = (maximum_gray + minimum_gray) // 2
    # NOTE(review): a very narrow histogram (alpha > 4) falls back to a
    # constant contrast of 4 rather than scaling by strength -- confirm.
    if alpha > 4:
        contrast = 4
    else:
        contrast = int(round(saturate(
            alpha * strength,
            saturation
        )))
    correlation = 1.0
    # Brightness pushes the mid-point of the kept range towards gray 127.
    brightness = int(round(saturate(
        correlation * strength * contrast * (127 - average_gray) / 128,
        saturation
    )))
    # print('\n', brightness, contrast)
    auto_result = apply_brightness_contrast(image, brightness, contrast)
    return auto_result
def saturate(x: float, threshhold: float):
    """Smoothly limit `x` to the open interval (-threshhold, threshhold).

    Linear near zero, asymptotically approaching +/-threshhold. The stdlib
    ``math.copysign`` replaces the hand-rolled sign() helper; at x == 0 the
    (1 - exp(0)) factor is 0, so both formulations return 0 there.
    """
    return math.copysign(threshhold, x) * (1 - math.exp(-abs(x) / threshhold))
def sign(x: float):
    """Return 1, 0 or -1 according to the sign of x."""
    # Booleans act as 0/1 in arithmetic, giving the classic signum one-liner.
    return (x > 0) - (x < 0)
def apply_brightness_contrast(input_img, brightness: int=0, contrast: int=0):
    """Shift brightness and stretch contrast using cv2.addWeighted blends."""
    if brightness == 0:
        out = input_img.copy()
    else:
        # Express the brightness offset as a linear map out = a*img + gamma.
        if brightness > 0:
            shadow, highlight = brightness, 255
        else:
            shadow, highlight = 0, 255 + brightness
        scale = (highlight - shadow) / 255
        out = cv2.addWeighted(input_img, scale, input_img, 0, shadow)
    if contrast != 0:
        # Standard contrast curve centered on mid-gray (127).
        f = 131 * (contrast + 127) / (127 * (131 - contrast))
        out = cv2.addWeighted(out, f, out, 0, 127 * (1 - f))
    return out
| [
"cv2.calcHist",
"pyprind.ProgBar",
"torch.abs",
"os.makedirs",
"numpy.ones",
"numpy.random.rand",
"torch.stack",
"cv2.filter2D",
"cv2.addWeighted",
"os.path.isdir",
"dl_modules.dataset.Dataset",
"cv2.cvtColor",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torchvision.transforms.ToTen... | [((624, 703), 'dl_modules.dataset.Dataset', 'ds.Dataset', (['folder'], {'scale': 'ds.scale', 'normalization': 'transform', 'downscaling': '"""none"""'}), "(folder, scale=ds.scale, normalization=transform, downscaling='none')\n", (634, 703), True, 'import dl_modules.dataset as ds\n'), ((742, 844), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'ds.valid_batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(dataset, batch_size=ds.valid_batch_size,\n shuffle=False, num_workers=0)\n', (769, 844), False, 'import torch\n'), ((1011, 1069), 'pyprind.ProgBar', 'pyprind.ProgBar', (['total'], {'title': '"""Enhance"""', 'stream': 'sys.stdout'}), "(total, title='Enhance', stream=sys.stdout)\n", (1026, 1069), False, 'import pyprind\n'), ((3396, 3435), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3408, 3435), False, 'import cv2\n'), ((3484, 3532), 'cv2.calcHist', 'cv2.calcHist', (['[gray]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([gray], [0], None, [256], [0, 256])\n', (3496, 3532), False, 'import cv2\n'), ((440, 461), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (453, 461), False, 'import os\n'), ((917, 952), 'os.path.isdir', 'os.path.isdir', (["(folder + '/enhanced')"], {}), "(folder + '/enhanced')\n", (930, 952), False, 'import os\n'), ((962, 995), 'os.makedirs', 'os.makedirs', (["(folder + '/enhanced')"], {}), "(folder + '/enhanced')\n", (973, 995), False, 'import os\n'), ((1090, 1105), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1103, 1105), False, 'import torch\n'), ((2157, 2185), 'torch.stack', 'torch.stack', (['channels'], {'dim': '(0)'}), '(channels, dim=0)\n', (2168, 2185), False, 'import torch\n'), ((3175, 3216), 'cv2.filter2D', 'cv2.filter2D', (['extracted_noise', '(-1)', 'kernel'], {}), '(extracted_noise, -1, kernel)\n', (3187, 3216), False, 'import cv2\n'), ((5400, 5458), 
'cv2.addWeighted', 'cv2.addWeighted', (['input_img', 'alpha_b', 'input_img', '(0)', 'gamma_b'], {}), '(input_img, alpha_b, input_img, 0, gamma_b)\n', (5415, 5458), False, 'import cv2\n'), ((5652, 5698), 'cv2.addWeighted', 'cv2.addWeighted', (['buf', 'alpha_c', 'buf', '(0)', 'gamma_c'], {}), '(buf, alpha_c, buf, 0, gamma_c)\n', (5667, 5698), False, 'import cv2\n'), ((581, 602), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (600, 602), True, 'import torchvision.transforms as transforms\n'), ((1276, 1314), 'cv2.cvtColor', 'cv2.cvtColor', (['noisy', 'cv2.COLOR_RGB2BGR'], {}), '(noisy, cv2.COLOR_RGB2BGR)\n', (1288, 1314), False, 'import cv2\n'), ((1880, 1897), 'torch.abs', 'torch.abs', (['my_std'], {}), '(my_std)\n', (1889, 1897), False, 'import torch\n'), ((3079, 3126), 'numpy.ones', 'np.ones', (['(kernel_size, kernel_size)', 'np.float32'], {}), '((kernel_size, kernel_size), np.float32)\n', (3086, 3126), True, 'import numpy as np\n'), ((2632, 2660), 'numpy.random.rand', 'np.random.rand', (['*image.shape'], {}), '(*image.shape)\n', (2646, 2660), True, 'import numpy as np\n')] |
from typing import Tuple, List
import argparse
import random
from pathlib import Path
from itertools import chain
from functools import reduce
import cv2
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.utils import Sequence
from tensorflow.keras import optimizers as optim
from tensorflow.keras import backend as K
from tensorflow.keras import models as KM
from tensorflow.keras import layers as KL
def init(seed: int):
    """Seed the stdlib and NumPy RNGs for reproducible runs."""
    np.random.seed(seed)
    random.seed(seed)
class Transform:
    """Base class for image transforms; subclasses override __call__."""

    def __init__(self, *args, **kwargs):
        """Accept and ignore any arguments so subclasses can chain super()."""

    def __call__(self, image: np.ndarray):
        """No-op hook; subclasses implement the actual transform."""
class GaussianNoise(Transform):
    """Adds i.i.d. Gaussian noise to an image.

    Fixes two issues in the original implementation:
    * with the default ``size=None`` NumPy drew a *single scalar* sample,
      shifting every pixel by the same amount instead of adding per-pixel
      noise -- the noise shape now defaults to the image shape;
    * the input array was mutated in place via ``+=``; the transform now
      returns a new array and leaves the caller's data untouched.
    """

    def __init__(self, size: Tuple[int, int] = None, mean: float = .0, std: float = .1):
        super(GaussianNoise, self).__init__()
        self.size = size
        self.mean = mean
        self.std = std

    def __call__(self, image: np.ndarray):
        super(GaussianNoise, self).__call__(image)
        noise_shape = self.size if self.size is not None else image.shape
        return image + np.random.normal(self.mean, self.std, noise_shape)
class Crop(Transform):
    """Crops a window from an image; size and position are random if unset."""

    def __init__(self, size: Tuple[int, int] = None, pos: Tuple[int, int] = None):
        super(Crop, self).__init__()
        self.size = size
        self.pos = pos

    def __call__(self, image: np.ndarray):
        super(Crop, self).__call__(image)
        rows = np.size(image, 0)
        cols = np.size(image, 1)
        if self.size:
            w, h = self.size
        else:
            # Random window up to half the image in each dimension.
            w = np.random.randint(int(rows / 2))
            h = np.random.randint(int(cols / 2))
        if self.pos:
            x, y = self.pos
        else:
            x = np.random.randint(rows - w)
            y = np.random.randint(cols - h)
        return image[x:x + w, y:y + h]
class Resize(Transform):
    """Resizes an image to a fixed size or by a (possibly random) scale."""

    def __init__(self, size: Tuple[int, int] = (0, 0), scale: float = 1.,
                 metric=cv2.INTER_CUBIC):
        super(Resize, self).__init__()
        self.size = size
        self.scale = scale
        self.metric = metric

    def __call__(self, image: np.ndarray):
        if self.size == (0, 0) and self.scale == 1.:
            # Neither target size nor scale given: pick a scale in [0.5, 1.0).
            factor = float(np.random.rand(1)[0]) * .5 + .5
        else:
            factor = self.scale
        return cv2.resize(image, self.size, fx=factor, fy=factor,
                          interpolation=self.metric)
class Eval:
    """Loads one image as a batch of 1 for inference and saves the result.

    Bug fix: ``to_png`` previously discarded its ``filename`` argument and
    always wrote to the hard-coded path 'Model1.png' (a debugging leftover);
    it now writes to the caller's path with the extension forced to .png.
    """

    def __init__(self, filename: str):
        # (1, H, W, C) float image in [0, 1].
        self.image = np.expand_dims(cv2.imread(filename) / 255., axis=0)

    def set_result(self, image: np.ndarray):
        self.image = image
        return self

    def to_png(self, filename: str):
        """Write the current image to `filename` with a .png extension."""
        # rsplit keeps the whole name when there is no extension to strip.
        stem = filename.rsplit('.', 1)[0]
        cv2.imwrite(stem + '.png', self.image)
class Dataset(keras.utils.Sequence):
    """Keras sequence yielding (noisy source, clean target) CIFAR-10 batches.

    Bug fix: the ``train`` flag was previously ignored, so validation data
    was served from the training split; it now selects x_train vs x_test
    (and the shuffle indices follow the selected split).
    """

    def __init__(self, train: bool = True,
                 source_transforms: List[Transform] = None,
                 target_transforms: List[Transform] = None,
                 batch: int = 32, shuffle: bool = True):
        self.batch = batch
        self.shuffle = shuffle
        self.channels = 3
        self.is_training = True
        (self.x_train, _), (self.x_test, _) = keras.datasets.cifar10.load_data()
        # Select the split requested by the caller (was hard-coded to train).
        self.images = self.x_train if train else self.x_test
        self.size = self.images[0].shape[:2]
        self.source_transforms = source_transforms or []
        self.target_transforms = target_transforms or []
        self.indices = np.arange(len(self.images))

    def train(self, flag: bool = True):
        self.is_training = flag

    def eval(self):
        self.train(False)

    def on_epoch_end(self) \
            -> None:
        if self.shuffle:
            np.random.shuffle(self.indices)

    def __len__(self) \
            -> int:
        # NOTE(review): this is the number of samples, not batches; Keras
        # treats it as steps per epoch -- confirm that is intended.
        return len(self.images)

    def __getitem__(self, item: int) \
            -> Tuple[np.ndarray, np.ndarray]:
        """Build one batch by piping image/255 through the transform chains."""
        sources = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
        targets = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
        indices = np.roll(self.indices, item)
        for b in range(self.batch):
            image = self.images[indices[b]]
            sources[b] = reduce(lambda i, t: t(i), [image / 255.] + self.source_transforms)
            targets[b] = reduce(lambda i, t: t(i), [image / 255.] + self.target_transforms)
        return sources, targets
class DenoisingNetwork(object):
    """Builds a small fully-convolutional denoiser.

    Modes:
      * ``base`` -- plain 5-layer conv net
      * ``skip`` -- averages the output with the noisy input (residual-style)
      * ``bn``   -- adds BatchNormalization after every conv, plus the skip
    """

    # (filters, layer name) for the five conv stages; the last maps back to RGB.
    _LAYERS = ((64, 'layer1'), (64, 'layer2'), (64, 'layer3'),
               (64, 'layer4'), (3, 'layer5'))

    def __new__(cls, mode: str) \
            -> KM.Model:
        assert mode in ['base', 'skip', 'bn']
        inputs = KL.Input(shape=[None, None, 3],
                          name="input_image")
        x = inputs
        # The original repeated this conv/BN/ReLU stanza five times verbatim;
        # the loop below is behaviorally identical.
        for filters, name in cls._LAYERS:
            x = KL.Conv2D(filters, (3, 3), padding="SAME",
                          kernel_initializer='random_uniform',
                          bias_initializer='zeros',
                          name=name)(x)
            if mode == 'bn':
                x = KL.BatchNormalization()(x)
            x = KL.ReLU()(x)
        if mode == 'skip' or mode == 'bn':
            # Residual-style connection: average prediction with the input.
            x = KL.average([x, inputs])
        return KM.Model(inputs=inputs, outputs=x,
                        name='denoising')

    @staticmethod
    def loss(y_true: tf.Tensor, y_pred: tf.Tensor) \
            -> tf.Tensor:
        """Mean squared error."""
        return K.mean(K.square(y_pred - y_true))

    @classmethod
    def metric(cls, y_true: tf.Tensor, y_pred: tf.Tensor) \
            -> tf.Tensor:
        """Peak signal-to-noise ratio in dB (images scaled to [0, 1])."""
        return tf.image.psnr(y_true, y_pred, max_val=1.)

    @classmethod
    def compile(cls, model, optimizer, loss, metric)\
            -> None:
        """Attach optimizer, loss and metric to the model."""
        model.compile(optimizer=optimizer,
                      loss=loss,
                      metrics=[metric])
class DenoisingModel(object):
    """Thin training/inference wrapper around a DenoisingNetwork model."""

    def __init__(self, mode: str):
        # mode is one of {'base', 'skip', 'bn'}; see DenoisingNetwork.
        self.klass = DenoisingNetwork
        self.model = self.klass(mode)

    def train(self,
              train_generator: Sequence,
              val_generator: Sequence,
              config: object, epochs: int) \
            -> None:
        """Compile the model with Adam and fit it on the two generators.

        `config` must expose lr and lr_decay.
        """
        optimizer = optim.Adam(lr=config.lr,
                              decay=config.lr_decay)
        self.klass.compile(self.model,
                           optimizer=optimizer,
                           loss=self.klass.loss,
                           metric=self.klass.metric)
        # NOTE(review): fit_generator is deprecated in newer tf.keras in
        # favor of Model.fit -- confirm against the pinned TF version.
        self.model.fit_generator(
            train_generator,
            epochs=epochs,
            steps_per_epoch=len(train_generator),
            validation_data=val_generator,
            validation_steps=100,
            workers=4,
            use_multiprocessing=True,
            callbacks=[
                # TensorBoard(log_dir=config.log, write_graph=True, write_images=True),
                # CustomCallback(log_dir=config.log, interval=config.interval,
                #                train=train_generator[0], test=[v for v in val_generator]),
            ]
        )

    def predict(self, inputs):
        # Unpack the first (and only) element of the predicted batch.
        result, *_ = self.model.predict(inputs)
        return result

    def save(self, path: str):
        # Serializes architecture + weights to an HDF5 file.
        self.model.save(path)
def main(args: argparse.Namespace):
    """Build train/val generators, train the denoiser, save it, and
    optionally run it on a single test image."""
    # Training pairs: clean targets vs Gaussian-noised sources.
    train_generator = Dataset(
        batch=args.batch,
        target_transforms=[
        ], source_transforms=[
            GaussianNoise(),
        ]
    )
    val_generator = Dataset(
        train=False,
        batch=1,
        target_transforms=[
        ], source_transforms=[
            GaussianNoise(),
        ]
    )
    model = DenoisingModel(mode=args.mode)
    model.train(train_generator=train_generator,
                val_generator=val_generator,
                epochs=args.epoch, config=args)
    model.save('model.hdf5')
    if args.test:
        # Denoise the provided image and write the result back as a .png.
        eval_dataset = Eval(args.test)
        result = model.predict(eval_dataset.image)
        eval_dataset.set_result(result * 255.).to_png(args.test)
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters, seed the RNGs, train.
    parser = argparse.ArgumentParser(description='Evaluate Image Denoising')
    parser.add_argument("--mode", default='base', choices=['base', 'skip', 'bn'],
                        help="Select mode for training model")
    parser.add_argument("--epoch", type=int, default=100, required=False,
                        help="Epoch for training")
    parser.add_argument("--interval", type=int, default=1, required=False)
    parser.add_argument("--batch", type=int, default=32, required=False,
                        help="Mini-batch for training")
    parser.add_argument("--lr", type=float, default=.001, required=False)
    parser.add_argument("--lr-decay", type=float, default=.0, required=False)
    parser.add_argument("--test", type=str, default='noisy.png', required=False,
                        help="Test filename")
    parser.add_argument("--log", type=str, default='./logs', required=False,
                        help="Logging directory")
    parser.add_argument("--seed", type=int, default=42, required=False,
                        help="The answer to life the universe and everything")
    arguments = parser.parse_args()
    init(arguments.seed)
    main(arguments)
| [
"numpy.random.rand",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.average",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Conv2D",
"argparse.ArgumentParser",
"tensorflow.keras.datasets.cifar10.load_data",
"numpy.empty",
"numpy.random.seed",
"tensorflow.keras.m... | [((477, 494), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (488, 494), False, 'import random\n'), ((499, 519), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (513, 519), True, 'import numpy as np\n'), ((8822, 8885), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate Image Denoising"""'}), "(description='Evaluate Image Denoising')\n", (8845, 8885), False, 'import argparse\n'), ((1004, 1052), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.std', 'self.size'], {}), '(self.mean, self.std, self.size)\n', (1020, 1052), True, 'import numpy as np\n'), ((2159, 2234), 'cv2.resize', 'cv2.resize', (['image', 'self.size'], {'fx': 'scale', 'fy': 'scale', 'interpolation': 'self.metric'}), '(image, self.size, fx=scale, fy=scale, interpolation=self.metric)\n', (2169, 2234), False, 'import cv2\n'), ((2607, 2640), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'self.image'], {}), '(filename, self.image)\n', (2618, 2640), False, 'import cv2\n'), ((3063, 3097), 'tensorflow.keras.datasets.cifar10.load_data', 'keras.datasets.cifar10.load_data', ([], {}), '()\n', (3095, 3097), True, 'import tensorflow.keras as keras\n'), ((3769, 3836), 'numpy.empty', 'np.empty', (['(self.batch, *self.size, self.channels)'], {'dtype': 'np.float32'}), '((self.batch, *self.size, self.channels), dtype=np.float32)\n', (3777, 3836), True, 'import numpy as np\n'), ((3855, 3922), 'numpy.empty', 'np.empty', (['(self.batch, *self.size, self.channels)'], {'dtype': 'np.float32'}), '((self.batch, *self.size, self.channels), dtype=np.float32)\n', (3863, 3922), True, 'import numpy as np\n'), ((3942, 3969), 'numpy.roll', 'np.roll', (['self.indices', 'item'], {}), '(self.indices, item)\n', (3949, 3969), True, 'import numpy as np\n'), ((4427, 4478), 'tensorflow.keras.layers.Input', 'KL.Input', ([], {'shape': '[None, None, 3]', 'name': '"""input_image"""'}), "(shape=[None, None, 3], name='input_image')\n", (4435, 
4478), True, 'from tensorflow.keras import layers as KL\n'), ((6073, 6125), 'tensorflow.keras.models.Model', 'KM.Model', ([], {'inputs': 'inputs', 'outputs': 'x', 'name': '"""denoising"""'}), "(inputs=inputs, outputs=x, name='denoising')\n", (6081, 6125), True, 'from tensorflow.keras import models as KM\n'), ((6416, 6458), 'tensorflow.image.psnr', 'tf.image.psnr', (['y_true', 'y_pred'], {'max_val': '(1.0)'}), '(y_true, y_pred, max_val=1.0)\n', (6429, 6458), True, 'import tensorflow as tf\n'), ((7000, 7047), 'tensorflow.keras.optimizers.Adam', 'optim.Adam', ([], {'lr': 'config.lr', 'decay': 'config.lr_decay'}), '(lr=config.lr, decay=config.lr_decay)\n', (7010, 7047), True, 'from tensorflow.keras import optimizers as optim\n'), ((3555, 3586), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (3572, 3586), True, 'import numpy as np\n'), ((4536, 4655), 'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['(64)', '(3, 3)'], {'padding': '"""SAME"""', 'kernel_initializer': '"""random_uniform"""', 'bias_initializer': '"""zeros"""', 'name': '"""layer1"""'}), "(64, (3, 3), padding='SAME', kernel_initializer='random_uniform',\n bias_initializer='zeros', name='layer1')\n", (4545, 4655), True, 'from tensorflow.keras import layers as KL\n'), ((4801, 4810), 'tensorflow.keras.layers.ReLU', 'KL.ReLU', ([], {}), '()\n', (4808, 4810), True, 'from tensorflow.keras import layers as KL\n'), ((4826, 4945), 'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['(64)', '(3, 3)'], {'padding': '"""SAME"""', 'kernel_initializer': '"""random_uniform"""', 'bias_initializer': '"""zeros"""', 'name': '"""layer2"""'}), "(64, (3, 3), padding='SAME', kernel_initializer='random_uniform',\n bias_initializer='zeros', name='layer2')\n", (4835, 4945), True, 'from tensorflow.keras import layers as KL\n'), ((5091, 5100), 'tensorflow.keras.layers.ReLU', 'KL.ReLU', ([], {}), '()\n', (5098, 5100), True, 'from tensorflow.keras import layers as KL\n'), ((5116, 5235), 
'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['(64)', '(3, 3)'], {'padding': '"""SAME"""', 'kernel_initializer': '"""random_uniform"""', 'bias_initializer': '"""zeros"""', 'name': '"""layer3"""'}), "(64, (3, 3), padding='SAME', kernel_initializer='random_uniform',\n bias_initializer='zeros', name='layer3')\n", (5125, 5235), True, 'from tensorflow.keras import layers as KL\n'), ((5381, 5390), 'tensorflow.keras.layers.ReLU', 'KL.ReLU', ([], {}), '()\n', (5388, 5390), True, 'from tensorflow.keras import layers as KL\n'), ((5406, 5525), 'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['(64)', '(3, 3)'], {'padding': '"""SAME"""', 'kernel_initializer': '"""random_uniform"""', 'bias_initializer': '"""zeros"""', 'name': '"""layer4"""'}), "(64, (3, 3), padding='SAME', kernel_initializer='random_uniform',\n bias_initializer='zeros', name='layer4')\n", (5415, 5525), True, 'from tensorflow.keras import layers as KL\n'), ((5671, 5680), 'tensorflow.keras.layers.ReLU', 'KL.ReLU', ([], {}), '()\n', (5678, 5680), True, 'from tensorflow.keras import layers as KL\n'), ((5696, 5814), 'tensorflow.keras.layers.Conv2D', 'KL.Conv2D', (['(3)', '(3, 3)'], {'padding': '"""SAME"""', 'kernel_initializer': '"""random_uniform"""', 'bias_initializer': '"""zeros"""', 'name': '"""layer5"""'}), "(3, (3, 3), padding='SAME', kernel_initializer='random_uniform',\n bias_initializer='zeros', name='layer5')\n", (5705, 5814), True, 'from tensorflow.keras import layers as KL\n'), ((5960, 5969), 'tensorflow.keras.layers.ReLU', 'KL.ReLU', ([], {}), '()\n', (5967, 5969), True, 'from tensorflow.keras import layers as KL\n'), ((6033, 6056), 'tensorflow.keras.layers.average', 'KL.average', (['[x, inputs]'], {}), '([x, inputs])\n', (6043, 6056), True, 'from tensorflow.keras import layers as KL\n'), ((6270, 6295), 'tensorflow.keras.backend.square', 'K.square', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (6278, 6295), True, 'from tensorflow.keras import backend as K\n'), ((2350, 2370), 'cv2.imread', 
'cv2.imread', (['filename'], {}), '(filename)\n', (2360, 2370), False, 'import cv2\n'), ((4762, 4785), 'tensorflow.keras.layers.BatchNormalization', 'KL.BatchNormalization', ([], {}), '()\n', (4783, 4785), True, 'from tensorflow.keras import layers as KL\n'), ((5052, 5075), 'tensorflow.keras.layers.BatchNormalization', 'KL.BatchNormalization', ([], {}), '()\n', (5073, 5075), True, 'from tensorflow.keras import layers as KL\n'), ((5342, 5365), 'tensorflow.keras.layers.BatchNormalization', 'KL.BatchNormalization', ([], {}), '()\n', (5363, 5365), True, 'from tensorflow.keras import layers as KL\n'), ((5632, 5655), 'tensorflow.keras.layers.BatchNormalization', 'KL.BatchNormalization', ([], {}), '()\n', (5653, 5655), True, 'from tensorflow.keras import layers as KL\n'), ((5921, 5944), 'tensorflow.keras.layers.BatchNormalization', 'KL.BatchNormalization', ([], {}), '()\n', (5942, 5944), True, 'from tensorflow.keras import layers as KL\n'), ((1573, 1590), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (1580, 1590), True, 'import numpy as np\n'), ((1627, 1644), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (1634, 1644), True, 'import numpy as np\n'), ((2111, 2128), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2125, 2128), True, 'import numpy as np\n'), ((1419, 1436), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (1426, 1436), True, 'import numpy as np\n'), ((1478, 1495), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (1485, 1495), True, 'import numpy as np\n')] |
import cv2
import json
import numpy as np
from rich import print
from PIL import ImageFile
import torch
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from constants import NORM_MEAN, NORM_STD, DPAC_AGE_LABEL_TO_IDX, DPAC_GENDER_LABEL_TO_IDX, \
DPAC_EMOTION_LABEL_TO_IDX, IMG_HEIGHT, IMG_WIDTH
# Update the data path
# Root directory containing data.json, train_test_split.json and the images.
DATA_PATH = './data/'
NUM_WORKERS = 12  # no. of subprocesses to use for data loading
# Fix the RNG so shuffling/initialization is reproducible across runs.
torch.manual_seed(0)
# FIX: Truncated Image Error
ImageFile.LOAD_TRUNCATED_IMAGES = True
class BaseDataset(Dataset):
    """Wraps pre-loaded context/target crops, pose maps and labels for PyTorch."""

    def __init__(self, x_context, x_target, x_pose, y_age, y_gender, y_emotion, transform):
        super(BaseDataset, self).__init__()
        self.x_context = x_context
        self.x_target = x_target
        self.x_pose = x_pose
        self.y_age = y_age
        self.y_gender = y_gender
        self.y_emotion = y_emotion
        self.transform = transform
        self.norm = transforms.Normalize(NORM_MEAN, NORM_STD)

    def __len__(self):
        return len(self.y_age)

    def __getitem__(self, index):
        context = self.x_context[index]
        target = self.x_target[index]
        pose = torch.tensor(self.x_pose[index], dtype=torch.float32)
        # Augment/convert first, then normalize both crops identically.
        if self.transform:
            target = self.transform(target)
            context = self.transform(context)
        if self.norm:
            context = self.norm(context)
            target = self.norm(target)
        labels = {
            'age': self.y_age[index],
            'gender': self.y_gender[index],
            'emotion': self.y_emotion[index],
        }
        return {
            'target': target,
            'context': context,
            'pose': pose,
            'labels': labels,
        }
def load_data(batch_size, ob_face_region=None, ob_people=None, datatype='train'):
    '''
    Build a DataLoader over the DPAC annotations.

    Args:
        batch_size (int)
        ob_face_region (str): {None, eye, lower, face, head} obfuscated region
        ob_people (str): {None, AO, TO} -- AO obfuscates all people,
            TO only the target person
        datatype (str): {train, test}
    Return:
        dataloader (DataLoader)

    Bug fix: a leftover debugging limiter (`if count > 1: break`) capped the
    loader at the first two images of the split; it and the per-image debug
    print have been removed so the full split is loaded.
    '''
    assert datatype in ['train', 'test']
    assert ob_face_region in [None, 'eye', 'lower', 'face', 'head']
    assert ob_people in [None, 'AO', 'TO']
    if ob_people and not ob_face_region:
        ob_face_region = "face"  # setting default ob_face_region as face
    if ob_face_region and not ob_people:
        ob_people = "AO"  # setting default ob_people as AO -- All Obfuscated
    print("Loading {} data for:\n Obfuscated Face Region: {} \n Obfuscated People: {}".format(
        datatype.upper(), ob_face_region, ob_people))
    with open(DATA_PATH + "data.json", encoding='utf-8') as f:
        all_data = json.load(f)
    with open(DATA_PATH + "train_test_split.json", encoding='utf-8') as f:
        splits_data = json.load(f)
    obfus_path = None
    intact_path = None
    pose_path = None
    # Testing is always done on intact faces, as is target-only obfuscation.
    if datatype == 'test' or ob_face_region is None or ob_people == "TO":
        intact_path = DATA_PATH + "intact/"
        pose_path = intact_path + "pose/"
    if datatype == 'train' and ob_face_region:
        obfus_path = DATA_PATH + "privacy/{}/".format(ob_face_region)
        pose_path = obfus_path + "pose/"
    # labels
    data_gender = []
    data_emotion = []
    data_age = []
    # inputs
    data_context = []
    data_target = []
    data_pose = []
    for img_id in splits_data[datatype]:
        # TODO: Fix these errorneous test images
        # if img_id == "6383_1_3" or img_id == "7074_2_1":
        #     continue
        img_data = all_data[str(img_id)]
        for target_count, target in enumerate(img_data["persons"]):
            # adding the age, gender and emotion labels
            data_age.append(DPAC_AGE_LABEL_TO_IDX[target['attributes']['age']])
            data_gender.append(
                DPAC_GENDER_LABEL_TO_IDX[target['attributes']['gender']])
            data_emotion.append(
                DPAC_EMOTION_LABEL_TO_IDX[target['attributes']['emotion']])
            # context -- entire image
            if intact_path:
                context_img = cv2.cvtColor(cv2.imread(
                    intact_path + "images/" + str(img_id) + ".jpg"), cv2.COLOR_BGR2RGB)
                dup_context_img = context_img.copy()  # duplicating for cropping a target
            # bounding box of a target
            body_cor = target['body_bb']
            if obfus_path:
                obfus_context_img = cv2.cvtColor(cv2.imread(
                    obfus_path + "images/" + str(img_id) + ".jpg"), cv2.COLOR_BGR2RGB)
                dup_context_img = obfus_context_img.copy()
                if ob_people == "TO":  # set only obfuscated target in the image
                    context_img[abs(body_cor[1]):abs(body_cor[3]), abs(body_cor[0]):abs(
                        body_cor[2])] = obfus_context_img[abs(body_cor[1]):abs(body_cor[3]), abs(body_cor[0]):abs(body_cor[2])]
                if ob_people == "AO":  # use obfuscated images with for all
                    context_img = obfus_context_img
            context_img = cv2.resize(context_img, (IMG_HEIGHT, IMG_WIDTH))
            data_context.append(context_img)
            target_img = dup_context_img[abs(body_cor[1]):abs(
                body_cor[3]), abs(body_cor[0]):abs(body_cor[2])]
            target_img = cv2.resize(target_img, (IMG_HEIGHT, IMG_WIDTH))
            data_target.append(target_img)
            # naming of a pose file: {img_id}_{index_of_target_in_list}
            pose = np.load(pose_path + str(img_id) + "_" +
                           str(target_count) + ".npy")
            pose = np.reshape(pose, (25, 25, 18))
            data_pose.append(pose)
    if datatype == 'train':
        # Augment during training: flips, random crops and color jitter.
        transform = transforms.Compose([transforms.ToPILImage(), transforms.RandomHorizontalFlip(
        ), transforms.RandomResizedCrop(400), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), transforms.ToTensor()])
    else:
        transform = transforms.Compose(
            [transforms.ToPILImage(), transforms.ToTensor()])
    dataset = BaseDataset(data_context, data_target, data_pose, data_age,
                          data_gender, data_emotion, transform)
    print('{} data loaded of size (no. of targets): {}'.format(
        datatype, len(dataset)))
    # if facing batch-size use drop_last=True argument
    dataloader = DataLoader(dataset, pin_memory=True, batch_size=batch_size,
                            shuffle=True, num_workers=NUM_WORKERS)
    return dataloader
| [
"torch.manual_seed",
"numpy.reshape",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.RandomHorizontalFlip",
"torch.tensor",
"torchvision.transforms.ColorJitter",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"json.load",
"cv2.resize",
"torchvision.transforms.To... | [((447, 467), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (464, 467), False, 'import torch\n'), ((6901, 7003), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'pin_memory': '(True)', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'NUM_WORKERS'}), '(dataset, pin_memory=True, batch_size=batch_size, shuffle=True,\n num_workers=NUM_WORKERS)\n', (6911, 7003), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((952, 993), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['NORM_MEAN', 'NORM_STD'], {}), '(NORM_MEAN, NORM_STD)\n', (972, 993), False, 'from torchvision import transforms\n'), ((1177, 1230), 'torch.tensor', 'torch.tensor', (['self.x_pose[index]'], {'dtype': 'torch.float32'}), '(self.x_pose[index], dtype=torch.float32)\n', (1189, 1230), False, 'import torch\n'), ((2817, 2829), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2826, 2829), False, 'import json\n'), ((2929, 2941), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2938, 2941), False, 'import json\n'), ((5417, 5465), 'cv2.resize', 'cv2.resize', (['context_img', '(IMG_HEIGHT, IMG_WIDTH)'], {}), '(context_img, (IMG_HEIGHT, IMG_WIDTH))\n', (5427, 5465), False, 'import cv2\n'), ((5665, 5712), 'cv2.resize', 'cv2.resize', (['target_img', '(IMG_HEIGHT, IMG_WIDTH)'], {}), '(target_img, (IMG_HEIGHT, IMG_WIDTH))\n', (5675, 5712), False, 'import cv2\n'), ((5997, 6027), 'numpy.reshape', 'np.reshape', (['pose', '(25, 25, 18)'], {}), '(pose, (25, 25, 18))\n', (6007, 6027), True, 'import numpy as np\n'), ((6282, 6305), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6303, 6305), False, 'from torchvision import transforms\n'), ((6307, 6340), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6338, 6340), False, 'from torchvision import transforms\n'), ((6351, 6384), 'torchvision.transforms.RandomResizedCrop', 
'transforms.RandomResizedCrop', (['(400)'], {}), '(400)\n', (6379, 6384), False, 'from torchvision import transforms\n'), ((6386, 6454), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (6408, 6454), False, 'from torchvision import transforms\n'), ((6456, 6477), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6475, 6477), False, 'from torchvision import transforms\n'), ((6543, 6566), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6564, 6566), False, 'from torchvision import transforms\n'), ((6568, 6589), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6587, 6589), False, 'from torchvision import transforms\n')] |
import torch
from copy import deepcopy
import numpy as np
from .torch_triggered_dataset import TorchTriggeredDataset
from .dataset_preprocessor import datasetPreprocessor
class SCDatasetPreprocessor(datasetPreprocessor):
    """Dataset preprocessor for sequence-classification (SC) trojan/trigger analysis.

    Stages (orchestration lives in the base class): tokenize the raw text,
    keep only examples labeled with a trigger "source" class, splice a
    pad-token placeholder for the trigger into every example, record each
    model's trigger-free ("baseline") class probabilities, and wrap the
    result in a torch-compatible dataset.
    """
    def __init__(self, dataset, trigger, trigger_models, tokenizer):
        # All state handling is delegated to the base class.
        super().__init__(dataset, trigger, trigger_models, tokenizer)
    def _tokenize(self, original_dataset):
        """Tokenize the 'data' column, replicating labels across overflow chunks."""
        max_seq_length = self.get_max_seq_length()
        def prepare_train_features(examples):
            # Long inputs overflow into multiple fixed-length rows (stride =
            # self.doc_stride); 'overflow_to_sample_mapping' maps each chunk
            # back to the source example it came from.
            tokenized_examples = self.tokenizer(
                examples['data'],
                truncation=True,
                max_length=max_seq_length,
                stride=self.doc_stride,
                return_overflowing_tokens=True,
                return_offsets_mapping=True,
                padding="max_length",
                return_token_type_ids=True)
            # Copy each source label onto every chunk produced from it.
            labels = np.array(examples['label'])
            mapping = tokenized_examples['overflow_to_sample_mapping']
            tokenized_examples['label'] = labels[mapping]
            return tokenized_examples
        tokenized_dataset = original_dataset.map(
            prepare_train_features,
            batched=True,
            num_proc=1,
            remove_columns=original_dataset.column_names,
            keep_in_memory=True)
        # These bookkeeping columns are only needed during tokenization.
        cols_to_remove = ['overflow_to_sample_mapping', 'offset_mapping']
        tokenized_dataset = tokenized_dataset.remove_columns(cols_to_remove)
        return tokenized_dataset
    def _select_inputs_with_source_class(self, unique_inputs_dataset):
        """Keep only rows whose label is one of the trigger's source labels.

        If the trigger carries no source-label restriction, the dataset is
        returned unchanged.
        """
        if self.trigger.source_labels is None:
            return unique_inputs_dataset
        else:
            labels = np.array(unique_inputs_dataset['label'])
            label_mask = np.isin(labels, self.trigger.source_labels)
            rows_with_source_label = np.argwhere(label_mask)[:, 0]
            unique_rows = np.unique(rows_with_source_label)
            return unique_inputs_dataset.select(unique_rows)
    def _insert_dummy(self, unique_inputs_dataset):
        """Splice a pad-token placeholder for the trigger into every example.

        Per example, this widens input_ids / attention_mask / token_type_ids
        by the trigger length at the chosen insertion point and adds:
          * 'attention_mask_without_trigger' — the original mask with zeros
            over the trigger slots, so models can run as if no trigger exists;
          * 'trigger_mask' — 1 exactly at the inserted trigger positions.
        """
        dummy = self.tokenizer.pad_token_id
        def _initialize_dummy_trigger_helper(examples):
            result = {k: torch.tensor(v) for k, v in examples.items()}
            def _find_insertion_location(trigger_loc):
                # Column index at which the trigger is spliced in.
                # 'start' is index 1 rather than 0 — presumably to keep the
                # leading special token in place (confirm against tokenizer).
                if trigger_loc == 'start':
                    insertion_ixs = 1
                elif trigger_loc == 'middle':
                    insertion_ixs = self.get_max_seq_length()//2
                elif trigger_loc == 'end':
                    insertion_ixs = self.get_max_seq_length()-1
                else:
                    # NOTE(review): this *returns* NotImplementedError instead of
                    # raising it; the class would then be used as a slice index
                    # downstream and fail later with a confusing error.
                    return NotImplementedError
                return insertion_ixs
            insertion_ixs = _find_insertion_location(self.trigger.location)
            def _insert(insertion_ixs, base, insert):
                # Column-wise insertion of `insert` into `base` at insertion_ixs.
                return torch.cat([base[:, :insertion_ixs],
                                  insert,
                                  base[:, insertion_ixs:]], 1)
            def _expand_tensor(tensor, num_rows):
                # Tile a 1-D tensor into shape (num_rows, len(tensor)).
                return tensor.unsqueeze(0).repeat(num_rows, 1)
            num_examples = len(examples['input_ids'])
            trigger_length = len(self.trigger.input_ids)
            # Pad tokens stand in for the (not yet known) trigger tokens.
            expanded_dummy = torch.tensor([dummy]*trigger_length)
            trigger_input_ids = _expand_tensor(expanded_dummy, num_examples)
            # Ones: trigger positions are attended to in the normal mask.
            expanded_ones = torch.tensor([1]*trigger_length)
            trigger_attention = _expand_tensor(expanded_ones, num_examples)
            # Zeros: segment ids for the trigger, also reused below to blank
            # the trigger out of 'attention_mask_without_trigger'.
            expanded_zeros = torch.tensor([0]*trigger_length)
            token_type_ids = _expand_tensor(expanded_zeros, num_examples)
            temp_attn_mask = deepcopy(result['attention_mask'])
            zeros = torch.zeros_like(result['attention_mask'])
            result['input_ids'] = _insert(insertion_ixs,
                                          result['input_ids'],
                                          trigger_input_ids)
            result['attention_mask'] = _insert(insertion_ixs,
                                               result['attention_mask'],
                                               trigger_attention)
            result['token_type_ids'] = _insert(insertion_ixs,
                                               result['token_type_ids'],
                                               token_type_ids)
            # Original attention mask with zeros inserted over the trigger.
            result['attention_mask_without_trigger'] = _insert(insertion_ixs,
                                                               temp_attn_mask,
                                                               token_type_ids)
            # All-zero mask with ones inserted at the trigger positions.
            result['trigger_mask'] = _insert(insertion_ixs,
                                             zeros,
                                             deepcopy(trigger_attention))
            # datasets.map expects plain Python lists back, not tensors.
            result = {k: v.tolist() for k, v in result.items()}
            return result
        dataset_with_dummy = unique_inputs_dataset.map(
            _initialize_dummy_trigger_helper,
            batched=True,
            num_proc=1,
            remove_columns=unique_inputs_dataset.column_names,
            keep_in_memory=True)
        dataset_with_dummy.set_format('torch',
                                      columns=dataset_with_dummy.column_names)
        return dataset_with_dummy
    def _add_baseline_probabilities(self, dataset_with_dummy):
        """Attach trigger-free class probabilities to every example."""
        dataset = dataset_with_dummy.map(
            self._add_baseline_probabilities_helper,
            batched=True,
            num_proc=1,
            keep_in_memory=True,
            batch_size=20)
        return dataset
    @torch.no_grad()
    def _add_baseline_probabilities_helper(self, batch):
        """Run all models with the trigger masked out and aggregate their softmax outputs."""
        modified_batch = deepcopy(batch)
        # Swap in the mask that hides the trigger placeholder from the models.
        ignore_attn = modified_batch['attention_mask_without_trigger']
        modified_batch['attention_mask'] = ignore_attn
        all_logits = self.trigger_models(modified_batch)
        suspicious_logits = all_logits['suspicious']['logits']
        probabilities = [self._get_probabilitites(suspicious_logits)]
        for output in all_logits['clean']:
            clean_logits = output['logits']
            probabilities += [self._get_probabilitites(clean_logits)]
        # Stack to (num_models, batch, num_classes) and reduce over models.
        probabilities = torch.stack(probabilities)
        # agg_fn is not defined in this class — presumably set by the base
        # class (e.g. a mean/min reduction); confirm in datasetPreprocessor.
        batch['baseline_probabilities'] = self.agg_fn(probabilities, dim=0)
        batch = {k: v.detach().cpu().numpy() for k, v in batch.items()}
        return batch
    @staticmethod
    def _get_probabilitites(logits):
        # Softmax over dim 1 (the class dimension). The misspelled name
        # ('probabilitites') is kept for compatibility with existing callers.
        scores = torch.exp(logits)
        probs = scores/torch.sum(scores, dim=1, keepdim=True)
        return probs
    def _package_into_torch_dataset(self, dataset_with_baseline_probabilities):
        """Wrap the processed dataset in a torch-compatible triggered dataset."""
        # NOTE(review): SCTriggeredDataset is resolved via attribute lookup on
        # `self`; the module-level class below is NOT automatically visible as
        # `self.SCTriggeredDataset` — presumably the base class binds it.
        # Confirm against datasetPreprocessor.
        return self.SCTriggeredDataset(dataset_with_baseline_probabilities,
                                       len(self.trigger.input_ids))
class SCTriggeredDataset(TorchTriggeredDataset):
    """Triggered dataset for sequence classification: base samples plus labels."""

    def __init__(self, huggingface_dataset, trigger_length):
        super().__init__(huggingface_dataset, trigger_length)
        # Keep an independent, long-typed copy of the label column.
        raw_labels = huggingface_dataset['label']
        self.label = raw_labels.clone().detach().long()

    def __getitem__(self, idx):
        """Return the base sample augmented with its classification label."""
        item = super().__getitem__(idx)
        item['label'] = self.label[idx]
        return item
| [
"numpy.unique",
"torch.stack",
"torch.exp",
"numpy.isin",
"numpy.array",
"torch.tensor",
"numpy.argwhere",
"torch.sum",
"copy.deepcopy",
"torch.no_grad",
"torch.zeros_like",
"torch.cat"
] | [((5558, 5573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5571, 5573), False, 'import torch\n'), ((5656, 5671), 'copy.deepcopy', 'deepcopy', (['batch'], {}), '(batch)\n', (5664, 5671), False, 'from copy import deepcopy\n'), ((6169, 6195), 'torch.stack', 'torch.stack', (['probabilities'], {}), '(probabilities)\n', (6180, 6195), False, 'import torch\n'), ((6438, 6455), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (6447, 6455), False, 'import torch\n'), ((899, 926), 'numpy.array', 'np.array', (["examples['label']"], {}), "(examples['label'])\n", (907, 926), True, 'import numpy as np\n'), ((1702, 1742), 'numpy.array', 'np.array', (["unique_inputs_dataset['label']"], {}), "(unique_inputs_dataset['label'])\n", (1710, 1742), True, 'import numpy as np\n'), ((1768, 1811), 'numpy.isin', 'np.isin', (['labels', 'self.trigger.source_labels'], {}), '(labels, self.trigger.source_labels)\n', (1775, 1811), True, 'import numpy as np\n'), ((1905, 1938), 'numpy.unique', 'np.unique', (['rows_with_source_label'], {}), '(rows_with_source_label)\n', (1914, 1938), True, 'import numpy as np\n'), ((3239, 3277), 'torch.tensor', 'torch.tensor', (['([dummy] * trigger_length)'], {}), '([dummy] * trigger_length)\n', (3251, 3277), False, 'import torch\n'), ((3382, 3416), 'torch.tensor', 'torch.tensor', (['([1] * trigger_length)'], {}), '([1] * trigger_length)\n', (3394, 3416), False, 'import torch\n'), ((3521, 3555), 'torch.tensor', 'torch.tensor', (['([0] * trigger_length)'], {}), '([0] * trigger_length)\n', (3533, 3555), False, 'import torch\n'), ((3658, 3692), 'copy.deepcopy', 'deepcopy', (["result['attention_mask']"], {}), "(result['attention_mask'])\n", (3666, 3692), False, 'from copy import deepcopy\n'), ((3713, 3755), 'torch.zeros_like', 'torch.zeros_like', (["result['attention_mask']"], {}), "(result['attention_mask'])\n", (3729, 3755), False, 'import torch\n'), ((6479, 6517), 'torch.sum', 'torch.sum', (['scores'], {'dim': '(1)', 'keepdim': '(True)'}), '(scores, 
dim=1, keepdim=True)\n', (6488, 6517), False, 'import torch\n'), ((1849, 1872), 'numpy.argwhere', 'np.argwhere', (['label_mask'], {}), '(label_mask)\n', (1860, 1872), True, 'import numpy as np\n'), ((2180, 2195), 'torch.tensor', 'torch.tensor', (['v'], {}), '(v)\n', (2192, 2195), False, 'import torch\n'), ((2842, 2914), 'torch.cat', 'torch.cat', (['[base[:, :insertion_ixs], insert, base[:, insertion_ixs:]]', '(1)'], {}), '([base[:, :insertion_ixs], insert, base[:, insertion_ixs:]], 1)\n', (2851, 2914), False, 'import torch\n'), ((4729, 4756), 'copy.deepcopy', 'deepcopy', (['trigger_attention'], {}), '(trigger_attention)\n', (4737, 4756), False, 'from copy import deepcopy\n')] |
import cv2
import time
import sys
import numpy as np
class ObjectDetection():
    """YOLOv5 (ONNX) webcam object detector built on OpenCV's DNN module."""
    def __init__(self):
        # Model input resolution.
        self.INPUT_WIDTH = 640
        self.INPUT_HEIGHT = 640
        # NOTE(review): wrap_detection() uses hard-coded literals (0.4, 0.25,
        # 0.45) instead of these three thresholds; confirm which values are
        # intended before relying on these attributes.
        self.SCORE_THRESHOLD = 0.2
        self.NMS_THRESHOLD = 0.4
        self.CONFIDENCE_THRESHOLD = 0.4
        self.class_list = self.load_classes()
        # Box colors, cycled per class id via modulo in main().
        self.colors = [(255, 255, 0), (0, 255, 0), (0, 255, 255), (255, 0, 0)]
        self.net = self.build_model()
        # FPS bookkeeping (nanosecond clock).
        self.start = time.time_ns()
        self.frame_count = 0
        self.total_frames = 0
        self.fps = -1
        # Webcam capture; CAP_DSHOW is the DirectShow backend (Windows-only).
        self.cap_device = 0
        self.cap = cv2.VideoCapture(self.cap_device, cv2.CAP_DSHOW)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.INPUT_WIDTH)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.INPUT_HEIGHT)
    def build_model(self):
        """Load the ONNX model and pin it to OpenCV's CPU backend."""
        # NOTE(review): absolute machine-specific path — breaks elsewhere.
        net = cv2.dnn.readNet("F:/Hung Luu/BK_19_22/3_Junior/212/DMC/App Design/Version 1/code/ObjectDetection/best.onnx")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        return net
    def detect(self,image):
        """Run one forward pass; returns the network's raw output."""
        # Scale pixels to [0,1], resize to the model input size, BGR->RGB.
        blob = cv2.dnn.blobFromImage(image, 1/255.0, (self.INPUT_WIDTH, self.INPUT_HEIGHT), swapRB=True, crop=False)
        self.net.setInput(blob)
        preds = self.net.forward()
        return preds
    def load_classes(self):
        """Read class names, one per line, from the classes file."""
        class_list = []
        # NOTE(review): absolute machine-specific path — breaks elsewhere.
        with open("F:/Hung Luu/BK_19_22/3_Junior/212/DMC/App Design/Version 1/code/ObjectDetection/classes.txt", "r") as f:
            class_list = [cname.strip() for cname in f.readlines()]
        return class_list
    def wrap_detection(self,input_image, output_data):
        """Decode raw detection rows into class ids, confidences and pixel boxes, then run NMS.

        Returns three parallel lists: (class_ids, confidences, boxes) with
        boxes as [left, top, width, height] in input-image pixels.
        """
        class_ids = []
        confidences = []
        boxes = []
        rows = output_data.shape[0]
        # NOTE(review): numpy image shape is (height, width, channels), so
        # these two names are swapped. Harmless here only because
        # format_yolov5() pads the input to a square (width == height).
        image_width, image_height, _ = input_image.shape
        x_factor = image_width / self.INPUT_WIDTH
        y_factor = image_height / self.INPUT_HEIGHT
        for r in range(rows):
            row = output_data[r]
            confidence = row[4]  # presumably the YOLOv5 objectness score
            if confidence >= 0.4:
                # Remaining entries are per-class scores; pick the best class.
                classes_scores = row[5:]
                _, _, _, max_indx = cv2.minMaxLoc(classes_scores)
                class_id = max_indx[1]
                if (classes_scores[class_id] > .25):
                    confidences.append(confidence)
                    class_ids.append(class_id)
                    # Convert center/size (model scale) to top-left/size (pixels).
                    x, y, w, h = row[0].item(), row[1].item(), row[2].item(), row[3].item()
                    left = int((x - 0.5 * w) * x_factor)
                    top = int((y - 0.5 * h) * y_factor)
                    width = int(w * x_factor)
                    height = int(h * y_factor)
                    box = np.array([left, top, width, height])
                    boxes.append(box)
        # Non-maximum suppression drops overlapping candidate boxes.
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.25, 0.45)
        result_class_ids = []
        result_confidences = []
        result_boxes = []
        for i in indexes:
            result_confidences.append(confidences[i])
            result_class_ids.append(class_ids[i])
            result_boxes.append(boxes[i])
        return result_class_ids, result_confidences, result_boxes
    def format_yolov5(self,frame):
        """Pad the frame bottom/right with black into a square canvas."""
        row, col, _ = frame.shape
        _max = max(col, row)
        result = np.zeros((_max, _max, 3), np.uint8)
        result[0:row, 0:col] = frame
        return result
    def main(self):
        """Grab one webcam frame, run detection, draw boxes + FPS, return the frame."""
        _, frame = self.cap.read()
        #frame = cv2.flip(frame, 1)
        #if frame is None:
            #print("End of stream")
            #break
        inputImage = self.format_yolov5(frame)
        outs = self.detect(inputImage)
        class_ids, confidences, boxes = self.wrap_detection(inputImage, outs[0])
        self.frame_count += 1
        self.total_frames += 1
        for (classid, confidence, box) in zip(class_ids, confidences, boxes):
            color = self.colors[int(classid) % len(self.colors)]
            cv2.rectangle(frame, box, color, 2)
            # Filled label background strip above the box, then the class name.
            cv2.rectangle(frame, (box[0], box[1] - 20), (box[0] + box[2], box[1]), color, -1)
            cv2.putText(frame, self.class_list[classid], (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0))
        # Recompute FPS once every 30 frames.
        if self.frame_count >= 30:
            end = time.time_ns()
            self.fps = 1000000000 * self.frame_count / (end - self.start)
            self.frame_count = 0
            self.start = time.time_ns()
        if self.fps > 0:
            fps_label = "FPS: %.2f" % self.fps
            cv2.putText(frame, fps_label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        return frame
#print("Total frames: " + str(self.total_frames)) | [
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"cv2.putText",
"time.time_ns",
"cv2.minMaxLoc",
"numpy.zeros",
"numpy.array",
"cv2.VideoCapture",
"cv2.dnn.NMSBoxes",
"cv2.dnn.readNet"
] | [((462, 476), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (474, 476), False, 'import time\n'), ((606, 654), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.cap_device', 'cv2.CAP_DSHOW'], {}), '(self.cap_device, cv2.CAP_DSHOW)\n', (622, 654), False, 'import cv2\n'), ((829, 947), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""F:/Hung Luu/BK_19_22/3_Junior/212/DMC/App Design/Version 1/code/ObjectDetection/best.onnx"""'], {}), "(\n 'F:/Hung Luu/BK_19_22/3_Junior/212/DMC/App Design/Version 1/code/ObjectDetection/best.onnx'\n )\n", (844, 947), False, 'import cv2\n'), ((1128, 1236), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1 / 255.0)', '(self.INPUT_WIDTH, self.INPUT_HEIGHT)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1 / 255.0, (self.INPUT_WIDTH, self.\n INPUT_HEIGHT), swapRB=True, crop=False)\n', (1149, 1236), False, 'import cv2\n'), ((2761, 2809), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.25)', '(0.45)'], {}), '(boxes, confidences, 0.25, 0.45)\n', (2777, 2809), False, 'import cv2\n'), ((3257, 3292), 'numpy.zeros', 'np.zeros', (['(_max, _max, 3)', 'np.uint8'], {}), '((_max, _max, 3), np.uint8)\n', (3265, 3292), True, 'import numpy as np\n'), ((3916, 3951), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'box', 'color', '(2)'], {}), '(frame, box, color, 2)\n', (3929, 3951), False, 'import cv2\n'), ((3965, 4050), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(box[0], box[1] - 20)', '(box[0] + box[2], box[1])', 'color', '(-1)'], {}), '(frame, (box[0], box[1] - 20), (box[0] + box[2], box[1]),\n color, -1)\n', (3978, 4050), False, 'import cv2\n'), ((4060, 4174), 'cv2.putText', 'cv2.putText', (['frame', 'self.class_list[classid]', '(box[0], box[1] - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 0)'], {}), '(frame, self.class_list[classid], (box[0], box[1] - 10), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n', (4071, 4174), False, 'import cv2\n'), ((4221, 4235), 'time.time_ns', 'time.time_ns', ([], {}), 
'()\n', (4233, 4235), False, 'import time\n'), ((4368, 4382), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (4380, 4382), False, 'import time\n'), ((4476, 4564), 'cv2.putText', 'cv2.putText', (['frame', 'fps_label', '(10, 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(frame, fps_label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0,\n 255), 2)\n', (4487, 4564), False, 'import cv2\n'), ((2119, 2148), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['classes_scores'], {}), '(classes_scores)\n', (2132, 2148), False, 'import cv2\n'), ((2667, 2703), 'numpy.array', 'np.array', (['[left, top, width, height]'], {}), '([left, top, width, height])\n', (2675, 2703), True, 'import numpy as np\n')] |
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import pytest
import numpy as np
from mlxtend.externals.estimator_checks import NotFittedError
from mlxtend.utils import assert_raises
from mlxtend.regressor import StackingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from scipy import sparse
from numpy.testing import assert_almost_equal
from sklearn.base import clone
# Generating a sample dataset shared by all tests in this module.
# NOTE: the np.random calls below are order-sensitive (seeded RNG) — do not
# reorder them.
np.random.seed(1)
# X1: 40 sorted single-feature samples in [0, 5).
X1 = np.sort(5 * np.random.rand(40, 1), axis=0)
# X2: 40 sorted two-feature samples in [0, 5).
X2 = np.sort(5 * np.random.rand(40, 2), axis=0)
# Univariate target: sin(X1) with noise added to every 5th sample.
y = np.sin(X1).ravel()
y[::5] += 3 * (0.5 - np.random.rand(8))
# Multi-output target of shape (40, 2), noise-free.
y2 = np.sin(X2)
# Random per-sample weights for the sample_weight tests.
w = np.random.random(40)
def test_different_models():
    """Heterogeneous base regressors + RBF-SVR meta model hit the expected MSE on X1."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    stack.fit(X1, y)
    mse = np.mean((stack.predict(X1) - y) ** 2)
    assert round(mse, 2) == 0.21
def test_multivariate():
    """Same stacking setup as test_different_models, but on the two-feature X2."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    stack.fit(X2, y)
    mse = np.mean((stack.predict(X2) - y) ** 2)
    assert round(mse, 2) == 0.22
def test_multivariate_class():
    """Stacking supports a multi-output target (y2)."""
    regressors = [LinearRegression(), Ridge(random_state=1)]
    stack = StackingRegressor(regressors=regressors,
                              meta_regressor=LinearRegression(normalize=True))
    stack.fit(X2, y2)
    mse = np.mean((stack.predict(X2) - y2) ** 2.)
    # This check has occasionally been flaky on Windows (Appveyor).
    assert round(mse, 2) == 0.12, mse
def test_sample_weight():
    """Passing sample weights must noticeably change the predictions."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    weighted_pred = stack.fit(X1, y, sample_weight=w).predict(X1)
    assert round(np.mean((stack.predict(X1) - y) ** 2), 2) == 0.22
    # Refit without weights; the result must differ from the weighted fit.
    unweighted_pred = stack.fit(X1, y).predict(X1)
    maxdiff = np.max(np.abs(weighted_pred - unweighted_pred))
    assert maxdiff > 1e-3, "max diff is %.4f" % maxdiff
def test_weight_ones():
    """All-ones sample weights must reproduce the unweighted fit."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    plain = stack.fit(X1, y).predict(X1)
    weighted = stack.fit(X1, y, sample_weight=np.ones(40)).predict(X1)
    maxdiff = np.max(np.abs(plain - weighted))
    assert maxdiff < 1e-3, "max diff is %.4f" % maxdiff
def test_weight_unsupported_regressor():
    """A base regressor without sample_weight support makes the weighted fit raise TypeError."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1),
            Lasso(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    with pytest.raises(TypeError):
        stack.fit(X1, y, sample_weight=w).predict(X1)
def test_weight_unsupported_meta():
    """A meta regressor without sample_weight support makes the weighted fit raise TypeError."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=Lasso(random_state=1))
    with pytest.raises(TypeError):
        stack.fit(X1, y, sample_weight=w).predict(X1)
def test_weight_unsupported_with_no_weight():
    """Estimators lacking sample_weight support are fine when no weights are passed."""
    svr_lin = SVR(kernel='linear', gamma='auto')
    lr = LinearRegression()
    ridge = Ridge(random_state=1)
    lasso = Lasso(random_state=1)
    svr_rbf = SVR(kernel='rbf', gamma='auto')
    # Unweighted fit with Lasso among the base regressors ...
    StackingRegressor(regressors=[svr_lin, lr, ridge, lasso],
                      meta_regressor=svr_rbf).fit(X1, y).predict(X1)
    # ... and with Lasso as the meta regressor.
    StackingRegressor(regressors=[svr_lin, lr, ridge],
                      meta_regressor=lasso).fit(X1, y).predict(X1)
def test_gridsearch():
    """Grid search works over both base and meta estimator hyperparameters."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=1)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    param_grid = {'ridge__alpha': [0.01, 1.0],
                  'svr__C': [0.01, 1.0],
                  'meta_regressor__C': [0.01, 1.0]}
    search = GridSearchCV(estimator=stack,
                          param_grid=param_grid,
                          cv=5,
                          iid=False,
                          refit=True,
                          verbose=0)
    search = search.fit(X1, y)
    assert round(search.best_score_, 2) == 0.1
def test_gridsearch_numerate_regr():
    """Duplicate base regressors are addressable via numbered parameter names."""
    ridge = Ridge(random_state=1)
    # The same Ridge instance appears twice -> params become ridge-1/ridge-2.
    stack = StackingRegressor(regressors=[SVR(kernel='linear', gamma='auto'),
                                      ridge, ridge],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    param_grid = {'ridge-1__alpha': [0.01, 1.0],
                  'ridge-2__alpha': [0.01, 1.0],
                  'svr__C': [0.01, 1.0],
                  'meta_regressor__C': [0.01, 1.0]}
    search = GridSearchCV(estimator=stack,
                          param_grid=param_grid,
                          cv=5,
                          iid=False,
                          refit=True,
                          verbose=0)
    search = search.fit(X1, y)
    assert round(search.best_score_, 2) == 0.1
def test_get_coeff():
    """coef_ of the fitted linear meta regressor is exposed on the stack."""
    stack = StackingRegressor(regressors=[SVR(kernel='linear', gamma='auto'),
                                      LinearRegression()],
                              meta_regressor=Ridge(random_state=1))
    stack.fit(X1, y)
    expected = np.array([0.4874216, 0.45518317])
    assert_almost_equal(stack.coef_, expected)
def test_get_intercept():
    """intercept_ of the fitted linear meta regressor is exposed on the stack."""
    stack = StackingRegressor(regressors=[SVR(kernel='linear', gamma='auto'),
                                      LinearRegression()],
                              meta_regressor=Ridge(random_state=1))
    stack.fit(X1, y)
    assert round(stack.intercept_, 2) == 0.02
# ValueError was changed to AttributeError in sklearn >= 0.19
def test_get_coeff_fail():
    """Accessing coef_ fails when the meta regressor exposes no coefficients."""
    stack = StackingRegressor(regressors=[Ridge(random_state=1),
                                      LinearRegression()],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    with pytest.raises(AttributeError):
        stack = stack.fit(X1, y)
        r = stack.coef_
        assert r
def test_get_params():
    """get_params exposes the expected top-level parameter names."""
    stack = StackingRegressor(regressors=[Ridge(random_state=1),
                                      LinearRegression()],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    names = sorted({name.split('__')[0] for name in stack.get_params()})
    expected = ['linearregression',
                'meta_regressor',
                'refit',
                'regressors',
                'ridge',
                'store_train_meta_features',
                'use_features_in_secondary',
                'verbose']
    assert names == expected, names
def test_regressor_gridsearch():
    """The list of base regressors itself can be grid-searched."""
    lr = LinearRegression()
    ridge = Ridge(random_state=1)
    stack = StackingRegressor(regressors=[lr],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    search = GridSearchCV(estimator=stack,
                          param_grid={'regressors': [[lr], [lr, ridge]]},
                          cv=5,
                          iid=False,
                          refit=True)
    search.fit(X1, y)
    assert len(search.best_params_['regressors']) == 2
def test_predict_meta_features():
    """A fitted stack produces one prediction per test sample."""
    stack = StackingRegressor(regressors=[LinearRegression(),
                                      Ridge(random_state=1)],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'))
    X_train, X_test, y_train, y_test = train_test_split(X2, y, test_size=0.3)
    stack.fit(X_train, y_train)
    assert stack.predict(X_test).shape[0] == X_test.shape[0]
def test_train_meta_features_():
    """Training-set meta features are stored when store_train_meta_features=True."""
    stack = StackingRegressor(regressors=[LinearRegression(),
                                      Ridge(random_state=1)],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'),
                              store_train_meta_features=True)
    X_train, X_test, y_train, y_test = train_test_split(X2, y, test_size=0.3)
    stack.fit(X_train, y_train)
    assert stack.train_meta_features_.shape[0] == X_train.shape[0]
def test_not_fitted_predict():
    """Predicting before fit raises NotFittedError with a helpful message."""
    stack = StackingRegressor(regressors=[LinearRegression(),
                                      Ridge(random_state=1)],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'),
                              store_train_meta_features=True)
    X_train, X_test, y_train, y_test = train_test_split(X2, y, test_size=0.3)
    message = ("This StackingRegressor instance is not fitted yet. Call "
               "'fit' with appropriate arguments before using this method.")
    assert_raises(NotFittedError,
                  message,
                  stack.predict,
                  X_train)
    assert_raises(NotFittedError,
                  message,
                  stack.predict_meta_features,
                  X_train)
def test_clone():
    """The estimator survives sklearn's clone()."""
    stack = StackingRegressor(regressors=[LinearRegression(),
                                      Ridge(random_state=1)],
                              meta_regressor=SVR(kernel='rbf', gamma='auto'),
                              store_train_meta_features=True)
    clone(stack)
def test_features_in_secondary():
    """use_features_in_secondary changes the meta model's inputs and thus the MSE."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=0),
            RandomForestRegressor(n_estimators=10, random_state=2)]
    meta = SVR(kernel='rbf', gamma='auto')
    # Meta regressor sees base predictions AND the raw features.
    stack = StackingRegressor(regressors=base,
                              meta_regressor=meta,
                              use_features_in_secondary=True)
    stack.fit(X1, y)
    mse = np.mean((stack.predict(X1) - y) ** 2)
    print(mse)
    assert round(mse, 2) == 0.14
    # Meta regressor sees base predictions only.
    stack = StackingRegressor(regressors=base,
                              meta_regressor=meta,
                              use_features_in_secondary=False)
    stack.fit(X1, y)
    mse = np.mean((stack.predict(X1) - y) ** 2)
    print(mse)
    assert round(mse, 2) == 0.12
def test_predictions_from_sparse_matrix():
    """Dense and CSR-sparse training inputs yield the same score."""
    stack = StackingRegressor(regressors=[SVR(kernel='linear', gamma='auto'),
                                      LinearRegression()],
                              meta_regressor=Ridge(random_state=1))
    # Dense input.
    stack.fit(X1, y)
    print(stack.score(X1, y))
    assert round(stack.score(X1, y), 2) == 0.61
    # Sparse input.
    stack.fit(sparse.csr_matrix(X1), y)
    print(stack.score(X1, y))
    assert round(stack.score(X1, y), 2) == 0.61
def test_sparse_matrix_inputs_and_features_in_secondary():
    """Sparse inputs also work together with use_features_in_secondary=True."""
    base = [SVR(kernel='linear', gamma='auto'),
            LinearRegression(),
            Ridge(random_state=0),
            RandomForestRegressor(n_estimators=10, random_state=2)]
    stack = StackingRegressor(regressors=base,
                              meta_regressor=SVR(kernel='rbf', gamma='auto'),
                              use_features_in_secondary=True)
    # Dense input.
    stack.fit(X1, y)
    assert round(np.mean((stack.predict(X1) - y) ** 2), 2) == 0.14
    # Sparse input.
    stack.fit(sparse.csr_matrix(X1), y)
    sparse_mse = np.mean((stack.predict(sparse.csr_matrix(X1)) - y) ** 2)
    assert round(sparse_mse, 2) == 0.14
| [
"sklearn.model_selection.GridSearchCV",
"numpy.random.rand",
"sklearn.linear_model.Lasso",
"numpy.array",
"mlxtend.utils.assert_raises",
"numpy.sin",
"sklearn.ensemble.RandomForestRegressor",
"numpy.random.random",
"numpy.testing.assert_almost_equal",
"numpy.random.seed",
"scipy.sparse.csr_matri... | [((745, 762), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (759, 762), True, 'import numpy as np\n'), ((927, 937), 'numpy.sin', 'np.sin', (['X2'], {}), '(X2)\n', (933, 937), True, 'import numpy as np\n'), ((942, 962), 'numpy.random.random', 'np.random.random', (['(40)'], {}), '(40)\n', (958, 962), True, 'import numpy as np\n'), ((1003, 1021), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1019, 1021), False, 'from sklearn.linear_model import LinearRegression\n'), ((1036, 1070), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (1039, 1070), False, 'from sklearn.svm import SVR\n'), ((1083, 1104), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (1088, 1104), False, 'from sklearn.linear_model import Ridge\n'), ((1119, 1150), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (1122, 1150), False, 'from sklearn.svm import SVR\n'), ((1164, 1238), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf)\n', (1181, 1238), False, 'from mlxtend.regressor import StackingRegressor\n'), ((1436, 1454), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1452, 1454), False, 'from sklearn.linear_model import LinearRegression\n'), ((1469, 1503), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (1472, 1503), False, 'from sklearn.svm import SVR\n'), ((1516, 1537), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (1521, 1537), False, 'from sklearn.linear_model import Ridge\n'), ((1552, 1583), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': 
'"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (1555, 1583), False, 'from sklearn.svm import SVR\n'), ((1597, 1671), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf)\n', (1614, 1671), False, 'from mlxtend.regressor import StackingRegressor\n'), ((1875, 1893), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1891, 1893), False, 'from sklearn.linear_model import LinearRegression\n'), ((1906, 1927), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (1911, 1927), False, 'from sklearn.linear_model import Ridge\n'), ((1939, 1971), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'normalize': '(True)'}), '(normalize=True)\n', (1955, 1971), False, 'from sklearn.linear_model import LinearRegression\n'), ((1985, 2047), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[lr, ridge]', 'meta_regressor': 'meta'}), '(regressors=[lr, ridge], meta_regressor=meta)\n', (2002, 2047), False, 'from mlxtend.regressor import StackingRegressor\n'), ((2351, 2369), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2367, 2369), False, 'from sklearn.linear_model import LinearRegression\n'), ((2384, 2418), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (2387, 2418), False, 'from sklearn.svm import SVR\n'), ((2431, 2452), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2436, 2452), False, 'from sklearn.linear_model import Ridge\n'), ((2467, 2498), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (2470, 2498), False, 'from sklearn.svm import SVR\n'), ((2512, 2586), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', 
([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf)\n', (2529, 2586), False, 'from mlxtend.regressor import StackingRegressor\n'), ((3097, 3115), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3113, 3115), False, 'from sklearn.linear_model import LinearRegression\n'), ((3130, 3164), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (3133, 3164), False, 'from sklearn.svm import SVR\n'), ((3177, 3198), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (3182, 3198), False, 'from sklearn.linear_model import Ridge\n'), ((3213, 3244), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (3216, 3244), False, 'from sklearn.svm import SVR\n'), ((3258, 3332), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf)\n', (3275, 3332), False, 'from mlxtend.regressor import StackingRegressor\n'), ((3714, 3732), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3730, 3732), False, 'from sklearn.linear_model import LinearRegression\n'), ((3747, 3781), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (3750, 3781), False, 'from sklearn.svm import SVR\n'), ((3794, 3815), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (3799, 3815), False, 'from sklearn.linear_model import Ridge\n'), ((3830, 3861), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (3833, 3861), False, 'from sklearn.svm import SVR\n'), ((3874, 3895), 'sklearn.linear_model.Lasso', 'Lasso', ([], 
{'random_state': '(1)'}), '(random_state=1)\n', (3879, 3895), False, 'from sklearn.linear_model import Lasso\n'), ((3909, 3995), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge, lasso]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge, lasso], meta_regressor=\n svr_rbf)\n', (3926, 3995), False, 'from mlxtend.regressor import StackingRegressor\n'), ((4240, 4258), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4256, 4258), False, 'from sklearn.linear_model import LinearRegression\n'), ((4273, 4307), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (4276, 4307), False, 'from sklearn.svm import SVR\n'), ((4320, 4341), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (4325, 4341), False, 'from sklearn.linear_model import Ridge\n'), ((4354, 4375), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'random_state': '(1)'}), '(random_state=1)\n', (4359, 4375), False, 'from sklearn.linear_model import Lasso\n'), ((4389, 4461), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'lasso'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=lasso)\n', (4406, 4461), False, 'from mlxtend.regressor import StackingRegressor\n'), ((4729, 4747), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4745, 4747), False, 'from sklearn.linear_model import LinearRegression\n'), ((4762, 4796), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (4765, 4796), False, 'from sklearn.svm import SVR\n'), ((4809, 4830), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (4814, 4830), False, 'from sklearn.linear_model import Ridge\n'), ((4845, 4876), 'sklearn.svm.SVR', 'SVR', ([], 
{'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (4848, 4876), False, 'from sklearn.svm import SVR\n'), ((4889, 4910), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'random_state': '(1)'}), '(random_state=1)\n', (4894, 4910), False, 'from sklearn.linear_model import Lasso\n'), ((4924, 5010), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge, lasso]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge, lasso], meta_regressor=\n svr_rbf)\n', (4941, 5010), False, 'from mlxtend.regressor import StackingRegressor\n'), ((5085, 5157), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'lasso'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=lasso)\n', (5102, 5157), False, 'from mlxtend.regressor import StackingRegressor\n'), ((5257, 5275), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5273, 5275), False, 'from sklearn.linear_model import LinearRegression\n'), ((5290, 5324), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (5293, 5324), False, 'from sklearn.svm import SVR\n'), ((5337, 5358), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (5342, 5358), False, 'from sklearn.linear_model import Ridge\n'), ((5373, 5404), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (5376, 5404), False, 'from sklearn.svm import SVR\n'), ((5418, 5492), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf)\n', (5435, 5492), False, 'from mlxtend.regressor import StackingRegressor\n'), ((5665, 5759), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 
'stregr', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)', 'refit': '(True)', 'verbose': '(0)'}), '(estimator=stregr, param_grid=params, cv=5, iid=False, refit=\n True, verbose=0)\n', (5677, 5759), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6030, 6064), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (6033, 6064), False, 'from sklearn.svm import SVR\n'), ((6077, 6098), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (6082, 6098), False, 'from sklearn.linear_model import Ridge\n'), ((6113, 6144), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (6116, 6144), False, 'from sklearn.svm import SVR\n'), ((6158, 6235), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, ridge, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[svr_lin, ridge, ridge], meta_regressor=svr_rbf)\n', (6175, 6235), False, 'from mlxtend.regressor import StackingRegressor\n'), ((6455, 6549), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'stregr', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)', 'refit': '(True)', 'verbose': '(0)'}), '(estimator=stregr, param_grid=params, cv=5, iid=False, refit=\n True, verbose=0)\n', (6467, 6549), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6800, 6818), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6816, 6818), False, 'from sklearn.linear_model import LinearRegression\n'), ((6833, 6867), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (6836, 6867), False, 'from sklearn.svm import SVR\n'), ((6880, 6901), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (6885, 6901), False, 'from sklearn.linear_model import Ridge\n'), 
((6915, 6980), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr]', 'meta_regressor': 'ridge'}), '(regressors=[svr_lin, lr], meta_regressor=ridge)\n', (6932, 6980), False, 'from mlxtend.regressor import StackingRegressor\n'), ((7070, 7103), 'numpy.array', 'np.array', (['[0.4874216, 0.45518317]'], {}), '([0.4874216, 0.45518317])\n', (7078, 7103), True, 'import numpy as np\n'), ((7108, 7140), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['got', 'expect'], {}), '(got, expect)\n', (7127, 7140), False, 'from numpy.testing import assert_almost_equal\n'), ((7178, 7196), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7194, 7196), False, 'from sklearn.linear_model import LinearRegression\n'), ((7211, 7245), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (7214, 7245), False, 'from sklearn.svm import SVR\n'), ((7258, 7279), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (7263, 7279), False, 'from sklearn.linear_model import Ridge\n'), ((7293, 7358), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr]', 'meta_regressor': 'ridge'}), '(regressors=[svr_lin, lr], meta_regressor=ridge)\n', (7310, 7358), False, 'from mlxtend.regressor import StackingRegressor\n'), ((7593, 7611), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7609, 7611), False, 'from sklearn.linear_model import LinearRegression\n'), ((7626, 7657), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (7629, 7657), False, 'from sklearn.svm import SVR\n'), ((7670, 7691), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (7675, 7691), False, 'from sklearn.linear_model import Ridge\n'), ((7705, 7770), 
'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[ridge, lr]', 'meta_regressor': 'svr_rbf'}), '(regressors=[ridge, lr], meta_regressor=svr_rbf)\n', (7722, 7770), False, 'from mlxtend.regressor import StackingRegressor\n'), ((7954, 7972), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7970, 7972), False, 'from sklearn.linear_model import LinearRegression\n'), ((7987, 8018), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (7990, 8018), False, 'from sklearn.svm import SVR\n'), ((8031, 8052), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (8036, 8052), False, 'from sklearn.linear_model import Ridge\n'), ((8066, 8131), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[ridge, lr]', 'meta_regressor': 'svr_rbf'}), '(regressors=[ridge, lr], meta_regressor=svr_rbf)\n', (8083, 8131), False, 'from mlxtend.regressor import StackingRegressor\n'), ((8568, 8586), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8584, 8586), False, 'from sklearn.linear_model import LinearRegression\n'), ((8601, 8632), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (8604, 8632), False, 'from sklearn.svm import SVR\n'), ((8645, 8666), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (8650, 8666), False, 'from sklearn.linear_model import Ridge\n'), ((8680, 8738), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[lr]', 'meta_regressor': 'svr_rbf'}), '(regressors=[lr], meta_regressor=svr_rbf)\n', (8697, 8738), False, 'from mlxtend.regressor import StackingRegressor\n'), ((8832, 8910), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'stregr', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)', 
'refit': '(True)'}), '(estimator=stregr, param_grid=params, cv=5, iid=False, refit=True)\n', (8844, 8910), False, 'from sklearn.model_selection import GridSearchCV\n'), ((9126, 9144), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9142, 9144), False, 'from sklearn.linear_model import LinearRegression\n'), ((9159, 9190), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (9162, 9190), False, 'from sklearn.svm import SVR\n'), ((9203, 9224), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (9208, 9224), False, 'from sklearn.linear_model import Ridge\n'), ((9238, 9303), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[lr, ridge]', 'meta_regressor': 'svr_rbf'}), '(regressors=[lr, ridge], meta_regressor=svr_rbf)\n', (9255, 9303), False, 'from mlxtend.regressor import StackingRegressor\n'), ((9374, 9412), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X2', 'y'], {'test_size': '(0.3)'}), '(X2, y, test_size=0.3)\n', (9390, 9412), False, 'from sklearn.model_selection import train_test_split\n'), ((9596, 9614), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9612, 9614), False, 'from sklearn.linear_model import LinearRegression\n'), ((9629, 9660), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (9632, 9660), False, 'from sklearn.svm import SVR\n'), ((9673, 9694), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (9678, 9694), False, 'from sklearn.linear_model import Ridge\n'), ((9708, 9809), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[lr, ridge]', 'meta_regressor': 'svr_rbf', 'store_train_meta_features': '(True)'}), '(regressors=[lr, ridge], meta_regressor=svr_rbf,\n store_train_meta_features=True)\n', (9725, 
9809), False, 'from mlxtend.regressor import StackingRegressor\n'), ((9907, 9945), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X2', 'y'], {'test_size': '(0.3)'}), '(X2, y, test_size=0.3)\n', (9923, 9945), False, 'from sklearn.model_selection import train_test_split\n'), ((10135, 10153), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (10151, 10153), False, 'from sklearn.linear_model import LinearRegression\n'), ((10168, 10199), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (10171, 10199), False, 'from sklearn.svm import SVR\n'), ((10212, 10233), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (10217, 10233), False, 'from sklearn.linear_model import Ridge\n'), ((10247, 10348), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[lr, ridge]', 'meta_regressor': 'svr_rbf', 'store_train_meta_features': '(True)'}), '(regressors=[lr, ridge], meta_regressor=svr_rbf,\n store_train_meta_features=True)\n', (10264, 10348), False, 'from mlxtend.regressor import StackingRegressor\n'), ((10446, 10484), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X2', 'y'], {'test_size': '(0.3)'}), '(X2, y, test_size=0.3)\n', (10462, 10484), False, 'from sklearn.model_selection import train_test_split\n'), ((10640, 10702), 'mlxtend.utils.assert_raises', 'assert_raises', (['NotFittedError', 'expect', 'stregr.predict', 'X_train'], {}), '(NotFittedError, expect, stregr.predict, X_train)\n', (10653, 10702), False, 'from mlxtend.utils import assert_raises\n'), ((10762, 10838), 'mlxtend.utils.assert_raises', 'assert_raises', (['NotFittedError', 'expect', 'stregr.predict_meta_features', 'X_train'], {}), '(NotFittedError, expect, stregr.predict_meta_features, X_train)\n', (10775, 10838), False, 'from mlxtend.utils import assert_raises\n'), ((10922, 10940), 
'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (10938, 10940), False, 'from sklearn.linear_model import LinearRegression\n'), ((10955, 10986), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (10958, 10986), False, 'from sklearn.svm import SVR\n'), ((10999, 11020), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (11004, 11020), False, 'from sklearn.linear_model import Ridge\n'), ((11034, 11135), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[lr, ridge]', 'meta_regressor': 'svr_rbf', 'store_train_meta_features': '(True)'}), '(regressors=[lr, ridge], meta_regressor=svr_rbf,\n store_train_meta_features=True)\n', (11051, 11135), False, 'from mlxtend.regressor import StackingRegressor\n'), ((11198, 11211), 'sklearn.base.clone', 'clone', (['stregr'], {}), '(stregr)\n', (11203, 11211), False, 'from sklearn.base import clone\n'), ((11257, 11275), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11273, 11275), False, 'from sklearn.linear_model import LinearRegression\n'), ((11290, 11324), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (11293, 11324), False, 'from sklearn.svm import SVR\n'), ((11334, 11388), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)', 'random_state': '(2)'}), '(n_estimators=10, random_state=2)\n', (11355, 11388), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((11401, 11422), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(0)'}), '(random_state=0)\n', (11406, 11422), False, 'from sklearn.linear_model import Ridge\n'), ((11437, 11468), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (11440, 11468), False, 'from sklearn.svm import 
SVR\n'), ((11481, 11596), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge, rf]', 'meta_regressor': 'svr_rbf', 'use_features_in_secondary': '(True)'}), '(regressors=[svr_lin, lr, ridge, rf], meta_regressor=\n svr_rbf, use_features_in_secondary=True)\n', (11498, 11596), False, 'from mlxtend.regressor import StackingRegressor\n'), ((11809, 11925), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge, rf]', 'meta_regressor': 'svr_rbf', 'use_features_in_secondary': '(False)'}), '(regressors=[svr_lin, lr, ridge, rf], meta_regressor=\n svr_rbf, use_features_in_secondary=False)\n', (11826, 11925), False, 'from mlxtend.regressor import StackingRegressor\n'), ((12191, 12209), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12207, 12209), False, 'from sklearn.linear_model import LinearRegression\n'), ((12224, 12258), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (12227, 12258), False, 'from sklearn.svm import SVR\n'), ((12271, 12292), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(1)'}), '(random_state=1)\n', (12276, 12292), False, 'from sklearn.linear_model import Ridge\n'), ((12306, 12371), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr]', 'meta_regressor': 'ridge'}), '(regressors=[svr_lin, lr], meta_regressor=ridge)\n', (12323, 12371), False, 'from mlxtend.regressor import StackingRegressor\n'), ((12723, 12741), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12739, 12741), False, 'from sklearn.linear_model import LinearRegression\n'), ((12756, 12790), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""', 'gamma': '"""auto"""'}), "(kernel='linear', gamma='auto')\n", (12759, 12790), False, 'from sklearn.svm import SVR\n'), ((12800, 12854), 
'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)', 'random_state': '(2)'}), '(n_estimators=10, random_state=2)\n', (12821, 12854), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((12867, 12888), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12872, 12888), False, 'from sklearn.linear_model import Ridge\n'), ((12903, 12934), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (12906, 12934), False, 'from sklearn.svm import SVR\n'), ((12947, 13062), 'mlxtend.regressor.StackingRegressor', 'StackingRegressor', ([], {'regressors': '[svr_lin, lr, ridge, rf]', 'meta_regressor': 'svr_rbf', 'use_features_in_secondary': '(True)'}), '(regressors=[svr_lin, lr, ridge, rf], meta_regressor=\n svr_rbf, use_features_in_secondary=True)\n', (12964, 13062), False, 'from mlxtend.regressor import StackingRegressor\n'), ((780, 801), 'numpy.random.rand', 'np.random.rand', (['(40)', '(1)'], {}), '(40, 1)\n', (794, 801), True, 'import numpy as np\n'), ((828, 849), 'numpy.random.rand', 'np.random.rand', (['(40)', '(2)'], {}), '(40, 2)\n', (842, 849), True, 'import numpy as np\n'), ((863, 873), 'numpy.sin', 'np.sin', (['X1'], {}), '(X1)\n', (869, 873), True, 'import numpy as np\n'), ((903, 920), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (917, 920), True, 'import numpy as np\n'), ((2908, 2929), 'numpy.abs', 'np.abs', (['(pred1 - pred2)'], {}), '(pred1 - pred2)\n', (2914, 2929), True, 'import numpy as np\n'), ((3496, 3517), 'numpy.abs', 'np.abs', (['(pred1 - pred2)'], {}), '(pred1 - pred2)\n', (3502, 3517), True, 'import numpy as np\n'), ((4032, 4056), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4045, 4056), False, 'import pytest\n'), ((4503, 4527), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4516, 4527), False, 'import pytest\n'), ((7812, 7841), 
'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (7825, 7841), False, 'import pytest\n'), ((12547, 12568), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X1'], {}), '(X1)\n', (12564, 12568), False, 'from scipy import sparse\n'), ((13287, 13308), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X1'], {}), '(X1)\n', (13304, 13308), False, 'from scipy import sparse\n'), ((3450, 3461), 'numpy.ones', 'np.ones', (['(40)'], {}), '(40)\n', (3457, 3461), True, 'import numpy as np\n'), ((13361, 13382), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X1'], {}), '(X1)\n', (13378, 13382), False, 'from scipy import sparse\n')] |
""" Features selection
- select_features (func) : features selection following method
"""
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
class FeatSelector(object):
    """features selection following method
    - pca : use pca to reduce dataset dimensions
    - no_rescale_pca : use pca without rescaling data

    Parameters
    ----------
    method : string (Default pca)
        method use to select features
    """

    def __init__(self, method='pca'):
        assert method in ['pca', 'no_rescale_pca'], 'invalid method : select pca / no_rescale_pca'
        self.method = method
        self.is_fitted = False   # set True once fit() has succeeded
        self.l_select_var = []   # numerical features the selector was fitted on
        self.selector = None     # fitted PCA object
        self.scaler = None       # fitted StandardScaler (stays None for no_rescale_pca)

    def fit(self, df, l_var=None, verbose=False):
        """fit selector

        Parameters
        ----------
        df : DataFrame
            input dataset
        l_var : list
            features to encode.
            If None, all features identified as numerical
        verbose : boolean (Default False)
            Get logging information
        """
        # keep only non-object dtypes (numerical / boolean columns)
        l_num = [col for col in df.columns.tolist() if df[col].dtype != 'object']
        # list of features to encode
        if l_var is None:
            self.l_select_var = l_num
        else:
            self.l_select_var = [col for col in l_var if col in l_num]
        if len(self.l_select_var) > 1:
            # PCA method
            if self.method in ['pca', 'no_rescale_pca']:
                if self.method == 'pca':
                    scaler = StandardScaler()
                    df_local = scaler.fit_transform(df[self.l_select_var])
                    self.scaler = scaler
                else:
                    df_local = df[self.l_select_var].copy()
                # init and fit pca object
                pca = PCA()
                pca.fit(df_local)
                self.selector = pca
            # Fitted !
            self.is_fitted = True
            # verbose
            if verbose:
                print(" **method : " + self.method)
                print(" >", len(self.l_select_var), "features to encode")
        else:
            print('not enough features !')

    def transform(self, df, verbose=False):
        """ apply features selection on a dataset

        Parameters
        ----------
        df : DataFrame
            dataset to transform
        verbose : boolean (Default False)
            Get logging information

        Returns
        -------
        DataFrame : modified dataset
        """
        assert self.is_fitted, 'fit the encoding first using .fit method'
        l_var_other = [col for col in df.columns.tolist() if col not in self.l_select_var]
        df_local = df[self.l_select_var].copy()
        # pca methods
        if self.method in ['pca', 'no_rescale_pca']:
            if self.scaler is not None:
                df_local = self.scaler.transform(df_local)
            pca = self.selector
            df_local = pd.DataFrame(pca.transform(df_local))
            df_local = df_local.rename(
                columns=dict(zip(df_local.columns.tolist(), ['Dim' + str(v) for v in df_local.columns.tolist()])))
            # FIX (comment/code mismatch): the threshold is 0.95, i.e. keep the
            # smallest number of components explaining 95% of the variance —
            # the original comment said 90%. np.argmax returns the first index
            # where the cumulative explained-variance ratio crosses 0.95
            # (same index as the original np.argwhere(...)[0][0]).
            n_dim = int(np.argmax(np.cumsum(pca.explained_variance_ratio_) > 0.95))
            # concat with other dataset features
            if len(l_var_other) > 0:
                df_reduced = pd.concat((df[l_var_other].reset_index(drop=True), df_local.iloc[:, :n_dim + 1]), axis=1)
            else:
                df_reduced = df_local.iloc[:, :n_dim + 1]
            # verbose
            if verbose:
                print("Numerical Dimensions reduction : " + str(len(self.l_select_var)) + " - > " + str(n_dim + 1))
                print("explained inertia : " + str(round(np.cumsum(pca.explained_variance_ratio_)[n_dim], 4)))
        return df_reduced

    def fit_transform(self, df, l_var=None, verbose=False):
        """ fit and apply features selection

        Parameters
        ----------
        df : DataFrame
            input dataset
        l_var : list
            features to encode.
            If None, all features identified as numerical
        verbose : boolean (Default False)
            Get logging information

        Returns
        -------
        DataFrame : modified dataset
        """
        # FIX: l_var now defaults to None for consistency with fit()
        # (backward compatible); the original docstring also wrongly said
        # features were "identified as dates" — they are numerical.
        df_local = df.copy()
        self.fit(df_local, l_var=l_var, verbose=verbose)
        df_reduced = self.transform(df_local, verbose=verbose)
        return df_reduced
def select_features(df, target, method='pca', verbose=False):
    """features selection following method
    - pca : use pca to reduce dataset dimensions
    - no_rescale_pca : use pca without rescaling data

    Parameters
    ----------
    df : DataFrame
        input dataset containing features
    target : string
        target name
    method : string (Default pca)
        method use to select features
    verbose : boolean (Default False)
        Get logging information

    Returns
    -------
    DataFrame
        modified dataset
    """
    # assert valid method
    assert method in ['pca', 'no_rescale_pca'], method + " invalid method : select pca, no_rescale_pca"
    # get numerical features (except target) and others
    # NOTE(review): _get_numeric_data is a private pandas API; the public
    # select_dtypes('number') excludes bool columns and could change which
    # features get reduced, so the original call is kept — TODO confirm
    # bool handling before migrating.
    l_num = [col for col in df._get_numeric_data().columns.tolist() if col != target]
    l_other = [col for col in df.columns.tolist() if col not in l_num]
    # prepare dataset to apply PCA
    df_num = df[l_num].copy()
    # PCA method
    if method in ['pca', 'no_rescale_pca']:
        if method == 'pca':
            scaler = StandardScaler()
            X = scaler.fit_transform(df_num)
        else:
            X = df_num.copy()
        # init pca object
        pca = PCA()
        # fit and transform with pca
        X_transform = pd.DataFrame(pca.fit_transform(X))
        X_transform = X_transform.rename(
            columns=dict(zip(X_transform.columns.tolist(), ['Dim' + str(v) for v in X_transform.columns.tolist()])))
        # FIX (comment/code mismatch): keep the components explaining 95% of
        # the variance — the original comment said 90% while the code used
        # 0.95. np.argmax gives the first crossing index, equivalent to the
        # original np.argwhere(...)[0][0].
        n_dim = int(np.argmax(np.cumsum(pca.explained_variance_ratio_) > 0.95))
        # concat with other dataset features
        if len(l_other) > 0:
            df_pca = pd.concat((df[l_other].reset_index(drop=True), X_transform.iloc[:, :n_dim + 1]), axis=1)
        else:
            df_pca = X_transform.iloc[:, :n_dim + 1]
        # verbose
        if verbose:
            print("Numerical Dimensions reduction : " + str(len(l_num)) + " - > " + str(n_dim + 1))
            print("explained inertia : " + str(round(np.cumsum(pca.explained_variance_ratio_)[n_dim], 4)))
    return df_pca
| [
"sklearn.decomposition.PCA",
"numpy.cumsum",
"sklearn.preprocessing.StandardScaler"
] | [((6398, 6403), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (6401, 6403), False, 'from sklearn.decomposition import PCA\n'), ((2112, 2117), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (2115, 2117), False, 'from sklearn.decomposition import PCA\n'), ((6251, 6267), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6265, 6267), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1848, 1864), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1862, 1864), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3690, 3730), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (3699, 3730), True, 'import numpy as np\n'), ((6732, 6772), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (6741, 6772), True, 'import numpy as np\n'), ((7196, 7236), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (7205, 7236), True, 'import numpy as np\n'), ((4211, 4251), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (4220, 4251), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright 2016-present <NAME>.
#
# Licensed under the MIT License.
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/mit-license.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
#
# Author <NAME> (<EMAIL>)
#
# ------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import abc
import copy
import warnings
import json
import numpy as np
from util.const import CONST
from util.validation import (
MShape,
MType,
OneOfType
)
from npcore.layer.layer import Layer
# ------------------------------------------------------------------------
class OBJECTIVE(CONST):
    """Constant string labels for the objective (loss) layer types."""
    # Label of the base objective layer type.
    LABEL = 'objective'
    # Labels for the concrete loss-function variants implemented below
    # (and, presumably, in the rest of this module).
    MAE_LOSS_LABEL = 'mae_loss'
    MSE_LOSS_LABEL = 'mse_loss'
    LOG_COSH_LOSS_LABEL = 'log_cosh_loss'
    XTANH_LOSS_LABEL = 'xtanh_loss'
    XSIGMOID_LOSS_LABEL = 'xsigmoid_loss'
    ALGEBRAIC_LOSS_LABEL = 'algebraic_loss'
    SIGMOID_CROSSENTROPY_LOSS = 'sigmoid_crossentropy_loss'
    SOFTMAX_CROSSENTROPY_LOSS = 'softmax_crossentropy_loss'
    # Layer arrangement code consumed by the Layer base class — semantics
    # defined there; NOTE(review): meaning of ('2', '') not visible here.
    ARRANGEMENT = ('2', '')
# ------------------------------------------------------------------------
class Objective(Layer):
_label = OBJECTIVE.LABEL
_arrangement = OBJECTIVE.ARRANGEMENT
"""
Abtraction of a base objective layer. Manages objective loss.
Arguments:
size: objective size
name: objective name
metric: loss metric
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss',)):
self._y_t = None
self._y_prime_t = None
self._evaluation = {
'count': 0,
'metric': {}
}
self._residue = {}
self._monitor = None
super().__init__(shape=(1, size), name=name)
self.reconfig(metric=metric)
def __str__(self):
return super().__str__() + '_' + OBJECTIVE.LABEL
# ------------------------------------------------------------------------
@property
def inputs(self):
"""
Get objective forward pass input tensor.
Returns:
tensor
"""
if self.has_prev:
return self.prev.outputs
else:
return None
@property
def outputs(self):
"""
Get objective forward pass output tensor
Returns:
tensor
"""
if self._y_t is not None:
return self._y_t.copy()
else:
return None
@property
def evaluation_metric(self):
"""
Get objective evaluation metric
"""
evaluation_count = self._evaluation['count']
evaluation_metric = copy.deepcopy(self._evaluation['metric'])
if evaluation_count > 1:
for key in evaluation_metric.keys():
evaluation_metric[key] /= evaluation_count
return evaluation_metric
def unassign_hooks(self):
"""
Unassign all callback functions
"""
self._monitor = None
@MType(monitor=OneOfType(callable, None))
def assign_hook(self, *,
monitor=None):
"""
Assign callback functions
Arguments:
monitor: callback function to do probing during forward/backward pass
"""
if monitor is not None:
self._monitor = monitor
def reset(self):
"""
Reset internal states.
"""
self._y_t = None
self._y_prime_t = None
self._residue = {}
self._evaluation['count'] = 0
for key in self._evaluation['metric'].keys():
self._evaluation['metric'][key] = 0
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(as_json=bool, beautify_json=bool)
def snapshot(self, *, as_json=False, beautify_json=True):
"""
Return objective as a snapshot dict data
Arguments:
as_json:
beautify_json:
Returns:
snapshot
"""
snapshot = super().snapshot(as_json=False, beautify_json=False)
snapshot.update({
'base_label': Objective.label + '_' + snapshot['base_label'],
'metric': tuple(self._evaluation['metric'].keys())
})
if as_json:
if beautify_json:
return json.dumps(snapshot, indent=4, sort_keys=False)
else:
return json.dumps(snapshot)
else:
return snapshot.copy()
@MType(dict, np.ndarray, residue=dict)
@MShape(axis=1)
def forward(self, stage, a_t, *, residue={}):
"""
Do forward pass method.
Arguments:
stage: forward stage
a_t: post-nonlinearity (a) tensor
residue:
Returns:
layer
"""
self._y_t = a_t # a_t.copy()
self._residue = residue
if self._monitor is not None:
report = {
'pass': 'forward',
'stage': stage,
'inputs': self.inputs,
'outputs': self.outputs,
'residue': residue
}
self._monitor(report)
if self.has_next:
warnings.warn(f'Objective {self.name} layer must be the last in connection. There should be no connection to next layer.', UserWarning)
return self
@MType(np.ndarray)
@MShape(axis=1)
def evaluate(self, y_prime_t):
"""
Get evaluation metric given the expected truth.
Arguments:
y_prime_t: expected output (y) tensor
Returns:
self
"""
self._evaluation['count'] += 1
self._y_prime_t = y_prime_t # y_prime_t.copy()
evaluation_metric = self._evaluation['metric']
(ly_t, residue) = self.compute_loss(self._y_t, self._y_prime_t, residue=self._residue)
metric = self.compute_evaluation_metric(self._y_t, self._y_prime_t, ly_t, evaluation_metric)
self._evaluation['metric'] = metric
self._residue = residue
return self
@MType(dict)
def backward(self, stage):
"""
Do backward pass by passing the loss gradient tensor back to the prev link.
Arguments:
stage: backward stage
Returns:
layer
"""
if self._y_t is None:
warnings.warn(f'Objective {self.name} cannot do backward pass. Need to run forward pass first.', UserWarning)
return self
elif self._y_prime_t is None:
warnings.warn(f'Objective {self.name} cannot do backward pass. Need to run evaluation first.', UserWarning)
return self
else:
hparam = stage['hparam']
batch_size = hparam['batch_size']
(eyg_t, residue) = self.compute_loss_grad(self._y_t, self._y_prime_t, residue=self._residue)
eyg_t = eyg_t / batch_size if batch_size > 1 else eyg_t
if self._monitor is not None:
report = {
'pass': 'backward',
'stage': stage,
'error': self._ey_t,
'grad': {
'error': eyg_t
},
'evaluation': self._evaluation,
'residue': residue
}
self._monitor(report)
if self.has_prev:
return self.prev.backward(stage, eyg_t, residue=residue)
else:
warnings.warn(f'Objective {self.name} connection is incomplete. Missing connection to previous layer.', UserWarning)
return self
@abc.abstractmethod
def compute_evaluation_metric(self):
    """
    Compute the evaluation metric. Not implemented here.

    NOTE(review): concrete subclasses implement this as
    compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric);
    this abstract signature omits those parameters — confirm intended.
    """
    pass
@abc.abstractmethod
def compute_loss(self):
    """
    Compute the loss tensor. Not implemented.

    NOTE(review): concrete subclasses implement this as
    compute_loss(self, y_t, y_prime_t, *, residue={}) returning a
    (loss_tensor, residue) tuple — confirm intended.
    """
    pass
@abc.abstractmethod
def compute_loss_grad(self):
    """
    Compute the loss gradient tensor for backpropagation. Not implemented.

    NOTE(review): concrete subclasses implement this as
    compute_loss_grad(self, y_t, y_prime_t, *, residue={}) returning a
    (grad_tensor, residue) tuple — confirm intended.
    """
    pass
# ------------------------------------------------------------------------
class MAELoss(Objective):
    _label = OBJECTIVE.MAE_LOSS_LABEL
    """
    Objective using mean absolute error for loss function
    """
    # Metric names this loss-only objective recognizes but cannot compute.
    _UNSUPPORTED_METRICS = ('accuracy', 'acc', 'recall', 'rc',
                            'precision', 'prec', 'f1_score', 'f1')
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            has_loss = 'loss' in metric
            # FIX: the original tested ('accuracy' or 'acc') in metric, which
            # short-circuits to 'accuracy' in metric only, so the short
            # aliases ('acc', 'rc', 'prec', 'f1') were silently ignored.
            unsupported = [m for m in metric if m in self._UNSUPPORTED_METRICS]
            if has_loss or unsupported:
                if has_loss:
                    self._evaluation['metric']['loss'] = 0
                if unsupported:
                    warnings.warn(f'Mean absolute error objective only have loss metric. Ignoring metrics {metric}', UserWarning)
            else:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        return (np.abs(y_t - y_prime_t), residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        # FIX: replaced np.vectorize(lambda ...) with np.where — identical
        # subgradient (+1 where y > y', else -1) without a Python-level loop.
        eyg_t = np.where(y_t > y_prime_t, 1, -1)
        return (eyg_t, residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class MSELoss(Objective):
    _label = OBJECTIVE.MSE_LOSS_LABEL
    """
    Objective using mean square error for loss function.
    """
    # Metric names this loss-only objective recognizes but cannot compute.
    _UNSUPPORTED_METRICS = ('accuracy', 'acc', 'recall', 'rc',
                            'precision', 'prec', 'f1_score', 'f1')
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            has_loss = 'loss' in metric
            # FIX: the original tested ('accuracy' or 'acc') in metric, which
            # short-circuits to 'accuracy' in metric only, so the short
            # aliases ('acc', 'rc', 'prec', 'f1') were silently ignored.
            unsupported = [m for m in metric if m in self._UNSUPPORTED_METRICS]
            if has_loss or unsupported:
                if has_loss:
                    self._evaluation['metric']['loss'] = 0
                if unsupported:
                    warnings.warn(f'Mean square error objective only have loss metric. Ignoring metrics {metric}', UserWarning)
            else:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        return (np.square(y_t - y_prime_t), residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        # d/dy (y - y')^2 = 2 * (y - y')
        return (2 * (y_t - y_prime_t), residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class LogCoshLoss(Objective):
    _label = OBJECTIVE.LOG_COSH_LOSS_LABEL
    """
    Objective using log-cosh loss for loss function.
    `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
    to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
    like the l2 loss, but will not be so strongly affected by the
    occasional wildly incorrect prediction.
    """
    # Metric names this loss-only objective recognizes but cannot compute.
    _UNSUPPORTED_METRICS = ('accuracy', 'acc', 'recall', 'rc',
                            'precision', 'prec', 'f1_score', 'f1')
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            has_loss = 'loss' in metric
            # FIX: the original tested ('accuracy' or 'acc') in metric, which
            # short-circuits to 'accuracy' in metric only, so the short
            # aliases ('acc', 'rc', 'prec', 'f1') were silently ignored.
            unsupported = [m for m in metric if m in self._UNSUPPORTED_METRICS]
            if has_loss or unsupported:
                if has_loss:
                    self._evaluation['metric']['loss'] = 0
                if unsupported:
                    warnings.warn(f'Log-cosh loss objective only have loss metric. Ignoring metrics {metric}', UserWarning)
            else:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        # 1e-12 guards the log against a zero argument.
        ly_t = np.log(np.cosh(y_t - y_prime_t) + 1e-12)
        return (ly_t, residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        # d/dy log(cosh(y - y')) = tanh(y - y')
        return (np.tanh(y_t - y_prime_t), residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class XTanhLoss(Objective):
    _label = OBJECTIVE.XTANH_LOSS_LABEL
    """
    Objective using x*tanh(x) for loss function.
    Arguments:
        size: objective size
        name: objective name
        metric: loss metric
    """
    # Metric names this loss-only objective recognizes but cannot compute.
    _UNSUPPORTED_METRICS = ('accuracy', 'acc', 'recall', 'rc',
                            'precision', 'prec', 'f1_score', 'f1')

    @MType(size=int,
           name=str,
           metric=(str,))
    def __init__(self, *,
                 size=1,
                 name='',
                 metric=('loss',)):
        # Cache holds tanh(y - y') from the last compute_loss call so the
        # gradient pass can reuse it.
        self._cache = None
        super().__init__(size=size, name=name)
        self.reconfig(metric=metric)
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            has_loss = 'loss' in metric
            # FIX: the original tested ('accuracy' or 'acc') in metric, which
            # short-circuits to 'accuracy' in metric only, so the short
            # aliases ('acc', 'rc', 'prec', 'f1') were silently ignored.
            unsupported = [m for m in metric if m in self._UNSUPPORTED_METRICS]
            if has_loss or unsupported:
                if has_loss:
                    self._evaluation['metric']['loss'] = 0
                if unsupported:
                    warnings.warn(f'XTanh loss objective only have loss metric. Ignoring metrics {metric}', UserWarning)
            else:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        ey_t = y_t - y_prime_t
        tanh_of_ey_t = np.tanh(ey_t)
        ly_t = np.multiply(ey_t, tanh_of_ey_t)
        self._cache = tanh_of_ey_t
        return (ly_t, residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Must be preceded by a compute_loss call, which fills the tanh cache.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        ey_t = y_t - y_prime_t
        tanh_of_ey_t = self._cache
        # d/dx x*tanh(x) = tanh(x) + x * (1 - tanh(x)^2)
        eyg_t = tanh_of_ey_t + ey_t * (1 - np.square(tanh_of_ey_t))
        return (eyg_t, residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class XSigmoidLoss(Objective):
    _label = OBJECTIVE.XSIGMOID_LOSS_LABEL
    """
    Objective using x*sigmoid(x)-based loss function.
    Arguments:
        size: objective size
        name: objective name
        metric: loss metric
    """
    # Metric names this loss-only objective recognizes but cannot compute.
    _UNSUPPORTED_METRICS = ('accuracy', 'acc', 'recall', 'rc',
                            'precision', 'prec', 'f1_score', 'f1')

    @MType(size=int,
           name=str,
           metric=(str,))
    def __init__(self, *,
                 size=1,
                 name='',
                 metric=('loss',)):
        # Cache holds sigmoid(y - y') from the last compute_loss call so the
        # gradient pass can reuse it.
        self._cache = None
        super().__init__(size=size, name=name)
        self.reconfig(metric=metric)
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            has_loss = 'loss' in metric
            # FIX: the original tested ('accuracy' or 'acc') in metric, which
            # short-circuits to 'accuracy' in metric only, so the short
            # aliases ('acc', 'rc', 'prec', 'f1') were silently ignored.
            unsupported = [m for m in metric if m in self._UNSUPPORTED_METRICS]
            if has_loss or unsupported:
                if has_loss:
                    self._evaluation['metric']['loss'] = 0
                if unsupported:
                    warnings.warn(f'XSigmoid loss objective only have loss metric. Ignoring metrics {metric}', UserWarning)
            else:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        ey_t = y_t - y_prime_t
        # Numerically stable sigmoid: exp(-logaddexp(0, -x)); 1e-12 nudge
        # matches the original implementation.
        sigmoid_of_ey_t = np.exp(-np.logaddexp(0, -ey_t + 1e-12))
        ly_t = np.multiply(2 * ey_t, sigmoid_of_ey_t) - ey_t
        self._cache = sigmoid_of_ey_t
        return (ly_t, residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Must be preceded by a compute_loss call, which fills the sigmoid cache.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        ey_t = y_t - y_prime_t
        sigmoid_of_ey_t = self._cache
        eyg_t = 2 * sigmoid_of_ey_t + np.multiply(np.multiply(2 * ey_t, np.exp(-ey_t)), np.square(sigmoid_of_ey_t)) - 1
        return (eyg_t, residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class AlgebraicLoss(Objective):
    _label = OBJECTIVE.ALGEBRAIC_LOSS_LABEL
    """
    Objective using an algebraic loss function x^2 / sqrt(1 + x^2).
    Arguments:
        size: objective size
        name: objective name
        metric: loss metric
    """
    # Metric names this loss-only objective recognizes but cannot compute.
    _UNSUPPORTED_METRICS = ('accuracy', 'acc', 'recall', 'rc',
                            'precision', 'prec', 'f1_score', 'f1')

    @MType(size=int,
           name=str,
           metric=(str,))
    def __init__(self, *,
                 size=1,
                 name='',
                 metric=('loss',)):
        # Cache holds intermediates from the last compute_loss call so the
        # gradient pass can reuse them.
        self._cache = None
        super().__init__(size=size, name=name)
        self.reconfig(metric=metric)
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            has_loss = 'loss' in metric
            # FIX: the original tested ('accuracy' or 'acc') in metric, which
            # short-circuits to 'accuracy' in metric only, so the short
            # aliases ('acc', 'rc', 'prec', 'f1') were silently ignored.
            unsupported = [m for m in metric if m in self._UNSUPPORTED_METRICS]
            if has_loss or unsupported:
                if has_loss:
                    self._evaluation['metric']['loss'] = 0
                if unsupported:
                    warnings.warn(f'Algebraic loss objective only have loss metric. Ignoring metrics {metric}', UserWarning)
            else:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        ey_t = y_t - y_prime_t
        sqr_of_ey_t = np.square(ey_t)
        inv_of_ey_t = 1 / (1 + sqr_of_ey_t)
        inv_sqrt_of_ey_t = np.sqrt(inv_of_ey_t)
        # loss = e^2 / sqrt(1 + e^2)
        ly_t = np.multiply(sqr_of_ey_t, inv_sqrt_of_ey_t)
        self._cache = (sqr_of_ey_t, inv_of_ey_t, inv_sqrt_of_ey_t)
        return (ly_t, residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Must be preceded by a compute_loss call, which fills the cache.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        ey_t = y_t - y_prime_t
        (sqr_of_ey_t, inv_of_ey_t, inv_sqrt_of_ey_t) = self._cache
        # d/de [e^2 / sqrt(1 + e^2)] = (2e + e^3) / (1 + e^2)^(3/2)
        eyg_t = np.multiply(2 * ey_t + np.multiply(ey_t, sqr_of_ey_t), np.multiply(inv_of_ey_t, inv_sqrt_of_ey_t))
        return (eyg_t, residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class SigmoidCrossentropyLoss(Objective):
    _label = OBJECTIVE.SIGMOID_CROSSENTROPY_LOSS
    """
    Objective using sigmoid (binary) crossentropy for loss function.
    Arguments:
        size: objective size
        name: objective name
        metric: loss and accuracy metrics
    """
    # Canonical names for every accepted metric alias.
    _METRIC_ALIASES = {
        'loss': 'loss',
        'accuracy': 'accuracy', 'acc': 'accuracy',
        'recall': 'recall', 'rc': 'recall',
        'precision': 'precision', 'prec': 'precision',
        'f1_score': 'f1_score', 'f1': 'f1_score',
    }

    @MType(size=int,
           name=str,
           metric=(str,))
    def __init__(self, *,
                 size=1,
                 name='',
                 metric=('loss', 'accuracy')):
        super().__init__(size=size, name=name)
        self.reconfig(metric=metric)
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            # FIX: the original guard `'loss' in metric or ('accuracy' or 'acc')`
            # was always truthy (making the TypeError unreachable), and each
            # alias check like ('accuracy' or 'acc') in metric only tested the
            # long name. Resolve aliases explicitly instead.
            recognized = [self._METRIC_ALIASES[m]
                          for m in metric if m in self._METRIC_ALIASES]
            if not recognized:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
            for metric_name in recognized:
                self._evaluation['metric'][metric_name] = 0
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(dict, np.ndarray, residue=dict)
    @MShape(axis=1)
    def forward(self, stage, a_t, *, residue={}):
        """
        Do forward pass method.
        Arguments:
            stage: forward stage
            a_t: post-nonlinearity (a) tensor
            residue: passed through to the base forward
        Returns:
            layer
        """
        # Numerically stable sigmoid: exp(-logaddexp(0, -x)).
        sigmoid_of_a_t = np.exp(-np.logaddexp(0, -a_t + 1e-12))
        return super().forward(stage, sigmoid_of_a_t, residue=residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        y_prime_t = y_prime_t.astype(np.float32)
        # Binary crossentropy; 1e-12 guards log(0).
        ly_t = -(y_prime_t * np.log(y_t + 1e-12) + (1 - y_prime_t) * np.log((1 - y_t) + 1e-12))
        return (ly_t, residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        # Sigmoid + crossentropy gradient simplifies to (y - y').
        return (y_t - y_prime_t, residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        if 'accuracy' in evaluation_metric:
            evaluation_metric['accuracy'] += np.equal(y_prime_t, y_t.round()).astype(np.int8).mean()
        if 'recall' in evaluation_metric or 'precision' in evaluation_metric or 'f1_score' in evaluation_metric:
            y_t = np.round(y_t)
            # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
            # np.float64 is the equivalent dtype.
            true_pos = np.sum(np.multiply(y_t, y_prime_t), axis=0).astype(np.float64)
            false_pos = np.sum(np.multiply(y_t, (1 - y_prime_t)), axis=0).astype(np.float64)
            false_neg = np.sum(np.multiply((1 - y_t), y_prime_t), axis=0).astype(np.float64)
            recall = true_pos / (true_pos + false_neg + 1e-12)
            precision = true_pos / (true_pos + false_pos + 1e-12)
            # NOTE(review): recall/precision/f1 overwrite (=) rather than
            # accumulate (+=) like loss/accuracy — confirm intended.
            if 'recall' in evaluation_metric:
                evaluation_metric['recall'] = recall.mean()
            if 'precision' in evaluation_metric:
                evaluation_metric['precision'] = precision.mean()
            if 'f1_score' in evaluation_metric:
                evaluation_metric['f1_score'] = (2 * np.multiply(precision, recall) / (precision + recall + 1e-12)).mean()
        return evaluation_metric
# ------------------------------------------------------------------------
class SoftmaxCrossentropyLoss(Objective):
    _label = OBJECTIVE.SOFTMAX_CROSSENTROPY_LOSS
    """
    Objective using softmax (multinomial) crossentropy for loss function.
    Arguments:
        size: objective size
        name: objective name
        metric: loss and accuracy metrics
    """
    # Canonical names for every accepted metric alias.
    _METRIC_ALIASES = {
        'loss': 'loss',
        'accuracy': 'accuracy', 'acc': 'accuracy',
        'recall': 'recall', 'rc': 'recall',
        'precision': 'precision', 'prec': 'precision',
        'f1_score': 'f1_score', 'f1': 'f1_score',
    }

    @MType(size=int,
           name=str,
           metric=(str,))
    def __init__(self, *,
                 size=1,
                 name='',
                 metric=('loss', 'accuracy')):
        super().__init__(size=size, name=name)
        self.reconfig(metric=metric)
    # ------------------------------------------------------------------------
    @MType(shape=OneOfType((int,), None),
           metric=OneOfType((str,), None))
    def reconfig(self, *,
                 shape=None,
                 metric=None):
        """
        Reconfig objective.
        Arguments:
            shape: objective layer shape
            metric: loss metric
        Raises:
            TypeError: if no recognized metric name is given
        """
        if metric is not None:
            # FIX: the original guard `'loss' in metric or ('accuracy' or 'acc')`
            # was always truthy (making the TypeError unreachable), and each
            # alias check like ('accuracy' or 'acc') in metric only tested the
            # long name. Resolve aliases explicitly instead.
            recognized = [self._METRIC_ALIASES[m]
                          for m in metric if m in self._METRIC_ALIASES]
            if not recognized:
                raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
            for metric_name in recognized:
                self._evaluation['metric'][metric_name] = 0
        if shape is not None:
            super().reconfig(shape=shape)
            self.reset()

    @MType(dict, np.ndarray, residue=dict)
    @MShape(axis=1)
    def forward(self, stage, a_t, *, residue={}):
        """
        Do forward pass method.
        Arguments:
            stage: forward stage
            a_t: post-nonlinearity (a) tensor
            residue: passed through to the base forward
        Returns:
            layer
        """
        # Max-shifted exponentials for a numerically stable softmax.
        exps_a_t = np.exp(a_t - a_t.max(axis=1, keepdims=True))
        softmax_a_t = exps_a_t / exps_a_t.sum(axis=1, keepdims=True)
        return super().forward(stage, softmax_a_t, residue=residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (loss tensor, residue)
        """
        y_prime_t = y_prime_t.astype(np.float32)
        # Negative log-likelihood of the true class; 1e-12 guards log(0).
        ly_t = -np.log(y_t[range(y_t.shape[0]), y_prime_t.argmax(axis=1)] + 1e-12)
        return (ly_t, residue)

    @MType(np.ndarray, np.ndarray, dict)
    def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
        """
        Compute the loss gradient tensor for gradient descent update.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            residue: passed through unchanged
        Returns:
            tuple of (gradient tensor, residue)
        """
        # Softmax + crossentropy gradient simplifies to (y - y').
        return (y_t - y_prime_t, residue)

    @MType(np.ndarray, np.ndarray, np.ndarray, dict)
    def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
        """
        Compute the evaluation metric.
        Arguments:
            y_t: output (y) tensor
            y_prime_t: expected output (y) tensor
            ly_t: loss tensor
            evaluation_metric: running metric dict to accumulate into
        Returns:
            metric
        """
        if 'loss' in evaluation_metric:
            evaluation_metric['loss'] += ly_t.mean()
        if 'accuracy' in evaluation_metric:
            evaluation_metric['accuracy'] += np.equal(y_prime_t.argmax(axis=1), y_t.argmax(axis=1)).astype(np.int8).mean()
        if 'recall' in evaluation_metric or 'precision' in evaluation_metric or 'f1_score' in evaluation_metric:
            y_t = np.round(y_t)
            # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
            # np.float64 is the equivalent dtype.
            true_pos = np.sum(np.multiply(y_t, y_prime_t), axis=0).astype(np.float64)
            false_pos = np.sum(np.multiply(y_t, (1 - y_prime_t)), axis=0).astype(np.float64)
            false_neg = np.sum(np.multiply((1 - y_t), y_prime_t), axis=0).astype(np.float64)
            recall = true_pos / (true_pos + false_neg + 1e-12)
            precision = true_pos / (true_pos + false_pos + 1e-12)
            # NOTE(review): recall/precision/f1 overwrite (=) rather than
            # accumulate (+=) like loss/accuracy — confirm intended.
            if 'recall' in evaluation_metric:
                evaluation_metric['recall'] = recall.mean()
            if 'precision' in evaluation_metric:
                evaluation_metric['precision'] = precision.mean()
            if 'f1_score' in evaluation_metric:
                evaluation_metric['f1_score'] = (2 * np.multiply(precision, recall) / (precision + recall + 1e-12)).mean()
        return evaluation_metric
| [
"numpy.abs",
"numpy.multiply",
"numpy.sqrt",
"json.dumps",
"util.validation.OneOfType",
"numpy.tanh",
"util.validation.MShape",
"numpy.square",
"numpy.logaddexp",
"numpy.log",
"numpy.exp",
"numpy.cosh",
"copy.deepcopy",
"warnings.warn",
"util.validation.MType",
"numpy.vectorize",
"nu... | [((1900, 1940), 'util.validation.MType', 'MType', ([], {'size': 'int', 'name': 'str', 'metric': '(str,)'}), '(size=int, name=str, metric=(str,))\n', (1905, 1940), False, 'from util.validation import MShape, MType, OneOfType\n'), ((4842, 4881), 'util.validation.MType', 'MType', ([], {'as_json': 'bool', 'beautify_json': 'bool'}), '(as_json=bool, beautify_json=bool)\n', (4847, 4881), False, 'from util.validation import MShape, MType, OneOfType\n'), ((5607, 5644), 'util.validation.MType', 'MType', (['dict', 'np.ndarray'], {'residue': 'dict'}), '(dict, np.ndarray, residue=dict)\n', (5612, 5644), False, 'from util.validation import MShape, MType, OneOfType\n'), ((5650, 5664), 'util.validation.MShape', 'MShape', ([], {'axis': '(1)'}), '(axis=1)\n', (5656, 5664), False, 'from util.validation import MShape, MType, OneOfType\n'), ((6488, 6505), 'util.validation.MType', 'MType', (['np.ndarray'], {}), '(np.ndarray)\n', (6493, 6505), False, 'from util.validation import MShape, MType, OneOfType\n'), ((6511, 6525), 'util.validation.MShape', 'MShape', ([], {'axis': '(1)'}), '(axis=1)\n', (6517, 6525), False, 'from util.validation import MShape, MType, OneOfType\n'), ((7195, 7206), 'util.validation.MType', 'MType', (['dict'], {}), '(dict)\n', (7200, 7206), False, 'from util.validation import MShape, MType, OneOfType\n'), ((10564, 10599), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (10569, 10599), False, 'from util.validation import MShape, MType, OneOfType\n'), ((10965, 11000), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (10970, 11000), False, 'from util.validation import MShape, MType, OneOfType\n'), ((11461, 11508), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (11466, 11508), False, 'from util.validation import MShape, MType, OneOfType\n'), 
((13210, 13245), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (13215, 13245), False, 'from util.validation import MShape, MType, OneOfType\n'), ((13614, 13649), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (13619, 13649), False, 'from util.validation import MShape, MType, OneOfType\n'), ((14063, 14110), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (14068, 14110), False, 'from util.validation import MShape, MType, OneOfType\n'), ((16181, 16216), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (16186, 16216), False, 'from util.validation import MShape, MType, OneOfType\n'), ((16600, 16635), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (16605, 16635), False, 'from util.validation import MShape, MType, OneOfType\n'), ((17054, 17101), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (17059, 17101), False, 'from util.validation import MShape, MType, OneOfType\n'), ((17813, 17853), 'util.validation.MType', 'MType', ([], {'size': 'int', 'name': 'str', 'metric': '(str,)'}), '(size=int, name=str, metric=(str,))\n', (17818, 17853), False, 'from util.validation import MShape, MType, OneOfType\n'), ((19233, 19268), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (19238, 19268), False, 'from util.validation import MShape, MType, OneOfType\n'), ((19726, 19761), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (19731, 19761), False, 'from util.validation import MShape, MType, OneOfType\n'), ((20253, 
20300), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (20258, 20300), False, 'from util.validation import MShape, MType, OneOfType\n'), ((21018, 21058), 'util.validation.MType', 'MType', ([], {'size': 'int', 'name': 'str', 'metric': '(str,)'}), '(size=int, name=str, metric=(str,))\n', (21023, 21058), False, 'from util.validation import MShape, MType, OneOfType\n'), ((22441, 22476), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (22446, 22476), False, 'from util.validation import MShape, MType, OneOfType\n'), ((22981, 23016), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (22986, 23016), False, 'from util.validation import MShape, MType, OneOfType\n'), ((23563, 23610), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (23568, 23610), False, 'from util.validation import MShape, MType, OneOfType\n'), ((24330, 24370), 'util.validation.MType', 'MType', ([], {'size': 'int', 'name': 'str', 'metric': '(str,)'}), '(size=int, name=str, metric=(str,))\n', (24335, 24370), False, 'from util.validation import MShape, MType, OneOfType\n'), ((25754, 25789), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (25759, 25789), False, 'from util.validation import MShape, MType, OneOfType\n'), ((26383, 26418), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (26388, 26418), False, 'from util.validation import MShape, MType, OneOfType\n'), ((26989, 27036), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (26994, 27036), False, 'from util.validation import 
MShape, MType, OneOfType\n'), ((27852, 27892), 'util.validation.MType', 'MType', ([], {'size': 'int', 'name': 'str', 'metric': '(str,)'}), '(size=int, name=str, metric=(str,))\n', (27857, 27892), False, 'from util.validation import MShape, MType, OneOfType\n'), ((29364, 29401), 'util.validation.MType', 'MType', (['dict', 'np.ndarray'], {'residue': 'dict'}), '(dict, np.ndarray, residue=dict)\n', (29369, 29401), False, 'from util.validation import MShape, MType, OneOfType\n'), ((29407, 29421), 'util.validation.MShape', 'MShape', ([], {'axis': '(1)'}), '(axis=1)\n', (29413, 29421), False, 'from util.validation import MShape, MType, OneOfType\n'), ((29823, 29858), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (29828, 29858), False, 'from util.validation import MShape, MType, OneOfType\n'), ((30311, 30346), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (30316, 30346), False, 'from util.validation import MShape, MType, OneOfType\n'), ((30756, 30803), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (30761, 30803), False, 'from util.validation import MShape, MType, OneOfType\n'), ((32798, 32838), 'util.validation.MType', 'MType', ([], {'size': 'int', 'name': 'str', 'metric': '(str,)'}), '(size=int, name=str, metric=(str,))\n', (32803, 32838), False, 'from util.validation import MShape, MType, OneOfType\n'), ((34310, 34347), 'util.validation.MType', 'MType', (['dict', 'np.ndarray'], {'residue': 'dict'}), '(dict, np.ndarray, residue=dict)\n', (34315, 34347), False, 'from util.validation import MShape, MType, OneOfType\n'), ((34353, 34367), 'util.validation.MShape', 'MShape', ([], {'axis': '(1)'}), '(axis=1)\n', (34359, 34367), False, 'from util.validation import MShape, MType, OneOfType\n'), ((34835, 34870), 'util.validation.MType', 'MType', 
(['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (34840, 34870), False, 'from util.validation import MShape, MType, OneOfType\n'), ((35310, 35345), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, dict)\n', (35315, 35345), False, 'from util.validation import MShape, MType, OneOfType\n'), ((35755, 35802), 'util.validation.MType', 'MType', (['np.ndarray', 'np.ndarray', 'np.ndarray', 'dict'], {}), '(np.ndarray, np.ndarray, np.ndarray, dict)\n', (35760, 35802), False, 'from util.validation import MShape, MType, OneOfType\n'), ((3224, 3265), 'copy.deepcopy', 'copy.deepcopy', (["self._evaluation['metric']"], {}), "(self._evaluation['metric'])\n", (3237, 3265), False, 'import copy\n'), ((10915, 10927), 'numpy.abs', 'np.abs', (['ey_t'], {}), '(ey_t)\n', (10921, 10927), True, 'import numpy as np\n'), ((13561, 13576), 'numpy.square', 'np.square', (['ey_t'], {}), '(ey_t)\n', (13570, 13576), True, 'import numpy as np\n'), ((17001, 17014), 'numpy.tanh', 'np.tanh', (['ey_t'], {}), '(ey_t)\n', (17008, 17014), True, 'import numpy as np\n'), ((19592, 19605), 'numpy.tanh', 'np.tanh', (['ey_t'], {}), '(ey_t)\n', (19599, 19605), True, 'import numpy as np\n'), ((19621, 19652), 'numpy.multiply', 'np.multiply', (['ey_t', 'tanh_of_ey_t'], {}), '(ey_t, tanh_of_ey_t)\n', (19632, 19652), True, 'import numpy as np\n'), ((26112, 26127), 'numpy.square', 'np.square', (['ey_t'], {}), '(ey_t)\n', (26121, 26127), True, 'import numpy as np\n'), ((26199, 26219), 'numpy.sqrt', 'np.sqrt', (['inv_of_ey_t'], {}), '(inv_of_ey_t)\n', (26206, 26219), True, 'import numpy as np\n'), ((26235, 26277), 'numpy.multiply', 'np.multiply', (['sqr_of_ey_t', 'inv_sqrt_of_ey_t'], {}), '(sqr_of_ey_t, inv_sqrt_of_ey_t)\n', (26246, 26277), True, 'import numpy as np\n'), ((3585, 3610), 'util.validation.OneOfType', 'OneOfType', (['callable', 'None'], {}), '(callable, None)\n', (3594, 3610), False, 'from util.validation import MShape, MType, 
OneOfType\n'), ((4222, 4245), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (4231, 4245), False, 'from util.validation import MShape, MType, OneOfType\n'), ((4265, 4288), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (4274, 4288), False, 'from util.validation import MShape, MType, OneOfType\n'), ((6326, 6471), 'warnings.warn', 'warnings.warn', (['f"""Objective {self.name} layer must be the last in connection. There should be no connection to next layer."""', 'UserWarning'], {}), "(\n f'Objective {self.name} layer must be the last in connection. There should be no connection to next layer.'\n , UserWarning)\n", (6339, 6471), False, 'import warnings\n'), ((7476, 7595), 'warnings.warn', 'warnings.warn', (['f"""Objective {self.name} cannot do backward pass. Need to run forward pass first."""', 'UserWarning'], {}), "(\n f'Objective {self.name} cannot do backward pass. Need to run forward pass first.'\n , UserWarning)\n", (7489, 7595), False, 'import warnings\n'), ((9540, 9563), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (9549, 9563), False, 'from util.validation import MShape, MType, OneOfType\n'), ((9583, 9606), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (9592, 9606), False, 'from util.validation import MShape, MType, OneOfType\n'), ((11335, 11400), 'numpy.vectorize', 'np.vectorize', (['(lambda element: element and 1 or not element and -1)'], {}), '(lambda element: element and 1 or not element and -1)\n', (11347, 11400), True, 'import numpy as np\n'), ((12264, 12287), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (12273, 12287), False, 'from util.validation import MShape, MType, OneOfType\n'), ((12307, 12330), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (12316, 12330), False, 'from util.validation import MShape, 
MType, OneOfType\n'), ((15143, 15166), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (15152, 15166), False, 'from util.validation import MShape, MType, OneOfType\n'), ((15186, 15209), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (15195, 15209), False, 'from util.validation import MShape, MType, OneOfType\n'), ((18198, 18221), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (18207, 18221), False, 'from util.validation import MShape, MType, OneOfType\n'), ((18241, 18264), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (18250, 18264), False, 'from util.validation import MShape, MType, OneOfType\n'), ((21403, 21426), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (21412, 21426), False, 'from util.validation import MShape, MType, OneOfType\n'), ((21446, 21469), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (21455, 21469), False, 'from util.validation import MShape, MType, OneOfType\n'), ((22859, 22897), 'numpy.multiply', 'np.multiply', (['(2 * ey_t)', 'sigmoid_of_ey_t'], {}), '(2 * ey_t, sigmoid_of_ey_t)\n', (22870, 22897), True, 'import numpy as np\n'), ((24715, 24738), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (24724, 24738), False, 'from util.validation import MShape, MType, OneOfType\n'), ((24758, 24781), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (24767, 24781), False, 'from util.validation import MShape, MType, OneOfType\n'), ((26906, 26948), 'numpy.multiply', 'np.multiply', (['inv_of_ey_t', 'inv_sqrt_of_ey_t'], {}), '(inv_of_ey_t, inv_sqrt_of_ey_t)\n', (26917, 26948), True, 'import numpy as np\n'), ((28221, 28244), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (28230, 28244), False, 'from 
util.validation import MShape, MType, OneOfType\n'), ((28264, 28287), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (28273, 28287), False, 'from util.validation import MShape, MType, OneOfType\n'), ((31488, 31501), 'numpy.round', 'np.round', (['y_t'], {}), '(y_t)\n', (31496, 31501), True, 'import numpy as np\n'), ((33167, 33190), 'util.validation.OneOfType', 'OneOfType', (['(int,)', 'None'], {}), '((int,), None)\n', (33176, 33190), False, 'from util.validation import MShape, MType, OneOfType\n'), ((33210, 33233), 'util.validation.OneOfType', 'OneOfType', (['(str,)', 'None'], {}), '((str,), None)\n', (33219, 33233), False, 'from util.validation import MShape, MType, OneOfType\n'), ((36540, 36553), 'numpy.round', 'np.round', (['y_t'], {}), '(y_t)\n', (36548, 36553), True, 'import numpy as np\n'), ((5442, 5489), 'json.dumps', 'json.dumps', (['snapshot'], {'indent': '(4)', 'sort_keys': '(False)'}), '(snapshot, indent=4, sort_keys=False)\n', (5452, 5489), False, 'import json\n'), ((5531, 5551), 'json.dumps', 'json.dumps', (['snapshot'], {}), '(snapshot)\n', (5541, 5551), False, 'import json\n'), ((7660, 7777), 'warnings.warn', 'warnings.warn', (['f"""Objective {self.name} cannot do backward pass. Need to run evaluation first."""', 'UserWarning'], {}), "(\n f'Objective {self.name} cannot do backward pass. Need to run evaluation first.'\n , UserWarning)\n", (7673, 7777), False, 'import warnings\n'), ((10250, 10369), 'warnings.warn', 'warnings.warn', (['f"""Mean absolute error objective only have loss metric. Ignoring metrics {metric}"""', 'UserWarning'], {}), "(\n f'Mean absolute error objective only have loss metric. Ignoring metrics {metric}'\n , UserWarning)\n", (10263, 10369), False, 'import warnings\n'), ((12898, 13015), 'warnings.warn', 'warnings.warn', (['f"""Mean square error objective only have loss metric. Ignoring metrics {metric}"""', 'UserWarning'], {}), "(\n f'Mean square error objective only have loss metric. 
Ignoring metrics {metric}'\n , UserWarning)\n", (12911, 13015), False, 'import warnings\n'), ((16539, 16552), 'numpy.cosh', 'np.cosh', (['ey_t'], {}), '(ey_t)\n', (16546, 16552), True, 'import numpy as np\n'), ((22811, 22841), 'numpy.logaddexp', 'np.logaddexp', (['(0)', '(-ey_t + 1e-12)'], {}), '(0, -ey_t + 1e-12)\n', (22823, 22841), True, 'import numpy as np\n'), ((26874, 26904), 'numpy.multiply', 'np.multiply', (['ey_t', 'sqr_of_ey_t'], {}), '(ey_t, sqr_of_ey_t)\n', (26885, 26904), True, 'import numpy as np\n'), ((29715, 29744), 'numpy.logaddexp', 'np.logaddexp', (['(0)', '(-a_t + 1e-12)'], {}), '(0, -a_t + 1e-12)\n', (29727, 29744), True, 'import numpy as np\n'), ((8627, 8753), 'warnings.warn', 'warnings.warn', (['f"""Objective {self.name} connection is incomplete. Missing connection to previous layer."""', 'UserWarning'], {}), "(\n f'Objective {self.name} connection is incomplete. Missing connection to previous layer.'\n , UserWarning)\n", (8640, 8753), False, 'import warnings\n'), ((15873, 15986), 'warnings.warn', 'warnings.warn', (['f"""Log-cosh loss objective only have loss metric. Ignoring metrics {metric}"""', 'UserWarning'], {}), "(\n f'Log-cosh loss objective only have loss metric. Ignoring metrics {metric}'\n , UserWarning)\n", (15886, 15986), False, 'import warnings\n'), ((18928, 19037), 'warnings.warn', 'warnings.warn', (['f"""XTanh loss objective only have loss metric. Ignoring metrics {metric}"""', 'UserWarning'], {}), "(\n f'XTanh loss objective only have loss metric. Ignoring metrics {metric}',\n UserWarning)\n", (18941, 19037), False, 'import warnings\n'), ((20189, 20212), 'numpy.square', 'np.square', (['tanh_of_ey_t'], {}), '(tanh_of_ey_t)\n', (20198, 20212), True, 'import numpy as np\n'), ((22133, 22246), 'warnings.warn', 'warnings.warn', (['f"""XSigmoid loss objective only have loss metric. Ignoring metrics {metric}"""', 'UserWarning'], {}), "(\n f'XSigmoid loss objective only have loss metric. 
Ignoring metrics {metric}'\n , UserWarning)\n", (22146, 22246), False, 'import warnings\n'), ((23492, 23518), 'numpy.square', 'np.square', (['sigmoid_of_ey_t'], {}), '(sigmoid_of_ey_t)\n', (23501, 23518), True, 'import numpy as np\n'), ((25445, 25559), 'warnings.warn', 'warnings.warn', (['f"""Algebraic loss objective only have loss metric. Ignoring metrics {metric}"""', 'UserWarning'], {}), "(\n f'Algebraic loss objective only have loss metric. Ignoring metrics {metric}'\n , UserWarning)\n", (25458, 25559), False, 'import warnings\n'), ((30206, 30225), 'numpy.log', 'np.log', (['(y_t + 1e-12)'], {}), '(y_t + 1e-12)\n', (30212, 30225), True, 'import numpy as np\n'), ((30246, 30269), 'numpy.log', 'np.log', (['(1 - y_t + 1e-12)'], {}), '(1 - y_t + 1e-12)\n', (30252, 30269), True, 'import numpy as np\n'), ((23476, 23489), 'numpy.exp', 'np.exp', (['(-ey_t)'], {}), '(-ey_t)\n', (23482, 23489), True, 'import numpy as np\n'), ((31532, 31559), 'numpy.multiply', 'np.multiply', (['y_t', 'y_prime_t'], {}), '(y_t, y_prime_t)\n', (31543, 31559), True, 'import numpy as np\n'), ((31715, 31746), 'numpy.multiply', 'np.multiply', (['y_t', '(1 - y_prime_t)'], {}), '(y_t, 1 - y_prime_t)\n', (31726, 31746), True, 'import numpy as np\n'), ((31806, 31837), 'numpy.multiply', 'np.multiply', (['(1 - y_t)', 'y_prime_t'], {}), '(1 - y_t, y_prime_t)\n', (31817, 31837), True, 'import numpy as np\n'), ((36584, 36611), 'numpy.multiply', 'np.multiply', (['y_t', 'y_prime_t'], {}), '(y_t, y_prime_t)\n', (36595, 36611), True, 'import numpy as np\n'), ((36767, 36798), 'numpy.multiply', 'np.multiply', (['y_t', '(1 - y_prime_t)'], {}), '(y_t, 1 - y_prime_t)\n', (36778, 36798), True, 'import numpy as np\n'), ((36858, 36889), 'numpy.multiply', 'np.multiply', (['(1 - y_t)', 'y_prime_t'], {}), '(1 - y_t, y_prime_t)\n', (36869, 36889), True, 'import numpy as np\n'), ((32317, 32347), 'numpy.multiply', 'np.multiply', (['precision', 'recall'], {}), '(precision, recall)\n', (32328, 32347), True, 'import numpy as 
np\n'), ((37369, 37399), 'numpy.multiply', 'np.multiply', (['precision', 'recall'], {}), '(precision, recall)\n', (37380, 37399), True, 'import numpy as np\n')] |
import time
import math
from typing import Any, Dict, Sequence
from coba.utilities import PackageChecker
from coba.simulations import Context, Action
from coba.learners.core import Learner, Key
class RegCBLearner(Learner):
"""A learner using the RegCB algorithm by Foster et al.
and the online bin search implementation by Bietti et al.
References:
Foster, Dylan, <NAME>, <NAME>, <NAME>, and <NAME>.
"Practical contextual bandits with regression oracles." In International
Conference on Machine Learning, pp. 1539-1548. PMLR, 2018.
<NAME>, <NAME>, and <NAME>.
"A contextual bandit bake-off." arXiv preprint
arXiv:1802.04064 (2018).
"""
@property
def family(self) -> str:
"""The family of the learner.
See the base class for more information
"""
return f"RegCB"
@property
def params(self) -> Dict[str, Any]:
"""The parameters of the learner.
See the base class for more information
"""
dict = {'beta': self._beta, 'alpha': self._alpha, 'interactions': self._interactions}
return dict
def __init__(self, *, beta: float, alpha: float, learning_rate:float=0.1, interactions: Sequence[str] = ['a', 'ax']) -> None:
"""Instantiate a RegCBLearner.
Args:
beta : square-loss tolerance
alpha: confidence bounds precision
interactions: the set of interactions the learner will use. x refers to context and a refers to actions,
e.g. xaa would mean interactions between context, actions and actions.
"""
PackageChecker.sklearn("RegCBLearner")
from sklearn.feature_extraction import FeatureHasher
from sklearn.preprocessing import PolynomialFeatures
self._beta = beta
self._alpha = alpha
self._iter = 0
self._core_model = []
self._times = [0,0,0,0]
self._interactions = interactions
self._terms = []
self._learning_rate = learning_rate
for term in self._interactions:
term = term.lower()
x_num = term.count('x')
a_num = term.count('a')
if x_num + a_num != len(term):
raise Exception("Letters other than x and a were passed for parameter interactions. Please remove other letters/characters.")
self._terms.append((x_num, a_num))
max_x_term = max(max(term[0] for term in self._terms),1)
max_a_term = max(max(term[1] for term in self._terms),1)
self._x_p = PolynomialFeatures(degree=max_x_term, include_bias=False, interaction_only=False)
self._a_p = PolynomialFeatures(degree=max_a_term, include_bias=False, interaction_only=False)
self._h = FeatureHasher(input_type='pair')
    def predict(self, key: Key, context: Context, actions: Sequence[Action]) -> Sequence[float]:
        """Determine a PMF with which to select the given actions.
        Args:
            key: The key identifying the interaction we are choosing for.
            context: The context we're currently in. See the base class for more information.
            actions: The actions to choose from. See the base class for more information.
        Returns:
            The probability of taking each action. See the base class for more information.
        """
        import numpy as np
        from scipy import sparse

        # lazily size the weight vector on the first call, matching the dense
        # or sparse shape of the featurized (context, action) pair
        if self._iter == 0:
            if isinstance(context,dict) or isinstance(actions[0],dict):
                self._core_model = sparse.csr_matrix(self._featurize(context, actions[0]).shape)
            else:
                self._core_model = np.zeros(self._featurize(context, actions[0]).shape)

        # reset the timing accumulators once the warm-up phase is over
        # (self._iter is advanced by learn(), not here)
        if self._iter == 200:
            self._times = [0,0,0,0]

        # warm-up: play uniformly at random for the first 200 interactions
        if (self._iter < 200):
            return [1/len(actions)] * len(actions)

        else:
            # afterwards: act greedily on the optimistic score returned by
            # the regression-oracle bin search (argmax with ties broken by
            # the first maximizer encountered)
            maxScore  = -float('inf')
            maxAction = None

            for action in actions:
                features = self._featurize(context,action)
                score = self._bin_search(features, len(actions))

                if score > maxScore:
                    maxAction = action
                    maxScore  = score

            return [int(action == maxAction) for action in actions]
def learn(self, key: Key, context: Context, action: Action, reward: float, probability: float) -> None:
"""Learn from the given interaction.
Args:
key: The key identifying the interaction this observed reward came from.
context: The context we're learning about. See the base class for more information.
action: The action that was selected in the context. See the base class for more information.
reward: The reward that was gained from the action. See the base class for more information.
probability: The probability that the given action was taken.
"""
start = time.time()
features = self._featurize(context, action)
self._core_model = self._update_model(self._core_model, features, reward, 1)
self._times[2] += time.time()-start
self._iter += 1
# if (self._iter-200-1) % 50 == 0 and self._iter > 200:
# print(f'avg phi time: {round(self._times[0]/(self._iter-200),2)}')
# print(f'avg bin time: {round(self._times[1]/(self._iter-200),2)}')
# print(f'avg lrn time: {round(self._times[2]/(self._iter-200),2)}')
    def _bin_search(self, features, K_t) -> float:
        """Return an optimistic (upper-bound) reward estimate for `features`
        via a binary search over importance weights, following the online bin
        search implementation of Bietti et al. (2018).

        Args:
            features: featurized (context, action) pair to score.
            K_t: the number of available actions (enters the confidence
                constraint through alpha * log(K_t)).
        """
        start = time.time()

        y_u = 2
        w   = 1

        # probe the model with one hypothetical update toward y_u to get the
        # local sensitivity s_u_a of the prediction to the importance weight
        f_u_a_w = self._update_model(self._core_model, features, y_u, w)
        f_x_t_a = self._predict_model(self._core_model, features)
        s_u_a   = (self._predict_model(f_u_a_w, features) - f_x_t_a) / w

        obj = lambda w: w*(f_x_t_a-y_u)**2 - w*(f_x_t_a+s_u_a*w-y_u)**2

        # invariant: obj is evaluated on [lower, upper]; the search halves the
        # step until the interval width shrinks by 2**30 AND obj < constraint
        lower_search_bound = 0
        upper_search_bound = (f_x_t_a-y_u)/(-s_u_a)
        width_search_bound = upper_search_bound - lower_search_bound

        constraint = self._alpha * math.log(K_t)

        w_old = lower_search_bound
        w_now = lower_search_bound + 1/2*width_search_bound
        o     = obj(w_now)

        while abs(w_now-w_old) > width_search_bound*(1/2)**30 or o >= constraint:
            w_diff = abs(w_now-w_old)
            w_old  = w_now
            if o < constraint:
                w_now += w_diff/2
            else:
                w_now -= w_diff/2
            o = obj(w_now)

        self._times[1] += time.time() - start

        # optimistic estimate: prediction plus sensitivity times chosen weight
        return f_x_t_a + s_u_a*w_now
def _featurize(self, context, action):
import numpy as np #type: ignore
start = time.time()
is_sparse = isinstance(context, dict) or isinstance(action, dict)
if isinstance(context, dict):
context_values = list(context.values())
context_names = list([ f"x{k}" for k in context.keys() ])
else:
context_values = (context or [1])
context_names = [''] if not is_sparse else [ f"x{i}" for i in range(len(context_values)) ]
if isinstance(action, dict):
action_names = list([ f"a{k}" for k in action.keys() ])
action_values = list(action.values())
else:
action_values = action
action_names = [''] if not is_sparse else [ f"a{i}" for i in range(len(action_values)) ]
x_terms_by_degree = self._terms_by_degree(len(context_values), self._x_p.fit_transform([context_values])[0])
a_terms_by_degree = self._terms_by_degree(len(action_values) , self._a_p.fit_transform([action_values])[0])
features = self._interaction_terms(x_terms_by_degree, a_terms_by_degree, [1])
if is_sparse:
x_names_by_degree = self._terms_by_degree(len(context_values), self._x_p.get_feature_names(context_names))
a_names_by_degree = self._terms_by_degree(len(context_values), self._a_p.get_feature_names(action_names))
names = self._interaction_terms(x_names_by_degree, a_names_by_degree, [''])
final_features = np.array(features) if not is_sparse else self._h.fit_transform([list(zip(names,features))])
self._times[0] += time.time() - start
return final_features
def _terms_by_degree(self, base_term_count:int, terms:Sequence[Any], with_bias:bool = False) -> Dict[int,Sequence[Any]]:
terms_by_degree = {}
index = 0 if not with_bias else 1
degree = 1
while index != len(terms):
degree_terms_count = int((base_term_count**degree + base_term_count)/2)
terms_by_degree[degree] = terms[index:degree_terms_count]
index += degree_terms_count
degree += 1
return terms_by_degree
def _interaction_terms(self, x_terms_by_degree, a_terms_by_degree, default):
import numpy as np
interaction_terms = []
for term in self._terms:
x_for_degree = x_terms_by_degree.get(term[0], default)
a_for_degree = a_terms_by_degree.get(term[1], default)
if not isinstance(x_for_degree[0],str):
outer = np.outer(x_for_degree, a_for_degree)
else:
outer = np.char.array(x_for_degree)[:,None] + np.char.array(a_for_degree)
interaction_terms += outer.T.reshape((1,-1)).squeeze().tolist()
return interaction_terms
def _predict_model(self, model, features):
import numpy as np
import scipy.sparse as sp
if sp.issparse(model):
return model.multiply(features).data.sum()
else:
return np.dot(model, features)
    def _update_model(self, model, features, value, importance):
        """One SGD step on the squared loss: move `model` against the gradient
        of (prediction - value)^2, scaled by the learning rate and the given
        importance weight. Returns the new weight vector (does not mutate)."""
        error = self._predict_model(model, features) - value
        return model - self._learning_rate*features*error*importance | [
"coba.utilities.PackageChecker.sklearn",
"sklearn.preprocessing.PolynomialFeatures",
"scipy.sparse.issparse",
"math.log",
"numpy.array",
"numpy.dot",
"numpy.outer",
"numpy.char.array",
"time.time",
"sklearn.feature_extraction.FeatureHasher"
] | [((1651, 1689), 'coba.utilities.PackageChecker.sklearn', 'PackageChecker.sklearn', (['"""RegCBLearner"""'], {}), "('RegCBLearner')\n", (1673, 1689), False, 'from coba.utilities import PackageChecker\n'), ((2615, 2701), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'max_x_term', 'include_bias': '(False)', 'interaction_only': '(False)'}), '(degree=max_x_term, include_bias=False, interaction_only=\n False)\n', (2633, 2701), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2717, 2803), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'max_a_term', 'include_bias': '(False)', 'interaction_only': '(False)'}), '(degree=max_a_term, include_bias=False, interaction_only=\n False)\n', (2735, 2803), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2819, 2851), 'sklearn.feature_extraction.FeatureHasher', 'FeatureHasher', ([], {'input_type': '"""pair"""'}), "(input_type='pair')\n", (2832, 2851), False, 'from sklearn.feature_extraction import FeatureHasher\n'), ((5017, 5028), 'time.time', 'time.time', ([], {}), '()\n', (5026, 5028), False, 'import time\n'), ((5612, 5623), 'time.time', 'time.time', ([], {}), '()\n', (5621, 5623), False, 'import time\n'), ((6748, 6759), 'time.time', 'time.time', ([], {}), '()\n', (6757, 6759), False, 'import time\n'), ((9625, 9643), 'scipy.sparse.issparse', 'sp.issparse', (['model'], {}), '(model)\n', (9636, 9643), True, 'import scipy.sparse as sp\n'), ((5192, 5203), 'time.time', 'time.time', ([], {}), '()\n', (5201, 5203), False, 'import time\n'), ((6132, 6145), 'math.log', 'math.log', (['K_t'], {}), '(K_t)\n', (6140, 6145), False, 'import math\n'), ((6588, 6599), 'time.time', 'time.time', ([], {}), '()\n', (6597, 6599), False, 'import time\n'), ((8184, 8202), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (8192, 8202), True, 'import numpy as np\n'), ((8303, 8314), 'time.time', 'time.time', ([], {}), '()\n', (8312, 8314), 
False, 'import time\n'), ((9733, 9756), 'numpy.dot', 'np.dot', (['model', 'features'], {}), '(model, features)\n', (9739, 9756), True, 'import numpy as np\n'), ((9248, 9284), 'numpy.outer', 'np.outer', (['x_for_degree', 'a_for_degree'], {}), '(x_for_degree, a_for_degree)\n', (9256, 9284), True, 'import numpy as np\n'), ((9365, 9392), 'numpy.char.array', 'np.char.array', (['a_for_degree'], {}), '(a_for_degree)\n', (9378, 9392), True, 'import numpy as np\n'), ((9327, 9354), 'numpy.char.array', 'np.char.array', (['x_for_degree'], {}), '(x_for_degree)\n', (9340, 9354), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
SUMMER RESEARCH 2016/2017/2018
ASSIGNMENT: Plot correlations
AUTHOR: <NAME> (<EMAIL>)
SUPERVISOR: <NAME>
VERSION: 2019-Mar-25
PURPOSE: Plot various parameters from multiple data tables while
calculating Spearman rank correlations and associated p-values
using SciPy.
"""
# imports
import numpy as np
from astropy.io import ascii
#import linmix
#import matplotlib as mpl # for publication-quality plots
#mpl.rcParams['font.serif'] = "Times New Roman"
#mpl.rcParams['font.family'] = "serif"
#mpl.rcParams['text.usetex'] = False # have to install LaTeX and then set to True
import matplotlib.pyplot as plt
import scipy.stats as sp
from scipy import linalg
from time import ctime
import warnings
warnings.filterwarnings("ignore", category = RuntimeWarning) # ignore warnings
# read in data from sample catalog
dat = ascii.read('accept_catalog.csv') # requires columns to have unique names
# unpack the catalog columns into module-level arrays; the short names here
# must match the keys used in the DICT/UNCERTS lookup tables below
zz, K0, K100, Tx = dat['z'], dat['K0'], dat['K100'], dat['Tx']
Lbol, LHa, Lrad = dat['Lbol'], dat['LHa'], dat['Lrad']
# these values are for an annulus with inner radius ~20 kpc
Rin, Rout, eDen, PLent = dat['Rin'], dat['Rout'], dat['nelec'], dat['Kitpl']
flatent, PLpress, flatpress = dat['Kflat'], dat['Pitpl'], dat['Pflat']
clusmass, clustemp = dat['Mgrav'], dat['clustemp']
coolingtime52, coolingtime = dat['tcool5/2'], dat['tcool3/2']
# star-formation rates from several indicators, plus BCG stellar mass
UVSFR, IRSFR, seventySFR = dat['UVSFR'], dat['IRSFR'], dat['70SFR']
twentyfourSFR, BCGmass = dat['24SFR'], dat['BCGmass']
ROIout, ansize = dat['ROIout'], dat['D_A']
# morphology measures: CAS (concentration/asymmetry/clumpiness) and SPA
asymm, clump, concen = dat['asymm_v0'], dat['clumpy_v0'], dat['concen_v0']
sym, peak, align = dat['Symmetry'], dat['Peakiness'], dat['Alignment']
cavpow = dat['completeCavPow']
# alternative BCG mass / SFR values, presumably from Fraser-McKelvie+ (2014)
BCGalt, SFRalt = dat['BCG_Stellar_Mass'], dat['BCG_SFR']
tcool = dat['alt_tcool']
# axis label dictionary: maps each short parameter name to its plot-axis
# label (LaTeX); keys must match the variable names unpacked from `dat`
DICT = {
        # parameters from main table for entire cluster
        'zz':'Redshift',
        'K0':'Central Entropy (keV$\cdot$cm$^2$)',
        'K100':'Entropy at 100 kpc (keV$\cdot$cm$^2$)',
        'Tx':'Average Cluster Temperature (keV)',
        'Lbol':'Cluster Bolometric Luminosity ($10^{44}$ ergs s$^{-1}$)',
        'LHa':r'Cluster H$\alpha$ Luminosity ($10^{40}$ ergs s$^{-1}$)',
        'Lrad':'Cluster Radio Luminosity ($10^{40}$ ergs s$^{-1}$)',
        # parameters for annulus with inner radius ~20 kpc
        'eDen':'Electron Density (cm$^{-3}$)',
        'PLent':'Entropy using a Power Law (keV$\cdot$cm$^2$)',
        'flatent':'Entropy using a Flat Relation (keV$\cdot$cm$^2$)',
        'PLpress':'Pressure (dyne cm$^{-2}$)', #'Pressure (Power Law)',
        'flatpress':'Pressure (dyne cm$^{-2}$)', #'Pressure (Flat Relation)',
        'clusmass':'Cluster Mass ($M_\odot$)',
        'clustemp':'Cluster X-ray Temperature (keV)',
        'coolingtime52':'Cooling Time using the 5/2 Model (Gyr)', # 5*0.6 = 3
        'coolingtime':'Cooling Time (Gyr)', # uses the 3/2 model
        # star-formation parameters for Brightest Cluster Galaxy (BCG)
        'UVSFR':'UV SFR ($M_\odot$ yr$^{-1}$)',
        'IRSFR':'IR SFR ($M_\odot$ yr$^{-1}$)',
        'seventySFR':'70 $\mu$m SFR ($M_\odot$ yr$^{-1}$)',
        'twentyfourSFR':'24 $\mu$m SFR ($M_\odot$ yr$^{-1}$)',
        'BCGmass':'BCG Stellar Mass ($10^{10} \/ M_\odot$)',
        # CAS parameters and extras for entire cluster
        'asymm':'Asymmetry',
        'clump':'Clumpiness',
        'concen':'Concentration',
    #    'ROIout':'Outer Radius of Region of Interest (Mpc)',
    #    'angsize':'Angular Size Distance (Mpc)',
        # SPA parameters and cavity power for entire cluster
        'sym':'Symmetry',
        'peak':'Peakiness',
        'align':'Alignment',
        'cavpow':'Cavity Power ($10^{42}$ ergs s$^{-1}$)',
        # BCG and SFR parameters coming from Fraser-McKelvie et al. (2014)
        'BCGalt':'BCG Stellar Mass ($10^{10} \/ M_\odot$)\nfrom F-M+ (2014)',
        'SFRalt':'SFR ($M_\odot$ yr$^{-1}$)\nfrom F-M+ (2014)',
        # general axes titles and legend entries for multi-plots
        'pressure':'Pressure (dyne cm$^{-2}$)',
        'PL':'Power Law Model',
        'flat':'Flat Relation Model'
        }
# dictionary to access associated errors; keys mirror DICT so main() can
# look up error bars by the same short parameter name
UNCERTS = {
        'zz':dat['z_err'],
        'K0':dat['K0_err'], # NEED TO FINISH GETTING
        'K100':dat['K100_err'], # NEED TO FINISH GETTING
        'Tx':dat['Tx_err'], # error for Tx: standard dev. of individual temps # FINISH GETTING
        'Lbol':dat['Lbol_err'],
        'LHa':dat['LHa_err'],
        'Lrad':dat['Lrad_err'],
        'eDen':dat['nelec_err'],
        'PLent':dat['K_err'],
        'flatent':dat['K_err'],
        'PLpress':dat['Perr'],
        'flatpress':dat['Perr'],
        'clusmass':dat['Mgrav_err'],
        'clustemp':dat['clustemp_err'],
        'coolingtime52':dat['t52err'],
        'coolingtime':dat['t32err'],
        'UVSFR':dat['UVerr'],
        'IRSFR':dat['IR_err'], # no error for IRSFR, therefore equal to 0
        'seventySFR':dat['70err'],
        'twentyfourSFR':dat['24err'],
        'BCGmass':dat['BCGmass_err'], # no error for BCGmass, therefore equal to 0
        'concen':dat['concen_v0_err'],
        'asymm':dat['asymm_v0_err'],
        'clump':dat['clump_v0_err'],
        'sym':dat['Symm_err'],
        'peak':dat['Peak_err'],
        'align':dat['Align_err'],
        # asymmetric errors are stored as [low, high] pairs
        'cavpow':[dat['complete_err_low'],dat['complete_err_high']],
        'BCGalt':[dat['mass_low'],dat['mass_high']],
        'SFRalt':[dat['SFR_low'],dat['SFR_high']]
        }
# constants
currentFig = 1 # first figure will be numbered as 'Figure 1'
#..........................................................................main
def main(xvals, xlab, yvals, ylab, xmin=None, xmax=None, ymin=None,
         ymax=None, logx=False, logy=False, linear=False, errors=True,
         showplot=True, printfit=False) :
    """
    Plot one parameter against another, labelling the axes from DICT, and
    print the Spearman rank correlation of the pair (computed with SciPy,
    NaNs omitted).

    Parameters
    ----------
    xvals, yvals : array-like
        The two quantities to correlate.
    xlab, ylab : str
        Keys into DICT (axis labels) and UNCERTS (error bars).
    xmin, xmax, ymin, ymax : float, optional
        Explicit axis limits; None lets matplotlib autoscale.
    logx, logy, linear : bool
        Axis-scale selectors; set at most one. Any other combination
        (including none) falls back to log-log axes, matching the
        historical behavior of the original if/elif chains.
    errors : bool
        When True, draw error bars from UNCERTS; otherwise plain markers.
    showplot : bool
        When False, only the Spearman statistic is printed; no figure is
        created and currentFig is not advanced.
    printfit : bool
        Unused; retained for backward compatibility with existing callers
        (the fitting code it once controlled is disabled).
    """
    global currentFig

    spear = sp.spearmanr(xvals, yvals, nan_policy='omit') # find Spearman rank
                                                        # of the correlation
    print("Figure %2.1d   %13s  vs  %-13s   Spearman: %8.3g   pvalue: %8.2g" %
          (currentFig, ylab, xlab, spear[0], spear[1]) ) # print Spearman rank

    if not showplot :
        return

    fig = plt.figure(currentFig)  # the current figure
    currentFig += 1
    plt.clf() # clear the figure before each run
    ax = fig.add_subplot(111) # set axes, figure location

    # collapse the flag combinations into a pair of axis scales; the five
    # duplicated plotting branches previously here differed only in which
    # scales they selected
    if logx and not logy and not linear :
        xscale, yscale = 'log', 'linear'    # e.g. for peakiness
    elif logy and not logx and not linear :
        xscale, yscale = 'linear', 'log'
    elif linear and not logx and not logy :
        xscale, yscale = 'linear', 'linear'
    else :
        xscale, yscale = 'log', 'log'       # default: power laws
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)

    if errors :
        ax.errorbar(xvals, yvals, xerr=UNCERTS[xlab],
                    yerr=UNCERTS[ylab], fmt='ko', elinewidth=0.3,
                    capsize=1.5, errorevery=1)
    else :
        ax.plot(xvals, yvals, 'ko')

    ax.set_xlabel("%s" % DICT[xlab], fontsize = 15 )
    ax.set_ylabel("%s" % DICT[ylab], fontsize = 15 )
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)

    plt.tight_layout()
    plt.show() # show the figure

    return
#.....................................................................all_corrs
def all_corrs(param, label, plots=True) :
    """Correlate `param` against every catalog quantity (except 'Rout' and
    'angsize'), preserving the original call order. Each pair is passed to
    main(), which prints the Spearman rank and optionally plots.

    Parameters
    ----------
    param : array-like
        The quantity to correlate against everything else.
    label : str
        Its key in DICT/UNCERTS.
    plots : bool
        Forwarded to main() as showplot.
    """
    # quantities plotted with the default (log-log) axes
    loglog_targets = [(zz, 'zz'), (K0, 'K0'), (K100, 'K100'), (Tx, 'Tx'),
                      (Lbol, 'Lbol'), (LHa, 'LHa'), (Lrad, 'Lrad'),
                      (eDen, 'eDen'), (PLent, 'PLent'), (flatent, 'flatent'),
                      (PLpress, 'PLpress'), (flatpress, 'flatpress'),
                      (clusmass, 'clusmass'), (clustemp, 'clustemp'),
                      (coolingtime52, 'coolingtime52'),
                      (coolingtime, 'coolingtime'), (UVSFR, 'UVSFR'),
                      (IRSFR, 'IRSFR'), (seventySFR, 'seventySFR'),
                      (twentyfourSFR, 'twentyfourSFR'), (BCGmass, 'BCGmass')]
    for values, name in loglog_targets :
        main(param, label, values, name, showplot=plots)

    # morphology measures use a log x-axis with a linear y-axis
    logx_targets = [(asymm, 'asymm'), (clump, 'clump'), (concen, 'concen'),
                    (sym, 'sym'), (peak, 'peak'), (align, 'align')]
    for values, name in logx_targets :
        main(param, label, values, name, logx=True, showplot=plots)

    # individual cavity-power samples (raff/cavag/osul/hlava) may have too
    # few entries for statistically significant analysis; use the combined one
    main(param, label, cavpow, 'cavpow', showplot=plots)

    return
#........................................................................cavPow
def cavPow(yvals, ylab, ymin=None, ymax=None, linear=False,
           location='upper left') :
    """Plot `yvals` against the per-study cavity powers, all on one axis.

    NOTE(review): relies on module-level arrays raff, cavag, osul and hlava
    (per-study cavity powers) that are not defined in this part of the file
    -- confirm they exist before calling.

    Parameters
    ----------
    yvals : array-like
        The quantity to plot on the y-axis.
    ylab : str
        Its key in DICT (axis label).
    ymin, ymax : float, optional
        Explicit y-axis limits.
    linear : bool
        When True the y-axis is linear (x stays log); otherwise log-log.
    location : str
        Legend placement, passed to plt.legend.
    """
    global currentFig
    fig = plt.figure(currentFig)
    currentFig += 1
    plt.clf()
    ax = fig.add_subplot(111)
    ax.set_ylim(ymin, ymax)

    # the x-axis is always logarithmic; only the y-scale depends on `linear`,
    # so one plotting function serves all four duplicated call groups
    draw = ax.semilogx if linear else ax.loglog
    draw(raff, yvals, 'ro', label = 'Rafferty et al. (2006)')
    draw(cavag, yvals, 'go', label = 'Cavagnolo et al. (2010)')
    draw(osul, yvals, 'bo', label = 'O’Sullivan et al. (2011)')
    draw(hlava, yvals, 'ko', label = 'Hlavacek-Larrondo et al. (2012)')

    ax.set_xlabel('Cavity Power ($10^{42}$ ergs s$^{-1}$)', fontsize = 15)
    ax.set_ylabel('%s' % DICT[ylab], fontsize = 15)

    plt.legend(loc = location)
    plt.tight_layout()
    plt.show()

    return
#...................................................................checkcommon
def checkcommon(param1, param2, noprint=False) :
    """Count the positions where BOTH inputs are non-NaN, printing each
    common pair as it is found. Returns the count when `noprint` is True;
    otherwise prints the count and returns None."""
    count = 0
    for i in range(len(param1)) :
        first, second = param1[i], param2[i]
        if np.isnan(first) or np.isnan(second) :
            continue
        count += 1
        print("%6g   %6g" % (first, second) )
    if noprint :
        return count
    print("\nNumber in common is %g." % count)
    return
#...................................................................checknonnan
def checknonnan(param, noprint=False) :
    """Count the non-NaN entries of `param`. Returns the count when
    `noprint` is True; otherwise prints it and returns None."""
    total = np.count_nonzero(~np.isnan(param)) # '~' inverts the bool matrix
    if noprint :
        return total
    print("\nNumber of non-nan elements is %g." % total)
    return
#..................................................................checkunique1
def checkunique1(param1, param2) :
    """Count the positions where at least one of the two inputs is non-NaN
    (first of two cross-checked implementations)."""
    # De Morgan: "a non-NaN or b non-NaN" == "not (both NaN)"
    return sum(1 for i in range(len(param1))
               if not (np.isnan(param1[i]) and np.isnan(param2[i])))
#..................................................................checkunique2
def checkunique2(param1, param2) :
    """Count the positions where at least one input is non-NaN via
    inclusion-exclusion: |A| + |B| - |A and B| (second implementation)."""
    combined = checknonnan(param1, noprint=True) + checknonnan(param2, noprint=True)
    return combined - checkcommon(param1, param2, noprint=True)
#...................................................................checkunique
def checkunique(param1, param2) :
    """Compute the unique-element count two independent ways and report it,
    or complain if the two methods disagree (a sanity cross-check)."""
    num1 = checkunique1(param1, param2)
    num2 = checkunique2(param1, param2)
    if num1 != num2:
        print("\nError! The two checks did not return the same number of " +
              "unique elements.")
    else:
        print("\nNumber of unique elements is %g." % num1)
    return
#....................................................................delete_val
def delete_val(param1, param2, param_of_interest, value) :
    """Return copies of param1 and param2 with every position removed at which
    param_of_interest equals *value*."""
    drop = np.where(param_of_interest == value)
    return np.delete(param1, drop), np.delete(param2, drop)
#....................................................................draftPlots
def draftPlots() :
    """Regenerate every figure from the December 14, 2016 draft of the paper.

    Relies on the module-level data arrays (coolingtime, K0, IRSFR, cavpow, ...)
    and on the plotting helpers main()/multi().  The trailing comments record
    two numbers per call — presumably the Spearman rho and p-value printed by
    main() (TODO confirm against main()'s output).

    NOTE(review): the multi(...) call below passes 8 positional arguments, which
    do not line up with multi's current signature (xvals, xlab, yvals1, ylab1,
    yvals2, ylab2, ...); the commented-out "#legend1, legend2" in multi's
    signature suggests this call predates a signature change — verify.
    """
    # plots in the December 14, 2016 draft of the paper
    main(coolingtime, 'coolingtime', K0, 'K0') # 0.531 7.8e-19
    main(coolingtime, 'coolingtime', IRSFR, 'IRSFR') # -0.000698 1
    main(coolingtime, 'coolingtime', UVSFR, 'UVSFR') # -0.24 0.011
    main(coolingtime, 'coolingtime', LHa, 'LHa') # -0.295 0.0016
    main(IRSFR, 'IRSFR', LHa, 'LHa') # 0.705 7.8e-07
    main(cavpow, 'cavpow', Lrad, 'Lrad') # 0.457 0.0018
    multi(Lrad, PLpress, Lrad, flatpress, 'Lrad', 'pressure', 'PL', 'flat')
    # 0.524 3.5e-18 on average
    main(cavpow, 'cavpow', coolingtime, 'coolingtime') # -0.4 0.0072
    main(cavpow, 'cavpow', LHa, 'LHa') # 0.575 0.0017
    main(cavpow, 'cavpow', IRSFR, 'IRSFR') # 0.74 6.9e-06
    main(cavpow, 'cavpow', K0, 'K0') # 0.612 1e-05
    main(cavpow, 'cavpow', BCGmass, 'BCGmass') # 0.711 2.2e-05
    main(BCGmass,'BCGmass', zz,'zz') # 0.674 4.1e-10
    main(cavpow, 'cavpow', zz, 'zz') # 0.696 1.6e-07
    main(BCGmass, 'BCGmass', coolingtime, 'coolingtime') # 0.0978 0.43
    main(BCGmass, 'BCGmass',K0,'K0') # 0.524 5.4e-06
    main(zz, 'zz', K0, 'K0') # 0.355 1.5e-08
    main(BCGmass, 'BCGmass', IRSFR, 'IRSFR') # 0.503 1.4e-05
    main(concen, 'concen', peak, 'peak', linear=True) # 0.774 7.4e-09
    main(align, 'align', asymm, 'asymm', linear=True) # -0.544 0.00034
    main(sym, 'sym', asymm, 'asymm', linear=True) # -0.54 0.00038
    main(coolingtime, 'coolingtime', asymm, 'asymm', logx=True) # 0.37 8.1e-05
    main(K0, 'K0', asymm, 'asymm', logx=True) # 0.526 4.8e-09
    main(cavpow, 'cavpow', asymm, 'asymm', logx=True)
    # old versions of cavity power plots
    # cavPow(Lrad, 'Lrad')
    # cavPow(coolingtime, 'coolingtime')
    # cavPow(LHa, 'LHa')
    # cavPow(IRSFR, 'IRSFR')
    # cavPow(K0, 'K0')
    # cavPow(BCGmass, 'BCGmass')
    # cavPow(zz, 'zz')
    # cavPow(asymm, 'asymm', location='lower left')
    return
#...........................................................................fit
def fit(param1, param2, lin=False, show_mb=False) :
    """Least-squares straight-line fit of param2 against param1.

    With lin=False the fit is done on log10 of both arrays (power-law fit);
    note this breaks if any value is 0.  Returns (slope, intercept, xs) where
    xs is a 1000-point grid spanning the common (non-NaN) x values.
    """
    from scipy.optimize import curve_fit
    x, y = getcommon(param1, param2)  # keep only jointly non-NaN pairs
    xs = np.linspace(min(x), max(x), 1000)
    if (lin == True) :
        fit_x, fit_y = x, y
    else :
        # fit in log-log space; this will break for any values of 0
        fit_x, fit_y = np.log10(x), np.log10(y)
    popt, pcov = curve_fit(linear, fit_x, fit_y)
    perr = np.sqrt(np.diag(pcov))  # 1-sigma parameter uncertainties
    if show_mb == True :
        print('\nSlope: %.3g +/- %.1g' % (popt[0], perr[0]))
        print('Intercept: %.3g +/- %.1g' % (popt[1], perr[1]))
    return popt[0], popt[1], xs
#.....................................................................getcommon
def getcommon(param1, param2) :
    """Return two parallel lists holding only the entries at indices where
    BOTH inputs are non-NaN."""
    kept1, kept2 = [], []
    for i in range(len(param1)) :
        a, b = param1[i], param2[i]
        if np.isnan(a) or np.isnan(b):
            continue
        kept1.append(a)
        kept2.append(b)
    return kept1, kept2
#.........................................................................histo
def histo(param, label, num_bins) :
    """Plot a normalized (density=True) histogram of the non-NaN values of
    *param*, labelled via the module-level DICT lookup table."""
    global currentFig
    fig = plt.figure(currentFig)
    currentFig += 1
    plt.clf()
    # pairing param with itself drops exactly the NaN entries
    vals, _ = getcommon(param, param)
    axis = fig.add_subplot(111)
    axis.hist(vals, bins=num_bins, density=True, color='k')
    plt.xlabel("%s" % DICT[label], fontsize = 15)
    plt.tight_layout()
    plt.show()
    return
#........................................................................linear
def linear(m, x, b) :
    """Straight line y = m*x + b (model function handed to curve_fit)."""
    return b + x * m
#...................................................................linmix_test
def linmix_test() :
    """Fit coolingtime vs. K0 with the linmix Bayesian linear-regression MCMC
    and plot the data with error bars on log-log axes.

    NOTE(review): `linmix` is presumably imported at the top of the module
    (not visible here) — confirm.  Rows with K0 == 0 are removed first so the
    log10 transforms are safe.
    """
    # main(K0, 'K0', coolingtime, 'coolingtime') # for comparison
    # drop entries where K0 == 0 (log10 would fail on them)
    newK0_err, newct_err = delete_val(K0_err, ct_err, K0, 0)
    newK0, newcoolingtime = delete_val(K0, coolingtime, K0, 0)
    logK0 = np.log10(newK0)
    logK0_err = np.log10(newK0_err)
    logct = np.log10(newcoolingtime)
    logct_err = np.log10(newct_err)
    # Bayesian line fit in log-log space (errors on both axes)
    lm = linmix.LinMix(logK0, logct, logK0_err, logct_err)
    lm.run_mcmc(silent=True)
    global currentFig
    fig = plt.figure(currentFig)
    currentFig += 1
    plt.clf()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.errorbar(newK0, newcoolingtime, xerr=newK0_err, yerr=newct_err,
                fmt='ko', elinewidth=0.3, capsize=1.5, errorevery=1)
    # slope = lm.chain['alpha']
    # intercept = lm.chain['beta']
    # xs = np.linspace(min(newK0), max(newK0), 1000)
    # ys = (xs**(slope))*(10**(intercept)) # transform to logspace
    # ax.loglog(xs, ys, 'r-') # plot the powerlaw
    # theoreticals = (xs**(2/3))*(10**(intercept)) # for tcool vs K0
    # ax.loglog(xs, theoreticals, 'r-')
    ax.set_xlabel("%s" % DICT['K0'], fontsize = 15 )
    ax.set_ylabel("%s" % DICT['coolingtime'], fontsize = 15 )
    plt.tight_layout()
    plt.show()
    return
#..........................................................................misc
def misc() :
    """Scratch space for one-off checks.

    Currently prints how many entries of the module-level LHa array equal 0.
    """
    zero_count = np.count_nonzero(LHa == 0)
    print(zero_count)
    return
#.........................................................................multi
def multi(xvals, xlab, yvals1, ylab1, yvals2, ylab2, #legend1, legend2,
          xmin=None, xmax=None, ymin=None,
          ymax=None, location='upper right') :
    """Plot two y-series against the same x-series on shared log-log axes.

    Prints the Spearman rank correlation (NaNs omitted) of each y-series with
    xvals, then draws both series with error bars taken from the module-level
    UNCERTS table; axis/legend labels come from the DICT table.  Both spearmanr
    calls print the figure number *before* currentFig is incremented.
    """
    global currentFig
    spear1 = sp.spearmanr(xvals, yvals1, nan_policy='omit')
    spear2 = sp.spearmanr(xvals, yvals2, nan_policy='omit')
    print("Figure %2.1d   Spearman: %6.3g   pvalue: %8.2g" %
          (currentFig, spear1[0], spear1[1]) )
    print("Figure %2.1d   Spearman: %6.3g   pvalue: %8.2g" %
          (currentFig, spear2[0], spear2[1]) )
    fig = plt.figure(currentFig)  # the current figure
    currentFig += 1
    plt.clf()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.errorbar(xvals, yvals1, xerr=UNCERTS[xlab],
                yerr=UNCERTS[ylab1], fmt='ko', elinewidth=0.3,
                capsize=1.5, errorevery=1, label = "%s" % DICT[ylab1])
    ax.errorbar(xvals, yvals2, xerr=UNCERTS[xlab],
                yerr=UNCERTS[ylab2], fmt='ro', elinewidth=0.3,
                capsize=1.5, errorevery=1, label = "%s" % DICT[ylab2])
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.set_xlabel("%s" % DICT[xlab], fontsize = 15 )
    ax.set_ylabel("%s" % DICT[ylab1], fontsize = 15 )
    plt.legend(loc = location)
    # ax.annotate('Power Law Spearman: %.3g, pval: %.2g' %(spear1[0], spear1[1]),
    #             xy=(0.98, 0.05), fontsize = 13, xycoords='axes fraction',
    #             ha='right', va='bottom')
    # ax.annotate('Flat Spearman: %.3g, pval: %.2g' % (spear2[0], spear2[1]),
    #             xy=(0.98, 0.02), fontsize = 13, xycoords='axes fraction',
    #             ha='right', va='bottom')
    plt.tight_layout()
    plt.show()
    return
#..................................................................partial_corr
def partial_corr(C):
    """Return the sample linear partial correlation coefficients between pairs
    of variables in C, controlling for the remaining variables in C.

    Linear-regression approach (clone of Matlab's partialcorr); algorithm:
    http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
    For each pair (X, Y), with Z = all other columns:
      1) regress X on Z and take the residuals,
      2) regress Y on Z and take the residuals,
      3) correlate the two residual vectors (Spearman here, NaNs omitted).

    Parameters
    ----------
    C : array-like, shape (n, p)
        Array with the different variables; each column of C is one variable.

    Returns
    -------
    P : ndarray, shape (p, p)
        P[i, j] is the partial correlation of C[:, i] and C[:, j] controlling
        for the remaining variables in C.  The diagonal is 1.

    Date: Nov 2014
    Author: <NAME>, <EMAIL>
    Testing: <NAME>, <EMAIL>
    """
    C = np.asarray(C)
    p = C.shape[1]
    # BUG FIX: np.float / np.bool were deprecated aliases of the builtins and
    # were removed in NumPy 1.24 — use the builtins directly.
    P_corr = np.zeros((p, p), dtype=float)
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i+1, p):
            # mask selecting the control variables Z = C minus columns i, j
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            # least-squares coefficients of each target on the controls
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            # residuals after removing the controls' linear contribution
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
            # corr = sp.pearsonr(res_i, res_j)[0]
            corr = sp.spearmanr(res_i, res_j, nan_policy='omit')[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return P_corr
#........................................................................p_corr
def p_corr(param1, param2) :
    """Print the partial-correlation matrix of two arrays.

    Builds a joint non-NaN mask, drops the masked entries from both arrays,
    stacks the survivors as two columns and hands the result to partial_corr.
    """
    good = (~np.isnan(param1)) & (~np.isnan(param2))
    col1 = np.ma.compressed(np.ma.array(param1, mask=~good))
    col2 = np.ma.compressed(np.ma.array(param2, mask=~good))
    matrix = np.column_stack((col1, col2))
    print(partial_corr(matrix))
    return
#...............................................................showTermination
def showTermination() :
    """Print the closing banner: programmer credit, timestamp, end notice."""
    for line in ("\nProgrammed by <NAME>",
                 "Date: " + ctime(),
                 "End of processing"):
        print(line)
    return
#..............................................................end of functions
# print a table of the CAS and SPA parameters
#count = 0
#for i in range(241) :
# if (np.isnan(clump[i])==False) and (np.isnan(sym[i])==False) :
# count += 1
# print("%6g %9g %9g %5g %4g %5g" % (concen[i], asymm[i],
# clump[i], sym[i],
# align[i], peak[i]) )
#print(count)
# test the partial correlation function
#p_corr(Lrad, PLpress)
#main(PLpress, "PLpress", Lrad, "Lrad", errors=False)
| [
"numpy.log10",
"numpy.column_stack",
"numpy.count_nonzero",
"time.ctime",
"scipy.linalg.lstsq",
"numpy.where",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.ma.compressed",
"scipy.stats.spearmanr",
"numpy.ones",
"numpy.ma.array",
"numpy.isnan",
"warnings.filterwarni... | [((811, 869), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (834, 869), False, 'import warnings\n'), ((935, 967), 'astropy.io.ascii.read', 'ascii.read', (['"""accept_catalog.csv"""'], {}), "('accept_catalog.csv')\n", (945, 967), False, 'from astropy.io import ascii\n'), ((6400, 6445), 'scipy.stats.spearmanr', 'sp.spearmanr', (['xvals', 'yvals'], {'nan_policy': '"""omit"""'}), "(xvals, yvals, nan_policy='omit')\n", (6412, 6445), True, 'import scipy.stats as sp\n'), ((13462, 13484), 'matplotlib.pyplot.figure', 'plt.figure', (['currentFig'], {}), '(currentFig)\n', (13472, 13484), True, 'import matplotlib.pyplot as plt\n'), ((13511, 13520), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (13518, 13520), True, 'import matplotlib.pyplot as plt\n'), ((14432, 14456), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'location'}), '(loc=location)\n', (14442, 14456), True, 'import matplotlib.pyplot as plt\n'), ((14470, 14488), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14486, 14488), True, 'import matplotlib.pyplot as plt\n'), ((14494, 14504), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14502, 14504), True, 'import matplotlib.pyplot as plt\n'), ((16728, 16764), 'numpy.where', 'np.where', (['(param_of_interest == value)'], {}), '(param_of_interest == value)\n', (16736, 16764), True, 'import numpy as np\n'), ((16788, 16815), 'numpy.delete', 'np.delete', (['param1', 'badIndex'], {}), '(param1, badIndex)\n', (16797, 16815), True, 'import numpy as np\n'), ((16833, 16860), 'numpy.delete', 'np.delete', (['param2', 'badIndex'], {}), '(param2, badIndex)\n', (16842, 16860), True, 'import numpy as np\n'), ((20436, 20458), 'matplotlib.pyplot.figure', 'plt.figure', (['currentFig'], {}), '(currentFig)\n', (20446, 20458), True, 'import matplotlib.pyplot as plt\n'), ((20485, 20494), 'matplotlib.pyplot.clf', 
'plt.clf', ([], {}), '()\n', (20492, 20494), True, 'import matplotlib.pyplot as plt\n'), ((20650, 20693), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('%s' % DICT[label])"], {'fontsize': '(15)'}), "('%s' % DICT[label], fontsize=15)\n", (20660, 20693), True, 'import matplotlib.pyplot as plt\n'), ((20707, 20725), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20723, 20725), True, 'import matplotlib.pyplot as plt\n'), ((20731, 20741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20739, 20741), True, 'import matplotlib.pyplot as plt\n'), ((21252, 21267), 'numpy.log10', 'np.log10', (['newK0'], {}), '(newK0)\n', (21260, 21267), True, 'import numpy as np\n'), ((21285, 21304), 'numpy.log10', 'np.log10', (['newK0_err'], {}), '(newK0_err)\n', (21293, 21304), True, 'import numpy as np\n'), ((21318, 21342), 'numpy.log10', 'np.log10', (['newcoolingtime'], {}), '(newcoolingtime)\n', (21326, 21342), True, 'import numpy as np\n'), ((21360, 21379), 'numpy.log10', 'np.log10', (['newct_err'], {}), '(newct_err)\n', (21368, 21379), True, 'import numpy as np\n'), ((21520, 21542), 'matplotlib.pyplot.figure', 'plt.figure', (['currentFig'], {}), '(currentFig)\n', (21530, 21542), True, 'import matplotlib.pyplot as plt\n'), ((21569, 21578), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (21576, 21578), True, 'import matplotlib.pyplot as plt\n'), ((22302, 22320), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22318, 22320), True, 'import matplotlib.pyplot as plt\n'), ((22326, 22336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22334, 22336), True, 'import matplotlib.pyplot as plt\n'), ((22957, 23003), 'scipy.stats.spearmanr', 'sp.spearmanr', (['xvals', 'yvals1'], {'nan_policy': '"""omit"""'}), "(xvals, yvals1, nan_policy='omit')\n", (22969, 23003), True, 'import scipy.stats as sp\n'), ((23018, 23064), 'scipy.stats.spearmanr', 'sp.spearmanr', (['xvals', 'yvals2'], {'nan_policy': '"""omit"""'}), "(xvals, 
yvals2, nan_policy='omit')\n", (23030, 23064), True, 'import scipy.stats as sp\n'), ((23304, 23326), 'matplotlib.pyplot.figure', 'plt.figure', (['currentFig'], {}), '(currentFig)\n', (23314, 23326), True, 'import matplotlib.pyplot as plt\n'), ((23375, 23384), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (23382, 23384), True, 'import matplotlib.pyplot as plt\n'), ((24046, 24070), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'location'}), '(loc=location)\n', (24056, 24070), True, 'import matplotlib.pyplot as plt\n'), ((24490, 24508), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24506, 24508), True, 'import matplotlib.pyplot as plt\n'), ((24514, 24524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24522, 24524), True, 'import matplotlib.pyplot as plt\n'), ((26325, 26338), 'numpy.asarray', 'np.asarray', (['C'], {}), '(C)\n', (26335, 26338), True, 'import numpy as np\n'), ((26373, 26405), 'numpy.zeros', 'np.zeros', (['(p, p)'], {'dtype': 'np.float'}), '((p, p), dtype=np.float)\n', (26381, 26405), True, 'import numpy as np\n'), ((27546, 27580), 'numpy.ma.array', 'np.ma.array', (['param1'], {'mask': '(~newmask)'}), '(param1, mask=~newmask)\n', (27557, 27580), True, 'import numpy as np\n'), ((27599, 27633), 'numpy.ma.array', 'np.ma.array', (['param2'], {'mask': '(~newmask)'}), '(param2, mask=~newmask)\n', (27610, 27633), True, 'import numpy as np\n'), ((27657, 27685), 'numpy.ma.compressed', 'np.ma.compressed', (['new_param1'], {}), '(new_param1)\n', (27673, 27685), True, 'import numpy as np\n'), ((27703, 27731), 'numpy.ma.compressed', 'np.ma.compressed', (['new_param2'], {}), '(new_param2)\n', (27719, 27731), True, 'import numpy as np\n'), ((27752, 27791), 'numpy.column_stack', 'np.column_stack', (['(onlydata1, onlydata2)'], {}), '((onlydata1, onlydata2))\n', (27767, 27791), True, 'import numpy as np\n'), ((6826, 6848), 'matplotlib.pyplot.figure', 'plt.figure', (['currentFig'], {}), '(currentFig)\n', (6836, 6848), 
True, 'import matplotlib.pyplot as plt\n'), ((6905, 6914), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6912, 6914), True, 'import matplotlib.pyplot as plt\n'), ((10707, 10725), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10723, 10725), True, 'import matplotlib.pyplot as plt\n'), ((10735, 10745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10743, 10745), True, 'import matplotlib.pyplot as plt\n'), ((19284, 19307), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'x', 'y'], {}), '(linear, x, y)\n', (19293, 19307), False, 'from scipy.optimize import curve_fit\n'), ((19496, 19535), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'logparam1', 'logparam2'], {}), '(linear, logparam1, logparam2)\n', (19505, 19535), False, 'from scipy.optimize import curve_fit\n'), ((19557, 19570), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (19564, 19570), True, 'import numpy as np\n'), ((22527, 22553), 'numpy.count_nonzero', 'np.count_nonzero', (['(LHa == 0)'], {}), '(LHa == 0)\n', (22543, 22553), True, 'import numpy as np\n'), ((15152, 15167), 'numpy.isnan', 'np.isnan', (['param'], {}), '(param)\n', (15160, 15167), True, 'import numpy as np\n'), ((19352, 19363), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (19360, 19363), True, 'import numpy as np\n'), ((19365, 19376), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (19373, 19376), True, 'import numpy as np\n'), ((26508, 26533), 'numpy.ones', 'np.ones', (['p'], {'dtype': 'np.bool'}), '(p, dtype=np.bool)\n', (26515, 26533), True, 'import numpy as np\n'), ((27482, 27498), 'numpy.isnan', 'np.isnan', (['param1'], {}), '(param1)\n', (27490, 27498), True, 'import numpy as np\n'), ((27504, 27520), 'numpy.isnan', 'np.isnan', (['param2'], {}), '(param2)\n', (27512, 27520), True, 'import numpy as np\n'), ((28208, 28215), 'time.ctime', 'ctime', ([], {}), '()\n', (28213, 28215), False, 'from time import ctime\n'), ((14726, 14745), 'numpy.isnan', 'np.isnan', (['param1[i]'], 
{}), '(param1[i])\n', (14734, 14745), True, 'import numpy as np\n'), ((14753, 14772), 'numpy.isnan', 'np.isnan', (['param2[i]'], {}), '(param2[i])\n', (14761, 14772), True, 'import numpy as np\n'), ((15529, 15548), 'numpy.isnan', 'np.isnan', (['param1[i]'], {}), '(param1[i])\n', (15537, 15548), True, 'import numpy as np\n'), ((15555, 15574), 'numpy.isnan', 'np.isnan', (['param2[i]'], {}), '(param2[i])\n', (15563, 15574), True, 'import numpy as np\n'), ((20109, 20128), 'numpy.isnan', 'np.isnan', (['param1[i]'], {}), '(param1[i])\n', (20117, 20128), True, 'import numpy as np\n'), ((20136, 20155), 'numpy.isnan', 'np.isnan', (['param2[i]'], {}), '(param2[i])\n', (20144, 20155), True, 'import numpy as np\n'), ((26612, 26644), 'scipy.linalg.lstsq', 'linalg.lstsq', (['C[:, idx]', 'C[:, j]'], {}), '(C[:, idx], C[:, j])\n', (26624, 26644), False, 'from scipy import linalg\n'), ((26670, 26702), 'scipy.linalg.lstsq', 'linalg.lstsq', (['C[:, idx]', 'C[:, i]'], {}), '(C[:, idx], C[:, i])\n', (26682, 26702), False, 'from scipy import linalg\n'), ((26910, 26955), 'scipy.stats.spearmanr', 'sp.spearmanr', (['res_i', 'res_j'], {'nan_policy': '"""omit"""'}), "(res_i, res_j, nan_policy='omit')\n", (26922, 26955), True, 'import scipy.stats as sp\n')] |
import os
import pandas_datareader
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
import pandas
import pandas as pd
import plotly.express as px
import pandas_datareader.data as web
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np
def cumulativeDifference(stock):
    """Normalize every price column (everything after the first, 'Date', column)
    by its first value, in place, and return the same DataFrame."""
    for column in stock.columns[1:]:
        first_price = stock[column][0]
        stock[column] = stock[column] / first_price
    return stock
def plotlyPlot(title, stock):
    """Open an interactive line chart in the browser: one scatter trace per
    price column, with the 'Date' column on the x-axis."""
    fig = px.line(title=title)
    for column in stock.columns[1:]:
        fig.add_scatter(x=stock['Date'], y=stock[column], name=column)
    fig.show()
def individualStock(priceDataFrame , volumeDataFrame, name):
    """Assemble a single-ticker frame (Date/Close/Volume) by pulling the named
    column out of the wide price and volume tables."""
    columns = {
        'Date': priceDataFrame['Date'],
        'Close': priceDataFrame[name],
        'Volume': volumeDataFrame[name],
    }
    return pd.DataFrame(columns)
def tradingWindow(data, n):
    """Add a 'target' column holding the 'Adj Close' price n rows ahead, then
    return the frame with its final n rows dropped (their targets are NaN).

    Note: the 'target' column is added to the caller's frame in place.
    """
    data['target'] = data['Adj Close'].shift(-n)
    return data[:-n]
def LSTM (X_Train , X_Test):
    """Build, compile and train a 3-layer LSTM regression model.

    Args:
        X_Train: 2-D array (samples, features) of training features.
        X_Test: 2-D array (samples, features); only reshaped here, not used
            for training.
    Returns:
        The trained keras Model.

    NOTE(review): Y_Train is read from module scope rather than passed as a
    parameter — the caller must define it before calling this function
    (confirm; kept for interface compatibility).
    """
    # Reshape the 2-D feature arrays to 3-D (samples, timesteps, 1) as the
    # LSTM input layer expects.
    xTrain = np.reshape(X_Train, (X_Train.shape[0], X_Train.shape[1], 1))
    xTest = np.reshape(X_Test, (X_Test.shape[0], X_Test.shape[1], 1))
    # Building the LSTM deep neural network model
    inputLayer = keras.layers.Input(shape=(xTrain.shape[1], xTrain.shape[2]))
    # return_sequences=True keeps the full sequence so layers can be stacked.
    hidden = keras.layers.LSTM(150, return_sequences=True)(inputLayer)
    hidden = keras.layers.LSTM(150, return_sequences=True)(hidden)
    hidden = keras.layers.LSTM(150, return_sequences=True)(hidden)
    # Linear output layer for regression.
    outputLayer = keras.layers.Dense(1, activation='linear')(hidden)
    brainModel = keras.Model(inputs=inputLayer, outputs=outputLayer)
    brainModel.compile(optimizer='adam', loss='mse')
    brainModel.summary()
    # BUG FIX: train on the reshaped 3-D tensor xTrain; the original passed the
    # 2-D X_Train, which does not match the (timesteps, features) input shape
    # declared above.  validation_split holds out 20% for cross-validation.
    brainModel.fit(xTrain, Y_Train, epochs=20, batch_size=32, validation_split=0.2)
    return brainModel
def retrieveData(Start, End, Ticker):
    """Download daily market data for *Ticker* from Yahoo between *Start* and
    *End* (any pandas-parsable date strings); the Date index is reset into an
    ordinary column before the frame is returned."""
    start = pd.to_datetime(Start)
    end = pd.to_datetime(End)
    frame = web.DataReader(Ticker, 'yahoo', start, end)
    frame.reset_index(inplace=True, drop=False)
    return frame
# Retrieving the stockmarket data..
stockDataframe = retrieveData('2008-01-01','2020-10-01','GOOG')
# Keep only the columns the model needs.
stock = (stockDataframe[['Date' , 'Adj Close', 'Volume']])
# Add next-day 'target' column and drop the last (target-less) row.
priceVolumeTargetDataframe = tradingWindow(stock , 1)
print(priceVolumeTargetDataframe)
# normalizing the prices and volume with a feature range between 0-1
normalizeObj = MinMaxScaler(feature_range=(0,1))
priceVolumeTargetScaledDataframe = normalizeObj.fit_transform(priceVolumeTargetDataframe.drop(columns=['Date']))
# Feature : X , Target : Y
# This will get all the first two columns which are [Close , Volume]
X = priceVolumeTargetScaledDataframe[:,:2]
Y = priceVolumeTargetScaledDataframe[:,2:]
# Perform the trainTestSplit
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X,Y,train_size=0.65)
# Make Predictions
# NOTE(review): LSTM reshapes its training input to 3-D, but predict() below is
# fed the raw 2-D X — confirm the model accepts that shape.
brainModel = LSTM(X_Train , X_Test)
predictions = brainModel.predict(X)
# Append the predicted values...
testPredictions = []
for elem in predictions:
    testPredictions.append(elem[0][0])
# Original closing prices
close = []
for i in priceVolumeTargetScaledDataframe:
    close.append(i[0])
# Align dates with the (length N-1) prediction/close arrays and plot.
dataFramePrediction = stock[1:][['Date']]
dataFramePrediction['Predictions'] = testPredictions
dataFramePrediction['Adj Close'] = close
plotlyPlot("LSTM Stock Performance Results" , dataFramePrediction)
| [
"tensorflow.keras.layers.Input",
"numpy.reshape",
"tensorflow.keras.Model",
"pandas_datareader.data.DataReader",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.LSTM",
"plotly.express.line",
"tensorflow.keras.layers.Dense",
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScal... | [((3244, 3278), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3256, 3278), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3650, 3689), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'train_size': '(0.65)'}), '(X, Y, train_size=0.65)\n', (3666, 3689), False, 'from sklearn.model_selection import train_test_split\n'), ((727, 747), 'plotly.express.line', 'px.line', ([], {'title': 'title'}), '(title=title)\n', (734, 747), True, 'import plotly.express as px\n'), ((951, 1065), 'pandas.DataFrame', 'pd.DataFrame', (["{'Date': priceDataFrame['Date'], 'Close': priceDataFrame[name], 'Volume':\n volumeDataFrame[name]}"], {}), "({'Date': priceDataFrame['Date'], 'Close': priceDataFrame[name],\n 'Volume': volumeDataFrame[name]})\n", (963, 1065), True, 'import pandas as pd\n'), ((1550, 1610), 'numpy.reshape', 'np.reshape', (['X_Train', '(X_Train.shape[0], X_Train.shape[1], 1)'], {}), '(X_Train, (X_Train.shape[0], X_Train.shape[1], 1))\n', (1560, 1610), True, 'import numpy as np\n'), ((1625, 1682), 'numpy.reshape', 'np.reshape', (['X_Test', '(X_Test.shape[0], X_Test.shape[1], 1)'], {}), '(X_Test, (X_Test.shape[0], X_Test.shape[1], 1))\n', (1635, 1682), True, 'import numpy as np\n'), ((1755, 1815), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(xTrain.shape[1], xTrain.shape[2])'}), '(shape=(xTrain.shape[1], xTrain.shape[2]))\n', (1773, 1815), False, 'from tensorflow import keras\n'), ((2250, 2301), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputLayer', 'outputs': 'outputLayer'}), '(inputs=inputLayer, outputs=outputLayer)\n', (2261, 2301), False, 'from tensorflow import keras\n'), ((2633, 2654), 'pandas.to_datetime', 'pd.to_datetime', (['Start'], {}), '(Start)\n', (2647, 2654), True, 'import pandas as pd\n'), ((2674, 2693), 'pandas.to_datetime', 'pd.to_datetime', (['End'], {}), '(End)\n', 
(2688, 2693), True, 'import pandas as pd\n'), ((2707, 2766), 'pandas_datareader.data.DataReader', 'web.DataReader', (['Ticker', '"""yahoo"""', 'modifiedStart', 'modifiedEnd'], {}), "(Ticker, 'yahoo', modifiedStart, modifiedEnd)\n", (2721, 2766), True, 'import pandas_datareader.data as web\n'), ((1907, 1952), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['(150)'], {'return_sequences': '(True)'}), '(150, return_sequences=True)\n', (1924, 1952), False, 'from tensorflow import keras\n'), ((1980, 2025), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['(150)'], {'return_sequences': '(True)'}), '(150, return_sequences=True)\n', (1997, 2025), False, 'from tensorflow import keras\n'), ((2048, 2093), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['(150)'], {'return_sequences': '(True)'}), '(150, return_sequences=True)\n', (2065, 2093), False, 'from tensorflow import keras\n'), ((2145, 2187), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2163, 2187), False, 'from tensorflow import keras\n')] |
#!/usr/bin/python3
"""Analyse metadynamics trajectories from xtb."""
import argparse
import numpy as np
import matplotlib.pyplot as plt
import MDAnalysis as mda
import MDAnalysis.analysis.pca as pca
from MDAnalysis.lib import distances
from scipy.constants import calorie
from scipy.constants import kilo
from scipy.constants import N_A
from scipy.constants import physical_constants
from scipy import stats
from rmsd import element
from rmsd import read_xyz
# One Hartree in joules (CODATA value via scipy); used below to convert
# xtb energies to kcal/mol.
hartree, _, _ = physical_constants["Hartree energy"]

# Commands understood by the interactive prompt loop in main().
commands = {
    "q": "exit",
    "h": "help",
    "e": "energies",
    "s": "current selection",
    "p": "graph",
    "*anything else*": "any MDAnalysis selection, see <https://userguide.mdanalysis.org/selections.html>.",
}
def main():
    """Run main procedure.

    Reads one or more xtb metadynamics trajectories (XYZ), builds an in-memory
    MDAnalysis universe from them, and enters an interactive prompt for
    inspecting energies, geometric observables (bond/angle/dihedral vs.
    energy) and PCA projections of an atom selection.
    """
    # TODO(schneiderfelipe): accept multiple files
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("traj_files", nargs="+")
    args = parser.parse_args()

    gnorms = []
    energies = []
    all_atomnos = []
    all_atomcoords = []
    for traj_file in args.traj_files:
        atomnos, comments, atomcoords = read_xyz(traj_file)
        all_atomnos.extend(atomnos)
        all_atomcoords.extend(atomcoords)
        for comment in comments:
            # xtb comment-line fields: index 1 is the energy (Hartree),
            # index 3 the gradient norm — presumably stable across xtb
            # versions (confirm for other writers).
            fields = comment.split()
            gnorms.append(float(fields[3]))
            energies.append(float(fields[1]))
    # Energies relative to the minimum, converted Hartree -> kcal/mol.
    energies = np.array(energies)
    energies -= energies.min()
    energies *= hartree * N_A / (kilo * calorie)

    u = mda.Universe.empty(n_atoms=len(all_atomnos[0]), trajectory=True)
    u.add_TopologyAttr("type", [element[i] for i in all_atomnos[0]])
    u.load_new(all_atomcoords, order="fac")
    print(u)

    selection = None
    print("(enter 'q' for exit, 'h' for help)")
    while True:
        code = input("select> ").strip().split()
        if not code:
            # BUG FIX: an empty input line used to raise IndexError on code[0].
            continue
        if code[0] == "q":
            break
        elif code[0] == "h":
            for key in commands:
                print(f"{key:15s}: {commands[key]}")
        elif code[0] == "e":
            # Energy and gradient-norm profiles over the trajectory.
            fig, ax = plt.subplots(2)
            ax[0].plot(energies)
            ax[0].set_xlabel("frame")
            ax[0].set_ylabel("energy (kcal/mol)")
            ax[1].plot(gnorms)
            ax[1].set_xlabel("frame")
            ax[1].set_ylabel("grad. norm (Eh/a0)")
            plt.show()
        elif code[0] == "s":
            print(selection)
            if selection is not None:
                print(selection_text)
        elif code[0] == "pca":
            if selection is None:
                print("empty selection, doing nothing")
                continue
            p = pca.PCA(u, select=selection_text)
            p.run()
            # Smallest number of components explaining >95% of the variance.
            n_pcs = np.where(p.cumulated_variance > 0.95)[0][0]
            print(n_pcs)
            print(p.cumulated_variance[0:n_pcs])
            pca_space = p.transform(selection, n_components=n_pcs)
            print(pca_space)
            print(pca.cosine_content(pca_space, 0))
        elif code[0] == "p":
            if selection is None:
                print("empty selection, doing nothing")
                continue

            # 2/3/4 selected atoms -> bond/angle/dihedral observable.
            n = len(selection)
            if n == 2:
                data_label = "bond length (Å)"
            elif n == 3:
                data_label = "bond angle (°)"
            elif n == 4:
                data_label = "dihedral angle (°)"
            else:
                print("too few or too many indices")
                continue

            data = []
            for i, (e, ts) in enumerate(zip(energies, u.trajectory)):
                if n == 2:
                    d = distances.calc_bonds(
                        selection[0].position, selection[1].position
                    )
                elif n == 3:
                    d = np.degrees(
                        distances.calc_angles(
                            selection[0].position,
                            selection[1].position,
                            selection[2].position,
                        )
                    )
                elif n == 4:
                    d = np.degrees(
                        distances.calc_dihedrals(
                            selection[0].position,
                            selection[1].position,
                            selection[2].position,
                            selection[3].position,
                        )
                    )

                data.append(d)
                # Progress line every 100 frames and on the last frame.
                if i % 100 == 0 or i == len(u.trajectory) - 1:
                    print(
                        f"frame = {ts.frame:4d}: e = {e:5.1f} kcal/mol, {data_label.split('(')[0][:-1]} = {d:7.3f} {data_label[-2]}"
                    )
            data = np.array(data)

            fig, ax = plt.subplots(1, 2)
            ax[0].plot(data)
            ax[0].set_xlabel("frame")
            ax[0].set_ylabel(data_label)
            ax[1].plot(energies, data, "o", label="data points")
            ax[1].set_xlabel("energy (kcal/mol)")
            ax[1].set_ylabel(data_label)

            # Bin width in the observable's units (Å or degrees).
            if n == 2:
                dx = 0.1
            elif n == 3:
                dx = 10.0
            elif n == 4:
                dx = 10.0
            res = stats.binned_statistic(
                data, energies, "min", min(25, (data.max() - data.min()) / dx)
            )
            # Fill empty bins (NaN) by interpolating neighbouring minima.
            mask = np.isnan(res.statistic)
            res.statistic[mask] = np.interp(
                np.flatnonzero(mask), np.flatnonzero(~mask), res.statistic[~mask]
            )
            ax[1].barh(
                (res.bin_edges[:-1] + res.bin_edges[1:]) / 2,
                res.statistic,
                align="center",
                height=res.bin_edges[1:] - res.bin_edges[:-1],
                alpha=0.25,
                label="binned min. energies",
            )
            ax[1].legend()
            plt.show()
        else:
            # Anything else is treated as an MDAnalysis selection string.
            try:
                selection_text = " ".join(code)
                selection = u.select_atoms(selection_text)
            except mda.exceptions.SelectionError as e:
                print(e)
    print("bye")
# Script entry point: only run the interactive session when executed directly.
if __name__ == "__main__":
    main()
| [
"MDAnalysis.lib.distances.calc_dihedrals",
"rmsd.read_xyz",
"argparse.ArgumentParser",
"numpy.where",
"MDAnalysis.analysis.pca.PCA",
"numpy.flatnonzero",
"MDAnalysis.lib.distances.calc_bonds",
"MDAnalysis.lib.distances.calc_angles",
"numpy.array",
"numpy.isnan",
"MDAnalysis.analysis.pca.cosine_c... | [((853, 897), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (876, 897), False, 'import argparse\n'), ((1409, 1427), 'numpy.array', 'np.array', (['energies'], {}), '(energies)\n', (1417, 1427), True, 'import numpy as np\n'), ((1136, 1155), 'rmsd.read_xyz', 'read_xyz', (['traj_file'], {}), '(traj_file)\n', (1144, 1155), False, 'from rmsd import read_xyz\n'), ((2054, 2069), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (2066, 2069), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2332, 2334), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2665), 'MDAnalysis.analysis.pca.PCA', 'pca.PCA', (['u'], {'select': 'selection_text'}), '(u, select=selection_text)\n', (2639, 2665), True, 'import MDAnalysis.analysis.pca as pca\n'), ((2939, 2971), 'MDAnalysis.analysis.pca.cosine_content', 'pca.cosine_content', (['pca_space', '(0)'], {}), '(pca_space, 0)\n', (2957, 2971), True, 'import MDAnalysis.analysis.pca as pca\n'), ((4694, 4708), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4702, 4708), True, 'import numpy as np\n'), ((4732, 4750), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (4744, 4750), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5380), 'numpy.isnan', 'np.isnan', (['res.statistic'], {}), '(res.statistic)\n', (5365, 5380), True, 'import numpy as np\n'), ((6027, 6037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6035, 6037), True, 'import matplotlib.pyplot as plt\n'), ((2707, 2744), 'numpy.where', 'np.where', (['(p.cumulated_variance > 0.95)'], {}), '(p.cumulated_variance > 0.95)\n', (2715, 2744), True, 'import numpy as np\n'), ((5442, 5462), 'numpy.flatnonzero', 'np.flatnonzero', (['mask'], {}), '(mask)\n', (5456, 5462), True, 'import numpy as np\n'), ((5464, 5485), 'numpy.flatnonzero', 'np.flatnonzero', 
(['(~mask)'], {}), '(~mask)\n', (5478, 5485), True, 'import numpy as np\n'), ((3605, 3671), 'MDAnalysis.lib.distances.calc_bonds', 'distances.calc_bonds', (['selection[0].position', 'selection[1].position'], {}), '(selection[0].position, selection[1].position)\n', (3625, 3671), False, 'from MDAnalysis.lib import distances\n'), ((3807, 3901), 'MDAnalysis.lib.distances.calc_angles', 'distances.calc_angles', (['selection[0].position', 'selection[1].position', 'selection[2].position'], {}), '(selection[0].position, selection[1].position,\n selection[2].position)\n', (3828, 3901), False, 'from MDAnalysis.lib import distances\n'), ((4120, 4240), 'MDAnalysis.lib.distances.calc_dihedrals', 'distances.calc_dihedrals', (['selection[0].position', 'selection[1].position', 'selection[2].position', 'selection[3].position'], {}), '(selection[0].position, selection[1].position,\n selection[2].position, selection[3].position)\n', (4144, 4240), False, 'from MDAnalysis.lib import distances\n')] |
import cv2
import tensorflow as tf
import numpy as np
# Directory where the TensorBoard event file (the serialized graph) is written.
OUTPUT_PATH = "../events/"
# Convolution hyper-parameters shared by the four equivalent implementations
# (nn / layer / slim / keras) defined below.
NUM_FILTERS = 10
FILTER_SIZE = (3, 3)
STRIDES = (1, 1)
def nn(input_node):
    """Convolution built from raw tf.nn ops: explicit weight/bias variables
    plus tf.nn.conv2d.

    :param input_node: float32 NHWC tensor with 3 input channels.
    :return: the convolved, bias-shifted tensor (NUM_FILTERS output channels).
    """
    with tf.variable_scope('nn'):
        w = tf.get_variable(
            name='weight',
            shape=[FILTER_SIZE[0], FILTER_SIZE[1], 3, NUM_FILTERS],
            dtype=tf.float32)
        b = tf.get_variable(
            name='bias',
            shape=[NUM_FILTERS],
            dtype=tf.float32)
        # Use the shared STRIDES constant (was hard-coded (1, 1)) so this
        # variant stays consistent with layer()/slim()/keras().
        out = tf.nn.conv2d(input_node, filter=w, strides=STRIDES,
                           padding='SAME')
        out = out + b
    return out
def layer(input_node):
    """Same convolution expressed with the tf.layers high-level API."""
    return tf.layers.conv2d(
        input_node,
        NUM_FILTERS,
        FILTER_SIZE,
        strides=STRIDES,
        padding='same',
        name='layer')
def slim(input_node):
    """Same convolution expressed with tf.contrib.slim (no activation)."""
    return tf.contrib.slim.conv2d(
        input_node,
        NUM_FILTERS,
        FILTER_SIZE,
        stride=STRIDES,
        padding='SAME',
        activation_fn=None,
        scope='slim')
def keras(input_node):
    """Same convolution wrapped in a one-layer tf.keras Sequential model."""
    conv = tf.keras.layers.Conv2D(
        NUM_FILTERS, FILTER_SIZE, strides=STRIDES, padding='same')
    model = tf.keras.Sequential([conv], name='keras')
    return model(input_node)
if __name__ == '__main__':
    # Build the same convolution four different ways on one placeholder and
    # verify they all produce the same output shape.
    node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32)
    nn_out = nn(node)
    layer_out = layer(node)
    slim_out = slim(node)
    keras_out = keras(node)
    # Dump the default graph so it can be inspected in TensorBoard.
    tf.summary.FileWriter(OUTPUT_PATH, graph=tf.get_default_graph())
    # Load a single image and add a leading batch dimension (NHWC, batch=1).
    image = cv2.imread('ithome.jpg')
    image = np.expand_dims(image, 0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Run all four variants in one session call against the same input.
        nn_result, layer_result, slim_result, keras_result = \
            sess.run([nn_out, layer_out, slim_out, keras_out], feed_dict={node: image})
        print(f'nn shape: {nn_result.shape}')
        print(f'layer shape: {layer_result.shape}')
        print(f'slim shape: {slim_result.shape}')
        print(f'keras shape: {keras_result.shape}')
| [
"tensorflow.nn.conv2d",
"tensorflow.local_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.get_variable",
"tensorflow.keras.layers.Conv2D",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.layers.conv2d",
"numpy.expand_dims",
... | [((645, 750), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input_node', 'NUM_FILTERS', 'FILTER_SIZE'], {'strides': 'STRIDES', 'padding': '"""same"""', 'name': '"""layer"""'}), "(input_node, NUM_FILTERS, FILTER_SIZE, strides=STRIDES,\n padding='same', name='layer')\n", (661, 750), True, 'import tensorflow as tf\n'), ((797, 927), 'tensorflow.contrib.slim.conv2d', 'tf.contrib.slim.conv2d', (['input_node', 'NUM_FILTERS', 'FILTER_SIZE'], {'stride': 'STRIDES', 'padding': '"""SAME"""', 'activation_fn': 'None', 'scope': '"""slim"""'}), "(input_node, NUM_FILTERS, FILTER_SIZE, stride=STRIDES,\n padding='SAME', activation_fn=None, scope='slim')\n", (819, 927), True, 'import tensorflow as tf\n'), ((1212, 1271), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 100, 100, 3]', 'dtype': 'tf.float32'}), '(shape=[None, 100, 100, 3], dtype=tf.float32)\n', (1226, 1271), True, 'import tensorflow as tf\n'), ((1460, 1484), 'cv2.imread', 'cv2.imread', (['"""ithome.jpg"""'], {}), "('ithome.jpg')\n", (1470, 1484), False, 'import cv2\n'), ((1497, 1521), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1511, 1521), True, 'import numpy as np\n'), ((168, 191), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""nn"""'], {}), "('nn')\n", (185, 191), True, 'import tensorflow as tf\n'), ((205, 313), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""weight"""', 'shape': '[FILTER_SIZE[0], FILTER_SIZE[1], 3, NUM_FILTERS]', 'dtype': 'tf.float32'}), "(name='weight', shape=[FILTER_SIZE[0], FILTER_SIZE[1], 3,\n NUM_FILTERS], dtype=tf.float32)\n", (220, 313), True, 'import tensorflow as tf\n'), ((359, 426), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""bias"""', 'shape': '[NUM_FILTERS]', 'dtype': 'tf.float32'}), "(name='bias', shape=[NUM_FILTERS], dtype=tf.float32)\n", (374, 426), True, 'import tensorflow as tf\n'), ((478, 544), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_node'], {'filter': 'w', 
'strides': '(1, 1)', 'padding': '"""SAME"""'}), "(input_node, filter=w, strides=(1, 1), padding='SAME')\n", (490, 544), True, 'import tensorflow as tf\n'), ((1532, 1544), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1542, 1544), True, 'import tensorflow as tf\n'), ((1040, 1126), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['NUM_FILTERS', 'FILTER_SIZE'], {'strides': 'STRIDES', 'padding': '"""same"""'}), "(NUM_FILTERS, FILTER_SIZE, strides=STRIDES, padding=\n 'same')\n", (1062, 1126), True, 'import tensorflow as tf\n'), ((1423, 1445), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1443, 1445), True, 'import tensorflow as tf\n'), ((1571, 1604), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1602, 1604), True, 'import tensorflow as tf\n'), ((1623, 1655), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (1653, 1655), True, 'import tensorflow as tf\n')] |
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# This python file is a library of python functions which provides modular
# common set-up commands for solving a problem in OpenCMISS.
# Each function has a range of input options and calls the appropriate
# OpenCMISS linked commands to set up the problem. This is a high
# level library that will allow shorter scripting for solving cardiac mechanics
# simulations and also making it easier to debug.
# Author: <NAME>
# Start Date: 20th October 2014
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
from opencmiss.iron import iron
import numpy
import math
import os
# =================================================================================#
def BasicSetUp(regionUserNumber, coordinateSystemUserNumber):
    """Initialise the computational environment: parallel-node information, a
    3D rectangular-Cartesian coordinate system and the world region.

    :return: (numberOfComputationalNodes, computationalNodeNumber,
        coordinateSystem, region)
    """
    # Parallel-computing information.  (Diagnostics can be enabled here via
    # iron.DiagnosticsSetOn when debugging domain mappings.)
    numberOfComputationalNodes = iron.ComputationalNumberOfNodesGet()
    computationalNodeNumber = iron.ComputationalNodeNumberGet()

    # Three-dimensional rectangular-Cartesian coordinate system.
    csys = iron.CoordinateSystem()
    csys.CreateStart(coordinateSystemUserNumber)
    csys.dimension = 3
    csys.CreateFinish()

    # World region referencing that coordinate system.
    world = iron.Region()
    world.CreateStart(regionUserNumber, iron.WorldRegion)
    world.label = "Region"
    world.coordinateSystem = csys
    world.CreateFinish()

    print("----> Set up coordinate system and world region <----\n")
    return numberOfComputationalNodes, computationalNodeNumber, csys, world
# =================================================================================#
#=================================================================================#
def BasisFunction(basisUserNumber, numOfXi, option, collapsed):
    """Create the basis function(s) selected by *option*.

    :param basisUserNumber: a single user number when option[0]==1, or a pair
        [quadratic, cubic] when option[0]==2.
    :param numOfXi: number of xi directions (mesh dimension).
    :param option: option[0]==1 -> trilinear Lagrange geometry basis;
        option[0]==2 -> quadratic Lagrange + tricubic Hermite bases.
    :param collapsed: trilinear only -- additionally build a collapsed basis
        for apical (wedge) elements.
    :return: one basis, (basis, basisCol) when collapsed, or
        (quadBasis, cubicBasis) for option[0]==2.
    """
    if option[0] == 1:
        # Trilinear basis function for interpolation of geometry.
        basis = iron.Basis()
        basis.CreateStart(basisUserNumber)
        basis.numberOfXi = numOfXi
        basis.type = iron.BasisTypes.LAGRANGE_HERMITE_TP
        basis.interpolationXi = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE] * numOfXi
        basis.QuadratureLocalFaceGaussEvaluateSet(True)
        # Generalised from the hard-coded [2, 2, 2] so meshes with
        # numOfXi != 3 are handled too (identical behaviour for numOfXi == 3).
        basis.quadratureNumberOfGaussXi = [2] * numOfXi
        basis.CreateFinish()
        # Output for diagnostics
        print("----> Set up trilinear basis functions for geometry, use element based interpolation for pressure <----\n")
        if collapsed:
            basisCol = iron.Basis()
            basisCol.CreateStart(basisUserNumber+1)
            basisCol.numberOfXi = numOfXi
            basisCol.type = iron.BasisTypes.LAGRANGE_HERMITE_TP
            basisCol.interpolationXi = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE] * numOfXi
            basisCol.QuadratureLocalFaceGaussEvaluateSet(True)
            basisCol.quadratureNumberOfGaussXi = [2] * numOfXi
            # Collapse xi1 at the xi0 end to form wedge elements at the apex.
            basisCol.CollapsedXiSet([iron.BasisXiCollapse.XI_COLLAPSED, iron.BasisXiCollapse.COLLAPSED_AT_XI0, iron.BasisXiCollapse.NOT_COLLAPSED])
            print("---> Set up collapsed basis functions for apical elements")
            basisCol.CreateFinish()
            return basis, basisCol
        return basis
    elif option[0] == 2:
        # Quadratic Lagrange basis (e.g. for fibre/auxiliary interpolation).
        quadBasis = iron.Basis()
        quadBasis.CreateStart(basisUserNumber[0])
        quadBasis.InterpolationXiSet([iron.BasisInterpolationSpecifications.QUADRATIC_LAGRANGE]*numOfXi)
        quadBasis.QuadratureNumberOfGaussXiSet([4]*numOfXi)
        quadBasis.QuadratureLocalFaceGaussEvaluateSet(True)
        quadBasis.CreateFinish()
        # Tricubic Hermite basis function for interpolation of geometry.
        cubicBasis = iron.Basis()  # For geometry.
        cubicBasis.CreateStart(basisUserNumber[1])
        cubicBasis.InterpolationXiSet([iron.BasisInterpolationSpecifications.CUBIC_HERMITE] * numOfXi)
        cubicBasis.QuadratureNumberOfGaussXiSet([4] * numOfXi)
        cubicBasis.QuadratureLocalFaceGaussEvaluateSet(True)
        cubicBasis.CreateFinish()
        # Output for diagnostics
        print("----> Set up tricubic hermite basis function for geometry and trilinear for hydrostatic pressure <----\n")
        return quadBasis, cubicBasis
#=================================================================================#
#=================================================================================#
def GeneratedMesh(generatedMeshUserNumber, meshUserNumber, region, bases, dimensions, elements):
    """Build a regular generated mesh with the given extent and element counts.

    :param bases: basis (or list of bases) to mesh with.
    :param dimensions: physical extent of the regular block.
    :param elements: number of elements in each xi direction.
    :return: (generatedMesh, mesh)
    """
    genMesh = iron.GeneratedMesh()
    genMesh.CreateStart(generatedMeshUserNumber, region)
    genMesh.TypeSet(iron.GeneratedMeshTypes.REGULAR)
    genMesh.BasisSet(bases)
    genMesh.ExtentSet(dimensions)
    genMesh.NumberOfElementsSet(elements)
    mesh = iron.Mesh()
    genMesh.CreateFinish(meshUserNumber, mesh)
    return genMesh, mesh
#=================================================================================#
#=================================================================================#
def DecompositionSetUp(decompositionUserNumber, mesh, numberOfComputationalNodes):
    """Partition *mesh* into one domain per computational node.

    :return: the finished decomposition.
    """
    decomp = iron.Decomposition()
    decomp.CreateStart(decompositionUserNumber, mesh)
    decomp.type = iron.DecompositionTypes.CALCULATED
    decomp.NumberOfDomainsSet(numberOfComputationalNodes)
    # Face calculation enabled -- presumably required by face-based boundary
    # conditions downstream; confirm against the solver set-up.
    decomp.CalculateFacesSet(True)
    decomp.CreateFinish()
    print("----> Set up decomposition <----\n")
    return decomp
#=================================================================================#
#=================================================================================#
def GeometricFieldSetUp(geometricFieldUserNumber, region, decomposition, option):
    """Create the geometric field on *region*.

    For tricubic Hermite bases (option[0]==2) a derivative scaling scheme is
    selected by option[1]: 1 = unit, 2 = arithmetic mean.
    :return: the finished geometric field.
    """
    field = iron.Field()
    field.CreateStart(geometricFieldUserNumber, region)
    field.MeshDecompositionSet(decomposition)
    field.VariableLabelSet(iron.FieldVariableTypes.U, "Geometry")
    if option[0] == 2 and option[1] == 1:
        field.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
        print("----> Set up tricubic Hermite geometric field with unit scaling <----\n")
    elif option[0] == 2 and option[1] == 2:
        field.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
        print("----> Set up tricubic Hermite geometric field with arithmetic mean scaling <----\n")
    field.CreateFinish()
    return field
#=================================================================================#
#=================================================================================#
def GeometricFieldInitialise(xNodes, yNodes, zNodes, geometricField, numNodes, option):
    """Load nodal coordinates (and, for tricubic Hermite bases, first
    derivatives) into the geometric field.

    :param xNodes, yNodes, zNodes: nodal coordinate values, one entry per node.
    :param numNodes: total node count, used for the derivative initialisation.
    :param option: option[0]==2 triggers the Hermite derivative set-up.
    :return: the updated geometric field.
    """
    noDeriv = iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV
    # Nodal coordinate values, one component at a time (x=1, y=2, z=3).
    for component, coords in enumerate((xNodes, yNodes, zNodes), 1):
        for node, value in enumerate(coords, 1):
            geometricField.ParameterSetUpdateNodeDP(
                iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                1, noDeriv, node, component, value)
    if option[0] == 2:
        # Tricubic Hermite basis: seed the S1/S2/S3 arc-length derivatives.
        # NOTE(review): each derivative is seeded with the maximum coordinate
        # in its direction -- presumably a crude length-scale guess; confirm.
        derivSeeds = (
            (iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1, 1, max(xNodes)),
            (iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2, 2, max(yNodes)),
            (iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3, 3, max(zNodes)),
        )
        for node in range(1, numNodes + 1):
            for deriv, component, value in derivSeeds:
                geometricField.ParameterSetUpdateNodeDP(
                    iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                    1, deriv, node, component, value)
    # Output
    print("----> Initialised geometric nodal values <----\n")
    return geometricField
#=================================================================================#
#=================================================================================#
def GeometricFieldExport(region, filename):
    """Export the undeformed geometry of *region* to ./results/<filename>
    as FORTRAN-formatted exnode/exelem files.

    :param region: iron region whose fields are exported.
    :param filename: base file name (no extension) for the exported files.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists / os.makedirs pair.
    os.makedirs("./results", exist_ok=True)
    exportField = iron.Fields()
    exportField.CreateRegion(region)
    exportField.NodesExport("./results/" + filename, "FORTRAN")
    exportField.ElementsExport("./results/" + filename, "FORTRAN")
    exportField.Finalise()
    # Output
    print("----> Export undeformed geometry <----\n")
#=================================================================================#
#=================================================================================#
def ExtractNodesElements(filename):
    """Parse exported CMISS ``.exnode``/``.exelem`` files.

    :param filename: base path without extension; ``filename + '.exnode'``
        and ``filename + '.exelem'`` are read.
    :return: ``(nodes, elements)`` where ``nodes`` is a numpy array of string
        tokens indexed [component (x/y/z), node, token] and ``elements`` is a
        numpy array of element-connectivity token lists.  Returns ``None`` if
        either file cannot be opened.

    Fix over the original: both file handles are now closed on every path
    (they previously leaked, including when only the exelem open failed).
    """
    try:
        fid_node = open(filename + '.exnode', 'r')
    except IOError:
        print('ERROR: Unable to open ' + filename + '.exnode')
        return
    with fid_node:
        try:
            fid_elem = open(filename + '.exelem', 'r')
        except IOError:
            print('ERROR: Unable to open ' + filename + '.exelem')
            return
        with fid_elem:
            # Skip the fixed-size headers of this export format: 85 lines in
            # the exelem file, 6 lines in the exnode file.
            for _ in range(85):
                fid_elem.readline()
            for _ in range(6):
                fid_node.readline()
            # Each node record is a header line followed by one line per
            # coordinate component; tokens are kept as strings.
            nodesX, nodesY, nodesZ = [], [], []
            header = fid_node.readline()
            while header != '':
                nodesX.append(fid_node.readline().split())
                nodesY.append(fid_node.readline().split())
                nodesZ.append(fid_node.readline().split())
                header = fid_node.readline()
            # Indices: component (x, y, z), node number, token.
            nodes = numpy.array([nodesX, nodesY, nodesZ])
            # Each element record spans six lines; the third carries the
            # connectivity tokens (the others are format-specific and skipped).
            elements = []
            header = fid_elem.readline()
            while header.split() != []:
                fid_elem.readline()
                elements.append(fid_elem.readline().split())
                fid_elem.readline()
                fid_elem.readline()
                header = fid_elem.readline()
            return nodes, numpy.array(elements)
#=================================================================================#
#=================================================================================#
def FibreFieldSetUp(fibreFieldUserNumber, region, decomposition, geometricField, option, microstructure, inputNodes):
    """Create the fibre field describing the tissue microstructure and, for a
    heterogeneous description, load nodal fibre angles from *inputNodes*.

    :param option: [basis type (1=trilinear, 2=tricubic Hermite), scaling
        (1=unit, 2=arithmetic mean; tricubic only)].
    :param microstructure: 1 = homogeneous (constant interpolation),
        2 = heterogeneous (node-based on mesh component 1).
    :param inputNodes: angle provider -- assumed to expose ``num_nodes`` and
        ``node_values("fibers", "x"/"y"/"z", n)`` returning a sequence whose
        first entry is an angle in degrees (TODO confirm against caller).
    :return: the created fibre field.
    """
    # This function sets up the fibre field and initialises the values.
    # Sets up the fibre field.
    fibreField = iron.Field()
    fibreField.CreateStart(fibreFieldUserNumber, region)
    fibreField.TypeSet(iron.FieldTypes.FIBRE)
    fibreField.MeshDecompositionSet(decomposition)
    fibreField.GeometricFieldSet(geometricField)
    fibreField.VariableLabelSet(iron.FieldVariableTypes.U, "Fibre")
    if option[0] == 1:
        # Trilinear interpolation.
        fibreField.NumberOfVariablesSet(1)
        fibreField.NumberOfComponentsSet(iron.FieldVariableTypes.U, 3)
        if microstructure == 1:
            # Homogeneous fibre field: one constant angle per component.
            for component in [1, 2, 3]:
                fibreField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                     iron.FieldInterpolationTypes.CONSTANT)
        elif microstructure == 2:
            # Heterogeneous fibre field: node-based on mesh component 1.
            for component in [1, 2, 3]:
                fibreField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, component, 1)
    elif option[0] == 2:
        # Tricubic Hermite interpolation
        if option[1] == 1:
            fibreField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
            # Output
            print("----> Set up tricubic hermite fibre field with unit scaling <----\n")
        elif option[1] == 2:
            fibreField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
            # Output
            print("----> Set up tricubic hermite fibre field with arithmetic mean scaling <----\n")
        if microstructure == 1:
            # Homogeneous fibre field.
            for component in [1, 2, 3]:
                fibreField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                     iron.FieldInterpolationTypes.CONSTANT)
        elif microstructure == 2:
            # Heterogeneous fibre field using linear interpolation.
            for component in [1, 2, 3]:
                fibreField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, component, 1)
    fibreField.CreateFinish()
    #####################################################
    if microstructure == 2:
        # Inhomogeneous fibre field using linear interpolation.
        # Angles are read in degrees and converted to radians before being
        # written to the field, one value per node and component.
        for n in range(1, inputNodes.num_nodes+1):
            for component in [1,2,3]:
                component_name = ["x","y","z"][component-1]
                angle = inputNodes.node_values("fibers", component_name, n)
                angle = float(angle[0])
                angle = angle*math.pi/180
                fibreField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                                                    1, iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, n,
                                                    component, angle)
        print("----> Initialised heterogeneous fibre angles <----\n")
    return fibreField
#=================================================================================#
#=================================================================================#
def MaterialFieldSetUpAuto(materialFieldUserNumber, equationsSet, params, cellMLOption):
    """Create the material field through the equations set (used with a CellML
    constitutive description) and initialise its parameter values.

    :param params: iterable of material parameter values; one U-variable
        component is initialised per entry.
    :param cellMLOption: cellMLOption[0] toggles gauss-point-based
        interpolation of the material components.
    :return: (materialField, equationsSet)
    """
    materialField = iron.Field()
    equationsSet.MaterialsCreateStart(materialFieldUserNumber, materialField)
    materialField.VariableLabelSet(iron.FieldVariableTypes.U, "Material")
    if cellMLOption[0]:
        print("----> CellML Material Field using gauss point interpolation <----\n")
        for idx in range(1, len(params) + 1):
            materialField.ComponentInterpolationSet(
                iron.FieldVariableTypes.U, idx,
                iron.FieldInterpolationTypes.GAUSS_POINT_BASED)
    materialField.CreateFinish()
    # Seed every component with its parameter value.
    for idx, value in enumerate(params, 1):
        materialField.ComponentValuesInitialiseDP(
            iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
            idx, value)
    print("----> Initialised " + str(len(params)) + " material parameters <----\n")
    return materialField, equationsSet
#=================================================================================#
#=================================================================================#
def MaterialFieldSetUp(materialFieldUserNumber, region, decomposition, geometricField, params, option, cellMLOption):
    """Create a stand-alone material field on *region* and initialise its
    parameter values.

    :param params: material parameter values; one U-variable component each.
    :param option: [basis type, scaling]; for tricubic Hermite
        (option[0]==2) the scaling type is reset below from option[1].
    :param cellMLOption: cellMLOption[0] selects gauss-point-based
        interpolation (CellML constitutive models) instead of constant.
    :return: the finished, initialised material field.
    """
    # Sets up material field, and apply field to mesh component.
    materialField = iron.Field()
    materialField.CreateStart(materialFieldUserNumber, region)
    materialField.TypeSet(iron.FieldTypes.MATERIAL)
    materialField.MeshDecompositionSet(decomposition)
    materialField.GeometricFieldSet(geometricField)
    materialField.VariableLabelSet(iron.FieldVariableTypes.U, "Material")
    materialField.NumberOfVariablesSet(1)
    materialField.NumberOfComponentsSet(iron.FieldVariableTypes.U,len(params))
    # NOTE(review): arithmetic-mean scaling is set unconditionally here and
    # possibly set again in the option branch below; the later call wins.
    materialField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
    if cellMLOption[0]:
        print("----> CellML Material Field using gauss point interpolation <----\n")
        for component, param in enumerate(params, 1):
            materialField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                    iron.FieldInterpolationTypes.GAUSS_POINT_BASED)
    else:
        print("----> Material Field using constant interpolation <----\n")
        for component, param in enumerate(params, 1):
            materialField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                    iron.FieldInterpolationTypes.CONSTANT)
    if option[0] == 2:
        # Tricubic Hermite
        if option[1] == 1:
            materialField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
        elif option[1] == 2:
            materialField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
    materialField.CreateFinish()
    # Seed each component with its parameter value and push the updates.
    for component, param in enumerate(params, 1):
        materialField.ComponentValuesInitialiseDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                                                  component, param)
    materialField.ParameterSetUpdateStart(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    materialField.ParameterSetUpdateFinish(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    # Output
    print("----> Initialised " + str(len(params)) + " material parameters <----\n")
    return materialField
#=================================================================================#
#=================================================================================#
def DependentFieldSetUp(dependentFieldUserNumber, equationsSet, option, cellMLOption):
    """Create the dependent field through the equations set.

    Components 1-3 hold the deformed coordinates and component 4 the
    hydrostatic pressure (element-based for trilinear, linearly interpolated
    and node-based for tricubic Hermite).  With CellML enabled, the U1/U2
    variables are labelled Strain and Stress and mapped to mesh component 1.
    :return: (dependentField, equationsSet)
    """
    # Set up dependent field
    dependentField = iron.Field()
    equationsSet.DependentCreateStart(dependentFieldUserNumber, dependentField)
    dependentField.VariableLabelSet(iron.FieldVariableTypes.U, "Dependent")
    if cellMLOption[0]:
        print('----> Labelling dependent field strain and stress <----\n')
        dependentField.VariableLabelSet(iron.FieldVariableTypes.U1, "Strain")
        dependentField.VariableLabelSet(iron.FieldVariableTypes.U2, "Stress")
    if option[0] == 1:
        # Trilinear
        for i in [1, 2, 3]:
            dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, i, 1)
            dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.DELUDELN, i, 1)
        dependentField.ComponentInterpolationSet(iron.FieldVariableTypes.U, 4,
                                                 iron.FieldInterpolationTypes.ELEMENT_BASED)
        dependentField.ComponentInterpolationSet(iron.FieldVariableTypes.DELUDELN, 4,
                                                 iron.FieldInterpolationTypes.ELEMENT_BASED)
        # Output
        print("----> Use element based interpolation for hydrostatic pressure <----\n")
    elif option[0] == 2:
        # Tricubic Hermite; pressure (component 4) lives on mesh component 2
        # (the linear basis).
        for i in [1, 2, 3]:
            dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, i, 1)
            dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.DELUDELN, i, 1)
        dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, 4, 2)
        dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.DELUDELN, 4, 2)
        dependentField.ComponentInterpolationSet(iron.FieldVariableTypes.U, 4,
                                                 iron.FieldInterpolationTypes.NODE_BASED)
        dependentField.ComponentInterpolationSet(iron.FieldVariableTypes.DELUDELN, 4,
                                                 iron.FieldInterpolationTypes.NODE_BASED)
        # Output
        print("----> Interpolate hydrostatic pressure linearly <----\n")
        if option[1] == 1:
            dependentField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
            # Output
            print("----> Set up dependent field with unit scaling <----\n")
        elif option[1] == 2:
            dependentField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
            # Output
            print("----> Set up dependent field with arithmetic mean scaling <----\n")
    if cellMLOption[0]:
        # Strain/stress tensors: six components each, mesh component 1.
        for i in [1,2,3,4,5,6]:
            dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U1, i, 1)
            dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U2, i, 1)
    equationsSet.DependentCreateFinish()
    return dependentField, equationsSet
#=================================================================================#
#=================================================================================#
def DependentFieldInitialise(dependentField, geometricField, hydroInit):
    """Seed the dependent field with the undeformed geometry and a uniform
    initial guess for the hydrostatic pressure.

    :param hydroInit: initial hydrostatic pressure value (component 4).
    """
    # Copy the undeformed coordinates (components 1-3) from the geometric
    # field into the dependent field.
    for component in (1, 2, 3):
        iron.Field.ParametersToFieldParametersComponentCopy(
            geometricField, iron.FieldVariableTypes.U,
            iron.FieldParameterSetTypes.VALUES, component,
            dependentField, iron.FieldVariableTypes.U,
            iron.FieldParameterSetTypes.VALUES, component)
    print("----> Initialised dependent field with undeformed geometry <----\n")
    # Uniform hydrostatic pressure guess, then push the parameter updates.
    dependentField.ComponentValuesInitialise(
        iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 4,
        hydroInit)
    dependentField.ParameterSetUpdateStart(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    dependentField.ParameterSetUpdateFinish(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    print("----> Initialised hydrostatic pressure guess of " + str(hydroInit) + " <----\n")
#=================================================================================#
#=================================================================================#
def DependentFieldWarmStart(dependentField, deformedGeomDOFs, deformedHydro, option):
    """Load a previously computed deformed solution into the dependent field.

    :param deformedGeomDOFs: geometric DOFs indexed [component, node] for
        trilinear (option[0]==1) or [component, node, derivative] for
        tricubic Hermite (option[0]==2).
    :param deformedHydro: hydrostatic pressure values -- NOTE(review): indexed
        per ELEMENT in the trilinear branch but per NODE in the tricubic
        branch; confirm this asymmetry matches the file formats read by
        ParseWarmStart.
    """
    # This function reads in warm-start solution to the dependent field.
    if option[0] == 1:
        # Trilinear elements
        # deformedGeomDOFs indices: component, node
        numNodes = len(deformedGeomDOFs[0,:])
        for component in [1,2,3]:
            for node in range(1, numNodes+1):
                value = deformedGeomDOFs[component-1, node-1]
                dependentField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                                                        1, 1, node, component, value)
        # Element-based hydrostatic pressure (component 4).
        for e in range(1, len(deformedHydro)+1):
            dependentField.ParameterSetUpdateElementDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, e,
                                                       4, deformedHydro[e-1])
    elif option[0] == 2:
        # Initialise dependent field to deformed warmstart solution.
        # All 8 Hermite derivatives are written per node and component.
        numNodes = len(deformedGeomDOFs[0,:,0])
        for component in [1,2,3]:
            print('Component: ', component)
            for node in range(1, numNodes+1):
                print('    Node number: ', node)
                for deriv in [1,2,3,4,5,6,7,8]:
                    value = deformedGeomDOFs[component-1,node-1,deriv-1]
                    print('        value: ', value)
                    dependentField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
                                                            deriv, node, component, value)
        # Output
        print("----> Initialised dependent field with warm-start geometry <----\n")
        # Set hydrostatic pressure initial guess.
        for node in range(1,numNodes+1):
            dependentField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,1,
                                                    1, node, 4, deformedHydro[node-1])
    dependentField.ParameterSetUpdateStart(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    dependentField.ParameterSetUpdateFinish(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    # Output
    print("----> Initialised warm-start hydrostatic pressure of " + str(deformedHydro) + " <----\n")
#=================================================================================#
#=================================================================================#
def _ReadDOFComponent(fid, derivsPerNode):
    """Read one coordinate component's DOF block from an open ipinit file.

    Each node record is a header line whose last token is the node number
    (a trailing '0' terminates the block), followed by *derivsPerNode* value
    lines; each value is the last token on its line.  Returns a numpy array:
    1-D for one value per node, 2-D (node x derivative) otherwise.
    """
    values = []
    num = fid.readline().split()[-1]
    while num != '0':
        if derivsPerNode == 1:
            values.append(float(fid.readline().split()[-1]))
        else:
            values.append([float(fid.readline().split()[-1])
                           for _ in range(derivsPerNode)])
        num = fid.readline().split()[-1]
    return numpy.array(values)


def ParseWarmStart(filename, option):
    """Read a warm-start dependent-field solution from an ipinit or exnode file.

    :param filename: file name including extension ('.ipinit' or '.exnode').
    :param option: option[0]==1 -> trilinear (1 DOF per node),
        option[0]==2 -> tricubic Hermite (8 derivative DOFs per node).
    :return: for ipinit files, ``(nodes, hydro)`` where ``nodes`` is indexed
        [component (x,y,z), node(, derivative)] and ``hydro`` holds the
        hydrostatic pressure values; for exnode files, whatever
        ExtractNodesElements returns.  ``None`` for unsupported options or
        unreadable files.

    Fixes over the original: the exnode branch now passes the BASE name
    (``split('.')[0]``) to ExtractNodesElements instead of the extension,
    the triplicated x/y/z reading code is shared via _ReadDOFComponent, and
    a truncated file raises EOFError instead of looping forever.
    """
    if option[0] not in (1, 2):
        return
    extension = filename.split('.')[1]
    if extension == 'exnode':
        # ExtractNodesElements appends '.exnode' itself, so it needs the base
        # name (split index 0), not the extension (index 1) as before.
        return ExtractNodesElements(filename.split('.')[0])
    if extension != 'ipinit':
        return
    try:
        fid = open(filename, 'r')
    except IOError:
        print('ERROR: Unable to open ', filename)
        return
    derivsPerNode = 8 if option[0] == 2 else 1
    try:
        # Skip the preamble up to the dependent-variable section marker.
        line = fid.readline()
        while line != " Dependent variable initial conditions:\n":
            if line == '':
                raise EOFError('Dependent variable section not found in ' + filename)
            line = fid.readline()
        fid.readline()
        fid.readline()
        # Read the x, y and z DOF blocks; two separator lines sit between
        # consecutive components.
        components = []
        for index in range(3):
            if index > 0:
                fid.readline()
                fid.readline()
            components.append(_ReadDOFComponent(fid, derivsPerNode))
        # Indices: component (x, y, z), node number(, derivative number).
        nodes = numpy.array(components)
        # Hydrostatic pressure block: same header/value layout, one value per
        # record, terminated by a '0' record number.
        fid.readline()
        fid.readline()
        hydro = []
        num = fid.readline().split()[-1]
        while num != '0':
            hydro.append(float(fid.readline().split()[-1]))
            if option[0] == 1:
                # Trilinear files carry extra lines per record; scan forward
                # to the next line starting with 'Enter', whose last token is
                # the next record number.
                line = fid.readline().split()
                while line[0] != 'Enter':
                    line = fid.readline().split()
                num = line[-1]
            else:
                num = fid.readline().split()[-1]
        hydro = numpy.array(hydro)
        print(hydro)
        return nodes, hydro
    finally:
        fid.close()
#=================================================================================#
#=================================================================================#
def _applyCellMLFieldScaling(field, option):
    # Shared scaling convention for the CellML models/parameters/intermediate
    # fields: tricubic Hermite meshes (option[0] == 2) use unit scaling
    # (option[1] == 1) or arithmetic-mean scaling (option[1] == 2);
    # trilinear meshes need no scaling call.
    if option[0] == 2:
        if option[1] == 1:
            field.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
        elif option[1] == 2:
            field.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)

def CellMLSetUp(cellMLUserNumber, cellMLModelsFieldUserNumber, cellMLParametersFieldUserNumber,
                cellMLIntermediateFieldUserNumber, region, materialField, dependentField, parameters, filename, option):
    """Set up the CellML environment for defining constitutive models.

    Imports the CellML model in *filename*, marks the strain components and the
    material parameters as known (supplied by iron) and the deviatoric 2PK
    stress components as wanted (computed by CellML), maps them to/from the
    dependent and material fields, and creates the models, parameters and
    intermediate fields.

    :param region: iron.Region the CellML environment belongs to.
    :param materialField: field whose U variable holds the constitutive parameters.
    :param dependentField: field whose U1 variable supplies strain and whose U2
        variable receives stress.
    :param parameters: iterable of CellML parameter names (looked up under "equations/").
    :param filename: path of the CellML model file to import.
    :param option: [interpolation, scaling]; option[0] == 2 selects tricubic
        Hermite and option[1] the scaling type (1 = unit, 2 = arithmetic mean).
    :return: (cellML, CellMLModelsField, CellMLParametersField, CellMLIntermediateField)
    """
    cellMLModelIndex = 1
    cellML = iron.CellML()
    cellML.CreateStart(cellMLUserNumber, region)
    cellML.ModelImport(filename)
    strain = ["E11", "E12", "E13", "E22", "E23", "E33"]
    stress2PK = ["Tdev11", "Tdev12", "Tdev13", "Tdev22", "Tdev23", "Tdev33"]
    # Set strains as known in CellML. These will be fed into the model from iron.
    for name in strain:
        cellML.VariableSetAsKnown(cellMLModelIndex, "equations/" + name)
    for parameter in parameters:
        cellML.VariableSetAsKnown(cellMLModelIndex, "equations/" + parameter)
    # Set stresses as unknown/wanted in CellML. These will be calculated using
    # the transversely isotropic constitutive model.
    for name in stress2PK:
        cellML.VariableSetAsWanted(cellMLModelIndex, "equations/" + name)
    cellML.CreateFinish()
    # ## Step 13: Map the variables to CellML model ###################################
    cellML.FieldMapsCreateStart()
    # Map the strain from dependentField U1 variable to CellML (1-based components).
    for component, variable in enumerate(strain, 1):
        cellML.CreateFieldToCellMLMap(dependentField, iron.FieldVariableTypes.U1, component,
                                      iron.FieldParameterSetTypes.VALUES, cellMLModelIndex, "equations/" + variable,
                                      iron.FieldParameterSetTypes.VALUES)
    # Map the material parameters from material field to CellML.
    for component, parameter in enumerate(parameters, 1):
        cellML.CreateFieldToCellMLMap(materialField, iron.FieldVariableTypes.U, component,
                                      iron.FieldParameterSetTypes.VALUES, cellMLModelIndex, "equations/" + parameter,
                                      iron.FieldParameterSetTypes.VALUES)
    # Map the stress from CellML to dependentField U2 variable.
    for component, variable in enumerate(stress2PK, 1):
        cellML.CreateCellMLToFieldMap(cellMLModelIndex, "equations/" + variable, iron.FieldParameterSetTypes.VALUES,
                                      dependentField, iron.FieldVariableTypes.U2, component,
                                      iron.FieldParameterSetTypes.VALUES)
    cellML.FieldMapsCreateFinish()
    print("----> Finished mapping variables to CellML <----\n")
    # Create models field for CellML
    CellMLModelsField = iron.Field()
    cellML.ModelsFieldCreateStart(cellMLModelsFieldUserNumber, CellMLModelsField)
    _applyCellMLFieldScaling(CellMLModelsField, option)
    cellML.ModelsFieldCreateFinish()
    print("----> Finished creating models field for CellML <----\n")
    # No need to create a state field since we aren't integrating.
    # Create parameters field for CellML, this is used as the strain field.
    CellMLParametersField = iron.Field()
    cellML.ParametersFieldCreateStart(cellMLParametersFieldUserNumber, CellMLParametersField)
    _applyCellMLFieldScaling(CellMLParametersField, option)
    cellML.ParametersFieldCreateFinish()
    print("----> Finished creating parameters field for CellML <----\n")
    # Create intermediate field for CellML, this is used as the stress field.
    CellMLIntermediateField = iron.Field()
    cellML.IntermediateFieldCreateStart(cellMLIntermediateFieldUserNumber, CellMLIntermediateField)
    _applyCellMLFieldScaling(CellMLIntermediateField, option)
    cellML.IntermediateFieldCreateFinish()
    print("----> Finished creating intermediate field for CellML <----\n")
    return cellML, CellMLModelsField, CellMLParametersField, CellMLIntermediateField
#=================================================================================#
#=================================================================================#
def StrainFieldSetUp(strainFieldUserNumber, region, decomposition, geometricField, equationsSet, option):
    """Create a Gauss-point-based strain output field and register it as the
    derived STRAIN variable of *equationsSet*.

    :param option: [interpolation, scaling]; for tricubic Hermite meshes
        (option[0] == 2), option[1] == 1 selects unit scaling and
        option[1] == 2 arithmetic-mean scaling.
    :return: the created strain field (6 symmetric tensor components on U).
    """
    strainField = iron.Field()
    strainField.CreateStart(strainFieldUserNumber, region)
    strainField.MeshDecompositionSet(decomposition)
    strainField.TypeSet(iron.FieldTypes.GENERAL)
    strainField.GeometricFieldSet(geometricField)
    strainField.DependentTypeSet(iron.FieldDependentTypes.DEPENDENT)
    strainField.VariableTypesSet([iron.FieldVariableTypes.U])
    strainField.VariableLabelSet(iron.FieldVariableTypes.U, "Strain")
    strainField.NumberOfComponentsSet(iron.FieldVariableTypes.U, 6)
    # Six symmetric strain-tensor components, stored at Gauss points.
    for component in range(1, 7):
        strainField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                              iron.FieldInterpolationTypes.GAUSS_POINT_BASED)
    if option[0] == 2:
        if option[1] == 1:
            strainField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
        elif option[1] == 2:
            # BUG FIX: this branch previously set UNIT as well; every sibling
            # routine in this file (CellMLSetUp, ExportResults) maps
            # option[1] == 2 to ARITHMETIC_MEAN, so the duplicate UNIT looked
            # like a copy-paste slip.
            strainField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
    strainField.CreateFinish()
    equationsSet.DerivedCreateStart(strainFieldUserNumber, strainField)
    equationsSet.DerivedVariableSet(iron.EquationsSetDerivedTypes.STRAIN, iron.FieldVariableTypes.U)
    equationsSet.DerivedCreateFinish()
    return strainField
#=================================================================================#
#=================================================================================#
def matrixFromSymmetricComponents(components):
    """Expand six Voigt-ordered components into the full symmetric 3x3 matrix.

    *components* holds the upper triangle row-by-row, i.e.
    [c00, c01, c02, c11, c12, c22]; the result mirrors it below the diagonal.
    """
    upper_triangle_indices = ((0, 1, 2),
                              (1, 3, 4),
                              (2, 4, 5))
    return numpy.array([[components[j] for j in row] for row in upper_triangle_indices])
#=================================================================================#
#=================================================================================#
def EquationsSetSetUp(equationsSet):
    """Attach a sparse equations object to *equationsSet* with output disabled.

    :param equationsSet: iron.EquationsSet to configure (modified in place).
    """
    # Set up standard options for problem and solvers.
    # Create equations
    equations = iron.Equations()
    equationsSet.EquationsCreateStart(equations)
    # Sparse storage for the assembled system.
    equations.SparsityTypeSet(iron.EquationsSparsityTypes.SPARSE)
    # No diagnostic output; switch to the MATRIX line below when debugging assembly.
    equations.OutputTypeSet(iron.EquationsOutputTypes.NONE)
    #equations.OutputTypeSet(iron.EquationsOutputTypes.MATRIX)
    equationsSet.EquationsCreateFinish()
#=================================================================================#
#=================================================================================#
def ProblemSolverSetup(equationsSet,problemUserNumber,maxIter, TOL, cellMLOption):
    """Create the finite-elasticity problem, control loop and solvers.

    :param equationsSet: equations set to add to the solver equations.
    :param problemUserNumber: user number for the new iron.Problem.
    :param maxIter: maximum number of load-increment iterations of the control loop.
    :param TOL: NOTE(review): unused -- the Newton tolerances are hard-coded
        to 1e-12 below; presumably this was meant to feed them.
    :param cellMLOption: [use_cellml, cellml_env]; when cellMLOption[0] is
        truthy the problem uses the FINITE_ELASTICITY_CELLML subtype and
        cellMLOption[1] is added to the CellML equations.
    :return: (problem, solverEquations)
    """
    # This function sets up the problem as well as the solver options.
    print("----> Set up equations <----\n")
    # Define problem
    problem = iron.Problem()
    # Problem subtype depends on whether the constitutive law comes from CellML.
    if cellMLOption[0]:
        problemSpecification = [iron.ProblemClasses.ELASTICITY,
                                iron.ProblemTypes.FINITE_ELASTICITY,
                                iron.ProblemSubtypes.FINITE_ELASTICITY_CELLML]
    else:
        problemSpecification = [iron.ProblemClasses.ELASTICITY,
                                iron.ProblemTypes.FINITE_ELASTICITY,
                                iron.ProblemSubtypes.NONE]
    problem.CreateStart(problemUserNumber, problemSpecification)
    problem.CreateFinish()
    # Output
    print("----> Set up problem <----\n")
    # Create control loops
    problem.ControlLoopCreateStart()
    controlLoop = iron.ControlLoop()
    problem.ControlLoopGet([iron.ControlLoopIdentifiers.NODE], controlLoop)
    #controlLoop.TypeSet(iron.ProblemControlLoopTypes.WHILE_LOOP)
    #controlLoop.IterationsSet(1,1,1)
    controlLoop.MaximumIterationsSet(maxIter)
    #controlLoop.MaximumIterationsSet(3)
    problem.ControlLoopCreateFinish()
    # Output
    print("----> Set up control loop <----\n")
    # Create nonlinear numerical solver
    linearSolver = iron.Solver()
    nonLinearSolver = iron.Solver()
    problem.SolversCreateStart()
    problem.SolverGet([iron.ControlLoopIdentifiers.NODE], 1, nonLinearSolver)
    nonLinearSolver.OutputTypeSet(iron.SolverOutputTypes.PROGRESS)
    # Finite-difference Jacobian; tight (1e-12) absolute/solution/relative tolerances.
    nonLinearSolver.NewtonJacobianCalculationTypeSet(iron.JacobianCalculationTypes.FD)
    nonLinearSolver.NewtonAbsoluteToleranceSet(1e-12)
    nonLinearSolver.NewtonSolutionToleranceSet(1e-12)
    nonLinearSolver.NewtonRelativeToleranceSet(1e-12)
    nonLinearSolver.NewtonConvergenceTestTypeSet(iron.NewtonConvergenceTypes.PETSC_DEFAULT)
    nonLinearSolver.NewtonLinearSolverGet(linearSolver)
    #nonLinearSolver.NewtonLineSearchTypeSet(iron.NewtonLineSearchTypes.LINEAR)
    #nonLinearSolver.NewtonLineSearchAlphaSet(1e-6)
    #nonLinearSolver.NewtonLineSearchMaxStepSet(1e5)
    #nonLinearSolver.NewtonLineSearchMonitorOutputSet()
    #nonLinearSolver.NewtonLineSearchStepTolSet(1e-5)
    # Direct linear solve inside each Newton iteration.
    linearSolver.LinearTypeSet(iron.LinearSolverTypes.DIRECT)
    #linearSolver.LinearDirectTypeSet(iron.DirectLinearSolverTypes.LU)
    #linearSolver.LibraryTypeSet(iron.SolverLibraries.MUMPS)
    problem.SolversCreateFinish()
    # Hook the CellML evaluation into the Newton solver when requested.
    if cellMLOption[0]:
        cellMLSolver = iron.Solver()
        cellMLEquations = iron.CellMLEquations()
        problem.CellMLEquationsCreateStart()
        nonLinearSolver.NewtonCellMLSolverGet(cellMLSolver)
        cellMLSolver.CellMLEquationsGet(cellMLEquations)
        cellMLEquations.CellMLAdd(cellMLOption[1])
        problem.CellMLEquationsCreateFinish()
    # Output
    print("----> Set up linear and nonlinear solvers <----\n")
    # Add solver equations sets which encompass the physics
    solverEquations = iron.SolverEquations()
    solver = iron.Solver()
    problem.SolverEquationsCreateStart()
    problem.SolverGet([iron.ControlLoopIdentifiers.NODE], 1, solver)
    solver.SolverEquationsGet(solverEquations)
    solverEquations.SparsityTypeSet(iron.SolverEquationsSparsityTypes.SPARSE)
    equationSetIndex = solverEquations.EquationsSetAdd(equationsSet)
    problem.SolverEquationsCreateFinish()
    # Output
    print("----> Set up solver with equations <----\n")
    return problem, solverEquations
#=================================================================================#
#=================================================================================#
def BCCubeSingleFace(solverEquations, dependentField, appliedFace, faceNormal, appliedDirection, increm, optionBC,
                     fixXFace, fixYFace, fixZFace, numNodes, option):
    """Set up boundary conditions for loading a single face of a cube.

    Pins the x/y/z displacement of the nodes in fixXFace/fixYFace/fixZFace
    respectively, then applies a displacement (optionBC == 1), a force (2) or
    an incremented pressure (3) of magnitude *increm* on *appliedFace*.

    :param faceNormal: normal direction (1/2/3) of the loaded face; also selects
        which in-plane derivatives to constrain for Hermite meshes.
    :param appliedDirection: displacement/force component the BC acts along.
    :param option: [interpolation, ...]; option[0] == 2 means tricubic Hermite.
    """
    # This function sets up the boundary conditions for dealing with BC's on a
    # single face of a cube.
    # Set up
    boundaryConditions = iron.BoundaryConditions()
    solverEquations.BoundaryConditionsCreateStart(boundaryConditions)
    # Initialise fixed faces node values.
    for node in fixXFace:
        boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                                   iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, 1,
                                   iron.BoundaryConditionsTypes.FIXED, 0.0)
    for node in fixYFace:
        boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                                   iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, 2,
                                   iron.BoundaryConditionsTypes.FIXED, 0.0)
    for node in fixZFace:
        boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                                   iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, 3,
                                   iron.BoundaryConditionsTypes.FIXED, 0.0)
    if option[0] == 2:
        # Fix derivatives
        # The two first derivatives tangent to the applied face are constrained.
        if faceNormal == 1:
            derivFix = [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3]
        elif faceNormal == 2:
            derivFix = [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3]
        else:
            derivFix = [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2]
        # NOTE(review): these derivative constraints are applied to EVERY node
        # (1..numNodes), not only the fixed faces -- confirm this is intended.
        for node in range(1,numNodes+1):
            for j in derivFix:
                for component in [1,2,3]:
                    boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1, j, node,
                                               component, iron.BoundaryConditionsTypes.FIXED, 0.0)
        # Fix all second and third derivatives.
        for i in range(1, numNodes + 1):
            for j in [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2,
                      iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S3,
                      iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2_S3,
                      iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2_S3]:
                for k in [1, 2, 3]:
                    boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1, j, i, k,
                                               iron.BoundaryConditionsTypes.FIXED, 0.0)
    # Output
    print("----> Implemented fixed boundary conditions <----\n")
    # Initialise applied faces.
    if optionBC == 1:
        # Option 1: Compression/extension
        for node in appliedFace:
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, appliedDirection,
                                       iron.BoundaryConditionsTypes.FIXED, increm)
        # Output
        print("----> Implemented compression/extension boundary condition of " + str(increm) + " <----\n")
    elif optionBC == 2:
        # Option 2: Force
        for node in appliedFace:
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.DELUDELN, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, appliedDirection,
                                       iron.BoundaryConditionsTypes.FIXED_INCREMENTED, increm)
        # Output
        print("----> Implemented force boundary condition of " + str(increm) + "N <----\n")
    elif optionBC == 3:
        # Option 3: Pressure
        for node in appliedFace:
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.DELUDELN, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, faceNormal,
                                       iron.BoundaryConditionsTypes.PRESSURE_INCREMENTED, increm)
        # Output
        print("----> Implemented pressure boundary condition of " + str(increm) + " kPa <----\n")
    solverEquations.BoundaryConditionsCreateFinish()
#=================================================================================#
#=================================================================================#
def BCCantilever(solverEquations, dependentField, appliedFace, faceNormal, appliedDirection, increm, optionBC,
                 fixBackFace, fixedFaceNormal, option):
    """Set up the boundary conditions for a cantilever problem.

    Clamps every node in *fixBackFace* (all three displacement components and,
    for tricubic Hermite meshes, the derivatives tangent to the fixed face),
    then loads *appliedFace* with a displacement (optionBC == 1), force (2) or
    incremented pressure (3) of magnitude *increm*.

    :param faceNormal: normal direction of the loaded face (used by pressure BCs).
    :param appliedDirection: component the displacement/force BC acts along.
    :param fixedFaceNormal: normal direction (1/2/3) of the clamped face;
        selects which derivatives must be constrained.
    :param option: [interpolation, ...]; option[0] == 2 selects tricubic Hermite.
    """
    # Set up
    boundaryConditions = iron.BoundaryConditions()
    solverEquations.BoundaryConditionsCreateStart(boundaryConditions)
    # BUG FIX: the original wrapped the block below in an extra
    # "for component in [1, 2, 3]" loop whose variable was immediately shadowed
    # by the inner component loop, so all fixed-face work ran three times and
    # the derivative constraints only ever used component == 3 (the inner
    # loop's final value). The derivative set is loop-invariant, so select it once.
    derivFix = []
    if option[0] == 2:
        if fixedFaceNormal == 1:
            derivFix = [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2_S3]
        elif fixedFaceNormal == 2:
            derivFix = [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2_S3]
        else:
            derivFix = [iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2_S3,
                        iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S2_S3]
    # Clamp values (and selected derivatives) of every component of each fixed node.
    for node in fixBackFace:
        for component in [1, 2, 3]:
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, component,
                                       iron.BoundaryConditionsTypes.FIXED, 0.0)
            for deriv in derivFix:
                boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1, deriv, node,
                                           component, iron.BoundaryConditionsTypes.FIXED, 0.0)
    # Output
    print("----> Implemented fixed boundary conditions <----\n")
    # Initialise applied faces.
    if optionBC == 1:
        # Option 1: Compression/extension
        for node in appliedFace:
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, appliedDirection,
                                       iron.BoundaryConditionsTypes.FIXED, increm)
        # Output
        print("----> Implemented compression/extension boundary condition of " + str(increm) + " <----\n")
    elif optionBC == 2:
        # Option 2: Force
        for node in appliedFace:
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.DELUDELN, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, appliedDirection,
                                       iron.BoundaryConditionsTypes.FIXED_INCREMENTED, increm)
        # Output
        print("----> Implemented force boundary condition of " + str(increm) + "N <----\n")
    elif optionBC == 3:
        # Option 3: Pressure
        print('Pressure applied on: ')
        for node in appliedFace:
            print('Node ', node)
            boundaryConditions.AddNode(dependentField, iron.FieldVariableTypes.DELUDELN, 1,
                                       iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, faceNormal,
                                       iron.BoundaryConditionsTypes.PRESSURE_INCREMENTED, increm)
            print('Face normal ', faceNormal)
        # Output
        print("----> Implemented pressure boundary condition of " + str(increm) + " kPa <----\n")
    solverEquations.BoundaryConditionsCreateFinish()
#=================================================================================#
#=================================================================================#
def BCEndoPressure(solverEquations, dependentField, endoFace, pressure, basalFace, option):
    """Apply LV-inflation boundary conditions: clamp the basal surface and
    load the endocardial surface with an incremented pressure.

    :param endoFace: node numbers on the endocardial surface (pressure-loaded).
    :param basalFace: node numbers on the base (fully clamped).
    :param option: [interpolation, ...]; option[0] == 1 means trilinear,
        otherwise tricubic Hermite (extra derivatives are pinned).
    """
    bcs = iron.BoundaryConditions()
    solverEquations.BoundaryConditionsCreateStart(bcs)
    # Trilinear meshes only carry nodal values; Hermite meshes also need the
    # in-surface derivatives pinned at the base.
    if option[0] == 1:
        fixedDerivatives = [iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV]
    else:
        fixedDerivatives = [iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV,
                            iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1,
                            iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3,
                            iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1_S3]
    # Clamp every component of every basal node (values plus selected derivatives).
    for direction in (1, 2, 3):
        for basalNode in basalFace:
            for derivative in fixedDerivatives:
                bcs.AddNode(dependentField, iron.FieldVariableTypes.U, 1,
                            derivative, basalNode, direction,
                            iron.BoundaryConditionsTypes.FIXED, 0.0)
    # Incremented pressure load on the endocardial face, acting along the
    # face-normal direction (component 3 of DELUDELN).
    for endoNode in endoFace:
        bcs.AddNode(dependentField, iron.FieldVariableTypes.DELUDELN, 1,
                    iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, endoNode, 3,
                    iron.BoundaryConditionsTypes.PRESSURE_INCREMENTED, pressure)
    # Apex DOF coupling via ConstrainNodeDofsEqual (apexEndoNodes/apexEpiNodes)
    # was present in the original but disabled.
    solverEquations.BoundaryConditionsCreateFinish()
#=================================================================================#
#=================================================================================#
def ExportResults(dependentField, deformedFieldUserNumber, decomposition, region, filename, option):
    """Copy the deformed geometry into a dedicated field and export it to
    ./results/<filename> exnode/exelem files (FORTRAN format).

    Note: destroys *dependentField* after copying.
    """
    # Field that will hold the deformed coordinates.
    deformedField = iron.Field()
    deformedField.CreateStart(deformedFieldUserNumber, region)
    deformedField.MeshDecompositionSet(decomposition)
    deformedField.TypeSet(iron.FieldTypes.GEOMETRIC)
    deformedField.VariableLabelSet(iron.FieldVariableTypes.U, "DeformedGeometry")
    if option[0] == 1:
        # Trilinear geometry: mesh component 1 with arithmetic-mean scaling.
        for axis in (1, 2, 3):
            deformedField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, axis, 1)
        deformedField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
    elif option[0] == 2:
        # Tricubic Hermite geometry; the scaling scheme is chosen by option[1].
        for axis in (1, 2, 3):
            deformedField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, axis, 1)
        scalingChoices = {1: iron.FieldScalingTypes.UNIT,
                          2: iron.FieldScalingTypes.ARITHMETIC_MEAN}
        if option[1] in scalingChoices:
            deformedField.ScalingTypeSet(scalingChoices[option[1]])
    deformedField.CreateFinish()
    # Copy each deformed coordinate component across from the dependent field.
    for axis in (1, 2, 3):
        dependentField.ParametersToFieldParametersComponentCopy(iron.FieldVariableTypes.U,
                                                                iron.FieldParameterSetTypes.VALUES, axis,
                                                                deformedField, iron.FieldVariableTypes.U,
                                                                iron.FieldParameterSetTypes.VALUES, axis)
    dependentField.Destroy()
    #deformedField.Destroy()
    # Write the exnode/exelem files under ./results.
    if not os.path.exists("./results"):
        os.makedirs("./results")
    fields = iron.Fields()
    fields.CreateRegion(region)
    fields.NodesExport("./results/" + filename, "FORTRAN")
    fields.ElementsExport("./results/" + filename, "FORTRAN")
    fields.Finalise()
    # Output
    print("----> Export deformed geometric solutions <----\n")
#=================================================================================#
#=================================================================================#
def _write_exdata_header(out, groupname, numComponents):
    # Write a CMGUI exdata header: one element_xi field followed by
    # numComponents scalar real fields named yg1..ygN.
    out.write(' Group name: ' + str(groupname) + '\n')
    out.write(' #Fields= ' + str(numComponents + 1) + '\n')
    out.write(' 1) element_xi, field, element_xi, #Components=1\n')
    out.write('   1. Value index= 1, #Derivatives=0\n')
    for i in range(1, numComponents + 1):
        out.write(' ' + str(i + 1) + ') yg' + str(i) + ', field, real, #Components=1\n')
        out.write('   1. Value index= 1, #Derivatives=0\n')

def ExportStressStrain(elements, xiPositions, cellML, equationsSet, filename_disp, filename_strain, filename_stress2PK,
                       filename_stressCauchy, groupname_disp, groupname_strain, groupname_stress):
    """Evaluate displacement, strain and 2PK/Cauchy stress at user-specified xi
    positions for each element and write them to exdata files for CMGUI.

    :param elements: element number per evaluation point (len == len(xiPositions)).
    :param xiPositions: array-like of shape (n, 3) of element xi coordinates.
    :param cellML: CellML environment forwarded to StrainInterpolateXi.
    :param equationsSet: provides StrainInterpolateXi(element, xi, cellML)
        returning (displacement[3], strain[6], stress2PK[6], stressCauchy[6]).
    :param filename_*: output paths; on any open failure an error is printed
        and nothing is written.
    :param groupname_*: exdata group names (the Cauchy file reuses
        groupname_stress, as in the original).
    """
    # Open all four outputs up front. BUG FIX: the original returned on a
    # failed open without closing files that had already been opened, leaking
    # their handles.
    paths = [filename_disp, filename_strain, filename_stress2PK, filename_stressCauchy]
    handles = []
    for path in paths:
        try:
            handles.append(open(path, 'w'))
        except IOError:
            print('ERROR: Unable to open ', path)
            for h in handles:
                h.close()
            return
    file_disp, file_strain, file_stress2PK, file_stressCauchy = handles
    try:
        # Headers: displacement has 3 components, the tensor files have 6.
        _write_exdata_header(file_disp, groupname_disp, 3)
        _write_exdata_header(file_strain, groupname_strain, 6)
        _write_exdata_header(file_stress2PK, groupname_stress, 6)
        _write_exdata_header(file_stressCauchy, groupname_stress, 6)
        data_pt = 1
        for i in range(len(xiPositions)):
            element_xi_line = ('  E ' + str(elements[i]) + ' 3 ' + str(xiPositions[i, 0]) + ' '
                               + str(xiPositions[i, 1]) + ' ' + str(xiPositions[i, 2]) + '\n')
            for out in handles:
                out.write(' Node:  ' + str(data_pt) + '\n')
                out.write(element_xi_line)
            # Interpolate displacement, strain and both stress measures at xi.
            disp_temp, strain_temp, stress2PK_temp, stressCauchy_temp = \
                equationsSet.StrainInterpolateXi(elements[i], xiPositions[i, :], cellML)
            for k in range(6):
                file_strain.write('   ' + str(strain_temp[k]) + '\n')
                file_stress2PK.write('   ' + str(stress2PK_temp[k]) + '\n')
                file_stressCauchy.write('   ' + str(stressCauchy_temp[k]) + '\n')
            for m in range(3):
                file_disp.write('   ' + str(disp_temp[m]) + '\n')
            data_pt = data_pt + 1
    finally:
        for h in handles:
            h.close()
    # Output
    print("----> Export stresses and strains of deformed solution <----\n")
| [
"opencmiss.iron.iron.ComputationalNodeNumberGet",
"opencmiss.iron.iron.GeneratedMesh",
"opencmiss.iron.iron.CellMLEquations",
"numpy.array",
"opencmiss.iron.iron.Fields",
"os.path.exists",
"opencmiss.iron.iron.Equations",
"opencmiss.iron.iron.Region",
"opencmiss.iron.iron.Mesh",
"opencmiss.iron.ir... | [((1233, 1269), 'opencmiss.iron.iron.ComputationalNumberOfNodesGet', 'iron.ComputationalNumberOfNodesGet', ([], {}), '()\n', (1267, 1269), False, 'from opencmiss.iron import iron\n'), ((1300, 1333), 'opencmiss.iron.iron.ComputationalNodeNumberGet', 'iron.ComputationalNodeNumberGet', ([], {}), '()\n', (1331, 1333), False, 'from opencmiss.iron import iron\n'), ((1395, 1418), 'opencmiss.iron.iron.CoordinateSystem', 'iron.CoordinateSystem', ([], {}), '()\n', (1416, 1418), False, 'from opencmiss.iron import iron\n'), ((1591, 1604), 'opencmiss.iron.iron.Region', 'iron.Region', ([], {}), '()\n', (1602, 1604), False, 'from opencmiss.iron import iron\n'), ((5032, 5052), 'opencmiss.iron.iron.GeneratedMesh', 'iron.GeneratedMesh', ([], {}), '()\n', (5050, 5052), False, 'from opencmiss.iron import iron\n'), ((5308, 5319), 'opencmiss.iron.iron.Mesh', 'iron.Mesh', ([], {}), '()\n', (5317, 5319), False, 'from opencmiss.iron import iron\n'), ((5739, 5759), 'opencmiss.iron.iron.Decomposition', 'iron.Decomposition', ([], {}), '()\n', (5757, 5759), False, 'from opencmiss.iron import iron\n'), ((6427, 6439), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (6437, 6439), False, 'from opencmiss.iron import iron\n'), ((9766, 9779), 'opencmiss.iron.iron.Fields', 'iron.Fields', ([], {}), '()\n', (9777, 9779), False, 'from opencmiss.iron import iron\n'), ((11265, 11284), 'numpy.array', 'numpy.array', (['nodesX'], {}), '(nodesX)\n', (11276, 11284), False, 'import numpy\n'), ((11298, 11317), 'numpy.array', 'numpy.array', (['nodesY'], {}), '(nodesY)\n', (11309, 11317), False, 'import numpy\n'), ((11331, 11350), 'numpy.array', 'numpy.array', (['nodesZ'], {}), '(nodesZ)\n', (11342, 11350), False, 'import numpy\n'), ((11400, 11418), 'numpy.array', 'numpy.array', (['nodes'], {}), '(nodes)\n', (11411, 11418), False, 'import numpy\n'), ((11806, 11827), 'numpy.array', 'numpy.array', (['elements'], {}), '(elements)\n', (11817, 11827), False, 'import numpy\n'), 
((12266, 12278), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (12276, 12278), False, 'from opencmiss.iron import iron\n'), ((15477, 15489), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (15487, 15489), False, 'from opencmiss.iron import iron\n'), ((16884, 16896), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (16894, 16896), False, 'from opencmiss.iron import iron\n'), ((19226, 19238), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (19236, 19238), False, 'from opencmiss.iron import iron\n'), ((34068, 34081), 'opencmiss.iron.iron.CellML', 'iron.CellML', ([], {}), '()\n', (34079, 34081), False, 'from opencmiss.iron import iron\n'), ((36608, 36620), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (36618, 36620), False, 'from opencmiss.iron import iron\n'), ((37246, 37258), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (37256, 37258), False, 'from opencmiss.iron import iron\n'), ((37848, 37860), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (37858, 37860), False, 'from opencmiss.iron import iron\n'), ((38772, 38784), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (38782, 38784), False, 'from opencmiss.iron import iron\n'), ((40185, 40347), 'numpy.array', 'numpy.array', (['[[components[0], components[1], components[2]], [components[1], components[\n 3], components[4]], [components[2], components[4], components[5]]]'], {}), '([[components[0], components[1], components[2]], [components[1],\n components[3], components[4]], [components[2], components[4],\n components[5]]])\n', (40196, 40347), False, 'import numpy\n'), ((40675, 40691), 'opencmiss.iron.iron.Equations', 'iron.Equations', ([], {}), '()\n', (40689, 40691), False, 'from opencmiss.iron import iron\n'), ((41375, 41389), 'opencmiss.iron.iron.Problem', 'iron.Problem', ([], {}), '()\n', (41387, 41389), False, 'from opencmiss.iron import iron\n'), ((42061, 42079), 'opencmiss.iron.iron.ControlLoop', 
'iron.ControlLoop', ([], {}), '()\n', (42077, 42079), False, 'from opencmiss.iron import iron\n'), ((42504, 42517), 'opencmiss.iron.iron.Solver', 'iron.Solver', ([], {}), '()\n', (42515, 42517), False, 'from opencmiss.iron import iron\n'), ((42540, 42553), 'opencmiss.iron.iron.Solver', 'iron.Solver', ([], {}), '()\n', (42551, 42553), False, 'from opencmiss.iron import iron\n'), ((44181, 44203), 'opencmiss.iron.iron.SolverEquations', 'iron.SolverEquations', ([], {}), '()\n', (44201, 44203), False, 'from opencmiss.iron import iron\n'), ((44217, 44230), 'opencmiss.iron.iron.Solver', 'iron.Solver', ([], {}), '()\n', (44228, 44230), False, 'from opencmiss.iron import iron\n'), ((45185, 45210), 'opencmiss.iron.iron.BoundaryConditions', 'iron.BoundaryConditions', ([], {}), '()\n', (45208, 45210), False, 'from opencmiss.iron import iron\n'), ((49731, 49756), 'opencmiss.iron.iron.BoundaryConditions', 'iron.BoundaryConditions', ([], {}), '()\n', (49754, 49756), False, 'from opencmiss.iron import iron\n'), ((54560, 54585), 'opencmiss.iron.iron.BoundaryConditions', 'iron.BoundaryConditions', ([], {}), '()\n', (54583, 54585), False, 'from opencmiss.iron import iron\n'), ((56651, 56663), 'opencmiss.iron.iron.Field', 'iron.Field', ([], {}), '()\n', (56661, 56663), False, 'from opencmiss.iron import iron\n'), ((58283, 58296), 'opencmiss.iron.iron.Fields', 'iron.Fields', ([], {}), '()\n', (58294, 58296), False, 'from opencmiss.iron import iron\n'), ((2373, 2385), 'opencmiss.iron.iron.Basis', 'iron.Basis', ([], {}), '()\n', (2383, 2385), False, 'from opencmiss.iron import iron\n'), ((9685, 9712), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), "('./results')\n", (9699, 9712), False, 'import os\n'), ((9722, 9746), 'os.makedirs', 'os.makedirs', (['"""./results"""'], {}), "('./results')\n", (9733, 9746), False, 'import os\n'), ((22427, 22656), 'opencmiss.iron.iron.Field.ParametersToFieldParametersComponentCopy', 'iron.Field.ParametersToFieldParametersComponentCopy', 
(['geometricField', 'iron.FieldVariableTypes.U', 'iron.FieldParameterSetTypes.VALUES', 'i', 'dependentField', 'iron.FieldVariableTypes.U', 'iron.FieldParameterSetTypes.VALUES', 'i'], {}), '(geometricField, iron.\n FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, i,\n dependentField, iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.\n VALUES, i)\n', (22478, 22656), False, 'from opencmiss.iron import iron\n'), ((43700, 43713), 'opencmiss.iron.iron.Solver', 'iron.Solver', ([], {}), '()\n', (43711, 43713), False, 'from opencmiss.iron import iron\n'), ((43740, 43762), 'opencmiss.iron.iron.CellMLEquations', 'iron.CellMLEquations', ([], {}), '()\n', (43760, 43762), False, 'from opencmiss.iron import iron\n'), ((58207, 58234), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), "('./results')\n", (58221, 58234), False, 'import os\n'), ((58244, 58268), 'os.makedirs', 'os.makedirs', (['"""./results"""'], {}), "('./results')\n", (58255, 58268), False, 'import os\n'), ((2955, 2967), 'opencmiss.iron.iron.Basis', 'iron.Basis', ([], {}), '()\n', (2965, 2967), False, 'from opencmiss.iron import iron\n'), ((3715, 3727), 'opencmiss.iron.iron.Basis', 'iron.Basis', ([], {}), '()\n', (3725, 3727), False, 'from opencmiss.iron import iron\n'), ((4131, 4143), 'opencmiss.iron.iron.Basis', 'iron.Basis', ([], {}), '()\n', (4141, 4143), False, 'from opencmiss.iron import iron\n'), ((27529, 27548), 'numpy.array', 'numpy.array', (['nodesX'], {}), '(nodesX)\n', (27540, 27548), False, 'import numpy\n'), ((28245, 28264), 'numpy.array', 'numpy.array', (['nodesY'], {}), '(nodesY)\n', (28256, 28264), False, 'import numpy\n'), ((28961, 28980), 'numpy.array', 'numpy.array', (['nodesZ'], {}), '(nodesZ)\n', (28972, 28980), False, 'import numpy\n'), ((29152, 29170), 'numpy.array', 'numpy.array', (['nodes'], {}), '(nodes)\n', (29163, 29170), False, 'import numpy\n'), ((29795, 29813), 'numpy.array', 'numpy.array', (['hydro'], {}), '(hydro)\n', (29806, 29813), False, 'import 
numpy\n'), ((31166, 31185), 'numpy.array', 'numpy.array', (['nodesX'], {}), '(nodesX)\n', (31177, 31185), False, 'import numpy\n'), ((31748, 31767), 'numpy.array', 'numpy.array', (['nodesY'], {}), '(nodesY)\n', (31759, 31767), False, 'import numpy\n'), ((32330, 32349), 'numpy.array', 'numpy.array', (['nodesZ'], {}), '(nodesZ)\n', (32341, 32349), False, 'import numpy\n'), ((32521, 32539), 'numpy.array', 'numpy.array', (['nodes'], {}), '(nodes)\n', (32532, 32539), False, 'import numpy\n'), ((33302, 33320), 'numpy.array', 'numpy.array', (['hydro'], {}), '(hydro)\n', (33313, 33320), False, 'import numpy\n')] |
# (C) <NAME>, November 2013
# License: BSD 3 clause
import numpy as np
import os
class DCG:
    """NDCG@rank evaluator backed by precomputed relevance matrices stored as
    float32 memmaps on disk (one matrix per relevance method)."""

    def __init__(self, config, n_queries, split, rank=25, relevance_methods=None):
        """
        Args:
            config: dict providing config['dataset']['data'] (root dir) and
                config['dataset']['name'] (dataset name used in file names).
            n_queries: number of query rows in each relevance matrix.
            split: split name embedded in the relevance file names.
            rank: truncation rank for NDCG (top-`rank` results are scored).
            relevance_methods: list of relevance metric names used to locate
                the .npy files; defaults to ['rougeL'].
        """
        # FIX: avoid the mutable default argument ['rougeL']; use a None
        # sentinel instead (the stored attribute is still a list).
        if relevance_methods is None:
            relevance_methods = ['rougeL']
        self.rank = rank
        self.relevance_methods = relevance_methods
        relevance_dir = os.path.join(config['dataset']['data'], config['dataset']['name'], 'relevances')
        relevance_filenames = [os.path.join(relevance_dir,
                                            '{}-{}-{}.npy'.format(config['dataset']['name'], split, m))
                               for m in relevance_methods]
        self.relevances = [np.memmap(f, dtype=np.float32, mode='r') for f in relevance_filenames]
        for r in self.relevances:
            # Reshape each flat memmap into (n_queries, n_candidates).
            r.shape = (n_queries, -1)

    def compute_ndcg(self, npts, query_id, sorted_indexes, fold_index=0, retrieval='image'):
        """Return {method_name: NDCG@rank} for one query.

        Args:
            npts: number of items per fold (the indexing below suggests each
                item has 5 associated sentences -- TODO confirm with callers).
            query_id: query index within the fold.
            sorted_indexes: candidate indices ranked best-first.
            fold_index: which cross-validation fold to slice out.
            retrieval: 'image' (sentence query -> image candidates) or
                'sentence' (image query -> sentence candidates).
        """
        sorted_indexes = sorted_indexes[:self.rank]
        if retrieval == 'image':
            # Query rows are sentence-indexed (npts * 5 per fold).
            query_base = npts * 5 * fold_index
            relevances = [r[query_base + query_id, fold_index * npts : (fold_index + 1) * npts] for r in self.relevances]
        elif retrieval == 'sentence':
            # Query columns are image-indexed (npts per fold).
            query_base = npts * fold_index
            relevances = [r[fold_index * npts * 5 : (fold_index + 1) * npts * 5, query_base + query_id] for r in self.relevances]
        else:
            # FIX: previously an unknown value left `relevances` unbound and
            # raised a confusing NameError below.
            raise ValueError("retrieval must be 'image' or 'sentence'")
        ndcg_scores = [ndcg_from_ranking(r, sorted_indexes) for r in relevances]
        out = {k: v for k, v in zip(self.relevance_methods, ndcg_scores)}
        return out
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels); exactly two distinct levels.
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    precision @k : float
    """
    levels = np.unique(y_true)
    if len(levels) > 2:
        raise ValueError("Only supported for two relevance levels.")
    positive = levels[1]
    total_pos = np.sum(y_true == positive)
    # Labels of the k highest-scoring documents, best first.
    top_k = np.argsort(y_score)[::-1][:k]
    hits = np.sum(np.take(y_true, top_k) == positive)
    # Normalize by min(total_pos, k) so a perfect ranking always scores 1.0.
    return float(hits) / min(total_pos, k)
def average_precision_score(y_true, y_score, k=10):
    """Average precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels); exactly two distinct levels.
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    average precision @k : float
    """
    unique_y = np.unique(y_true)
    if len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)
    # Truncate to the top min(n_pos, k) documents so a perfect ranking is 1.0.
    order = np.argsort(y_score)[::-1][:min(n_pos, k)]
    y_true = np.asarray(y_true)[order]
    # FIX: the original used Python-2-only `xrange` (NameError on Python 3)
    # and recomputed the running precision from scratch at every relevant
    # document (O(k^2)). Track the hit count incrementally instead (O(k)).
    score = 0.0
    n_hits = 0
    for i, label in enumerate(y_true):
        if label == pos_label:
            n_hits += 1
            # Precision up to and including document i.
            score += n_hits / (i + 1.0)
    if n_pos == 0:
        return 0
    return score / n_pos
def dcg_score(y_true, y_score, k=10, gains="exponential"):
    """Discounted cumulative gain (DCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    DCG @k : float
    """
    # Relevance labels of the k highest-scoring documents, best first.
    top_k = np.argsort(y_score)[::-1][:k]
    relevance = np.take(y_true, top_k)
    if gains == "exponential":
        gain_values = 2 ** relevance - 1
    elif gains == "linear":
        gain_values = relevance
    else:
        raise ValueError("Invalid gains option.")
    # Rank r (1-based) is discounted by log2(r + 1), hence arange + 2.
    discounts = np.log2(np.arange(len(relevance)) + 2)
    return np.sum(gain_values / discounts)
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
    """Normalized discounted cumulative gain (NDCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    NDCG @k : float
        0 when the ideal DCG is 0 (all-zero relevance), matching
        ndcg_from_ranking.
    """
    best = dcg_score(y_true, y_true, k, gains)
    actual = dcg_score(y_true, y_score, k, gains)
    # FIX: guard the degenerate all-zero-relevance case; previously this
    # produced 0.0/0.0 = nan (with a numpy divide warning) while the sibling
    # ndcg_from_ranking already returned 0.
    if best == 0:
        return 0
    return actual / best
# Alternative API.
def dcg_from_ranking(y_true, ranking):
    """Discounted cumulative gain (DCG) for an explicit ranking.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    ranking : array-like, shape = [k]
        Document indices, i.e., ranking[0] is the index of the top-ranked
        document, ranking[1] the second-ranked one, and so on.

    Returns
    -------
    DCG @k : float
    """
    labels = np.asarray(y_true)
    positions = np.asarray(ranking)
    # Relevance of each ranked document, in rank order.
    relevance = labels[positions]
    gain_values = 2 ** relevance - 1
    # Rank r (1-based) is discounted by log2(r + 1), hence arange + 2.
    discounts = np.log2(np.arange(len(positions)) + 2)
    return np.sum(gain_values / discounts)
def ndcg_from_ranking(y_true, ranking):
    """Normalized discounted cumulative gain (NDCG) for an explicit ranking.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    ranking : array-like, shape = [k]
        Document indices, i.e., ranking[0] is the index of the top-ranked
        document, ranking[1] the second-ranked one, and so on.

    Returns
    -------
    NDCG @k : float
        0 when the ideal DCG is 0 (all-zero relevance).
    """
    depth = len(ranking)
    # Ideal ranking: documents sorted by true relevance, best first.
    ideal_order = np.argsort(y_true)[::-1]
    ideal = dcg_from_ranking(y_true, ideal_order[:depth])
    if ideal == 0:
        return 0
    return dcg_from_ranking(y_true, ranking) / ideal
if __name__ == '__main__':
    # Smoke tests: only run when this module is executed as a script; any
    # regression in the metric implementations raises AssertionError.
    # Check that some rankings are better than others
    assert dcg_score([5, 3, 2], [2, 1, 0]) > dcg_score([4, 3, 2], [2, 1, 0])
    assert dcg_score([4, 3, 2], [2, 1, 0]) > dcg_score([1, 3, 2], [2, 1, 0])
    assert dcg_score([5, 3, 2], [2, 1, 0], k=2) > dcg_score([4, 3, 2], [2, 1, 0], k=2)
    assert dcg_score([4, 3, 2], [2, 1, 0], k=2) > dcg_score([1, 3, 2], [2, 1, 0], k=2)
    # Perfect rankings
    assert ndcg_score([5, 3, 2], [2, 1, 0]) == 1.0
    assert ndcg_score([2, 3, 5], [0, 1, 2]) == 1.0
    assert ndcg_from_ranking([5, 3, 2], [0, 1, 2]) == 1.0
    assert ndcg_score([5, 3, 2], [2, 1, 0], k=2) == 1.0
    assert ndcg_score([2, 3, 5], [0, 1, 2], k=2) == 1.0
    assert ndcg_from_ranking([5, 3, 2], [0, 1]) == 1.0
    # Check that sample order is irrelevant
    assert dcg_score([5, 3, 2], [2, 1, 0]) == dcg_score([2, 3, 5], [0, 1, 2])
    assert dcg_score([5, 3, 2], [2, 1, 0], k=2) == dcg_score([2, 3, 5], [0, 1, 2], k=2)
    # Check equivalence between two interfaces.
    assert dcg_score([5, 3, 2], [2, 1, 0]) == dcg_from_ranking([5, 3, 2], [0, 1, 2])
    assert dcg_score([1, 3, 2], [2, 1, 0]) == dcg_from_ranking([1, 3, 2], [0, 1, 2])
    assert dcg_score([1, 3, 2], [0, 2, 1]) == dcg_from_ranking([1, 3, 2], [1, 2, 0])
    assert ndcg_score([1, 3, 2], [2, 1, 0]) == ndcg_from_ranking([1, 3, 2], [0, 1, 2])
    assert dcg_score([5, 3, 2], [2, 1, 0], k=2) == dcg_from_ranking([5, 3, 2], [0, 1])
    assert dcg_score([1, 3, 2], [2, 1, 0], k=2) == dcg_from_ranking([1, 3, 2], [0, 1])
    assert dcg_score([1, 3, 2], [0, 2, 1], k=2) == dcg_from_ranking([1, 3, 2], [1, 2])
    assert ndcg_score([1, 3, 2], [2, 1, 0], k=2) == \
           ndcg_from_ranking([1, 3, 2], [0, 1])
    # Precision
    assert ranking_precision_score([1, 1, 0], [3, 2, 1], k=2) == 1.0
    assert ranking_precision_score([1, 1, 0], [1, 0, 0.5], k=2) == 0.5
    assert ranking_precision_score([1, 1, 0], [3, 2, 1], k=3) == \
           ranking_precision_score([1, 1, 0], [1, 0, 0.5], k=3)
    # Average precision
    # Cross-check the local implementation against scikit-learn's.
    from sklearn.metrics import average_precision_score as ap
    assert average_precision_score([1, 1, 0], [3, 2, 1]) == ap([1, 1, 0], [3, 2, 1])
assert average_precision_score([1, 1, 0], [3, 1, 0]) == ap([1, 1, 0], [3, 1, 0]) | [
"numpy.unique",
"sklearn.metrics.average_precision_score",
"numpy.memmap",
"numpy.asarray",
"os.path.join",
"numpy.take",
"numpy.sum",
"numpy.argsort"
] | [((2287, 2304), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (2296, 2304), True, 'import numpy as np\n'), ((2442, 2469), 'numpy.sum', 'np.sum', (['(y_true == pos_label)'], {}), '(y_true == pos_label)\n', (2448, 2469), True, 'import numpy as np\n'), ((2522, 2548), 'numpy.take', 'np.take', (['y_true', 'order[:k]'], {}), '(y_true, order[:k])\n', (2529, 2548), True, 'import numpy as np\n'), ((2566, 2593), 'numpy.sum', 'np.sum', (['(y_true == pos_label)'], {}), '(y_true == pos_label)\n', (2572, 2593), True, 'import numpy as np\n'), ((3109, 3126), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (3118, 3126), True, 'import numpy as np\n'), ((3264, 3291), 'numpy.sum', 'np.sum', (['(y_true == pos_label)'], {}), '(y_true == pos_label)\n', (3270, 3291), True, 'import numpy as np\n'), ((4360, 4386), 'numpy.take', 'np.take', (['y_true', 'order[:k]'], {}), '(y_true, order[:k])\n', (4367, 4386), True, 'import numpy as np\n'), ((4670, 4695), 'numpy.sum', 'np.sum', (['(gains / discounts)'], {}), '(gains / discounts)\n', (4676, 4695), True, 'import numpy as np\n'), ((5829, 5847), 'numpy.asarray', 'np.asarray', (['y_true'], {}), '(y_true)\n', (5839, 5847), True, 'import numpy as np\n'), ((5862, 5881), 'numpy.asarray', 'np.asarray', (['ranking'], {}), '(ranking)\n', (5872, 5881), True, 'import numpy as np\n'), ((5997, 6022), 'numpy.sum', 'np.sum', (['(gains / discounts)'], {}), '(gains / discounts)\n', (6003, 6022), True, 'import numpy as np\n'), ((283, 368), 'os.path.join', 'os.path.join', (["config['dataset']['data']", "config['dataset']['name']", '"""relevances"""'], {}), "(config['dataset']['data'], config['dataset']['name'], 'relevances'\n )\n", (295, 368), False, 'import os\n'), ((2483, 2502), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (2493, 2502), True, 'import numpy as np\n'), ((3360, 3378), 'numpy.asarray', 'np.asarray', (['y_true'], {}), '(y_true)\n', (3370, 3378), True, 'import numpy as np\n'), ((4321, 4340), 
'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (4331, 4340), True, 'import numpy as np\n'), ((6575, 6593), 'numpy.argsort', 'np.argsort', (['y_true'], {}), '(y_true)\n', (6585, 6593), True, 'import numpy as np\n'), ((8910, 8934), 'sklearn.metrics.average_precision_score', 'ap', (['[1, 1, 0]', '[3, 2, 1]'], {}), '([1, 1, 0], [3, 2, 1])\n', (8912, 8934), True, 'from sklearn.metrics import average_precision_score as ap\n'), ((8995, 9019), 'sklearn.metrics.average_precision_score', 'ap', (['[1, 1, 0]', '[3, 1, 0]'], {}), '([1, 1, 0], [3, 1, 0])\n', (8997, 9019), True, 'from sklearn.metrics import average_precision_score as ap\n'), ((648, 688), 'numpy.memmap', 'np.memmap', (['f'], {'dtype': 'np.float32', 'mode': '"""r"""'}), "(f, dtype=np.float32, mode='r')\n", (657, 688), True, 'import numpy as np\n'), ((3305, 3324), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (3315, 3324), True, 'import numpy as np\n')] |
"""
Copyright (c) 2016, Granular, Inc.
All rights reserved.
License: BSD 3-Clause ("BSD New" or "BSD Simplified")
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import codecs
from setuptools import setup, find_packages, Extension
# NOTE(review): pip.req was removed in pip >= 10, so this import fails under
# modern pip; it also appears unused in this file -- confirm before removing.
from pip.req import parse_requirements
import numpy
# Build the extension from the Cython source when Cython is installed;
# otherwise fall back to the pre-generated C translation.
try:
    from Cython.Distutils import build_ext
    #from Cython.Build import cythonize
    USE_CYTHON = True
    ext = ".pyx"
except ImportError as e:
    # NOTE(review): build_ext stays undefined on this path, yet it is still
    # referenced in cmdclass further down -- confirm the no-Cython build works.
    USE_CYTHON = False
    ext = ".c"
extensions = [
    Extension("pyspatial.spatiallib",
              ["pyspatial/spatiallib" + ext],
              extra_compile_args=["-g"],
              extra_link_args=["-g"],
              include_dirs = [numpy.get_include()]),
]
#if USE_CYTHON:
#    pass
    # extensions = cythonize(extensions)
# Read the Docs builds cannot install the heavy GIS stack (GDAL etc.),
# so install_requires is emptied there.
if os.environ.get('READTHEDOCS', False) == 'True':
    INSTALL_REQUIRES = []
else:
    # extensions = []
    INSTALL_REQUIRES = ['numpy', 'pandas', 'shapely', 'GDAL',
                        'scikit-image', 'RTree']
# Absolute path of the directory containing this setup.py.
rootpath = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Read and return the text of a file located relative to the repo root.

    Path components in *parts* are joined onto ``rootpath``.
    """
    # FIX: the original called codecs.open(...).read() without ever closing
    # the file handle; use a context manager so it is released promptly.
    with codecs.open(os.path.join(rootpath, *parts), 'r') as f:
        return f.read()
# Bundle the HTML/JS/CSS visualization templates with every package.
pkg_data = {'': ['templates/*.js',
                 'templates/*.html',
                 'templates/js/*.js',
                 'templates/html/*.html',
                 'templates/css/*.css']}
# PyPI long description: README followed by the changelog.
long_description = '{}\n{}'.format(read('README.md'), read('CHANGES.txt'))
setup(
    name="pyspatial",
    version='0.2.4',
    author="Granular, Inc",
    maintainer="<NAME>",
    description='Data structures for working with (geo)spatial data',
    license='BSD',
    url='https://github.com/granularag/pyspatial',
    ext_modules=extensions,
    packages=find_packages(),
    # NOTE(review): build_ext is only bound when the Cython import above
    # succeeded; without Cython this raises NameError -- confirm.
    cmdclass = {'build_ext': build_ext},
    package_data=pkg_data,
    long_description=long_description,
    install_requires=INSTALL_REQUIRES,
    classifiers=['Development Status :: 4 - Beta',
                 'Topic :: Scientific/Engineering :: GIS',
                 'License :: OSI Approved :: BSD License'],
    keywords=('spatial raster vector shapefile geojson data visualization '
              'pandas shapely gis geojson geographic geo')
)
| [
"setuptools.find_packages",
"os.path.join",
"os.environ.get",
"os.path.dirname",
"numpy.get_include"
] | [((2137, 2173), 'os.environ.get', 'os.environ.get', (['"""READTHEDOCS"""', '(False)'], {}), "('READTHEDOCS', False)\n", (2151, 2173), False, 'import os\n'), ((2378, 2403), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2393, 2403), False, 'import os\n'), ((3047, 3062), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (3060, 3062), False, 'from setuptools import setup, find_packages, Extension\n'), ((2043, 2062), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2060, 2062), False, 'import numpy\n'), ((2448, 2478), 'os.path.join', 'os.path.join', (['rootpath', '*parts'], {}), '(rootpath, *parts)\n', (2460, 2478), False, 'import os\n')] |
"""A module containing functions to run MCMC using emcee."""
import numpy as np
import emcee
from eztao.ts import carma_fit, neg_param_ll, neg_fcoeff_ll
from eztao.carma import CARMA_term
from celerite import GP
def mcmc(t, y, yerr, p, q, n_walkers=32, burn_in=500, n_samples=2000, init_param=None):
    """
    A simple wrapper to run quick MCMC using emcee.

    Args:
        t (array(float)): Time stamps of the input time series (the default unit is day).
        y (array(float)): y values of the input time series.
        yerr (array(float)): Measurement errors for y values.
        p (int): The p order of a CARMA(p, q) model.
        q (int): The q order of a CARMA(p, q) model.
        n_walkers (int, optional): Number of MCMC walkers. Defaults to 32.
        burn_in (int, optional): Number of burn in steps. Defaults to 500.
        n_samples (int, optional): Number of MCMC steps to run. Defaults to 2000.
        init_param (array(float), optional): Initial CARMA parameters (p AR
            coefficients followed by q+1 MA coefficients) used to seed the
            walkers. Defaults to None, in which case a best fit is searched.

    Returns:
        (object, array(float), array(float)): The emcee sampler object. The MCMC
        flatchain (n_walkers*n_samplers, dim) and chain (n_walkers, n_samplers, dim)
        in CARMA space if p > 2, otherwise empty.
    """
    assert p > q, "p order must be greater than q order."

    # FIX: the original condition was `init_param is not None and p <= 2`, so a
    # user-supplied init_param was silently discarded (and re-fit) whenever
    # p > 2. Validate and honor it for any order instead.
    if init_param is not None:
        assert len(init_param) == int(
            p + q + 1
        ), "The initial parameters doesn't match the dimension of the CARMA model!"
    else:
        print("Searching for best-fit CARMA parameters...")
        init_param = carma_fit(t, y, yerr, p, q, n_opt=200)

    # Sample in factored-coefficient space for p > 2 (better conditioned),
    # otherwise directly in CARMA parameter space.
    if p > 2:
        ll = lambda *args: -neg_fcoeff_ll(*args)
        init_sample = CARMA_term.carma2fcoeffs_log(
            np.log(init_param[:p]), np.log(init_param[p:])
        )
        init_sample = np.exp(init_sample)
    else:
        ll = lambda *args: -neg_param_ll(*args)
        init_sample = init_param

    # Vectorized converter from factored coefficients back to log CARMA params.
    vec_fcoeff2carma_log = np.vectorize(
        CARMA_term.fcoeffs2carma_log,
        excluded=[
            1,
        ],
        signature="(n)->(m),(k)",
    )

    # Reposition the time series: start at t=0 and remove the median level.
    t = t - t[0]
    y = y - np.median(y)

    # Init celerite kernel/GP and factorize for the given time stamps/errors.
    kernel = CARMA_term(np.log(init_param[:p]), np.log(init_param[p:]))
    gp = GP(kernel, mean=0)
    gp.compute(t, yerr)

    # Init sampler; walkers start in a tiny Gaussian ball around the seed.
    ndim = len(init_sample)
    sampler = emcee.EnsembleSampler(n_walkers, ndim, ll, args=[y, gp])
    print("Running burn-in...")
    p0 = np.log(init_sample) + 1e-8 * np.random.randn(n_walkers, ndim)
    p0, lp, _ = sampler.run_mcmc(p0, burn_in)
    print("Running production...")
    sampler.reset()
    sampler.run_mcmc(p0, n_samples)

    # For p > 2 the chain lives in factored-coefficient space; map it back to
    # CARMA space for the caller. Otherwise return empty arrays.
    carma_flatchain = np.array([])
    carma_chain = np.array([])
    if p > 2:
        logAR, logMA = vec_fcoeff2carma_log(sampler.flatchain, p)
        carma = np.hstack((logAR, logMA))
        carma_flatchain = carma
        carma_chain = carma.reshape((n_walkers, n_samples, ndim), order="F")

    return sampler, carma_flatchain, carma_chain
| [
"eztao.ts.neg_fcoeff_ll",
"numpy.median",
"numpy.hstack",
"celerite.GP",
"numpy.log",
"emcee.EnsembleSampler",
"numpy.exp",
"numpy.array",
"numpy.random.randn",
"eztao.ts.neg_param_ll",
"numpy.vectorize",
"eztao.ts.carma_fit"
] | [((2065, 2152), 'numpy.vectorize', 'np.vectorize', (['CARMA_term.fcoeffs2carma_log'], {'excluded': '[1]', 'signature': '"""(n)->(m),(k)"""'}), "(CARMA_term.fcoeffs2carma_log, excluded=[1], signature=\n '(n)->(m),(k)')\n", (2077, 2152), True, 'import numpy as np\n'), ((2377, 2395), 'celerite.GP', 'GP', (['kernel'], {'mean': '(0)'}), '(kernel, mean=0)\n', (2379, 2395), False, 'from celerite import GP\n'), ((2482, 2538), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['n_walkers', 'ndim', 'll'], {'args': '[y, gp]'}), '(n_walkers, ndim, ll, args=[y, gp])\n', (2503, 2538), False, 'import emcee\n'), ((2804, 2816), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2812, 2816), True, 'import numpy as np\n'), ((2835, 2847), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2843, 2847), True, 'import numpy as np\n'), ((1617, 1655), 'eztao.ts.carma_fit', 'carma_fit', (['t', 'y', 'yerr', 'p', 'q'], {'n_opt': '(200)'}), '(t, y, yerr, p, q, n_opt=200)\n', (1626, 1655), False, 'from eztao.ts import carma_fit, neg_param_ll, neg_fcoeff_ll\n'), ((1892, 1911), 'numpy.exp', 'np.exp', (['init_sample'], {}), '(init_sample)\n', (1898, 1911), True, 'import numpy as np\n'), ((2252, 2264), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (2261, 2264), True, 'import numpy as np\n'), ((2320, 2342), 'numpy.log', 'np.log', (['init_param[:p]'], {}), '(init_param[:p])\n', (2326, 2342), True, 'import numpy as np\n'), ((2344, 2366), 'numpy.log', 'np.log', (['init_param[p:]'], {}), '(init_param[p:])\n', (2350, 2366), True, 'import numpy as np\n'), ((2581, 2600), 'numpy.log', 'np.log', (['init_sample'], {}), '(init_sample)\n', (2587, 2600), True, 'import numpy as np\n'), ((2945, 2970), 'numpy.hstack', 'np.hstack', (['(logAR, logMA)'], {}), '((logAR, logMA))\n', (2954, 2970), True, 'import numpy as np\n'), ((1813, 1835), 'numpy.log', 'np.log', (['init_param[:p]'], {}), '(init_param[:p])\n', (1819, 1835), True, 'import numpy as np\n'), ((1837, 1859), 'numpy.log', 'np.log', 
(['init_param[p:]'], {}), '(init_param[p:])\n', (1843, 1859), True, 'import numpy as np\n'), ((2610, 2642), 'numpy.random.randn', 'np.random.randn', (['n_walkers', 'ndim'], {}), '(n_walkers, ndim)\n', (2625, 2642), True, 'import numpy as np\n'), ((1728, 1748), 'eztao.ts.neg_fcoeff_ll', 'neg_fcoeff_ll', (['*args'], {}), '(*args)\n', (1741, 1748), False, 'from eztao.ts import carma_fit, neg_param_ll, neg_fcoeff_ll\n'), ((1950, 1969), 'eztao.ts.neg_param_ll', 'neg_param_ll', (['*args'], {}), '(*args)\n', (1962, 1969), False, 'from eztao.ts import carma_fit, neg_param_ll, neg_fcoeff_ll\n')] |
import os
import numpy as np
import torch
from torch.utils.data import DataLoader,TensorDataset
DATA_DIR="./dataset"
# Trial IDs look like CMU MoCap subject-35 clips: 35_01..35_16 walking and
# 35_17..35_26 running -- TODO confirm against the dataset layout.
WALK=["35_01","35_02","35_03","35_04","35_05","35_06","35_07","35_08","35_09","35_10",
      "35_11","35_12","35_13","35_14","35_15","35_16"]
RUN=["35_17","35_18","35_19","35_20","35_21","35_22","35_23","35_24","35_25","35_26"]
# Sliding-window parameters: window length in frames, and the step between
# consecutive window starts for the train and test slicing respectively.
WINDOW=64
STRIDE_TRAIN=5
STRIDE_TEST=20
def normalize(seq):
    '''
    Linearly rescale seq (a numpy array) to [-1, 1].

    :param seq: numpy array of values to rescale.
    :return: rescaled array; a constant sequence maps to all zeros.
    '''
    lo, hi = np.min(seq), np.max(seq)
    # FIX: the original divided by (max - min) unconditionally, producing
    # nan / a divide warning for constant sequences. Map those to 0.
    if hi == lo:
        return np.zeros_like(seq, dtype=float)
    return 2 * (seq - lo) / (hi - lo) - 1
def read_mocap_file(file_path):
    """Load a space-separated mocap file and normalize each channel to [-1, 1].

    Each line of the file is one frame; each column is one channel.
    Returns a numpy array of shape (n_frames, n_channels).
    """
    with open(file_path, "r") as f:
        frames = [[float(value) for value in line.strip().split(" ")]
                  for line in f.readlines()]
    ts = np.array(frames)
    # Normalize channel by channel, in place.
    for col in range(ts.shape[1]):
        ts[:, col] = normalize(ts[:, col])
    return ts
def stat_data():
    """Print cumulative frame counts: after all walk clips, after adding the
    run clips, and after adding the extra jump clip ("49_02"), in that order."""
    total = 0
    for subdir, trial_ids in (("walk", WALK), ("run", RUN)):
        for trial in trial_ids:
            clip = read_mocap_file(os.path.join(DATA_DIR, subdir, trial + ".amc.4d"))
            total += clip.shape[0]
        print(total)
    extra = read_mocap_file(os.path.join(DATA_DIR, "other", "49_02.amc.4d"))
    total += extra.shape[0]
    print(total)
def get_from_one(file_path, train=True):
    """Slice one mocap time series into overlapping windows.

    :param file_path: path of the clip to load via read_mocap_file.
    :param train: selects STRIDE_TRAIN (True) or STRIDE_TEST (False).
    :return: numpy array of shape (n_windows, WINDOW, n_channels).
    """
    ts = read_mocap_file(file_path)
    length = ts.shape[0]
    stride = STRIDE_TRAIN if train else STRIDE_TEST
    # Keep only starts whose full window fits strictly inside the clip
    # (start + WINDOW == length is excluded, matching the count check below).
    windows = [ts[start:start + WINDOW, :]
               for start in range(0, length, stride)
               if start + WINDOW < length]
    assert len(windows) == np.ceil((length - WINDOW) / stride)
    return np.array(windows)
def load_data():
    """Build train/test DataLoaders from the mocap windows.

    Train: windows from all but the last two walk clips (normal only).
    Test: windows from the last two walk clips (label 0) plus all run clips
    and one jump clip (label 1), i.e. anomaly detection labels.
    Returns {"train": DataLoader, "test": DataLoader} over tensors shaped
    (n_windows, n_channels, WINDOW).
    """
    batchsize=64
    train_x=None
    for walk in WALK[:-2]:
        ts=get_from_one(os.path.join(DATA_DIR,"walk",walk+".amc.4d"),train=True)
        if train_x is None:
            train_x=ts
        else:
            train_x=np.concatenate([train_x,ts])
    # Training data is all "normal" (walking), hence all-zero labels.
    train_y=np.zeros([train_x.shape[0],1])
    test_x=None
    normal_test_cnt=0
    for walk in WALK[-2:]:
        ts = get_from_one(os.path.join(DATA_DIR, "walk", walk + ".amc.4d"), train=True)
        if test_x is None:
            test_x=ts
        else:
            test_x = np.concatenate([test_x, ts])
        normal_test_cnt+=ts.shape[0]
    for run in RUN[:]:
        ts = get_from_one(os.path.join(DATA_DIR, "run", run + ".amc.4d"), train=True)
        test_x = np.concatenate([test_x, ts])
    # add jump test data for experiment
    ts = get_from_one(os.path.join(DATA_DIR,"other","49_02.amc.4d"),train=True)
    test_x = np.concatenate([test_x, ts])
    # First normal_test_cnt test windows are walking (label 0); the rest
    # (running and jumping) are anomalies (label 1).
    test_y=np.ones([test_x.shape[0],1])
    test_y[:normal_test_cnt,:]=0
    # Swap to channels-first layout: (n, WINDOW, channels) -> (n, channels, WINDOW).
    train_x=np.transpose(train_x,(0,2,1))
    test_x=np.transpose(test_x, (0, 2, 1))
    print(train_x.shape)
    print(test_x.shape)
    # print(normal_test_cnt)
    # print(test_y)
    train_dataset = TensorDataset(torch.Tensor(train_x), torch.Tensor(train_y))
    test_dataset = TensorDataset(torch.Tensor(test_x), torch.Tensor(test_y))
    dataloader = {"train": DataLoader(
        dataset=train_dataset,  # torch TensorDataset format
        batch_size=batchsize,  # mini batch size
        shuffle=True,
        num_workers=0,
        drop_last=False),
        "test": DataLoader(
            dataset=test_dataset,  # torch TensorDataset format
            batch_size=batchsize,  # mini batch size
            shuffle=True,
            num_workers=0,
            drop_last=False),
    }
    return dataloader
def load_for_pic(ts_type="run"):
    """Load a walk clip plus a comparison clip for plotting.

    :param ts_type: "run" (a full run clip) or "jump" (frames 600:750 of the
        jump clip "49_02").
    :return: (walk_ts, ret_ts), both transposed to (channels, time).
    :raises Exception: on any other ts_type value.
    """
    walk_ts = read_mocap_file(os.path.join(DATA_DIR, "walk", WALK[-1] + ".amc.4d"))
    walk_ts = np.transpose(walk_ts)
    if ts_type=="run":
        run_ts = read_mocap_file(os.path.join(DATA_DIR, "run", RUN[1] + ".amc.4d"))
        run_ts = np.transpose(run_ts)
        ret_ts=run_ts
    elif ts_type=="jump":
        jump_ts=read_mocap_file(os.path.join(DATA_DIR, "other", "49_02.amc.4d"))
        jump_ts = np.transpose(jump_ts)
        # Frame range 600:750 presumably isolates the jump motion -- TODO confirm.
        ret_ts=jump_ts[:,600:750] #jump
        # ret_ts=jump_ts[:1500,1650] #hop
    else:
        raise Exception("ts type error!!!")
    return walk_ts,ret_ts
if __name__ == '__main__':
    # Script entry point: currently only prints dataset frame counts.
    # get_from_one(os.path.join(DATA_DIR,"run",RUN[0]+".amc.4d"))
    # load_data()
    stat_data()
    # ts1,ts2=load_for_pic(ts_type="jump")
    # print(ts1.shape)
    # print(ts2.shape)
| [
"numpy.ceil",
"numpy.ones",
"os.path.join",
"torch.Tensor",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"numpy.transpose",
"numpy.arange"
] | [((773, 793), 'numpy.array', 'np.array', (['timeseries'], {}), '(timeseries)\n', (781, 793), True, 'import numpy as np\n'), ((1318, 1365), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""other"""', '"""49_02.amc.4d"""'], {}), "(DATA_DIR, 'other', '49_02.amc.4d')\n", (1330, 1365), False, 'import os\n'), ((1622, 1653), 'numpy.arange', 'np.arange', (['(0)', 'ts_length', 'stride'], {}), '(0, ts_length, stride)\n', (1631, 1653), True, 'import numpy as np\n'), ((1920, 1937), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (1928, 1937), True, 'import numpy as np\n'), ((2226, 2257), 'numpy.zeros', 'np.zeros', (['[train_x.shape[0], 1]'], {}), '([train_x.shape[0], 1])\n', (2234, 2257), True, 'import numpy as np\n'), ((2853, 2881), 'numpy.concatenate', 'np.concatenate', (['[test_x, ts]'], {}), '([test_x, ts])\n', (2867, 2881), True, 'import numpy as np\n'), ((2894, 2923), 'numpy.ones', 'np.ones', (['[test_x.shape[0], 1]'], {}), '([test_x.shape[0], 1])\n', (2901, 2923), True, 'import numpy as np\n'), ((2969, 3001), 'numpy.transpose', 'np.transpose', (['train_x', '(0, 2, 1)'], {}), '(train_x, (0, 2, 1))\n', (2981, 3001), True, 'import numpy as np\n'), ((3010, 3041), 'numpy.transpose', 'np.transpose', (['test_x', '(0, 2, 1)'], {}), '(test_x, (0, 2, 1))\n', (3022, 3041), True, 'import numpy as np\n'), ((3910, 3931), 'numpy.transpose', 'np.transpose', (['walk_ts'], {}), '(walk_ts)\n', (3922, 3931), True, 'import numpy as np\n'), ((973, 1021), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""walk"""', "(walk + '.amc.4d')"], {}), "(DATA_DIR, 'walk', walk + '.amc.4d')\n", (985, 1021), False, 'import os\n'), ((1157, 1203), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""run"""', "(run + '.amc.4d')"], {}), "(DATA_DIR, 'run', run + '.amc.4d')\n", (1169, 1203), False, 'import os\n'), ((1872, 1910), 'numpy.ceil', 'np.ceil', (['((ts_length - WINDOW) / stride)'], {}), '((ts_length - WINDOW) / stride)\n', (1879, 1910), True, 'import numpy as np\n'), ((2690, 2718), 
'numpy.concatenate', 'np.concatenate', (['[test_x, ts]'], {}), '([test_x, ts])\n', (2704, 2718), True, 'import numpy as np\n'), ((2782, 2829), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""other"""', '"""49_02.amc.4d"""'], {}), "(DATA_DIR, 'other', '49_02.amc.4d')\n", (2794, 2829), False, 'import os\n'), ((3175, 3196), 'torch.Tensor', 'torch.Tensor', (['train_x'], {}), '(train_x)\n', (3187, 3196), False, 'import torch\n'), ((3198, 3219), 'torch.Tensor', 'torch.Tensor', (['train_y'], {}), '(train_y)\n', (3210, 3219), False, 'import torch\n'), ((3255, 3275), 'torch.Tensor', 'torch.Tensor', (['test_x'], {}), '(test_x)\n', (3267, 3275), False, 'import torch\n'), ((3277, 3297), 'torch.Tensor', 'torch.Tensor', (['test_y'], {}), '(test_y)\n', (3289, 3297), False, 'import torch\n'), ((3328, 3433), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batchsize', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(False)'}), '(dataset=train_dataset, batch_size=batchsize, shuffle=True,\n num_workers=0, drop_last=False)\n', (3338, 3433), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3537, 3641), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batchsize', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(False)'}), '(dataset=test_dataset, batch_size=batchsize, shuffle=True,\n num_workers=0, drop_last=False)\n', (3547, 3641), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3842, 3894), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""walk"""', "(WALK[-1] + '.amc.4d')"], {}), "(DATA_DIR, 'walk', WALK[-1] + '.amc.4d')\n", (3854, 3894), False, 'import os\n'), ((4057, 4077), 'numpy.transpose', 'np.transpose', (['run_ts'], {}), '(run_ts)\n', (4069, 4077), True, 'import numpy as np\n'), ((2042, 2090), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""walk"""', "(walk + '.amc.4d')"], {}), "(DATA_DIR, 'walk', walk + '.amc.4d')\n", (2054, 
2090), False, 'import os\n'), ((2184, 2213), 'numpy.concatenate', 'np.concatenate', (['[train_x, ts]'], {}), '([train_x, ts])\n', (2198, 2213), True, 'import numpy as np\n'), ((2351, 2399), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""walk"""', "(walk + '.amc.4d')"], {}), "(DATA_DIR, 'walk', walk + '.amc.4d')\n", (2363, 2399), False, 'import os\n'), ((2497, 2525), 'numpy.concatenate', 'np.concatenate', (['[test_x, ts]'], {}), '([test_x, ts])\n', (2511, 2525), True, 'import numpy as np\n'), ((2613, 2659), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""run"""', "(run + '.amc.4d')"], {}), "(DATA_DIR, 'run', run + '.amc.4d')\n", (2625, 2659), False, 'import os\n'), ((3989, 4038), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""run"""', "(RUN[1] + '.amc.4d')"], {}), "(DATA_DIR, 'run', RUN[1] + '.amc.4d')\n", (4001, 4038), False, 'import os\n'), ((4225, 4246), 'numpy.transpose', 'np.transpose', (['jump_ts'], {}), '(jump_ts)\n', (4237, 4246), True, 'import numpy as np\n'), ((517, 528), 'numpy.max', 'np.max', (['seq'], {}), '(seq)\n', (523, 528), True, 'import numpy as np\n'), ((529, 540), 'numpy.min', 'np.min', (['seq'], {}), '(seq)\n', (535, 540), True, 'import numpy as np\n'), ((4158, 4205), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""other"""', '"""49_02.amc.4d"""'], {}), "(DATA_DIR, 'other', '49_02.amc.4d')\n", (4170, 4205), False, 'import os\n'), ((503, 514), 'numpy.min', 'np.min', (['seq'], {}), '(seq)\n', (509, 514), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 17:58:52 2018
@author: Zhaoyi.Shen
"""
import sys
sys.path.append('/home/z1s/py/lib/')
from signal_processing import lfca
import numpy as np
import scipy as sp
from scipy import io
from matplotlib import pyplot as plt
# --- Load ERSST sea-surface temperatures (1900-2016) from a .mat file. -------
filename = '/home/z1s/research/climate/LFCA_example/ERSST_1900_2016.mat'
mat = io.loadmat(filename)
lat_axis = mat['LAT_AXIS']
lon_axis = mat['LON_AXIS']
# SST is indexed (lon, lat, time), per the shape reads below.
sst = mat['SST']
nlon = sst.shape[0]
nlat = sst.shape[1]
ntime = sst.shape[2]
# Monthly time axis in fractional years (note: `time` is never used below).
time = np.arange(1900,2016.99,1/12.)
# LFCA parameters: low-pass cutoff (120 samples = 10 years of monthly data,
# presumably) and the EOF truncation order -- TODO confirm units.
cutoff = 120
truncation = 30
#%%
# Remove the mean seasonal cycle: average each calendar month (every 12th
# sample) over all years and subtract it to form anomalies.
mean_seasonal_cycle = np.zeros((nlon,nlat,12))
sst_anomalies = np.zeros((nlon,nlat,ntime))
for i in range(12):
    mean_seasonal_cycle[...,i] = np.nanmean(sst[...,i:ntime:12],-1)
    sst_anomalies[...,i:ntime:12] = sst[...,i:ntime:12] - mean_seasonal_cycle[...,i][...,np.newaxis]
#%%
s = sst_anomalies.shape
# Area weights proportional to cos(latitude); zero out grid points whose
# anomaly time mean is NaN (land / missing data).
y, x = np.meshgrid(lat_axis,lon_axis)
area = np.cos(y*np.pi/180.)
area[np.where(np.isnan(np.mean(sst_anomalies,-1)))] = 0
#%%
# Hand-drawn lon/lat mask restricting the analysis to the study basin
# (the bounds suggest the Pacific, excluding marginal seas and latitudes
# poleward of ~70N / 45S -- TODO confirm).
domain = np.ones(area.shape)
domain[np.where(x<100)] = 0
domain[np.where((x<103) & (y<5))] = 0
domain[np.where((x<105) & (y<2))] = 0
domain[np.where((x<111) & (y<-6))] = 0
domain[np.where((x<114) & (y<-7))] = 0
domain[np.where((x<127) & (y<-8))] = 0
domain[np.where((x<147) & (y<-18))] = 0
domain[np.where(y>70)] = 0
domain[np.where((y>65) & ((x<175) | (x>200)))] = 0
domain[np.where(y<-45)] = 0
domain[np.where((x>260) & (y>17))] = 0
domain[np.where((x>270) & (y<=17) & (y>14))] = 0
domain[np.where((x>276) & (y<=14) & (y>9))] = 0
domain[np.where((x>290) & (y<=9))] = 0
#%%
# Flatten the (lon, lat, time) cube to a (time, space) matrix and drop the
# columns masked out by the area weights or the domain mask.
order = 'C'
x = np.transpose(np.reshape(sst_anomalies,(s[0]*s[1],s[2]),order=order))
area_weights = np.transpose(np.reshape(area,(s[0]*s[1],1),order=order))
domain = np.transpose(np.reshape(domain,(s[0]*s[1],1),order=order))
# Columns kept for the analysis vs. columns discarded (restored as NaN later).
icol_ret = np.where((area_weights!=0) & (domain!=0))
icol_disc = np.where((area_weights==0) | (domain==0))
x = x[:,icol_ret[1]]
area_weights = area_weights[:,icol_ret[1]]
# Normalized area weights; lfca expects their square root as the scaling.
normvec = np.transpose(area_weights)/np.sum(area_weights)
scale = np.sqrt(normvec)
#%%
# Low-frequency component analysis of the masked anomaly matrix.
lfcs, lfps, weights, r, pvar, pcs, eofs, ntr, pvar_slow, pvar_lfc, r_eofs, pvar_slow_eofs = \
    lfca(x, cutoff, truncation, scale)
#%%
# Re-insert the discarded columns as NaN so patterns map back onto the
# full lon/lat grid.
nins = np.size(icol_disc[1])
nrows = lfps.shape[0]
lfps_aug = np.zeros((nrows,lfps.shape[1]+nins))
lfps_aug[:] = np.nan
lfps_aug[:,icol_ret[1]] = lfps
nrows = eofs.shape[0]
eofs_aug = np.zeros((nrows,eofs.shape[1]+nins))
eofs_aug[:] = np.nan
eofs_aug[:,icol_ret[1]] = eofs
#%%
# Plot the first low-frequency pattern and its time series.
s1 = np.size(lon_axis)
s2 = np.size(lat_axis)
i = 0
pattern = np.reshape(lfps_aug[i,...],(s1,s2),order=order)
# Mask numerically blown-up values before contouring.
pattern[np.where(np.abs(pattern)>1.e5)] = np.nan
plt.figure()
plt.contourf(np.squeeze(lon_axis),np.squeeze(lat_axis),np.transpose(pattern),\
             np.arange(-1,1.1,0.1),cmap=plt.cm.RdYlBu_r)
plt.figure()
plt.plot(lfcs[:,i]) | [
"numpy.sqrt",
"scipy.io.loadmat",
"numpy.nanmean",
"sys.path.append",
"numpy.arange",
"numpy.mean",
"numpy.reshape",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.meshgrid",
"numpy.abs",
"numpy.ones",
"numpy.size",
"numpy.squeeze",
"numpy.cos",
"numpy.transpose",
"numpy.sum",
"nu... | [((125, 161), 'sys.path.append', 'sys.path.append', (['"""/home/z1s/py/lib/"""'], {}), "('/home/z1s/py/lib/')\n", (140, 161), False, 'import sys\n'), ((373, 393), 'scipy.io.loadmat', 'io.loadmat', (['filename'], {}), '(filename)\n', (383, 393), False, 'from scipy import io\n'), ((534, 568), 'numpy.arange', 'np.arange', (['(1900)', '(2016.99)', '(1 / 12.0)'], {}), '(1900, 2016.99, 1 / 12.0)\n', (543, 568), True, 'import numpy as np\n'), ((620, 646), 'numpy.zeros', 'np.zeros', (['(nlon, nlat, 12)'], {}), '((nlon, nlat, 12))\n', (628, 646), True, 'import numpy as np\n'), ((661, 690), 'numpy.zeros', 'np.zeros', (['(nlon, nlat, ntime)'], {}), '((nlon, nlat, ntime))\n', (669, 690), True, 'import numpy as np\n'), ((913, 944), 'numpy.meshgrid', 'np.meshgrid', (['lat_axis', 'lon_axis'], {}), '(lat_axis, lon_axis)\n', (924, 944), True, 'import numpy as np\n'), ((951, 976), 'numpy.cos', 'np.cos', (['(y * np.pi / 180.0)'], {}), '(y * np.pi / 180.0)\n', (957, 976), True, 'import numpy as np\n'), ((1041, 1060), 'numpy.ones', 'np.ones', (['area.shape'], {}), '(area.shape)\n', (1048, 1060), True, 'import numpy as np\n'), ((1849, 1894), 'numpy.where', 'np.where', (['((area_weights != 0) & (domain != 0))'], {}), '((area_weights != 0) & (domain != 0))\n', (1857, 1894), True, 'import numpy as np\n'), ((1903, 1948), 'numpy.where', 'np.where', (['((area_weights == 0) | (domain == 0))'], {}), '((area_weights == 0) | (domain == 0))\n', (1911, 1948), True, 'import numpy as np\n'), ((2075, 2091), 'numpy.sqrt', 'np.sqrt', (['normvec'], {}), '(normvec)\n', (2082, 2091), True, 'import numpy as np\n'), ((2190, 2224), 'signal_processing.lfca', 'lfca', (['x', 'cutoff', 'truncation', 'scale'], {}), '(x, cutoff, truncation, scale)\n', (2194, 2224), False, 'from signal_processing import lfca\n'), ((2236, 2257), 'numpy.size', 'np.size', (['icol_disc[1]'], {}), '(icol_disc[1])\n', (2243, 2257), True, 'import numpy as np\n'), ((2291, 2330), 'numpy.zeros', 'np.zeros', (['(nrows, lfps.shape[1] + 
nins)'], {}), '((nrows, lfps.shape[1] + nins))\n', (2299, 2330), True, 'import numpy as np\n'), ((2413, 2452), 'numpy.zeros', 'np.zeros', (['(nrows, eofs.shape[1] + nins)'], {}), '((nrows, eofs.shape[1] + nins))\n', (2421, 2452), True, 'import numpy as np\n'), ((2511, 2528), 'numpy.size', 'np.size', (['lon_axis'], {}), '(lon_axis)\n', (2518, 2528), True, 'import numpy as np\n'), ((2534, 2551), 'numpy.size', 'np.size', (['lat_axis'], {}), '(lat_axis)\n', (2541, 2551), True, 'import numpy as np\n'), ((2568, 2619), 'numpy.reshape', 'np.reshape', (['lfps_aug[i, ...]', '(s1, s2)'], {'order': 'order'}), '(lfps_aug[i, ...], (s1, s2), order=order)\n', (2578, 2619), True, 'import numpy as np\n'), ((2665, 2677), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2675, 2677), True, 'from matplotlib import pyplot as plt\n'), ((2814, 2826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2824, 2826), True, 'from matplotlib import pyplot as plt\n'), ((2827, 2847), 'matplotlib.pyplot.plot', 'plt.plot', (['lfcs[:, i]'], {}), '(lfcs[:, i])\n', (2835, 2847), True, 'from matplotlib import pyplot as plt\n'), ((742, 778), 'numpy.nanmean', 'np.nanmean', (['sst[..., i:ntime:12]', '(-1)'], {}), '(sst[..., i:ntime:12], -1)\n', (752, 778), True, 'import numpy as np\n'), ((1068, 1085), 'numpy.where', 'np.where', (['(x < 100)'], {}), '(x < 100)\n', (1076, 1085), True, 'import numpy as np\n'), ((1096, 1125), 'numpy.where', 'np.where', (['((x < 103) & (y < 5))'], {}), '((x < 103) & (y < 5))\n', (1104, 1125), True, 'import numpy as np\n'), ((1134, 1163), 'numpy.where', 'np.where', (['((x < 105) & (y < 2))'], {}), '((x < 105) & (y < 2))\n', (1142, 1163), True, 'import numpy as np\n'), ((1172, 1202), 'numpy.where', 'np.where', (['((x < 111) & (y < -6))'], {}), '((x < 111) & (y < -6))\n', (1180, 1202), True, 'import numpy as np\n'), ((1211, 1241), 'numpy.where', 'np.where', (['((x < 114) & (y < -7))'], {}), '((x < 114) & (y < -7))\n', (1219, 1241), True, 'import numpy as 
np\n'), ((1250, 1280), 'numpy.where', 'np.where', (['((x < 127) & (y < -8))'], {}), '((x < 127) & (y < -8))\n', (1258, 1280), True, 'import numpy as np\n'), ((1289, 1320), 'numpy.where', 'np.where', (['((x < 147) & (y < -18))'], {}), '((x < 147) & (y < -18))\n', (1297, 1320), True, 'import numpy as np\n'), ((1335, 1351), 'numpy.where', 'np.where', (['(y > 70)'], {}), '(y > 70)\n', (1343, 1351), True, 'import numpy as np\n'), ((1362, 1406), 'numpy.where', 'np.where', (['((y > 65) & ((x < 175) | (x > 200)))'], {}), '((y > 65) & ((x < 175) | (x > 200)))\n', (1370, 1406), True, 'import numpy as np\n'), ((1413, 1430), 'numpy.where', 'np.where', (['(y < -45)'], {}), '(y < -45)\n', (1421, 1430), True, 'import numpy as np\n'), ((1441, 1471), 'numpy.where', 'np.where', (['((x > 260) & (y > 17))'], {}), '((x > 260) & (y > 17))\n', (1449, 1471), True, 'import numpy as np\n'), ((1480, 1522), 'numpy.where', 'np.where', (['((x > 270) & (y <= 17) & (y > 14))'], {}), '((x > 270) & (y <= 17) & (y > 14))\n', (1488, 1522), True, 'import numpy as np\n'), ((1529, 1570), 'numpy.where', 'np.where', (['((x > 276) & (y <= 14) & (y > 9))'], {}), '((x > 276) & (y <= 14) & (y > 9))\n', (1537, 1570), True, 'import numpy as np\n'), ((1577, 1607), 'numpy.where', 'np.where', (['((x > 290) & (y <= 9))'], {}), '((x > 290) & (y <= 9))\n', (1585, 1607), True, 'import numpy as np\n'), ((1642, 1701), 'numpy.reshape', 'np.reshape', (['sst_anomalies', '(s[0] * s[1], s[2])'], {'order': 'order'}), '(sst_anomalies, (s[0] * s[1], s[2]), order=order)\n', (1652, 1701), True, 'import numpy as np\n'), ((1726, 1773), 'numpy.reshape', 'np.reshape', (['area', '(s[0] * s[1], 1)'], {'order': 'order'}), '(area, (s[0] * s[1], 1), order=order)\n', (1736, 1773), True, 'import numpy as np\n'), ((1792, 1841), 'numpy.reshape', 'np.reshape', (['domain', '(s[0] * s[1], 1)'], {'order': 'order'}), '(domain, (s[0] * s[1], 1), order=order)\n', (1802, 1841), True, 'import numpy as np\n'), ((2019, 2045), 'numpy.transpose', 
'np.transpose', (['area_weights'], {}), '(area_weights)\n', (2031, 2045), True, 'import numpy as np\n'), ((2046, 2066), 'numpy.sum', 'np.sum', (['area_weights'], {}), '(area_weights)\n', (2052, 2066), True, 'import numpy as np\n'), ((2691, 2711), 'numpy.squeeze', 'np.squeeze', (['lon_axis'], {}), '(lon_axis)\n', (2701, 2711), True, 'import numpy as np\n'), ((2712, 2732), 'numpy.squeeze', 'np.squeeze', (['lat_axis'], {}), '(lat_axis)\n', (2722, 2732), True, 'import numpy as np\n'), ((2733, 2754), 'numpy.transpose', 'np.transpose', (['pattern'], {}), '(pattern)\n', (2745, 2754), True, 'import numpy as np\n'), ((2770, 2793), 'numpy.arange', 'np.arange', (['(-1)', '(1.1)', '(0.1)'], {}), '(-1, 1.1, 0.1)\n', (2779, 2793), True, 'import numpy as np\n'), ((995, 1021), 'numpy.mean', 'np.mean', (['sst_anomalies', '(-1)'], {}), '(sst_anomalies, -1)\n', (1002, 1021), True, 'import numpy as np\n'), ((2633, 2648), 'numpy.abs', 'np.abs', (['pattern'], {}), '(pattern)\n', (2639, 2648), True, 'import numpy as np\n')] |
import torch
import numpy as np
from onnx import numpy_helper
from thop.vision.basic_hooks import zero_ops
from .counter import counter_matmul, counter_zero_ops,\
counter_conv, counter_mul, counter_norm, counter_pow,\
counter_sqrt, counter_div, counter_softmax, counter_avgpool
def onnx_counter_matmul(diction, node):
    """Count MACs for an ONNX MatMul node and derive its output shape.

    `diction` maps tensor names to shapes; returns
    (macs, output_size, output_name).
    """
    # Operand shapes, looked up by input tensor name.
    shape_a = diction[node.input[0]]
    shape_b = diction[node.input[1]]
    # The result keeps A's leading dims and takes B's last dim.
    out_size = np.append(shape_a[0:-1], shape_b[-1])
    macs = counter_matmul(shape_a, out_size[-2:])
    return macs, out_size, node.output[0]
def onnx_counter_add(diction, node):
    """Count ops for an ONNX Add node (zero MACs) and pick its output shape."""
    first = diction[node.input[0]]
    second = diction[node.input[1]]
    # Broadcasting: the output takes whichever operand shape has more elements.
    out_size = second if np.array(second).size >= np.array(first).size else first
    return counter_zero_ops(), out_size, node.output[0]
def onnx_counter_conv(diction, node):
    """Count MACs for an ONNX Conv node and derive its output shape.

    `diction` maps tensor names to shapes. Returns
    (macs, output_size, output_name).
    """
    # print(node)
    # bias,kernelsize,outputsize
    # A third input tensor means the convolution has a bias term.
    dim_bias = 0
    input_count = 0
    for i in node.input:
        input_count += 1
    if (input_count == 3):
        dim_bias = 1
        dim_weight = diction[node.input[1]]
    else:
        dim_weight = diction[node.input[1]]
    # Collect the convolution hyper-parameters from the node attributes.
    # NOTE(review): `dim_dil` and `group` stay unbound if the corresponding
    # attribute is absent from the node -- presumably exporters always emit
    # them; confirm, otherwise this raises UnboundLocalError below.
    for attr in node.attribute:
        # print(attr)
        if(attr.name == 'kernel_shape'):
            dim_kernel = attr.ints  # kw,kh
        if(attr.name == 'strides'):
            dim_stride = attr.ints
        if(attr.name == 'pads'):
            dim_pad = attr.ints
        if(attr.name == 'dilations'):
            dim_dil = attr.ints
        if(attr.name == 'group'):
            group = attr.i
    # print(dim_dil)
    dim_input = diction[node.input[0]]
    # Leading dims (batch etc.) plus the output channel count.
    output_size = np.append(
        dim_input[0:-np.array(dim_kernel).size-1], dim_weight[0])
    # Spatial dims: standard conv output formula
    # out = (in + 2*pad - dilation*(kernel-1) - 1) / stride + 1.
    hw = np.array(dim_input[-np.array(dim_kernel).size:])
    for i in range(hw.size):
        hw[i] = int((hw[i]+2*dim_pad[i]-dim_dil[i] *
                     (dim_kernel[i]-1)-1)/dim_stride[i]+1)
    output_size = np.append(output_size, hw)
    macs = counter_conv(dim_bias, np.prod(dim_kernel),
                        np.prod(output_size), dim_weight[1], group)
    output_name = node.output[0]
    # if '140' in diction:
    #     print("conv",diction['140'],output_name)
    return macs, output_size, output_name
def onnx_counter_constant(diction, node):
    """Constant nodes cost nothing; report a scalar ([1]) output shape."""
    return counter_zero_ops(), [1], node.output[0]
def onnx_counter_mul(diction, node):
    """Count MACs for an elementwise ONNX Mul node.

    `diction` maps tensor names to shapes. Returns
    (macs, output_size, output_name). Mul follows numpy-style
    broadcasting, so both the MAC count and the output shape come from
    the larger operand shape.
    """
    # Pick the broadcast-dominant (larger) operand shape.
    if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
        input_size = diction[node.input[1]]
    else:
        input_size = diction[node.input[0]]
    macs = counter_mul(np.prod(input_size))
    # Fix: the original always returned diction[node.input[0]], which is
    # the wrong output shape whenever input[1] is the larger operand.
    # The sibling Pow/Div handlers already return the larger shape.
    output_size = input_size
    output_name = node.output[0]
    return macs, output_size, output_name
def onnx_counter_bn(diction, node):
    """BatchNormalization: normalization cost; the shape passes through."""
    shape = diction[node.input[0]]
    return counter_norm(np.prod(shape)), shape, node.output[0]
def onnx_counter_relu(diction, node):
    """Relu: counted as zero MACs; the input shape is forwarded unchanged."""
    shape = diction[node.input[0]]
    return counter_zero_ops(), shape, node.output[0]
def onnx_counter_reducemean(diction, node):
    """Count ops for an ONNX ReduceMean node (zero MACs) and its output shape.

    The reduced axes are dropped from the shape unless keepdims is set.
    """
    keep_dim = 0
    for attr in node.attribute:
        if('axes' in attr.name):
            dim_axis = np.array(attr.ints)
        elif('keepdims' in attr.name):
            keep_dim = attr.i
    input_size = diction[node.input[0]]
    macs = counter_zero_ops()
    output_name = node.output[0]
    if (keep_dim == 1):
        output_size = input_size
    else:
        # NOTE(review): `dim_axis` is unbound if the node carries no 'axes'
        # attribute (full reduction in newer opsets) -- confirm exporters
        # always emit it before relying on this branch.
        output_size = np.delete(input_size, dim_axis)
    #output_size = input_size
    return macs, output_size, output_name
def onnx_counter_sub(diction, node):
    """Sub: counted as zero ops; output shape equals the first input's."""
    shape = diction[node.input[0]]
    return counter_zero_ops(), shape, node.output[0]
def onnx_counter_pow(diction, node):
    """Pow: elementwise power cost over the broadcast (larger) operand shape."""
    base_shape = diction[node.input[0]]
    exp_shape = diction[node.input[1]]
    # Broadcasting: work with whichever operand has more elements.
    shape = exp_shape if np.array(exp_shape).size >= np.array(base_shape).size else base_shape
    return counter_pow(np.prod(shape)), shape, node.output[0]
def onnx_counter_sqrt(diction, node):
    """Sqrt: elementwise square-root cost; the shape passes through."""
    shape = diction[node.input[0]]
    return counter_sqrt(np.prod(shape)), shape, node.output[0]
def onnx_counter_div(diction, node):
    """Div: elementwise division cost over the broadcast (larger) operand shape."""
    num_shape = diction[node.input[0]]
    den_shape = diction[node.input[1]]
    # Broadcasting: work with whichever operand has more elements.
    shape = den_shape if np.array(den_shape).size >= np.array(num_shape).size else num_shape
    return counter_div(np.prod(shape)), shape, node.output[0]
def onnx_counter_instance(diction, node):
    """InstanceNormalization: normalization cost; the shape passes through."""
    shape = diction[node.input[0]]
    return counter_norm(np.prod(shape)), shape, node.output[0]
def onnx_counter_softmax(diction, node):
    """Softmax along the node's axis attribute; the shape passes through."""
    shape = diction[node.input[0]]
    axis = node.attribute[0].i
    n_features = shape[axis]
    # Every dimension other than the softmax axis behaves like batch.
    n_batches = np.prod(shape) / n_features
    return counter_softmax(n_features, n_batches), shape, node.output[0]
def onnx_counter_pad(diction, node):
    """Pad: counted as zero ops.

    TODO: read the pads constant input and report the real padded output
    shape; currently the input shape is passed through unchanged.
    """
    shape = diction[node.input[0]]
    return counter_zero_ops(), shape, node.output[0]
def onnx_counter_averagepool(diction, node):
    """Count ops for an ONNX AveragePool node and derive its output shape.

    Returns (macs, output_size, output_name).
    """
    # TODO add support of ceil_mode and floor
    macs = counter_avgpool(np.prod(diction[node.input[0]]))
    output_name = node.output[0]
    dim_pad = None
    for attr in node.attribute:
        # print(attr)
        if(attr.name == 'kernel_shape'):
            dim_kernel = attr.ints  # kw,kh
        elif(attr.name == 'strides'):
            dim_stride = attr.ints
        elif(attr.name == 'pads'):
            dim_pad = attr.ints
        elif(attr.name == 'dilations'):
            dim_dil = attr.ints
    # print(dim_dil)
    dim_input = diction[node.input[0]]
    # Spatial dims (last len(kernel) entries of the input shape).
    # NOTE(review): `hw.size` assumes `dim_input` is a numpy array, and if
    # the slice is a view, the in-place writes below mutate the cached
    # shape stored in `diction` -- confirm this is intended upstream.
    hw = dim_input[-np.array(dim_kernel).size:]
    if dim_pad is not None:
        for i in range(hw.size):
            # out = (in + 2*pad - kernel) / stride + 1
            hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
        output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
    else:
        for i in range(hw.size):
            # out = (in - kernel) / stride + 1 (no padding)
            hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
        output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
    #print(macs, output_size, output_name)
    return macs, output_size, output_name
def onnx_counter_flatten(diction, node):
    """Count ops for an ONNX Flatten node (zero MACs) and its output shape."""
    # print(node)
    macs = counter_zero_ops()
    output_name = node.output[0]
    axis = node.attribute[0].i
    input_size = diction[node.input[0]]
    # NOTE(review): per the ONNX spec the leading output dim should be
    # prod(input_size[:axis]); `input_size[axis-1]` only matches that for
    # axis == 1 (the common case) -- confirm before generalizing.
    output_size = np.append(input_size[axis-1], np.prod(input_size[axis:]))
    # print("flatten",output_size)
    return macs, output_size, output_name
def onnx_counter_gemm(diction, node):
    """Count MACs for an ONNX Gemm node (Y = alpha * A' * B' + beta * C).

    `diction` maps tensor names to shapes; the output feature count is
    taken from dim_weight[0]. Returns (macs, output_size, output_name).
    """
    input_size = diction[node.input[0]]
    dim_weight = diction[node.input[1]]
    # Multiply-accumulates for the matmul plus one add per output feature.
    # NOTE(review): uses dim_weight[1] as the inner dim and dim_weight[0]
    # as the output dim -- assumes a fixed weight layout; transA/transB
    # attributes are not inspected. Confirm against exported models.
    macs = np.prod(input_size) * dim_weight[1] + dim_weight[0]
    output_size = np.append(input_size[0:-1], dim_weight[0])
    output_name = node.output[0]
    return macs, output_size, output_name
    # Fix: removed an unreachable `pass` statement that followed the return.
def onnx_counter_maxpool(diction, node):
    """Count ops for an ONNX MaxPool node (zero MACs) and its output shape."""
    # TODO add support of ceil_mode and floor
    # print(node)
    macs = counter_zero_ops()
    output_name = node.output[0]
    dim_pad = None
    for attr in node.attribute:
        # print(attr)
        if(attr.name == 'kernel_shape'):
            dim_kernel = attr.ints  # kw,kh
        elif(attr.name == 'strides'):
            dim_stride = attr.ints
        elif(attr.name == 'pads'):
            dim_pad = attr.ints
        elif(attr.name == 'dilations'):
            dim_dil = attr.ints
    # print(dim_dil)
    dim_input = diction[node.input[0]]
    # Spatial dims (last len(kernel) entries of the input shape).
    # NOTE(review): `hw.size` assumes a numpy array, and if the slice is a
    # view the in-place writes below mutate the cached shape in `diction`
    # -- confirm this is intended upstream.
    hw = dim_input[-np.array(dim_kernel).size:]
    if dim_pad is not None:
        for i in range(hw.size):
            # out = (in + 2*pad - kernel) / stride + 1
            hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
        output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
    else:
        for i in range(hw.size):
            # out = (in - kernel) / stride + 1 (no padding)
            hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
        output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
    #print(macs, output_size, output_name)
    return macs, output_size, output_name
def onnx_counter_globalaveragepool(diction, node):
    """Count ops for an ONNX GlobalAveragePool node (counted as zero MACs).

    NOTE(review): the input shape is forwarded unchanged, but per the
    ONNX spec GlobalAveragePool collapses every spatial dimension to 1;
    downstream shape propagation may be wrong -- confirm before relying
    on output_size here.
    """
    macs = counter_zero_ops()
    output_name = node.output[0]
    input_size = diction[node.input[0]]
    output_size = input_size
    return macs, output_size, output_name
def onnx_counter_concat(diction, node):
    """Count ops for an ONNX Concat node (zero MACs) and its output shape.

    The output shape equals the first input's shape except along the
    concat axis, where the inputs' sizes are summed (ONNX Concat spec).
    Returns (macs, output_size, output_name).
    """
    axis = node.attribute[0].i
    input_size = diction[node.input[0]]
    # Fix: the original loop kept only the LAST input's size along `axis`;
    # Concat's output size along the axis is the SUM over all inputs.
    dim_concat = sum(diction[i][axis] for i in node.input)
    # Copy before writing so the cached shape of input[0] in `diction`
    # is not mutated in place.
    output_size = np.array(input_size)
    output_size[axis] = dim_concat
    output_name = node.output[0]
    macs = counter_zero_ops()
    return macs, output_size, output_name
def onnx_counter_clip(diction, node):
    """Clip: counted as zero MACs; the input shape is forwarded."""
    shape = diction[node.input[0]]
    return counter_zero_ops(), shape, node.output[0]
# Dispatch table mapping an ONNX node op_type to its counter handler.
# Every handler returns (macs, output_size, output_name); the `None`
# entry lets callers detect unsupported op_types via dict.get.
onnx_operators = {
    'MatMul': onnx_counter_matmul,
    'Add': onnx_counter_add,
    'Conv': onnx_counter_conv,
    'Mul': onnx_counter_mul,
    'Constant': onnx_counter_constant,
    'BatchNormalization': onnx_counter_bn,
    'Relu': onnx_counter_relu,
    'ReduceMean': onnx_counter_reducemean,
    'Sub': onnx_counter_sub,
    'Pow': onnx_counter_pow,
    'Sqrt': onnx_counter_sqrt,
    'Div': onnx_counter_div,
    'InstanceNormalization': onnx_counter_instance,
    'Softmax': onnx_counter_softmax,
    'Pad': onnx_counter_pad,
    'AveragePool': onnx_counter_averagepool,
    'MaxPool': onnx_counter_maxpool,
    'Flatten': onnx_counter_flatten,
    'Gemm': onnx_counter_gemm,
    'GlobalAveragePool': onnx_counter_globalaveragepool,
    'Concat': onnx_counter_concat,
    'Clip': onnx_counter_clip,
    None: None,
}
| [
"numpy.append",
"numpy.prod",
"numpy.array",
"numpy.delete"
] | [((463, 506), 'numpy.append', 'np.append', (['input1_dim[0:-1]', 'input2_dim[-1]'], {}), '(input1_dim[0:-1], input2_dim[-1])\n', (472, 506), True, 'import numpy as np\n'), ((2135, 2161), 'numpy.append', 'np.append', (['output_size', 'hw'], {}), '(output_size, hw)\n', (2144, 2161), True, 'import numpy as np\n'), ((8204, 8246), 'numpy.append', 'np.append', (['input_size[0:-1]', 'dim_weight[0]'], {}), '(input_size[0:-1], dim_weight[0])\n', (8213, 8246), True, 'import numpy as np\n'), ((2196, 2215), 'numpy.prod', 'np.prod', (['dim_kernel'], {}), '(dim_kernel)\n', (2203, 2215), True, 'import numpy as np\n'), ((2241, 2261), 'numpy.prod', 'np.prod', (['output_size'], {}), '(output_size)\n', (2248, 2261), True, 'import numpy as np\n'), ((2929, 2948), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (2936, 2948), True, 'import numpy as np\n'), ((3168, 3187), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (3175, 3187), True, 'import numpy as np\n'), ((4061, 4092), 'numpy.delete', 'np.delete', (['input_size', 'dim_axis'], {}), '(input_size, dim_axis)\n', (4070, 4092), True, 'import numpy as np\n'), ((4625, 4644), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (4632, 4644), True, 'import numpy as np\n'), ((4854, 4873), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (4861, 4873), True, 'import numpy as np\n'), ((5226, 5245), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (5233, 5245), True, 'import numpy as np\n'), ((5459, 5478), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (5466, 5478), True, 'import numpy as np\n'), ((5746, 5765), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (5753, 5765), True, 'import numpy as np\n'), ((6523, 6554), 'numpy.prod', 'np.prod', (['diction[node.input[0]]'], {}), '(diction[node.input[0]])\n', (6530, 6554), True, 'import numpy as np\n'), ((7800, 7826), 'numpy.prod', 'np.prod', (['input_size[axis:]'], {}), 
'(input_size[axis:])\n', (7807, 7826), True, 'import numpy as np\n'), ((678, 710), 'numpy.array', 'np.array', (['diction[node.input[1]]'], {}), '(diction[node.input[1]])\n', (686, 710), True, 'import numpy as np\n'), ((719, 751), 'numpy.array', 'np.array', (['diction[node.input[0]]'], {}), '(diction[node.input[0]])\n', (727, 751), True, 'import numpy as np\n'), ((2728, 2760), 'numpy.array', 'np.array', (['diction[node.input[1]]'], {}), '(diction[node.input[1]])\n', (2736, 2760), True, 'import numpy as np\n'), ((2769, 2801), 'numpy.array', 'np.array', (['diction[node.input[0]]'], {}), '(diction[node.input[0]])\n', (2777, 2801), True, 'import numpy as np\n'), ((3779, 3798), 'numpy.array', 'np.array', (['attr.ints'], {}), '(attr.ints)\n', (3787, 3798), True, 'import numpy as np\n'), ((4424, 4456), 'numpy.array', 'np.array', (['diction[node.input[1]]'], {}), '(diction[node.input[1]])\n', (4432, 4456), True, 'import numpy as np\n'), ((4465, 4497), 'numpy.array', 'np.array', (['diction[node.input[0]]'], {}), '(diction[node.input[0]])\n', (4473, 4497), True, 'import numpy as np\n'), ((5025, 5057), 'numpy.array', 'np.array', (['diction[node.input[1]]'], {}), '(diction[node.input[1]])\n', (5033, 5057), True, 'import numpy as np\n'), ((5066, 5098), 'numpy.array', 'np.array', (['diction[node.input[0]]'], {}), '(diction[node.input[0]])\n', (5074, 5098), True, 'import numpy as np\n'), ((8134, 8153), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (8141, 8153), True, 'import numpy as np\n'), ((7047, 7067), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (7055, 7067), True, 'import numpy as np\n'), ((8959, 8979), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (8967, 8979), True, 'import numpy as np\n'), ((1947, 1967), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (1955, 1967), True, 'import numpy as np\n'), ((1873, 1893), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (1881, 1893), 
True, 'import numpy as np\n'), ((7257, 7277), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (7265, 7277), True, 'import numpy as np\n'), ((7440, 7460), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (7448, 7460), True, 'import numpy as np\n'), ((9169, 9189), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (9177, 9189), True, 'import numpy as np\n'), ((9352, 9372), 'numpy.array', 'np.array', (['dim_kernel'], {}), '(dim_kernel)\n', (9360, 9372), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Module to simulate OH sky lines spectra.
"""
import numpy as np
import pandas as pd
from pathlib import Path
from .constants import Constants as cs
from .spectra import Spectra
from .simSpec import SpecUtil as spec_util
_PARENT_DIR = Path(__file__).resolve().parents[1]
class SkyLines(object):
    """
    Contains everything related to OH sky lines.

    Builds OH airglow emission-line spectra from the Brooke et al. (2016)
    line list, using separate rotational and vibrational temperatures.
    """
    MAX_V = 13  # maximum vibrational quantum level

    def __init__(self):
        """
        Initiate `SkyLines`. Load data containing line wavelengths, transition
        levels, Einstein's A, etc. The data is from Brooke et al. (2016).
        """
        # Default temperatures in K: rotation thermalizes near the ambient
        # atmosphere; vibrational populations are far from equilibrium.
        self.temperature_rot = 190.
        self.temperature_vib = 9000.
        self._wavelength_start = 0.
        self._wavelength_end = 0.
        self._wavelengths = []
        # Cache of partition sums per vibrational level v:
        # Q evaluated at T_rot and N evaluated at T_vib.
        self._qn_vs = pd.DataFrame(columns=['v', 'Q', 'N'],
                                   index=np.arange(self.MAX_V+1))
        self.line_list = pd.DataFrame()  # stores line data
        self._line_list_v = []  # list of lines (dataframes) indexed with v
        self._lines_in_range = pd.DataFrame()  # lines in wavelength range
        self.load_data()  # loads data into `self.line_list`

    def load_data(self):
        """
        Load necessary data files.
        """
        data_file_path = _PARENT_DIR / 'data' / 'OH-XX-Line_list.txt'
        # NOTE(review): `delim_whitespace` is deprecated in newer pandas
        # (use sep=r'\s+'); confirm the supported pandas version before
        # changing.
        self.line_list = pd.read_csv(data_file_path,
                                    delim_whitespace=True, skiprows=33)
        for v in range(self.MAX_V+1):
            # subset of line_list with v' = v, one row per unique upper level
            v_lines = self.line_list[
                self.line_list["v'"].values == v
            ][["J'", "F'", "p'", "E''", 'Calculated']].drop_duplicates(
            ).reset_index()
            self._line_list_v.append(v_lines)

    def reset_temperatures(self, temperature_rot=190, temperature_vib=9000):
        """
        Reset the temperatures without reloading data.

        :param temperature_rot: Rotational temperature.
        :param temperature_vib: Vibrational temperature.
        """
        self.temperature_rot = temperature_rot
        self.temperature_vib = temperature_vib
        # Resetting all the values in the dataframe, but keeping the same
        # dataframe object for efficient memory allocation.
        self._qn_vs.loc[:] = np.nan
        self._lines_in_range.loc[:, ('Q_v', 'N_v', 'Intensity')] = np.nan

    def setup_wavelengths(self, start=None, end=None, resolution=0.01,
                          wavelengths=None):
        """
        :param start: Beginning wavelength in nm.
        :param end: Ending wavelength in nm.
        :param resolution: Resolution element in nm.
        :param wavelengths: Wavelengths for computing the sky spectra. If
            provided, `start` and `end` are not needed.
        """
        if wavelengths is None:
            assert start is not None and end is not None, 'You need to ' \
                                                          'provide either ' \
                                                          'the wavelengths ' \
                                                          'or the range.'
            self._wavelength_start = start
            self._wavelength_end = end
            self._wavelengths = np.arange(start, end+resolution/2., resolution)
        else:
            self._wavelengths = wavelengths
            self._wavelength_start = wavelengths[0]
            self._wavelength_end = wavelengths[-1]
        # Range bounds in wavenumber (the line list is in vacuum cm^-1);
        # note the end wavelength maps to the *start* wavenumber.
        k_start = 1e7 / spec_util.air_to_vacuum_wavelength(
            self._wavelength_end)  # in /cm
        k_end = 1e7 / spec_util.air_to_vacuum_wavelength(
            self._wavelength_start)  # in /cm
        self._lines_in_range = self.line_list[
            self.line_list["Calculated"].between(k_start, k_end)]
        # Scratch columns filled lazily by _get_line_intensities_in_range.
        self._lines_in_range = self._lines_in_range.assign(
            Q_v=np.nan,
            N_v=np.nan,
            Intensity=np.nan)

    def get_sky_spectra(self, temperature_rot=None, temperature_vib=None):
        """
        Generate the sky spectra between start and end wavelengths.

        :param temperature_rot: Rotational temperature, in K.
        :param temperature_vib: Vibrational temperature, in K.
        """
        if temperature_rot is not None and temperature_vib is not None:
            self.reset_temperatures(temperature_rot=temperature_rot,
                                    temperature_vib=temperature_vib)
        elif temperature_rot is not None:
            self.reset_temperatures(temperature_rot=temperature_rot,
                                    temperature_vib=self.temperature_vib)
        elif temperature_vib is not None:
            self.reset_temperatures(temperature_rot=self.temperature_rot,
                                    temperature_vib=temperature_vib)
        flux = np.zeros_like(self._wavelengths)
        intensities = self._get_line_intensities_in_range()
        # Line centers converted from vacuum wavenumber to air wavelength.
        line_wavelengths = spec_util.vacuum_to_air_wavelength_mathar(
            1e7/self._lines_in_range['Calculated'].values)  # nm
        pixel_size = self._wavelengths[1] - self._wavelengths[0]
        # Deposit each line's intensity into its containing pixel.
        for line_wavelength, intensity in zip(line_wavelengths, intensities):
            idx = int((line_wavelength - self._wavelength_start)/pixel_size)
            if 0 <= idx < len(flux):
                flux[idx] += intensity
        return Spectra(self._wavelengths, flux, wavelength_unit='nm')

    def _get_qn_v(self, v):
        """
        Compute partition function Q_v (T_rot) and N_v (T_vib). Q_v (T_rot) is
        computed using equation (39) from Mies (1974). Simply, N_v (T_vib) =
        Q_v (T_vib).

        :param v: Vibrational quantum number.
        :type v: int
        """
        # Return the cached pair if this v was computed before.
        if v in self._qn_vs.v.values:
            idx = np.where(self._qn_vs.v.values == v)[0]
            return (self._qn_vs.Q.values[idx],
                    self._qn_vs.N.values[idx])
        else:
            v_lines = self._line_list_v[v]
            # Total upper-level term value: lower energy + transition energy.
            wave_n = v_lines["E''"].values + v_lines['Calculated'].values
            j = v_lines["J'"].values
            # (4J + 2) is the level degeneracy.
            q = np.sum((4*j+2) * np.exp(-self._get_beta(wave_n,
                                                    self.temperature_rot)
                                            ))
            n = np.sum((4*j+2) * np.exp(-self._get_beta(wave_n,
                                                    self.temperature_vib)
                                            ))
            self._qn_vs.loc[v] = [v, q, n]
            return q, n

    @staticmethod
    def _get_beta(wavenumber, temperature):
        """
        Compute the Boltzmann exponent beta(E) = h c k / (k_B T) for a level
        at wavenumber k (cm^-1).

        :param wavenumber: Wavenumber in cm^-1.
        :param temperature: Temperature in K.
        """
        return cs.h * cs.c * wavenumber / cs.k_b / temperature

    @staticmethod
    def _wavenumber2energy(wavenumber):
        """
        Convert wavenumber to energy (E = h c k).

        :param wavenumber: Wavenumber in cm^-1.
        :return:
        """
        return cs.h * cs.c * wavenumber

    def _populate_qn_in_line_list(self):
        """
        Fills up [Q_v, N_v] columns of `SkyLines._lines_in_range` with
        calculated values.
        """
        for v in range(self.MAX_V+1):
            q_v, n_v = self._get_qn_v(v)
            row_slice = self._lines_in_range["v'"] == v
            self._lines_in_range.loc[row_slice,
                                     'Q_v'] = q_v
            self._lines_in_range.loc[row_slice,
                                     'N_v'] = n_v

    def _get_line_intensities_in_range(self):
        """
        Compute line intensities for lines in `self._lines_in_range`.
        Computed intensity is unit of ergs/cm^3/s. Equation (41) from Mies
        (1974) is used to derive the intensities in photon number, then the
        energy of the line is multiplied to get the intensities in unit of
        energy.
        """
        # Compute lazily: NaN in the Intensity column means not yet filled
        # for the current temperatures/range.
        if np.isnan(self._lines_in_range['Intensity'].values).any():
            self._populate_qn_in_line_list()
            v = self._lines_in_range["v'"].values
            wavenumber = self._lines_in_range['Calculated'].values
            einstein_a = self._lines_in_range['A'].values
            j = self._lines_in_range["J'"].values
            energy_lower = self._lines_in_range["E''"].values
            q_v = self._lines_in_range['Q_v'].values
            n_v = self._lines_in_range['N_v'].values
            # Photon rate per line times photon energy -> ergs/cm^3/s.
            intensity = self._wavenumber2energy(wavenumber) * n_v \
                * einstein_a * (4*j+2) / q_v * np.exp(
                -self._get_beta(energy_lower+wavenumber, self.temperature_rot))
            self._lines_in_range.loc[:, 'Intensity'] = pd.Series(intensity,
                                                index=self._lines_in_range.index)
            return intensity
        else:
            return self._lines_in_range['Intensity'].values
| [
"pandas.Series",
"pandas.read_csv",
"pathlib.Path",
"numpy.where",
"numpy.isnan",
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.arange"
] | [((977, 991), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (989, 991), True, 'import pandas as pd\n'), ((1119, 1133), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1131, 1133), True, 'import pandas as pd\n'), ((1405, 1468), 'pandas.read_csv', 'pd.read_csv', (['data_file_path'], {'delim_whitespace': '(True)', 'skiprows': '(33)'}), '(data_file_path, delim_whitespace=True, skiprows=33)\n', (1416, 1468), True, 'import pandas as pd\n'), ((4863, 4895), 'numpy.zeros_like', 'np.zeros_like', (['self._wavelengths'], {}), '(self._wavelengths)\n', (4876, 4895), True, 'import numpy as np\n'), ((3304, 3356), 'numpy.arange', 'np.arange', (['start', '(end + resolution / 2.0)', 'resolution'], {}), '(start, end + resolution / 2.0, resolution)\n', (3313, 3356), True, 'import numpy as np\n'), ((8789, 8843), 'pandas.Series', 'pd.Series', (['intensity'], {'index': 'self._lines_in_range.index'}), '(intensity, index=self._lines_in_range.index)\n', (8798, 8843), True, 'import pandas as pd\n'), ((265, 279), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (269, 279), False, 'from pathlib import Path\n'), ((926, 951), 'numpy.arange', 'np.arange', (['(self.MAX_V + 1)'], {}), '(self.MAX_V + 1)\n', (935, 951), True, 'import numpy as np\n'), ((5813, 5848), 'numpy.where', 'np.where', (['(self._qn_vs.v.values == v)'], {}), '(self._qn_vs.v.values == v)\n', (5821, 5848), True, 'import numpy as np\n'), ((8024, 8074), 'numpy.isnan', 'np.isnan', (["self._lines_in_range['Intensity'].values"], {}), "(self._lines_in_range['Intensity'].values)\n", (8032, 8074), True, 'import numpy as np\n')] |
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from ...visualization import visualize_cut_plane
class Yaw:
    """
    Class that performs yaw optimization for a single set of
    inflow conditions. Intended to be used together with an object of the
    :py:class`floris.tools.optimization.optimization.Optimization` class.
    Args:
        fi (:py:class:`floris.tools.floris_interface.FlorisInterface`):
            Interface from FLORIS to the tools package.
        minimum_yaw_angle (float, optional): Minimum constraint on
            yaw. Defaults to None.
        maximum_yaw_angle (float, optional): Maximum constraint on
            yaw. Defaults to None.
        x0 (iterable, optional): The initial yaw conditions.
            Defaults to None. Initializes to the current turbine
            yaw settings.
        include_unc (bool): If True, uncertainty in wind direction
            and/or yaw position is included when determining wind farm power.
            Uncertainty is included by computing the mean wind farm power for
            a distribution of wind direction and yaw position deviations from
            the original wind direction and yaw angles. Defaults to False.
        unc_pmfs (dictionary, optional): A dictionary containing optional
            probability mass functions describing the distribution of wind
            direction and yaw position deviations when wind direction and/or
            yaw position uncertainty is included in the power calculations.
            Contains the following key-value pairs:
            - **wd_unc**: A numpy array containing wind direction deviations
                from the original wind direction.
            - **wd_unc_pmf**: A numpy array containing the probability of
                each wind direction deviation in **wd_unc** occurring.
            - **yaw_unc**: A numpy array containing yaw angle deviations
                from the original yaw angles.
            - **yaw_unc_pmf**: A numpy array containing the probability of
                each yaw angle deviation in **yaw_unc** occurring.
            Defaults to None, in which case default PMFs are calculated using
            values provided in **unc_options**.
        unc_options (dictionary, optional): A dictionary containing values used
            to create normally-distributed, zero-mean probability mass functions
            describing the distribution of wind direction and yaw position
            deviations when wind direction and/or yaw position uncertainty is
            included. This argument is only used when **unc_pmfs** is None and
            contains the following key-value pairs:
            - **std_wd**: A float containing the standard deviation of the wind
                direction deviations from the original wind direction.
            - **std_yaw**: A float containing the standard deviation of the yaw
                angle deviations from the original yaw angles.
            - **pmf_res**: A float containing the resolution in degrees of the
                wind direction and yaw angle PMFs.
            - **pdf_cutoff**: A float containing the cumulative distribution
                function value at which the tails of the PMFs are truncated.
            Defaults to None. Initializes to {'std_wd': 4.95, 'std_yaw': 1.75,
            'pmf_res': 1.0, 'pdf_cutoff': 0.995}.
        wdir (float, optional): Wind direction to use for optimization. Defaults
            to None. Initializes to current wind direction in floris.
        wspd (float, optional): Wind speed to use for optimization. Defaults
            to None. Initializes to current wind speed in floris.
    Returns:
        Yaw: An instantiated Yaw object.
    """
    def __init__(
        self,
        fi,
        minimum_yaw_angle=0.0,
        maximum_yaw_angle=25.0,
        x0=None,
        include_unc=False,
        unc_pmfs=None,
        unc_options=None,
        wdir=None,
        wspd=None,
    ):
        """
        Instantiate Yaw object and parameter values.
        """
        self.fi = fi
        self.minimum_yaw_angle = minimum_yaw_angle
        self.maximum_yaw_angle = maximum_yaw_angle
        if x0 is not None:
            self.x0 = x0
        else:
            # Default initial guess: the yaw angles currently set on the turbines.
            self.x0 = [
                turbine.yaw_angle
                for turbine in self.fi.floris.farm.turbine_map.turbines
            ]
        self.include_unc = include_unc
        self.unc_pmfs = unc_pmfs
        # Bitwise & on two bools is equivalent to `and` here; build default PMFs
        # only when uncertainty is requested but none were supplied.
        if self.include_unc & (self.unc_pmfs is None):
            self.unc_pmfs = calc_unc_pmfs(self.unc_pmfs)
        if wdir is not None:
            self.wdir = wdir
        else:
            self.wdir = self.fi.floris.farm.flow_field.wind_direction
        if wspd is not None:
            self.wspd = wspd
        else:
            self.wspd = self.fi.floris.farm.flow_field.wind_speed
        # Push the (possibly overridden) inflow conditions back into FLORIS.
        self.fi.reinitialize_flow_field(wind_speed=self.wspd, wind_direction=self.wdir)
    def __str__(self):
        return "yaw"
    ###########################################################################
    # Required private optimization methods
    ###########################################################################
    def reinitialize(self):
        pass
    def obj_func(self, varDict):
        """Objective callback for the optimizer: negative farm power for the
        yaw angles carried in varDict (minimizing this maximizes power)."""
        # Parse the variable dictionary
        self.parse_opt_vars(varDict)
        # Reinitialize with wind speed and direction
        self.fi.reinitialize_flow_field(wind_speed=self.wspd, wind_direction=self.wdir)
        # Compute the objective function; negated because the optimizer minimizes.
        funcs = {}
        funcs["obj"] = -1 * self.fi.get_farm_power_for_yaw_angle(self.yaw) / 1e0
        # Compute constraints, if any are defined for the optimization
        funcs = self.compute_cons(funcs)
        fail = False
        return funcs, fail
    def parse_opt_vars(self, varDict):
        # Extract the yaw design variables from the optimizer's dict.
        self.yaw = varDict["yaw"]
    def parse_sol_vars(self, sol):
        # Extract the yaw design variables from a pyOptSparse solution object.
        self.yaw = list(sol.getDVs().values())[0]
    def add_var_group(self, optProb):
        # Register one continuous yaw variable per turbine, bounded by the
        # min/max yaw constraints and initialized at x0.
        optProb.addVarGroup(
            "yaw",
            self.nturbs,
            type="c",
            lower=self.minimum_yaw_angle,
            upper=self.maximum_yaw_angle,
            value=self.x0,
        )
        return optProb
    def add_con_group(self, optProb):
        # no constraints defined
        return optProb
    def compute_cons(self, funcs):
        # no constraints defined
        return funcs
    ###########################################################################
    # User-defined methods
    ###########################################################################
    def plot_yaw_opt_results(self, sol):
        """
        Method to plot the wind farm with optimal yaw offsets
        """
        yaw = sol.getDVs()["yaw"]
        # Assign yaw angles to turbines and calculate wake
        self.fi.calculate_wake(yaw_angles=yaw)
        # Initialize the horizontal cut
        hor_plane = self.fi.get_hor_plane(x_resolution=400, y_resolution=100)
        # Plot and show
        fig, ax = plt.subplots()
        visualize_cut_plane(hor_plane, ax=ax)
        # NOTE(review): indexing wspd[0]/wdir[0] assumes list-valued inflow
        # conditions, as stored by floris — confirm for scalar inputs.
        ax.set_title(
            "Optimal Yaw Offsets for U = "
            + str(self.wspd[0])
            + " m/s, Wind Direction = "
            + str(self.wdir[0])
            + "$^\\circ$"
        )
        plt.show()
    def print_power_gain(self, sol):
        """
        Method to print the power gain from wake steering with optimal yaw offsets
        """
        yaw = sol.getDVs()["yaw"]
        # Baseline: all turbines at zero yaw.
        self.fi.calculate_wake(yaw_angles=0.0)
        power_baseline = self.fi.get_farm_power()
        # Optimized: yaw angles from the solution object.
        self.fi.calculate_wake(yaw_angles=yaw)
        power_opt = self.fi.get_farm_power()
        pct_gain = 100.0 * (power_opt - power_baseline) / power_baseline
        print("==========================================")
        print("Baseline Power = %.1f kW" % (power_baseline / 1e3))
        print("Optimal Power = %.1f kW" % (power_opt / 1e3))
        print("Total Power Gain = %.1f%%" % pct_gain)
        print("==========================================")
    ###########################################################################
    # Properties
    ###########################################################################
    @property
    def nturbs(self):
        """
        This property returns the number of turbines in the FLORIS
        object.
        Returns:
            nturbs (int): The number of turbines in the FLORIS object.
        """
        self._nturbs = len(self.fi.floris.farm.turbines)
        return self._nturbs
def calc_unc_pmfs(unc_options=None):
    """
    Build zero-mean, normally-distributed probability mass functions for the
    wind direction and yaw position deviations used when uncertainty is
    included in wind farm power calculations.

    Args:
        unc_options (dictionary, optional): Parameters of the PMFs, with keys:
            - **std_wd**: standard deviation of wind direction deviations.
            - **std_yaw**: standard deviation of yaw angle deviations.
            - **pmf_res**: PMF resolution in degrees.
            - **pdf_cutoff**: CDF value at which the PMF tails are truncated.
            Defaults to None, which uses {'std_wd': 4.95, 'std_yaw': 1.75,
            'pmf_res': 1.0, 'pdf_cutoff': 0.995}.

    Returns:
        dictionary: Keys **wd_unc**, **wd_unc_pmf**, **yaw_unc**,
        **yaw_unc_pmf** — deviation supports and their normalized
        probabilities. A zero (or negative) standard deviation yields a
        degenerate PMF: a single zero deviation with probability one.
    """
    if unc_options is None:
        unc_options = {
            "std_wd": 4.95,
            "std_yaw": 1.75,
            "pmf_res": 1.0,
            "pdf_cutoff": 0.995,
        }

    resolution = unc_options["pmf_res"]
    cutoff = unc_options["pdf_cutoff"]

    def _truncated_gaussian_pmf(std):
        # Degenerate case: no uncertainty -> single point mass at zero.
        if not std > 0:
            return np.zeros(1), np.ones(1)
        # Truncate the support where the CDF reaches `cutoff`, rounded up to
        # a whole number of resolution steps.
        n_steps = int(np.ceil(norm.ppf(cutoff, scale=std) / resolution))
        support = np.linspace(
            -1 * n_steps * resolution,
            n_steps * resolution,
            2 * n_steps + 1,
        )
        pmf = norm.pdf(support, scale=std)
        # Normalize so the discrete probabilities sum to 1.0.
        return support, pmf / np.sum(pmf)

    wd_unc, wd_unc_pmf = _truncated_gaussian_pmf(unc_options["std_wd"])
    yaw_unc, yaw_unc_pmf = _truncated_gaussian_pmf(unc_options["std_yaw"])
    return {
        "wd_unc": wd_unc,
        "wd_unc_pmf": wd_unc_pmf,
        "yaw_unc": yaw_unc,
        "yaw_unc_pmf": yaw_unc_pmf,
    }
| [
"numpy.ones",
"scipy.stats.norm.ppf",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((7707, 7721), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7719, 7721), True, 'import matplotlib.pyplot as plt\n'), ((7982, 7992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7990, 7992), True, 'import matplotlib.pyplot as plt\n'), ((12182, 12285), 'numpy.linspace', 'np.linspace', (["(-1 * wd_bnd * unc_options['pmf_res'])", "(wd_bnd * unc_options['pmf_res'])", '(2 * wd_bnd + 1)'], {}), "(-1 * wd_bnd * unc_options['pmf_res'], wd_bnd * unc_options[\n 'pmf_res'], 2 * wd_bnd + 1)\n", (12193, 12285), True, 'import numpy as np\n'), ((12349, 12394), 'scipy.stats.norm.pdf', 'norm.pdf', (['wd_unc'], {'scale': "unc_options['std_wd']"}), "(wd_unc, scale=unc_options['std_wd'])\n", (12357, 12394), False, 'from scipy.stats import norm\n'), ((12501, 12512), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (12509, 12512), True, 'import numpy as np\n'), ((12534, 12544), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (12541, 12544), True, 'import numpy as np\n'), ((12790, 12896), 'numpy.linspace', 'np.linspace', (["(-1 * yaw_bnd * unc_options['pmf_res'])", "(yaw_bnd * unc_options['pmf_res'])", '(2 * yaw_bnd + 1)'], {}), "(-1 * yaw_bnd * unc_options['pmf_res'], yaw_bnd * unc_options[\n 'pmf_res'], 2 * yaw_bnd + 1)\n", (12801, 12896), True, 'import numpy as np\n'), ((12961, 13008), 'scipy.stats.norm.pdf', 'norm.pdf', (['yaw_unc'], {'scale': "unc_options['std_yaw']"}), "(yaw_unc, scale=unc_options['std_yaw'])\n", (12969, 13008), False, 'from scipy.stats import norm\n'), ((13119, 13130), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (13127, 13130), True, 'import numpy as np\n'), ((13153, 13163), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (13160, 13163), True, 'import numpy as np\n'), ((12429, 12447), 'numpy.sum', 'np.sum', (['wd_unc_pmf'], {}), '(wd_unc_pmf)\n', (12435, 12447), True, 'import numpy as np\n'), ((13045, 13064), 'numpy.sum', 'np.sum', (['yaw_unc_pmf'], {}), '(yaw_unc_pmf)\n', (13051, 13064), True, 'import numpy as 
np\n'), ((12035, 12099), 'scipy.stats.norm.ppf', 'norm.ppf', (["unc_options['pdf_cutoff']"], {'scale': "unc_options['std_wd']"}), "(unc_options['pdf_cutoff'], scale=unc_options['std_wd'])\n", (12043, 12099), False, 'from scipy.stats import norm\n'), ((12641, 12706), 'scipy.stats.norm.ppf', 'norm.ppf', (["unc_options['pdf_cutoff']"], {'scale': "unc_options['std_yaw']"}), "(unc_options['pdf_cutoff'], scale=unc_options['std_yaw'])\n", (12649, 12706), False, 'from scipy.stats import norm\n')] |
import typing
import numpy as np
import numba as nb
@nb.njit
def tree_bfs(
    g: np.ndarray,
    edge_idx: np.ndarray,
    root: int,
) -> typing.Tuple[(np.ndarray, ) * 2]:
    # Breadth-first search over a tree stored as a CSR-style edge list:
    # g[:, 0] holds edge sources, g[:, 1] edge destinations, and
    # edge_idx[u]:edge_idx[u + 1] is the slice of edges leaving node u.
    # Returns (parent, depth) arrays; parent[root] stays -1 and depth[root] 0.
    n = g[:, :2].max() + 1  # node count, assuming 0-based contiguous labels
    parent = np.full(n, -1, np.int64)
    depth = np.zeros(n, np.int64)
    fifo_que = [root]
    # Iterating a list while appending to it serves as a FIFO queue
    # (each node is visited once because only children are enqueued).
    for u in fifo_que:
        for v in g[edge_idx[u]:edge_idx[u + 1], 1]:
            if v == parent[u]: continue  # skip the edge back to the parent
            parent[v] = u
            depth[v] = depth[u] + 1
            fifo_que.append(v)
    return parent, depth
| [
"numpy.full",
"numpy.zeros"
] | [((206, 230), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (213, 230), True, 'import numpy as np\n'), ((241, 262), 'numpy.zeros', 'np.zeros', (['n', 'np.int64'], {}), '(n, np.int64)\n', (249, 262), True, 'import numpy as np\n')] |
'''
This demo shows how to create variable size dataset and then it creates mini-batches from this dataset so that it
calculates the likelihood of each observation sequence in every batch using vmap.
Author : <NAME> (@karalleyna)
'''
from jax import vmap, jit
from jax.random import split, randint, PRNGKey
import jax.numpy as jnp
import hmm_utils
from hmm_discrete_lib import hmm_sample_jax, hmm_loglikelihood_numpy, hmm_loglikelihood_jax
from hmm_discrete_lib import HMMNumpy, HMMJax
import numpy as np
def loglikelihood_numpy(params_numpy, batches, lens):
    """Stack per-batch observation log-likelihoods from the numpy HMM backend."""
    per_batch = [
        hmm_loglikelihood_numpy(params_numpy, batch, batch_lens)
        for batch, batch_lens in zip(batches, lens)
    ]
    return np.vstack(per_batch)
def loglikelihood_jax(params_jax, batches, lens):
    """Compute observation log-likelihoods for all batches at once.

    The HMM parameters are broadcast (in_axes=None) while batches and their
    lengths are mapped over their leading axis.
    """
    batched_loglikelihood = vmap(hmm_loglikelihood_jax, in_axes=(None, 0, 0))
    return batched_loglikelihood(params_jax, batches, lens)
# Demo driver: sample variable-length observation sequences from a 2-state
# HMM (fair/loaded die), pad and group them into mini-batches, then verify
# the numpy and vmapped-JAX likelihood implementations agree.
# state transition matrix
A = jnp.array([
    [0.95, 0.05],
    [0.10, 0.90]
])
# observation matrix
B = jnp.array([
    [1/6, 1/6, 1/6, 1/6, 1/6, 1/6], # fair die
    [1/10, 1/10, 1/10, 1/10, 1/10, 5/10] # loaded die
])
# uniform initial state distribution
pi = jnp.array([1, 1]) / 2
params_numpy= HMMNumpy(np.array(A), np.array(B), np.array(pi))
params_jax = HMMJax(A, B, pi)
seed = 0
rng_key = PRNGKey(seed)
rng_key, rng_sample = split(rng_key)
# 15 sequences of length up to 10, batched 5 at a time.
n_obs_seq, batch_size, max_len = 15, 5, 10
observations, lens = hmm_utils.hmm_sample_n(params_jax,
                                            hmm_sample_jax,
                                            n_obs_seq, max_len,
                                            rng_sample)
# Pad to a common length so sequences stack into rectangular batches.
observations, lens = hmm_utils.pad_sequences(observations, lens)
rng_key, rng_batch = split(rng_key)
batches, lens = hmm_utils.hmm_sample_minibatches(observations,
                                                 lens,
                                                 batch_size,
                                                 rng_batch)
ll_numpy = loglikelihood_numpy(params_numpy, np.array(batches), np.array(lens))
ll_jax = loglikelihood_jax(params_jax, batches, lens)
# Both backends must produce the same per-sequence log-likelihoods.
assert np.allclose(ll_numpy, ll_jax)
print(f'Loglikelihood {ll_numpy}') | [
"hmm_discrete_lib.hmm_loglikelihood_numpy",
"jax.random.PRNGKey",
"hmm_utils.hmm_sample_minibatches",
"hmm_utils.pad_sequences",
"numpy.allclose",
"jax.numpy.array",
"numpy.array",
"hmm_discrete_lib.HMMJax",
"hmm_utils.hmm_sample_n",
"jax.vmap",
"jax.random.split"
] | [((836, 873), 'jax.numpy.array', 'jnp.array', (['[[0.95, 0.05], [0.1, 0.9]]'], {}), '([[0.95, 0.05], [0.1, 0.9]])\n', (845, 873), True, 'import jax.numpy as jnp\n'), ((912, 1021), 'jax.numpy.array', 'jnp.array', (['[[1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6], [1 / 10, 1 / 10, 1 / 10, 1 / \n 10, 1 / 10, 5 / 10]]'], {}), '([[1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6], [1 / 10, 1 / 10, 1 /\n 10, 1 / 10, 1 / 10, 5 / 10]])\n', (921, 1021), True, 'import jax.numpy as jnp\n'), ((1133, 1149), 'hmm_discrete_lib.HMMJax', 'HMMJax', (['A', 'B', 'pi'], {}), '(A, B, pi)\n', (1139, 1149), False, 'from hmm_discrete_lib import HMMNumpy, HMMJax\n'), ((1170, 1183), 'jax.random.PRNGKey', 'PRNGKey', (['seed'], {}), '(seed)\n', (1177, 1183), False, 'from jax.random import split, randint, PRNGKey\n'), ((1206, 1220), 'jax.random.split', 'split', (['rng_key'], {}), '(rng_key)\n', (1211, 1220), False, 'from jax.random import split, randint, PRNGKey\n'), ((1287, 1373), 'hmm_utils.hmm_sample_n', 'hmm_utils.hmm_sample_n', (['params_jax', 'hmm_sample_jax', 'n_obs_seq', 'max_len', 'rng_sample'], {}), '(params_jax, hmm_sample_jax, n_obs_seq, max_len,\n rng_sample)\n', (1309, 1373), False, 'import hmm_utils\n'), ((1524, 1567), 'hmm_utils.pad_sequences', 'hmm_utils.pad_sequences', (['observations', 'lens'], {}), '(observations, lens)\n', (1547, 1567), False, 'import hmm_utils\n'), ((1590, 1604), 'jax.random.split', 'split', (['rng_key'], {}), '(rng_key)\n', (1595, 1604), False, 'from jax.random import split, randint, PRNGKey\n'), ((1621, 1696), 'hmm_utils.hmm_sample_minibatches', 'hmm_utils.hmm_sample_minibatches', (['observations', 'lens', 'batch_size', 'rng_batch'], {}), '(observations, lens, batch_size, rng_batch)\n', (1653, 1696), False, 'import hmm_utils\n'), ((1987, 2016), 'numpy.allclose', 'np.allclose', (['ll_numpy', 'll_jax'], {}), '(ll_numpy, ll_jax)\n', (1998, 2016), True, 'import numpy as np\n'), ((1034, 1051), 'jax.numpy.array', 'jnp.array', (['[1, 1]'], {}), '([1, 1])\n', (1043, 
1051), True, 'import jax.numpy as jnp\n'), ((1080, 1091), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (1088, 1091), True, 'import numpy as np\n'), ((1093, 1104), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (1101, 1104), True, 'import numpy as np\n'), ((1106, 1118), 'numpy.array', 'np.array', (['pi'], {}), '(pi)\n', (1114, 1118), True, 'import numpy as np\n'), ((1890, 1907), 'numpy.array', 'np.array', (['batches'], {}), '(batches)\n', (1898, 1907), True, 'import numpy as np\n'), ((1909, 1923), 'numpy.array', 'np.array', (['lens'], {}), '(lens)\n', (1917, 1923), True, 'import numpy as np\n'), ((728, 777), 'jax.vmap', 'vmap', (['hmm_loglikelihood_jax'], {'in_axes': '(None, 0, 0)'}), '(hmm_loglikelihood_jax, in_axes=(None, 0, 0))\n', (732, 777), False, 'from jax import vmap, jit\n'), ((583, 630), 'hmm_discrete_lib.hmm_loglikelihood_numpy', 'hmm_loglikelihood_numpy', (['params_numpy', 'batch', 'l'], {}), '(params_numpy, batch, l)\n', (606, 630), False, 'from hmm_discrete_lib import hmm_sample_jax, hmm_loglikelihood_numpy, hmm_loglikelihood_jax\n')] |
import torch # torch 1.9.0+cu111
import numpy as np
from compare import *
OC = 3
IN = 2
IC = 2
IH = 4
IW = 4
KH = 3
KW = 3
weight = torch.ones([OC, IC, KH, KW], dtype=torch.float32, requires_grad=False)
print(weight)
input_np = np.arange(1, IN * IC * IH * IW + 1).reshape(IN, IC, IH, IW)
input = torch.from_numpy(input_np).type(torch.FloatTensor)
print(input)
conservertive_convolution = torch.nn.Conv2d(IC, OC, (KH, KH), stride=(1, 1), bias=False)
conservertive_convolution.weight = torch.nn.Parameter(weight)
output = conservertive_convolution(input)
print(output)
output_c = np.fromfile("../output/C_Tensor", dtype=np.float32)
output_py = output.detach().numpy().flatten()
compare_two_tensor(output_py, output_c)
| [
"numpy.fromfile",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.Parameter",
"numpy.arange",
"torch.ones"
] | [((134, 204), 'torch.ones', 'torch.ones', (['[OC, IC, KH, KW]'], {'dtype': 'torch.float32', 'requires_grad': '(False)'}), '([OC, IC, KH, KW], dtype=torch.float32, requires_grad=False)\n', (144, 204), False, 'import torch\n'), ((392, 452), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['IC', 'OC', '(KH, KH)'], {'stride': '(1, 1)', 'bias': '(False)'}), '(IC, OC, (KH, KH), stride=(1, 1), bias=False)\n', (407, 452), False, 'import torch\n'), ((488, 514), 'torch.nn.Parameter', 'torch.nn.Parameter', (['weight'], {}), '(weight)\n', (506, 514), False, 'import torch\n'), ((584, 635), 'numpy.fromfile', 'np.fromfile', (['"""../output/C_Tensor"""'], {'dtype': 'np.float32'}), "('../output/C_Tensor', dtype=np.float32)\n", (595, 635), True, 'import numpy as np\n'), ((231, 266), 'numpy.arange', 'np.arange', (['(1)', '(IN * IC * IH * IW + 1)'], {}), '(1, IN * IC * IH * IW + 1)\n', (240, 266), True, 'import numpy as np\n'), ((299, 325), 'torch.from_numpy', 'torch.from_numpy', (['input_np'], {}), '(input_np)\n', (315, 325), False, 'import torch\n')] |
# This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts
#
# Based on https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/style_transfer_3d.ipynb
# Modifications:
# 1. Taking into account cameras positions (when possible) instead of meshutil.sample_view(10.0, 12.0)
# 2. Integration with Metashape Pro to make usage easier
#
# Note that you need to:
# 1. Install CUDA 9.0 and cuDNN for CUDA 9.0
# 2. In Python bundled with Metashape install these packages: tensorflow-gpu==1.9.0 lucid==0.2.3 numpy==1.15.0 Pillow==5.2.0 matplotlib==2.2.2 ipython==6.5.0 PyOpenGL==3.1.0 jupyter==1.0.0
#
# Installation and usage instruction: http://www.agisoft.com/index.php?id=54
import Metashape
import pathlib, shutil, math
from PySide2 import QtGui, QtCore, QtWidgets
# Checking compatibility: compare only the major.minor part of the running
# Metashape version against the version this script was written for, and
# abort early rather than fail later with an obscure API error.
compatible_major_version = "1.6"
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
    raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
class ModelStyleTransferDlg(QtWidgets.QDialog):
    def __init__(self, parent):
        """Build the dialog: set default parameters, create the GUI and show it modally."""
        # User-adjustable defaults (exposed through the GUI fields).
        self.texture_size = 2048
        self.rendering_width = 2048
        self.steps_number = 1000
        self.style_path = ""
        self.style_name = "style1"
        self.working_dir = ""
        self.model_name = "model1"
        self.use_cameras_position = len(chunk.cameras) > 0
        self.content_weight = 200.0
        self.style_decay = 0.95
        # GoogLeNet (InceptionV1) layers used for the style loss.
        self.googlenet_style_layers = [
            'conv2d2',
            'mixed3a',
            'mixed3b',
            'mixed4a',
            'mixed4b',
            'mixed4c',
        ]
        self.googlenet_content_layer = 'mixed3b'
        # Derive working dir and model name from the open document, if any.
        if len(Metashape.app.document.path) > 0:
            self.working_dir = str(pathlib.Path(Metashape.app.document.path).parent / "model_style_transfer")
            self.model_name = pathlib.Path(Metashape.app.document.path).stem
        # Paths will be inited in self.exportInput()
        self.input_model_path = None
        self.input_texture_path = None
        self.input_cameras_path = None  # Can be None if no cameras or self.use_cameras_position is False
        self.output_dir = None
        self.output_texture_path = None
        self.result_model_path = None
        # Cameras will be loaded with self.exportCameras() + self.loadCameras() or randomly sampled with meshutil.sample_view(10.0, 12.0)
        self.cameras = None
        self.max_fovy = 10.0
        self.aspect_ratio = 1.0
        QtWidgets.QDialog.__init__(self, parent)
        self.setWindowTitle("Model style transfer")
        self.createGUI()
        self.initDefaultParams()
        # Blocks until the dialog is closed.
        self.exec()
    def modelStyleTransfer(self):
        """Run the full pipeline: read GUI parameters, export model/cameras,
        then run the 3D style transfer. Closes the dialog on exit either way."""
        self.loadParams()
        print("Script started...")
        self.exportInput()
        try:
            self.textureStyle3D()
        except:
            # Bare except: surface the failure in the UI, then re-raise so the
            # traceback still reaches the console.
            Metashape.app.messageBox("Something gone wrong!\n"
                                     "Please check the console.")
            raise
        finally:
            self.reject()
        print("Script finished!")
        return True
    def chooseStylePath(self):
        """Open a file dialog for the style image and prefill the style name
        field with the chosen file's stem."""
        style_path = Metashape.app.getOpenFileName(filter="*.jpg;;*.jpeg;;*.JPG;;*.JPEG;;*.png;;*.PNG")
        self.edtStylePath.setText(style_path)
        self.edtStyleName.setText(pathlib.Path(style_path).stem)
    def chooseWorkingDir(self):
        """Open a directory dialog and put the chosen path into the working-dir field."""
        working_dir = Metashape.app.getExistingDirectory()
        self.edtWorkingDir.setText(working_dir)
    def createGUI(self):
        """Lay out the dialog: one grid row per parameter (label + editor),
        plus a progress bar and a Run button wired to modelStyleTransfer()."""
        layout = QtWidgets.QGridLayout()
        row = 0
        # Style image path with a "..." browse button.
        self.txtStylePath= QtWidgets.QLabel()
        self.txtStylePath.setText("Style image:")
        self.txtStylePath.setFixedSize(150, 25)
        self.edtStylePath= QtWidgets.QLineEdit()
        self.edtStylePath.setPlaceholderText("URL or file path")
        self.btnStylePath = QtWidgets.QPushButton("...")
        self.btnStylePath.setFixedSize(25, 25)
        QtCore.QObject.connect(self.btnStylePath, QtCore.SIGNAL("clicked()"), lambda: self.chooseStylePath())
        layout.addWidget(self.txtStylePath, row, 0)
        layout.addWidget(self.edtStylePath, row, 1)
        layout.addWidget(self.btnStylePath, row, 2)
        row += 1
        self.txtStyleName = QtWidgets.QLabel()
        self.txtStyleName.setText("Style name:")
        self.txtStyleName.setFixedSize(150, 25)
        self.edtStyleName = QtWidgets.QLineEdit()
        layout.addWidget(self.txtStyleName, row, 0)
        layout.addWidget(self.edtStyleName, row, 1, 1, 2)
        row += 1
        self.txtStepsNumber = QtWidgets.QLabel()
        self.txtStepsNumber.setText("Steps number:")
        self.txtStepsNumber.setFixedSize(150, 25)
        self.edtStepsNumber = QtWidgets.QLineEdit()
        self.edtStepsNumber.setPlaceholderText("number of iterations")
        layout.addWidget(self.txtStepsNumber, row, 0)
        layout.addWidget(self.edtStepsNumber, row, 1, 1, 2)
        row += 1
        self.txtTextureSize = QtWidgets.QLabel()
        self.txtTextureSize.setText("Texture size:")
        self.txtTextureSize.setFixedSize(150, 25)
        self.edtTextureSize = QtWidgets.QLineEdit()
        self.edtTextureSize.setPlaceholderText("resulting texture resolution")
        layout.addWidget(self.txtTextureSize, row, 0)
        layout.addWidget(self.edtTextureSize, row, 1, 1, 2)
        row += 1
        self.txtRenderingSize = QtWidgets.QLabel()
        self.txtRenderingSize.setText("Rendering size:")
        self.txtRenderingSize.setFixedSize(150, 25)
        self.edtRenderingSize = QtWidgets.QLineEdit()
        self.edtRenderingSize.setPlaceholderText("width of rendering buffer")
        layout.addWidget(self.txtRenderingSize, row, 0)
        layout.addWidget(self.edtRenderingSize, row, 1, 1, 2)
        row += 1
        self.txtModelName = QtWidgets.QLabel()
        self.txtModelName.setText("Model name:")
        self.txtModelName.setFixedSize(150, 25)
        self.edtModelName = QtWidgets.QLineEdit()
        layout.addWidget(self.txtModelName, row, 0)
        layout.addWidget(self.edtModelName, row, 1, 1, 2)
        row += 1
        # Working directory with a "..." browse button.
        self.txtWorkingDir= QtWidgets.QLabel()
        self.txtWorkingDir.setText("Working dir:")
        self.txtWorkingDir.setFixedSize(150, 25)
        self.edtWorkingDir= QtWidgets.QLineEdit()
        self.edtWorkingDir.setPlaceholderText("path to dir")
        self.btnWorkingDir = QtWidgets.QPushButton("...")
        self.btnWorkingDir.setFixedSize(25, 25)
        QtCore.QObject.connect(self.btnWorkingDir, QtCore.SIGNAL("clicked()"), lambda: self.chooseWorkingDir())
        layout.addWidget(self.txtWorkingDir, row, 0)
        layout.addWidget(self.edtWorkingDir, row, 1)
        layout.addWidget(self.btnWorkingDir, row, 2)
        row += 1
        self.txtContentWeight= QtWidgets.QLabel()
        self.txtContentWeight.setText("Content weight:")
        self.txtContentWeight.setFixedSize(150, 25)
        self.edtContentWeight= QtWidgets.QLineEdit()
        layout.addWidget(self.txtContentWeight, row, 0)
        layout.addWidget(self.edtContentWeight, row, 1, 1, 2)
        row += 1
        self.txtUseCameraPositions= QtWidgets.QLabel()
        self.txtUseCameraPositions.setText("Use cameras position:")
        self.txtUseCameraPositions.setFixedSize(150, 25)
        self.chbUseCameraPositions= QtWidgets.QCheckBox()
        # Camera positions can only be used when the chunk has cameras.
        if len(chunk.cameras) == 0:
            self.chbUseCameraPositions.setEnabled(False)
        layout.addWidget(self.txtUseCameraPositions, row, 0)
        layout.addWidget(self.chbUseCameraPositions, row, 1)
        row += 1
        self.txtPBar = QtWidgets.QLabel()
        self.txtPBar.setText("Progress:")
        self.txtPBar.setFixedSize(150, 25)
        self.pBar = QtWidgets.QProgressBar()
        self.pBar.setTextVisible(False)
        self.pBar.setMinimumSize(239, 25)
        layout.addWidget(self.txtPBar, row, 0)
        layout.addWidget(self.pBar, row, 1, 1, 2)
        row += 1
        self.btnRun = QtWidgets.QPushButton("Run")
        layout.addWidget(self.btnRun, row, 1, 1, 2)
        row += 1
        self.setLayout(layout)
        QtCore.QObject.connect(self.btnRun, QtCore.SIGNAL("clicked()"), lambda: self.modelStyleTransfer())
    def initDefaultParams(self):
        """Copy the default parameter values set in __init__ into the GUI fields."""
        self.edtTextureSize.setText(str(self.texture_size))
        self.edtRenderingSize.setText(str(self.rendering_width))
        self.edtStepsNumber.setText(str(self.steps_number))
        self.edtStylePath.setText(str(self.style_path))
        self.edtStyleName.setText(self.style_name)
        self.edtWorkingDir.setText(self.working_dir)
        self.edtModelName.setText(self.model_name)
        self.edtContentWeight.setText(str(self.content_weight))
        self.chbUseCameraPositions.setChecked(self.use_cameras_position)
    def loadParams(self):
        """Read parameters back from the GUI fields, converting to their types.

        Raises:
            Exception: if the style image path or the working dir is empty
                (after notifying the user with a message box).
        """
        self.texture_size = int(self.edtTextureSize.text())
        self.rendering_width = int(self.edtRenderingSize.text())
        self.steps_number = int(self.edtStepsNumber.text())
        self.style_path = self.edtStylePath.text()
        self.style_name = self.edtStyleName.text()
        self.working_dir = self.edtWorkingDir.text()
        self.model_name = self.edtModelName.text()
        self.content_weight = float(self.edtContentWeight.text())
        self.use_cameras_position = self.chbUseCameraPositions.isChecked()
        if len(self.style_path) == 0:
            Metashape.app.messageBox("You should specify style image!")
            raise Exception("You should specify style image!")
        if len(self.working_dir) == 0:
            Metashape.app.messageBox("You should specify working dir!")
            raise Exception("You should specify working dir!")
    def exportInput(self):
        """Export the chunk's model (PLY + OBJ with JPEG texture) into the
        working dir, optionally export cameras, and prepare a clean per-style
        output directory seeded with copies of the model files."""
        working_dir = pathlib.Path(self.working_dir)
        print("Creating working directory '{}'...".format(self.working_dir))
        working_dir.mkdir(parents=True, exist_ok=True)
        self.input_model_path = str(working_dir / "{}.ply".format(self.model_name))
        print("Exporting model to '{}'...".format(self.input_model_path))
        chunk.exportModel(self.input_model_path, binary=True, texture_format=Metashape.ImageFormatJPEG, texture=True,
                          normals=False, colors=False, cameras=False, markers=False, format=Metashape.ModelFormatPLY)
        # OBJ export last: input_model_path ends up pointing at the OBJ,
        # which is what the style-transfer stage loads.
        self.input_model_path = str(working_dir / "{}.obj".format(self.model_name))
        print("Exporting model to '{}'...".format(self.input_model_path))
        chunk.exportModel(self.input_model_path, binary=False, texture_format=Metashape.ImageFormatJPEG, texture=True,
                          normals=False, colors=False, cameras=False, markers=False, format=Metashape.ModelFormatOBJ)
        self.input_texture_path = str(working_dir / "{}.jpg".format(self.model_name))
        self.input_cameras_path = str(working_dir / "{}.cameras".format(self.model_name))
        # Fall back to random view sampling when cameras are unavailable or disabled.
        if not self.use_cameras_position or not self.exportCameras():
            self.input_cameras_path = None
        self.output_dir = working_dir / self.style_name
        print("Creating output directory '{}'...".format(str(self.output_dir)))
        if self.output_dir.exists():
            print("  output directory already exists! Deleting...")
            shutil.rmtree(str(self.output_dir))
        self.output_dir.mkdir(parents=False, exist_ok=False)
        # Seed the output dir with copies of the exported model files.
        for ext in ["obj", "ply", "mtl"]:
            input_path = working_dir / "{}.{}".format(self.model_name, ext)
            output_path = self.output_dir / "{}.{}".format(self.model_name, ext)
            print("  copying {}.{} to output...".format(self.model_name, ext))
            shutil.copyfile(str(input_path), str(output_path))
        self.output_texture_path = str(self.output_dir / "{}.jpg".format(self.model_name))
        self.result_model_path = str(self.output_dir / "{}.obj".format(self.model_name))
    def exportCameras(self):
        """Write aligned cameras (world transform, fields of view, image size)
        to self.input_cameras_path as a repr'd list of dicts.

        If any cameras are selected, only selected ones are exported.
        Returns False (and writes nothing) when no usable camera exists.
        """
        matrices = []
        selection_active = len([c for c in chunk.cameras if c.selected]) > 0
        for c in chunk.cameras:
            # Skip unselected (when a selection exists), disabled, unaligned,
            # or non-regular (e.g. keyframe) cameras.
            if (selection_active and not c.selected) or not c.enabled or c.transform is None or c.type != Metashape.Camera.Type.Regular:
                continue
            calibration = c.sensor.calibration
            f, w, h = calibration.f, calibration.width, calibration.height
            transformToWorld = chunk.transform.matrix * c.transform
            matrices.append({
                # Parse "Matrix([...])" repr into a plain nested list via eval.
                "transformToWorld": eval(str(transformToWorld)[len("Matrix("):-1]),
                # Horizontal/vertical FOV in degrees from the pinhole model.
                "fovH": 2 * math.atan(w / 2 / f) * 180 / math.pi,
                "fovV": 2 * math.atan(h / 2 / f) * 180 / math.pi,
                "w": w,
                "h": h,
            })
        if len(matrices) == 0:
            return False
        with open(self.input_cameras_path, "w") as f:
            f.writelines(str(matrices))
        return True
    def loadCameras(self):
        """Load cameras written by exportCameras() and derive per-camera
        transforms plus the shared max FOV and aspect ratio.

        Leaves self.cameras as None (random view sampling) when no cameras
        file was exported or the file is empty.
        """
        import numpy as np
        if self.input_cameras_path is None:
            return None
        # The file holds a single repr'd Python list; eval is acceptable here
        # because the file was written by this script itself.
        with open(self.input_cameras_path) as f:
            self.cameras = f.readline()
        self.cameras = eval(self.cameras)
        if len(self.cameras) == 0:
            print("Cameras will be randomly sampled!")
            self.cameras = None
            self.max_fovy = 10.0
            self.aspect_ratio = 1.0
        else:
            print("Loaded {} cameras!".format(len(self.cameras)))
            self.max_fovy = 0.0
            self.aspect_ratio = 0.0
            for i in range(len(self.cameras)):
                m = np.float32(self.cameras[i]["transformToWorld"])
                m = np.linalg.inv(m)
                # Flip the Y and Z rows of the world->camera matrix —
                # presumably to match the renderer's camera convention; verify
                # against glrenderer if changed.
                m[1, :] = -m[1, :]
                m[2, :] = -m[2, :]
                self.cameras[i]["transformToCamera"] = m
                self.cameras[i]["transformToWorld"] = np.linalg.inv(m)
                # All cameras are rendered with one fovy/aspect: take the max
                # vertical FOV and the (assumed common) width/height ratio.
                self.max_fovy = max(self.cameras[i]["fovV"], self.max_fovy)
                self.aspect_ratio = self.cameras[i]["w"] / self.cameras[i]["h"]
            print("Vertical field of view: {:.2f} degrees. Aspect ratio width/height: {:.2f}.".format(self.max_fovy,
                                                                                                      self.aspect_ratio))
    def textureStyle3D(self):
        """Run neural style transfer on the model's texture atlas.

        Pipeline (TensorFlow 1.x + lucid):
          1. Check a GPU is visible, import heavy dependencies lazily.
          2. Load cameras, the input mesh, its texture and the style image.
          3. Build a differentiable rendering + GoogleNet loss graph.
          4. Optimize the texture for ``self.steps_number`` iterations.
          5. Save the stylized texture and re-import the model into Metashape.

        Side effects: writes the result texture to ``self.output_texture_path``,
        replaces ``chunk.model``, and updates ``self.pBar`` with progress.
        Raises ``Exception`` when no GPU is available to TensorFlow.
        """
        print("Importing tensorflow...")
        # Imports are deferred so the dialog opens fast and a missing
        # TensorFlow install fails only when the user actually runs this.
        import tensorflow as tf
        print("Checking that GPU is visible for tensorflow...")
        if not tf.test.is_gpu_available():
            raise Exception("No GPU available for tensorflow!")
        print("Importing other libraries...")
        import os
        import io
        import sys
        from string import Template
        from pathlib import Path
        import numpy as np
        import PIL.Image
        # import matplotlib.pylab as pl
        from IPython.display import clear_output, display, Image, HTML
        # if os.name != 'nt':
        #    from lucid.misc.gl.glcontext import create_opengl_context
        import OpenGL.GL as gl
        from lucid.misc.gl import meshutil
        from lucid.misc.gl import glrenderer
        import lucid.misc.io.showing as show
        import lucid.misc.io as lucid_io
        from lucid.misc.tfutil import create_session
        from lucid.modelzoo import vision_models
        from lucid.optvis import objectives
        from lucid.optvis import param
        from lucid.optvis.style import StyleLoss, mean_l1_loss
        from lucid.optvis.param.spatial import sample_bilinear
        # if os.name != 'nt':
        #    print("Creating OpenGL context...")
        #    create_opengl_context()
        # Touch the GL context so failures surface early.
        gl.glGetString(gl.GL_VERSION)
        print("Loading vision model...")
        model = vision_models.InceptionV1()
        model.load_graphdef()
        def prepare_image(fn, size=None):
            # Read an image file into a float32 array in [0, 1]; optionally
            # resize to ``size`` (a (width, height) tuple) first.
            data = lucid_io.reading.read(fn)
            im = PIL.Image.open(io.BytesIO(data)).convert('RGB')
            if size:
                im = im.resize(size, PIL.Image.ANTIALIAS)
            return np.float32(im) / 255.0
        # Populates self.cameras / self.max_fovy / self.aspect_ratio.
        self.loadCameras()
        print("Loading input model from '{}'...".format(self.input_model_path))
        mesh = meshutil.load_obj(self.input_model_path)
        if self.cameras is None:
            # No known cameras: normalize the mesh so random views frame it.
            mesh = meshutil.normalize_mesh(mesh)
        print("Loading input texture from '{}'...".format(self.input_texture_path))
        original_texture = prepare_image(self.input_texture_path, (self.texture_size, self.texture_size))
        print("Loading style from '{}'...".format(self.style_path))
        style = prepare_image(self.style_path)
        rendering_width = self.rendering_width
        rendering_height = int(rendering_width // self.aspect_ratio)
        print("Creating renderer with resolution {}x{}...".format(rendering_width, rendering_height))
        renderer = glrenderer.MeshRenderer((rendering_width, rendering_height))
        if self.cameras is not None:
            print(" renderer fovy: {:.2f} degrees".format(self.max_fovy))
            renderer.fovy = self.max_fovy
        sess = create_session(timeout_sec=0)
        # t_fragments is used to feed rasterized UV coordinates for the current view.
        # Channels: [U, V, _, Alpha]. Alpha is 1 for pixels covered by the object, and
        # 0 for background.
        t_fragments = tf.placeholder(tf.float32, [None, None, 4])
        t_uv = t_fragments[..., :2]
        t_alpha = t_fragments[..., 3:]
        # Texture atlas to optimize (decorrelated FFT parameterization).
        t_texture = param.image(self.texture_size, fft=True, decorrelate=True)[0]
        # Variable to store the original mesh texture used to render content views
        content_var = tf.Variable(tf.zeros([self.texture_size, self.texture_size, 3]), trainable=False)
        # Sample current and original textures with provided pixel data
        t_joined_texture = tf.concat([t_texture, content_var], -1)
        t_joined_frame = sample_bilinear(t_joined_texture, t_uv) * t_alpha
        t_frame_current, t_frame_content = t_joined_frame[..., :3], t_joined_frame[..., 3:]
        t_joined_frame = tf.stack([t_frame_current, t_frame_content], 0)
        # Feeding the rendered frames to the Neural Network
        t_input = tf.placeholder_with_default(t_joined_frame, [None, None, None, 3])
        model.import_graph(t_input)
        # style loss
        style_layers = [sess.graph.get_tensor_by_name('import/%s:0' % s)[0] for s in self.googlenet_style_layers]
        # L1-loss seems to be more stable for GoogleNet
        # Note that we use style_decay>0 to average style-describing Gram matrices
        # over the recent viewports. Please refer to StyleLoss for the details.
        sl = StyleLoss(style_layers, self.style_decay, loss_func=mean_l1_loss)
        # content loss
        content_layer = sess.graph.get_tensor_by_name('import/%s:0' % self.googlenet_content_layer)
        content_loss = mean_l1_loss(content_layer[0], content_layer[1]) * self.content_weight
        # setup optimization
        total_loss = content_loss + sl.style_loss
        t_lr = tf.constant(0.05)
        trainer = tf.train.AdamOptimizer(t_lr)
        train_op = trainer.minimize(total_loss)
        init_op = tf.global_variables_initializer()
        loss_log = []
        def reset(style_img, content_texture):
            # Re-initialize variables, set the style target and store the
            # content texture before a fresh optimization run.
            del loss_log[:]
            init_op.run()
            sl.set_style({t_input: style_img[None, ...]})
            content_var.load(content_texture)
        def sample_random_view():
            # Either sample a synthetic view around the normalized mesh, or
            # pick one of the loaded photo cameras at random.
            if self.cameras is None:
                return meshutil.sample_view(10.0, 12.0)
            else:
                rand_m = self.cameras[np.random.randint(0, len(self.cameras))]["transformToCamera"].copy()
                return rand_m
        def run(mesh, step_n=400):
            # Main optimization loop: render a random view, take one Adam
            # step on the texture, and report progress to the dialog.
            app = QtWidgets.QApplication.instance()
            for i in range(step_n):
                fragments = renderer.render_mesh(
                    modelview=sample_random_view(),
                    position=mesh['position'], uv=mesh['uv'],
                    face=mesh['face'])
                _, loss = sess.run([train_op, [content_loss, sl.style_loss]], {t_fragments: fragments})
                loss_log.append(loss)
                if i == 0 or (i + 1) % 50 == 0:
                    # clear_output()
                    last_frame, last_content = sess.run([t_frame_current, t_frame_content], {t_fragments: fragments})
                    # show.images([last_frame, last_content], ['current frame', 'content'])
                if i == 0 or (i + 1) % 10 == 0:
                    print(len(loss_log), loss)
                    pass
                # Show progress (the first step_n//10 "percent" was consumed
                # by setup before this loop started).
                self.pBar.setValue((i + step_n//10 + 1) / (step_n + step_n//10) * 100)
                app.processEvents()
        reset(style, original_texture)
        print("Running {} iterations...".format(self.steps_number))
        run(mesh, step_n=self.steps_number)
        print("Finished!")
        texture = t_texture.eval()
        print("Exporting result texture to '{}'...".format(self.output_texture_path))
        lucid_io.save(texture, self.output_texture_path, quality=90)
        sess.close()
        print("Importing result model to Metashape '{}'...".format(self.result_model_path))
        # Drop the old model first so the import replaces it cleanly.
        chunk.model = None
        chunk.importModel(self.result_model_path)
        chunk.model.label = self.style_name
        Metashape.app.messageBox("Everything worked fine!\n"
                                 "Please save project and RESTART Metashape!\n"
                                 "Because video memory was not released by TensorFlow!")
def model_style_transfer():
    """Menu entry point: validate the active chunk and open the style-transfer dialog.

    Stores the active chunk in the module-level ``chunk`` variable and raises
    ``Exception`` when there is no model or the model has no texture.
    """
    global chunk
    chunk = Metashape.app.document.chunk
    # A chunk with a model is mandatory.
    if chunk is None or chunk.model is None:
        raise Exception("No active model!")
    # The model must carry a texture with UV coordinates.
    model = chunk.model
    untextured = (
        model.texture is None
        or model.tex_vertices is None
        or len(model.tex_vertices) == 0
    )
    if untextured:
        raise Exception("Model is not textured!")
    qt_app = QtWidgets.QApplication.instance()
    main_window = qt_app.activeWindow()
    # Constructing the dialog shows it (modal); the instance itself is not reused.
    dialog = ModelStyleTransferDlg(main_window)
# Register the tool under Metashape's custom menu; clicking the entry
# invokes model_style_transfer() on the active document.
label = "Custom menu/Model style transfer"
Metashape.app.addMenuItem(label, model_style_transfer)
print("To execute this script press {}".format(label))
| [
"Metashape.app.getOpenFileName",
"lucid.modelzoo.vision_models.InceptionV1",
"lucid.optvis.style.mean_l1_loss",
"lucid.misc.io.save",
"OpenGL.GL.glGetString",
"Metashape.app.messageBox",
"io.BytesIO",
"lucid.misc.tfutil.create_session",
"Metashape.app.getExistingDirectory",
"math.atan",
"lucid.m... | [((22210, 22264), 'Metashape.app.addMenuItem', 'Metashape.app.addMenuItem', (['label', 'model_style_transfer'], {}), '(label, model_style_transfer)\n', (22235, 22264), False, 'import Metashape\n'), ((22058, 22091), 'PySide2.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (22089, 22091), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((962, 994), 'Metashape.app.version.split', 'Metashape.app.version.split', (['"""."""'], {}), "('.')\n", (989, 994), False, 'import Metashape\n'), ((2669, 2709), 'PySide2.QtWidgets.QDialog.__init__', 'QtWidgets.QDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (2695, 2709), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((3329, 3416), 'Metashape.app.getOpenFileName', 'Metashape.app.getOpenFileName', ([], {'filter': '"""*.jpg;;*.jpeg;;*.JPG;;*.JPEG;;*.png;;*.PNG"""'}), "(filter=\n '*.jpg;;*.jpeg;;*.JPG;;*.JPEG;;*.png;;*.PNG')\n", (3358, 3416), False, 'import Metashape\n'), ((3578, 3614), 'Metashape.app.getExistingDirectory', 'Metashape.app.getExistingDirectory', ([], {}), '()\n', (3612, 3614), False, 'import Metashape\n'), ((3706, 3729), 'PySide2.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (3727, 3729), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((3774, 3792), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (3790, 3792), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((3918, 3939), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (3937, 3939), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4033, 4061), 'PySide2.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""..."""'], {}), "('...')\n", (4054, 4061), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4421, 4439), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (4437, 4439), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4565, 4586), 
'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (4584, 4586), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4745, 4763), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (4761, 4763), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4897, 4918), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (4916, 4918), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((5152, 5170), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (5168, 5170), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((5304, 5325), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (5323, 5325), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((5569, 5587), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (5585, 5587), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((5729, 5750), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (5748, 5750), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((5993, 6011), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (6009, 6011), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((6137, 6158), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (6156, 6158), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((6315, 6333), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (6331, 6333), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((6462, 6483), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (6481, 6483), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((6574, 6602), 'PySide2.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""..."""'], {}), "('...')\n", (6595, 6602), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((6971, 6989), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (6987, 6989), False, 'from PySide2 
import QtGui, QtCore, QtWidgets\n'), ((7130, 7151), 'PySide2.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (7149, 7151), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((7324, 7342), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (7340, 7342), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((7504, 7525), 'PySide2.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', ([], {}), '()\n', (7523, 7525), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((7782, 7800), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (7798, 7800), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((7906, 7930), 'PySide2.QtWidgets.QProgressBar', 'QtWidgets.QProgressBar', ([], {}), '()\n', (7928, 7930), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((8150, 8178), 'PySide2.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Run"""'], {}), "('Run')\n", (8171, 8178), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((9913, 9943), 'pathlib.Path', 'pathlib.Path', (['self.working_dir'], {}), '(self.working_dir)\n', (9925, 9943), False, 'import pathlib, shutil, math\n'), ((15719, 15748), 'OpenGL.GL.glGetString', 'gl.glGetString', (['gl.GL_VERSION'], {}), '(gl.GL_VERSION)\n', (15733, 15748), True, 'import OpenGL.GL as gl\n'), ((15807, 15834), 'lucid.modelzoo.vision_models.InceptionV1', 'vision_models.InceptionV1', ([], {}), '()\n', (15832, 15834), False, 'from lucid.modelzoo import vision_models\n'), ((16263, 16303), 'lucid.misc.gl.meshutil.load_obj', 'meshutil.load_obj', (['self.input_model_path'], {}), '(self.input_model_path)\n', (16280, 16303), False, 'from lucid.misc.gl import meshutil\n'), ((16932, 16992), 'lucid.misc.gl.glrenderer.MeshRenderer', 'glrenderer.MeshRenderer', (['(rendering_width, rendering_height)'], {}), '((rendering_width, rendering_height))\n', (16955, 16992), False, 'from lucid.misc.gl import glrenderer\n'), ((17163, 17192), 'lucid.misc.tfutil.create_session', 
'create_session', ([], {'timeout_sec': '(0)'}), '(timeout_sec=0)\n', (17177, 17192), False, 'from lucid.misc.tfutil import create_session\n'), ((17417, 17460), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, 4]'], {}), '(tf.float32, [None, None, 4])\n', (17431, 17460), True, 'import tensorflow as tf\n'), ((17943, 17982), 'tensorflow.concat', 'tf.concat', (['[t_texture, content_var]', '(-1)'], {}), '([t_texture, content_var], -1)\n', (17952, 17982), True, 'import tensorflow as tf\n'), ((18175, 18222), 'tensorflow.stack', 'tf.stack', (['[t_frame_current, t_frame_content]', '(0)'], {}), '([t_frame_current, t_frame_content], 0)\n', (18183, 18222), True, 'import tensorflow as tf\n'), ((18302, 18368), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['t_joined_frame', '[None, None, None, 3]'], {}), '(t_joined_frame, [None, None, None, 3])\n', (18329, 18368), True, 'import tensorflow as tf\n'), ((18773, 18838), 'lucid.optvis.style.StyleLoss', 'StyleLoss', (['style_layers', 'self.style_decay'], {'loss_func': 'mean_l1_loss'}), '(style_layers, self.style_decay, loss_func=mean_l1_loss)\n', (18782, 18838), False, 'from lucid.optvis.style import StyleLoss, mean_l1_loss\n'), ((19152, 19169), 'tensorflow.constant', 'tf.constant', (['(0.05)'], {}), '(0.05)\n', (19163, 19169), True, 'import tensorflow as tf\n'), ((19188, 19216), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['t_lr'], {}), '(t_lr)\n', (19210, 19216), True, 'import tensorflow as tf\n'), ((19284, 19317), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (19315, 19317), True, 'import tensorflow as tf\n'), ((21180, 21240), 'lucid.misc.io.save', 'lucid_io.save', (['texture', 'self.output_texture_path'], {'quality': '(90)'}), '(texture, self.output_texture_path, quality=90)\n', (21193, 21240), True, 'import lucid.misc.io as lucid_io\n'), ((21486, 21647), 'Metashape.app.messageBox', 'Metashape.app.messageBox', 
(['"""Everything worked fine!\nPlease save project and RESTART Metashape!\nBecause video memory was not released by TensorFlow!"""'], {}), '(\n """Everything worked fine!\nPlease save project and RESTART Metashape!\nBecause video memory was not released by TensorFlow!"""\n )\n', (21510, 21647), False, 'import Metashape\n'), ((4159, 4185), 'PySide2.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (4172, 4185), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((6702, 6728), 'PySide2.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (6715, 6728), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((8325, 8351), 'PySide2.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (8338, 8351), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((9565, 9624), 'Metashape.app.messageBox', 'Metashape.app.messageBox', (['"""You should specify style image!"""'], {}), "('You should specify style image!')\n", (9589, 9624), False, 'import Metashape\n'), ((9740, 9799), 'Metashape.app.messageBox', 'Metashape.app.messageBox', (['"""You should specify working dir!"""'], {}), "('You should specify working dir!')\n", (9764, 9799), False, 'import Metashape\n'), ((14543, 14569), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (14567, 14569), True, 'import tensorflow as tf\n'), ((15927, 15952), 'lucid.misc.io.reading.read', 'lucid_io.reading.read', (['fn'], {}), '(fn)\n', (15948, 15952), True, 'import lucid.misc.io as lucid_io\n'), ((16356, 16385), 'lucid.misc.gl.meshutil.normalize_mesh', 'meshutil.normalize_mesh', (['mesh'], {}), '(mesh)\n', (16379, 16385), False, 'from lucid.misc.gl import meshutil\n'), ((17593, 17651), 'lucid.optvis.param.image', 'param.image', (['self.texture_size'], {'fft': '(True)', 'decorrelate': '(True)'}), '(self.texture_size, fft=True, decorrelate=True)\n', (17604, 17651), False, 'from lucid.optvis import param\n'), ((17773, 
17824), 'tensorflow.zeros', 'tf.zeros', (['[self.texture_size, self.texture_size, 3]'], {}), '([self.texture_size, self.texture_size, 3])\n', (17781, 17824), True, 'import tensorflow as tf\n'), ((18008, 18047), 'lucid.optvis.param.spatial.sample_bilinear', 'sample_bilinear', (['t_joined_texture', 't_uv'], {}), '(t_joined_texture, t_uv)\n', (18023, 18047), False, 'from lucid.optvis.param.spatial import sample_bilinear\n'), ((18986, 19034), 'lucid.optvis.style.mean_l1_loss', 'mean_l1_loss', (['content_layer[0]', 'content_layer[1]'], {}), '(content_layer[0], content_layer[1])\n', (18998, 19034), False, 'from lucid.optvis.style import StyleLoss, mean_l1_loss\n'), ((19883, 19916), 'PySide2.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (19914, 19916), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((2040, 2081), 'pathlib.Path', 'pathlib.Path', (['Metashape.app.document.path'], {}), '(Metashape.app.document.path)\n', (2052, 2081), False, 'import pathlib, shutil, math\n'), ((3043, 3122), 'Metashape.app.messageBox', 'Metashape.app.messageBox', (['"""Something gone wrong!\nPlease check the console."""'], {}), '("""Something gone wrong!\nPlease check the console.""")\n', (3067, 3122), False, 'import Metashape\n'), ((3492, 3516), 'pathlib.Path', 'pathlib.Path', (['style_path'], {}), '(style_path)\n', (3504, 3516), False, 'import pathlib, shutil, math\n'), ((13689, 13736), 'numpy.float32', 'np.float32', (["self.cameras[i]['transformToWorld']"], {}), "(self.cameras[i]['transformToWorld'])\n", (13699, 13736), True, 'import numpy as np\n'), ((13757, 13773), 'numpy.linalg.inv', 'np.linalg.inv', (['m'], {}), '(m)\n', (13770, 13773), True, 'import numpy as np\n'), ((13955, 13971), 'numpy.linalg.inv', 'np.linalg.inv', (['m'], {}), '(m)\n', (13968, 13971), True, 'import numpy as np\n'), ((16116, 16130), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (16126, 16130), True, 'import numpy as np\n'), ((19641, 19673), 
'lucid.misc.gl.meshutil.sample_view', 'meshutil.sample_view', (['(10.0)', '(12.0)'], {}), '(10.0, 12.0)\n', (19661, 19673), False, 'from lucid.misc.gl import meshutil\n'), ((1935, 1976), 'pathlib.Path', 'pathlib.Path', (['Metashape.app.document.path'], {}), '(Metashape.app.document.path)\n', (1947, 1976), False, 'import pathlib, shutil, math\n'), ((15985, 16001), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (15995, 16001), False, 'import io\n'), ((12687, 12707), 'math.atan', 'math.atan', (['(w / 2 / f)'], {}), '(w / 2 / f)\n', (12696, 12707), False, 'import pathlib, shutil, math\n'), ((12753, 12773), 'math.atan', 'math.atan', (['(h / 2 / f)'], {}), '(h / 2 / f)\n', (12762, 12773), False, 'import pathlib, shutil, math\n')] |
##
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: lownerjohn_ellipsoid.py
#
# Purpose:
# Computes the Lowner-John inner and outer ellipsoidal
# approximations of a polytope.
#
#
# Note:
# To plot the solution the Python package pyx is required.
#
#
# References:
# [1] "Lectures on Modern Optimization", Ben-Tal and Nemirovski, 2000.
# [2] "MOSEK modeling manual", 2013
#
#
##
import sys
from math import sqrt, ceil, log
import mosek
from mosek.fusion import *
def geometric_mean(M,x,t):
    '''
    Models the convex set

        S = { (x, t) in R^n x R | x >= 0, t <= (x1 * x2 * ... * xn)^(1/n) }

    (the hypograph of the geometric mean of x) as the intersection of rotated
    quadratic cones and affine hyperplanes; see [1, p. 105] or [2, p. 21].

    Example of the modeling procedure: suppose we have

        t <= (x1 * x2 * x3)^(1/3)

    for some t >= 0, x >= 0. We rewrite it as

        t^4 <= x1 * x2 * x3 * x4,   x4 = t

    which is equivalent to (see [1])

        x11^2 <= 2*x1*x2,  x12^2 <= 2*x3*x4,
        x21^2 <= 2*x11*x12,
        sqrt(8)*x21 = t,   x4 = t.

    Parameters:
        M : mosek.fusion.Model to which constraints/variables are added
        x : variable vector (length n), x >= 0
        t : scalar variable bounded above by the geometric mean of x

    References:
        [1] "Lectures on Modern Optimization", Ben-Tal and Nemirovski, 2000.
        [2] "MOSEK modeling manual", 2013
    '''
    def rec(x):
        # One halving level of the binary-tree construction: pair consecutive
        # entries of x with new variables y via rotated quadratic cones
        # (y_i^2 <= 2 * x_{2i} * x_{2i+1}), then recurse until one entry is left.
        n = x.getShape().dim(0)
        if n > 1:
            y = M.variable(int(n//2), Domain.unbounded())
            M.constraint(Var.hstack(Var.reshape(x, [n//2,2]), y), Domain.inRotatedQCone())
            return rec(y)
        else:
            return x
    n = x.getShape().dim(0)
    # l = number of tree levels; m = padding needed to reach the next power of 2.
    l = int(ceil(log(n, 2)))
    m = int(2**l) - n
    # if size of x is not a power of 2 we pad it:
    if m > 0:
        x_padding = M.variable(m,Domain.unbounded())
        # set the last m elements equal to t
        M.constraint(Expr.sub(x_padding, Var.repeat(t,m)), Domain.equalsTo(0.0))
        x = Var.vstack(x,x_padding)
    # Tie the root of the recursion back to t: sqrt(2)^l * t == rec(x).
    M.constraint(Expr.sub(Expr.mul(2.0**(l/2.0), t),rec(x)), Domain.equalsTo(0.0))
def det_rootn(M, X, t):
    '''
    Models the hypograph of the n-th root of the determinant of a positive
    definite matrix. See [1,2] for more details.

    The convex set (a hypograph)

        C = { (X, t) in S^n_+ x R | t <= det(X)^{1/n} },

    can be modeled as the intersection of a semidefinite cone

        [ X, Z; Z^T, Diag(Z) ] >= 0

    and a number of rotated quadratic cones and affine hyperplanes,

        t <= (Z11*Z22*...*Znn)^{1/n}   (see geometric_mean).

    Parameters:
        M : mosek.fusion.Model to which constraints/variables are added
        X : flattened n x n symmetric matrix variable (PSD)
        t : scalar variable bounded above by det(X)^{1/n}

    References:
        [1] "Lectures on Modern Optimization", Ben-Tal and Nemirovski, 2000.
        [2] "MOSEK modeling manual", 2013
    '''
    # X is passed flattened; recover the matrix order n.
    n = int(sqrt(X.size()))
    # Setup variables
    Y = M.variable(Domain.inPSDCone(2*n))
    # Setup Y = [X, Z; Z^T , diag(Z)]
    Y11 = Y.slice([0, 0], [n, n])
    Y21 = Y.slice([n, 0], [2*n, n])
    Y22 = Y.slice([n, n], [2*n, 2*n])
    # Force Y22 = Diag(Z): zero everything in Y22 except the diagonal of Y21.
    M.constraint( Expr.sub(Expr.mulElm( Matrix.eye(n) ,Y21), Y22), Domain.equalsTo(0.0) )
    M.constraint( Expr.sub(X, Y11), Domain.equalsTo(0.0) )
    # t^n <= (Z11*Z22*...*Znn)
    geometric_mean(M, Y22.diag(), t)
def lownerjohn_inner(A, b):
    '''
    Maximum-volume inscribed (inner Lowner-John) ellipsoid of the polytope

        S = { x in R^n | Ax < b }.

    The ellipsoid is parameterized as { x | x = C*u + d, ||u||_2 <= 1 };
    its volume is proportional to det(C)^(1/n), so we solve

        maximize    t
        subject to  t <= det(C)^(1/n)
                    || C*ai ||_2 <= bi - ai^T * d,  i = 1..m
                    C is PSD

    a mixed conic-quadratic / semidefinite program.

    Returns (C, d) with C given as a list of n row lists and d as the center.

    References:
        [1] "Lectures on Modern Optimization", Ben-Tal and Nemirovski, 2000.
    '''
    with Model("lownerjohn_inner") as M:
        M.setLogHandler(sys.stdout)
        dim = len(A[0])
        # Decision variables: objective scalar t, shape matrix C, center d.
        t = M.variable("t", 1, Domain.greaterThan(0.0))
        C = M.variable("C", [dim, dim], Domain.unbounded())
        d = M.variable("d", dim, Domain.unbounded())
        # For every facet i: (b_i - a_i^T d, C a_i) must lie in a quadratic cone.
        slack = Expr.sub(b, Expr.mul(A, d))
        M.constraint("qc",
                     Expr.hstack(slack, Expr.mul(A, C.transpose())),
                     Domain.inQCone())
        # Hypograph constraint t <= det(C)^{1/n}.
        det_rootn(M, C, t)
        # Maximize t, i.e. the (root of the) ellipsoid volume.
        M.objective(ObjectiveSense.Maximize, t)
        M.solve()
        C_flat = C.level()
        center = d.level()
        shape_rows = [C_flat[r * dim:(r + 1) * dim] for r in range(dim)]
        return (shape_rows, center)
def lownerjohn_outer(x):
    '''
    Minimum-volume enclosing (outer Lowner-John) ellipsoid of a polytope
    given as the convex hull of a set of points

        S = conv{ x1, x2, ... , xm },   xi in R^n.

    The ellipsoid is { x | || P*(x-c) ||_2 <= 1 }; its volume is proportional
    to det(P)^{-1/n}, so the problem is solved as

        maximize    t
        subject to  t <= det(P)^(1/n)
                    || P*xi + c ||_2 <= 1,  i = 1..m
                    P is PSD.

    Returns (P, c) with P given as a list of n row lists.

    References:
        [1] "Lectures on Modern Optimization", Ben-Tal and Nemirovski, 2000.
    '''
    with Model("lownerjohn_outer") as M:
        m, n = len(x), len(x[0])
        M.setLogHandler(sys.stdout)
        # Setup variables: objective scalar t, shape matrix P, offset c.
        t = M.variable("t", 1, Domain.greaterThan(0.0))
        P = M.variable("P", [n, n], Domain.unbounded())
        c = M.variable("c", n, Domain.unbounded())
        # (1, P*xi + c) in Q for every point xi.
        # BUGFIX: the repeated offset must be reshaped to [m, n]; the previous
        # hard-coded [m, 2] only worked for 2-dimensional point sets.
        M.constraint("qc",
                     Expr.hstack(Expr.ones(m),
                                 Expr.sub(Expr.mul(x, P.transpose()),
                                          Var.reshape(Var.repeat(c, m), [m, n])
                                          )
                                 ),
                     Domain.inQCone())
        # Hypograph constraint t <= det(P)^{1/n}.
        det_rootn(M, P, t)
        # Maximizing t minimizes the enclosing ellipsoid's volume.
        M.objective(ObjectiveSense.Maximize, t)
        M.solve()
        P, c = P.level(), c.level()
        return ([P[i:i + n] for i in range(0, n * n, n)], c)
#############################################################################################
if __name__ == '__main__':
    # A small 2D polytope given by its vertices, in order.
    p = [ [0.,0.], [1.,3.], [5.,4.], [7.,1.], [3.,-2.] ]
    # Outer ellipsoid of the convex hull of the points.
    Po, co = lownerjohn_outer(p)
    # Facet description Ax <= b: each row of A is the (unnormalized) edge
    # normal of the segment from p[i-1] to p[i].
    A = [ [-p[i][1]+p[i-1][1],p[i][0]-p[i-1][0]] for i in range(len(p)) ]
    b = [ A[i][0]*p[i][0]+A[i][1]*p[i][1] for i in range(len(p)) ]
    # Inner ellipsoid of the polytope { x | Ax <= b }.
    Ci, di = lownerjohn_inner(A, b)
    # Plotting is optional (requires the 'pyx' package); any failure here is
    # deliberately non-fatal.
    try:
        import numpy
        from pyx import *
        Po = numpy.array(Po)
        Poi = numpy.linalg.inv(Po)
        co = numpy.array(co)
        c = canvas.canvas()
        c.stroke(box.polygon(p).path(), [style.linestyle.dashed])
        c.stroke(path.circle(0, 0, 1), [trafo.trafo(Ci, di)])
        c.stroke(path.circle(0, 0, 1), [trafo.trafo(Poi, numpy.dot(Poi,co))])
        for pi in p:
            c.fill(path.circle(pi[0],pi[1],0.08))
        c.writePDFfile("lownerjohn")
    except Exception:
        # Broad but not bare: a bare "except:" would also swallow
        # SystemExit and KeyboardInterrupt.
        pass
| [
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"math.log"
] | [((6568, 6583), 'numpy.array', 'numpy.array', (['Po'], {}), '(Po)\n', (6579, 6583), False, 'import numpy\n'), ((6596, 6616), 'numpy.linalg.inv', 'numpy.linalg.inv', (['Po'], {}), '(Po)\n', (6612, 6616), False, 'import numpy\n'), ((6629, 6644), 'numpy.array', 'numpy.array', (['co'], {}), '(co)\n', (6640, 6644), False, 'import numpy\n'), ((1600, 1609), 'math.log', 'log', (['n', '(2)'], {}), '(n, 2)\n', (1603, 1609), False, 'from math import sqrt, ceil, log\n'), ((6871, 6889), 'numpy.dot', 'numpy.dot', (['Poi', 'co'], {}), '(Poi, co)\n', (6880, 6889), False, 'import numpy\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pyqtgraph MultiPlotWidget example: plot a MetaArray with three differently
# scaled signal columns in stacked plots.
## Add path to library (just for examples; you do not need this)
import initExample
import numpy as np
from numpy import linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph import MultiPlotWidget
try:
    from pyqtgraph.metaarray import *
except ImportError:
    # Only a missing metaarray package should trigger the fallback; the
    # previous bare "except:" also hid unrelated errors (even Ctrl-C).
    print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
    exit()
app = pg.mkQApp("MultiPlot Widget Example")
mw = QtGui.QMainWindow()
mw.resize(800,800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()
# Three rows of noise at very different scales (V, A, unitless).
data = np.random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
    {'name': 'Signal', 'cols': [
        {'name': 'Col1', 'units': 'V'},
        {'name': 'Col2', 'units': 'A'},
        {'name': 'Col3'},
    ]},
    {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
])
pw.plot(ma, pen='y')
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| [
"numpy.random.normal",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.array",
"pyqtgraph.mkQApp",
"numpy.linspace",
"pyqtgraph.MultiPlotWidget",
"pyqtgraph.Qt.QtGui.QMainWindow"
] | [((445, 482), 'pyqtgraph.mkQApp', 'pg.mkQApp', (['"""MultiPlot Widget Example"""'], {}), "('MultiPlot Widget Example')\n", (454, 482), True, 'import pyqtgraph as pg\n'), ((488, 507), 'pyqtgraph.Qt.QtGui.QMainWindow', 'QtGui.QMainWindow', ([], {}), '()\n', (505, 507), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((532, 549), 'pyqtgraph.MultiPlotWidget', 'MultiPlotWidget', ([], {}), '()\n', (547, 549), False, 'from pyqtgraph import MultiPlotWidget\n'), ((592, 624), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 1000)'}), '(size=(3, 1000))\n', (608, 624), True, 'import numpy as np\n'), ((627, 658), 'numpy.array', 'np.array', (['[[0.1], [1e-05], [1]]'], {}), '([[0.1], [1e-05], [1]])\n', (635, 658), True, 'import numpy as np\n'), ((871, 895), 'numpy.linspace', 'linspace', (['(0.0)', '(1.0)', '(1000)'], {}), '(0.0, 1.0, 1000)\n', (879, 895), False, 'from numpy import linspace\n'), ((1123, 1152), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (1150, 1152), False, 'from pyqtgraph.Qt import QtGui, QtCore\n')] |
# get overview of data
# read data from given files, and produce a reduced image of each data file
import argparse
import logging
import math
import os
import shutil
import subprocess
from typing import Any, Dict, List, Optional
import h5py
import nibabel as nib
import numpy as np
from PIL import Image
from tqdm import tqdm
from AssistedVolumeSegmentation.common import (
default_seg_file,
get_file_list,
get_full_path,
get_source_data_path,
init_logging,
load_config,
overview_bound_size,
write_annot_file,
)
def reduce_source_data(
config: Dict[str, Any],
bound_size: List[int],
subdir_num: Optional[int],
launch_editor: bool,
reduce_annotation: bool = False,
source_format: Optional[str] = None,
data_path: Optional[str] = None,
write_data_path: Optional[str] = None,
):
"""
Read source data, from either a stack of tiff files from the given dir or an HDF5 data file, and resize the
source volume data to the given size.
:param Dict[str, Any] config: Configuration map
:param List[int] bound_size: Maximum size of the volume, as (height, width, layers)
:param Optional[int] subdir_num: Number of subdirectory data to read
:param bool launch_editor: If true, launch slicer editor using the produced overview data
:param bool reduce_annotation: If true, perform reduction of a generated segmentation. This requires source_format,
data_path and write_data_path to be defined
:param Optional[str] source_format: Format of source data (read from config if not specified)
:param Optional[str] data_path: Path of source data (read from config if not specified)
:param Optional[str] write_data_path: Path to write reduced data to (read from config if not specified)
:return: Array of source images reduced to given size
"""
if source_format is None:
source_format = config["source_data_format"][subdir_num]
read_tiff_stack = source_format == "tiff-stack"
# select source data path according to index in config file. assume source data path is an array
if data_path is None:
assert subdir_num is not None
data_path = get_source_data_path(config, subdir_num)
if write_data_path is None:
write_data_path = get_full_path(
config, subdir_num, "overview_reduced_data"
)
assert source_format is not None and data_path is not None
if reduce_annotation:
logging.info(
"Reducing generated annotation, reading from source %s, format %s, writing to %s, bound size %s"
% (
data_path,
source_format,
write_data_path,
bound_size,
)
)
else:
write_coverage_path = get_full_path(
config, subdir_num, "overview_coverage"
)
logging.info(
"Producing overview, reading from source %s, format %s, writing to %s, %s, bound size %s"
% (
data_path,
source_format,
write_data_path,
write_coverage_path,
bound_size,
)
)
if read_tiff_stack:
# get the list of files
file_list = get_file_list(data_path)
input_stacks = len(file_list)
# read the first image to get dimensions. assume images are the same size
first_img = Image.open(file_list[0])
input_w, input_h = first_img.size # (width, height)
input_dims = (input_h, input_w)
input_count = input_stacks
iterate_dim = 2
else:
# open as hdf5 file
h5_file = h5py.File(data_path, "r")
h5_data_key = config["source_hdf5_dataset_name"]
h5_data = h5_file[h5_data_key]
# with H5 data, iterate over the first dimension (x), which is different to when reading from image stacks.
# it is much faster to slice on the initial dimension(s) than on later ones (eg z).
# find dimensions of data. if 4 dims assume format is (layers, x, y, z), and only read from first layer. don't try and
# index the layer at the start as it would load the whole array in memory, perform indexing with each iteration
if h5_data.ndim == 4:
input_size = h5_data.shape[1:]
else:
input_size = h5_data.shape
input_count = input_size[0]
input_dims = input_size[1:3]
iterate_dim = 0
# find the downsample factor to fit within the given bounds. only allow reducing size
ratios = [min(1.0, x / y) for x, y in zip(bound_size, input_dims)]
reduce_ratio = np.min(ratios)
# find target image size
target_imgs = math.ceil(input_count * reduce_ratio)
target_dims = tuple([math.ceil(x * reduce_ratio) for x in input_dims])
def find_frame_score(input_idx, div_steps, out_slice):
lower_bound = max(input_idx, div_steps[out_slice])
upper_bound = min(input_idx + 1, div_steps[out_slice + 1])
return upper_bound - lower_bound
# read slices in turn
logging.info(
"found %d images(slices), input dim (w %d, h %d), target dim %s, %d slices"
% (input_count, input_dims[1], input_dims[0], target_dims, target_imgs)
)
input_slices = {}
division_steps = np.arange(target_imgs + 1) * (1.0 / reduce_ratio)
output_slice = 0
range_start = 0
range_end = math.ceil(division_steps[1])
output_slices = []
for count in tqdm(range(input_count)):
if read_tiff_stack:
this_file = file_list[count]
this_img = Image.open(this_file)
else:
if h5_data.ndim == 4:
this_img = Image.fromarray(h5_data[0, count])
else:
this_img = Image.fromarray(h5_data[count])
resized_img = this_img.resize(target_dims, Image.NEAREST)
input_slices[count] = resized_img
# check if we have enough slices to output the next slice
if count >= range_end - 1:
slices = np.stack(
[
np.array(input_slices[x])
for x in range(range_start, range_end)
]
) # (slices, width, height)
slice_weights = np.array(
[
find_frame_score(x, division_steps, output_slice)
for x in range(range_start, range_end)
]
) # (slices,)
reduced_slice = (slices * slice_weights[:, None, None]).sum(
axis=0
) / slice_weights.sum()
output_slices.append(reduced_slice)
# update output slice number and input slice range
output_slice += 1
if output_slice < target_imgs:
range_start = math.floor(division_steps[output_slice])
range_end = math.ceil(division_steps[output_slice + 1])
# remove stored input slices outside of range
remove_input_frames = [
x
for x in input_slices.keys()
if x < range_start or x > range_end
]
for x in remove_input_frames:
input_slices.pop(x)
# write output slices
output_array = np.stack(output_slices, axis=iterate_dim)
if reduce_annotation:
# discretise results and write in annotation format
discrete_array = (output_array > 0.5).astype("int")
# define segment fields. only a single segment is given as output in current method
sample_fields = {
"Segment0_Color": "0.525666 0.813434 0.324",
"Segment0_ColorAutoGenerated": "1",
"Segment0_Extent": "0 %d 0 %d 0 %d"
% tuple((np.array(output_array.shape) - 1).tolist()),
"Segment0_ID": "Generated_0",
"Segment0_LabelValue": "1",
"Segment0_Layer": "0",
"Segment0_Name": "Generated_0",
"Segment0_NameAutoGenerated": "1",
"Segment0_Tags": "Segmentation.Status:inprogress|TerminologyEntry:Segmentation category and type - 3D Slicer General Anatomy list~SRT^T-D0050^Tissue~SRT^T-D0050^Tissue~^^~Anatomic codes - DICOM master list~^^~^^|",
}
scales = np.identity(3, dtype="float")
write_annot_file(
index_name=None,
write_path=None,
annot_data=discrete_array,
annot_fields=sample_fields,
scales=scales,
annot_write_path=write_data_path,
)
logging.info(
"Created reduced segmentation in: %s" % (write_data_path,)
)
launch_args = [write_data_path]
else:
# write out reduced volume data
# create Nifti1 format object from array
# define affine transform matrix representing scaling transform
aff_matrix = np.identity(4, dtype="float")
np.fill_diagonal(aff_matrix, [1.0 / reduce_ratio] * 3)
nifti_object = nib.Nifti1Image(output_array, aff_matrix)
overviews_path = get_full_path(config, subdir_num, "overviews_path")
os.makedirs(overviews_path, exist_ok=True)
nib.save(nifti_object, write_data_path)
# create sample coverage segmentation file
if not os.path.exists(write_coverage_path):
script_path = os.path.dirname(os.path.abspath(__file__))
default_file = os.path.join(
script_path, os.pardir, default_seg_file
)
shutil.copyfile(default_file, write_coverage_path)
logging.info(
"Copying initial coverage annotation from %s to %s"
% (default_file, write_coverage_path)
)
logging.info(
"Created overview data in: %s , and empty coverage annotation in: %s"
% (write_data_path, write_coverage_path)
)
launch_args = [write_data_path, write_coverage_path]
args = [config["slicer_path"]] + launch_args
launch_command = " ".join(args)
if launch_editor:
logging.info("Launching Slicer editor with arguments: %s" % args)
subprocess.run(launch_command, shell=True)
else:
logging.info(
"You can open this in Slicer manually, or launch it with: %s"
% (launch_command,)
)
def main():
    """Command-line entry point.

    Either produces a downsampled overview of source data (tiff stack or
    HDF5) for a given subdirectory, or reduces a generated annotation file
    to overview resolution, optionally launching Slicer afterwards.
    """
    init_logging()
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config_file", help="Project config file", required=True)
    parser.add_argument("-s", "--subdir", help="Data subdirectory number", required=False)
    parser.add_argument("-l", "--launch", help="Launch Slicer to edit piece", action="store_true")
    parser.add_argument("-g", "--generated_data", help="Generated annotation to reduce", required=False)
    parser.add_argument("-o", "--generated_data_output", help="Output file for reducing generated data", required=False)
    args = parser.parse_args()
    config = load_config(args.config_file)
    if args.generated_data is not None:
        # Reduction mode: shrink previously generated annotation data.
        # Refuse to clobber an existing output file.
        assert args.generated_data_output is not None
        if os.path.exists(args.generated_data_output):
            raise RuntimeError(
                "Generated data output already exists: %s"
                % args.generated_data_output
            )
        reduce_source_data(
            config,
            overview_bound_size,
            None,
            args.launch,
            reduce_annotation=True,
            source_format="hdf5",
            data_path=args.generated_data,
            write_data_path=args.generated_data_output,
        )
    else:
        # Overview mode: read source data from tiff files in the given
        # directory or an HDF5 file and downsample to the bound size.
        if not isinstance(args.subdir, str) or not args.subdir.isnumeric():
            raise RuntimeError("Subdir should be a number")
        reduce_source_data(
            config,
            overview_bound_size,
            int(args.subdir),
            args.launch,
        )
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"math.floor",
"numpy.array",
"logging.info",
"numpy.arange",
"os.path.exists",
"argparse.ArgumentParser",
"AssistedVolumeSegmentation.common.get_file_list",
"AssistedVolumeSegmentation.common.init_logging",
"subprocess.run",
"numpy.stack",
"numpy.min",
"AssistedVolumeSegmentation.common.get_fu... | [((4654, 4668), 'numpy.min', 'np.min', (['ratios'], {}), '(ratios)\n', (4660, 4668), True, 'import numpy as np\n'), ((4717, 4754), 'math.ceil', 'math.ceil', (['(input_count * reduce_ratio)'], {}), '(input_count * reduce_ratio)\n', (4726, 4754), False, 'import math\n'), ((5088, 5259), 'logging.info', 'logging.info', (["('found %d images(slices), input dim (w %d, h %d), target dim %s, %d slices' %\n (input_count, input_dims[1], input_dims[0], target_dims, target_imgs))"], {}), "(\n 'found %d images(slices), input dim (w %d, h %d), target dim %s, %d slices'\n % (input_count, input_dims[1], input_dims[0], target_dims, target_imgs))\n", (5100, 5259), False, 'import logging\n'), ((5422, 5450), 'math.ceil', 'math.ceil', (['division_steps[1]'], {}), '(division_steps[1])\n', (5431, 5450), False, 'import math\n'), ((7313, 7354), 'numpy.stack', 'np.stack', (['output_slices'], {'axis': 'iterate_dim'}), '(output_slices, axis=iterate_dim)\n', (7321, 7354), True, 'import numpy as np\n'), ((10387, 10401), 'AssistedVolumeSegmentation.common.init_logging', 'init_logging', ([], {}), '()\n', (10399, 10401), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((10415, 10440), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10438, 10440), False, 'import argparse\n'), ((11137, 11166), 'AssistedVolumeSegmentation.common.load_config', 'load_config', (['args.config_file'], {}), '(args.config_file)\n', (11148, 11166), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((2188, 2228), 'AssistedVolumeSegmentation.common.get_source_data_path', 'get_source_data_path', (['config', 'subdir_num'], {}), '(config, subdir_num)\n', (2208, 2228), False, 
'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((2287, 2345), 'AssistedVolumeSegmentation.common.get_full_path', 'get_full_path', (['config', 'subdir_num', '"""overview_reduced_data"""'], {}), "(config, subdir_num, 'overview_reduced_data')\n", (2300, 2345), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((2465, 2643), 'logging.info', 'logging.info', (["('Reducing generated annotation, reading from source %s, format %s, writing to %s, bound size %s'\n % (data_path, source_format, write_data_path, bound_size))"], {}), "(\n 'Reducing generated annotation, reading from source %s, format %s, writing to %s, bound size %s'\n % (data_path, source_format, write_data_path, bound_size))\n", (2477, 2643), False, 'import logging\n'), ((2787, 2841), 'AssistedVolumeSegmentation.common.get_full_path', 'get_full_path', (['config', 'subdir_num', '"""overview_coverage"""'], {}), "(config, subdir_num, 'overview_coverage')\n", (2800, 2841), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((2872, 3068), 'logging.info', 'logging.info', (["('Producing overview, reading from source %s, format %s, writing to %s, %s, bound size %s'\n % (data_path, source_format, write_data_path, write_coverage_path,\n bound_size))"], {}), "(\n 'Producing overview, reading from source %s, format %s, writing to %s, %s, bound size %s'\n % (data_path, source_format, write_data_path, write_coverage_path,\n bound_size))\n", (2884, 3068), False, 'import logging\n'), ((3261, 3285), 'AssistedVolumeSegmentation.common.get_file_list', 'get_file_list', (['data_path'], {}), '(data_path)\n', 
(3274, 3285), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((3428, 3452), 'PIL.Image.open', 'Image.open', (['file_list[0]'], {}), '(file_list[0])\n', (3438, 3452), False, 'from PIL import Image\n'), ((3669, 3694), 'h5py.File', 'h5py.File', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (3678, 3694), False, 'import h5py\n'), ((5315, 5341), 'numpy.arange', 'np.arange', (['(target_imgs + 1)'], {}), '(target_imgs + 1)\n', (5324, 5341), True, 'import numpy as np\n'), ((8303, 8332), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': '"""float"""'}), "(3, dtype='float')\n", (8314, 8332), True, 'import numpy as np\n'), ((8341, 8504), 'AssistedVolumeSegmentation.common.write_annot_file', 'write_annot_file', ([], {'index_name': 'None', 'write_path': 'None', 'annot_data': 'discrete_array', 'annot_fields': 'sample_fields', 'scales': 'scales', 'annot_write_path': 'write_data_path'}), '(index_name=None, write_path=None, annot_data=\n discrete_array, annot_fields=sample_fields, scales=scales,\n annot_write_path=write_data_path)\n', (8357, 8504), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((8587, 8659), 'logging.info', 'logging.info', (["('Created reduced segmentation in: %s' % (write_data_path,))"], {}), "('Created reduced segmentation in: %s' % (write_data_path,))\n", (8599, 8659), False, 'import logging\n'), ((8915, 8944), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': '"""float"""'}), "(4, dtype='float')\n", (8926, 8944), True, 'import numpy as np\n'), ((8953, 9007), 'numpy.fill_diagonal', 'np.fill_diagonal', (['aff_matrix', '([1.0 / reduce_ratio] * 3)'], {}), '(aff_matrix, [1.0 / reduce_ratio] * 3)\n', (8969, 9007), True, 'import numpy as np\n'), ((9031, 9072), 
'nibabel.Nifti1Image', 'nib.Nifti1Image', (['output_array', 'aff_matrix'], {}), '(output_array, aff_matrix)\n', (9046, 9072), True, 'import nibabel as nib\n'), ((9098, 9149), 'AssistedVolumeSegmentation.common.get_full_path', 'get_full_path', (['config', 'subdir_num', '"""overviews_path"""'], {}), "(config, subdir_num, 'overviews_path')\n", (9111, 9149), False, 'from AssistedVolumeSegmentation.common import default_seg_file, get_file_list, get_full_path, get_source_data_path, init_logging, load_config, overview_bound_size, write_annot_file\n'), ((9158, 9200), 'os.makedirs', 'os.makedirs', (['overviews_path'], {'exist_ok': '(True)'}), '(overviews_path, exist_ok=True)\n', (9169, 9200), False, 'import os\n'), ((9209, 9248), 'nibabel.save', 'nib.save', (['nifti_object', 'write_data_path'], {}), '(nifti_object, write_data_path)\n', (9217, 9248), True, 'import nibabel as nib\n'), ((9768, 9901), 'logging.info', 'logging.info', (["('Created overview data in: %s , and empty coverage annotation in: %s' % (\n write_data_path, write_coverage_path))"], {}), "(\n 'Created overview data in: %s , and empty coverage annotation in: %s' %\n (write_data_path, write_coverage_path))\n", (9780, 9901), False, 'import logging\n'), ((10104, 10169), 'logging.info', 'logging.info', (["('Launching Slicer editor with arguments: %s' % args)"], {}), "('Launching Slicer editor with arguments: %s' % args)\n", (10116, 10169), False, 'import logging\n'), ((10178, 10220), 'subprocess.run', 'subprocess.run', (['launch_command'], {'shell': '(True)'}), '(launch_command, shell=True)\n', (10192, 10220), False, 'import subprocess\n'), ((10239, 10338), 'logging.info', 'logging.info', (["('You can open this in Slicer manually, or launch it with: %s' % (\n launch_command,))"], {}), "('You can open this in Slicer manually, or launch it with: %s' %\n (launch_command,))\n", (10251, 10338), False, 'import logging\n'), ((11786, 11828), 'os.path.exists', 'os.path.exists', (['args.generated_data_output'], {}), 
'(args.generated_data_output)\n', (11800, 11828), False, 'import os\n'), ((4780, 4807), 'math.ceil', 'math.ceil', (['(x * reduce_ratio)'], {}), '(x * reduce_ratio)\n', (4789, 4807), False, 'import math\n'), ((5609, 5630), 'PIL.Image.open', 'Image.open', (['this_file'], {}), '(this_file)\n', (5619, 5630), False, 'from PIL import Image\n'), ((9316, 9351), 'os.path.exists', 'os.path.exists', (['write_coverage_path'], {}), '(write_coverage_path)\n', (9330, 9351), False, 'import os\n'), ((9449, 9503), 'os.path.join', 'os.path.join', (['script_path', 'os.pardir', 'default_seg_file'], {}), '(script_path, os.pardir, default_seg_file)\n', (9461, 9503), False, 'import os\n'), ((9546, 9596), 'shutil.copyfile', 'shutil.copyfile', (['default_file', 'write_coverage_path'], {}), '(default_file, write_coverage_path)\n', (9561, 9596), False, 'import shutil\n'), ((9609, 9717), 'logging.info', 'logging.info', (["('Copying initial coverage annotation from %s to %s' % (default_file,\n write_coverage_path))"], {}), "('Copying initial coverage annotation from %s to %s' % (\n default_file, write_coverage_path))\n", (9621, 9717), False, 'import logging\n'), ((5706, 5740), 'PIL.Image.fromarray', 'Image.fromarray', (['h5_data[0, count]'], {}), '(h5_data[0, count])\n', (5721, 5740), False, 'from PIL import Image\n'), ((5786, 5817), 'PIL.Image.fromarray', 'Image.fromarray', (['h5_data[count]'], {}), '(h5_data[count])\n', (5801, 5817), False, 'from PIL import Image\n'), ((6820, 6860), 'math.floor', 'math.floor', (['division_steps[output_slice]'], {}), '(division_steps[output_slice])\n', (6830, 6860), False, 'import math\n'), ((6889, 6932), 'math.ceil', 'math.ceil', (['division_steps[output_slice + 1]'], {}), '(division_steps[output_slice + 1])\n', (6898, 6932), False, 'import math\n'), ((9395, 9420), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (9410, 9420), False, 'import os\n'), ((6098, 6123), 'numpy.array', 'np.array', (['input_slices[x]'], {}), 
'(input_slices[x])\n', (6106, 6123), True, 'import numpy as np\n'), ((7795, 7823), 'numpy.array', 'np.array', (['output_array.shape'], {}), '(output_array.shape)\n', (7803, 7823), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
' a module for conducting the statistical analysis '
__author__ = '<NAME>'
import numpy as np
from scipy.stats import ttest_1samp, ttest_rel, ttest_ind
from neurora.stuff import permutation_test
' a function for conducting the statistical analysis for results of EEG-like data '
def stats(corrs, fisherz=True, permutation=True, iter=5000):
    """
    Conduct the statistical analysis for results of EEG-like data.

    Parameters
    ----------
    corrs : array
        The correlation coefficients.
        The shape of corrs must be [n_subs, n_chls, n_ts, 2]. n_subs, n_chls,
        n_ts represent the number of subjects, the number of channels and the
        number of time-points. 2 represents a r-value and a p-value.
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is True.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration. (Name shadows the builtin but is kept for
        backward compatibility with existing callers.)

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_chls, n_ts, 2]. n_chls, n_ts represent the
        number of channels and the number of time-points. 2 represents a
        t-value and a p-value.

    Notes
    -----
    n_subs must >= 6.
    This function can be used for the correlation results of NPS, ISC,
    eeg-like RDMs-correlations.
    """
    if len(np.shape(corrs)) != 4:
        return "Invalid input!"
    # get the number of subjects, channels & time-points
    subs, chls, ts = np.shape(corrs)[:3]
    # a one-sample t-test needs a minimally sized sample
    if subs < 6:
        return print("the number of subjects is too small!")
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([chls, ts, 2], dtype=float)
    # get the r-map
    rs = corrs[:, :, :, 0]
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs = 0.5 * np.log((1 + rs) / (1 - rs))
    else:
        # BUGFIX: zs was previously undefined when fisherz was False,
        # which raised a NameError in the loop below
        zs = rs
    # calculate the statistical results
    for i in range(chls):
        for j in range(ts):
            # one-sided (greater) one-sample t-test against zero
            results[i, j] = ttest_1samp(zs[:, i, j], 0, alternative="greater")
            if permutation:
                # replace the parametric p-value with a permutation p-value
                results[i, j, 1] = permutation_test(zs[:, i, j], np.zeros([subs]), iter=iter)
    return results
' a function for conducting the statistical analysis for results of fMRI data (searchlight) '
def stats_fmri(corrs, fisherz=True, permutation=False, iter=5000):
    """
    Conduct the statistical analysis for results of fMRI data (searchlight).

    Parameters
    ----------
    corrs : array
        The correlation coefficients.
        The shape of corrs must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x,
        n_y, n_z represent the number of subjects and the number of
        calculation units for searchlight along the x, y, z axis. 2
        represents a r-value and a p-value.
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is False.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration.

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the
        number of calculation units for searchlight along the x, y, z axis
        and 2 represents a t-value and a p-value.

    Notes
    -----
    n_subs must >= 6.
    This function can be used for the results of searchlight fMRI NPS and
    searchlight fMRI RDM-correlations.
    """
    if len(np.shape(corrs)) != 5:
        return "Invalid input!"
    # get the number of subjects
    subs = np.shape(corrs)[0]
    if subs < 6:
        return print("the number of subjects is too small!")
    # number of searchlight calculation units along the x, y, z axes
    n_x, n_y, n_z = np.shape(corrs)[1:4]
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([n_x, n_y, n_z, 2], dtype=float)
    # get the r-map
    rs = corrs[:, :, :, :, 0]
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs = 0.5 * np.log((1 + rs) / (1 - rs))
    else:
        # BUGFIX: zs was previously undefined when fisherz was False
        zs = rs
    # calculate the statistical results
    for i in range(n_x):
        for j in range(n_y):
            for k in range(n_z):
                # one-sided (greater) one-sample t-test against zero
                results[i, j, k] = ttest_1samp(zs[:, i, j, k], 0, alternative="greater")
                if permutation:
                    results[i, j, k, 1] = permutation_test(zs[:, i, j, k], np.zeros([subs]), iter=iter)
    return results
' a function for conducting the statistical analysis for results of fMRI data (searchlight) within group '
def stats_fmri_compare_withingroup(corrs1, corrs2, fisherz=True, permutation=False, iter=5000):
    """
    Conduct the statistical analysis for results of fMRI data (searchlight)
    (within group: corrs1 > corrs2).

    Parameters
    ----------
    corrs1 : array
        The correlation coefficients under condition 1.
        The shape of corrs1 must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x,
        n_y, n_z represent the number of subjects and the number of
        calculation units for searchlight along the x, y, z axis. 2
        represents a r-value and a p-value.
    corrs2 : array
        The correlation coefficients under condition 2, same shape as corrs1.
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is False.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration.

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the
        number of calculation units for searchlight along the x, y, z axis
        and 2 represents a t-value and a p-value.

    Notes
    -----
    n_subs must >= 6.
    This function can be used for the results of searchlight fMRI NPS and
    searchlight fMRI RDM-correlations.
    """
    if len(np.shape(corrs1)) != 5 or len(np.shape(corrs2)) != 5:
        return "Invalid input!"
    # get the number of subjects
    subs = np.shape(corrs1)[0]
    if subs < 6:
        return print("the number of subjects is too small!")
    # number of searchlight calculation units along the x, y, z axes
    n_x, n_y, n_z = np.shape(corrs1)[1:4]
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([n_x, n_y, n_z, 2], dtype=float)
    # get the r-maps
    rs1 = corrs1[:, :, :, :, 0]
    rs2 = corrs2[:, :, :, :, 0]
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs1 = 0.5 * np.log((1 + rs1) / (1 - rs1))
        zs2 = 0.5 * np.log((1 + rs2) / (1 - rs2))
    else:
        # BUGFIX: zs1/zs2 were previously undefined when fisherz was False
        zs1 = rs1
        zs2 = rs2
    # calculate the statistical results
    for i in range(n_x):
        for j in range(n_y):
            for k in range(n_z):
                # paired one-sided (greater) t-test: condition 1 > condition 2
                results[i, j, k] = ttest_rel(zs1[:, i, j, k], zs2[:, i, j, k], alternative="greater")
                if permutation:
                    results[i, j, k, 1] = permutation_test(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)
    return results
' a function for conducting the statistical analysis for results of fMRI data (searchlight) between two groups'
def stats_fmri_compare_betweengroups(corrs1, corrs2, fisherz=True, permutation=False, iter=5000):
    """
    Conduct the statistical analysis for results of fMRI data (searchlight)
    (between 2 groups: group1 > group2).

    Parameters
    ----------
    corrs1 : array
        The correlation coefficients for group 1.
        The shape of corrs1 must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x,
        n_y, n_z represent the number of subjects and the number of
        calculation units for searchlight along the x, y, z axis. 2
        represents a r-value and a p-value.
    corrs2 : array
        The correlation coefficients for group 2, same layout as corrs1
        (the number of subjects may differ between groups).
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is False.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration.

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the
        number of calculation units for searchlight along the x, y, z axis
        and 2 represents a t-value and a p-value.

    Notes
    -----
    n_subs must >= 6 in each group.
    This function can be used for the results of searchlight fMRI NPS and
    searchlight fMRI RDM-correlations.
    """
    if len(np.shape(corrs1)) != 5 or len(np.shape(corrs2)) != 5:
        return "Invalid input!"
    # get the number of subjects in each group
    subs1 = np.shape(corrs1)[0]
    subs2 = np.shape(corrs2)[0]
    if subs1 < 6 or subs2 < 6:
        return print("the number of subjects is too small!")
    # number of searchlight calculation units along the x, y, z axes
    n_x, n_y, n_z = np.shape(corrs1)[1:4]
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([n_x, n_y, n_z, 2], dtype=float)
    # get the r-maps
    rs1 = corrs1[:, :, :, :, 0]
    rs2 = corrs2[:, :, :, :, 0]
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs1 = 0.5 * np.log((1 + rs1) / (1 - rs1))
        zs2 = 0.5 * np.log((1 + rs2) / (1 - rs2))
    else:
        # BUGFIX: zs1/zs2 were previously undefined when fisherz was False
        zs1 = rs1
        zs2 = rs2
    # calculate the statistical results
    for i in range(n_x):
        for j in range(n_y):
            for k in range(n_z):
                # independent-samples one-sided (greater) t-test: group1 > group2
                results[i, j, k] = ttest_ind(zs1[:, i, j, k], zs2[:, i, j, k], alternative="greater")
                if permutation:
                    results[i, j, k, 1] = permutation_test(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)
    return results
' a function for conducting the statistical analysis for results of fMRI data (ISC searchlight) '
def stats_iscfmri(corrs, fisherz=True, permutation=False, iter=5000):
    """
    Conduct the statistical analysis for results of fMRI data
    (ISC searchlight).

    Parameters
    ----------
    corrs : array
        The correlation coefficients.
        The shape of corrs must be [n_ts, n_subs!/(2!*(n_subs-2)!), n_x, n_y,
        n_z, 2]. n_ts is the number of time-points, the second dimension is
        the number of subject pairs, n_x, n_y, n_z are the number of
        calculation units for searchlight along the x, y, z axis and 2
        represents a r-value and a p-value.
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is False.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration.

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_ts, n_x, n_y, n_z, 2]. 2 represents a
        t-value and a p-value.

    Notes
    -----
    n_subs must >= 4 (n_subs!/(2!*(n_subs-2)!) >= 6).
    """
    if len(np.shape(corrs)) != 6:
        return "Invalid input!"
    # get the number of time-points and of subject pairs
    ts, npairs = np.shape(corrs)[:2]
    # n_subs!/(2!*(n_subs-2)!) >= 6
    if npairs < 6:
        return print("the number of subjects is too small!")
    # number of searchlight calculation units along the x, y, z axes
    n_x, n_y, n_z = np.shape(corrs)[2:5]
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([ts, n_x, n_y, n_z, 2], dtype=float)
    # get the r-map
    rs = corrs[:, :, :, :, :, 0]
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs = 0.5 * np.log((1 + rs) / (1 - rs))
    else:
        # BUGFIX: zs was previously undefined when fisherz was False
        zs = rs
    # calculate the statistical results
    for t in range(ts):
        for i in range(n_x):
            for j in range(n_y):
                for k in range(n_z):
                    # one-sided (greater) one-sample t-test over pairs
                    results[t, i, j, k] = ttest_1samp(zs[t, :, i, j, k], 0, alternative="greater")
                    if permutation:
                        results[t, i, j, k, 1] = permutation_test(zs[t, :, i, j, k], np.zeros([npairs]), iter=iter)
    return results
' a function for conducting the statistical analysis for results of EEG-like data (for STPS) '
def stats_stps(corrs1, corrs2, fisherz=True, permutation=True, iter=5000):
    """
    Conduct the statistical analysis for results of EEG-like data (for STPS).

    Parameters
    ----------
    corrs1 : array
        The correlation coefficients under condition 1.
        The shape of corrs1 must be [n_subs, n_chls, n_ts]. n_subs, n_chls,
        n_ts represent the number of subjects, the number of channels and the
        number of time-points.
    corrs2 : array
        The correlation coefficients under condition 2, with the same n_chls
        and n_ts as corrs1.
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is True.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration.

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_chls, n_ts, 2]. 2 represents a t-value and a
        p-value.

    Notes
    -----
    n_subs must >= 6.
    """
    shape1 = np.shape(corrs1)
    shape2 = np.shape(corrs2)
    # both inputs must be 3-D and agree on the channel/time dimensions
    if len(shape1) != 3 or len(shape2) != 3 or shape1[1:3] != shape2[1:3]:
        return "Invalid input!"
    # get the number of subjects, channels & time-points
    subs, chls, ts = shape1
    if subs < 6:
        return print("the number of subjects is too small!")
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([chls, ts, 2], dtype=float)
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs1 = 0.5 * np.log((1 + corrs1) / (1 - corrs1))
        zs2 = 0.5 * np.log((1 + corrs2) / (1 - corrs2))
    else:
        # BUGFIX: zs1/zs2 were previously undefined when fisherz was False
        zs1 = corrs1
        zs2 = corrs2
    # calculate the statistical results
    for i in range(chls):
        for j in range(ts):
            # paired two-sided t-test between conditions
            results[i, j] = ttest_rel(zs1[:, i, j], zs2[:, i, j])
            if permutation:
                results[i, j, 1] = permutation_test(zs1[:, i, j], zs2[:, i, j], iter=iter)
    return results
' a function for conducting the statistical analysis for results of fMRI data (STPS searchlight) '
def stats_stpsfmri(corrs1, corrs2, fisherz=True, permutation=False, iter=5000):
    """
    Conduct the statistical analysis for results of fMRI data
    (STPS searchlight).

    Parameters
    ----------
    corrs1 : array
        The correlation coefficients under condition 1.
        The shape of corrs1 must be [n_subs, n_x, n_y, n_z]. n_subs, n_x,
        n_y, n_z represent the number of subjects and the number of
        calculation units for searchlight along the x, y, z axis.
    corrs2 : array
        The correlation coefficients under condition 2, with the same n_x,
        n_y, n_z as corrs1.
    fisherz : bool True or False. Default is True.
        Conduct Fisher-Z transform.
    permutation : bool True or False. Default is False.
        Use permutation test or not.
    iter : int. Default is 5000.
        The times for iteration.

    Returns
    -------
    stats : array
        The statistical results.
        The shape of stats is [n_x, n_y, n_z, 2]. 2 represents a t-value and
        a p-value.

    Notes
    -----
    n_subs must >= 6.
    """
    shape1 = np.shape(corrs1)
    shape2 = np.shape(corrs2)
    # both inputs must be 4-D and agree on the searchlight dimensions
    if len(shape1) != 4 or len(shape2) != 4 or shape1[1:4] != shape2[1:4]:
        return "Invalid input!"
    # get the number of subjects
    subs = shape1[0]
    if subs < 6:
        return print("the number of subjects is too small!")
    # number of searchlight calculation units along the x, y, z axes
    n_x, n_y, n_z = shape1[1:]
    # np.float was removed in NumPy 1.24; use the builtin float instead
    results = np.zeros([n_x, n_y, n_z, 2], dtype=float)
    if fisherz:
        # Fisher r-to-z transform stabilizes the variance of r before testing
        zs1 = 0.5 * np.log((1 + corrs1) / (1 - corrs1))
        zs2 = 0.5 * np.log((1 + corrs2) / (1 - corrs2))
    else:
        # BUGFIX: zs1/zs2 were previously undefined when fisherz was False
        zs1 = corrs1
        zs2 = corrs2
    # calculate the statistical results
    for i in range(n_x):
        for j in range(n_y):
            for k in range(n_z):
                # paired two-sided t-test between conditions
                results[i, j, k] = ttest_rel(zs1[:, i, j, k], zs2[:, i, j, k])
                if permutation:
                    results[i, j, k, 1] = permutation_test(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)
    return results
"neurora.stuff.permutation_test",
"numpy.log",
"numpy.zeros",
"scipy.stats.ttest_rel",
"scipy.stats.ttest_ind",
"scipy.stats.ttest_1samp",
"numpy.shape"
] | [((1693, 1732), 'numpy.zeros', 'np.zeros', (['[chls, ts, 2]'], {'dtype': 'np.float'}), '([chls, ts, 2], dtype=np.float)\n', (1701, 1732), True, 'import numpy as np\n'), ((3838, 3882), 'numpy.zeros', 'np.zeros', (['[n_x, n_y, n_z, 2]'], {'dtype': 'np.float'}), '([n_x, n_y, n_z, 2], dtype=np.float)\n', (3846, 3882), True, 'import numpy as np\n'), ((6473, 6517), 'numpy.zeros', 'np.zeros', (['[n_x, n_y, n_z, 2]'], {'dtype': 'np.float'}), '([n_x, n_y, n_z, 2], dtype=np.float)\n', (6481, 6517), True, 'import numpy as np\n'), ((9247, 9291), 'numpy.zeros', 'np.zeros', (['[n_x, n_y, n_z, 2]'], {'dtype': 'np.float'}), '([n_x, n_y, n_z, 2], dtype=np.float)\n', (9255, 9291), True, 'import numpy as np\n'), ((11613, 11661), 'numpy.zeros', 'np.zeros', (['[ts, n_x, n_y, n_z, 2]'], {'dtype': 'np.float'}), '([ts, n_x, n_y, n_z, 2], dtype=np.float)\n', (11621, 11661), True, 'import numpy as np\n'), ((13897, 13913), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (13905, 13913), True, 'import numpy as np\n'), ((14047, 14086), 'numpy.zeros', 'np.zeros', (['[chls, ts, 2]'], {'dtype': 'np.float'}), '([chls, ts, 2], dtype=np.float)\n', (14055, 14086), True, 'import numpy as np\n'), ((16589, 16633), 'numpy.zeros', 'np.zeros', (['[n_x, n_y, n_z, 2]'], {'dtype': 'np.float'}), '([n_x, n_y, n_z, 2], dtype=np.float)\n', (16597, 16633), True, 'import numpy as np\n'), ((1540, 1555), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (1548, 1555), True, 'import numpy as np\n'), ((3572, 3587), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (3580, 3587), True, 'import numpy as np\n'), ((3777, 3792), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (3785, 3792), True, 'import numpy as np\n'), ((6205, 6221), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (6213, 6221), True, 'import numpy as np\n'), ((6411, 6427), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (6419, 6427), True, 'import numpy as np\n'), ((8933, 8949), 'numpy.shape', 
'np.shape', (['corrs1'], {}), '(corrs1)\n', (8941, 8949), True, 'import numpy as np\n'), ((8965, 8981), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (8973, 8981), True, 'import numpy as np\n'), ((9185, 9201), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (9193, 9201), True, 'import numpy as np\n'), ((11324, 11339), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (11332, 11339), True, 'import numpy as np\n'), ((11552, 11567), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (11560, 11567), True, 'import numpy as np\n'), ((16322, 16338), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (16330, 16338), True, 'import numpy as np\n'), ((16528, 16544), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (16536, 16544), True, 'import numpy as np\n'), ((1405, 1420), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (1413, 1420), True, 'import numpy as np\n'), ((1821, 1848), 'numpy.log', 'np.log', (['((1 + rs) / (1 - rs))'], {}), '((1 + rs) / (1 - rs))\n', (1827, 1848), True, 'import numpy as np\n'), ((2007, 2057), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['zs[:, i, j]', '(0)'], {'alternative': '"""greater"""'}), "(zs[:, i, j], 0, alternative='greater')\n", (2018, 2057), False, 'from scipy.stats import ttest_1samp, ttest_rel, ttest_ind\n'), ((3471, 3486), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (3479, 3486), True, 'import numpy as np\n'), ((3975, 4002), 'numpy.log', 'np.log', (['((1 + rs) / (1 - rs))'], {}), '((1 + rs) / (1 - rs))\n', (3981, 4002), True, 'import numpy as np\n'), ((6645, 6674), 'numpy.log', 'np.log', (['((1 + rs1) / (1 - rs1))'], {}), '((1 + rs1) / (1 - rs1))\n', (6651, 6674), True, 'import numpy as np\n'), ((6689, 6718), 'numpy.log', 'np.log', (['((1 + rs2) / (1 - rs2))'], {}), '((1 + rs2) / (1 - rs2))\n', (6695, 6718), True, 'import numpy as np\n'), ((9418, 9447), 'numpy.log', 'np.log', (['((1 + rs1) / (1 - rs1))'], {}), '((1 + rs1) / (1 - rs1))\n', (9424, 9447), True, 
'import numpy as np\n'), ((9468, 9497), 'numpy.log', 'np.log', (['((1 + rs2) / (1 - rs2))'], {}), '((1 + rs2) / (1 - rs2))\n', (9474, 9497), True, 'import numpy as np\n'), ((11207, 11222), 'numpy.shape', 'np.shape', (['corrs'], {}), '(corrs)\n', (11215, 11222), True, 'import numpy as np\n'), ((11780, 11807), 'numpy.log', 'np.log', (['((1 + rs) / (1 - rs))'], {}), '((1 + rs) / (1 - rs))\n', (11786, 11807), True, 'import numpy as np\n'), ((14207, 14236), 'numpy.log', 'np.log', (['((1 + rs1) / (1 - rs1))'], {}), '((1 + rs1) / (1 - rs1))\n', (14213, 14236), True, 'import numpy as np\n'), ((14257, 14286), 'numpy.log', 'np.log', (['((1 + rs2) / (1 - rs2))'], {}), '((1 + rs2) / (1 - rs2))\n', (14263, 14286), True, 'import numpy as np\n'), ((14430, 14467), 'scipy.stats.ttest_rel', 'ttest_rel', (['zs1[:, i, j]', 'zs2[:, i, j]'], {}), '(zs1[:, i, j], zs2[:, i, j])\n', (14439, 14467), False, 'from scipy.stats import ttest_1samp, ttest_rel, ttest_ind\n'), ((16754, 16783), 'numpy.log', 'np.log', (['((1 + rs1) / (1 - rs1))'], {}), '((1 + rs1) / (1 - rs1))\n', (16760, 16783), True, 'import numpy as np\n'), ((16804, 16833), 'numpy.log', 'np.log', (['((1 + rs2) / (1 - rs2))'], {}), '((1 + rs2) / (1 - rs2))\n', (16810, 16833), True, 'import numpy as np\n'), ((4184, 4237), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['zs[:, i, j, k]', '(0)'], {'alternative': '"""greater"""'}), "(zs[:, i, j, k], 0, alternative='greater')\n", (4195, 4237), False, 'from scipy.stats import ttest_1samp, ttest_rel, ttest_ind\n'), ((6073, 6089), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (6081, 6089), True, 'import numpy as np\n'), ((6103, 6119), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (6111, 6119), True, 'import numpy as np\n'), ((6900, 6966), 'scipy.stats.ttest_rel', 'ttest_rel', (['zs1[:, i, j, k]', 'zs2[:, i, j, k]'], {'alternative': '"""greater"""'}), "(zs1[:, i, j, k], zs2[:, i, j, k], alternative='greater')\n", (6909, 6966), False, 'from scipy.stats import 
ttest_1samp, ttest_rel, ttest_ind\n'), ((8801, 8817), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (8809, 8817), True, 'import numpy as np\n'), ((8831, 8847), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (8839, 8847), True, 'import numpy as np\n'), ((9685, 9751), 'scipy.stats.ttest_ind', 'ttest_ind', (['zs1[:, i, j, k]', 'zs2[:, i, j, k]'], {'alternative': '"""greater"""'}), "(zs1[:, i, j, k], zs2[:, i, j, k], alternative='greater')\n", (9694, 9751), False, 'from scipy.stats import ttest_1samp, ttest_rel, ttest_ind\n'), ((13625, 13641), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (13633, 13641), True, 'import numpy as np\n'), ((13655, 13671), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (13663, 13671), True, 'import numpy as np\n'), ((13681, 13697), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (13689, 13697), True, 'import numpy as np\n'), ((13704, 13720), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (13712, 13720), True, 'import numpy as np\n'), ((13741, 13757), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (13749, 13757), True, 'import numpy as np\n'), ((13764, 13780), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (13772, 13780), True, 'import numpy as np\n'), ((14539, 14594), 'neurora.stuff.permutation_test', 'permutation_test', (['zs1[:, i, j]', 'zs2[:, i, j]'], {'iter': 'iter'}), '(zs1[:, i, j], zs2[:, i, j], iter=iter)\n', (14555, 14594), False, 'from neurora.stuff import permutation_test\n'), ((16038, 16054), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (16046, 16054), True, 'import numpy as np\n'), ((16068, 16084), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (16076, 16084), True, 'import numpy as np\n'), ((16094, 16110), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (16102, 16110), True, 'import numpy as np\n'), ((16117, 16133), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (16125, 16133), 
True, 'import numpy as np\n'), ((16154, 16170), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (16162, 16170), True, 'import numpy as np\n'), ((16177, 16193), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (16185, 16193), True, 'import numpy as np\n'), ((16200, 16216), 'numpy.shape', 'np.shape', (['corrs1'], {}), '(corrs1)\n', (16208, 16216), True, 'import numpy as np\n'), ((16223, 16239), 'numpy.shape', 'np.shape', (['corrs2'], {}), '(corrs2)\n', (16231, 16239), True, 'import numpy as np\n'), ((17021, 17064), 'scipy.stats.ttest_rel', 'ttest_rel', (['zs1[:, i, j, k]', 'zs2[:, i, j, k]'], {}), '(zs1[:, i, j, k], zs2[:, i, j, k])\n', (17030, 17064), False, 'from scipy.stats import ttest_1samp, ttest_rel, ttest_ind\n'), ((2159, 2175), 'numpy.zeros', 'np.zeros', (['[subs]'], {}), '([subs])\n', (2167, 2175), True, 'import numpy as np\n'), ((7049, 7110), 'neurora.stuff.permutation_test', 'permutation_test', (['zs1[:, i, j, k]', 'zs2[:, i, j, k]'], {'iter': 'iter'}), '(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)\n', (7065, 7110), False, 'from neurora.stuff import permutation_test\n'), ((9833, 9894), 'neurora.stuff.permutation_test', 'permutation_test', (['zs1[:, i, j, k]', 'zs2[:, i, j, k]'], {'iter': 'iter'}), '(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)\n', (9849, 9894), False, 'from neurora.stuff import permutation_test\n'), ((12042, 12098), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['zs[t, :, i, j, k]', '(0)'], {'alternative': '"""greater"""'}), "(zs[t, :, i, j, k], 0, alternative='greater')\n", (12053, 12098), False, 'from scipy.stats import ttest_1samp, ttest_rel, ttest_ind\n'), ((17146, 17207), 'neurora.stuff.permutation_test', 'permutation_test', (['zs1[:, i, j, k]', 'zs2[:, i, j, k]'], {'iter': 'iter'}), '(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)\n', (17162, 17207), False, 'from neurora.stuff import permutation_test\n'), ((4353, 4369), 'numpy.zeros', 'np.zeros', (['[subs]'], {}), '([subs])\n', (4361, 4369), True, 'import numpy as 
np\n'), ((12228, 12246), 'numpy.zeros', 'np.zeros', (['[npairs]'], {}), '([npairs])\n', (12236, 12246), True, 'import numpy as np\n')] |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.ops.elementwise import Round
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
def round_test_graph(nodes_attributes, value, mode: str):
    """Build a minimal 3-node test graph: input -> Round(mode) -> output.

    :param nodes_attributes: base attribute dict understood by build_graph
    :param value: constant value placed on the input node 'node_1'
    :param mode: rounding mode for the Round op ('half_to_even' or
                 'half_away_from_zero')
    :return: the constructed graph
    """
    edges = [
        ('node_1', 'elementwise_node'),
        ('elementwise_node', 'node_3'),
    ]
    updated_attrs = {
        'node_1': {'value': value},
        'elementwise_node': {'op': 'Round', 'mode': mode},
        'node_3': {'value': None},
    }
    return build_graph(nodes_attributes, edges, updated_attrs)
class TestElementwiseOp(unittest.TestCase):
    """Unit tests for shape/value inference of the elementwise Round op."""

    nodes_attributes = {
        'node_1': {
            'shape': np.array([13]),
            'value': None
        },
        'elementwise_node': {
            'op': None,
            'kind': 'op',
            'operation': None
        },
        'node_3': {
            'shape': None
        }
    }

    # Inputs covering exact ties (x.5), negatives, and non-tie fractions.
    value = np.array([-23.5, -22.5, -2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5, 22.5, 23.5])

    def _check_round_infer(self, mode, exp_value):
        """Build the graph for the given rounding `mode`, run Round.infer and
        compare the inferred shape and value on 'node_3' with `exp_value`.

        Extracted to remove the near-identical bodies the two test methods
        previously duplicated.
        """
        graph = round_test_graph(self.nodes_attributes, self.value, mode)
        graph.graph['layout'] = 'NCHW'
        elementwise_node = Node(graph, 'elementwise_node')
        Round.infer(elementwise_node)
        exp_shape = np.array([13])
        res_shape = graph.node['node_3']['shape']
        res_value = graph.node['node_3']['value']
        for i, value in enumerate(exp_shape):
            self.assertEqual(res_shape[i], value)
        for i, value in enumerate(exp_value):
            self.assertAlmostEqual(res_value[i], value)

    def test_elementwise_round_even_infer(self):
        # Banker's rounding: ties go to the nearest even integer.
        exp_value = np.array([-24., -22., -2., -2., -0., 0., 1., 2., 2., 2., 4., 22., 24.])
        self._check_round_infer('half_to_even', exp_value)

    def test_elementwise_round_away_infer(self):
        # Ties are rounded away from zero.
        exp_value = np.array([-24., -23., -3., -2., -1., 1., 1., 2., 2., 3., 4., 23., 24.])
        self._check_round_infer('half_away_from_zero', exp_value)
| [
"numpy.array",
"extensions.ops.elementwise.Round.infer",
"mo.graph.graph.Node",
"unit_tests.utils.graph.build_graph"
] | [((315, 531), 'unit_tests.utils.graph.build_graph', 'build_graph', (['nodes_attributes', "[('node_1', 'elementwise_node'), ('elementwise_node', 'node_3')]", "{'node_1': {'value': value}, 'elementwise_node': {'op': 'Round', 'mode':\n mode}, 'node_3': {'value': None}}"], {}), "(nodes_attributes, [('node_1', 'elementwise_node'), (\n 'elementwise_node', 'node_3')], {'node_1': {'value': value},\n 'elementwise_node': {'op': 'Round', 'mode': mode}, 'node_3': {'value':\n None}})\n", (326, 531), False, 'from unit_tests.utils.graph import build_graph\n'), ((1356, 1445), 'numpy.array', 'np.array', (['[-23.5, -22.5, -2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5, 22.5, 23.5]'], {}), '([-23.5, -22.5, -2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5, \n 22.5, 23.5])\n', (1364, 1445), True, 'import numpy as np\n'), ((1642, 1673), 'mo.graph.graph.Node', 'Node', (['graph', '"""elementwise_node"""'], {}), "(graph, 'elementwise_node')\n", (1646, 1673), False, 'from mo.graph.graph import Node\n'), ((1682, 1711), 'extensions.ops.elementwise.Round.infer', 'Round.infer', (['elementwise_node'], {}), '(elementwise_node)\n', (1693, 1711), False, 'from extensions.ops.elementwise import Round\n'), ((1732, 1746), 'numpy.array', 'np.array', (['[13]'], {}), '([13])\n', (1740, 1746), True, 'import numpy as np\n'), ((1867, 1956), 'numpy.array', 'np.array', (['[-24.0, -22.0, -2.0, -2.0, -0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0, 22.0, 24.0]'], {}), '([-24.0, -22.0, -2.0, -2.0, -0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0, \n 22.0, 24.0])\n', (1875, 1956), True, 'import numpy as np\n'), ((2346, 2377), 'mo.graph.graph.Node', 'Node', (['graph', '"""elementwise_node"""'], {}), "(graph, 'elementwise_node')\n", (2350, 2377), False, 'from mo.graph.graph import Node\n'), ((2386, 2415), 'extensions.ops.elementwise.Round.infer', 'Round.infer', (['elementwise_node'], {}), '(elementwise_node)\n', (2397, 2415), False, 'from extensions.ops.elementwise import Round\n'), ((2436, 2450), 'numpy.array', 'np.array', (['[13]'], {}), 
'([13])\n', (2444, 2450), True, 'import numpy as np\n'), ((2571, 2660), 'numpy.array', 'np.array', (['[-24.0, -23.0, -3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0, 23.0, 24.0]'], {}), '([-24.0, -23.0, -3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0, \n 23.0, 24.0])\n', (2579, 2660), True, 'import numpy as np\n'), ((1107, 1121), 'numpy.array', 'np.array', (['[13]'], {}), '([13])\n', (1115, 1121), True, 'import numpy as np\n')] |
from typing import Dict, List
from dataclasses import dataclass, field
import tvm
from tvm import relay
import pickle
import random
import numpy as np
import random
from copy import deepcopy
from .tvmpass import PassDependenceGraph, PassNode
# TODO: Add parameters.
# TODO: Add more passes.
# Pool of relay passes the fuzzer may chain together.  Stored as pass *types*
# (not instances); they are instantiated by the consumer.
_RELAY_FUNCTION_HARD_PASSES_ = [  # Note these are types.
    relay.transform.RemoveUnusedFunctions,
    relay.transform.Inline,
    relay.transform.PartitionGraph,
    relay.transform.ToGraphNormalForm,
    relay.transform.SimplifyInference,
    relay.transform.FoldConstant,
    relay.transform.AnnotateSpans,
    relay.transform.DefuseOps,
    relay.transform.FuseOps,
    relay.transform.SimplifyExpr,
    # relay.transform.ToBasicBlockNormalForm,
    relay.transform.BatchingOps,
    relay.transform.AlterOpLayout,
    relay.transform.FoldScaleAxis,
    relay.transform.CanonicalizeOps,
    relay.transform.CanonicalizeCast,
    relay.transform.DeadCodeElimination,
    relay.transform.EliminateCommonSubexpr,
    relay.transform.CombineParallelConv2D,
    relay.transform.CombineParallelDense,
    relay.transform.CombineParallelBatchMatmul,
    relay.transform.FastMath,
    relay.transform.DynamicToStatic,
    relay.transform.FoldExplicitPadding,
]
# Transition matrix for the random walk over the pass pool: entry [i][j] == 0
# forbids scheduling pass j directly after pass i.  All transitions start
# allowed; AnnotateSpans -> FuseOps is explicitly disabled below.
_RANDOM_WALK_MAP_ = np.ones((len(_RELAY_FUNCTION_HARD_PASSES_), len(_RELAY_FUNCTION_HARD_PASSES_)))
_RANDOM_WALK_MAP_[_RELAY_FUNCTION_HARD_PASSES_.index(relay.transform.AnnotateSpans)][_RELAY_FUNCTION_HARD_PASSES_.index(relay.transform.FuseOps)] = 0
# Pass-dependence graph built for the default CPU target; used to sample
# valid tir pass sequences.
graph = PassDependenceGraph(tvm.target.Target('llvm'))
_ALL_DIR_PASS_NODES_ = list(graph.tir_pass_nodes.values())
@dataclass
class CompileConfig:
    """Compilation-side fuzzing configuration: a TVM target plus the relay
    and tir pass pipelines to apply."""
    target :tvm.target.Target = None
    relay_pass_types :List[relay.transform.FunctionPass] = None # actually, there're some module passes...
    tir_pass_nodes :List[PassNode] = None

    def mutate(self):
        """Randomly re-draw the target and both pass pipelines.

        The relay pipeline is built by a random walk over
        _RELAY_FUNCTION_HARD_PASSES_; a zero in _RANDOM_WALK_MAP_[i][j]
        forbids the i -> j transition.
        """
        # TODO: Think about better mutation strategies.
        # Target
        self.target = random.choice(self._target_space())
        # Passes
        n_pass = random.randint(1, len(_RELAY_FUNCTION_HARD_PASSES_) - 1)
        self.relay_pass_types = []
        # BUG FIX: use randrange(n), uniform over [0, n), instead of
        # randint(1, n - 1) which could never select index 0 and raised
        # ValueError whenever only a single candidate was available.
        pidx = random.randrange(len(_RELAY_FUNCTION_HARD_PASSES_))
        for _ in range(n_pass):
            self.relay_pass_types.append(_RELAY_FUNCTION_HARD_PASSES_[pidx])
            candidates_idx = _RANDOM_WALK_MAP_[pidx].nonzero()[0]
            if len(candidates_idx) == 0:
                break  # dead end in the walk: no allowed successor
            pidx = candidates_idx[random.randrange(len(candidates_idx))]
        self.tir_pass_nodes = graph.random_tir_passes(n_pass)

    @staticmethod
    def hard_relay_passes() -> List[relay.transform.FunctionPass]:
        """passes that do not leverage (great) approximation.
        """
        # BUG FIX: declared @staticmethod — the original definition had no
        # `self` parameter, so calling it on an instance raised TypeError.
        # Class-level calls keep working unchanged.
        return _RELAY_FUNCTION_HARD_PASSES_

    def get_device(self):
        """Return the tvm device matching the selected target (CPU fallback)."""
        if self.target.export()['kind'] == 'cuda':
            return tvm.cuda()
        if self.target.export()['kind'] == 'rocm':
            return tvm.rocm()
        return tvm.cpu()

    def check(self):
        """Assert the config has been populated (call after mutate())."""
        assert self.target != None
        assert self.relay_pass_types != None

    @staticmethod
    def _target_space():
        """Candidate compilation targets (currently CPU/llvm only)."""
        # To get "-mcpu=?", do "cat /proc/cpuinfo". Then search the `model name` on ark.intel.com
        # There can more targets... Let's forget it for a while.
        # tvm.target.Target('c') is too weak...
        _targets = [tvm.target.Target('llvm')]
        # TODO: Allow devices.
        # if tvm.cuda().exist:
        #     _targets.append(tvm.target.cuda())
        #     if cudnn.exists():
        #         _targets.append(tvm.target.Target('cuda -libs=cudnn'))
        # if tvm.rocm().exist:
        #     _targets.append(tvm.target.rocm())
        return _targets
# When using CHI distribution on [0, +inf)
# Degrees of freedom for np.random.chisquare draws that size batches in
# ExecutionConfig.mutate().
_SAMPLE_CHI_DIST_DF_ = 3
# Cap on the number of generated input samples per mutation.
_MAX_SAMPLE_SIZE_ = 64
_MAX_TEST_BATCH_ = _MAX_SAMPLE_SIZE_
# Bounds for randomly concretized (non-batch) spatial dimensions.
_MIN_TEST_HW_ = 128
_MAX_TEST_HW_ = 1024
# Center of the H/W sampling distribution used in ExecutionConfig.mutate().
_HW_NORMAL_DIST_MU_ = (_MIN_TEST_HW_ + _MAX_TEST_HW_ * 3 // 5) // 2
# 3 sigma is hard... we make it 4...
_HW_NORMAL_DIST_SIGMA_ = _HW_NORMAL_DIST_MU_ // 4
@dataclass
class ExecutionConfig:
    """Runtime-side fuzzing configuration: the relay module under test, its
    parameters, the generated inputs, and (optionally) oracle outputs."""
    # Relay module under test.
    module :tvm.IRModule
    # Model weights passed to the executor.
    params :Dict
    # Number of leading main() arguments that are genuine inputs (the rest are params).
    n_inp_node :int
    # Executor kind ('graph', 'vm', 'debug'); (re)selected in mutate().
    exe_mode :str = None
    # One inner list of input tensors per generated sample.
    inputs :List[List[tvm.nd.array]] = field(default_factory=list)
    oracle :List[List[tvm.nd.array]] = None # None if not required.
    # Human-readable tag describing where the oracle came from.
    oracle_name :str = "NOT_SET"
    def from_keras(self, model, shape=None, layout="NCHW"):
        """Populate module/params by importing a Keras model through the relay frontend."""
        self.module, self.params = relay.frontend.from_keras(model, shape, layout)
    @staticmethod
    def exe_mode_space(dynamic_shape=False):
        """Executor kinds valid for this module; 'graph' cannot run dynamic shapes."""
        if dynamic_shape:
            return ['vm', 'debug']
        else:
            return ['vm', 'graph', 'debug']
    def check(self):
        """Assert the config has been fully populated (call after mutate())."""
        assert isinstance(self.module, tvm.IRModule)
        assert self.params is not None
        assert self.n_inp_node > 0
        assert self.exe_mode != None
        assert self.inputs
    def mutate(self):
        """Regenerate self.inputs with freshly sampled shapes/batch sizes and
        (re)select the executor mode.

        Static-shaped modules get exactly one zero-filled sample; dynamic
        shapes get a chi-square-distributed number of samples (small sizes
        are favored, so crashes reproduce quickly).
        """
        # TODO: Think about better mutation strategies.
        # Create some inputs...
        input_shapes = self.module['main'].checked_type.arg_types[:self.n_inp_node]
        dynamic_batch_input_id = []
        dynamic_input_ids = []
        # Classify each input: any dynamic dim at all, and a dynamic batch dim (index 0).
        for i, s in enumerate(input_shapes):
            if relay.ty.is_dynamic(s):
                dynamic_input_ids.append(i)
            if isinstance(s.shape[0], tvm.tir.Any):
                dynamic_batch_input_id.append(i)
        dy_batch_size_list = [] # if we support dynamic batch.
        n_sample = 1 # if len(dynamic_input_ids) == 0
        # else: np.random.chisquare
        # We use chisquare dist which give more probability on small samples (faster).
        # See: https://en.wikipedia.org/wiki/Chi-square_distribution
        # Normal dist: \mu and \sigma
        # Chi dist: \mu, \sigma, v
        if len(dynamic_input_ids) != 0:
            n_sample = max(1, int(np.random.chisquare(3)))
            n_sample = min(n_sample, _MAX_SAMPLE_SIZE_)
            if len(dynamic_batch_input_id) != 0:
                # Batch sizes grow cumulatively; each increment is chi-square distributed.
                start = 0
                for _ in range(n_sample):
                    start += int(np.random.chisquare(_SAMPLE_CHI_DIST_DF_))
                    if start <= _MAX_TEST_BATCH_:
                        dy_batch_size_list.append(start)
                    else:
                        # NOTE(review): appending the constant 1 to dynamic_input_ids
                        # (an *index* list) when the cumulative batch overflows looks
                        # suspicious — confirm the intended behavior.
                        dynamic_input_ids.append(1)
        # From small to big. Crash in small batch is fast path.
        dynamic_input_ids.sort()
        # We assume there's a batch dim
        # TODO: Make it more genral...
        def _concretize_non_batch_dim(shape :relay.TensorType):
            # Replace every dynamic non-batch dim with a random concrete size;
            # a dynamic batch dim (index 0) is kept as tvm.tir.Any so the
            # per-sample loop below can fill it from dy_batch_size_list.
            concrete_shape = []
            for idx, x in enumerate(shape.shape):
                if isinstance(x, tvm.tir.Any):
                    if idx == 0:
                        concrete_shape.append(tvm.tir.Any())
                    else:
                        # NOTE(review): np.random.uniform(low, high) is called with
                        # (mu, sigma); the constant names suggest a normal draw was
                        # intended — confirm.  The min/max clamp below keeps the
                        # result in [_MIN_TEST_HW_, _MAX_TEST_HW_] either way.
                        dim = int(np.random.uniform(_HW_NORMAL_DIST_MU_, _HW_NORMAL_DIST_SIGMA_))
                        dim = min(dim, _MAX_TEST_HW_)
                        dim = max(dim, _MIN_TEST_HW_)
                        concrete_shape.append(dim)
                else:
                    concrete_shape.append(int(x))
            return relay.TensorType(shape=concrete_shape, dtype=shape.dtype)
        # clear inputs
        self.inputs = []
        for i in range(n_sample):
            this_input = []
            for shape in input_shapes:
                shape_type = _concretize_non_batch_dim(shape)
                shape_ = list(shape_type.shape)
                dtype_ = shape_type.dtype
                if relay.ty.is_dynamic(shape_type):
                    # Still dynamic means batch dim is dynamic
                    shape_[0] = dy_batch_size_list[i]
                # nd.array empty is dangerous! (causing inf)
                shape_ = [int(x) for x in shape_]
                data = np.zeros(shape=shape_, dtype=dtype_)
                this_input.append(tvm.nd.array(data))
            self.inputs.append(this_input)
        self.exe_mode = 'graph' # TODO: Test more runtimes.
        # random.choice(self.exe_mode_space(len(dynamic_input_ids) != 0))
    def __deepcopy__(self, meno):
        """Deep copy via a text round-trip of the module and numpy round-trips
        of all tensors (tvm objects do not support the default deepcopy)."""
        module = tvm.parser.parse(self.module.astext())
        params = {k:tvm.nd.array(v.numpy()) for k,v in self.params.items()}
        n_inp_node = self.n_inp_node
        exe_mode = deepcopy(self.exe_mode, meno)
        inputs = [[tvm.nd.array(i.numpy()) for i in inp]for inp in self.inputs]
        oracle = None if self.oracle is None else [[tvm.nd.array(i.numpy()) for i in inp]for inp in self.oracle]
        oracle_name = deepcopy(self.oracle_name, meno)
        return ExecutionConfig(
            module, params, n_inp_node, exe_mode, inputs, oracle, oracle_name
        )
@dataclass
class Context:
    """Top-level configuration of fuzzer.

    Bundles the runtime (module/inputs) and compile (target/passes) configs
    and provides pickle-based persistence plus joint mutation/checking.
    """
    runtime :ExecutionConfig
    compile :CompileConfig

    def dump(self, path):
        """Serialize both configs to `path` with pickle.

        tvm.nd.array objects are not picklable, so params and inputs are
        converted to NumPy arrays first.
        """
        to_store_params = {}
        for k, v in self.runtime.params.items():
            to_store_params[k] = v.numpy()
        with open(path, 'wb') as f:
            runtime_conf = {
                'module': self.runtime.module.astext(),
                'params': to_store_params,
                # NOTE(review): 'oracle' is stored raw — if it ever holds
                # tvm.nd.array values this dump would fail; today it is only
                # used with None.  Confirm before populating oracle.
                'n_inp_node': self.runtime.n_inp_node,
                'exe_mode': self.runtime.exe_mode,
                'inputs': [[x.numpy() for x in inp] for inp in self.runtime.inputs],
                'oracle': self.runtime.oracle,
                'oracle_name': self.runtime.oracle_name
            }
            compile_conf = {
                'target': self.compile.target,
                'relay_pass_types': self.compile.relay_pass_types,
                'tir_pass_nodes': graph.export_name(self.compile.tir_pass_nodes)
            }
            pickle.dump({
                'runtime': runtime_conf,
                'compile': compile_conf
            }, f, protocol=pickle.HIGHEST_PROTOCOL)

    def load(self, path):
        """Inverse of dump(): restore both configs from the pickle at `path`."""
        with open(path, 'rb') as f:
            data = pickle.load(f)
            self.compile.target = data['compile']['target']
            self.compile.relay_pass_types = data['compile']['relay_pass_types']
            self.compile.tir_pass_nodes = graph.recover(data['compile']['tir_pass_nodes'])
            for k, v in data['runtime'].items():
                if k == 'module':
                    self.runtime.module = tvm.parser.fromtext(v)
                elif k == 'params':
                    self.runtime.params = {}
                    for k_, v_ in v.items():
                        self.runtime.params[k_] = tvm.nd.array(v_)
                elif k == 'inputs':
                    # BUG FIX: the original assignment ended with a stray
                    # trailing comma, which stored a 1-tuple containing the
                    # list instead of the list of sample inputs itself.
                    self.runtime.inputs = [[tvm.nd.array(x) for x in inp] for inp in v]
                else:
                    setattr(self.runtime, k, v)

    def mutate(self):
        """Mutate runtime inputs and compile settings together."""
        self.runtime.mutate()
        self.compile.mutate()

    def check(self):
        """Validate that both sub-configs are fully populated."""
        self.runtime.check()
        self.compile.check()
| [
"tvm.parser.fromtext",
"tvm.rocm",
"tvm.relay.TensorType",
"tvm.cpu",
"pickle.dump",
"numpy.random.chisquare",
"tvm.nd.array",
"pickle.load",
"tvm.relay.ty.is_dynamic",
"tvm.relay.frontend.from_keras",
"numpy.zeros",
"numpy.random.uniform",
"copy.deepcopy",
"tvm.cuda",
"tvm.target.Target... | [((1532, 1557), 'tvm.target.Target', 'tvm.target.Target', (['"""llvm"""'], {}), "('llvm')\n", (1549, 1557), False, 'import tvm\n'), ((4246, 4273), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (4251, 4273), False, 'from dataclasses import dataclass, field\n'), ((2968, 2977), 'tvm.cpu', 'tvm.cpu', ([], {}), '()\n', (2975, 2977), False, 'import tvm\n'), ((4501, 4548), 'tvm.relay.frontend.from_keras', 'relay.frontend.from_keras', (['model', 'shape', 'layout'], {}), '(model, shape, layout)\n', (4526, 4548), False, 'from tvm import relay\n'), ((8400, 8429), 'copy.deepcopy', 'deepcopy', (['self.exe_mode', 'meno'], {}), '(self.exe_mode, meno)\n', (8408, 8429), False, 'from copy import deepcopy\n'), ((8645, 8677), 'copy.deepcopy', 'deepcopy', (['self.oracle_name', 'meno'], {}), '(self.oracle_name, meno)\n', (8653, 8677), False, 'from copy import deepcopy\n'), ((2861, 2871), 'tvm.cuda', 'tvm.cuda', ([], {}), '()\n', (2869, 2871), False, 'import tvm\n'), ((2942, 2952), 'tvm.rocm', 'tvm.rocm', ([], {}), '()\n', (2950, 2952), False, 'import tvm\n'), ((3359, 3384), 'tvm.target.Target', 'tvm.target.Target', (['"""llvm"""'], {}), "('llvm')\n", (3376, 3384), False, 'import tvm\n'), ((5268, 5290), 'tvm.relay.ty.is_dynamic', 'relay.ty.is_dynamic', (['s'], {}), '(s)\n', (5287, 5290), False, 'from tvm import relay\n'), ((7237, 7294), 'tvm.relay.TensorType', 'relay.TensorType', ([], {'shape': 'concrete_shape', 'dtype': 'shape.dtype'}), '(shape=concrete_shape, dtype=shape.dtype)\n', (7253, 7294), False, 'from tvm import relay\n'), ((9830, 9935), 'pickle.dump', 'pickle.dump', (["{'runtime': runtime_conf, 'compile': compile_conf}", 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), "({'runtime': runtime_conf, 'compile': compile_conf}, f, protocol\n =pickle.HIGHEST_PROTOCOL)\n", (9841, 9935), False, 'import pickle\n'), ((10059, 10073), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10070, 10073), False, 'import 
pickle\n'), ((7616, 7647), 'tvm.relay.ty.is_dynamic', 'relay.ty.is_dynamic', (['shape_type'], {}), '(shape_type)\n', (7635, 7647), False, 'from tvm import relay\n'), ((7900, 7936), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape_', 'dtype': 'dtype_'}), '(shape=shape_, dtype=dtype_)\n', (7908, 7936), True, 'import numpy as np\n'), ((5911, 5933), 'numpy.random.chisquare', 'np.random.chisquare', (['(3)'], {}), '(3)\n', (5930, 5933), True, 'import numpy as np\n'), ((7971, 7989), 'tvm.nd.array', 'tvm.nd.array', (['data'], {}), '(data)\n', (7983, 7989), False, 'import tvm\n'), ((10435, 10457), 'tvm.parser.fromtext', 'tvm.parser.fromtext', (['v'], {}), '(v)\n', (10454, 10457), False, 'import tvm\n'), ((6142, 6183), 'numpy.random.chisquare', 'np.random.chisquare', (['_SAMPLE_CHI_DIST_DF_'], {}), '(_SAMPLE_CHI_DIST_DF_)\n', (6161, 6183), True, 'import numpy as np\n'), ((6848, 6861), 'tvm.tir.Any', 'tvm.tir.Any', ([], {}), '()\n', (6859, 6861), False, 'import tvm\n'), ((6923, 6985), 'numpy.random.uniform', 'np.random.uniform', (['_HW_NORMAL_DIST_MU_', '_HW_NORMAL_DIST_SIGMA_'], {}), '(_HW_NORMAL_DIST_MU_, _HW_NORMAL_DIST_SIGMA_)\n', (6940, 6985), True, 'import numpy as np\n'), ((10634, 10650), 'tvm.nd.array', 'tvm.nd.array', (['v_'], {}), '(v_)\n', (10646, 10650), False, 'import tvm\n'), ((10731, 10746), 'tvm.nd.array', 'tvm.nd.array', (['x'], {}), '(x)\n', (10743, 10746), False, 'import tvm\n')] |
import os
import argparse
import matplotlib.pyplot as plt
from datetime import datetime, timedelta, date
import nottingham_covid_modelling.lib.priors as priors
import numpy as np
import pints
from nottingham_covid_modelling import MODULE_DIR
# Load project modules
from nottingham_covid_modelling.lib._command_line_args import NOISE_MODEL_MAPPING
from nottingham_covid_modelling.lib.equations import get_model_SIUR_solution, get_model_solution, get_model_SIR_solution, get_model_SEIUR_solution
from nottingham_covid_modelling.lib.settings import Params, get_file_name_suffix
# Map from model name to its forward-solution function; note 'SIR' and
# 'SIRDeltaD' share the same solver (they differ only in fitted parameters).
MODEL_FUNCTIONS ={'SItD':get_model_solution, 'SIR': get_model_SIR_solution, 'SIRDeltaD': get_model_SIR_solution, 'SIUR':get_model_SIUR_solution, 'SEIUR':get_model_SEIUR_solution}
# Functions
def parameter_to_optimise_list(FitFull, FitStep, model_name):
    """Return the ordered list of parameter names to optimise for a model.

    FitFull adds the model-specific rate parameters (SItD has none);
    FitStep appends the lockdown step parameters.  NB_phi is appended
    later by the likelihood class, not here.
    """
    # Valid model_names: 'SIR', 'SIRDeltaD', 'SItD', 'SIUR', 'SEIUR'
    assert model_name in ['SIR', 'SIRDeltaD', 'SItD', 'SIUR', 'SEIUR'], "Unknown model"
    names = ['rho', 'Iinit1']
    if FitFull:
        # Model-specific parameters added (in order) for a full fit.
        full_fit_extras = {
            'SItD': [],
            'SIR': ['theta'],
            'SIRDeltaD': ['theta', 'DeltaD'],
            'SIUR': ['theta', 'xi'],
            'SEIUR': ['theta', 'eta', 'xi'],
        }
        names.extend(full_fit_extras[model_name])
    if FitStep:
        names.extend(['lockdown_baseline', 'lockdown_offset'])
    return names
def run_optimise():
    """Command-line entry point: fit the chosen epidemic model to a synthetic
    SItD data set with repeated CMA-ES runs and store the best parameters.

    Reads options from argv (number of repeats, model, synthetic data set
    number, which parameter groups to fit), loads the pre-generated noisy
    death data, then runs `repeats` CMA-ES optimisations from random starting
    points and writes the best parameters/scores to text files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--repeats", type=int, help="number of CMA-ES repeats", default=5)
    parser.add_argument("-d", "--detailed_output", action='store_true',
                        help="whether to output detailed information (CMA-ES logs and all repeat parameters) or not",
                        default=False)
    parser.add_argument("--cmaes_fits", type=str, help="folder to store cmaes fits files in, default: ./cmaes_fits_SIR",
                        default=os.path.join(MODULE_DIR, 'cmaes_fits_SIR'))
    parser.add_argument("--limit_pints_iterations", type=int, default=None,
                        help=("limit pints to a maximum number of iterations. NOTE: this is mostly for debug and "
                              "testing purposes, you probably don't want to use this to get meaningful results!"))
    parser.add_argument("--model_name", type=str, help="which model to use", choices=MODEL_FUNCTIONS.keys(), default='SIR')
    # NOTE: these two use store_false with default True, so PASSING the flag
    # disables the corresponding fit.
    parser.add_argument("-full", "--fit_full", action='store_false', help='Whether to fit all the model parameters, or only [rho, I0, NB_phi], ', default=True)
    parser.add_argument("-fitstep", "--fit_step", action='store_false', help='Whether to fit step parameters', default=True)
    parser.add_argument("--syndata_num", type=int, help="Give the number of the synthetic data set you want to fit, default 1", default=1)
    # At the moment, syntethic data sets 2-9 have travel and step options only. There is only one data sets without step and one with neither travel nor step.
    args = parser.parse_args()
    repeats = args.repeats
    FitFull = args.fit_full
    FitStep = args.fit_step
    ModelName = args.model_name
    SyntDataNum_file = args.syndata_num
    max_iterations = args.limit_pints_iterations
    # For reproducibility:
    np.random.seed(100)
    # Number of days to fit
    maxtime_fit = 150
    # Get parameters, p
    p = Params()
    # Fixed for the synth data, based on UK google and ONS data:
    p.N = 59.1e6
    p.numeric_max_age = 35
    p.extra_days_to_simulate = 10
    p.IFR = 0.00724 # UK time
    p.square_lockdown = True
    p.alpha = np.ones(p.maxtime)
    p.lockdown_baseline = 0.2814 #0.2042884852266899
    p.lockdown_offset = 31.57 #34.450147247864166
    # For saving file names:
    rho_label = '_rho_0-2'
    Noise_label = 'NBphi_2e-3_'
    # Storing values in the models so there is no error after (since there is no store_params for the simple models)
    if ModelName != 'SItD':
        p.beta = 1
        p.theta = 1 / p.beta_mean
        p.eta = 1 / p.beta_mean
        p.DeltaD = 0
        p.xi = 1 / (p.death_mean - p.beta_mean)
    # define the params to optimize
    parameters_to_optimise = parameter_to_optimise_list(FitFull, FitStep, ModelName)
    # Get noise model
    noise_model = NOISE_MODEL_MAPPING['NegBinom']
    # Get simulated Age data from file
    print('Getting simulated data...')
    # folder to load data
    if SyntDataNum_file == 1: #Original default syntethic data
        folder_path = os.path.join(MODULE_DIR, 'out_SIRvsAGEfits')
        full_fit_data_file = 'SItRDmodel_ONSparams_noise_NB_NO-R_travel_TRUE_step_TRUE.npy'
    else:
        folder_path = os.path.join(MODULE_DIR, 'out_SIRvsAGE_SuplementaryFig')
        full_fit_data_file = 'SynteticSItD_default_params_travel_TRUE_step_TRUE_' + str(SyntDataNum_file) + '.npy'
    data_filename = full_fit_data_file
    # Load data
    data = np.load(os.path.join(folder_path, data_filename ))
    # Column layout of the synthetic data file: S, I_total, R, true deaths,
    # noisy deaths, then the per-age infection matrix.
    data_S = data[:,0]
    data_Itot = data[:,1]
    data_R = data[:,2]
    data_Dreal = data[:,3]
    data_D = data[:,4] # noise data
    data_I = data[:,5:].T # transpose to get exactly the same shape as other code
    # Clamp the fit window to the available data length.
    if len(data_R) < maxtime_fit:
        p.maxtime = len(data_R) -1
        maxtime_fit = len(data_R) -1
    else:
        p.maxtime = maxtime_fit
    # cut the data to the maxtime lenght:
    data_D = data_D[:p.maxtime+1]
    data_Dreal = data_Dreal[:p.maxtime+1]
    data_S_long = data_S
    data_S = data_S[:p.maxtime+1]
    data_Itot = data_Itot[:p.maxtime+1]
    data_R = data_R[:p.maxtime+1]
    data_I = data_I[:,:p.maxtime+1]
    # to get the same data and fit lenghts as in Data_loader
    p.maxtime = p.maxtime + p.numeric_max_age + p.extra_days_to_simulate #D[p.day_1st_death_after_150220: -(p.numeric_max_age + p.extra_days_to_simulate)]
    p.day_1st_death_after_150220 = 22
    # OPTIMISATION:
    print('Starting optimization...')
    # Set up optimisation
    folder = args.cmaes_fits
    os.makedirs(folder, exist_ok=True) # Create CMA-ES output destination folder
    filename = os.path.join(folder, get_file_name_suffix(p, 'SimSItD-' + str(SyntDataNum_file) + rho_label, Noise_label + 'model-' + ModelName + '_full-fit-' + str(FitFull), parameters_to_optimise))
    print('Selected data source: ' + data_filename)
    print('Selected noise model: Negative Binomial')
    print('Storing results to: ' + filename + '.txt')
    # Get likelihood function
    model_func = MODEL_FUNCTIONS[ModelName]
    LL = noise_model(p, data_D[p.day_1st_death_after_150220:] , parameters_to_optimise, model_func = model_func)
    upper_sigma = np.max(data_D)
    log_prior = priors.LogPrior(LL, upper_sigma, model_name = ModelName)
    parameters, scores = [], []
    # Tell CMA-ES about the bounds of this optimisation problem (helps it work out sensible sigma)
    bounds = pints.RectangularBoundaries(log_prior.lower, log_prior.upper)
    # Repeat optimisation multiple times from different initial guesses and pick best
    for i in range(repeats):
        print('Repeat: ' + str(i + 1))
        # Random initial guesses from uniform priors
        x0 = priors.get_good_starting_point(log_prior, LL, niterations=1000)
        # Create optimiser
        opt = pints.OptimisationController(LL, x0, boundaries=bounds, method=pints.CMAES)
        opt.set_max_iterations(max_iterations)
        opt.set_parallel(True)
        # Run optimisation
        with np.errstate(all='ignore'): # Tell numpy not to issue warnings
            xbest, fbest = opt.run()
            parameters.append(xbest)
            # pints minimises, so negate to store log-likelihood scores.
            scores.append(-fbest)
    # Sort according to smallest function score
    order = np.argsort(scores)
    scores = np.asarray(scores)[order]
    parameters = np.asarray(parameters)[order]
    print('---- Summary ...')
    print('Best parameters: ')
    print(parameters[0])
    print('Best score:')
    print(-scores[0])
    # Extract best
    obtained_parameters = parameters[0]
    # Store results
    print('Storing best result...')
    with open(filename + '.txt', 'w') as f:
        for x in obtained_parameters:
            f.write(pints.strfloat(x) + '\n')
    print('Storing all errors...')
    with open(filename + '-errors.txt', 'w') as f:
        for score in scores:
            f.write(pints.strfloat(-score) + '\n')
    if args.detailed_output:
        print('Storing all parameters...')
        for i, param in enumerate(parameters):
            with open(filename + '-parameters-' + str(1 + i) + '.txt', 'w') as f:
                for x in param:
                    f.write(pints.strfloat(x) + '\n')
| [
"nottingham_covid_modelling.lib.priors.get_good_starting_point",
"pints.OptimisationController",
"numpy.ones",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"nottingham_covid_modelling.lib.settings.Params",
"numpy.asarray",
"numpy.max",
"numpy.argsort",
"numpy.errstate",
"pints.Rec... | [((1714, 1739), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1737, 1739), False, 'import argparse\n'), ((3547, 3566), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (3561, 3566), True, 'import numpy as np\n'), ((3651, 3659), 'nottingham_covid_modelling.lib.settings.Params', 'Params', ([], {}), '()\n', (3657, 3659), False, 'from nottingham_covid_modelling.lib.settings import Params, get_file_name_suffix\n'), ((3878, 3896), 'numpy.ones', 'np.ones', (['p.maxtime'], {}), '(p.maxtime)\n', (3885, 3896), True, 'import numpy as np\n'), ((6286, 6320), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (6297, 6320), False, 'import os\n'), ((6929, 6943), 'numpy.max', 'np.max', (['data_D'], {}), '(data_D)\n', (6935, 6943), True, 'import numpy as np\n'), ((6960, 7014), 'nottingham_covid_modelling.lib.priors.LogPrior', 'priors.LogPrior', (['LL', 'upper_sigma'], {'model_name': 'ModelName'}), '(LL, upper_sigma, model_name=ModelName)\n', (6975, 7014), True, 'import nottingham_covid_modelling.lib.priors as priors\n'), ((7161, 7222), 'pints.RectangularBoundaries', 'pints.RectangularBoundaries', (['log_prior.lower', 'log_prior.upper'], {}), '(log_prior.lower, log_prior.upper)\n', (7188, 7222), False, 'import pints\n'), ((7974, 7992), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (7984, 7992), True, 'import numpy as np\n'), ((4782, 4826), 'os.path.join', 'os.path.join', (['MODULE_DIR', '"""out_SIRvsAGEfits"""'], {}), "(MODULE_DIR, 'out_SIRvsAGEfits')\n", (4794, 4826), False, 'import os\n'), ((4952, 5008), 'os.path.join', 'os.path.join', (['MODULE_DIR', '"""out_SIRvsAGE_SuplementaryFig"""'], {}), "(MODULE_DIR, 'out_SIRvsAGE_SuplementaryFig')\n", (4964, 5008), False, 'import os\n'), ((5203, 5243), 'os.path.join', 'os.path.join', (['folder_path', 'data_filename'], {}), '(folder_path, data_filename)\n', (5215, 5243), False, 'import os\n'), ((7443, 7506), 
'nottingham_covid_modelling.lib.priors.get_good_starting_point', 'priors.get_good_starting_point', (['log_prior', 'LL'], {'niterations': '(1000)'}), '(log_prior, LL, niterations=1000)\n', (7473, 7506), True, 'import nottingham_covid_modelling.lib.priors as priors\n'), ((7548, 7623), 'pints.OptimisationController', 'pints.OptimisationController', (['LL', 'x0'], {'boundaries': 'bounds', 'method': 'pints.CMAES'}), '(LL, x0, boundaries=bounds, method=pints.CMAES)\n', (7576, 7623), False, 'import pints\n'), ((8006, 8024), 'numpy.asarray', 'np.asarray', (['scores'], {}), '(scores)\n', (8016, 8024), True, 'import numpy as np\n'), ((8049, 8071), 'numpy.asarray', 'np.asarray', (['parameters'], {}), '(parameters)\n', (8059, 8071), True, 'import numpy as np\n'), ((2220, 2262), 'os.path.join', 'os.path.join', (['MODULE_DIR', '"""cmaes_fits_SIR"""'], {}), "(MODULE_DIR, 'cmaes_fits_SIR')\n", (2232, 2262), False, 'import os\n'), ((7742, 7767), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (7753, 7767), True, 'import numpy as np\n'), ((8433, 8450), 'pints.strfloat', 'pints.strfloat', (['x'], {}), '(x)\n', (8447, 8450), False, 'import pints\n'), ((8595, 8617), 'pints.strfloat', 'pints.strfloat', (['(-score)'], {}), '(-score)\n', (8609, 8617), False, 'import pints\n'), ((8888, 8905), 'pints.strfloat', 'pints.strfloat', (['x'], {}), '(x)\n', (8902, 8905), False, 'import pints\n')] |
# ライブラリのインポート
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import gym
from gym import spaces
# EasyMaze: a gym.Env subclass implementing a tiny deterministic maze
class EasyMaze(gym.Env):
    """
    A 12-state (s0..s11) deterministic maze environment.

    The agent starts at s0 and earns reward 1 on reaching the goal s11;
    every other transition yields reward 0. Rendering draws the maze with
    matplotlib and returns an RGB array.
    """

    # Only the 'rgb_array' render mode is provided; gym Wrappers may
    # inspect this declaration.
    metadata = {'render.modes': ['rgb_array']}

    m = 0.2  # width of the outer frame around the maze
    c = 1    # width of each cell

    agent_color = "blue"  # colour of the agent marker
    maze_color = "green"  # colour of the maze walls

    # Rectangles making up the outer frame and the internal walls
    maze_info_rec = {"xy":[(0, 0), (0, m+4*c), (m+4*c, 0), (0, 0),
                        (m, m+c), (m+c, m+3*c), (m+3*c, m+c)],
                     "width":[m, 2*m+4*c, m, 2*m+4*c,
                              2*c, c, c],
                     "height":[2*m+4*c, m, 2*m+4*c, m,
                               c, c, c]}

    # Dashed lines separating the cells (start and end points of each line)
    maze_info_line = {"s_xy":[(m, m+c), (m, m+2*c), (m, m+3*c),
                              (m+c, m), (m+2*c, m), (m+3*c, m)],
                      "e_xy":[(m+4*c, m+c), (m+4*c, m+2*c), (m+4*c, m+3*c),
                              (m+c, m+4*c), (m+2*c, m+4*c), (m+3*c, m+4*c)]}

    # Display positions of the state-name labels
    maze_state_pos = {"xy":[(m+0.5*c, m+3.5*c), (m+0.5*c, m+2.5*c), (m+1.5*c, m+2.5*c),
                            (m+2.5*c, m+2.5*c), (m+2.5*c, m+3.5*c), (m+3.5*c, m+3.5*c),
                            (m+3.5*c, m+2.5*c), (m+2.5*c, m+1.5*c), (m+2.5*c, m+0.5*c),
                            (m+3.5*c, m+0.5*c), (m+1.5*c, m+0.5*c), (m+0.5*c, m+0.5*c),],
                      "text":["s0", "s1", "s2", "s3", "s4", "s5", "s6",
                              "s7", "s8", "s9", "s10", "s11"]}

    # Successor state for each (state, action) pair.
    # In a general MDP the dynamics would be a probability P(s'|s,a); here
    # they are deterministic. Each list gives the successor for the actions
    # "left", "top", "right", "down" in that order.
    # E.g. from "s0": "left"/"top"/"right" are blocked (stay in "s0") and
    # "down" moves to "s1". All other states work the same way.
    dynamics = {"s0":["s0", "s0", "s0", "s1"],
                "s1":["s1", "s0", "s2", "s1"],
                "s2":["s1", "s2", "s3", "s2"],
                "s3":["s2", "s4", "s6", "s7"],
                "s4":["s4", "s4", "s5", "s3"],
                "s5":["s4", "s5", "s5", "s6"],
                "s6":["s3", "s5", "s6", "s6"],
                "s7":["s7", "s3", "s7", "s8"],
                "s8":["s10", "s7", "s9", "s8"],
                "s9":["s8", "s9", "s9", "s9"],
                "s10":["s11", "s10", "s8", "s10"],
                "s11":["s11", "s11", "s10", "s11"]}

    def __init__(self):
        super(EasyMaze, self).__init__()
        self.fig = None
        self.ax = None
        self.state = None
        # Four discrete actions: 0="left", 1="top", 2="right", 3="down"
        self.action_space = gym.spaces.Discrete(4)
        # Observation is the index of the cell the agent occupies (12 cells)
        self.observation_space = gym.spaces.Discrete(12)
        # Immediate rewards lie between 0 and 1
        self.reward_range = (0, 1)

    def reset(self):
        """Place the agent at the start state "s0" and return its index."""
        self.state = "s0"
        # Return the numeric part of the state name as the observation
        return int(self.state[1:])

    def step(self, action):
        """Advance one step; return (observation, reward, done, info)."""
        # Deterministic transition from the current state under `action`
        self.state = self.dynamics[self.state][action]
        # Reaching the goal "s11" ends the episode with reward 1;
        # any other state continues with reward 0.
        if self.state == "s11":
            done = True
            reward = 1
        else:
            done = False
            reward = 0
        info = {}  # unused in this environment
        return int(self.state[1:]), reward, done, info

    def render(self, mode='rgb_array'):
        """Draw the maze and the agent; return an (H, W, 3) RGB array."""
        # Build the maze figure with matplotlib
        self.make_maze()
        # Place the agent at its current cell
        self.plot_agent(self.state)
        # Convert the matplotlib figure to an array and drop the alpha channel
        rgb_array = self.fig2array()[:, :, :3]
        return rgb_array

    def make_maze(self):
        """Create the maze figure (walls, grid lines, state labels)."""
        self.fig = plt.figure(figsize=(7, 7), dpi=200)
        self.ax = plt.axes()
        self.ax.axis("off")
        # Outer frame and internal walls
        for i in range(len(self.maze_info_rec["xy"])):
            r = patches.Rectangle(xy=self.maze_info_rec["xy"][i],
                                  width=self.maze_info_rec["width"][i],
                                  height=self.maze_info_rec["height"][i],
                                  color=self.maze_color,
                                  fill=True)
            self.ax.add_patch(r)
        # Dashed cell boundaries
        for i in range(len(self.maze_info_line["s_xy"])):
            self.ax.plot([self.maze_info_line["s_xy"][i][0], self.maze_info_line["e_xy"][i][0]],
                         [self.maze_info_line["s_xy"][i][1], self.maze_info_line["e_xy"][i][1]],
                         linewidth=1,
                         linestyle="--",
                         color=self.maze_color)
        # State labels (start and goal states get special text below)
        for i in range(1, len(self.maze_state_pos["xy"])-1):
            self.ax.text(self.maze_state_pos["xy"][i][0],
                         self.maze_state_pos["xy"][i][1],
                         self.maze_state_pos["text"][i],
                         size=14,
                         ha="center")
        # Start-state label
        self.ax.text(self.maze_state_pos["xy"][0][0],
                     self.maze_state_pos["xy"][0][1],
                     "s0\n start",
                     size=14,
                     ha="center")
        # Goal-state label
        self.ax.text(self.maze_state_pos["xy"][11][0],
                     self.maze_state_pos["xy"][11][1],
                     "s11\n goal",
                     size=14,
                     ha="center")

    def plot_agent(self, state_name):
        """Draw the agent marker at the cell named `state_name`."""
        state_index = self.maze_state_pos["text"].index(state_name)
        agent_pos = self.maze_state_pos["xy"][state_index]
        self.ax.plot([agent_pos[0]],
                     [agent_pos[1]],
                     marker="o",
                     color=self.agent_color,
                     markersize=50)

    def fig2array(self):
        """Convert the current figure to an (H, W, 4) RGBA uint8 array."""
        self.fig.canvas.draw()
        w, h = self.fig.canvas.get_width_height()
        # np.frombuffer replaces the deprecated np.fromstring; the returned
        # view is read-only, which is fine because np.roll below allocates
        # a fresh array.
        buf = np.frombuffer(self.fig.canvas.tostring_argb(), dtype=np.uint8)
        # BUG FIX: the ARGB byte stream is row-major, so the row count is the
        # canvas HEIGHT - the shape must be (h, w, 4). The original (w, h, 4)
        # only worked because this figure happens to be square.
        buf = buf.reshape(h, w, 4)
        # Rotate the channel axis ARGB -> RGBA
        buf = np.roll(buf, 3, axis=2)
        return buf
| [
"matplotlib.patches.Rectangle",
"numpy.roll",
"gym.spaces.Discrete",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes"
] | [((2840, 2862), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(4)'], {}), '(4)\n', (2859, 2862), False, 'import gym\n'), ((2933, 2956), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(12)'], {}), '(12)\n', (2952, 2956), False, 'import gym\n'), ((3973, 4008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)', 'dpi': '(200)'}), '(figsize=(7, 7), dpi=200)\n', (3983, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4027, 4037), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (4035, 4037), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6462), 'numpy.roll', 'np.roll', (['buf', '(3)'], {'axis': '(2)'}), '(buf, 3, axis=2)\n', (6446, 6462), True, 'import numpy as np\n'), ((4157, 4328), 'matplotlib.patches.Rectangle', 'patches.Rectangle', ([], {'xy': "self.maze_info_rec['xy'][i]", 'width': "self.maze_info_rec['width'][i]", 'height': "self.maze_info_rec['height'][i]", 'color': 'self.maze_color', 'fill': '(True)'}), "(xy=self.maze_info_rec['xy'][i], width=self.maze_info_rec[\n 'width'][i], height=self.maze_info_rec['height'][i], color=self.\n maze_color, fill=True)\n", (4174, 4328), True, 'import matplotlib.patches as patches\n')] |
#
# Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
import os
import sys
from random import randint
import numpy as np
import lenet5
try:
from PIL import Image
import pycuda.driver as cuda
import pycuda.autoinit
import argparse
except ImportError as err:
sys.stderr.write("""ERROR: failed to import module ({})
Please make sure you have pycuda and the example dependencies installed.
https://wiki.tiker.net/PyCuda/Installation/Linux
pip(3) install tensorrt[examples]
""".format(err))
exit(1)
try:
import uff
except ImportError:
raise ImportError("""Please install the UFF Toolkit""")
try:
import tensorrt as trt
from tensorrt.parsers import uffparser
except ImportError as err:
sys.stderr.write("""ERROR: failed to import module ({})
Please make sure you have the TensorRT Library installed
and accessible in your LD_LIBRARY_PATH
""".format(err))
exit(1)
MAX_WORKSPACE = 1 << 30  # max scratch memory TensorRT may use while building the engine (1 GiB)
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)  # global TensorRT console logger
INPUT_W = 28  # input image width (MNIST)
INPUT_H = 28  # input image height (MNIST)
OUTPUT_SIZE = 10  # number of output classes (digits 0-9)
MAX_BATCHSIZE = 1  # maximum batch size the engine is built for
ITERATIONS = 10  # number of random test cases classified in main()
# API CHANGE: Try to generalize into a utils function
#Run inference on device
def infer(context, input_img, batch_size):
    """
    Run one inference pass on the GPU and return the raw network output.

    Args:
        context: TensorRT execution context (engine.create_execution_context()).
        input_img: numpy array holding the input image(s); cast to float32 here.
        batch_size: number of images in the batch.

    Returns:
        1-D numpy float32 array of length C*H*W*batch_size with the network
        output (shape taken from the engine's output binding).
    """
    # Load engine and check it has exactly one input and one output binding
    engine = context.get_engine()
    assert(engine.get_nb_bindings() == 2)
    # Size the host output buffer from the output binding's dimensions
    dims = engine.get_binding_dimensions(1).to_DimsCHW()
    elt_count = dims.C() * dims.H() * dims.W() * batch_size
    # TensorRT expects float32 input
    input_img = input_img.astype(np.float32)
    # Page-locked host memory enables truly asynchronous device<->host copies
    output = cuda.pagelocked_empty(elt_count, dtype=np.float32)
    # Allocate device memory for input and output
    d_input = cuda.mem_alloc(batch_size * input_img.size * input_img.dtype.itemsize)
    d_output = cuda.mem_alloc(batch_size * output.size * output.dtype.itemsize)
    bindings = [int(d_input), int(d_output)]
    stream = cuda.Stream()
    # Transfer input data to device
    cuda.memcpy_htod_async(d_input, input_img, stream)
    # Execute model
    context.enqueue(batch_size, bindings, stream.handle, None)
    # Transfer predictions back
    cuda.memcpy_dtoh_async(output, d_output, stream)
    # BUG FIX: wait for the asynchronous copy to complete before handing the
    # buffer to the caller - without this the host buffer may still be being
    # written when it is read (the reference NVIDIA sample synchronizes here).
    stream.synchronize()
    # Return predictions
    return output
def main():
    """
    Train (or load) the LeNet-5 TensorFlow model, convert it to a TensorRT
    engine via the UFF format, and print predictions for ITERATIONS random
    MNIST test cases.
    """
    # (An unused `path` local computed from __file__ was removed.)
    tf_model = lenet5.learn()
    uff_model = uff.from_tensorflow(tf_model, ["fc2/Relu"])
    # Convert Tensorflow model to TensorRT model
    parser = uffparser.create_uff_parser()
    parser.register_input("Placeholder", (1, 28, 28), 0)
    parser.register_output("fc2/Relu")
    engine = trt.utils.uff_to_trt_engine(G_LOGGER,
                                         uff_model,
                                         parser,
                                         MAX_BATCHSIZE,
                                         MAX_WORKSPACE)
    # NOTE: assert is stripped under `python -O`; kept for parity with the
    # original sample.
    assert(engine)
    context = engine.create_execution_context()
    print("\n| TEST CASE | PREDICTION |")
    for i in range(ITERATIONS):
        img, label = lenet5.get_testcase()
        img = img[0]
        label = label[0]
        out = infer(context, img, 1)
        print("|-----------|------------|")
        print("| " + str(label) + " | " + str(np.argmax(out)) + " |")

if __name__ == "__main__":
    main()
| [
"pycuda.driver.mem_alloc",
"pycuda.driver.pagelocked_empty",
"pycuda.driver.Stream",
"tensorrt.infer.ConsoleLogger",
"tensorrt.parsers.uffparser.create_uff_parser",
"lenet5.learn",
"pycuda.driver.memcpy_htod_async",
"numpy.argmax",
"os.path.realpath",
"uff.from_tensorflow",
"tensorrt.utils.uff_t... | [((3442, 3493), 'tensorrt.infer.ConsoleLogger', 'trt.infer.ConsoleLogger', (['trt.infer.LogSeverity.INFO'], {}), '(trt.infer.LogSeverity.INFO)\n', (3465, 3493), True, 'import tensorrt as trt\n'), ((4073, 4123), 'pycuda.driver.pagelocked_empty', 'cuda.pagelocked_empty', (['elt_count'], {'dtype': 'np.float32'}), '(elt_count, dtype=np.float32)\n', (4094, 4123), True, 'import pycuda.driver as cuda\n'), ((4166, 4236), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['(batch_size * input_img.size * input_img.dtype.itemsize)'], {}), '(batch_size * input_img.size * input_img.dtype.itemsize)\n', (4180, 4236), True, 'import pycuda.driver as cuda\n'), ((4252, 4316), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['(batch_size * output.size * output.dtype.itemsize)'], {}), '(batch_size * output.size * output.dtype.itemsize)\n', (4266, 4316), True, 'import pycuda.driver as cuda\n'), ((4377, 4390), 'pycuda.driver.Stream', 'cuda.Stream', ([], {}), '()\n', (4388, 4390), True, 'import pycuda.driver as cuda\n'), ((4431, 4481), 'pycuda.driver.memcpy_htod_async', 'cuda.memcpy_htod_async', (['d_input', 'input_img', 'stream'], {}), '(d_input, input_img, stream)\n', (4453, 4481), True, 'import pycuda.driver as cuda\n'), ((4599, 4647), 'pycuda.driver.memcpy_dtoh_async', 'cuda.memcpy_dtoh_async', (['output', 'd_output', 'stream'], {}), '(output, d_output, stream)\n', (4621, 4647), True, 'import pycuda.driver as cuda\n'), ((4775, 4789), 'lenet5.learn', 'lenet5.learn', ([], {}), '()\n', (4787, 4789), False, 'import lenet5\n'), ((4807, 4850), 'uff.from_tensorflow', 'uff.from_tensorflow', (['tf_model', "['fc2/Relu']"], {}), "(tf_model, ['fc2/Relu'])\n", (4826, 4850), False, 'import uff\n'), ((4918, 4947), 'tensorrt.parsers.uffparser.create_uff_parser', 'uffparser.create_uff_parser', ([], {}), '()\n', (4945, 4947), False, 'from tensorrt.parsers import uffparser\n'), ((5057, 5147), 'tensorrt.utils.uff_to_trt_engine', 'trt.utils.uff_to_trt_engine', (['G_LOGGER', 
'uff_model', 'parser', 'MAX_BATCHSIZE', 'MAX_WORKSPACE'], {}), '(G_LOGGER, uff_model, parser, MAX_BATCHSIZE,\n MAX_WORKSPACE)\n', (5084, 5147), True, 'import tensorrt as trt\n'), ((4731, 4757), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4747, 4757), False, 'import os\n'), ((5516, 5537), 'lenet5.get_testcase', 'lenet5.get_testcase', ([], {}), '()\n', (5535, 5537), False, 'import lenet5\n'), ((5724, 5738), 'numpy.argmax', 'np.argmax', (['out'], {}), '(out)\n', (5733, 5738), True, 'import numpy as np\n')] |
'''
This file contains a set of functions to plot models for Matisse. Including
- Map of ScS, SKS and SKKS data
- Global map of the Trigonal domains
- Regional view showing phases and domains
- Global view showing Reciever Side corrections
- Regional map showing the number of ScS, SKS and SKKS paths in each domain for a given model (D`` and RSide)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib import colorbar
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def draw_trigonal_doms(ax, doms2plot, extent=None, color='black', trns=ccrs.PlateCarree()):
    '''
    Draw the trigonal domains contained in doms2plot on the axis ax.

    Each row of doms2plot is indexed as: column 1 = midpoint latitude,
    column 2 = midpoint longitude, columns 3,5,7 = vertex latitudes and
    columns 4,6,8 = vertex longitudes (inferred from the indexing below -
    confirm against the bins file format).

    Args:
        ax - cartopy GeoAxes to draw on
        doms2plot - array of domain rows as described above
        extent - [lon_min, lon_max, lat_min, lat_max]; only domains whose
                 midpoint lies inside are drawn (default: whole globe)
        color - edge colour of the domain outlines.
                BUG FIX: this argument was previously accepted but ignored
                (the edge colour was hard-coded to 'black', which is also
                the default, so existing callers are unaffected).
        trns - NOTE(review): accepted but currently unused; kept for
               backward compatibility - confirm whether it should be passed
               to the PatchCollection.
    '''
    # Idiom fix: avoid a mutable default argument for `extent`
    if extent is None:
        extent = [-180, 180, -90, 90]
    trig = []
    for i, dom in enumerate(doms2plot):
        if (dom[2] > extent[0]) & (dom[2] < extent[1]) & (dom[1] > extent[2]) & (dom[1] < extent[3]):
            # Unwrap domains that straddle the dateline so the polygon does
            # not smear across the whole map. NB: this mutates `dom` - and
            # therefore the caller's array - in place.
            if (dom[4] == 180) & (dom[2] < 0):
                dom[4] = -180
                if (dom[6] == 180):
                    dom[6] = -180
                elif (dom[8] == 180):
                    dom[8] = -180
            elif (dom[6] == 180) & (dom[2] < 0):
                dom[6] = -180
                if (dom[8] == 180):
                    dom[8] = -180
            elif (dom[8] == 180) & (dom[2] < 0):
                dom[8] = -180
            vertices = np.zeros([3,2])
            vertices[:,0] = dom[[4,6,8]]  # vertex longitudes
            vertices[:,1] = dom[[3,5,7]]  # vertex latitudes
            trig += [Polygon(vertices,closed=True)]
    tps = PatchCollection(trig, alpha=0.6)
    tps.set_linestyle('--')
    tps.set_edgecolor(color)  # honour the `color` argument (was hard-coded)
    tps.set_facecolor('white')
    tps.set_linewidth(2.)
    ax.add_collection(tps)
def map_path_counts(cfile,extent=[-160,-70,0,60]):
    '''
    Map the number of ScS, SKS and SKKS paths per trigonal domain.

    Top panel: counts where the phases pierce D''; bottom panel: counts in
    the receiver-side (upper-mantle) domains. A coloured dot is drawn at the
    midpoint of every domain with a non-zero count.
    '''
    all_doms = np.loadtxt('T3_global.bins', skiprows=1)
    path_counts = np.loadtxt(cfile)
    # Keep only the domains that actually appear in the counts file
    used_doms = all_doms[np.isin(all_doms[:, 0], path_counts[:, 0])]
    lower_counts = path_counts[:, 1] + path_counts[:, 2] + path_counts[:, 3]
    fig = plt.figure(figsize=(10, 22))
    top_ax = fig.add_subplot(211, projection=ccrs.PlateCarree())
    top_ax.set_extent(extent, crs=ccrs.PlateCarree())
    top_ax.add_feature(cfeature.GSHHSFeature(levels=[1], scale='high'))
    top_ax.set_title(r"ScS, SKS and SKKS path density in D$''$")
    draw_trigonal_doms(top_ax, used_doms, extent)
    for idx in range(len(used_doms)):
        if lower_counts[idx] > 0:
            top_ax.scatter(used_doms[idx, 2], used_doms[idx, 1],
                           c=lower_counts[idx], transform=ccrs.PlateCarree())
    # Second panel: receiver-side counts
    bottom_ax = fig.add_subplot(212, projection=ccrs.PlateCarree())
    bottom_ax.set_extent(extent, crs=ccrs.PlateCarree())
    bottom_ax.add_feature(cfeature.GSHHSFeature(levels=[1], scale='high'))
    upper_counts = path_counts[:, 4] + path_counts[:, 5] + path_counts[:, 6]
    draw_trigonal_doms(bottom_ax, used_doms, extent)
    for idx in range(len(used_doms)):
        if upper_counts[idx] > 0:
            bottom_ax.scatter(used_doms[idx, 2], used_doms[idx, 1],
                              c=upper_counts[idx], transform=ccrs.PlateCarree())
    plt.show()
def contour_map_counts(cfile,extent=[-170,-90,-10,70]):
    '''
    Make a filled contour map showing the coverage we have in ScS, SKS and
    SKKS phases (counts interpolated between domain midpoints).

    Args:
        cfile [str] - path to the counts file (generated by routine in Pathset.py)
        extent [list] - geographic extent [lon_min, lon_max, lat_min, lat_max]
    '''
    domains = np.loadtxt('T3_global.bins',skiprows=1)
    counts = np.loadtxt(cfile)
    doms2plot = domains[np.isin(domains[:,0],counts[:,0])]
    ldom_counts = counts[:,1] + counts[:,2] + counts[:,3]
    fig = plt.figure(figsize=(12,12))
    ax = fig.add_subplot(211,projection=ccrs.PlateCarree())
    ax.set_extent(extent, crs=ccrs.PlateCarree())
    ax.add_feature(cfeature.GSHHSFeature(levels=[1],scale='high'))
    # BUG FIX: draw_trigonal_doms has no `fmt` keyword, so the original call
    # draw_trigonal_doms(ax, doms2plot, extent, fmt='k:') raised TypeError.
    # The dashed styling is handled inside draw_trigonal_doms itself.
    draw_trigonal_doms(ax, doms2plot, extent)
    cntr = ax.tricontourf(doms2plot[:,2], doms2plot[:,1], ldom_counts, levels=10, vmin=1 )
    fig.colorbar(cntr, ax=ax)
def plot_swave_model(mfile,title,save=False):
    '''
    Plot a surface wave model, showing the orientation of the anisotropy at
    each mesh point. Can be used for a single depth slice or a depth-averaged
    model.

    Args:
        mfile (str) - file containing the surface wave model to plot, with columns
                      lon, lat, phi, strength (optionally preceded by a bin-number column)
        title (str) - title to give the plot
        save (bool) - if True, prompt for a filename and save the figure at 400 dpi

    Returns:
        map view of the model
    '''
    model = np.loadtxt(mfile)
    # Heuristic: if the first column increments by exactly 1 it is a bin
    # number, so the data columns are shifted right by one.
    # NOTE(review): in the bin-number branch lon/lat are read from columns
    # 2/1 rather than 1/2 - confirm against the model file format.
    if model[1,0] - model[0,0] == 1.0 :
        print('First column is Bin No. adjusting')
        lon = model[:,2]
        lat = model[:,1]
        phi = model[:,3]
        strength = model[:,4]
    else:
        lon = model[:,0]
        lat = model[:,1]
        phi = model[:,2]
        strength = model[:,3]
    # If all phi values lie within +/- pi/2 they are taken to be radians
    # and converted to degrees.
    if (phi.max() <= np.pi/2) & (phi.min() >= -np.pi/2):
        # BUG FIX: corrected message typo 'raidans' -> 'radians'
        print('Phi is in radians, convert to degrees')
        phi = np.rad2deg(phi)
    fig = plt.figure(figsize=(11,11))
    ax = fig.add_subplot(111,projection=ccrs.PlateCarree())
    extent=[-140,-70,0,50]
    ax.set_extent(extent)
    ax.add_feature(cfeature.GSHHSFeature(levels=[1],scale='high'))
    # Headless quiver (headaxislength=0) draws orientation bars; the angle
    # is measured as 90-phi so phi is an azimuth from north.
    ax.quiver(lon,lat,strength,strength,angles=90-phi,
              headaxislength=0,transform=ccrs.PlateCarree(),
              pivot='mid',units='xy',scale=0.5)
    # Mark the mesh points themselves
    ax.plot(lon,lat,'r.',transform=ccrs.PlateCarree())
    ax.set_title(title)
    grd = ax.gridlines(draw_labels=True)
    grd.top_labels = None
    if save == True:
        modelname = input('Enter name for the plot >>>')
        plt.savefig(f'../SchafferSurfaceWaveModels/{modelname}',dpi=400)
    plt.show()
def draw_tri_patch(ax, doms2plot, counts, cmin):
    '''
    Draw filled trigonal-domain patches on ax, coloured by per-domain counts,
    and attach a horizontal colorbar below the axes.

    Only domains whose count exceeds cmin are drawn; the colour array is the
    matching subset of counts (both filters use the same `> cmin` test, so
    patches and colours stay aligned as long as counts is ordered like
    doms2plot).

    Args:
        ax - axes to draw on
        doms2plot - array of domain rows (vertex lons in cols 4,6,8, lats in 3,5,7)
        counts - per-domain phase counts, aligned row-for-row with doms2plot
        cmin - minimum count for a domain to be drawn
    Returns:
        tps - the PatchCollection added to ax (useful for set_clim etc.)
    '''
    trig = []
    # Robustness: use the figure that owns `ax` rather than whatever figure
    # happens to be "current" (plt.gcf()). Identical for existing callers,
    # but safe if another figure is active.
    fig = ax.figure
    for i, dom in enumerate(doms2plot):
        if counts[i] > cmin:
            vertices = np.zeros([3,2])
            vertices[:,0] = dom[[4,6,8]]
            vertices[:,1] = dom[[3,5,7]]
            trig += [Polygon(vertices,closed=True)]
    tps = PatchCollection(trig, alpha=0.6)
    tps.set_array(np.array(counts[counts > cmin])) # sets colors
    tps.set_linestyle('-')
    tps.set_edgecolor('black')
    tps.set_linewidth(2.)
    ax.add_collection(tps)
    # Add colorbar below the axes
    cax,kw = colorbar.make_axes(ax,location='bottom',pad=0.05,shrink=0.7)
    cbar = fig.colorbar(tps, cax=cax, **kw)
    cbar.set_label('No. of phases in domain')
    return tps
def counts_heatmap(cfile,bins='T3_global.bins', extent=[-180,180,-90,90],cmin=5):
    '''
    Heatmap of coverage: each trigonal domain is coloured by the number of
    phases that pass through it.

    Args:
        cfile [str] - path to the textfile containing the counts data (generated by routine in Pathset.py)
        bins [str] - path to the trigonal-domain bins file.
                     BUG FIX: default was 'T3_global.bin' - every other
                     function in this module reads 'T3_global.bins', so the
                     old default raised FileNotFoundError.
        extent [list] - geographic extent of maps [lon_min, lon_max, lat_min, lat_max]. NB current cartopy implementation does not wrap around -180/180. Pick a side!
        cmin [int] - the minimum number of counts to draw a polygon for.
    Returns:
        fig [figure] - a 2 panel figure showing ScS and SnKS coverage in D'' and the Upper Mantle (Receiver Side)
    '''
    domains = np.loadtxt(bins,skiprows=1)
    counts = np.loadtxt(cfile)
    doms2plot = domains[np.isin(domains[:,0],counts[:,0])]
    fig = plt.figure(figsize=(12,12))
    ax = fig.add_subplot(211,projection=ccrs.PlateCarree())
    ax.set_extent(extent, crs=ccrs.PlateCarree())
    ax.add_feature(cfeature.GSHHSFeature(levels=[1],scale='high'))
    ldom_counts = counts[:,1] + counts[:,2] + counts[:,3]
    # BUG FIX: draw_tri_patch takes (ax, doms2plot, counts, cmin) and already
    # returns the PatchCollection it added - the original omitted `ax`
    # (TypeError) and then wrapped the returned collection in a second
    # PatchCollection (also a crash).
    tps = draw_tri_patch(ax, doms2plot, ldom_counts, cmin)
    tps.set_clim([10,ldom_counts.max()])
    fig.colorbar(tps, ax=ax)
    ax.set_title(r"Coverage of SKS, SKKS and ScS phases in D$''$")
    # Now draw a subplot for Rside domain counts
    ax2 = fig.add_subplot(212,projection=ccrs.PlateCarree())
    ax2.set_extent(extent, crs=ccrs.PlateCarree())
    ax2.add_feature(cfeature.GSHHSFeature(levels=[1],scale='high'))
    rdom_counts = counts[:,4] + counts[:,5] + counts[:,6]
    print(rdom_counts[rdom_counts >1])
    # BUG FIX: draw the second panel's patches on ax2 (the original passed
    # ax) and pass tps2 - not tps - to the second colorbar.
    tps2 = draw_tri_patch(ax2, doms2plot, rdom_counts, cmin)
    # Colour scale shared with the top panel (clim from ldom_counts)
    tps2.set_clim([10,ldom_counts.max()])
    fig.colorbar(tps2, ax=ax2)
    ax2.set_title(r"Coverage of SKS, SKKS and ScS phases in Upper Mantle (Receiver Side)")
    plt.savefig('Global_phasecount_heatmap',format='png', dpi=500)
def map_T3_doms(domfile='T3_global.bins', extent=[-170,-60,0,70]):
    '''
    Draw all domains with their domain ID, for identification of domains and
    reference use with other maps.

    Args:
        domfile [str] - path to the textfile containing the domain definitions
        extent [list] - geographic extent of the map [lon_min, lon_max, lat_min, lat_max]. NB current cartopy implementation does not wrap around -180/180. Pick a side!
    Returns:
        fig [figure] - a figure with all trigonal domains in domfile drawn with the domain ID added
    '''
    domains = np.loadtxt(domfile,skiprows=1)
    fig = plt.figure(figsize=(8,8))
    ax1 = fig.add_subplot(211,projection=ccrs.PlateCarree())
    ax1.add_feature(cfeature.GSHHSFeature(levels=[1],scale='auto'))
    draw_trigonal_doms(ax1, domains)
    for dom in domains:
        # BUG FIX: the original tested dom[3] (a vertex latitude) against the
        # lower latitude bound and dom[4] (a vertex LONGITUDE) against the
        # upper latitude bound. Use the domain midpoint (dom[1]=lat,
        # dom[2]=lon), consistent with draw_trigonal_doms.
        if (dom[2] > extent[0]) & (dom[2] < extent[1]) & (dom[1] > extent[2]) & (dom[1] < extent[3]):
            # NB dom[0] is read as float by loadtxt, so labels render as e.g. '12.0'
            ax1.text(dom[2]-1,dom[1],str(dom[0]),transform=ccrs.PlateCarree())
    ax1.set_extent(extent, crs=ccrs.PlateCarree())
    plt.savefig('domain_map_w_IDs.eps',bbox_inches='tight')
def _coverage_panel(fig, pos, domains, doms2plot, dom_counts, extent, cmin, title):
    '''Draw one coverage map panel: coastlines, domain outlines, heat patches.

    Args:
        fig - figure to add the panel to
        pos - subplot position code (e.g. 211)
        domains - full domain array, used for the outline grid
        doms2plot - domains with counts, passed to draw_tri_patch
        dom_counts - per-domain counts aligned with doms2plot
        extent - [lon_min, lon_max, lat_min, lat_max] map window
        cmin - minimum count for a filled patch
        title - panel title
    Returns:
        the axes of the new panel
    '''
    ax = fig.add_subplot(pos, projection=ccrs.PlateCarree())
    ax.set_extent(extent, crs=ccrs.PlateCarree())
    ax.add_feature(cfeature.GSHHSFeature(levels=[1],scale='auto'))
    draw_trigonal_doms(ax, domains)
    draw_tri_patch(ax, doms2plot, dom_counts, cmin)
    ax.set_title(title)
    grd = ax.gridlines(draw_labels=True,linewidth=0)
    # `top_labels` replaces the deprecated `xlabels_top` used in the original
    grd.top_labels = None
    return ax

def phase_counts_heatmap_byphase(cfile='ScS_SnKS.counts', extent=[-170,-60,0,70], cmin=0, save=False):
    '''
    Make separate coverage maps broken down by phase: one two-panel figure
    for ScS and one for SKS/SKKS, each showing D'' and receiver-side counts.

    Args:
        cfile [str] - path to the textfile containing the counts data (generated by routine in Pathset.py)
        extent [list] - geographic extent of maps [lon_min, lon_max, lat_min, lat_max]. NB current cartopy implementation does not wrap around -180/180. Pick a side!
        cmin [int] - the minimum number of counts to draw a polygon for.
        save [bool] - switch for if you want to save plots or not
    Returns:
        scs_fig [figure] - a 2 panel figure showing ScS coverage in D'' and the Upper Mantle (receiver side)
        snks_fig [figure] - a 2 panel figure showing SnKS coverage in D'' and the Upper Mantle (receiver side)
    '''
    domains = np.loadtxt('T3_global.bins',skiprows=1)
    counts = np.loadtxt(cfile)
    doms2plot = domains[np.isin(domains[:,0],counts[:,0])]
    # Columns 1-3 are lowermost-mantle counts, 4-6 are receiver-side counts
    scs_lc = counts[:,1]
    snks_lc = counts[:,2] + counts[:,3]
    scs_rc = counts[:,4]
    snks_rc = counts[:,5] + counts[:,6]
    # ScS figure (the four panels were copy-pasted in the original; they are
    # now drawn by the shared _coverage_panel helper)
    scs_fig = plt.figure(figsize=(10,16))
    _coverage_panel(scs_fig, 211, domains, doms2plot, scs_lc, extent, cmin,
                    r"ScS coverage in D$''$ domains")
    _coverage_panel(scs_fig, 212, domains, doms2plot, scs_rc, extent, cmin,
                    r"ScS coverage in upper mantle (receiver side) domains")
    # SnKS figure
    snks_fig = plt.figure(figsize=(10,16))
    _coverage_panel(snks_fig, 211, domains, doms2plot, snks_lc, extent, cmin,
                    r"SKS/ SKKS coverage in D$''$ domains")
    _coverage_panel(snks_fig, 212, domains, doms2plot, snks_rc, extent, cmin,
                    r"SKS/ SKKS coverage in upper mantle (receiver side) domains")
    if save:
        scs_fig.savefig('ScS_coverage_maps_1158.png',format='png', dpi=600)
        snks_fig.savefig('SnKS_coverage_maps_1158.png',format='png', dpi=600)
def low_most_mantle_phasecounts(cfile='ScS_SnKS.counts', extent=[-170,-60,0,70], cmin=0,save=False):
    '''
    Make a two-panel map of D`` coverage broken down by phase (ScS, SnKS).

    Args:
        cfile [str] - path to the textfile containing the counts data (generated by routine in Pathset.py)
        extent [list] - geographic extent of maps [lon_min, lon_max, lat_min, lat_max]. NB current cartopy implementation does not wrap around -180/180. Pick a side!
        cmin [int] - the minimum number of counts to draw a polygon for.
        save [bool] - switch for if you want to save plots or not
    Returns:
        fig [figure] - a 2 panel figure showing coverage in D'' for ScS and SnKS
    '''
    domains = np.loadtxt('T3_global.bins',skiprows=1)
    counts = np.loadtxt(cfile)
    doms2plot = domains[np.isin(domains[:,0],counts[:,0])]
    scs_lc = counts[:,1]
    snks_lc = counts[:,2] + counts[:,3]
    # ScS panel
    fig = plt.figure(figsize=(10,16))
    ax1 = fig.add_subplot(211,projection=ccrs.PlateCarree())
    ax1.set_extent(extent, crs=ccrs.PlateCarree())
    ax1.add_feature(cfeature.GSHHSFeature(levels=[1],scale='auto'))
    draw_trigonal_doms(ax1, domains)
    scs = draw_tri_patch(ax1, doms2plot, scs_lc, cmin)
    ax1.set_title(r"ScS coverage in D$''$ domains")
    grd = ax1.gridlines(draw_labels=True,linewidth=0)
    grd.top_labels = None
    # SnKS panel
    ax2 = fig.add_subplot(212, projection=ccrs.PlateCarree())
    ax2.set_extent(extent, crs=ccrs.PlateCarree())
    ax2.add_feature(cfeature.GSHHSFeature(levels=[1],scale='auto'))
    draw_trigonal_doms(ax2, domains)
    snks = draw_tri_patch(ax2, doms2plot, snks_lc, cmin)
    ax2.set_title(r"SnKS coverage in D$''$ domains")
    grd2 = ax2.gridlines(draw_labels=True,linewidth=0)
    # BUG FIX: the original wrote `grd2.top_lables = None` (typo), which only
    # set a meaningless attribute and left the top labels visible.
    grd2.top_labels = None
    if save:
        fig.savefig('highquality_lowMM_coverage.png',format='png',dpi=400)
def plot_phase_data(file='E_pacific_SNR10_goodQ.sdb',save=False,fname=None):
    '''
    Plot the input phase data: event locations, stations, and the D'' pierce
    points of ScS, SKS and SKKS phases.

    Args:
        file (str) - whitespace-delimited sdb filename under ~/SWSTomo/BlueCrystal/
        save (bool) - if True, save the figure as '<fname>.png' at 600 dpi
        fname (str) - basename used for the plot title and the saved file
    '''
    proj = ccrs.PlateCarree()
    fig = plt.figure(figsize=(10,8))
    ax = fig.add_subplot(111, projection=proj)
    data = pd.read_csv('~/SWSTomo/BlueCrystal/{}'.format(file),delim_whitespace=True)
    scs = data[data.PHASE == 'ScS']
    sks = data[data.PHASE == 'SKS']
    skks = data[data.PHASE == 'SKKS']
    ax.set_extent([-180,-80,0,90], crs=proj)
    ax.add_feature(cfeature.GSHHSFeature(levels=[1],scale='auto'))
    # Geographic selection window for D'' pierce points.
    # NOTE(review): `crit` is indexed on the full dataframe and then applied
    # to the scs/sks/skks subsets below; pandas aligns the boolean mask on
    # the index, which works here but is fragile - confirm intended.
    crit = (data.LOWMM_LON > -170) & (data.LOWMM_LAT > 0)
    # NOTE(review): this plots ALL events passing `crit`, not only ScS ones,
    # despite the 'ScS Events' label - confirm intended.
    ax.plot(data.EVLO[crit], data.EVLA[crit], color='black', marker='*', markersize=10,
            label='ScS Events', linestyle='', transform=proj)
    ax.plot(scs.STLO[crit], scs.STLA[crit], markerfacecolor='red', markersize=8,
            markeredgecolor='black', linestyle='', marker='v', transform=proj, label='Stations')
    ax.plot(scs.LOWMM_LON[crit], scs.LOWMM_LAT[crit], color='blue', markeredgecolor='black',
            linestyle='', marker='o', transform=proj, label='ScS', markersize=8)
    ax.plot(sks.LOWMM_LON[crit], sks.LOWMM_LAT[crit], color='red', markeredgecolor='black',
            linestyle='', marker='o', transform=proj, label='SKS', markersize=8)
    ax.plot(skks.LOWMM_LON[crit], skks.LOWMM_LAT[crit], color='orange', markeredgecolor='black',
            linestyle='', marker='o', transform=proj, label='SKKS', markersize=8)
    ax.legend()
    # BUG FIX: fname defaults to None, and None.split('_') raised
    # AttributeError; only build a title when a name is supplied.
    if fname is not None:
        title = ' '.join(fname.split('_'))
        ax.set_title(title)
    grd = ax.gridlines(draw_labels=True,linewidth=0)
    grd.top_labels = None
    if save:
        plt.savefig('{}.png'.format(fname),dpi=600)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"cartopy.crs.PlateCarree",
"numpy.isin",
"matplotlib.collections.PatchCollection",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.colorbar.make_axes",
"numpy.zeros",
"numpy.loadtxt",
"numpy.rad2deg",
"cartopy.feature.GSHHSFeature",
... | [((698, 716), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (714, 716), True, 'import cartopy.crs as ccrs\n'), ((1609, 1641), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['trig'], {'alpha': '(0.6)'}), '(trig, alpha=0.6)\n', (1624, 1641), False, 'from matplotlib.collections import PatchCollection\n'), ((1962, 2002), 'numpy.loadtxt', 'np.loadtxt', (['"""T3_global.bins"""'], {'skiprows': '(1)'}), "('T3_global.bins', skiprows=1)\n", (1972, 2002), True, 'import numpy as np\n'), ((2015, 2032), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {}), '(cfile)\n', (2025, 2032), True, 'import numpy as np\n'), ((2160, 2188), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 22)'}), '(figsize=(10, 22))\n', (2170, 2188), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3160, 3162), True, 'import matplotlib.pyplot as plt\n'), ((3359, 3399), 'numpy.loadtxt', 'np.loadtxt', (['"""T3_global.bins"""'], {'skiprows': '(1)'}), "('T3_global.bins', skiprows=1)\n", (3369, 3399), True, 'import numpy as np\n'), ((3412, 3429), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {}), '(cfile)\n', (3422, 3429), True, 'import numpy as np\n'), ((3557, 3585), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (3567, 3585), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4518), 'numpy.loadtxt', 'np.loadtxt', (['mfile'], {}), '(mfile)\n', (4511, 4518), True, 'import numpy as np\n'), ((4991, 5019), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 11)'}), '(figsize=(11, 11))\n', (5001, 5019), True, 'import matplotlib.pyplot as plt\n'), ((5791, 5801), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5799, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5876, 5885), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5883, 5885), True, 'import matplotlib.pyplot as plt\n'), ((6139, 6171), 
'matplotlib.collections.PatchCollection', 'PatchCollection', (['trig'], {'alpha': '(0.6)'}), '(trig, alpha=0.6)\n', (6154, 6171), False, 'from matplotlib.collections import PatchCollection\n'), ((6389, 6452), 'matplotlib.colorbar.make_axes', 'colorbar.make_axes', (['ax'], {'location': '"""bottom"""', 'pad': '(0.05)', 'shrink': '(0.7)'}), "(ax, location='bottom', pad=0.05, shrink=0.7)\n", (6407, 6452), False, 'from matplotlib import colorbar\n'), ((7343, 7371), 'numpy.loadtxt', 'np.loadtxt', (['bins'], {'skiprows': '(1)'}), '(bins, skiprows=1)\n', (7353, 7371), True, 'import numpy as np\n'), ((7384, 7401), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {}), '(cfile)\n', (7394, 7401), True, 'import numpy as np\n'), ((7471, 7499), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (7481, 7499), True, 'import matplotlib.pyplot as plt\n'), ((7818, 7851), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['trigs'], {'alpha': '(0.6)'}), '(trigs, alpha=0.6)\n', (7833, 7851), False, 'from matplotlib.collections import PatchCollection\n'), ((8659, 8722), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Global_phasecount_heatmap"""'], {'format': '"""png"""', 'dpi': '(500)'}), "('Global_phasecount_heatmap', format='png', dpi=500)\n", (8670, 8722), True, 'import matplotlib.pyplot as plt\n'), ((9368, 9399), 'numpy.loadtxt', 'np.loadtxt', (['domfile'], {'skiprows': '(1)'}), '(domfile, skiprows=1)\n', (9378, 9399), True, 'import numpy as np\n'), ((9409, 9435), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (9419, 9435), True, 'import matplotlib.pyplot as plt\n'), ((9871, 9927), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""domain_map_w_IDs.eps"""'], {'bbox_inches': '"""tight"""'}), "('domain_map_w_IDs.eps', bbox_inches='tight')\n", (9882, 9927), True, 'import matplotlib.pyplot as plt\n'), ((10865, 10905), 'numpy.loadtxt', 'np.loadtxt', (['"""T3_global.bins"""'], {'skiprows': 
'(1)'}), "('T3_global.bins', skiprows=1)\n", (10875, 10905), True, 'import numpy as np\n'), ((10918, 10935), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {}), '(cfile)\n', (10928, 10935), True, 'import numpy as np\n'), ((11166, 11194), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 16)'}), '(figsize=(10, 16))\n', (11176, 11194), True, 'import matplotlib.pyplot as plt\n'), ((12123, 12151), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 16)'}), '(figsize=(10, 16))\n', (12133, 12151), True, 'import matplotlib.pyplot as plt\n'), ((14030, 14070), 'numpy.loadtxt', 'np.loadtxt', (['"""T3_global.bins"""'], {'skiprows': '(1)'}), "('T3_global.bins', skiprows=1)\n", (14040, 14070), True, 'import numpy as np\n'), ((14083, 14100), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {}), '(cfile)\n', (14093, 14100), True, 'import numpy as np\n'), ((14257, 14285), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 16)'}), '(figsize=(10, 16))\n', (14267, 14285), True, 'import matplotlib.pyplot as plt\n'), ((15363, 15381), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (15379, 15381), True, 'import cartopy.crs as ccrs\n'), ((15392, 15419), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (15402, 15419), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2093), 'numpy.isin', 'np.isin', (['domains[:, 0]', 'counts[:, 0]'], {}), '(domains[:, 0], counts[:, 0])\n', (2064, 2093), True, 'import numpy as np\n'), ((2323, 2370), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""high"""'}), "(levels=[1], scale='high')\n", (2344, 2370), True, 'import cartopy.feature as cfeature\n'), ((2803, 2850), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""high"""'}), "(levels=[1], scale='high')\n", (2824, 2850), True, 'import cartopy.feature as cfeature\n'), ((3454, 3490), 'numpy.isin', 'np.isin', (['domains[:, 0]', 
'counts[:, 0]'], {}), '(domains[:, 0], counts[:, 0])\n', (3461, 3490), True, 'import numpy as np\n'), ((3720, 3767), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""high"""'}), "(levels=[1], scale='high')\n", (3741, 3767), True, 'import cartopy.feature as cfeature\n'), ((4960, 4975), 'numpy.rad2deg', 'np.rad2deg', (['phi'], {}), '(phi)\n', (4970, 4975), True, 'import numpy as np\n'), ((5157, 5204), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""high"""'}), "(levels=[1], scale='high')\n", (5178, 5204), True, 'import cartopy.feature as cfeature\n'), ((5717, 5782), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../SchafferSurfaceWaveModels/{modelname}"""'], {'dpi': '(400)'}), "(f'../SchafferSurfaceWaveModels/{modelname}', dpi=400)\n", (5728, 5782), True, 'import matplotlib.pyplot as plt\n'), ((6190, 6221), 'numpy.array', 'np.array', (['counts[counts > cmin]'], {}), '(counts[counts > cmin])\n', (6198, 6221), True, 'import numpy as np\n'), ((7426, 7462), 'numpy.isin', 'np.isin', (['domains[:, 0]', 'counts[:, 0]'], {}), '(domains[:, 0], counts[:, 0])\n', (7433, 7462), True, 'import numpy as np\n'), ((7634, 7681), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""high"""'}), "(levels=[1], scale='high')\n", (7655, 7681), True, 'import cartopy.feature as cfeature\n'), ((7870, 7911), 'numpy.array', 'np.array', (['ldom_counts[ldom_counts > cmin]'], {}), '(ldom_counts[ldom_counts > cmin])\n', (7878, 7911), True, 'import numpy as np\n'), ((8267, 8314), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""high"""'}), "(levels=[1], scale='high')\n", (8288, 8314), True, 'import cartopy.feature as cfeature\n'), ((9516, 9563), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (9537, 9563), True, 'import cartopy.feature as 
cfeature\n'), ((10960, 10996), 'numpy.isin', 'np.isin', (['domains[:, 0]', 'counts[:, 0]'], {}), '(domains[:, 0], counts[:, 0])\n', (10967, 10996), True, 'import numpy as np\n'), ((11336, 11383), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (11357, 11383), True, 'import cartopy.feature as cfeature\n'), ((11774, 11821), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (11795, 11821), True, 'import cartopy.feature as cfeature\n'), ((12294, 12341), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (12315, 12341), True, 'import cartopy.feature as cfeature\n'), ((12748, 12795), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (12769, 12795), True, 'import cartopy.feature as cfeature\n'), ((14125, 14161), 'numpy.isin', 'np.isin', (['domains[:, 0]', 'counts[:, 0]'], {}), '(domains[:, 0], counts[:, 0])\n', (14132, 14161), True, 'import numpy as np\n'), ((14423, 14470), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (14444, 14470), True, 'import cartopy.feature as cfeature\n'), ((14850, 14897), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (14871, 14897), True, 'import cartopy.feature as cfeature\n'), ((15738, 15785), 'cartopy.feature.GSHHSFeature', 'cfeature.GSHHSFeature', ([], {'levels': '[1]', 'scale': '"""auto"""'}), "(levels=[1], scale='auto')\n", (15759, 15785), True, 'import cartopy.feature as cfeature\n'), ((1444, 1460), 'numpy.zeros', 'np.zeros', (['[3, 2]'], {}), '([3, 2])\n', (1452, 1460), True, 'import numpy as np\n'), ((2228, 2246), 
'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2244, 2246), True, 'import cartopy.crs as ccrs\n'), ((2278, 2296), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2294, 2296), True, 'import cartopy.crs as ccrs\n'), ((2706, 2724), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2722, 2724), True, 'import cartopy.crs as ccrs\n'), ((2757, 2775), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2773, 2775), True, 'import cartopy.crs as ccrs\n'), ((3625, 3643), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3641, 3643), True, 'import cartopy.crs as ccrs\n'), ((3675, 3693), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3691, 3693), True, 'import cartopy.crs as ccrs\n'), ((5059, 5077), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5075, 5077), True, 'import cartopy.crs as ccrs\n'), ((5301, 5319), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5317, 5319), True, 'import cartopy.crs as ccrs\n'), ((5520, 5538), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5536, 5538), True, 'import cartopy.crs as ccrs\n'), ((5978, 5994), 'numpy.zeros', 'np.zeros', (['[3, 2]'], {}), '([3, 2])\n', (5986, 5994), True, 'import numpy as np\n'), ((7539, 7557), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7555, 7557), True, 'import cartopy.crs as ccrs\n'), ((7589, 7607), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7605, 7607), True, 'import cartopy.crs as ccrs\n'), ((8170, 8188), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8186, 8188), True, 'import cartopy.crs as ccrs\n'), ((8221, 8239), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8237, 8239), True, 'import cartopy.crs as ccrs\n'), ((9476, 9494), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9492, 9494), True, 'import cartopy.crs as ccrs\n'), ((9842, 9860), 
'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9858, 9860), True, 'import cartopy.crs as ccrs\n'), ((11239, 11257), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11255, 11257), True, 'import cartopy.crs as ccrs\n'), ((11290, 11308), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11306, 11308), True, 'import cartopy.crs as ccrs\n'), ((11683, 11701), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11699, 11701), True, 'import cartopy.crs as ccrs\n'), ((11734, 11752), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11750, 11752), True, 'import cartopy.crs as ccrs\n'), ((12197, 12215), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12213, 12215), True, 'import cartopy.crs as ccrs\n'), ((12248, 12266), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12264, 12266), True, 'import cartopy.crs as ccrs\n'), ((12657, 12675), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12673, 12675), True, 'import cartopy.crs as ccrs\n'), ((12708, 12726), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12724, 12726), True, 'import cartopy.crs as ccrs\n'), ((14326, 14344), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (14342, 14344), True, 'import cartopy.crs as ccrs\n'), ((14377, 14395), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (14393, 14395), True, 'import cartopy.crs as ccrs\n'), ((14759, 14777), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (14775, 14777), True, 'import cartopy.crs as ccrs\n'), ((14810, 14828), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (14826, 14828), True, 'import cartopy.crs as ccrs\n'), ((1563, 1593), 'matplotlib.patches.Polygon', 'Polygon', (['vertices'], {'closed': '(True)'}), '(vertices, closed=True)\n', (1570, 1593), False, 'from matplotlib.patches import Polygon\n'), ((6097, 6127), 'matplotlib.patches.Polygon', 
'Polygon', (['vertices'], {'closed': '(True)'}), '(vertices, closed=True)\n', (6104, 6127), False, 'from matplotlib.patches import Polygon\n'), ((2632, 2650), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2648, 2650), True, 'import cartopy.crs as ccrs\n'), ((3115, 3133), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3131, 3133), True, 'import cartopy.crs as ccrs\n'), ((9786, 9804), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9802, 9804), True, 'import cartopy.crs as ccrs\n')] |
# -*- coding: utf-8 -*-
import warnings
from contextlib import redirect_stderr, redirect_stdout, suppress
from copy import deepcopy
from logging import Logger, LogRecord
from os import devnull
from typing import Optional, Sequence, Sized, Tuple, TypeVar
import numpy as np
import pandas as pd
import sklearn.datasets
from funcy import complement, decorator, lfilter
from funcy.decorators import Call
from pada.check.exception import PadaError
from pada.utils.log import logger
def asarray2d(a: np.ndarray) -> np.ndarray:
    """Cast *a* to an array with at least two dimensions.

    A 1-d input of length n comes back as an (n, 1) column; any other
    input is returned unchanged after conversion to ndarray.
    """
    arr = np.asarray(a)
    return arr.reshape(-1, 1) if arr.ndim == 1 else arr
def get_arr_desc(arr: np.ndarray) -> str:
    """Describe *arr* as '<array type> <array shape>'.

    Objects without a ``shape`` attribute get the placeholder
    '<no shape>'.
    """
    shape = getattr(arr, 'shape', '<no shape>')
    return f'{type(arr).__name__} {shape}'
def indent(text: str, n=4) -> str:
    """Indent every line of *text* (including empty ones) by *n* spaces."""
    prefix = ' ' * n
    return '\n'.join(f'{prefix}{line}' for line in text.split('\n'))
def make_plural_suffix(obj: Sized, suffix='s') -> str:
    """Return *suffix* unless *obj* has exactly one element."""
    return '' if len(obj) == 1 else suffix
def has_nans(obj) -> bool:
    """Check whether *obj* contains any NaN values.

    Reduces the result of np.isnan until it is scalar, so the check
    works whether np.isnan yields a scalar or an array (its behavior
    over axes differed between py34 and py35+).
    """
    result = np.isnan(obj)
    while np.ndim(result):
        result = np.any(result)
    return bool(result)
@decorator
def dfilter(call: Call, pred):
    """Decorate a callable so its returned sequence is filtered by *pred*

    Example::

        >>> @dfilter(lambda x: x >= 0)
        ... def numbers():
        ...     return [-1, 2, 0, -2]
        >>> numbers()
        [2, 0]
    """
    return [item for item in call() if pred(item)]
def load_sklearn_df(name: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Load the sklearn toy dataset *name* as a (features, target) pair.

    Looks up ``sklearn.datasets.load_<name>`` and converts the result
    into a DataFrame of features (columns taken from the dataset's
    feature names) and a Series of targets named 'target'.
    """
    loader = getattr(sklearn.datasets, f'load_{name}')
    bunch = loader()
    X_df = pd.DataFrame(data=bunch.data, columns=bunch.feature_names)
    y_df = pd.Series(bunch.target, name='target')
    return X_df, y_df
@decorator
def quiet(call: Call):
    """Invoke the wrapped callable with stdout, stderr and warnings silenced."""
    with open(devnull, 'w') as sink, \
            redirect_stderr(sink), redirect_stdout(sink), \
            warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return call()
class DeepcopyMixin:
    """Mixin supplying a generic, memo-aware ``__deepcopy__``."""

    def __deepcopy__(self, memo: dict):
        # Create an uninitialised instance of the concrete class and
        # register it in the memo *before* copying attributes, so cyclic
        # references back to ``self`` resolve to the new copy.
        cls = type(self)
        clone = cls.__new__(cls)
        memo[id(self)] = clone
        for name, value in self.__dict__.items():
            setattr(clone, name, deepcopy(value, memo))
        return clone
_T = TypeVar('_T')


def one_or_raise(seq: Sequence[_T]) -> _T:
    """Return the single element of *seq*; raise if there isn't exactly one."""
    n = len(seq)
    if n != 1:
        raise ValueError(f'Expected exactly 1 element, but got {n}')
    return seq[0]
def warn(msg: str):
    """Issue a warning message with category ``PadaError``.

    NOTE(review): the docstring previously said "BalletWarning", but the
    code passes ``PadaError`` — presumably a Warning subclass in this
    fork; confirm (``warnings.warn`` raises TypeError for non-Warning
    categories).
    """
    warnings.warn(msg, category=PadaError)
@decorator
def raiseifnone(call: Call):
    """Decorate a function to raise a ValueError if its result is None"""
    result = call()
    if result is None:
        raise ValueError
    return result
def falsy(o) -> bool:
    """Check whether o is falsy

    A falsy value is one of the following:
    1. the singleton value `False`
    2. the string 'false' (ignoring case)
    3. the empty string
    Any other value (including other "empty" objects) is not falsy here.
    """
    if isinstance(o, bool):
        return not o
    return isinstance(o, str) and o.lower() in ('false', '')
def truthy(o) -> bool:
    """Check whether o is truthy

    In this case, a truthy value is any value that is not falsy.
    """
    return not falsy(o)
@decorator
def nonnegative(call: Call, name: Optional[str] = None):
    """Clamp the decorated function's return value to be non-negative.

    If the result compares less than zero it is replaced with 0.0 and a
    warning naming the wrapped function is logged.  Results that do not
    support ``<`` (raising TypeError on comparison) pass through
    untouched.

    Args:
        name: label used in the warning message; if None it is derived
            from the wrapped function's name (e.g. ``compute_mean_score``
            -> ``MeanScore``), falling back to ``'Result'``.
    """
    result = call()
    with suppress(TypeError):
        if result < 0:
            result = 0.0
            # Only warn when we actually clamped.  (Previously the
            # name formatting and warning sat outside this branch, so
            # the warning fired on every call, even for valid results —
            # contradicting the docstring and flooding the filter in
            # dont_log_nonnegative.)
            if name is None:
                try:
                    pieces = call._func.__name__.split('_')[1:]
                    name = ''.join(map(str.capitalize, pieces))
                except RuntimeError:
                    # NOTE(review): an AttributeError (missing _func or
                    # __name__) would escape this guard — confirm
                    # RuntimeError is the intended exception here.
                    name = 'Result'
            logger.warning(f'{name} should be non-negative.')
    return result
@decorator
def dont_log_nonnegative(call: Call, logger: Logger = logger):
    """Run the wrapped callable with 'should be non-negative' log records muted."""
    def _mute(record: LogRecord) -> int:
        # Returning 0 drops the record, 1 keeps it.
        return 0 if 'should be non-negative' in record.msg else 1

    logger.addFilter(_mute)
    try:
        return call()
    finally:
        logger.removeFilter(_mute)
# re-export cookiecutter work_in
# work_in = cookiecutter.utils.work_in
def skipna(a: np.ndarray, b: np.ndarray, *c: np.ndarray, how: str = 'left'):
    """Drop rows of a and b (and extra arrays) that contain missing values

    The arrays must all have equal length along the first dimension.

    Args:
        a:
            first array
        b:
            second array
        *c:
            any additional arrays
        how:
            how rows are selected for dropping, one of 'left', 'any' or
            'all'.  'left' drops every row where *a* has a missing
            value; 'any' drops rows where at least one of the arrays
            has a missing value; 'all' drops rows where every array has
            a missing value.  Defaults to 'left'.

    Returns:
        tuple of a, b (and any additional arrays) with the selected rows
        removed according to ``how``; all outputs share the same length.
    """
    if how not in ('left', 'any', 'all'):
        raise ValueError(f'Invalid value for how: {how}')

    def _nan_rows(arr):
        # Boolean vector of shape (n_rows,): True where the row has NaN.
        mask = np.isnan(arr)
        if arr.ndim > 1:
            mask = mask.any(axis=1)
            mask = mask.squeeze()
        assert mask.shape == (arr.shape[0],)
        return mask

    if how == 'left':
        drop = _nan_rows(a)
    elif how == 'any':
        combined = np.concatenate(
            (asarray2d(a), asarray2d(b), *(asarray2d(extra) for extra in c)),
            axis=1
        )
        drop = _nan_rows(combined)
    else:  # how == 'all'
        drop = _nan_rows(a)
        for extra in (b, *c):
            drop &= _nan_rows(extra)

    keep = ~drop
    return (a[keep], b[keep], *(extra[keep] for extra in c))
| [
"pandas.Series",
"contextlib.redirect_stdout",
"copy.deepcopy",
"pada.utils.log.logger.addFilter",
"numpy.asarray",
"numpy.ndim",
"numpy.any",
"warnings.catch_warnings",
"contextlib.redirect_stderr",
"warnings.simplefilter",
"numpy.isnan",
"contextlib.suppress",
"pada.utils.log.logger.remove... | [((2642, 2655), 'typing.TypeVar', 'TypeVar', (['"""_T"""'], {}), "('_T')\n", (2649, 2655), False, 'from typing import Optional, Sequence, Sized, Tuple, TypeVar\n'), ((3522, 3539), 'funcy.complement', 'complement', (['falsy'], {}), '(falsy)\n', (3532, 3539), False, 'from funcy import complement, decorator, lfilter\n'), ((562, 575), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (572, 575), True, 'import numpy as np\n'), ((1414, 1427), 'numpy.isnan', 'np.isnan', (['obj'], {}), '(obj)\n', (1422, 1427), True, 'import numpy as np\n'), ((1438, 1451), 'numpy.ndim', 'np.ndim', (['nans'], {}), '(nans)\n', (1445, 1451), True, 'import numpy as np\n'), ((1976, 2032), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data.data', 'columns': 'data.feature_names'}), '(data=data.data, columns=data.feature_names)\n', (1988, 2032), True, 'import pandas as pd\n'), ((2044, 2081), 'pandas.Series', 'pd.Series', (['data.target'], {'name': '"""target"""'}), "(data.target, name='target')\n", (2053, 2081), True, 'import pandas as pd\n'), ((2920, 2958), 'warnings.warn', 'warnings.warn', (['msg'], {'category': 'PadaError'}), '(msg, category=PadaError)\n', (2933, 2958), False, 'import warnings\n'), ((4436, 4460), 'pada.utils.log.logger.addFilter', 'logger.addFilter', (['filter'], {}), '(filter)\n', (4452, 4460), False, 'from pada.utils.log import logger\n'), ((1468, 1480), 'numpy.any', 'np.any', (['nans'], {}), '(nans)\n', (1474, 1480), True, 'import numpy as np\n'), ((3808, 3827), 'contextlib.suppress', 'suppress', (['TypeError'], {}), '(TypeError)\n', (3816, 3827), False, 'from contextlib import redirect_stderr, redirect_stdout, suppress\n'), ((4513, 4540), 'pada.utils.log.logger.removeFilter', 'logger.removeFilter', (['filter'], {}), '(filter)\n', (4532, 4540), False, 'from pada.utils.log import logger\n'), ((5753, 5766), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (5761, 5766), True, 'import numpy as np\n'), ((2191, 2213), 
'contextlib.redirect_stderr', 'redirect_stderr', (['fnull'], {}), '(fnull)\n', (2206, 2213), False, 'from contextlib import redirect_stderr, redirect_stdout, suppress\n'), ((2215, 2237), 'contextlib.redirect_stdout', 'redirect_stdout', (['fnull'], {}), '(fnull)\n', (2230, 2237), False, 'from contextlib import redirect_stderr, redirect_stdout, suppress\n'), ((4180, 4229), 'pada.utils.log.logger.warning', 'logger.warning', (['f"""{name} should be non-negative."""'], {}), "(f'{name} should be non-negative.')\n", (4194, 4229), False, 'from pada.utils.log import logger\n'), ((2256, 2281), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2279, 2281), False, 'import warnings\n'), ((2299, 2330), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2320, 2330), False, 'import warnings\n'), ((2594, 2611), 'copy.deepcopy', 'deepcopy', (['v', 'memo'], {}), '(v, memo)\n', (2602, 2611), False, 'from copy import deepcopy\n')] |
"""Create example plot with different metrics.

Example
-------
python docs/stats_explainer.py
"""
import matplotlib.pyplot as plt
import numpy as np

from asreviewcontrib.insights.plot import _fix_start_tick

# The recall after reading a given number of documents is the fraction of
# relevant records found at that moment (recall = n_pos_records / n_records).
# The old RRF@X (relevant records found) is basically the same as the recall.
# WSS@X (work saved over sampling) is the number of records you need to read
# less to find the fraction X of relevant records
# (wss = recall - recall_random).
# ERF@X (extra records found, a suggested name) is the number of extra
# relevant records found after reading a fraction X of the total records.

# Fabricate data: a cube-root shaped discovery curve over 1000 records.
n_docs = 1000
n_pos_docs = 30

percentages = np.array([frac ** (1 / 3) for frac in np.linspace(0, 1, n_docs)])
n_docs_found = np.round(percentages * n_pos_docs)
# 1 marks a step where a new relevant record is found.
labels = list(n_docs_found[1:] - n_docs_found[:-1]) + [0]
labels[0] = 1
labels[5] = 1
labels[8] = 1

# Plot the recall curve.
fig, ax = plt.subplots()
x = list(range(1, n_docs + 1))

recall = np.cumsum(labels) / np.sum(labels)
ax.step(x, recall, where='post')

# Random-screening baseline.
recall_random = np.round(np.linspace(0, n_pos_docs, n_docs)) / np.sum(labels)
ax.step(x, recall_random, where='post', color="black")

# ERF@.137 line (recall > 0.5 at 137, recall_random reaches 0.5 at 517).
ax.plot((137, 137), (137 / 1000, recall[137]), color='red')
erf_x_offset = -70
ax.text(137 + erf_x_offset, (137 / 1000 + recall[137]) * 0.9 / 2,
        'ERF',
        color='red')

# WSS@.5 line.
ax.plot((137, 517), (recall[137], recall[137]), color='blue')
wss_y_offset = 0.03
ax.text((137 + recall[137] * 1000) / 2,
        recall[137] + wss_y_offset,
        'WSS',
        color='blue')

ax.set_title("Explaining Recall, WSS and ERF")
ax.set(xlabel='#', ylabel='Recall')
ax.set_ylim([-0.05, 1.05])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
ax.xaxis.get_major_locator().set_params(integer=True)
_fix_start_tick(ax)

fig.savefig('docs/stats_explainer.png')
| [
"numpy.sum",
"numpy.linspace",
"asreviewcontrib.insights.plot._fix_start_tick",
"numpy.cumsum",
"matplotlib.pyplot.subplots",
"numpy.round"
] | [((919, 953), 'numpy.round', 'np.round', (['(percentages * n_pos_docs)'], {}), '(percentages * n_pos_docs)\n', (927, 953), True, 'import numpy as np\n'), ((1135, 1149), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1147, 1149), True, 'import matplotlib.pyplot as plt\n'), ((2102, 2121), 'asreviewcontrib.insights.plot._fix_start_tick', '_fix_start_tick', (['ax'], {}), '(ax)\n', (2117, 2121), False, 'from asreviewcontrib.insights.plot import _fix_start_tick\n'), ((1208, 1225), 'numpy.cumsum', 'np.cumsum', (['labels'], {}), '(labels)\n', (1217, 1225), True, 'import numpy as np\n'), ((1228, 1242), 'numpy.sum', 'np.sum', (['labels'], {}), '(labels)\n', (1234, 1242), True, 'import numpy as np\n'), ((1349, 1363), 'numpy.sum', 'np.sum', (['labels'], {}), '(labels)\n', (1355, 1363), True, 'import numpy as np\n'), ((1311, 1345), 'numpy.linspace', 'np.linspace', (['(0)', 'n_pos_docs', 'n_docs'], {}), '(0, n_pos_docs, n_docs)\n', (1322, 1345), True, 'import numpy as np\n'), ((876, 901), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_docs'], {}), '(0, 1, n_docs)\n', (887, 901), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer."""
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import layers
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import test_utils
import numpy as np
class OptimizerTest(test_utils.TestCase):
@tf.function
def _FpropBprop(self, fc_layer, opt):
inputs = tf.zeros(shape=[2, 4, 3], dtype=tf.float64)
output = fc_layer.FPropDefaultTheta(inputs)
loss = tf.reduce_sum(output)
var_grads = py_utils.ComputeGradients(loss, fc_layer.vars)
# Name becomes meaningless in Eager mode. Here we just check whether
# errors get raised.
update_op = opt.Apply(1e-1, var_grads)
self.assertIn('composite_optimizer_train_op', update_op.name)
def testCompositeOptimizerName(self):
adam_op = optimizer.Adam.Params()
rmsprop_op = optimizer.RMSProp.Params()
adam_rmsprop_opt = optimizer.CompositeOptimizer.Params().Set(
optimizer_map={
'fc/w': (adam_op, 1.),
'fc/b': (rmsprop_op, 1.),
'default_optimizer': (adam_op, 1.)
}).Instantiate()
params = layers.FCLayer.Params()
params.name = 'fc'
params.dtype = tf.float64
params.input_dim = 3
params.output_dim = 2
params.batch_norm = False
fc_layer = layers.FCLayer(params)
self._FpropBprop(fc_layer, adam_rmsprop_opt)
def testCompositeOptimizerRaises(self):
sgd_op = optimizer.SGD.Params()
adagrad_op = optimizer.Adagrad.Params()
overlapping_comp_opt = optimizer.CompositeOptimizer.Params().Set(
optimizer_map={
'fc/w': (sgd_op, 1.),
'.': (adagrad_op, 1.),
'default_optimizer': (adagrad_op, 1.)
}).Instantiate()
params = layers.FCLayer.Params()
params.name = 'fc'
params.dtype = tf.float64
params.input_dim = 3
params.output_dim = 2
params.batch_norm = False
fc_layer = layers.FCLayer(params)
with self.assertRaisesRegex(
Exception,
'Variable fc/w/var:0 is matched 2 times by regex',
):
self._FpropBprop(fc_layer, overlapping_comp_opt)
  def testAccumulator(self):
    """Accumulator(SGD) over 2 micro-steps matches explicit gradient averaging.

    Compares
      - explicit averaging: two gradients, each scaled by 1/2, applied via
        plain SGD, against
      - the Accumulator(SGD) optimizer with accum_steps=2, which should
        effectively perform the same averaged update over 2 steps.
    """
    np.random.seed(12345)
    np_input1 = np.random.normal(0.1, 0.5, [2, 4, 3])
    np.random.seed(12346)
    np_input2 = np.random.normal(0.1, 0.5, [2, 4, 3])
    tf.random.set_seed(123456)
    params = layers.ProjectionLayer.Params()
    params.name = 'proj'
    params.dtype = tf.float64
    params.input_dim = 3
    params.output_dim = 2
    params.params_init = py_utils.WeightInit.Gaussian(0.01, 123456)
    params.batch_norm = False
    proj_layer = layers.ProjectionLayer(params)
    inputs1 = np_input1
    in_padding1 = tf.zeros([2, 4, 1], dtype=tf.float64)
    inputs2 = np_input2
    in_padding2 = tf.zeros([2, 4, 1], dtype=tf.float64)
    op = optimizer.SGD.Params()
    opt = op.Instantiate()
    # Get `snapshots` of the variables before any update is applied.
    vars1 = [v.read_value() for v in proj_layer.vars.Flatten()]
    lr = lambda: 1e-1

    @tf.function
    def _Apply1(proj_layer, opt):
      # Two independent losses; each gradient is scaled by 1/2 before being
      # applied, i.e. an explicit average of the two updates.
      output1 = proj_layer.FPropDefaultTheta(inputs1, in_padding1)
      output2 = proj_layer.FPropDefaultTheta(inputs2, in_padding2)
      loss1 = tf.reduce_sum(output1)
      loss2 = tf.reduce_sum(output2)
      var_grads1 = py_utils.ComputeGradients(loss1, proj_layer.vars)
      var_grads2 = py_utils.ComputeGradients(loss2, proj_layer.vars)
      _ = opt.Apply(lr, py_utils.ApplyGradMultiplier(var_grads1, 1. / 2.))
      _ = opt.Apply(lr, py_utils.ApplyGradMultiplier(var_grads2, 1. / 2.))
      vars1_1 = proj_layer.vars.Flatten()
      grads1_1 = var_grads1.Transform(tuple)
      grads1_2 = var_grads2.Transform(tuple)
      return vars1_1, grads1_1, grads1_2

    vars1_1, grads1_1, grads1_2 = _Apply1(proj_layer, opt)

    # Rebuild an identically initialized layer (same init seed) for the
    # Accumulator run.
    tf.random.set_seed(123456)
    params = layers.ProjectionLayer.Params()
    params.name = 'proj2'
    params.dtype = tf.float64
    params.input_dim = 3
    params.output_dim = 2
    params.params_init = py_utils.WeightInit.Gaussian(0.01, 123456)
    params.batch_norm = False
    proj_layer = layers.ProjectionLayer(params)
    in_padding1 = tf.zeros([2, 4, 1], dtype=tf.float64)
    op = optimizer.Accumulator.Params().Set(
        accum_steps=2, dtype=tf.float64, optimizer_tpl=optimizer.SGD.Params())
    opt = op.Instantiate()
    # Get `snapshots` of the variables before any update is applied.
    vars2 = [v.read_value() for v in proj_layer.vars.Flatten()]

    @tf.function
    def _Apply2(proj_layer, opt):
      inputs1 = np_input1
      output1 = proj_layer.FPropDefaultTheta(inputs1, in_padding1)
      loss2_1 = tf.reduce_sum(output1)
      var_grads2_1 = py_utils.ComputeGradients(loss2_1, proj_layer.vars)
      grads2_1 = var_grads2_1.Transform(tuple)
      inputs1 = np_input2
      output1 = proj_layer.FPropDefaultTheta(inputs1, in_padding1)
      loss2_2 = tf.reduce_sum(output1)
      var_grads2_2 = py_utils.ComputeGradients(loss2_2, proj_layer.vars)
      grads2_2 = var_grads2_2.Transform(tuple)
      with cluster_factory.ForTestingWorker(add_summary=True):
        _ = opt.Apply(lr, var_grads2_1)
      # Get `snapshots` of the intermediate variables; the assertion below
      # checks they still equal the initial values after the first
      # (accumulating-only) step.
      vars2_intermediate = [v.read_value() for v in proj_layer.vars.Flatten()]
      tf.assign_add(py_utils.GetOrCreateGlobalStepVar(), 1)
      with cluster_factory.ForTestingWorker(add_summary=True):
        _ = opt.Apply(lr, var_grads2_2)
      vars2_1 = proj_layer.vars.Flatten()
      return vars2_intermediate, vars2_1, grads2_1, grads2_2

    vars2_intermediate, vars2_1, grads2_1, grads2_2 = _Apply2(proj_layer, opt)

    # Unlike Graph mode, grads2_1['w'][0]/grads2_2['w'][0] returned from
    # `tf.function` are variables after updates. As a result we cannot compare
    # them with e.g. `vars1`.
    self.assertAllClose(vars1, vars2)
    self.assertAllClose(grads1_1, grads2_1)
    self.assertAllClose(grads1_2, grads2_2)
    self.assertAllClose(vars1, vars2_intermediate)
    lr = lr()
    # Both runs must land on the same averaged-SGD update of the 'w' weight.
    self.assertAllClose(
        vars1[0] - 0.5 * lr * (grads1_1['w'][1] + grads1_2['w'][1]), vars1_1[0])
    self.assertAllClose(
        vars2[0] - 0.5 * lr * (grads2_1['w'][1] + grads2_2['w'][1]), vars2_1[0])
    self.assertAllClose(vars2, vars2_intermediate)
    self.assertAllClose(vars1_1, vars2_1)
    # TODO(jiaweix): Add checks for the event files from tf.summary
    # once we migrate summary_utils to TF2
if __name__ == '__main__':
  # These tests exercise the eager/tf.function code path.
  py_utils.SetEagerMode(True)
  tf.test.main()
| [
"lingvo.core.optimizer.Adam.Params",
"lingvo.compat.zeros",
"lingvo.core.optimizer.Accumulator.Params",
"lingvo.core.layers.ProjectionLayer.Params",
"lingvo.core.optimizer.RMSProp.Params",
"lingvo.core.py_utils.GetOrCreateGlobalStepVar",
"lingvo.core.py_utils.ComputeGradients",
"numpy.random.seed",
... | [((7285, 7312), 'lingvo.core.py_utils.SetEagerMode', 'py_utils.SetEagerMode', (['(True)'], {}), '(True)\n', (7306, 7312), False, 'from lingvo.core import py_utils\n'), ((7315, 7329), 'lingvo.compat.test.main', 'tf.test.main', ([], {}), '()\n', (7327, 7329), True, 'import lingvo.compat as tf\n'), ((1068, 1111), 'lingvo.compat.zeros', 'tf.zeros', ([], {'shape': '[2, 4, 3]', 'dtype': 'tf.float64'}), '(shape=[2, 4, 3], dtype=tf.float64)\n', (1076, 1111), True, 'import lingvo.compat as tf\n'), ((1171, 1192), 'lingvo.compat.reduce_sum', 'tf.reduce_sum', (['output'], {}), '(output)\n', (1184, 1192), True, 'import lingvo.compat as tf\n'), ((1209, 1255), 'lingvo.core.py_utils.ComputeGradients', 'py_utils.ComputeGradients', (['loss', 'fc_layer.vars'], {}), '(loss, fc_layer.vars)\n', (1234, 1255), False, 'from lingvo.core import py_utils\n'), ((1518, 1541), 'lingvo.core.optimizer.Adam.Params', 'optimizer.Adam.Params', ([], {}), '()\n', (1539, 1541), False, 'from lingvo.core import optimizer\n'), ((1559, 1585), 'lingvo.core.optimizer.RMSProp.Params', 'optimizer.RMSProp.Params', ([], {}), '()\n', (1583, 1585), False, 'from lingvo.core import optimizer\n'), ((1835, 1858), 'lingvo.core.layers.FCLayer.Params', 'layers.FCLayer.Params', ([], {}), '()\n', (1856, 1858), False, 'from lingvo.core import layers\n'), ((2008, 2030), 'lingvo.core.layers.FCLayer', 'layers.FCLayer', (['params'], {}), '(params)\n', (2022, 2030), False, 'from lingvo.core import layers\n'), ((2137, 2159), 'lingvo.core.optimizer.SGD.Params', 'optimizer.SGD.Params', ([], {}), '()\n', (2157, 2159), False, 'from lingvo.core import optimizer\n'), ((2177, 2203), 'lingvo.core.optimizer.Adagrad.Params', 'optimizer.Adagrad.Params', ([], {}), '()\n', (2201, 2203), False, 'from lingvo.core import optimizer\n'), ((2456, 2479), 'lingvo.core.layers.FCLayer.Params', 'layers.FCLayer.Params', ([], {}), '()\n', (2477, 2479), False, 'from lingvo.core import layers\n'), ((2629, 2651), 'lingvo.core.layers.FCLayer', 
'layers.FCLayer', (['params'], {}), '(params)\n', (2643, 2651), False, 'from lingvo.core import layers\n'), ((3057, 3078), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (3071, 3078), True, 'import numpy as np\n'), ((3095, 3132), 'numpy.random.normal', 'np.random.normal', (['(0.1)', '(0.5)', '[2, 4, 3]'], {}), '(0.1, 0.5, [2, 4, 3])\n', (3111, 3132), True, 'import numpy as np\n'), ((3137, 3158), 'numpy.random.seed', 'np.random.seed', (['(12346)'], {}), '(12346)\n', (3151, 3158), True, 'import numpy as np\n'), ((3175, 3212), 'numpy.random.normal', 'np.random.normal', (['(0.1)', '(0.5)', '[2, 4, 3]'], {}), '(0.1, 0.5, [2, 4, 3])\n', (3191, 3212), True, 'import numpy as np\n'), ((3218, 3244), 'lingvo.compat.random.set_seed', 'tf.random.set_seed', (['(123456)'], {}), '(123456)\n', (3236, 3244), True, 'import lingvo.compat as tf\n'), ((3258, 3289), 'lingvo.core.layers.ProjectionLayer.Params', 'layers.ProjectionLayer.Params', ([], {}), '()\n', (3287, 3289), False, 'from lingvo.core import layers\n'), ((3421, 3463), 'lingvo.core.py_utils.WeightInit.Gaussian', 'py_utils.WeightInit.Gaussian', (['(0.01)', '(123456)'], {}), '(0.01, 123456)\n', (3449, 3463), False, 'from lingvo.core import py_utils\n'), ((3512, 3542), 'lingvo.core.layers.ProjectionLayer', 'layers.ProjectionLayer', (['params'], {}), '(params)\n', (3534, 3542), False, 'from lingvo.core import layers\n'), ((3585, 3622), 'lingvo.compat.zeros', 'tf.zeros', (['[2, 4, 1]'], {'dtype': 'tf.float64'}), '([2, 4, 1], dtype=tf.float64)\n', (3593, 3622), True, 'import lingvo.compat as tf\n'), ((3665, 3702), 'lingvo.compat.zeros', 'tf.zeros', (['[2, 4, 1]'], {'dtype': 'tf.float64'}), '([2, 4, 1], dtype=tf.float64)\n', (3673, 3702), True, 'import lingvo.compat as tf\n'), ((3713, 3735), 'lingvo.core.optimizer.SGD.Params', 'optimizer.SGD.Params', ([], {}), '()\n', (3733, 3735), False, 'from lingvo.core import optimizer\n'), ((4679, 4705), 'lingvo.compat.random.set_seed', 'tf.random.set_seed', 
(['(123456)'], {}), '(123456)\n', (4697, 4705), True, 'import lingvo.compat as tf\n'), ((4719, 4750), 'lingvo.core.layers.ProjectionLayer.Params', 'layers.ProjectionLayer.Params', ([], {}), '()\n', (4748, 4750), False, 'from lingvo.core import layers\n'), ((4883, 4925), 'lingvo.core.py_utils.WeightInit.Gaussian', 'py_utils.WeightInit.Gaussian', (['(0.01)', '(123456)'], {}), '(0.01, 123456)\n', (4911, 4925), False, 'from lingvo.core import py_utils\n'), ((4974, 5004), 'lingvo.core.layers.ProjectionLayer', 'layers.ProjectionLayer', (['params'], {}), '(params)\n', (4996, 5004), False, 'from lingvo.core import layers\n'), ((5023, 5060), 'lingvo.compat.zeros', 'tf.zeros', (['[2, 4, 1]'], {'dtype': 'tf.float64'}), '([2, 4, 1], dtype=tf.float64)\n', (5031, 5060), True, 'import lingvo.compat as tf\n'), ((4089, 4111), 'lingvo.compat.reduce_sum', 'tf.reduce_sum', (['output1'], {}), '(output1)\n', (4102, 4111), True, 'import lingvo.compat as tf\n'), ((4126, 4148), 'lingvo.compat.reduce_sum', 'tf.reduce_sum', (['output2'], {}), '(output2)\n', (4139, 4148), True, 'import lingvo.compat as tf\n'), ((4168, 4217), 'lingvo.core.py_utils.ComputeGradients', 'py_utils.ComputeGradients', (['loss1', 'proj_layer.vars'], {}), '(loss1, proj_layer.vars)\n', (4193, 4217), False, 'from lingvo.core import py_utils\n'), ((4237, 4286), 'lingvo.core.py_utils.ComputeGradients', 'py_utils.ComputeGradients', (['loss2', 'proj_layer.vars'], {}), '(loss2, proj_layer.vars)\n', (4262, 4286), False, 'from lingvo.core import py_utils\n'), ((5477, 5499), 'lingvo.compat.reduce_sum', 'tf.reduce_sum', (['output1'], {}), '(output1)\n', (5490, 5499), True, 'import lingvo.compat as tf\n'), ((5521, 5572), 'lingvo.core.py_utils.ComputeGradients', 'py_utils.ComputeGradients', (['loss2_1', 'proj_layer.vars'], {}), '(loss2_1, proj_layer.vars)\n', (5546, 5572), False, 'from lingvo.core import py_utils\n'), ((5730, 5752), 'lingvo.compat.reduce_sum', 'tf.reduce_sum', (['output1'], {}), '(output1)\n', (5743, 5752), True, 
'import lingvo.compat as tf\n'), ((5774, 5825), 'lingvo.core.py_utils.ComputeGradients', 'py_utils.ComputeGradients', (['loss2_2', 'proj_layer.vars'], {}), '(loss2_2, proj_layer.vars)\n', (5799, 5825), False, 'from lingvo.core import py_utils\n'), ((4312, 4363), 'lingvo.core.py_utils.ApplyGradMultiplier', 'py_utils.ApplyGradMultiplier', (['var_grads1', '(1.0 / 2.0)'], {}), '(var_grads1, 1.0 / 2.0)\n', (4340, 4363), False, 'from lingvo.core import py_utils\n'), ((4387, 4438), 'lingvo.core.py_utils.ApplyGradMultiplier', 'py_utils.ApplyGradMultiplier', (['var_grads2', '(1.0 / 2.0)'], {}), '(var_grads2, 1.0 / 2.0)\n', (4415, 4438), False, 'from lingvo.core import py_utils\n'), ((5071, 5101), 'lingvo.core.optimizer.Accumulator.Params', 'optimizer.Accumulator.Params', ([], {}), '()\n', (5099, 5101), False, 'from lingvo.core import optimizer\n'), ((5162, 5184), 'lingvo.core.optimizer.SGD.Params', 'optimizer.SGD.Params', ([], {}), '()\n', (5182, 5184), False, 'from lingvo.core import optimizer\n'), ((5885, 5935), 'lingvo.core.cluster_factory.ForTestingWorker', 'cluster_factory.ForTestingWorker', ([], {'add_summary': '(True)'}), '(add_summary=True)\n', (5917, 5935), False, 'from lingvo.core import cluster_factory\n'), ((6131, 6166), 'lingvo.core.py_utils.GetOrCreateGlobalStepVar', 'py_utils.GetOrCreateGlobalStepVar', ([], {}), '()\n', (6164, 6166), False, 'from lingvo.core import py_utils\n'), ((6183, 6233), 'lingvo.core.cluster_factory.ForTestingWorker', 'cluster_factory.ForTestingWorker', ([], {'add_summary': '(True)'}), '(add_summary=True)\n', (6215, 6233), False, 'from lingvo.core import cluster_factory\n'), ((1609, 1646), 'lingvo.core.optimizer.CompositeOptimizer.Params', 'optimizer.CompositeOptimizer.Params', ([], {}), '()\n', (1644, 1646), False, 'from lingvo.core import optimizer\n'), ((2231, 2268), 'lingvo.core.optimizer.CompositeOptimizer.Params', 'optimizer.CompositeOptimizer.Params', ([], {}), '()\n', (2266, 2268), False, 'from lingvo.core import optimizer\n')] |
from os import urandom
import numpy as np
# Small 5x5 grid from the puzzle's first worked example (kept for manual
# experimentation; not used by the demo prints below).
s1 = '''
11111
19991
19191
19991
11111'''

# 10x10 example grid from the puzzle statement.
sampleIn = '''
5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526'''

# Personal puzzle input.
realIn = '''
4438624262
6263251864
2618812434
2134264565
1815131247
2612457325
8585767584
7217134556
2825456563
8248473584'''
def day11p1(textIn, steps):
    """Simulate the dumbo-octopus cascade (AoC 2021, day 11, part 1).

    Each step every energy level rises by 1; any octopus above 9 flashes,
    raising all neighbours (including diagonals) by 1 — possibly chaining —
    and every octopus that flashed is reset to 0 at the end of the step.
    An octopus flashes at most once per step.

    Args:
        textIn: whitespace-separated rows of single digits (a leading
            newline, as in the module-level inputs, is fine).
        steps: number of steps to simulate.

    Returns:
        Total number of flashes over all ``steps`` steps.
    """
    grid = np.array([[int(ch) for ch in row] for row in textIn.split()])
    total_flashes = 0
    for _ in range(steps):
        grid += 1
        # The original initialized this mask as `newA == True`, which wrongly
        # pre-marks energy-1 cells as flashed (harmless only because such a
        # cell can never exceed 9 within one step); start all-False instead.
        flashed = np.zeros(grid.shape, dtype=bool)
        while True:
            to_flash = (grid > 9) & ~flashed
            if not to_flash.any():
                break
            for r, c in zip(*np.nonzero(to_flash)):
                total_flashes += 1
                flashed[r, c] = True
                # max() clamps the low edge; numpy clips the high edge.
                # The flasher bumps itself too, which is harmless: it is
                # already marked flashed and is reset to 0 below.
                grid[max(0, r - 1):r + 2, max(0, c - 1):c + 2] += 1
        grid[flashed] = 0
    return total_flashes
# Part 1 demo: total flashes after 100 steps for the sample and real inputs.
print(day11p1(sampleIn, 100))
print(day11p1(realIn, 100))
def day11p2(textIn):
    """Find the first synchronized flash (AoC 2021, day 11, part 2).

    Runs the same cascade as :func:`day11p1` and returns the 1-based number
    of the first step on which every octopus in the grid flashes.

    Args:
        textIn: whitespace-separated rows of single digits (a leading
            newline, as in the module-level inputs, is fine).

    Returns:
        The step number (starting at 1) of the first all-flash step.
        Loops forever if the grid never synchronizes (same as the original).
    """
    grid = np.array([[int(ch) for ch in row] for row in textIn.split()])
    step = 0
    while True:
        step += 1
        grid += 1
        # All-False "already flashed" mask (the original's `newA == True`
        # init wrongly pre-marked energy-1 cells; see day11p1).
        flashed = np.zeros(grid.shape, dtype=bool)
        while True:
            to_flash = (grid > 9) & ~flashed
            if not to_flash.any():
                break
            for r, c in zip(*np.nonzero(to_flash)):
                flashed[r, c] = True
                # max() clamps the low edge; numpy clips the high edge.
                grid[max(0, r - 1):r + 2, max(0, c - 1):c + 2] += 1
        if flashed.all():
            # Every octopus flashed this step: synchronized.
            return step
        grid[flashed] = 0
# Part 2 demo: first step on which every octopus flashes simultaneously.
print(day11p2(sampleIn))
print(day11p2(realIn)) | [
"numpy.count_nonzero",
"numpy.array",
"numpy.copy",
"numpy.where"
] | [((478, 489), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (486, 489), True, 'import numpy as np\n'), ((1925, 1936), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1933, 1936), True, 'import numpy as np\n'), ((656, 669), 'numpy.copy', 'np.copy', (['newA'], {}), '(newA)\n', (663, 669), True, 'import numpy as np\n'), ((691, 717), 'numpy.count_nonzero', 'np.count_nonzero', (['(newA > 9)'], {}), '(newA > 9)\n', (707, 717), True, 'import numpy as np\n'), ((869, 896), 'numpy.count_nonzero', 'np.count_nonzero', (['comboMask'], {}), '(comboMask)\n', (885, 896), True, 'import numpy as np\n'), ((1684, 1711), 'numpy.where', 'np.where', (['(newA > 9)', '(0)', 'newA'], {}), '(newA > 9, 0, newA)\n', (1692, 1711), True, 'import numpy as np\n'), ((2130, 2143), 'numpy.copy', 'np.copy', (['newA'], {}), '(newA)\n', (2137, 2143), True, 'import numpy as np\n'), ((2165, 2191), 'numpy.count_nonzero', 'np.count_nonzero', (['(newA > 9)'], {}), '(newA > 9)\n', (2181, 2191), True, 'import numpy as np\n'), ((2343, 2370), 'numpy.count_nonzero', 'np.count_nonzero', (['comboMask'], {}), '(comboMask)\n', (2359, 2370), True, 'import numpy as np\n'), ((1481, 1495), 'numpy.copy', 'np.copy', (['newAc'], {}), '(newAc)\n', (1488, 1495), True, 'import numpy as np\n'), ((1631, 1658), 'numpy.count_nonzero', 'np.count_nonzero', (['comboMask'], {}), '(comboMask)\n', (1647, 1658), True, 'import numpy as np\n'), ((2955, 2969), 'numpy.copy', 'np.copy', (['newAc'], {}), '(newAc)\n', (2962, 2969), True, 'import numpy as np\n'), ((3105, 3132), 'numpy.count_nonzero', 'np.count_nonzero', (['comboMask'], {}), '(comboMask)\n', (3121, 3132), True, 'import numpy as np\n'), ((3145, 3171), 'numpy.count_nonzero', 'np.count_nonzero', (['(newA > 9)'], {}), '(newA > 9)\n', (3161, 3171), True, 'import numpy as np\n'), ((3251, 3278), 'numpy.where', 'np.where', (['(newA > 9)', '(0)', 'newA'], {}), '(newA > 9, 0, newA)\n', (3259, 3278), True, 'import numpy as np\n'), ((573, 583), 'numpy.copy', 'np.copy', (['a'], {}), 
'(a)\n', (580, 583), True, 'import numpy as np\n'), ((621, 634), 'numpy.copy', 'np.copy', (['newA'], {}), '(newA)\n', (628, 634), True, 'import numpy as np\n'), ((2047, 2057), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (2054, 2057), True, 'import numpy as np\n'), ((2095, 2108), 'numpy.copy', 'np.copy', (['newA'], {}), '(newA)\n', (2102, 2108), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Authors: <NAME> | https://parshanpakiman.github.io/homepage/
<NAME> | https://selvan.people.uic.edu/
Licensing Information: The MIT License
-------------------------------------------------------------------------------
"""
import numpy as np
from numba.core.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning
import warnings
from numba import jit
from utils import mean_confidence_interval,make_text_bold
import time
from utils import output_handler_option_pricing
# Silence numba's deprecation chatter so the progress table printed during
# training stays readable.
warnings.simplefilter('ignore', category=NumbaDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)
@jit
def lstsq_jit(X,y):
    #--------------------------------------------------------------------------
    # Numba-jitted least squares: returns only the coefficient vector of
    # min ||X @ beta - y||_2 (residuals/rank/singular values are discarded).
    #--------------------------------------------------------------------------
    return np.linalg.lstsq(a=X,b=y)[0]
class LeastSquaresMonteCarlo():
    """Least-Squares Monte Carlo (Longstaff & Schwartz) for optimal stopping.

    Fits a continuation-function approximation (CFA) by backward regression
    on simulated sample paths, then evaluates the greedy exercise policy it
    induces on an independent set of paths.
    """
    #--------------------------------------------------------------------------
    # Least-Squares Monte Carlo: Longstaff & Schwartz
    #--------------------------------------------------------------------------

    def __init__(self,instance_conf):
        #----------------------------------------------------------------------
        # Initialization: pull the MDP, basis functions, path counts, seeds
        # and discount factor out of the instance configuration dict.
        #----------------------------------------------------------------------
        self.instance_conf = instance_conf
        self.mdp = instance_conf['mdp_conf']['mdp'](instance_conf)
        self.basis_func = instance_conf['basis_func_conf']['basis_func'](instance_conf)
        self.num_CFA_sample_path:int = instance_conf['mdp_conf']['num_CFA_sample_path']
        self.num_pol_eval_sample_path:int = instance_conf['mdp_conf']['num_pol_eval_sample_path']
        self.num_stages = self.mdp.num_stages
        self.num_basis_func = self.basis_func.num_basis_func
        self.discount = instance_conf['mdp_conf']['discount']
        # One regression-coefficient column per stage.
        self.basis_func_coef_matrix = np.empty(shape=(self.num_basis_func,self.num_stages))
        self.CFA_random_seed = instance_conf['mdp_conf']['CFA_random_seed']
        self.pol_random_seed = instance_conf['mdp_conf']['pol_random_seed']
        self.output_handler = output_handler_option_pricing(instance_conf)

    def print_algorithm_instance_info(self):
        #----------------------------------------------------------------------
        # Print a banner describing the instance, then the header of the
        # progress table that LSMN_fit_CFA fills in.
        #----------------------------------------------------------------------
        print('\n')
        print('='*99)
        print('Instance number: \t' + make_text_bold(self.mdp.instance_number))
        print('Algorithm name: \t' + make_text_bold('LSM'))
        print('Basis function type: \t' + make_text_bold(self.basis_func.basis_func_type))
        print('State relevance: \t' + make_text_bold(self.mdp.state_relevance_type))
        print('Random seed of CFA paths: \t' + make_text_bold(str(self.CFA_random_seed)))
        print('Random seed of pol sim: \t' + make_text_bold(str(self.pol_random_seed)))
        print('='*99)
        print('| {:>9s} | {:>8s} | {:>8s} | {:>9s} | {:>9s} | {:>15s} | {:>8s} | {:>8s} |'.format(
            'Path GenT', '# Basis','Time','CFA T', 'Train LB', 'Test LB', 'LB RT','TOT RT') )
        print('-'*99)

    def generate_sample_paths(self):
        #----------------------------------------------------------------------
        # Generate and store sample paths: one set (with rewards) for fitting
        # the CFA, an independent set for evaluating the resulting policy.
        #----------------------------------------------------------------------
        self.CFA_sample_paths = self.mdp.get_sample_path(self.num_CFA_sample_path,self.CFA_random_seed, self.mdp.state_relevance_type)
        self.CFA_paths_rewards = self.mdp.get_reward_of_path(self.CFA_sample_paths)
        self.pol_sim_sample_paths = self.mdp.get_sample_path(self.num_pol_eval_sample_path,self.pol_random_seed)
        self.pol_sim_paths_rewards = self.mdp.get_reward_of_path(self.pol_sim_sample_paths)

    def LSMN_fit_CFA(self):
        #----------------------------------------------------------------------
        # Fit the Continuation Function Approximation (CFA) by backward
        # induction, then compute train/test lower-bound statistics.
        # Returns True on completion.
        #----------------------------------------------------------------------
        tot_runtime = time.time()
        start = time.time()
        self.print_algorithm_instance_info()
        self.generate_sample_paths()
        path_gen_RT = time.time() - start
        CFA_RT,LB_RT = 0,0
        when_print_results = 25
        #----------------------------------------------------------------------
        # Backward loop over all time steps; CFA_values[:,t] holds the value
        # estimate (max of immediate reward and discounted continuation).
        CFA_values = np.empty(shape=(self.num_CFA_sample_path,self.num_stages))
        for t in range(self.num_stages-1,-1,-1):
            print('| {:>9.2f} | {:>8d} | {:>8d} | {:>9s} | {:>9s} | {:>15s} | {:>8s} | {:>8.1f} |'.format(path_gen_RT,self.num_basis_func,t,'','','','',(time.time()-tot_runtime)/60),end='\r')
            #----------------------------------------------------------------------
            # Fit CFA
            if t == self.num_stages-1:
                # Terminal stage: no continuation value.
                start = time.time()
                CFA_values[:,t] = np.zeros(len(self.CFA_sample_paths[:,t,0])) #self.CFA_paths_rewards[:,t]*self.discount
                CFA_RT += time.time() - start
                print('| {:>9.2f} | {:>8d} | {:>8d} | {:>9.1f} | {:>9s} | {:>15s} | {:>8s} | {:>8.1f} |'.format(path_gen_RT,self.num_basis_func,t,CFA_RT,'','','',(time.time()-tot_runtime)/60),end='\r')
            elif t == self.num_stages-2:
                # Next-to-last stage: continuation is the discounted terminal reward.
                CFA_values[:,t] = self.CFA_paths_rewards[:,t+1]*self.discount
            else:
                # Regress next-stage values on basis features of the current states.
                start = time.time()
                state_list = self.CFA_sample_paths[:,t,:]
                feature_matrix = self.basis_func.eval_basis(state_list)
                self.basis_func_coef_matrix[:,t] = lstsq_jit(feature_matrix,CFA_values[:,t+1])
                CFA_values[:,t] = np.maximum(self.CFA_paths_rewards[:,t],feature_matrix@self.basis_func_coef_matrix[:,t]*self.discount)
                CFA_RT += time.time() - start
                if t%when_print_results==0:
                    print('| {:>9.2f} | {:>8d} | {:>8d} | {:>9.1f} | {:>9s} | {:>15s} | {:>8s} | {:>8.1f} |'.format(path_gen_RT,self.num_basis_func,t,CFA_RT,'','','',(time.time()-tot_runtime)/60),end='\r')
        #----------------------------------------------------------------------
        # Compute lower-bound confidence intervals on the training paths and
        # on the held-out policy-simulation paths. (`t` is 0 here — the last
        # value taken by the backward loop — and is only used for printing.)
        train_LB_stat = self.get_policy_from_continue_func(CFA_values,self.CFA_sample_paths,self.CFA_paths_rewards )
        print('| {:>9.2f} | {:>8d} | {:>8d} | {:>9.1f} | {:>9.2f} | {:>15s} | {:>8s} | {:>8.1f} |'.format(path_gen_RT,self.num_basis_func,t,CFA_RT,train_LB_stat[0][0],'','',(time.time()-tot_runtime)/60),end='\r')
        start = time.time()
        LB_stat = self.simulate_CVFA_policy()
        LB_RT += time.time() - start
        print('| {:>9.2f} | {:>8d} | {:>8d} | {:>9.1f} | {:>9.2f} | {:>15.2f} | {:>8.1f} | {:>8.1f} |'.format(path_gen_RT,self.num_basis_func,t,CFA_RT,train_LB_stat[0][0],LB_stat[0],LB_RT,(time.time()-tot_runtime)/60),end='\n')
        self.output_handler.append_to_outputs(algorithm_name = 'LSM',
                            basis_seed = self.basis_func.basis_func_random_state,
                            num_basis_func = self.num_basis_func,
                            num_constr = self.num_CFA_sample_path,
                            ALP_con_runtime = np.nan,
                            FALP_obj = np.nan,
                            ALP_slv_runtime = np.nan,
                            train_LB_mean = train_LB_stat[0][0],
                            train_LB_SE = train_LB_stat[0][3],
                            test_LB_mean = LB_stat[0],
                            test_LB_SE = LB_stat[3],
                            test_LB_runtime = (time.time()-start)/60,
                            total_runtime = (time.time()-tot_runtime)/60)
        print('-'*99)
        return True

    def simulate_CVFA_policy(self):
        #----------------------------------------------------------------------
        # Simulate the greedy stopping policy on the held-out paths: stop as
        # soon as the fitted continuation value is <= the immediate reward
        # (paths that never stop are exercised at the final stage). Returns
        # mean_confidence_interval of the discounted collected rewards.
        #----------------------------------------------------------------------
        continue_value_list = np.zeros((len(self.pol_sim_paths_rewards),self.num_stages))
        reward = []
        eliminated_paths = []
        stopping_time = np.zeros(len(self.pol_sim_sample_paths))
        for t in range(self.num_stages):
            feature_matrix = self.basis_func.eval_basis(self.pol_sim_sample_paths[:,t,:])
            if t == self.num_stages-1:
                # No continuation at the terminal stage.
                continue_value = np.zeros_like(self.pol_sim_paths_rewards[:,t] )
            else:
                continue_value = feature_matrix@self.basis_func_coef_matrix[:,t]
            immediate_reward = self.pol_sim_paths_rewards[:,t]
            stopping_time = np.less_equal(continue_value,immediate_reward)
            # Only paths that have not already stopped may stop now.
            path_to_stop = np.setdiff1d(np.nonzero(stopping_time)[0],eliminated_paths)
            if len(path_to_stop)>0:
                reward.extend([self.pol_sim_paths_rewards[_,t]*(self.discount**(t)) for _ in path_to_stop])
                eliminated_paths.extend(path_to_stop)
            continue_value_list[:,t] = continue_value
        # Any path that never stopped collects the discounted final reward.
        last_stage_stop =np.setdiff1d(range(len(self.pol_sim_sample_paths)),eliminated_paths)
        T = self.num_stages
        reward.extend([self.pol_sim_paths_rewards[_,T-1]*(self.discount**(T-1)) for _ in last_stage_stop])
        return mean_confidence_interval(reward)

    def get_policy_from_continue_func(self,continue_func, paths_state, paths_rewards):
        #----------------------------------------------------------------------
        # Same stopping rule as simulate_CVFA_policy, but driven by an
        # explicit matrix of continuation values (one column per stage).
        # Returns (mean_confidence_interval(rewards), states visited by the
        # policy per stage).
        #----------------------------------------------------------------------
        reward = []
        eliminated_paths = []
        stopping_time = np.zeros(len(paths_state))
        pol_visited_state = [[] for _ in range(self.num_stages)]
        for t in range(self.num_stages):
            immediate_reward = paths_rewards[:,t]
            continue_value = continue_func[:,t]
            state_list = paths_state[:,t]
            stopping_time = np.less_equal(continue_value, immediate_reward)
            # Only paths that have not already stopped may stop now.
            path_to_stop = np.setdiff1d(np.nonzero(stopping_time)[0], eliminated_paths)
            # States of the paths still alive at stage t.
            pol_visited_state[t] = [state_list[_] for _ in np.setdiff1d(range(len(state_list)),eliminated_paths)]
            if len(path_to_stop)>0:
                reward.extend([paths_rewards[_,t]*(self.discount**(t)) for _ in path_to_stop])
                eliminated_paths.extend(path_to_stop)
        # Any path that never stopped collects the discounted final reward.
        last_stage_stop = np.setdiff1d(range(len(paths_state)),eliminated_paths)
        T = self.num_stages
        reward.extend([paths_rewards[_,T-1]*(self.discount**(T-1)) for _ in last_stage_stop])
        return mean_confidence_interval(reward),pol_visited_state
| [
"utils.mean_confidence_interval",
"numpy.less_equal",
"utils.make_text_bold",
"numpy.zeros_like",
"numpy.empty",
"numpy.linalg.lstsq",
"numpy.nonzero",
"warnings.simplefilter",
"numpy.maximum",
"time.time",
"utils.output_handler_option_pricing"
] | [((649, 714), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'NumbaDeprecationWarning'}), "('ignore', category=NumbaDeprecationWarning)\n", (670, 714), False, 'import warnings\n'), ((715, 787), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'NumbaPendingDeprecationWarning'}), "('ignore', category=NumbaPendingDeprecationWarning)\n", (736, 787), False, 'import warnings\n'), ((1041, 1066), 'numpy.linalg.lstsq', 'np.linalg.lstsq', ([], {'a': 'X', 'b': 'y'}), '(a=X, b=y)\n', (1056, 1066), True, 'import numpy as np\n'), ((2305, 2359), 'numpy.empty', 'np.empty', ([], {'shape': '(self.num_basis_func, self.num_stages)'}), '(shape=(self.num_basis_func, self.num_stages))\n', (2313, 2359), True, 'import numpy as np\n'), ((2601, 2645), 'utils.output_handler_option_pricing', 'output_handler_option_pricing', (['instance_conf'], {}), '(instance_conf)\n', (2630, 2645), False, 'from utils import output_handler_option_pricing\n'), ((4763, 4774), 'time.time', 'time.time', ([], {}), '()\n', (4772, 4774), False, 'import time\n'), ((4817, 4828), 'time.time', 'time.time', ([], {}), '()\n', (4826, 4828), False, 'import time\n'), ((5202, 5261), 'numpy.empty', 'np.empty', ([], {'shape': '(self.num_CFA_sample_path, self.num_stages)'}), '(shape=(self.num_CFA_sample_path, self.num_stages))\n', (5210, 5261), True, 'import numpy as np\n'), ((7724, 7735), 'time.time', 'time.time', ([], {}), '()\n', (7733, 7735), False, 'import time\n'), ((10999, 11031), 'utils.mean_confidence_interval', 'mean_confidence_interval', (['reward'], {}), '(reward)\n', (11023, 11031), False, 'from utils import mean_confidence_interval, make_text_bold\n'), ((4949, 4960), 'time.time', 'time.time', ([], {}), '()\n', (4958, 4960), False, 'import time\n'), ((7817, 7828), 'time.time', 'time.time', ([], {}), '()\n', (7826, 7828), False, 'import time\n'), ((10314, 10361), 'numpy.less_equal', 'np.less_equal', (['continue_value', 'immediate_reward'], {}), 
'(continue_value, immediate_reward)\n', (10327, 10361), True, 'import numpy as np\n'), ((11851, 11898), 'numpy.less_equal', 'np.less_equal', (['continue_value', 'immediate_reward'], {}), '(continue_value, immediate_reward)\n', (11864, 11898), True, 'import numpy as np\n'), ((12577, 12609), 'utils.mean_confidence_interval', 'mean_confidence_interval', (['reward'], {}), '(reward)\n', (12601, 12609), False, 'from utils import mean_confidence_interval, make_text_bold\n'), ((2984, 3024), 'utils.make_text_bold', 'make_text_bold', (['self.mdp.instance_number'], {}), '(self.mdp.instance_number)\n', (2998, 3024), False, 'from utils import mean_confidence_interval, make_text_bold\n'), ((3080, 3101), 'utils.make_text_bold', 'make_text_bold', (['"""LSM"""'], {}), "('LSM')\n", (3094, 3101), False, 'from utils import mean_confidence_interval, make_text_bold\n'), ((3157, 3204), 'utils.make_text_bold', 'make_text_bold', (['self.basis_func.basis_func_type'], {}), '(self.basis_func.basis_func_type)\n', (3171, 3204), False, 'from utils import mean_confidence_interval, make_text_bold\n'), ((3260, 3305), 'utils.make_text_bold', 'make_text_bold', (['self.mdp.state_relevance_type'], {}), '(self.mdp.state_relevance_type)\n', (3274, 3305), False, 'from utils import mean_confidence_interval, make_text_bold\n'), ((5743, 5754), 'time.time', 'time.time', ([], {}), '()\n', (5752, 5754), False, 'import time\n'), ((10044, 10091), 'numpy.zeros_like', 'np.zeros_like', (['self.pol_sim_paths_rewards[:, t]'], {}), '(self.pol_sim_paths_rewards[:, t])\n', (10057, 10091), True, 'import numpy as np\n'), ((5950, 5961), 'time.time', 'time.time', ([], {}), '()\n', (5959, 5961), False, 'import time\n'), ((6412, 6423), 'time.time', 'time.time', ([], {}), '()\n', (6421, 6423), False, 'import time\n'), ((6752, 6865), 'numpy.maximum', 'np.maximum', (['self.CFA_paths_rewards[:, t]', '(feature_matrix @ self.basis_func_coef_matrix[:, t] * self.discount)'], {}), '(self.CFA_paths_rewards[:, t], feature_matrix @ 
self.\n basis_func_coef_matrix[:, t] * self.discount)\n', (6762, 6865), True, 'import numpy as np\n'), ((10408, 10433), 'numpy.nonzero', 'np.nonzero', (['stopping_time'], {}), '(stopping_time)\n', (10418, 10433), True, 'import numpy as np\n'), ((11954, 11979), 'numpy.nonzero', 'np.nonzero', (['stopping_time'], {}), '(stopping_time)\n', (11964, 11979), True, 'import numpy as np\n'), ((6908, 6919), 'time.time', 'time.time', ([], {}), '()\n', (6917, 6919), False, 'import time\n'), ((7663, 7674), 'time.time', 'time.time', ([], {}), '()\n', (7672, 7674), False, 'import time\n'), ((8035, 8046), 'time.time', 'time.time', ([], {}), '()\n', (8044, 8046), False, 'import time\n'), ((9149, 9160), 'time.time', 'time.time', ([], {}), '()\n', (9158, 9160), False, 'import time\n'), ((9249, 9260), 'time.time', 'time.time', ([], {}), '()\n', (9258, 9260), False, 'import time\n'), ((5476, 5487), 'time.time', 'time.time', ([], {}), '()\n', (5485, 5487), False, 'import time\n'), ((6133, 6144), 'time.time', 'time.time', ([], {}), '()\n', (6142, 6144), False, 'import time\n'), ((7156, 7167), 'time.time', 'time.time', ([], {}), '()\n', (7165, 7167), False, 'import time\n')] |
import logging
import os
import numpy as np
import pandas as pd
import sqlalchemy
from cached_property import cached_property
from scipy.interpolate import interp1d
from aqueduct.errors import Error
class RiskService(object):
    def __init__(self, user_selections):
        """Set up DB access, static lookup tables and user-driven inputs.

        :param user_selections: dict with keys "flood", "exposure",
            "geogunit_unique_name", "sub_scenario", "existing_prot" and
            "scenario" (missing keys become None via .get()).
        """
        # DB Connection — requires the POSTGRES_URL environment variable to be
        # set (os.getenv returns None otherwise and create_engine would fail).
        self.engine = sqlalchemy.create_engine(os.getenv('POSTGRES_URL'))
        self.metadata = sqlalchemy.MetaData(bind=self.engine)
        self.metadata.reflect(self.engine)
        # BACKGROUND INFO: static vocabularies used to build table/column names.
        self.flood_types = ["riverine", "coastal"]
        self.exposures = ["gdpexp", "popexp", "urban_damage_v2"]
        self.geogunits = ["geogunit_103", "geogunit_108"]
        # scenario name -> [climate scenario, socioeconomic scenario, abbreviation]
        # NOTE(review): the "rcp4p5" key maps to the 'rcp8p5' climate tag while
        # "optimistic" maps to 'rcp4p5' — looks inconsistent; confirm intended.
        self.scenarios = {"business as usual": ['rcp8p5', 'ssp2', "bau"],
                          "pessimistic": ['rcp8p5', 'ssp3', "pes"],
                          "optimistic": ['rcp4p5', 'ssp2', "opt"],
                          "rcp8p5": ['rcp8p5', 'ssp3', "pes"],
                          "rcp4p5": ['rcp8p5', 'ssp2', "bau"]}
        # Model identifiers per flood type (coastal switched from "wt" to percentiles).
        self.models = {"riverine": ["gf", "ha", "ip", "mi", "nr"],
                       # "coastal": ["wt"]}
                       "coastal": ["95", "50", "05"]}
        self.years = [2010., 2030., 2050., 2080.]
        self.ys = [str(x)[0:4] for x in self.years]
        # Return periods and their zero-padded column names (e.g. "rp00002").
        self.rps = [2, 5, 10, 25, 50, 100, 250, 500, 1000]
        self.rps_names = ["rp" + str(x).zfill(5) for x in self.rps]
        # MANDATORY USER INPUTS
        self.flood = user_selections.get("flood")  # Flood type
        self.exposure = user_selections.get("exposure")  # Exposure type
        self.geogunit_unique_name = user_selections.get("geogunit_unique_name")  # Unique geographical unit name
        self.sub_scenario = user_selections.get(
            "sub_scenario")  # Subsidence option (Will always be no for Riverine floods)
        self.existing_prot = user_selections.get(
            "existing_prot")  # User input for protection standard (triggers on-the-fly calculation)
        self.scenario = user_selections.get("scenario")
        self.geogunit, self.geogunit_name, self.geogunit_type, self.clim, self.socio, self.scen_abb, self.sub_abb, self.df_precalc, self.prot_pres, self.risk_analysis = self.user_selections()
        # Scenario abbreviation
        self.mods = self.models.get(self.flood)
    def user_selections(self):
        """
        Purpose: Gather all necessary inputs to run any analysis
        Input:
            flood: Riverine of Coastal (User must select)
            Geogunit_unique_name: geographical unit name from website. (User must select)
                Website should use list of unique names to avoid selecting more than one unit
            Scenario: Business as usual, Pessimistic, Optimistic
            sub_scenario: Yes (defaul(t), No does the user want to consider subsidence? Only relevant for coastal)
            existing_prot: Default protection standard. User can input their own or, which will trigger on-the-fly calculations
        Output:
            geogunit unit - (geogunit_103 for cities, geogunit_108 for everything else)
            geogunit_name - original (ie non-unique) name
            geogunit_type - City, State, Country, Basin
            clim - rcp4p5, rcp8p4 (climate scenario associated with overall scenario)
            socio - base, ssp2, ssp3 (socioeconomic scenario associated with overall scenario)
            sub_scenario- Yes, No (Is subsidence included?)
            sub_abb - wtsub or nosub (code name for subsidence. wtsub = with sub)
            prot_pres - default protection standard for unit as a whole
            risk_analysis - can we use precalculated risk data, or do we need to calculate on-the-fly?
        """
        # GEOGUNIT INFO: resolve the unique website name to fids/name/type
        fids, geogunit_name, geogunit_type = pd.read_sql_query(
            "SELECT fids, name, type FROM lookup_master where uniqueName = '{0}' ".format(self.geogunit_unique_name),
            self.engine).values[0]
        geogunit = "geogunit_103" if geogunit_type.lower() == "city" else "geogunit_108"
        # IMPACT DRIVER INFO (climate and socioeconomc scenarios
        clim, socio, scen_abb = self.scenarios.get(self.scenario)
        # SUBSIDENCE INFO
        # Make sure subsidence is turned off for river floods
        sub_abb = "wtsub" if self.sub_scenario else "nosub"
        # DEFAULT DATA: precalculated aggregate table for this flood/unit/subsidence combo
        defaultfn = "precalc_agg_{0}_{1}_{2}".format(self.flood, geogunit_type.lower(), sub_abb)
        logging.info(f'[RISK - user_selection]: {str(defaultfn)}')
        df_precalc = pd.read_sql_query("SELECT * FROM {0} where id like '{1}'".format(defaultfn, geogunit_name),
                                       self.engine, index_col='id')
        # PROTECTION STANDARDS and RISK ANALYSIS TYPE
        if not self.existing_prot:
            risk_analysis = "precalc"
            # Hardwire in the protection standards for the Netherlands or Average prot standard for a whole unit (i.e. country)
            # here self.exposure should be allways urban_damage_v2
            prot_pres = (1000 if geogunit_name in ['Noord-Brabant, Netherlands', 'Zeeland, Netherlands',
                                                   'Zeeuwse meren, Netherlands', 'Zuid-Holland, Netherlands',
                                                   'Drenthe, Netherlands', 'Flevoland, Netherlands',
                                                   'Friesland, Netherlands', 'Gelderland, Netherlands',
                                                   'Groningen, Netherlands', 'IJsselmeer, Netherlands',
                                                   'Limburg, Netherlands', 'Noord-Holland, Netherlands',
                                                   'Overijssel, Netherlands', 'Utrecht, Netherlands',
                                                   'Netherlands'] else df_precalc[
                ["_".join(['urban_damage_v2', '2010', scen_abb, "prot_avg"])]])
        else:
            # A user-supplied protection standard forces on-the-fly calculation
            risk_analysis = "calc"
            prot_pres = self.existing_prot
        return geogunit, geogunit_name, geogunit_type.lower(), clim, socio, scen_abb, sub_abb, df_precalc, prot_pres, risk_analysis
    def lp_data(self):
        """Build the loss-probability curve data for the current selection.

        Reads the raw aggregate table for this flood/geography/exposure, keeps
        only the columns matching the chosen climate/socioeconomic/subsidence
        scenario, and reshapes them into records with columns
        ['c', 'year', 'y', 'x'] where 'x' is the return period (as a number).
        """
        inFormat = 'raw_agg_{:s}_{:s}_{:s}'.format(self.flood, self.geogunit_type, self.exposure)
        # Alias each matching column so its name collapses to "lp_<c>_<year>_<rp>"
        cols = [
            '{0} as {1}'.format(col, col.replace(self.clim, 'lp').replace(self.socio + "_" + self.sub_abb + "_", ''))
            for col in sqlalchemy.Table(inFormat, self.metadata).columns.keys() if
            (self.clim in col) and (self.socio in col) and (self.sub_abb in col)]
        df_temp = pd.read_sql_query(
            "SELECT {0} FROM {1} where id like '{2}'".format(', '.join(cols), inFormat, self.geogunit_name),
            self.engine)
        # Transpose: one row per column, values become the curve's y axis
        df_lpcurve = df_temp.T
        df1 = df_lpcurve.reset_index().rename(columns={"index": "index", 0: "y"})
        # Split "lp_<c>_<year>_<rp>" back into its parts
        df2 = df_lpcurve.reset_index()['index'].str.split('_', expand=True).rename(
            columns={0: "lp", 1: "c", 2: "year", 3: "x"})
        logging.info('[RISK]: lp_curve')
        #logging.info(df1)
        #logging.info(df2)
        # Map "rp00002"-style names back to numeric return periods
        return pd.concat([df1, df2], axis=1).reindex(df1.index)[['c', 'year', 'y', 'x']].replace(self.rps_names, self.rps)
        #return pd.concat([df1, df2], axis=1, join_axes=[df1.index])[['c', 'year', 'y', 'x']].replace(self.rps_names, self.rps)
def bench(self):
defaultfn = "precalc_agg_{0}_{1}_{2}".format(self.flood, self.geogunit_type, self.sub_abb)
print(defaultfn)
# cols = ['{0} as {1}'.format(col, col.replace(self.exposure, 'bench').replace('urban_damage_v2', 'bench').replace("_"+ self.scen_abb, '')) for col in sqlalchemy.Table(defaultfn, self.metadata).columns.keys() if ((self.exposure in col) or ('urban_damage_v2' in col)) and (self.scen_abb in col) and ("cc" not in col) and ("soc" not in col) and ("sub" not in col) and ("avg" in col)]
cols = ['{0} as {1}'.format(col,
col.replace(self.exposure, 'bench').replace('urban_damage_v2', 'bench').replace(
"_" + self.scen_abb, '')) for col in
sqlalchemy.Table(defaultfn, self.metadata).columns.keys() if
((self.exposure in col) or ('prot' in col)) and (self.scen_abb in col) and ("cc" not in col) and (
"soc" not in col) and ("sub" not in col) and ("avg" in col)]
benchData = pd.read_sql_query("SELECT id, {0} FROM {1}".format(', '.join(cols), defaultfn), self.engine,
index_col='id')
return benchData
def format_risk(self, dataframe):
datalist = ["tot_avg", "tot_min", "tot_max",
"ast", "prot_avg",
"per_avg", "per_min", "per_max",
"cc_avg", "cc_min", "cc_max",
"soc_avg", "sub_avg"]
colNames = ["Annual_Damage_Avg", "Annual_Damage_Min", "Annual_Damage_Max",
"Asset_Value", "Flood_Protection",
"Percent_Damage_Avg", "Percent_Damage_Min", "Percent_Damage_Max",
"CC_Driver_Avg", "CC_Driver_Min", "CC_Driver_Max",
"Soc_Driver", "Sub_Driver"]
df_final = pd.DataFrame(index=self.ys, columns=colNames)
for d in range(0, len(datalist)):
selData = dataframe[[col for col in dataframe.columns.tolist() if (datalist[d] in col)]]
if len(selData.values[0]) == 3:
df_final[colNames[d]][1:] = selData.values[0]
else:
df_final[colNames[d]] = selData.values[0]
return df_final
def find_assets(self):
"""
Purpose: Find total asset value
Output:
df_aggregate = Annual impacts for each year for user-selected geographical unit
"""
# Create term to filter out unnecessary results. Drop SSP2 data if scenario
# is pessemistic. Else, drop SSP3
dropex = "ssp2" if self.scen_abb == "pes" else "ssp3"
assts = self.df_precalc[[col for col in self.df_precalc.columns.tolist() if
(self.exposure in col) and (self.scen_abb in col) and ("ast" in col) and (
dropex not in col)]]
return assts.reset_index(drop=True)
def run_stats(self, dataframe):
"""
Purpose: Finds the average, min, and max impact for all impact types
Input:
dataframe: Data associated with flood, geography, exposure type for all climate models
Output:
Dataframe with average impact data for each year for each impact type. Also includes min and max (uncertainity)
"""
# Create dataframe to hold final data
df_final = pd.DataFrame(index=dataframe.index)
# Define column field name structure
colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
# Run following analysis for each year and impact type
for y in self.ys:
for t in ["cc", "soc", "sub", "tot", "prot"]:
df_filt = dataframe[[col for col in dataframe.columns if (t in col) and (y in col)]]
df_final[colFormat(self.exposure, y, self.scen_abb, t, "avg")] = df_filt.mean(axis=1)
if y != '2010' and t == "tot" or y != '2010' and t == 'cc':
df_final[colFormat(self.exposure, y, self.scen_abb, t, "min")] = df_filt.min(axis=1)
df_final[colFormat(self.exposure, y, self.scen_abb, t, "max")] = df_filt.max(axis=1)
df_final.replace(np.nan, 0, inplace=True)
return df_final
    def ratio_to_total(self, dataframe):
        """
        Purpose: Finds the impact attributed to climate change only, socioecon only, and subsidence only
        Input:
            inData: Annual expected impact data (found using default_risk function)
            mods:  All possible climate models
        Output:
            Dataframe with final impact data for each year for each impact type. Column name also specifies given model

        Method: for each projection year, each driver's share of the total
        change since 2010 is its own change divided by the sum of all driver
        changes, scaled by the total change. Driver changes whose sign
        disagrees with the total change are clamped to 0 first.
        """
        # Create dataframe to hold final data
        df_final = pd.DataFrame(index=dataframe.index)
        # Run analysis for each climate model and each year past 2010
        colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
        # 2010 baseline: copy protection and total straight through
        df_final[colFormat(self.exposure, "2010", self.scen_abb, "prot", "avg")] = dataframe[
            colFormat(self.exposure, "2010", self.scen_abb, "prot", "avg")]
        tot2010 = dataframe[colFormat(self.exposure, "2010", self.scen_abb, "tot", "avg")]
        df_final[colFormat(self.exposure, "2010", self.scen_abb, "tot", "avg")] = tot2010
        for y in self.ys[1:]:
            # Filter data year
            df_filt = dataframe[[col for col in dataframe.columns if (y in col)]]
            # Total impact for selected year is already calculated
            df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "avg")] = dataframe[
                colFormat(self.exposure, y, self.scen_abb, "tot", "avg")]
            df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "min")] = dataframe[
                colFormat(self.exposure, y, self.scen_abb, "tot", "min")]
            df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "max")] = dataframe[
                colFormat(self.exposure, y, self.scen_abb, "tot", "max")]
            # Find the difference from each impact to the 2010 baseline data
            df_filt['tot_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "tot",
                                                      "avg")] - tot2010  # Total impact
            df_filt['cc_diff_avg'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc",
                                                         "avg")] - tot2010  # Total impact
            df_filt['cc_diff_min'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc",
                                                         "min")] - tot2010  # Total impact
            df_filt['cc_diff_max'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc",
                                                         "max")] - tot2010  # Total impact
            df_filt['soc_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "soc",
                                                      "avg")] - tot2010  # Total impact#Soc only impact
            df_filt['sub_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "sub",
                                                      "avg")] - tot2010  # Total impact #Subsidence only impact
            # Correct for values if impact is less than 2010 baseline data
            # (a driver delta may not oppose the sign of the total delta)
            df_filt['cc_diff_avg'] = np.where(df_filt['tot_diff'] > 0,
                                              np.where(df_filt['cc_diff_avg'] < 0, 0, df_filt['cc_diff_avg']),
                                              np.where(df_filt['cc_diff_avg'] > 0, 0, df_filt['cc_diff_avg']))
            df_filt['cc_diff_min'] = np.where(df_filt['tot_diff'] > 0,
                                              np.where(df_filt['cc_diff_min'] < 0, 0, df_filt['cc_diff_min']),
                                              np.where(df_filt['cc_diff_min'] > 0, 0, df_filt['cc_diff_min']))
            df_filt['cc_diff_max'] = np.where(df_filt['tot_diff'] > 0,
                                              np.where(df_filt['cc_diff_max'] < 0, 0, df_filt['cc_diff_max']),
                                              np.where(df_filt['cc_diff_max'] > 0, 0, df_filt['cc_diff_max']))
            df_filt['soc_diff'] = np.where(df_filt['tot_diff'] > 0,
                                           np.where(df_filt['soc_diff'] < 0, 0, df_filt['soc_diff']),
                                           np.where(df_filt['soc_diff'] > 0, 0, df_filt['soc_diff']))
            df_filt['sub_diff'] = np.where(df_filt['tot_diff'] > 0,
                                           np.where(df_filt['sub_diff'] < 0, 0, df_filt['sub_diff']),
                                           np.where(df_filt['sub_diff'] > 0, 0, df_filt['sub_diff']))
            if self.sub_abb == "nosub":
                df_filt['sub_diff'] = 0
            # Find the ratio of impact attributed to each impact cause ( use the difference from 2010, not the absolute impact)
            # The tiny epsilon (.000000001) guards against division by zero.
            # Climate change only = (CC Only) / ( CC Only + Socio Only + Sub Only) * Total Impact
            df_final[colFormat(self.exposure, y, self.scen_abb, "cc", "avg")] = (df_filt['cc_diff_avg'] / (
                    df_filt['cc_diff_avg'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
                                                                                    'tot_diff']
            df_final[colFormat(self.exposure, y, self.scen_abb, "cc", "min")] = (df_filt['cc_diff_min'] / (
                    df_filt['cc_diff_min'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
                                                                                    'tot_diff']
            df_final[colFormat(self.exposure, y, self.scen_abb, "cc", "max")] = (df_filt['cc_diff_max'] / (
                    df_filt['cc_diff_max'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
                                                                                    'tot_diff']
            # Socioecon change only = (Soc Only) / ( CC Only + Socio Only + Sub Only) * Total Impact
            df_final[colFormat(self.exposure, y, self.scen_abb, "soc", "avg")] = (df_filt['soc_diff'] / (
                    df_filt['cc_diff_avg'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
                                                                                     'tot_diff']
            # Subsidence change only = (Sub Only) / ( CC Only + Socio Only + Sub Only) * Total Impact
            df_final[colFormat(self.exposure, y, self.scen_abb, "sub", "avg")] = (df_filt['sub_diff'] / (
                    df_filt['cc_diff_avg'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
                                                                                     'tot_diff']
            df_final[colFormat(self.exposure, y, self.scen_abb, "prot", "avg")] = dataframe[
                colFormat(self.exposure, y, self.scen_abb, "prot", "avg")]
        # Replace any nulls with 0
        df_final.replace(np.nan, 0, inplace=True)
        return df_final
@staticmethod
def expected_value(values, RPs, RP_zero, RP_infinite):
"""
Purpose: Annual expected image/damage for given time period
Input:
values: Impact per return period
2D array MxN
M: several time periods
N: several return periods
RPs: return periods (equal to length of N)
RP_zero: return period at which to break the EP-curve to zero (i.e. protection standard)
RP_infinite: return period close to the infinitely high return period
Output:
vector with expected values for each time period
"""
# append the return period at which maximum impact occurs, normally this is set to 1e6 years
RPs = np.append(np.array(RPs), RP_infinite)
# derive the probabilities associated with return periods
prob = 1. / RPs
values = np.array(values)
# append infinite impact (last value in array) to array. Simply copy the last value.
values = np.append(values, values[-1])
# now make a smooth function (function relates prob (on x) to projected future impact (y))
values_func = interp1d(prob, values)
# Returns 10,000 evenly spaced probabilities from most likely prob to most extreme
prob_smooth = np.linspace(prob[0], prob[-1], 10000)
# Insert these probabilites into "smooth function" to find their related impact
values_smooth = values_func(prob_smooth)
# Set all impacts above thres (protection standard) to zero
values_smooth[prob_smooth > 1. / RP_zero] = 0.
# compute expected values from return period values:
# Integrate under curve to find sum of all impact
exp_val = np.trapz(np.flipud(values_smooth), np.flipud(prob_smooth))
# print "Values, RP, Exp Value", values, RP_zero, exp_val,
return exp_val
@staticmethod
def interp_value(x, y, x_i, min_x=-np.Inf, max_x=np.Inf):
"""
Purpose: Find impacts associated with given protection standard
OR Find probability associated with a given impact
Allows for extrapolation to find new Y given user-defined X
Do a linear inter/extrapolation of y(x) to find a value y(x_idx)
"""
### OLD CODE
# Creates a function that relates X and Y and allows for extrapolation to find new Y given user-defined X
# y_interp = extrap1d(interp1d(np.array(x), np.array(y), axis=0))
# return y_interp(np.maximum(np.minimum(np.atleast_1d(x_i), max_x), min_x))
# -#-#-#-#-#-#-#-#-#-#-#-#-#
### NEW CODE
# interpolation only! return y min/max if out of bounds
x = np.atleast_1d(x)
y = np.atleast_1d(y)
f = interp1d(x, y, fill_value=(y.min(), y.max()), bounds_error=False)
y_new = f(x_i)
return y_new
@staticmethod
def extrap1d(interpolator):
"""
Purpose: Make an extrapolation function
"""
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
# If new prob is smaller than smallest prob in function
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
# If new prob is larger than largest prob in function
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
# If prob falls within set range of prob in function
else:
return interpolator(x)
def ufunclike(xs):
return np.fromiter(map(pointwise, np.array(xs)))
return ufunclike
def compute_rp_change(self, ref_impact, target_impact, rp, min_rp=2, max_rp=1000):
"""
Purpose: Compute how return period protection changes from one impact
distribution to another (e.g. present to future)
Input:
rps: return periods of impacts
ref_impact: set of reference impact
target_impacts: impacts to which protection standard should be mapped
(i.e. year the flood protection should be valid in)
rp, protection standard at reference impacts
"""
### NEW CODE
if target_impact.sum() == 0:
new_prot = np.nan
else:
# interpolate to estimate impacts at protection level 'rp'
prot_impact = self.interp_value(self.rps, ref_impact, rp)
new_prot = self.interp_value(target_impact, self.rps, prot_impact)
return new_prot
    def find_impact(self, impact_cc, impact_soc, impact_sub, impact_cc_soc, impact_urb, model):
        """
        Purpose: Finds annual impacts for climate only, socio only, subsidence only, and all scenarios together
        Input:
            impact_cc: Climate change only impacts.Variable consists of 4 dataframes (one for each year)
            impact_soc: Socioecon change only impacts. Variable consists of 4 dataframes (one for each year)
            impact_sub: Subsidence only impacts. Variable consists of 4 dataframes (one for each year)
            impact_cc_soc: Total impacts. Variable consists of 4 dataframes (one for each year)
            impact_urb: Climate change only impacts to urban damage. Variable consists of 4 dataframes (one for each year)
            model = Climate change model associated with input data
        Output:
            Dataframe with raw annual impact data for each year for each impact type. Column name also specifies given model
        """
        # Create dataframes to hold expected impact (for each model and year)
        col = [model + x + j for x in ["_cc_", "_soc_", "_sub_", "_tot_", "_prot_"] for j in self.ys]
        model_imps = pd.DataFrame(index=[self.geogunit_name], columns=col)
        # Perform for each year we have impact data
        for y, imp_cc, imp_soc, imp_sub, imp_cc_soc, imp_urb in zip(self.ys, impact_cc, impact_soc, impact_sub,
                                                                    impact_cc_soc, impact_urb):
            # No transformation needed in 2010
            if y == '2010':
                prot_trans = self.prot_pres
            else:
                # Find how the flood protection changes over time
                # (maps today's protection onto the year's urban-damage curve)
                prot_trans = self.compute_rp_change(impact_urb[0], imp_urb.values[0], self.prot_pres,
                                                    min_rp=min(self.rps), max_rp=max(self.rps))  # i.e. RP_zero
            # Find the annual expected damage with the new protection standard
            # (1e5 approximates the "infinite" return period closing the curve)
            model_imps.loc[self.geogunit_name, [model + "_cc_" + y]] = self.expected_value(imp_cc.values[0], self.rps,
                                                                                           prot_trans, 1e5)
            model_imps.loc[self.geogunit_name, [model + "_soc_" + y]] = self.expected_value(imp_soc.values[0], self.rps,
                                                                                            prot_trans, 1e5)
            model_imps.loc[self.geogunit_name, [model + "_sub_" + y]] = self.expected_value(imp_sub, self.rps,
                                                                                            prot_trans, 1e5)
            model_imps.loc[self.geogunit_name, [model + "_tot_" + y]] = self.expected_value(imp_cc_soc.values[0],
                                                                                            self.rps, prot_trans, 1e5)
            model_imps.loc[self.geogunit_name, [model + "_prot_" + y]] = prot_trans
        return model_imps
def select_projection_data(self, dataframe, climate, model, socioecon, year):
"""
Purpose: Pull all historical (2010) raw data
Input:
dataframe: Raw data associated with user-defined flood, geographic unit and exposure
climate = Climate scenario
model = Climate model
socioecon = Socioeconomic scenario
sub_scenario: Is subsidence considered? Yes or No
year: 2030, 2050, or 2080
Output:mpact data for each return period for given year
Dataframe with raw ir
"""
# Select data using year, subsidence type, climate scen, socioecon scen, model
# CHANGEDIT
selCol = climate + "_" + model + "_" + socioecon + "_" + self.sub_abb + "_" + year
#logging.debug(selCol)
# selData = dataframe[[col for col in dataframe.index.tolist() if selCol in col]]
selData = dataframe[[col for col in dataframe.columns if (selCol in col) and ("rp00001" not in col)]]
# selData = dataframe[[col for col in dataframe.columns if (model in col) and (socioecon in col) and (climate in col) and (year in col) and ("rp00001" not in col)]]
#logging.debug(f'[RISK SERVICE - select_projection_data]: {selData}')
return selData
    def calc_risk(self):
        """
        Purpose: Runs analysis on the fly instead of using precalcuted results
                (For when users define current protection level, find annual impact themselves)
        Output:
            df_aggregate = aggregated annual impacts for each year
        """
        # READ IN DATA
        # File name format for raw data
        inFormat = 'raw_agg_{:s}_{:s}_{:s}'.format
        fn = inFormat(self.flood, self.geogunit_type, self.exposure)
        # URBAN DAMAGE DATA (always needed to translate protection standards)
        urbfn = inFormat(self.flood, self.geogunit_type, "urban_damage_v2")
        # Filter by geographic name
        df_raw = pd.read_sql_query("SELECT * FROM {0} where id = '{1}' ".format(fn, self.geogunit_name), self.engine,
                                   index_col='id')
        df_urb = pd.read_sql_query("SELECT * FROM {0} where id = '{1}' ".format(urbfn, self.geogunit_name), self.engine,
                                   index_col='id')
        logging.info(f'[RISK SERVICE - calc_risk]: urbfn => {urbfn} fn => {fn}')
        logging.debug('[RISK SERVICE - calc_risk]: prot_press => ' + str(self.prot_pres))
        # Find impact for each model
        model_impact = pd.DataFrame(index=[self.geogunit_name])
        # Find model options associated with flood type
        # ("historical" columns are tagged with this baseline model name)
        modsT = '95' if self.flood == 'coastal' else 'wt'
        for m in self.mods:
            # Per-year lists of raw impacts for each driver combination
            cc_raw, soc_raw, sub_raw, cc_soc_raw, urb_raw = [], [], [], [], []
            for y in self.ys:
                logging.debug('[RISK SERVICE - calc_risk]: prot_press1 => ' + str(self.prot_pres))
                dfsub_a = []
                # 2010 DATA: the same historical slice serves every driver
                if y == '2010':
                    # Pull historical raw data
                    histData = self.select_projection_data(df_raw, "histor", modsT, "base", y)
                    cc_raw.append(histData)
                    soc_raw.append(histData)
                    cc_soc_raw.append(histData)
                    urb_raw.append(self.select_projection_data(df_urb, "histor", modsT, "base", y))
                    dfsub = histData
                # 2030, 2050, 2080 DATA
                else:
                    cc_raw.append(
                        self.select_projection_data(df_raw, self.clim, m, "base", y))  # Add to climate change only list
                    soc_raw.append(self.select_projection_data(df_raw, "histor", modsT, self.socio,
                                                               y))  # Add to socieco change only list
                    cc_soc_raw.append(self.select_projection_data(df_raw, self.clim, m, self.socio,
                                                                  y))  # Add to subsid change only list
                    urb_raw.append(
                        self.select_projection_data(df_urb, self.clim, m, "base", y))  # Add data using urban data
                    dfsub = self.select_projection_data(df_raw, "histor", modsT, "base",
                                                        y)  # Add to socieco change only list
                #logging.debug(f'[RISK SERVICE - calc_risk]: {dfsub.columns}')
                if not dfsub.empty:
                    # Flatten the one-row selection into a Series keyed by return period
                    dfsub_a = pd.melt(dfsub, value_vars=dfsub.columns)
                    sub_raw.append(pd.Series(name=self.geogunit_name, index=self.rps, data=dfsub_a["value"].tolist()))
            #logging.debug(f'[RISK SERVICE - calc_risk]: {sub_raw}')
            if self.sub_scenario == False:
                # Subsidence disabled: substitute zero impact for all four years
                sub_raw = []
                dfsub = pd.Series(name=self.geogunit_name, index=self.rps, data=0)
                sub_raw.extend([dfsub for i in range(4)])
            #logging.debug(f'[RISK SERVICE - calc_risk]: {len(sub_raw)}, {len(sub_raw[0])}')
            #logging.debug(f'[RISK SERVICE - calc_risk]: {type(sub_raw[0])}')
            outData = self.find_impact(cc_raw, soc_raw, sub_raw, cc_soc_raw, urb_raw, m)
            model_impact = model_impact.join(outData)
        # Aggregate across models, attribute change to drivers, attach assets
        df_stats = self.run_stats(model_impact)
        df_ratio = self.ratio_to_total(df_stats)
        assets = self.find_assets()
        df_risk = df_ratio.loc[self.geogunit_name]
        df_risk = df_risk.append(assets.T)
        # 2010 data: derive percent damage (NaN when assets < impact)
        colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
        ast = df_risk.loc[colFormat(self.exposure, '2010', self.scen_abb, "ast", "tot")]
        imp = df_risk.loc[colFormat(self.exposure, '2010', self.scen_abb, "tot", "avg")]
        per = np.where(ast < imp, np.nan, imp / ast * 100)
        df_risk = pd.concat(
            [df_risk, pd.Series(per, index=[colFormat(self.exposure, '2010', self.scen_abb, "per", "avg")])])
        for y in self.ys[1:]:
            ast = df_risk.loc[colFormat(self.exposure, y, self.scen_abb, "ast", "tot")]
            for t in ["avg", "min", "max"]:
                imp = df_risk.loc[colFormat(self.exposure, y, self.scen_abb, "tot", t)]
                per = np.where(ast < imp, np.nan, imp / ast * 100)
                df_risk = pd.concat(
                    [df_risk, pd.Series(per, index=[colFormat(self.exposure, y, self.scen_abb, "per", t)])])
        logging.debug('[RISK SERVICE - calc_risk]: prot_press3 => ' + str(self.prot_pres))
        return df_risk.T
    def precalc_risk(self):
        """Return precalculated risk columns for the selected unit.

        Selects the exposure/scenario columns from df_precalc; when the
        exposure is not urban_damage_v2 the protection columns (which are
        stored under the urban_damage_v2 prefix) are renamed to the chosen
        exposure and appended. Dutch units get a hardwired 1000-year 2010
        protection standard.
        """
        # Filter by
        # we have set self.exposure as urban Damage
        logging.info('[RISK, precalc in]')
        logging.debug('[RISK]: ' + str(self.prot_pres))
        df_risk = self.df_precalc[
            [col for col in self.df_precalc.columns.tolist() if (self.exposure in col) and (self.scen_abb in col)]]
        if self.exposure != 'urban_damage_v2':
            # Protection standards live under the urban_damage_v2 prefix;
            # re-label them so they align with the selected exposure's columns.
            df_prot = self.df_precalc[
                [col for col in self.df_precalc.columns.tolist() if ("prot" in col) and (self.scen_abb in col)]]
            columnsD = [col for col in self.df_precalc.columns.tolist() if ("urban_damage_v2" in col)]
            df_prot.rename(
                columns=dict(zip(columnsD, [cols.replace("urban_damage_v2", self.exposure) for cols in columnsD])),
                inplace=True)
            df_risk = pd.concat([df_risk, df_prot], axis=1, sort=False)
        if self.geogunit_name in ['Noord-Brabant, Netherlands', 'Zeeland, Netherlands', 'Zeeuwse meren, Netherlands', 'Zuid-Holland, Netherlands', 'Drenthe, Netherlands', 'Flevoland, Netherlands', 'Friesland, Netherlands', 'Gelderland, Netherlands', 'Groningen, Netherlands', 'IJsselmeer, Netherlands', 'Limburg, Netherlands', 'Noord-Holland, Netherlands', 'Overijssel, Netherlands', 'Utrecht, Netherlands', "Netherlands"]:
            logging.info(df_risk)
            df_risk[self.exposure + "_2010_" + self.scen_abb + "_prot_avg"] = 1000
        return df_risk
@cached_property
def meta(self):
return {"flood": self.flood,
"geogunit_name": self.geogunit_name,
"geogunit_type": self.geogunit_type,
"Scenario": self.scenario,
"Exposure": self.exposure,
"Average Protection": self.prot_pres if isinstance(self.prot_pres, int) else self.prot_pres.values[0][0]
}
def getRisk(self):
# Run risk data analysis based on user-inputs
try:
if self.risk_analysis == "precalc":
logging.info('[RISK, precalc]')
risk_data = self.precalc_risk()
else:
risk_data = self.calc_risk()
return self.format_risk(risk_data)
except Exception as e:
logging.error('[RISK]: ' + str(e))
raise Error('[RISK] Computation failed: '+ str(e))
def get_widget(self, argument):
method_name = 'widget_' + str(argument)
method = getattr(self, method_name, lambda: "Widget not found")
return method()
def widget_table(self):
return {'widgetId': 'table', 'chart_type': 'table', 'meta': self.meta, 'data': self.getRisk().reset_index()[
['index', 'Annual_Damage_Avg', 'Asset_Value', 'Percent_Damage_Avg', 'Flood_Protection']].to_dict('records')}
def widget_annual_flood(self):
return {'widgetId': 'annual_flood', 'chart_type': 'annual_flood', 'meta': self.meta,
'data': self.getRisk().reset_index()[
['index', 'Annual_Damage_Avg', 'Annual_Damage_Min', 'Annual_Damage_Max', 'Percent_Damage_Avg',
'Percent_Damage_Min', 'Percent_Damage_Max']].to_dict('records')}
def widget_flood_drivers(self):
return {'widgetId': 'flood_drivers', 'chart_type': 'flood_drivers', 'meta': self.meta,
'data': self.getRisk().reset_index()[
['index', 'Annual_Damage_Avg', 'Annual_Damage_Min', 'Annual_Damage_Max', 'Percent_Damage_Avg',
'Percent_Damage_Min', 'Percent_Damage_Max', 'CC_Driver_Avg', 'CC_Driver_Min', 'CC_Driver_Max',
'Soc_Driver', 'Sub_Driver']].to_dict('records')}
    def widget_benchmark(self):
        """Benchmark widget payload: per-unit, per-year totals, percents and
        protection standards melted into long-format records."""
        benchData = self.bench().reset_index()
        # Long-format protection standards: one row per (unit, year)
        per = pd.melt(benchData[['id', 'bench_2010_prot_avg', 'bench_2030_prot_avg', 'bench_2050_prot_avg',
                           'bench_2080_prot_avg']], id_vars=['id'],
                      value_vars=['bench_2010_prot_avg', 'bench_2030_prot_avg', 'bench_2050_prot_avg',
                                  'bench_2080_prot_avg'], var_name='c', value_name='prot')
        per['year'] = per.c.str.split('_').str.get(1)
        # Long-format totals and percents: one row per (unit, year, type)
        tot = pd.melt(benchData[
                        ['id', 'bench_2010_tot_avg', 'bench_2030_tot_avg', 'bench_2050_tot_avg', 'bench_2080_tot_avg',
                         'bench_2010_per_avg', 'bench_2030_per_avg', 'bench_2050_per_avg', 'bench_2080_per_avg']],
                      id_vars=['id'], value_vars=['bench_2010_per_avg', 'bench_2030_per_avg', 'bench_2050_per_avg',
                                                  'bench_2080_per_avg', 'bench_2010_tot_avg', 'bench_2030_tot_avg',
                                                  'bench_2050_tot_avg', 'bench_2080_tot_avg'], var_name='c1',
                      value_name='value')
        tot['year'] = tot['c1'].str.split('_').str.get(1)
        tot['type'] = tot['c1'].str.split('_').str.get(2)
        # Attach the matching protection standard to every value row
        fData = per.merge(tot, how='right', left_on=['id', 'year'], right_on=['id', 'year'])
        return {'widgetId': "benchmark", "chart_type": "benchmark", "meta": self.meta,
                "data": fData.reset_index()[['id', 'year', 'type', 'value', 'prot']].to_dict('records')}
def widget_lp_curve(self):
return {'widgetId': "lp_curve", "chart_type": "lp_curve", "meta": self.meta,
"data": self.lp_data().to_dict('records')}
| [
"pandas.Series",
"sqlalchemy.Table",
"os.getenv",
"numpy.flipud",
"numpy.where",
"scipy.interpolate.interp1d",
"numpy.append",
"sqlalchemy.MetaData",
"numpy.array",
"numpy.linspace",
"pandas.concat",
"pandas.melt",
"pandas.DataFrame",
"logging.info",
"numpy.atleast_1d"
] | [((393, 430), 'sqlalchemy.MetaData', 'sqlalchemy.MetaData', ([], {'bind': 'self.engine'}), '(bind=self.engine)\n', (412, 430), False, 'import sqlalchemy\n'), ((6995, 7027), 'logging.info', 'logging.info', (['"""[RISK]: lp_curve"""'], {}), "('[RISK]: lp_curve')\n", (7007, 7027), False, 'import logging\n'), ((9216, 9261), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.ys', 'columns': 'colNames'}), '(index=self.ys, columns=colNames)\n', (9228, 9261), True, 'import pandas as pd\n'), ((10761, 10796), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dataframe.index'}), '(index=dataframe.index)\n', (10773, 10796), True, 'import pandas as pd\n'), ((12126, 12161), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dataframe.index'}), '(index=dataframe.index)\n', (12138, 12161), True, 'import pandas as pd\n'), ((19379, 19395), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (19387, 19395), True, 'import numpy as np\n'), ((19506, 19535), 'numpy.append', 'np.append', (['values', 'values[-1]'], {}), '(values, values[-1])\n', (19515, 19535), True, 'import numpy as np\n'), ((19657, 19679), 'scipy.interpolate.interp1d', 'interp1d', (['prob', 'values'], {}), '(prob, values)\n', (19665, 19679), False, 'from scipy.interpolate import interp1d\n'), ((19793, 19830), 'numpy.linspace', 'np.linspace', (['prob[0]', 'prob[-1]', '(10000)'], {}), '(prob[0], prob[-1], 10000)\n', (19804, 19830), True, 'import numpy as np\n'), ((21191, 21207), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (21204, 21207), True, 'import numpy as np\n'), ((21220, 21236), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (21233, 21236), True, 'import numpy as np\n'), ((24280, 24333), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[self.geogunit_name]', 'columns': 'col'}), '(index=[self.geogunit_name], columns=col)\n', (24292, 24333), True, 'import pandas as pd\n'), ((28389, 28462), 'logging.info', 'logging.info', (['f"""[RISK SERVICE - calc_risk]: urbfn => 
{urbfn} fn => {fn}"""'], {}), "(f'[RISK SERVICE - calc_risk]: urbfn => {urbfn} fn => {fn}')\n", (28401, 28462), False, 'import logging\n'), ((28613, 28653), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[self.geogunit_name]'}), '(index=[self.geogunit_name])\n', (28625, 28653), True, 'import pandas as pd\n'), ((31933, 31977), 'numpy.where', 'np.where', (['(ast < imp)', 'np.nan', '(imp / ast * 100)'], {}), '(ast < imp, np.nan, imp / ast * 100)\n', (31941, 31977), True, 'import numpy as np\n'), ((32807, 32841), 'logging.info', 'logging.info', (['"""[RISK, precalc in]"""'], {}), "('[RISK, precalc in]')\n", (32819, 32841), False, 'import logging\n'), ((36473, 36762), 'pandas.melt', 'pd.melt', (["benchData[['id', 'bench_2010_prot_avg', 'bench_2030_prot_avg',\n 'bench_2050_prot_avg', 'bench_2080_prot_avg']]"], {'id_vars': "['id']", 'value_vars': "['bench_2010_prot_avg', 'bench_2030_prot_avg', 'bench_2050_prot_avg',\n 'bench_2080_prot_avg']", 'var_name': '"""c"""', 'value_name': '"""prot"""'}), "(benchData[['id', 'bench_2010_prot_avg', 'bench_2030_prot_avg',\n 'bench_2050_prot_avg', 'bench_2080_prot_avg']], id_vars=['id'],\n value_vars=['bench_2010_prot_avg', 'bench_2030_prot_avg',\n 'bench_2050_prot_avg', 'bench_2080_prot_avg'], var_name='c', value_name\n ='prot')\n", (36480, 36762), True, 'import pandas as pd\n'), ((36903, 37369), 'pandas.melt', 'pd.melt', (["benchData[['id', 'bench_2010_tot_avg', 'bench_2030_tot_avg',\n 'bench_2050_tot_avg', 'bench_2080_tot_avg', 'bench_2010_per_avg',\n 'bench_2030_per_avg', 'bench_2050_per_avg', 'bench_2080_per_avg']]"], {'id_vars': "['id']", 'value_vars': "['bench_2010_per_avg', 'bench_2030_per_avg', 'bench_2050_per_avg',\n 'bench_2080_per_avg', 'bench_2010_tot_avg', 'bench_2030_tot_avg',\n 'bench_2050_tot_avg', 'bench_2080_tot_avg']", 'var_name': '"""c1"""', 'value_name': '"""value"""'}), "(benchData[['id', 'bench_2010_tot_avg', 'bench_2030_tot_avg',\n 'bench_2050_tot_avg', 'bench_2080_tot_avg', 'bench_2010_per_avg',\n 
'bench_2030_per_avg', 'bench_2050_per_avg', 'bench_2080_per_avg']],\n id_vars=['id'], value_vars=['bench_2010_per_avg', 'bench_2030_per_avg',\n 'bench_2050_per_avg', 'bench_2080_per_avg', 'bench_2010_tot_avg',\n 'bench_2030_tot_avg', 'bench_2050_tot_avg', 'bench_2080_tot_avg'],\n var_name='c1', value_name='value')\n", (36910, 37369), True, 'import pandas as pd\n'), ((342, 367), 'os.getenv', 'os.getenv', (['"""POSTGRES_URL"""'], {}), "('POSTGRES_URL')\n", (351, 367), False, 'import os\n'), ((19244, 19257), 'numpy.array', 'np.array', (['RPs'], {}), '(RPs)\n', (19252, 19257), True, 'import numpy as np\n'), ((20237, 20261), 'numpy.flipud', 'np.flipud', (['values_smooth'], {}), '(values_smooth)\n', (20246, 20261), True, 'import numpy as np\n'), ((20263, 20285), 'numpy.flipud', 'np.flipud', (['prob_smooth'], {}), '(prob_smooth)\n', (20272, 20285), True, 'import numpy as np\n'), ((33548, 33597), 'pandas.concat', 'pd.concat', (['[df_risk, df_prot]'], {'axis': '(1)', 'sort': '(False)'}), '([df_risk, df_prot], axis=1, sort=False)\n', (33557, 33597), True, 'import pandas as pd\n'), ((34034, 34055), 'logging.info', 'logging.info', (['df_risk'], {}), '(df_risk)\n', (34046, 34055), False, 'import logging\n'), ((14768, 14831), 'numpy.where', 'np.where', (["(df_filt['cc_diff_avg'] < 0)", '(0)', "df_filt['cc_diff_avg']"], {}), "(df_filt['cc_diff_avg'] < 0, 0, df_filt['cc_diff_avg'])\n", (14776, 14831), True, 'import numpy as np\n'), ((14879, 14942), 'numpy.where', 'np.where', (["(df_filt['cc_diff_avg'] > 0)", '(0)', "df_filt['cc_diff_avg']"], {}), "(df_filt['cc_diff_avg'] > 0, 0, df_filt['cc_diff_avg'])\n", (14887, 14942), True, 'import numpy as np\n'), ((15061, 15124), 'numpy.where', 'np.where', (["(df_filt['cc_diff_min'] < 0)", '(0)', "df_filt['cc_diff_min']"], {}), "(df_filt['cc_diff_min'] < 0, 0, df_filt['cc_diff_min'])\n", (15069, 15124), True, 'import numpy as np\n'), ((15172, 15235), 'numpy.where', 'np.where', (["(df_filt['cc_diff_min'] > 0)", '(0)', 
"df_filt['cc_diff_min']"], {}), "(df_filt['cc_diff_min'] > 0, 0, df_filt['cc_diff_min'])\n", (15180, 15235), True, 'import numpy as np\n'), ((15354, 15417), 'numpy.where', 'np.where', (["(df_filt['cc_diff_max'] < 0)", '(0)', "df_filt['cc_diff_max']"], {}), "(df_filt['cc_diff_max'] < 0, 0, df_filt['cc_diff_max'])\n", (15362, 15417), True, 'import numpy as np\n'), ((15465, 15528), 'numpy.where', 'np.where', (["(df_filt['cc_diff_max'] > 0)", '(0)', "df_filt['cc_diff_max']"], {}), "(df_filt['cc_diff_max'] > 0, 0, df_filt['cc_diff_max'])\n", (15473, 15528), True, 'import numpy as np\n'), ((15641, 15698), 'numpy.where', 'np.where', (["(df_filt['soc_diff'] < 0)", '(0)', "df_filt['soc_diff']"], {}), "(df_filt['soc_diff'] < 0, 0, df_filt['soc_diff'])\n", (15649, 15698), True, 'import numpy as np\n'), ((15743, 15800), 'numpy.where', 'np.where', (["(df_filt['soc_diff'] > 0)", '(0)', "df_filt['soc_diff']"], {}), "(df_filt['soc_diff'] > 0, 0, df_filt['soc_diff'])\n", (15751, 15800), True, 'import numpy as np\n'), ((15913, 15970), 'numpy.where', 'np.where', (["(df_filt['sub_diff'] < 0)", '(0)', "df_filt['sub_diff']"], {}), "(df_filt['sub_diff'] < 0, 0, df_filt['sub_diff'])\n", (15921, 15970), True, 'import numpy as np\n'), ((16015, 16072), 'numpy.where', 'np.where', (["(df_filt['sub_diff'] > 0)", '(0)', "df_filt['sub_diff']"], {}), "(df_filt['sub_diff'] > 0, 0, df_filt['sub_diff'])\n", (16023, 16072), True, 'import numpy as np\n'), ((31003, 31061), 'pandas.Series', 'pd.Series', ([], {'name': 'self.geogunit_name', 'index': 'self.rps', 'data': '(0)'}), '(name=self.geogunit_name, index=self.rps, data=0)\n', (31012, 31061), True, 'import pandas as pd\n'), ((32389, 32433), 'numpy.where', 'np.where', (['(ast < imp)', 'np.nan', '(imp / ast * 100)'], {}), '(ast < imp, np.nan, imp / ast * 100)\n', (32397, 32433), True, 'import numpy as np\n'), ((34728, 34759), 'logging.info', 'logging.info', (['"""[RISK, precalc]"""'], {}), "('[RISK, precalc]')\n", (34740, 34759), False, 'import 
logging\n'), ((22114, 22126), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (22122, 22126), True, 'import numpy as np\n'), ((30634, 30674), 'pandas.melt', 'pd.melt', (['dfsub'], {'value_vars': 'dfsub.columns'}), '(dfsub, value_vars=dfsub.columns)\n', (30641, 30674), True, 'import pandas as pd\n'), ((6418, 6459), 'sqlalchemy.Table', 'sqlalchemy.Table', (['inFormat', 'self.metadata'], {}), '(inFormat, self.metadata)\n', (6434, 6459), False, 'import sqlalchemy\n'), ((7097, 7126), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (7106, 7126), True, 'import pandas as pd\n'), ((8121, 8163), 'sqlalchemy.Table', 'sqlalchemy.Table', (['defaultfn', 'self.metadata'], {}), '(defaultfn, self.metadata)\n', (8137, 8163), False, 'import sqlalchemy\n')] |
"""Schefel26 1981 dataset tests.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
(c) <NAME> 2020, Citrine Informatics.
"""
import pytest
import numpy as np
import smlb
def test_schwefel26_1981_examples():
"""Tests instantiating and evaluating Schwefel26 (1981) datasets."""
from datasets.synthetic.schwefel26_1981.schwefel26_1981 import Schwefel261981Data
s1 = Schwefel261981Data(dimensions=1)
s2 = Schwefel261981Data(dimensions=2)
s9 = Schwefel261981Data(dimensions=9)
assert (s1.dimensions, s2.dimensions, s9.dimensions) == (1, 2, 9)
# results from Mathematica reference implementation
# minima
inp = np.asfarray([[420.9687] * 9])
assert np.allclose(s1.labels(inp[:,:1]), [0], atol=1e-4)
assert np.allclose(s2.labels(inp[:,:2]), [0], atol=1e-4)
assert np.allclose(s9.labels(inp[:,:9]), [0], atol=1e-3)
inp = np.asfarray(
[
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
[0.5, 0.4, 0.3, 0.2, 0.1, 0.9, 0.8, 0.7, 0.6],
]
)
assert np.allclose(s1.labels(inp[:,:1]), [418.9518016407093, 418.6580815304599], atol=1e-6)
assert np.allclose(s2.labels(inp[:,:2]), [837.8482106729184, 837.4045306835739], atol=1e-6)
assert np.allclose(s9.labels(inp[:,:9]), [3767.716410053263, 3767.716410053263], atol=1e-6)
# invalid inputs
with pytest.raises(smlb.InvalidParameterError):
s2.labels(inp)
| [
"numpy.asfarray",
"datasets.synthetic.schwefel26_1981.schwefel26_1981.Schwefel261981Data",
"pytest.raises"
] | [((442, 474), 'datasets.synthetic.schwefel26_1981.schwefel26_1981.Schwefel261981Data', 'Schwefel261981Data', ([], {'dimensions': '(1)'}), '(dimensions=1)\n', (460, 474), False, 'from datasets.synthetic.schwefel26_1981.schwefel26_1981 import Schwefel261981Data\n'), ((484, 516), 'datasets.synthetic.schwefel26_1981.schwefel26_1981.Schwefel261981Data', 'Schwefel261981Data', ([], {'dimensions': '(2)'}), '(dimensions=2)\n', (502, 516), False, 'from datasets.synthetic.schwefel26_1981.schwefel26_1981 import Schwefel261981Data\n'), ((526, 558), 'datasets.synthetic.schwefel26_1981.schwefel26_1981.Schwefel261981Data', 'Schwefel261981Data', ([], {'dimensions': '(9)'}), '(dimensions=9)\n', (544, 558), False, 'from datasets.synthetic.schwefel26_1981.schwefel26_1981 import Schwefel261981Data\n'), ((711, 740), 'numpy.asfarray', 'np.asfarray', (['[[420.9687] * 9]'], {}), '([[420.9687] * 9])\n', (722, 740), True, 'import numpy as np\n'), ((939, 1050), 'numpy.asfarray', 'np.asfarray', (['[[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.5, 0.4, 0.3, 0.2, 0.1, \n 0.9, 0.8, 0.7, 0.6]]'], {}), '([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.5, 0.4, 0.3,\n 0.2, 0.1, 0.9, 0.8, 0.7, 0.6]])\n', (950, 1050), True, 'import numpy as np\n'), ((1415, 1456), 'pytest.raises', 'pytest.raises', (['smlb.InvalidParameterError'], {}), '(smlb.InvalidParameterError)\n', (1428, 1456), False, 'import pytest\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Evaluate the logs of a random run."""
import json
from pathlib import Path
import humanize
import numpy as np
from absl import app, flags
import compiler_gym.util.flags.output_dir # Flag definition.
from compiler_gym.util import logs
from compiler_gym.util.statistics import geometric_mean
from compiler_gym.util.tabulate import tabulate
FLAGS = flags.FLAGS
def eval_logs(outdir: Path) -> None:
    """Summarize per-benchmark random-search logs and print a results table.

    Scans each subdirectory of ``outdir`` for a progress log and metadata
    file, collects per-benchmark statistics, and appends geometric-mean and
    arithmetic-average summary rows before printing a formatted table.

    :param outdir: Directory containing one results subdirectory per benchmark.
    """
    rows = []
    # Running aggregates across all benchmarks; rewards are collected as
    # lists so both geometric and arithmetic means can be computed later.
    totals = {
        "instructions": 0,
        "init_reward": [],
        "max_reward": [],
        "attempts": 0,
        "time": 0,
        "actions": 0,
    }
    for results_dir in sorted(outdir.iterdir()):
        benchmark = results_dir.name
        progress_path = results_dir / logs.PROGRESS_LOG_NAME
        meta_path = results_dir / logs.METADATA_NAME
        # Skip entries that are not complete results directories.
        if (
            not results_dir.is_dir()
            or not progress_path.is_file()
            or not meta_path.is_file()
        ):
            continue
        with open(meta_path, "rb") as f:
            meta = json.load(f)
        # The last line of the progress log holds the best result found.
        with open(str(progress_path)) as f:
            final_line = f.readlines()[-1]
        best = logs.ProgressLogEntry.from_csv(final_line)
        totals["instructions"] += meta["num_instructions"]
        totals["init_reward"].append(meta["init_reward"])
        totals["max_reward"].append(best.reward)
        totals["attempts"] += best.total_episode_count
        totals["time"] += best.runtime_seconds
        totals["actions"] += best.num_passes
        rows.append(
            (
                benchmark,
                humanize.intcomma(meta["num_instructions"]),
                f"{meta['init_reward']:.4f}",
                f"{best.reward:.4f}",
                (
                    f"{humanize.intcomma(best.total_episode_count)} attempts "
                    f"in {humanize.naturaldelta(best.runtime_seconds)}"
                ),
                humanize.intcomma(best.num_passes),
            )
        )
    # One reward was appended per benchmark processed.
    row_count = len(totals["init_reward"])
    # Geometric mean is the conventional aggregate for reward ratios.
    rows.append(
        (
            "Geomean",
            "",
            f"{geometric_mean(totals['init_reward']):.4f}",
            f"{geometric_mean(totals['max_reward']):.4f}",
            "",
            "",
        )
    )
    rows.append(
        (
            "Average",
            humanize.intcomma(int(totals["instructions"] / row_count)),
            f"{np.array(totals['init_reward']).mean():.4f}",
            f"{np.array(totals['max_reward']).mean():.4f}",
            (
                f"{humanize.intcomma(int(totals['attempts'] / row_count))} attempts "
                f"in {humanize.naturaldelta(totals['time'] / row_count)}"
            ),
            humanize.intcomma(int(totals["actions"] / row_count)),
        )
    )
    print(
        tabulate(
            rows,
            headers=(
                "Benchmark",
                "#. instructions",
                "Init Reward",
                "Max Reward",
                "Found after",
                "#. actions",
            ),
        )
    )
def main(argv):
    """Main entry point."""
    # absl's FLAGS() parses known flags and returns the remaining argv.
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
    # Normalize the user-supplied directory to an absolute, resolved path.
    raw_dir = Path(FLAGS.output_dir).expanduser()
    output_dir = raw_dir.resolve().absolute()
    assert output_dir.is_dir(), f"Directory not found: {output_dir}"
    eval_logs(output_dir)
if __name__ == "__main__":
app.run(main)
| [
"absl.app.UsageError",
"compiler_gym.util.logs.ProgressLogEntry.from_csv",
"pathlib.Path",
"compiler_gym.util.statistics.geometric_mean",
"absl.app.run",
"numpy.array",
"humanize.naturaldelta",
"json.load",
"humanize.intcomma",
"compiler_gym.util.tabulate.tabulate"
] | [((3583, 3596), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (3590, 3596), False, 'from absl import app, flags\n'), ((1303, 1345), 'compiler_gym.util.logs.ProgressLogEntry.from_csv', 'logs.ProgressLogEntry.from_csv', (['final_line'], {}), '(final_line)\n', (1333, 1345), False, 'from compiler_gym.util import logs\n'), ((2944, 3062), 'compiler_gym.util.tabulate.tabulate', 'tabulate', (['rows'], {'headers': "('Benchmark', '#. instructions', 'Init Reward', 'Max Reward', 'Found after',\n '#. actions')"}), "(rows, headers=('Benchmark', '#. instructions', 'Init Reward',\n 'Max Reward', 'Found after', '#. actions'))\n", (2952, 3062), False, 'from compiler_gym.util.tabulate import tabulate\n'), ((3317, 3378), 'absl.app.UsageError', 'app.UsageError', (['f"""Unknown command line arguments: {argv[1:]}"""'], {}), "(f'Unknown command line arguments: {argv[1:]}')\n", (3331, 3378), False, 'from absl import app, flags\n'), ((1187, 1199), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1196, 1199), False, 'import json\n'), ((1739, 1782), 'humanize.intcomma', 'humanize.intcomma', (["meta['num_instructions']"], {}), "(meta['num_instructions'])\n", (1756, 1782), False, 'import humanize\n'), ((2072, 2106), 'humanize.intcomma', 'humanize.intcomma', (['best.num_passes'], {}), '(best.num_passes)\n', (2089, 2106), False, 'import humanize\n'), ((2257, 2294), 'compiler_gym.util.statistics.geometric_mean', 'geometric_mean', (["totals['init_reward']"], {}), "(totals['init_reward'])\n", (2271, 2294), False, 'from compiler_gym.util.statistics import geometric_mean\n'), ((2317, 2353), 'compiler_gym.util.statistics.geometric_mean', 'geometric_mean', (["totals['max_reward']"], {}), "(totals['max_reward'])\n", (2331, 2353), False, 'from compiler_gym.util.statistics import geometric_mean\n'), ((2774, 2823), 'humanize.naturaldelta', 'humanize.naturaldelta', (["(totals['time'] / row_count)"], {}), "(totals['time'] / row_count)\n", (2795, 2823), False, 'import humanize\n'), ((1909, 1952), 
'humanize.intcomma', 'humanize.intcomma', (['best.total_episode_count'], {}), '(best.total_episode_count)\n', (1926, 1952), False, 'import humanize\n'), ((1991, 2034), 'humanize.naturaldelta', 'humanize.naturaldelta', (['best.runtime_seconds'], {}), '(best.runtime_seconds)\n', (2012, 2034), False, 'import humanize\n'), ((2546, 2577), 'numpy.array', 'np.array', (["totals['init_reward']"], {}), "(totals['init_reward'])\n", (2554, 2577), True, 'import numpy as np\n'), ((2607, 2637), 'numpy.array', 'np.array', (["totals['max_reward']"], {}), "(totals['max_reward'])\n", (2615, 2637), True, 'import numpy as np\n'), ((3397, 3419), 'pathlib.Path', 'Path', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (3401, 3419), False, 'from pathlib import Path\n')] |
"""
Objetivo: Resolver questão 1 do segundo laboratorio.
"""
from math import exp
import matplotlib.pyplot as plt
import numpy as np
def f(n):
    """Return I_n, computed by successive application of the recurrence.

    Starts from I_0 = (e - 1) / e and iterates
    I_c = 1 - I_{c-1} / (c + 1) up to c = n.
    """
    e = exp(1)
    current = 0
    for c in range(n + 1):
        if c == 0:
            current = (1 / e) * (e - 1)  # I_0
        else:
            current = 1 - (1 / (c + 1)) * current
    # After the loop, current holds I_n.
    return current
# Evaluate I_n for every n from 0 through 300.
n_values = np.arange(0, 301, 1)
i_values = [f(n) for n in n_values]

# Figure layout
plt.style.use('ggplot')
plt.figure(figsize=(7, 5))
plt.xlabel('Valores de n')
plt.ylabel('Valores de I')

# Draw the curve and display the figure.
plt.plot(n_values, i_values)
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"math.exp",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((485, 505), 'numpy.arange', 'np.arange', (['(0)', '(301)', '(1)'], {}), '(0, 301, 1)\n', (494, 505), True, 'import numpy as np\n'), ((609, 632), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (622, 632), True, 'import matplotlib.pyplot as plt\n'), ((633, 659), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (643, 659), True, 'import matplotlib.pyplot as plt\n'), ((660, 686), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Valores de n"""'], {}), "('Valores de n')\n", (670, 686), True, 'import matplotlib.pyplot as plt\n'), ((687, 713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Valores de I"""'], {}), "('Valores de I')\n", (697, 713), True, 'import matplotlib.pyplot as plt\n'), ((730, 744), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (738, 744), True, 'import matplotlib.pyplot as plt\n'), ((745, 763), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (761, 763), True, 'import matplotlib.pyplot as plt\n'), ((781, 791), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (789, 791), True, 'import matplotlib.pyplot as plt\n'), ((189, 195), 'math.exp', 'exp', (['(1)'], {}), '(1)\n', (192, 195), False, 'from math import exp\n')] |
import numpy as np
import pandas as pd
from tensorflow import keras
from ENUtransform import WGS84toENU, ENUtoWGS84
from PythonCode.Trajectory_Prediction.process import scale_data, reshape_data, get_inverse_transform
tREF = {"lon": 12.114733,
"lat": 54.145409,
"ECEF": np.array([[3660725], [785776], [514624]])
}
data_features = ['x', 'y', 'cog', 'sog']
data_dim = len(data_features)
enu_features = ['x', 'y', "z", 'cog', 'sog']
enu_dim = len(enu_features)
INPUT_LEN = 10 # same as timesteps
model_name = 'Seq2Seq_model_ENU.h5' # 'Seq2Seq_model_ENU_167.h5'
model = keras.models.load_model('/home/sing_sd/Desktop/anomaly_detection/PythonCode/Trajectory_Prediction/'+ model_name)
def load_data_trajectory(filename):
    """Load a trajectory CSV from the fixed KF data directory.

    :param filename: CSV file name, e.g. "Track167_interpolated_1min.csv".
    :return: file contents as a numpy array, or None if the file cannot
             be read.
    """
    base_dir = '/home/sing_sd/Desktop/anomaly_detection/PythonCode/KF/'
    full_path = base_dir + filename
    try:
        frame = pd.read_csv(full_path)
    except IOError:
        print("Error: File does not appear to exist for track ")
        return None
    return np.array(frame)
def convert2ENU(lon, lat):
    """Convert WGS84 longitude/latitude to local ENU coordinates.

    Uses the module-level reference point ``tREF`` with height fixed at 0.
    """
    return WGS84toENU(lon, lat, tREF, h=0.)
def convert2Degree(zENU):
    """Convert local ENU coordinates back to WGS84 degrees.

    ``zENU`` is coerced to a numpy array; the conversion uses the
    module-level reference point ``tREF``.
    """
    return ENUtoWGS84(np.array(zENU), tREF)
def data_preparation(xhat_past, data):
    """Interleave past state estimates and cog/sog into one feature row.

    Produces a (1, INPUT_LEN * enu_dim) array whose columns repeat the
    per-timestep pattern x, y, z, cog, sog.
    """
    row_len = INPUT_LEN * enu_dim
    features = np.full(shape=(1, row_len), fill_value=np.nan)
    # Columns 0..2 of each timestep slot come from the ENU state estimate.
    for axis in range(3):
        features[0, axis:row_len:enu_dim] = xhat_past[:, axis].ravel()
    # Columns 3 and 4 come from the raw data's cog and sog columns.
    features[0, 3:row_len:enu_dim] = np.transpose(data[:, 2])
    features[0, 4:row_len:enu_dim] = np.transpose(data[:, 3])
    return features
def predict_data(xhat_past, data):
    """Predict the next (x, y, z, cog, sog) feature values with the Seq2Seq model.

    Builds the interleaved feature row, scales it, runs the module-level
    Keras model, and returns the five un-scaled values of the first
    predicted timestep.
    """
    features = data_preparation(np.array(xhat_past), data)
    model_input = features.reshape(1, INPUT_LEN, enu_dim)
    model_input[0] = scale_data(model_input[0])
    raw_prediction = model.predict(model_input)
    # Undo the feature scaling applied before prediction.
    prediction = get_inverse_transform(raw_prediction[0])
    prediction.shape = (1, INPUT_LEN * enu_dim)
    # First five interleaved columns: predicted x, y, z, cog, sog.
    return prediction[0, 0], prediction[0, 1], prediction[0, 2], prediction[0, 3], prediction[0, 4]
| [
"ENUtransform.WGS84toENU",
"PythonCode.Trajectory_Prediction.process.get_inverse_transform",
"pandas.read_csv",
"numpy.array",
"tensorflow.keras.models.load_model",
"PythonCode.Trajectory_Prediction.process.scale_data",
"numpy.full",
"numpy.transpose"
] | [((600, 723), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (["('/home/sing_sd/Desktop/anomaly_detection/PythonCode/Trajectory_Prediction/' +\n model_name)"], {}), "(\n '/home/sing_sd/Desktop/anomaly_detection/PythonCode/Trajectory_Prediction/'\n + model_name)\n", (623, 723), False, 'from tensorflow import keras\n'), ((294, 335), 'numpy.array', 'np.array', (['[[3660725], [785776], [514624]]'], {}), '([[3660725], [785776], [514624]])\n', (302, 335), True, 'import numpy as np\n'), ((1132, 1165), 'ENUtransform.WGS84toENU', 'WGS84toENU', (['lon', 'lat', 'tREF'], {'h': '(0.0)'}), '(lon, lat, tREF, h=0.0)\n', (1142, 1165), False, 'from ENUtransform import WGS84toENU, ENUtoWGS84\n'), ((1331, 1380), 'numpy.full', 'np.full', ([], {'shape': '(1, in_clm_len)', 'fill_value': 'np.nan'}), '(shape=(1, in_clm_len), fill_value=np.nan)\n', (1338, 1380), True, 'import numpy as np\n'), ((1629, 1653), 'numpy.transpose', 'np.transpose', (['data[:, 2]'], {}), '(data[:, 2])\n', (1641, 1653), True, 'import numpy as np\n'), ((1698, 1722), 'numpy.transpose', 'np.transpose', (['data[:, 3]'], {}), '(data[:, 3])\n', (1710, 1722), True, 'import numpy as np\n'), ((1915, 1936), 'PythonCode.Trajectory_Prediction.process.scale_data', 'scale_data', (['X_test[0]'], {}), '(X_test[0])\n', (1925, 1936), False, 'from PythonCode.Trajectory_Prediction.process import scale_data, reshape_data, get_inverse_transform\n'), ((2023, 2061), 'PythonCode.Trajectory_Prediction.process.get_inverse_transform', 'get_inverse_transform', (['test_predict[0]'], {}), '(test_predict[0])\n', (2044, 2061), False, 'from PythonCode.Trajectory_Prediction.process import scale_data, reshape_data, get_inverse_transform\n'), ((1215, 1229), 'numpy.array', 'np.array', (['zENU'], {}), '(zENU)\n', (1223, 1229), True, 'import numpy as np\n'), ((1818, 1837), 'numpy.array', 'np.array', (['xhat_past'], {}), '(xhat_past)\n', (1826, 1837), True, 'import numpy as np\n'), ((963, 985), 'pandas.read_csv', 'pd.read_csv', 
(['filename1'], {}), '(filename1)\n', (974, 985), True, 'import pandas as pd\n')] |
import unittest
import numpy as np
import matplotlib
matplotlib.use('Agg')
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.testing_utils import use_tempdirs
import dymos as dm
class _BrachistochroneTestODE(om.ExplicitComponent):
    """Brachistochrone ODE that also consumes every phase time quantity.

    Besides the usual velocity/angle inputs, it declares t_initial,
    t_duration, time_phase, and time as inputs so tests can verify that
    phase time targets are wired through to the ODE.
    """

    def initialize(self):
        self.options.declare('num_nodes', types=int)

    def setup(self):
        nn = self.options['num_nodes']

        # Inputs
        self.add_input('v', val=np.zeros(nn), desc='velocity', units='m/s')
        self.add_input('g', val=9.80665 * np.ones(nn), desc='grav. acceleration', units='m/s/s')
        self.add_input('theta', val=np.zeros(nn), desc='angle of wire', units='rad')
        self.add_input('t_initial', val=-88.0, desc='start time of phase', units='s')
        self.add_input('t_duration', val=-89.0, desc='total duration of phase', units='s')
        self.add_input('time_phase', val=np.zeros(nn), desc='elapsed time of phase', units='s')
        self.add_input('time', val=np.zeros(nn), desc='time of phase', units='s')

        # Outputs
        self.add_output('xdot', val=np.zeros(nn), desc='velocity component in x', units='m/s')
        self.add_output('ydot', val=np.zeros(nn), desc='velocity component in y', units='m/s')
        self.add_output('vdot', val=np.zeros(nn), desc='acceleration magnitude', units='m/s**2')
        self.add_output('check', val=np.zeros(nn), desc='check solution: v/sin(theta) = constant',
                        units='m/s')

        # Every partial is diagonal across the nodes.
        arange = np.arange(self.options['num_nodes'])
        for of, wrt in (('vdot', 'g'), ('vdot', 'theta'),
                        ('xdot', 'v'), ('xdot', 'theta'),
                        ('ydot', 'v'), ('ydot', 'theta'),
                        ('check', 'v'), ('check', 'theta')):
            self.declare_partials(of=of, wrt=wrt, rows=arange, cols=arange)

    def compute(self, inputs, outputs):
        g = inputs['g']
        v = inputs['v']
        ctheta = np.cos(inputs['theta'])
        stheta = np.sin(inputs['theta'])

        outputs['vdot'] = g * ctheta
        outputs['xdot'] = v * stheta
        outputs['ydot'] = -v * ctheta
        outputs['check'] = v / stheta

    def compute_partials(self, inputs, jacobian):
        g = inputs['g']
        v = inputs['v']
        ctheta = np.cos(inputs['theta'])
        stheta = np.sin(inputs['theta'])

        jacobian['vdot', 'g'] = ctheta
        jacobian['vdot', 'theta'] = -g * stheta
        jacobian['xdot', 'v'] = stheta
        jacobian['xdot', 'theta'] = v * ctheta
        jacobian['ydot', 'v'] = -ctheta
        jacobian['ydot', 'theta'] = v * stheta
        jacobian['check', 'v'] = 1 / stheta
        jacobian['check', 'theta'] = -v * ctheta / stheta**2
@use_tempdirs
class TestPhaseTimeTargets(unittest.TestCase):
    """Verify that t_initial, t_duration, time_phase, and time are correctly
    connected to their ODE targets under both transcriptions, and that
    simulation propagates the same values to each segment's ODE interface."""
    def _make_problem(self, transcription, num_seg, transcription_order=3):
        # Build a brachistochrone optimal-control problem whose ODE consumes
        # every phase time quantity, using the requested transcription.
        p = om.Problem(model=om.Group())
        p.driver = om.ScipyOptimizeDriver()
        # Compute sparsity/coloring when run_driver is called
        p.driver.declare_coloring()
        t = {'gauss-lobatto': dm.GaussLobatto(num_segments=num_seg, order=transcription_order),
             'radau-ps': dm.Radau(num_segments=num_seg, order=transcription_order)}
        phase = dm.Phase(ode_class=_BrachistochroneTestODE, transcription=t[transcription])
        p.model.add_subsystem('phase0', phase)
        # Route all four time quantities to the matching ODE inputs declared
        # by _BrachistochroneTestODE.
        phase.set_time_options(initial_bounds=(1, 1), duration_bounds=(.5, 10), units='s',
                               time_phase_targets=['time_phase'], t_duration_targets=['t_duration'],
                               t_initial_targets=['t_initial'], targets=['time'])
        phase.add_state('x', fix_initial=True, rate_source='xdot', units='m')
        phase.add_state('y', fix_initial=True, rate_source='ydot', units='m')
        phase.add_state('v', fix_initial=True, rate_source='vdot', targets=['v'], units='m/s')
        phase.add_control('theta', units='deg', rate_continuity=True, lower=0.01, upper=179.9, targets=['theta'])
        phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665, targets=['g'])
        phase.add_boundary_constraint('x', loc='final', equals=10)
        phase.add_boundary_constraint('y', loc='final', equals=5)
        # Minimize time at the end of the phase
        phase.add_objective('time', loc='final', scaler=10)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True)
        # Initial guesses: linear interpolation between boundary values.
        p['phase0.t_initial'] = 1.0
        p['phase0.t_duration'] = 3.0
        p['phase0.states:x'] = phase.interp('x', [0, 10])
        p['phase0.states:y'] = phase.interp('y', [10, 5])
        p['phase0.states:v'] = phase.interp('v', [0, 9.9])
        p['phase0.controls:theta'] = phase.interp('theta', [5, 100.5])
        return p
    def test_gauss_lobatto(self):
        # Gauss-Lobatto has separate discretization and collocation node sets;
        # check the time values seen by both RHS components.
        num_seg = 20
        p = self._make_problem('gauss-lobatto', num_seg)
        # Solve for the optimal trajectory
        p.run_driver()
        gd = p.model.phase0.options['transcription'].grid_data
        # Slice the full time vector onto each node subset.
        time_all = p['phase0.time']
        time_col = time_all[gd.subset_node_indices['col']]
        time_disc = time_all[gd.subset_node_indices['state_disc']]
        time_segends = np.reshape(time_all[gd.subset_node_indices['segment_ends']],
                                  newshape=(gd.num_segments, 2))
        time_phase_all = p['phase0.time_phase']
        time_phase_col = time_phase_all[gd.subset_node_indices['col']]
        time_phase_disc = time_phase_all[gd.subset_node_indices['state_disc']]
        time_phase_segends = np.reshape(time_phase_all[gd.subset_node_indices['segment_ends']],
                                        newshape=(gd.num_segments, 2))
        # Known optimal elapsed time for this brachistochrone setup.
        assert_near_equal(p['phase0.rhs_disc.time_phase'][-1], 1.8016, tolerance=1.0E-3)
        assert_near_equal(p['phase0.rhs_disc.t_initial'], p['phase0.t_initial'])
        assert_near_equal(p['phase0.rhs_col.t_initial'], p['phase0.t_initial'])
        assert_near_equal(p['phase0.rhs_disc.t_duration'], p['phase0.t_duration'])
        assert_near_equal(p['phase0.rhs_col.t_duration'], p['phase0.t_duration'])
        assert_near_equal(p['phase0.rhs_disc.time_phase'], time_phase_disc)
        assert_near_equal(p['phase0.rhs_col.time_phase'], time_phase_col)
        assert_near_equal(p['phase0.rhs_disc.time'], time_disc)
        assert_near_equal(p['phase0.rhs_col.time'], time_col)
        exp_out = p.model.phase0.simulate()
        # Each simulated segment's ODE interface should have received the
        # phase time values.
        for iseg in range(num_seg):
            seg_comp_i = exp_out.model.phase0._get_subsystem('segments.segment_{0}'.format(iseg))
            iface = seg_comp_i.options['ode_integration_interface']
            t_initial_i = iface.prob.get_val('ode.t_initial')
            t_duration_i = iface.prob.get_val('ode.t_duration')
            time_phase_i = iface.prob.get_val('ode.time_phase')
            time_i = iface.prob.get_val('ode.time')
            # Since the phase has simulated, all times should be equal to their respective value
            # at the end of each segment.
            assert_near_equal(t_initial_i, p['phase0.t_initial'])
            assert_near_equal(t_duration_i, p['phase0.t_duration'])
            assert_near_equal(time_phase_i, time_phase_segends[iseg, 1], tolerance=1.0E-12)
            assert_near_equal(time_i, time_segends[iseg, 1], tolerance=1.0E-12)
    def test_radau(self):
        # Radau evaluates the ODE on a single ('all') node set.
        num_seg = 20
        p = self._make_problem('radau-ps', num_seg)
        # Solve for the optimal trajectory
        p.run_driver()
        gd = p.model.phase0.options['transcription'].grid_data
        time_all = p['phase0.time']
        time_segends = np.reshape(time_all[gd.subset_node_indices['segment_ends']],
                                  newshape=(gd.num_segments, 2))
        time_phase_all = p['phase0.time_phase']
        time_phase_segends = np.reshape(time_phase_all[gd.subset_node_indices['segment_ends']],
                                        newshape=(gd.num_segments, 2))
        # Known optimal elapsed time for this brachistochrone setup.
        assert_near_equal(p['phase0.rhs_all.time_phase'][-1], 1.8016, tolerance=1.0E-3)
        assert_near_equal(p['phase0.rhs_all.t_initial'], p['phase0.t_initial'])
        assert_near_equal(p['phase0.rhs_all.t_duration'], p['phase0.t_duration'])
        assert_near_equal(p['phase0.rhs_all.time_phase'], time_phase_all)
        assert_near_equal(p['phase0.rhs_all.time'], time_all)
        exp_out = p.model.phase0.simulate()
        # Each simulated segment's ODE interface should have received the
        # phase time values.
        for iseg in range(num_seg):
            seg_comp_i = exp_out.model.phase0._get_subsystem('segments.segment_{0}'.format(iseg))
            iface = seg_comp_i.options['ode_integration_interface']
            t_initial_i = iface.prob.get_val('ode.t_initial')
            t_duration_i = iface.prob.get_val('ode.t_duration')
            time_phase_i = iface.prob.get_val('ode.time_phase')
            time_i = iface.prob.get_val('ode.time')
            # Since the phase has simulated, all times should be equal to their respective value
            # at the end of each segment.
            assert_near_equal(t_initial_i, p['phase0.t_initial'])
            assert_near_equal(t_duration_i, p['phase0.t_duration'])
            assert_near_equal(time_phase_i, time_phase_segends[iseg, 1], tolerance=1.0E-12)
            assert_near_equal(time_i, time_segends[iseg, 1], tolerance=1.0E-12)
if __name__ == "__main__":
unittest.main()
| [
"numpy.reshape",
"numpy.ones",
"matplotlib.use",
"dymos.Phase",
"unittest.main",
"dymos.GaussLobatto",
"openmdao.api.Group",
"openmdao.api.DirectSolver",
"numpy.zeros",
"numpy.cos",
"openmdao.utils.assert_utils.assert_near_equal",
"openmdao.api.ScipyOptimizeDriver",
"numpy.sin",
"dymos.Rad... | [((54, 75), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (68, 75), False, 'import matplotlib\n'), ((9739, 9754), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9752, 9754), False, 'import unittest\n'), ((1538, 1574), 'numpy.arange', 'np.arange', (["self.options['num_nodes']"], {}), "(self.options['num_nodes'])\n", (1547, 1574), True, 'import numpy as np\n'), ((2298, 2311), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2304, 2311), True, 'import numpy as np\n'), ((2332, 2345), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2338, 2345), True, 'import numpy as np\n'), ((2660, 2673), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2666, 2673), True, 'import numpy as np\n'), ((2694, 2707), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2700, 2707), True, 'import numpy as np\n'), ((3353, 3377), 'openmdao.api.ScipyOptimizeDriver', 'om.ScipyOptimizeDriver', ([], {}), '()\n', (3375, 3377), True, 'import openmdao.api as om\n'), ((3675, 3750), 'dymos.Phase', 'dm.Phase', ([], {'ode_class': '_BrachistochroneTestODE', 'transcription': 't[transcription]'}), '(ode_class=_BrachistochroneTestODE, transcription=t[transcription])\n', (3683, 3750), True, 'import dymos as dm\n'), ((4806, 4823), 'openmdao.api.DirectSolver', 'om.DirectSolver', ([], {}), '()\n', (4821, 4823), True, 'import openmdao.api as om\n'), ((5622, 5718), 'numpy.reshape', 'np.reshape', (["time_all[gd.subset_node_indices['segment_ends']]"], {'newshape': '(gd.num_segments, 2)'}), "(time_all[gd.subset_node_indices['segment_ends']], newshape=(gd.\n num_segments, 2))\n", (5632, 5718), True, 'import numpy as np\n'), ((5976, 6078), 'numpy.reshape', 'np.reshape', (["time_phase_all[gd.subset_node_indices['segment_ends']]"], {'newshape': '(gd.num_segments, 2)'}), "(time_phase_all[gd.subset_node_indices['segment_ends']], newshape\n =(gd.num_segments, 2))\n", (5986, 6078), True, 'import numpy as np\n'), ((6123, 6202), 
'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_disc.time_phase'][-1]", '(1.8016)'], {'tolerance': '(0.001)'}), "(p['phase0.rhs_disc.time_phase'][-1], 1.8016, tolerance=0.001)\n", (6140, 6202), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6213, 6285), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_disc.t_initial']", "p['phase0.t_initial']"], {}), "(p['phase0.rhs_disc.t_initial'], p['phase0.t_initial'])\n", (6230, 6285), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6294, 6365), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_col.t_initial']", "p['phase0.t_initial']"], {}), "(p['phase0.rhs_col.t_initial'], p['phase0.t_initial'])\n", (6311, 6365), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6375, 6449), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_disc.t_duration']", "p['phase0.t_duration']"], {}), "(p['phase0.rhs_disc.t_duration'], p['phase0.t_duration'])\n", (6392, 6449), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6458, 6531), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_col.t_duration']", "p['phase0.t_duration']"], {}), "(p['phase0.rhs_col.t_duration'], p['phase0.t_duration'])\n", (6475, 6531), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6541, 6608), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_disc.time_phase']", 'time_phase_disc'], {}), "(p['phase0.rhs_disc.time_phase'], time_phase_disc)\n", (6558, 6608), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6617, 6682), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_col.time_phase']", 'time_phase_col'], {}), "(p['phase0.rhs_col.time_phase'], time_phase_col)\n", (6634, 
6682), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6692, 6747), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_disc.time']", 'time_disc'], {}), "(p['phase0.rhs_disc.time'], time_disc)\n", (6709, 6747), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((6756, 6809), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_col.time']", 'time_col'], {}), "(p['phase0.rhs_col.time'], time_col)\n", (6773, 6809), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((8037, 8133), 'numpy.reshape', 'np.reshape', (["time_all[gd.subset_node_indices['segment_ends']]"], {'newshape': '(gd.num_segments, 2)'}), "(time_all[gd.subset_node_indices['segment_ends']], newshape=(gd.\n num_segments, 2))\n", (8047, 8133), True, 'import numpy as np\n'), ((8241, 8343), 'numpy.reshape', 'np.reshape', (["time_phase_all[gd.subset_node_indices['segment_ends']]"], {'newshape': '(gd.num_segments, 2)'}), "(time_phase_all[gd.subset_node_indices['segment_ends']], newshape\n =(gd.num_segments, 2))\n", (8251, 8343), True, 'import numpy as np\n'), ((8388, 8466), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_all.time_phase'][-1]", '(1.8016)'], {'tolerance': '(0.001)'}), "(p['phase0.rhs_all.time_phase'][-1], 1.8016, tolerance=0.001)\n", (8405, 8466), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((8477, 8548), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_all.t_initial']", "p['phase0.t_initial']"], {}), "(p['phase0.rhs_all.t_initial'], p['phase0.t_initial'])\n", (8494, 8548), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((8558, 8631), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_all.t_duration']", "p['phase0.t_duration']"], {}), "(p['phase0.rhs_all.t_duration'], p['phase0.t_duration'])\n", (8575, 
8631), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((8641, 8706), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_all.time_phase']", 'time_phase_all'], {}), "(p['phase0.rhs_all.time_phase'], time_phase_all)\n", (8658, 8706), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((8716, 8769), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (["p['phase0.rhs_all.time']", 'time_all'], {}), "(p['phase0.rhs_all.time'], time_all)\n", (8733, 8769), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((3508, 3572), 'dymos.GaussLobatto', 'dm.GaussLobatto', ([], {'num_segments': 'num_seg', 'order': 'transcription_order'}), '(num_segments=num_seg, order=transcription_order)\n', (3523, 3572), True, 'import dymos as dm\n'), ((3599, 3656), 'dymos.Radau', 'dm.Radau', ([], {'num_segments': 'num_seg', 'order': 'transcription_order'}), '(num_segments=num_seg, order=transcription_order)\n', (3607, 3656), True, 'import dymos as dm\n'), ((7452, 7505), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['t_initial_i', "p['phase0.t_initial']"], {}), "(t_initial_i, p['phase0.t_initial'])\n", (7469, 7505), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((7518, 7573), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['t_duration_i', "p['phase0.t_duration']"], {}), "(t_duration_i, p['phase0.t_duration'])\n", (7535, 7573), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((7586, 7663), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['time_phase_i', 'time_phase_segends[iseg, 1]'], {'tolerance': '(1e-12)'}), '(time_phase_i, time_phase_segends[iseg, 1], tolerance=1e-12)\n', (7603, 7663), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((7678, 7743), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['time_i', 
'time_segends[iseg, 1]'], {'tolerance': '(1e-12)'}), '(time_i, time_segends[iseg, 1], tolerance=1e-12)\n', (7695, 7743), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((9412, 9465), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['t_initial_i', "p['phase0.t_initial']"], {}), "(t_initial_i, p['phase0.t_initial'])\n", (9429, 9465), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((9478, 9533), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['t_duration_i', "p['phase0.t_duration']"], {}), "(t_duration_i, p['phase0.t_duration'])\n", (9495, 9533), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((9546, 9623), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['time_phase_i', 'time_phase_segends[iseg, 1]'], {'tolerance': '(1e-12)'}), '(time_phase_i, time_phase_segends[iseg, 1], tolerance=1e-12)\n', (9563, 9623), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((9638, 9703), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['time_i', 'time_segends[iseg, 1]'], {'tolerance': '(1e-12)'}), '(time_i, time_segends[iseg, 1], tolerance=1e-12)\n', (9655, 9703), False, 'from openmdao.utils.assert_utils import assert_near_equal\n'), ((481, 493), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (489, 493), True, 'import numpy as np\n'), ((660, 672), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (668, 672), True, 'import numpy as np\n'), ((930, 942), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (938, 942), True, 'import numpy as np\n'), ((1021, 1033), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (1029, 1033), True, 'import numpy as np\n'), ((1105, 1117), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (1113, 1117), True, 'import numpy as np\n'), ((1201, 1213), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (1209, 1213), True, 'import numpy as np\n'), ((1297, 1309), 
'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (1305, 1309), True, 'import numpy as np\n'), ((1396, 1408), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (1404, 1408), True, 'import numpy as np\n'), ((3321, 3331), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (3329, 3331), True, 'import openmdao.api as om\n'), ((568, 579), 'numpy.ones', 'np.ones', (['nn'], {}), '(nn)\n', (575, 579), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Author: <NAME> (<EMAIL>)
# License: BSD-3-Clause
import logging, os, time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time
from astropy import constants as const
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
import matplotlib
# Global matplotlib styling: render all text with LaTeX using a Times serif font.
nice_fonts = {
    "text.usetex": True,
    "font.family": "serif",
    "font.serif": "Times New Roman",
}
matplotlib.rcParams.update(nice_fonts)
# Flat Lambda-CDM cosmology; defined for distance conversions (unused in this script's body).
GENERIC_COSMOLOGY = FlatLambdaCDM(H0=70, Om0=0.3)
REDSHIFT = 0.267  # source redshift (not referenced below — kept for context)
# Position on the log10(E/GeV) axis where the observed-event marker line is drawn.
IC200530A_ENERGY = 4.915
FIG_WIDTH = 6  # unused here
BIG_FONTSIZE = 14
SMALL_FONTSIZE = 8  # unused here
GOLDEN_RATIO = 1.618  # unused here
DPI = 400
if __name__ == "__main__":
    # Resolve input (data) and output (plot) directories relative to this script.
    script_dir = os.path.dirname(__file__)
    data_dir = os.path.abspath(os.path.join(script_dir, "data", "effective_area"))
    plot_dir = os.path.abspath(os.path.join(script_dir, "plots"))
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    # Load the fluence predictions of the three emission models.
    jetted = pd.read_csv(os.path.join(data_dir, "fl_jetted_tywin_dbb_v4.csv"))
    corona = pd.read_csv(os.path.join(data_dir, "kohta_corona.csv"))
    wind = pd.read_csv(os.path.join(data_dir, "kohta_wind2.csv"))
    plt.figure(dpi=DPI, figsize=(5, 4))
    ax1 = plt.subplot(111)
    # Disk-corona model: rescale columns and move to log10-log10 coordinates.
    corona["a"] = corona["a"] / 1e9
    corona["h"] = corona["h"] * 1e9
    corona_x = np.log10(corona["a"] / 1.267)
    corona_y = np.log10(corona["a"] * corona["a"] * corona["h"] * 1.474 / 3)
    # Wind model: same conversion with its own normalisation factor.
    wind["a"] = wind["a"] / 1e9
    wind["h"] = wind["h"] * 1e9
    wind_x = np.log10(wind["a"] / 1.267)
    wind_y = np.log10(wind["a"] * wind["a"] * wind["h"] * 55.38 / 3)
    # Persist the converted curves next to the raw inputs.
    wind_df = pd.DataFrame()
    wind_df["log10E_GeV"] = wind_x
    wind_df["fluence_GeV"] = wind_y
    corona_df = pd.DataFrame()
    corona_df["log10E_GeV"] = corona_x
    corona_df["fluence_GeV"] = corona_y
    jet_df = pd.DataFrame()
    jet_df["log10E_GeV"] = jetted["log10_e_nu"]
    jet_df["fluence_GeV"] = jetted["log10_F"]
    wind_df.to_csv(os.path.join(data_dir, "wind_new.csv"))
    corona_df.to_csv(os.path.join(data_dir, "corona.csv"))
    jet_df.to_csv(os.path.join(data_dir, "jet_new.csv"))
    # Draw the three model curves.
    ax1.plot(
        jetted["log10_e_nu"],
        jetted["log10_F"],
        label=r"Relativistic jet",
        linestyle="dashdot",
    )
    ax1.plot(corona_x, corona_y, color="red", label="Disk-corona")
    ax1.plot(
        wind_x, wind_y, color="green", label="Sub-relativistic wind", linestyle="dashed"
    )
    ax1.set_xlim([2, 9])
    ax1.set_ylim([-5.5, 0])
    ax1.set_xlabel(r"log$_{10}~E_{\nu}$ (GeV)", fontsize=BIG_FONTSIZE)
    ax1.set_ylabel(
        r"log$_{10}~E_{\nu}^2 ~\mathcal{F}_\mu$ (GeV/cm$^2$)", fontsize=BIG_FONTSIZE
    )
    # Mark the observed neutrino energy with a dotted vertical line and a label.
    # ax1.arrow(IC200530A_ENERGY, -3.5, 0, 0.7, width=0.01, color="black")
    ax1.text(IC200530A_ENERGY - 1.05, -1, r"$E_{\nu,~\rm obs}$", fontsize=BIG_FONTSIZE)
    ax1.axvline(IC200530A_ENERGY, linestyle="dotted", color="black")
    ax1.tick_params(axis="both", labelsize=BIG_FONTSIZE)
    plt.legend(fontsize=BIG_FONTSIZE - 2)
    plt.tight_layout()
    plt.savefig(os.path.join(plot_dir, "fluence.pdf"))
| [
"logging.getLogger",
"numpy.log10",
"matplotlib.pyplot.savefig",
"matplotlib.rcParams.update",
"pandas.read_csv",
"os.path.join",
"astropy.cosmology.FlatLambdaCDM",
"os.path.dirname",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",... | [((443, 481), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (['nice_fonts'], {}), '(nice_fonts)\n', (469, 481), False, 'import matplotlib\n'), ((503, 532), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': '(70)', 'Om0': '(0.3)'}), '(H0=70, Om0=0.3)\n', (516, 532), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((709, 734), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (724, 734), False, 'import logging, os, time\n'), ((910, 937), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (927, 937), False, 'import logging, os, time\n'), ((993, 1045), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""fl_jetted_tywin_dbb_v4.csv"""'], {}), "(DATA_DIR, 'fl_jetted_tywin_dbb_v4.csv')\n", (1005, 1045), False, 'import logging, os, time\n'), ((1066, 1108), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""kohta_corona.csv"""'], {}), "(DATA_DIR, 'kohta_corona.csv')\n", (1078, 1108), False, 'import logging, os, time\n'), ((1127, 1168), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""kohta_wind2.csv"""'], {}), "(DATA_DIR, 'kohta_wind2.csv')\n", (1139, 1168), False, 'import logging, os, time\n'), ((1183, 1209), 'pandas.read_csv', 'pd.read_csv', (['infile_jetted'], {}), '(infile_jetted)\n', (1194, 1209), True, 'import pandas as pd\n'), ((1223, 1249), 'pandas.read_csv', 'pd.read_csv', (['infile_corona'], {}), '(infile_corona)\n', (1234, 1249), True, 'import pandas as pd\n'), ((1261, 1285), 'pandas.read_csv', 'pd.read_csv', (['infile_wind'], {}), '(infile_wind)\n', (1272, 1285), True, 'import pandas as pd\n'), ((1291, 1326), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': 'DPI', 'figsize': '(5, 4)'}), '(dpi=DPI, figsize=(5, 4))\n', (1301, 1326), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1354), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1349, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1472), 
'numpy.log10', 'np.log10', (["(corona['a'] / 1.267)"], {}), "(corona['a'] / 1.267)\n", (1451, 1472), True, 'import numpy as np\n'), ((1488, 1549), 'numpy.log10', 'np.log10', (["(corona['a'] * corona['a'] * corona['h'] * 1.474 / 3)"], {}), "(corona['a'] * corona['a'] * corona['h'] * 1.474 / 3)\n", (1496, 1549), True, 'import numpy as np\n'), ((1628, 1655), 'numpy.log10', 'np.log10', (["(wind['a'] / 1.267)"], {}), "(wind['a'] / 1.267)\n", (1636, 1655), True, 'import numpy as np\n'), ((1669, 1724), 'numpy.log10', 'np.log10', (["(wind['a'] * wind['a'] * wind['h'] * 55.38 / 3)"], {}), "(wind['a'] * wind['a'] * wind['h'] * 55.38 / 3)\n", (1677, 1724), True, 'import numpy as np\n'), ((1736, 1750), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1748, 1750), True, 'import pandas as pd\n'), ((1825, 1839), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1837, 1839), True, 'import pandas as pd\n'), ((1918, 1932), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1930, 1932), True, 'import pandas as pd\n'), ((3036, 3073), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(BIG_FONTSIZE - 2)'}), '(fontsize=BIG_FONTSIZE - 2)\n', (3046, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3096), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3094, 3096), True, 'import matplotlib.pyplot as plt\n'), ((3112, 3149), 'os.path.join', 'os.path.join', (['PLOT_DIR', '"""fluence.pdf"""'], {}), "(PLOT_DIR, 'fluence.pdf')\n", (3124, 3149), False, 'import logging, os, time\n'), ((3154, 3174), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (3165, 3174), True, 'import matplotlib.pyplot as plt\n'), ((766, 822), 'os.path.join', 'os.path.join', (['CURRENT_FILE_DIR', '"""data"""', '"""effective_area"""'], {}), "(CURRENT_FILE_DIR, 'data', 'effective_area')\n", (778, 822), False, 'import logging, os, time\n'), ((855, 894), 'os.path.join', 'os.path.join', (['CURRENT_FILE_DIR', '"""plots"""'], {}), 
"(CURRENT_FILE_DIR, 'plots')\n", (867, 894), False, 'import logging, os, time\n'), ((2037, 2075), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""wind_new.csv"""'], {}), "(DATA_DIR, 'wind_new.csv')\n", (2049, 2075), False, 'import logging, os, time\n'), ((2092, 2128), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""corona.csv"""'], {}), "(DATA_DIR, 'corona.csv')\n", (2104, 2128), False, 'import logging, os, time\n'), ((2145, 2182), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""jet_new.csv"""'], {}), "(DATA_DIR, 'jet_new.csv')\n", (2157, 2182), False, 'import logging, os, time\n')] |
"""
Tests the Critical Line Algorithm (CLA).
"""
import unittest
import os
import numpy as np
import pandas as pd
from mlfinlab.portfolio_optimization.cla import CLA
from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimation
class TestCLA(unittest.TestCase):
    # pylint: disable=too-many-public-methods
    """
    Tests different functions of the CLA class.

    Each test loads the bundled stock-price CSV in setUp and then exercises one
    public solution mode or one protected helper of CLA.
    """
    def setUp(self):
        """
        Set the file path for the tick data csv.
        """
        project_path = os.path.dirname(__file__)
        data_path = project_path + '/test_data/stock_prices.csv'
        self.data = pd.read_csv(data_path, parse_dates=True, index_col="Date")
    def test_cla_with_mean_returns(self):
        """
        Test the calculation of CLA turning points using mean returns.
        """
        # Overwrite slices of the price history to create pronounced jumps.
        self.data.iloc[1:10, :] = 40
        self.data.iloc[11:20, :] = 50
        self.data.iloc[21, :] = 100
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, asset_names=self.data.columns)
        weights = cla.weights.values
        weights[weights <= 1e-15] = 0 # Convert very very small numbers to 0
        # Every turning point must be a valid long-only portfolio summing to 1.
        for turning_point in weights:
            assert (turning_point >= 0).all()
            assert len(turning_point) == self.data.shape[1]
            np.testing.assert_almost_equal(np.sum(turning_point), 1)
    def test_cla_with_weight_bounds_as_lists(self):
        """
        Test the calculation of CLA turning points when we pass the weight bounds as a list
        instead of just lower and upper bound value.
        """
        cla = CLA(weight_bounds=([0]*self.data.shape[1], [1]*self.data.shape[1]), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, asset_names=self.data.columns)
        weights = cla.weights.values
        weights[weights <= 1e-15] = 0 # Convert very very small numbers to 0
        for turning_point in weights:
            assert (turning_point >= 0).all()
            assert len(turning_point) == self.data.shape[1]
            np.testing.assert_almost_equal(np.sum(turning_point), 1)
    def test_cla_with_exponential_returns(self):
        """
        Test the calculation of CLA turning points using exponential returns
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="exponential")
        cla.allocate(asset_prices=self.data, asset_names=self.data.columns)
        weights = cla.weights.values
        weights[weights <= 1e-15] = 0 # Convert very very small numbers to 0
        for turning_point in weights:
            assert (turning_point >= 0).all()
            assert len(turning_point) == self.data.shape[1]
            np.testing.assert_almost_equal(np.sum(turning_point), 1)
    def test_cla_max_sharpe(self):
        """
        Test the calculation of maximum sharpe ratio weights.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='max_sharpe', asset_names=self.data.columns)
        weights = cla.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
    def test_cla_min_volatility(self):
        """
        Test the calculation for minimum volatility weights.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='min_volatility', asset_names=self.data.columns)
        weights = cla.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
    def test_cla_efficient_frontier(self):
        """
        Test the calculation of the efficient frontier solution.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='efficient_frontier', asset_names=self.data.columns)
        assert len(cla.efficient_frontier_means) == len(cla.efficient_frontier_sigma) and \
               len(cla.efficient_frontier_sigma) == len(cla.weights.values)
        assert cla.efficient_frontier_sigma[-1] <= cla.efficient_frontier_sigma[0] and \
               cla.efficient_frontier_means[-1] <= cla.efficient_frontier_means[0] # frontier runs from the max-return/max-risk point down to lower risk and return
    def test_lambda_for_no_bounded_weights(self):
        # pylint: disable=protected-access,invalid-name
        """
        Test the computation of lambda when there are no bounded weights.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='min_volatility', asset_names=self.data.columns)
        data = self.data.cov()
        data = data.values
        x, y = cla._compute_lambda(covar_f_inv=data,
                                   covar_fb=data,
                                   mean_f=cla.expected_returns,
                                   w_b=None,
                                   asset_index=1,
                                   b_i=[[0], [1]])
        assert isinstance(x, float)
        assert isinstance(y, int)
    def test_free_bound_weights(self):
        # pylint: disable=protected-access,invalid-name
        """
        Test the method of freeing bounded weights when free-weights is None.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='min_volatility', asset_names=self.data.columns)
        # Passing more free weights than assets should yield falsy (empty) results.
        x, y = cla._free_bound_weight(free_weights=[1]*(cla.expected_returns.shape[0]+1))
        assert not x
        assert not y
    def test_expected_returns_equals_means(self):
        # pylint: disable=protected-access,invalid-name
        """
        Test for condition when expected returns equal the mean value.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='min_volatility', asset_names=self.data.columns)
        data = self.data.copy()
        data.iloc[:, :] = 0.02320653
        cla._initialise(asset_prices=data, resample_by='B', expected_asset_returns=None, covariance_matrix=None)
        # NOTE(review): the last expected return appears to be nudged to 1e-5 when all
        # returns are identical (degenerate case) — confirm against CLA._initialise.
        assert cla.expected_returns[-1, 0] == 1e-5
    def test_lambda_for_zero_matrices(self):
        # pylint: disable=protected-access,invalid-name
        """
        Test the computation of lambda when there are no bounded weights. The method
        should return None, None.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='min_volatility', asset_names=self.data.columns)
        data = self.data.cov()
        data = data.values
        # Zero covariance matrices make the lambda computation degenerate.
        data[:, :] = 0
        x, y = cla._compute_lambda(covar_f_inv=data,
                                   covar_fb=data,
                                   mean_f=cla.expected_returns,
                                   w_b=None,
                                   asset_index=1,
                                   b_i=[[0], [1]])
        assert not x
        assert not y
    def test_w_for_no_bounded_weights(self):
        # pylint: disable=protected-access,invalid-name
        """
        Test the computation of weights (w) when there are no bounded weights.
        """
        cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
        cla.allocate(asset_prices=self.data, solution='min_volatility', asset_names=self.data.columns)
        data = self.data.cov()
        data = data.values
        x, y = cla._compute_w(covar_f_inv=data,
                              covar_fb=data,
                              mean_f=cla.expected_returns,
                              w_b=None)
        assert isinstance(x, np.ndarray)
        assert isinstance(y, float)
    def test_purge_excess(self):
        # pylint: disable=protected-access,invalid-name
        """
        Test purge number excess for very very small tolerance.
        """
        with self.assertRaises(IndexError):
            cla = CLA(weight_bounds=(0, 1), calculate_expected_returns="mean")
            cla.allocate(asset_prices=self.data, solution='cla_turning_points', asset_names=self.data.columns)
            cla.weights = list(cla.weights.values)
            cla.weights = cla.weights*100
            cla._purge_num_err(tol=1e-18)
    def test_flag_true_for_purge_num_err(self):
        # pylint: disable=protected-access, no-self-use
        """
        Test whether the flag becomes True in the purge num error function.
        """
        cla = CLA()
        # Weight below its lower bound triggers the purge flag and empties the lists.
        cla.weights = [[1]]
        cla.lower_bounds = [100]
        cla.upper_bounds = [1]
        cla.lambdas = [[1]]
        cla.gammas = [[1]]
        cla.free_weights = [[1]]
        cla._purge_num_err(tol=1)
        assert not cla.weights
        assert not cla.lambdas
        assert not cla.gammas
    def test_value_error_for_unknown_solution(self):
        """
        Test ValueError on passing unknown solution string.
        """
        with self.assertRaises(ValueError):
            cla = CLA()
            cla.allocate(asset_prices=self.data, solution='unknown_string', asset_names=self.data.columns)
    def test_value_error_for_non_dataframe_input(self):
        """
        Test ValueError on passing non-dataframe input.
        """
        with self.assertRaises(ValueError):
            cla = CLA()
            cla.allocate(asset_prices=self.data.values, solution='cla_turning_points', asset_names=self.data.columns)
    def test_value_error_for_non_date_index(self):
        """
        Test ValueError on passing dataframe not indexed by date.
        """
        with self.assertRaises(ValueError):
            cla = CLA()
            data = self.data.reset_index()
            cla.allocate(asset_prices=data, solution='cla_turning_points', asset_names=self.data.columns)
    def test_value_error_for_unknown_returns(self):
        """
        Test ValueError on passing unknown returns string.
        """
        with self.assertRaises(ValueError):
            cla = CLA(calculate_expected_returns="unknown_returns")
            cla.allocate(asset_prices=self.data, solution='cla_turning_points', asset_names=self.data.columns)
    def test_resampling_asset_prices(self):
        """
        Test resampling of asset prices.
        """
        cla = CLA()
        cla.allocate(asset_prices=self.data, resample_by='B', solution='min_volatility', asset_names=self.data.columns)
        weights = cla.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
    def test_all_inputs_none(self):
        """
        Test allocation when all inputs are None.
        """
        with self.assertRaises(ValueError):
            cla = CLA()
            cla.allocate(asset_names=self.data.columns)
    def test_cla_with_input_as_returns_and_covariance(self):
        # pylint: disable=invalid-name
        """
        Test CLA when we pass expected returns and covariance matrix as input.
        """
        cla = CLA()
        expected_returns = ReturnsEstimation().calculate_mean_historical_returns(asset_prices=self.data)
        covariance = ReturnsEstimation().calculate_returns(asset_prices=self.data).cov()
        cla.allocate(covariance_matrix=covariance,
                     expected_asset_returns=expected_returns,
                     asset_names=self.data.columns)
        weights = cla.weights.values
        weights[weights <= 1e-15] = 0 # Convert very very small numbers to 0
        for turning_point in weights:
            assert (turning_point >= 0).all()
            assert len(turning_point) == self.data.shape[1]
            np.testing.assert_almost_equal(np.sum(turning_point), 1)
    def test_no_asset_names(self):
        """
        Test CLA when not supplying a list of asset names.
        """
        cla = CLA()
        cla.allocate(asset_prices=self.data)
        weights = cla.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
    def test_valuerror_with_no_asset_names(self):
        """
        Test ValueError when not supplying a list of asset names and no other input.
        """
        with self.assertRaises(ValueError):
            cla = CLA()
            expected_returns = ReturnsEstimation().calculate_mean_historical_returns(asset_prices=self.data,
                                                                                      resample_by='W')
            covariance = ReturnsEstimation().calculate_returns(asset_prices=self.data, resample_by='W').cov()
            cla.allocate(expected_asset_returns=expected_returns, covariance_matrix=covariance)
| [
"pandas.read_csv",
"mlfinlab.portfolio_optimization.cla.CLA",
"os.path.dirname",
"numpy.sum",
"mlfinlab.portfolio_optimization.returns_estimators.ReturnsEstimation"
] | [((513, 538), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (528, 538), False, 'import os\n'), ((624, 682), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'parse_dates': '(True)', 'index_col': '"""Date"""'}), "(data_path, parse_dates=True, index_col='Date')\n", (635, 682), True, 'import pandas as pd\n'), ((947, 1007), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (950, 1007), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((1648, 1758), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '([0] * self.data.shape[1], [1] * self.data.shape[1])', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=([0] * self.data.shape[1], [1] * self.data.shape[1]),\n calculate_expected_returns='mean')\n", (1651, 1758), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((2321, 2388), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""exponential"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='exponential')\n", (2324, 2388), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((2929, 2989), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (2932, 2989), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((3414, 3474), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (3417, 3474), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((3911, 3971), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 
'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (3914, 3971), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((4670, 4730), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (4673, 4730), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((5488, 5548), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (5491, 5548), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((6001, 6061), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (6004, 6061), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((6658, 6718), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (6661, 6718), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((7478, 7538), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (7481, 7538), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((8737, 8742), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (8740, 8742), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((10532, 10537), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (10535, 10537), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((11298, 11303), 
'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (11301, 11303), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((12125, 12130), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (12128, 12130), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((3254, 3269), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3260, 3269), True, 'import numpy as np\n'), ((3743, 3758), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3749, 3758), True, 'import numpy as np\n'), ((8210, 8270), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'weight_bounds': '(0, 1)', 'calculate_expected_returns': '"""mean"""'}), "(weight_bounds=(0, 1), calculate_expected_returns='mean')\n", (8213, 8270), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((9250, 9255), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (9253, 9255), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((9563, 9568), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (9566, 9568), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((9892, 9897), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (9895, 9897), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((10246, 10295), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {'calculate_expected_returns': '"""unknown_returns"""'}), "(calculate_expected_returns='unknown_returns')\n", (10249, 10295), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((10823, 10838), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (10829, 10838), True, 'import numpy as np\n'), ((11017, 11022), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (11020, 11022), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((12341, 12356), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (12347, 12356), True, 'import numpy as np\n'), 
((12584, 12589), 'mlfinlab.portfolio_optimization.cla.CLA', 'CLA', ([], {}), '()\n', (12587, 12589), False, 'from mlfinlab.portfolio_optimization.cla import CLA\n'), ((1385, 1406), 'numpy.sum', 'np.sum', (['turning_point'], {}), '(turning_point)\n', (1391, 1406), True, 'import numpy as np\n'), ((2129, 2150), 'numpy.sum', 'np.sum', (['turning_point'], {}), '(turning_point)\n', (2135, 2150), True, 'import numpy as np\n'), ((2766, 2787), 'numpy.sum', 'np.sum', (['turning_point'], {}), '(turning_point)\n', (2772, 2787), True, 'import numpy as np\n'), ((11331, 11350), 'mlfinlab.portfolio_optimization.returns_estimators.ReturnsEstimation', 'ReturnsEstimation', ([], {}), '()\n', (11348, 11350), False, 'from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimation\n'), ((11965, 11986), 'numpy.sum', 'np.sum', (['turning_point'], {}), '(turning_point)\n', (11971, 11986), True, 'import numpy as np\n'), ((12621, 12640), 'mlfinlab.portfolio_optimization.returns_estimators.ReturnsEstimation', 'ReturnsEstimation', ([], {}), '()\n', (12638, 12640), False, 'from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimation\n'), ((11430, 11449), 'mlfinlab.portfolio_optimization.returns_estimators.ReturnsEstimation', 'ReturnsEstimation', ([], {}), '()\n', (11447, 11449), False, 'from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimation\n'), ((12826, 12845), 'mlfinlab.portfolio_optimization.returns_estimators.ReturnsEstimation', 'ReturnsEstimation', ([], {}), '()\n', (12843, 12845), False, 'from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimation\n')] |
"""MolecularAI Implementation of sample generation, randomizing scaffolds as well as fetching unique sample sequences
The source of this file is
https://raw.githubusercontent.com/MolecularAI/Reinvent/982b26dd6cfeb8aa84b6d7e4a8c2a7edde2bad36/running_modes/lib_invent/rl_actions/sample_model.py
and it was only minimally changed. See README.md.
"""
__copyright__ = "Copyright 2021, MolecularAI"
__license__ = "Apache 2.0"
import logging
from dataclasses import dataclass
from typing import List
import numpy as np
import torch.utils.data as tud
from reinvent_chemistry import Conversions
from reinvent_chemistry.library_design import AttachmentPoints, BondMaker
from reinvent_chemistry.utils import get_indices_of_unique_smiles
from reinvent_models.lib_invent.models import dataset as md
# Module-level logger; a NullHandler avoids "no handler found" warnings when the
# library is used without logging configured by the application.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@dataclass
class SampledSequencesDTO:
    """One sampled decoration for a scaffold, together with its negative log-likelihood."""
    scaffold: str  # scaffold SMILES the decoration was sampled for
    decoration: str  # sampled decoration SMILES
    nll: float  # negative log-likelihood assigned by the model to this decoration
class ReinventBase:
def __init__(
self, model, batch_size: int, logger=None, randomize=False, sample_uniquely=True
):
"""
Creates an instance of SampleModel.
:params model: A model instance (better in scaffold_decorating mode).
:params batch_size: Batch size to use.
:return:
"""
self.model = model
self._batch_size = batch_size
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._randomize = randomize
self._conversions = Conversions()
self._sample_uniquely = sample_uniquely
def get_dataloader(self, scaffold_list: List[str]) -> tud.DataLoader:
"""
Get a dataloader for the list of scaffolds to use with reinvent.
NOTE: This method was factored out of the `run` method from the original source.
:params scaffold_list: A list of scaffold SMILES.
:return: An instance of a torch dataloader.
"""
scaffold_list = (
self._randomize_scaffolds(scaffold_list)
if self._randomize
else scaffold_list
)
clean_scaffolds = [
self._attachment_points.remove_attachment_point_numbers(scaffold)
for scaffold in scaffold_list
]
dataset = md.Dataset(
clean_scaffolds,
self.model.vocabulary.scaffold_vocabulary,
self.model.vocabulary.scaffold_tokenizer,
)
dataloader = tud.DataLoader(
dataset,
batch_size=len(dataset),
shuffle=False,
collate_fn=md.Dataset.collate_fn,
)
return dataloader
def run(self, scaffold_list: List[str]) -> List[SampledSequencesDTO]:
"""
Samples the model for the given number of SMILES.
NOTE: this method was slightly adapted from the original source.
:params scaffold_list: A list of scaffold SMILES.
:return: A list of SampledSequencesDTO.
"""
dataloader = self.get_dataloader(scaffold_list)
sampled_sequences = []
for batch in dataloader:
for _ in range(self._batch_size):
scaffold_seqs, scaffold_seq_lengths = batch
packed = self.model.sample_decorations(
scaffold_seqs, scaffold_seq_lengths
)
for scaffold, decoration, nll in packed:
sampled_sequences.append(
SampledSequencesDTO(scaffold, decoration, nll)
)
if self._sample_uniquely:
sampled_sequences = self._sample_unique_sequences(sampled_sequences)
return sampled_sequences
def _sample_unique_sequences(
self, sampled_sequences: List[SampledSequencesDTO]
) -> List[SampledSequencesDTO]:
strings = [
"".join([ss.scaffold, ss.decoration])
for index, ss in enumerate(sampled_sequences)
]
unique_idxs = get_indices_of_unique_smiles(strings)
sampled_sequences_np = np.array(sampled_sequences)
unique_sampled_sequences = sampled_sequences_np[unique_idxs]
return unique_sampled_sequences.tolist()
def _randomize_scaffolds(self, scaffolds: List[str]):
scaffold_mols = [
self._conversions.smile_to_mol(scaffold) for scaffold in scaffolds
]
randomized = [self._bond_maker.randomize_scaffold(mol) for mol in scaffold_mols]
return randomized
| [
"logging.getLogger",
"logging.NullHandler",
"reinvent_chemistry.library_design.AttachmentPoints",
"numpy.array",
"reinvent_models.lib_invent.models.dataset.Dataset",
"reinvent_chemistry.library_design.BondMaker",
"reinvent_chemistry.utils.get_indices_of_unique_smiles",
"reinvent_chemistry.Conversions"... | [((800, 827), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (817, 827), False, 'import logging\n'), ((846, 867), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (865, 867), False, 'import logging\n'), ((1400, 1411), 'reinvent_chemistry.library_design.BondMaker', 'BondMaker', ([], {}), '()\n', (1409, 1411), False, 'from reinvent_chemistry.library_design import AttachmentPoints, BondMaker\n'), ((1446, 1464), 'reinvent_chemistry.library_design.AttachmentPoints', 'AttachmentPoints', ([], {}), '()\n', (1462, 1464), False, 'from reinvent_chemistry.library_design import AttachmentPoints, BondMaker\n'), ((1529, 1542), 'reinvent_chemistry.Conversions', 'Conversions', ([], {}), '()\n', (1540, 1542), False, 'from reinvent_chemistry import Conversions\n'), ((2289, 2406), 'reinvent_models.lib_invent.models.dataset.Dataset', 'md.Dataset', (['clean_scaffolds', 'self.model.vocabulary.scaffold_vocabulary', 'self.model.vocabulary.scaffold_tokenizer'], {}), '(clean_scaffolds, self.model.vocabulary.scaffold_vocabulary, self\n .model.vocabulary.scaffold_tokenizer)\n', (2299, 2406), True, 'from reinvent_models.lib_invent.models import dataset as md\n'), ((3992, 4029), 'reinvent_chemistry.utils.get_indices_of_unique_smiles', 'get_indices_of_unique_smiles', (['strings'], {}), '(strings)\n', (4020, 4029), False, 'from reinvent_chemistry.utils import get_indices_of_unique_smiles\n'), ((4061, 4088), 'numpy.array', 'np.array', (['sampled_sequences'], {}), '(sampled_sequences)\n', (4069, 4088), True, 'import numpy as np\n')] |
import itertools
import logging
import numpy as np
import pandas as pd
import scipy.stats
def create_regression_dataset(metafeatures, experiments):
X = []
X_indices = []
Y = []
for dataset_name in experiments:
experiment = experiments[dataset_name]
mf = metafeatures.loc[dataset_name]
for i, run in enumerate(experiment):
x1 = pd.Series(
data=[run.params[param] for param in run.params],
index=run.params.keys())
x2 = mf
X.append(x1.append(x2))
X_indices.append('%s_%d' % (dataset_name, i))
Y.append(run.result)
X = pd.DataFrame(X, index=X_indices)
Y = pd.DataFrame(Y, index=X_indices)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
return X, Y
def create_predict_spearman_rank(metafeatures, experiments, iterator):
X = []
Y = []
Y_names = []
# Calculate the pairwise ranks between datasets
dataset_names = [name for name in metafeatures.index]
cross_product = []
if iterator == 'combination':
for cross in itertools.combinations_with_replacement(
dataset_names, r=2):
cross_product.append(cross)
elif iterator == 'permutation':
for cross in itertools.permutations(dataset_names, r=2):
cross_product.append(cross)
else:
raise NotImplementedError(iterator)
logging.info('Create spearman rank dataset without CV data and %s',
iterator)
logging.info('Using %d datasets', len(dataset_names))
logging.info('This will results in %d training points', len(cross_product))
# Create inputs and targets
for cross in cross_product:
name = '%s_%s' % (cross[0], cross[1])
mf_1 = metafeatures.loc[cross[0]]
mf_2 = metafeatures.loc[cross[1]]
assert mf_1.dtype == np.float64
assert mf_2.dtype == np.float64
x = np.hstack((mf_1, mf_2))
columns = metafeatures.columns.values
index = np.hstack(('0_' + columns, '1_' + columns))
x = pd.Series(data=x, name=name, index=index)
X.append(x)
experiments_1 = experiments[cross[0]]
experiments_2 = experiments[cross[1]]
assert len(experiments_1) == len(experiments_2), name
responses_1 = np.zeros((len(experiments_1)), dtype=np.float64)
responses_2 = np.zeros((len(experiments_1)), dtype=np.float64)
for idx, zipped in enumerate(
zip(
sorted(experiments_1, key=lambda t: str(t.configuration)),
sorted(experiments_2,
key=lambda t: str(t.configuration)))):
# Test if the order of the params is the same
exp_1, exp_2 = zipped
print(exp_1.configuration, exp_2.configuration)
assert exp_1.configuration == exp_2.configuration,\
(experiments_1, experiments_2)
responses_1[idx] = exp_1.result if np.isfinite(exp_1.result) else 1
responses_2[idx] = exp_2.result if np.isfinite(exp_2.result) else 1
rho, p = scipy.stats.spearmanr(responses_1, responses_2)
# rho, p = scipy.stats.kendalltau(responses_1, responses_2)
if not np.isfinite(rho):
rho = 0
Y.append(rho)
Y_names.append(name)
X = pd.DataFrame(X)
Y = pd.Series(Y, index=Y_names)
logging.info('Metafeatures %s', metafeatures.shape)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
assert X.shape == (len(cross_product), metafeatures.shape[1] * 2), \
(X.shape, (len(cross), metafeatures.shape[1] * 2))
assert Y.shape == (len(cross_product), )
# train sklearn regressor (tree) with 10fold CV
indices = range(len(X))
np_rs = np.random.RandomState(42)
np_rs.shuffle(indices)
X = X.iloc[indices]
Y = Y.iloc[indices]
return X, Y
def create_predict_spearman_rank_with_cv(cv_metafeatures, cv_experiments,
iterator):
X = []
Y = []
Y_names = []
# Calculate the pairwise ranks between datasets
dataset_names = [name for name in cv_metafeatures]
cross_product = []
folds_product = []
if iterator == 'combination':
for cross in itertools.combinations_with_replacement(
dataset_names, r=2):
cross_product.append(cross)
for folds in itertools.combinations_with_replacement(range(10), r=2):
folds_product.append(folds)
elif iterator == 'permutation':
for cross in itertools.permutations(dataset_names, r=2):
cross_product.append(cross)
for folds in itertools.permutations(range(10), r=2):
folds_product.append(folds)
else:
raise NotImplementedError()
logging.info('Create spearman rank dataset with CV data %s', iterator)
logging.info('Using %d datasets', len(dataset_names))
logging.info('This will results in %d training points',
len(cross_product) * len(folds_product))
logging.info('Length of dataset crossproduct %s', len(cross_product))
logging.info('Length of folds crossproduct %s', len(folds_product))
# Create inputs and targets
for i, cross in enumerate(cross_product):
print('%d/%d: %s' % (i, len(cross_product), cross), )
for folds in folds_product:
name = '%s-%d_%s-%d' % (cross[0], folds[0], cross[1], folds[1])
mf_1 = cv_metafeatures[cross[0]][folds[0]]
mf_2 = cv_metafeatures[cross[1]][folds[1]]
assert mf_1.dtype == np.float64
assert mf_2.dtype == np.float64
x = np.hstack((mf_1, mf_2))
columns = cv_metafeatures[cross[0]][folds[0]].index.values
index = np.hstack(('0_' + columns, '1_' + columns))
x = pd.Series(data=x, name=name, index=index)
X.append(x)
experiments_1 = cv_experiments[cross[0]][folds[0]]
experiments_2 = cv_experiments[cross[1]][folds[1]]
assert len(experiments_1) == len(experiments_2)
responses_1 = np.zeros((len(experiments_1)), dtype=np.float64)
responses_2 = np.zeros((len(experiments_1)), dtype=np.float64)
for idx, zipped in enumerate(zip(experiments_1, experiments_2)):
# Test if the order of the params is the same
exp_1, exp_2 = zipped
assert exp_1.params == exp_2.params
responses_1[idx] = exp_1.result
responses_2[idx] = exp_2.result
rho, p = scipy.stats.spearmanr(responses_1, responses_2)
# A nan is produced if all values of one of the response lists
# are equal. This results in a division by zero. Because there is
# no correlation if all values are the same, rho is replaced by
# zero...
# It would probably be better to assign random ranks for equal
# values, but scipy doesn't support this...
if not np.isfinite(rho):
rho = 0
Y.append(rho)
Y_names.append(name)
X = pd.DataFrame(X)
Y = pd.Series(Y, index=Y_names)
logging.info('CV_Metafeatures %s', cv_metafeatures.shape)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
# train sklearn regressor (tree) with 10fold CV
indices = range(len(X))
np_rs = np.random.RandomState(42)
np_rs.shuffle(indices)
X = X.iloc[indices]
Y = Y.iloc[indices]
return X, Y
"""
def create_smac_warmstart_files(context, dataset, output_dir, num_warmstarts):
runs_and_results = StringIO.StringIO()
runs_and_results.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,Seed,"
"Runtime,Run Length,Run Result Code,Run Quality,SMAC"
" Iteration,SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
paramstrings = StringIO.StringIO()
best_hyperparameters, distances = metalearner.metalearn_base(context)
hp_list, name_list, dist_list = metalearner.assemble_best_hyperparameters_list(
best_hyperparameters, distances)
for i in range(len(hp_list)):
print hp_list[i], name_list[i], dist_list[i]
def create_smac_files_file(cv_metafeatures, cv_experiments, dataset,
output_dir):
runs_and_results = StringIO.StringIO()
runs_and_results.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,Seed,"
"Runtime,Run Length,Run Result Code,Run Quality,SMAC"
" Iteration,SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
paramstrings = StringIO.StringIO()
train_instances_file = StringIO.StringIO()
feature_file = StringIO.StringIO()
scenario_file = StringIO.StringIO()
run_number = 1
instance_number = 1
# TODO: is it possible to get_value the openml dataset id?
for dataset_number, name in enumerate(cv_experiments):
for fold in cv_experiments[name]:
configuration_id = 1
iteration = int(run_number/2)
# if name == dataset, we don't want to put the rundata in there
# because we want to optimize for name
if name != dataset:
for exp in cv_experiments[name][fold]:
str = "%s,%s,%s,%f,0,108000,-1,%f,1,1,%f,%d,%f,SAT,Aditional data,%f" \
% (run_number, configuration_id, instance_number, exp.result, 1.0,
exp.result, iteration, float(run_number), 1.0)
runs_and_results.write(str + "\n")
run_number += 1
configuration_id += 1
train_instances_file.write("%d-%d\n" % (dataset_number, fold))
instance_number += 1
if instance_number > 100:
break
configuration_id = 1
for exp in cv_experiments[name][0]:
paramstring = ", ".join(["%s='%s'" % (re.sub("^-", "",param),
exp.params[param]) for param in exp.params])
paramstrings.write("%d: %s\n" % (configuration_id, paramstring))
with open(os.path.join(output_dir, "runs_and_results-it%d.csv" %
iteration), "w") as fh:
runs_and_results.seek(0)
for line in runs_and_results:
fh.write(line)
with open(os.path.join(output_dir, "paramstrings-it%d.txt" % iteration),
"w") as fh:
paramstrings.seek(0)
for line in paramstrings:
fh.write(line)
with open(os.path.join(output_dir, "instances-train.txt"),
"w") as fh:
train_instances_file.seek(0)
for line in train_instances_file:
fh.write(line)
"""
if __name__ == '__main__':
pass
"""
# TODO: right now, this is only done for one split, namely the split of
# the directory we're inside...
# TODO: this only works in a directory, in which a metaexperiment was
# already run...
parser = ArgumentParser()
parser.add_argument("target_directory", type=str)
args = parser.parse_args()
target_directory = args.target_directory
if not os.path.exists(target_directory):
raise ValueError("Target directory %s does not exist." % target_directory)
# Important, change into some directory in which an experiment was already
# performed...
context = metalearner.setup(None)
metafeatures = context["metafeatures"]
#cv_metafeatures = context["cv_metafeatures"]
meta_base = context["meta_base"]
#cv_meta_base = context["cv_meta_base"]
savefile_prefix = "testfold_%d-%d" % (context["test_fold"],
context["test_folds"])
# Use only the pfahringer subset of the available metafeatures
#columns = list()
#columns.extend(mf.subsets["pfahringer_2000_experiment1"])
#print columns
#metafeatures = metafeatures.loc[:,columns]
#for key in cv_metafeatures:
# cv_metafeatures[key] = cv_metafeatures[key].loc[columns,:]
# savefile_prefix += "_pfahringer"
# Remove class_probability_max from the set of metafeatures
# columns = list()
# metafeature_list = mf.subsets["all"]
# metafeature_list.remove("class_probability_max")
# metafeature_list.remove("class_probability_min")
# metafeature_list.remove("class_probability_mean")
# metafeature_list.remove("class_probability_std")
# columns.extend(metafeature_list)
# metafeatures = metafeatures.loc[:,columns]
# for key in cv_metafeatures:
# cv_metafeatures[key] = cv_metafeatures[key].loc[columns,:]
# savefile_prefix += "_woclassprobs"
# Experiment is an OrderedDict, which has dataset names as keys
# The values are lists of experiments(OrderedDict of params, response)
experiments = meta_base.experiments
#cv_experiments = cv_meta_base.experiments
"""
"""
# Build the warmstart directory for SMAC, can be called with
# ./smac --scenario-file <file> --seed 0 --warmstart <foldername>
# needs paramstrings.txt and runs_and_results.txt
# plain
smac_bootstrap_output = "smac_bootstrap_plain"
for dataset in cv_metafeatures:
bootstraps = (2, 5, 10)
distance = ("l1", "l2", "learned_distance")
metafeature_subset = mf.subsets
for num_bootstrap, dist, subset in itertools.product(
bootstraps, distance, metafeature_subset, repeat=1):
context["distance_measure"] = dist
# TODO: somehow only get_value a metafeature subset
dataset_output_dir = os.path.join(target_directory,
smac_bootstrap_output, dataset +
"_bootstrapped%d_%s_%s" % (num_bootstrap, dist, subset))
if not os.path.exists(dataset_output_dir):
os.mkdirs(dataset_output_dir)
create_smac_warmstart_files(context, dataset, dataset_output_dir,
num_warmstarts=num_bootstrap)
break
# with the adjustment of Yogotama and Mann
"""
# X, Y = create_regression_dataset(metafeatures, experiments)
# with open("regression_dataset.pkl", "w") as fh:
# cPickle.dump((X, Y, metafeatures), fh, -1)
"""
# Calculate the metafeatures without the 10fold CV
X, Y = create_predict_spearman_rank(metafeatures, experiments,
iterator="permutation")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_perm.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
X, Y = create_predict_spearman_rank(metafeatures, experiments,
iterator="combination")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_comb.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
print
# Calculate the metafeatures for the 10fold CV...
X, Y = create_predict_spearman_rank_with_cv(cv_metafeatures,
cv_experiments,
iterator="combination")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_cv_comb.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
X, Y = create_predict_spearman_rank_with_cv(cv_metafeatures,
cv_experiments,
iterator="permutation")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_cv_perm.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
"""
| [
"pandas.Series",
"numpy.hstack",
"itertools.combinations_with_replacement",
"numpy.isfinite",
"pandas.DataFrame",
"itertools.permutations",
"logging.info",
"numpy.random.RandomState"
] | [((655, 687), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'index': 'X_indices'}), '(X, index=X_indices)\n', (667, 687), True, 'import pandas as pd\n'), ((696, 728), 'pandas.DataFrame', 'pd.DataFrame', (['Y'], {'index': 'X_indices'}), '(Y, index=X_indices)\n', (708, 728), True, 'import pandas as pd\n'), ((733, 768), 'logging.info', 'logging.info', (['"""X.shape %s"""', 'X.shape'], {}), "('X.shape %s', X.shape)\n", (745, 768), False, 'import logging\n'), ((773, 808), 'logging.info', 'logging.info', (['"""Y.shape %s"""', 'Y.shape'], {}), "('Y.shape %s', Y.shape)\n", (785, 808), False, 'import logging\n'), ((1443, 1520), 'logging.info', 'logging.info', (['"""Create spearman rank dataset without CV data and %s"""', 'iterator'], {}), "('Create spearman rank dataset without CV data and %s', iterator)\n", (1455, 1520), False, 'import logging\n'), ((3384, 3399), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (3396, 3399), True, 'import pandas as pd\n'), ((3408, 3435), 'pandas.Series', 'pd.Series', (['Y'], {'index': 'Y_names'}), '(Y, index=Y_names)\n', (3417, 3435), True, 'import pandas as pd\n'), ((3440, 3491), 'logging.info', 'logging.info', (['"""Metafeatures %s"""', 'metafeatures.shape'], {}), "('Metafeatures %s', metafeatures.shape)\n", (3452, 3491), False, 'import logging\n'), ((3496, 3531), 'logging.info', 'logging.info', (['"""X.shape %s"""', 'X.shape'], {}), "('X.shape %s', X.shape)\n", (3508, 3531), False, 'import logging\n'), ((3536, 3571), 'logging.info', 'logging.info', (['"""Y.shape %s"""', 'Y.shape'], {}), "('Y.shape %s', Y.shape)\n", (3548, 3571), False, 'import logging\n'), ((3841, 3866), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (3862, 3866), True, 'import numpy as np\n'), ((4863, 4933), 'logging.info', 'logging.info', (['"""Create spearman rank dataset with CV data %s"""', 'iterator'], {}), "('Create spearman rank dataset with CV data %s', iterator)\n", (4875, 4933), False, 'import logging\n'), ((7209, 
7224), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (7221, 7224), True, 'import pandas as pd\n'), ((7233, 7260), 'pandas.Series', 'pd.Series', (['Y'], {'index': 'Y_names'}), '(Y, index=Y_names)\n', (7242, 7260), True, 'import pandas as pd\n'), ((7265, 7322), 'logging.info', 'logging.info', (['"""CV_Metafeatures %s"""', 'cv_metafeatures.shape'], {}), "('CV_Metafeatures %s', cv_metafeatures.shape)\n", (7277, 7322), False, 'import logging\n'), ((7327, 7362), 'logging.info', 'logging.info', (['"""X.shape %s"""', 'X.shape'], {}), "('X.shape %s', X.shape)\n", (7339, 7362), False, 'import logging\n'), ((7367, 7402), 'logging.info', 'logging.info', (['"""Y.shape %s"""', 'Y.shape'], {}), "('Y.shape %s', Y.shape)\n", (7379, 7402), False, 'import logging\n'), ((7495, 7520), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (7516, 7520), True, 'import numpy as np\n'), ((1126, 1185), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['dataset_names'], {'r': '(2)'}), '(dataset_names, r=2)\n', (1165, 1185), False, 'import itertools\n'), ((1964, 1987), 'numpy.hstack', 'np.hstack', (['(mf_1, mf_2)'], {}), '((mf_1, mf_2))\n', (1973, 1987), True, 'import numpy as np\n'), ((2050, 2093), 'numpy.hstack', 'np.hstack', (["('0_' + columns, '1_' + columns)"], {}), "(('0_' + columns, '1_' + columns))\n", (2059, 2093), True, 'import numpy as np\n'), ((2106, 2147), 'pandas.Series', 'pd.Series', ([], {'data': 'x', 'name': 'name', 'index': 'index'}), '(data=x, name=name, index=index)\n', (2115, 2147), True, 'import pandas as pd\n'), ((4334, 4393), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['dataset_names'], {'r': '(2)'}), '(dataset_names, r=2)\n', (4373, 4393), False, 'import itertools\n'), ((1301, 1343), 'itertools.permutations', 'itertools.permutations', (['dataset_names'], {'r': '(2)'}), '(dataset_names, r=2)\n', (1323, 1343), False, 'import itertools\n'), ((3285, 3301), 
'numpy.isfinite', 'np.isfinite', (['rho'], {}), '(rho)\n', (3296, 3301), True, 'import numpy as np\n'), ((4627, 4669), 'itertools.permutations', 'itertools.permutations', (['dataset_names'], {'r': '(2)'}), '(dataset_names, r=2)\n', (4649, 4669), False, 'import itertools\n'), ((5723, 5746), 'numpy.hstack', 'np.hstack', (['(mf_1, mf_2)'], {}), '((mf_1, mf_2))\n', (5732, 5746), True, 'import numpy as np\n'), ((5838, 5881), 'numpy.hstack', 'np.hstack', (["('0_' + columns, '1_' + columns)"], {}), "(('0_' + columns, '1_' + columns))\n", (5847, 5881), True, 'import numpy as np\n'), ((5898, 5939), 'pandas.Series', 'pd.Series', ([], {'data': 'x', 'name': 'name', 'index': 'index'}), '(data=x, name=name, index=index)\n', (5907, 5939), True, 'import pandas as pd\n'), ((3023, 3048), 'numpy.isfinite', 'np.isfinite', (['exp_1.result'], {}), '(exp_1.result)\n', (3034, 3048), True, 'import numpy as np\n'), ((3103, 3128), 'numpy.isfinite', 'np.isfinite', (['exp_2.result'], {}), '(exp_2.result)\n', (3114, 3128), True, 'import numpy as np\n'), ((7099, 7115), 'numpy.isfinite', 'np.isfinite', (['rho'], {}), '(rho)\n', (7110, 7115), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018, National Institute of Informatics
All rights reserved.
Author: <NAME>
-----------------------------------------------------
Script for evaluating the network on full-size dataset using the LDA classifier
"""
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torchvision.models as models
from torch.autograd import Variable
import numpy as np
from sklearn.utils import shuffle
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import pickle
import math
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default ='datasets/dataset_1', help='path to root dataset')
parser.add_argument('--test_set', default ='test', help='path to test dataset')
parser.add_argument('--outf', default='output', help='folder to output images and model checkpoints')
parser.add_argument('--name', default ='dataset_1_output', help='name of training output')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=1)
parser.add_argument('--imageSize', type=int, default=100, help='the height / width of the input image to network')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--id', type=int, help='checkpoint ID')
parser.add_argument('--random_sample', type=int, default=0, help='number of random sample to test')
opt = parser.parse_args()
print(opt)
opt.cuda = not opt.no_cuda and torch.cuda.is_available()
model_path = os.path.join(opt.outf, opt.name)
text_test = open(os.path.join(model_path, 'test_lda_360p.csv'), 'w')
transform_fwd = transforms.Compose([
#transforms.Scale(opt.imageSize),
#transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# folder dataset
dataset_val = dset.ImageFolder(root=os.path.join(opt.dataset, opt.test_set), transform=transform_fwd)
assert dataset_val
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=int(opt.workers))
class VggExtractor(nn.Module):
def __init__(self, vgg, begin, end):
super(VggExtractor, self).__init__()
self.features = nn.Sequential(*list(vgg.features.children())[begin:(end+1)])
def print(self):
print(self.features)
def forward(self, input):
output = self.features(input)
return output
vgg_net = models.vgg19(pretrained=True)
if opt.cuda:
vgg_net = vgg_net.cuda()
# before ReLU
vgg_1 = VggExtractor(vgg_net, 0, 2)
vgg_2 = VggExtractor(vgg_net, 3, 7)
vgg_3 = VggExtractor(vgg_net, 8, 16)
# vgg_4 = VggExtractor(vgg_net, 17, 25)
# vgg_5 = VggExtractor(vgg_net, 26, 34)
# after ReLU
# vgg_1 = VggExtractor(vgg_net, 0, 4)
# vgg_2 = VggExtractor(vgg_net, 5, 9)
# vgg_3 = VggExtractor(vgg_net, 10, 18)
# vgg_4 = VggExtractor(vgg_net, 19, 27)
# vgg_5 = VggExtractor(vgg_net, 28, 36)
del vgg_net
class _netStats(nn.Module):
def __init__(self, depth, n=64):
super(_netStats, self).__init__()
self.depth = depth
self.n = n
self.conv_1 = nn.Sequential(
nn.Conv2d(self.depth, 128, 3, 1, 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 64, 3, 1, 1),
nn.BatchNorm2d(64),
nn.ReLU()
)
def forward(self, input):
x = self.conv_1(input)
y = x.view(x.data.shape[0], x.data.shape[1], x.data.shape[2]*x.data.shape[3])
mean = torch.mean(y, 2)
std = torch.std(y, 2)
result = torch.cat((mean, std), 1)
return result
def extract_subimages(image, subimage_size=256):
subimages = []
width = image.shape[3]
height = image.shape[2]
current_height = 0
while current_height + subimage_size <= height:
current_width = 0
while current_width + subimage_size <= width:
sub = image[:,:,current_height:current_height+subimage_size, current_width:current_width+subimage_size]
subimages.append(sub)
current_width += subimage_size
current_height += subimage_size
return subimages
model_id = opt.id
netStats_1 = _netStats(64)
netStats_1.load_state_dict(torch.load('%s/stats_1_%d.pth' % (model_path, model_id)))
netStats_2 = _netStats(128)
netStats_2.load_state_dict(torch.load('%s/stats_2_%d.pth' % (model_path, model_id)))
netStats_3 = _netStats(256)
netStats_3.load_state_dict(torch.load('%s/stats_3_%d.pth' % (model_path, model_id)))
# netStats_4 = _netStats(512)
# netStats_4.load_state_dict(torch.load('%s/stats_4_%d.pth' % (model_path, model_id)))
# netStats_5 = _netStats(512)
# netStats_5.load_state_dict(torch.load('%s/stats_5_%d.pth' % (model_path, model_id)))
abspath = os.path.abspath('%s/lda_%d.pickle' % (model_path, model_id))
#abspath = os.path.abspath('%s/lda_%d.pickle' % (model_path, model_id))
clf = pickle.load(open(abspath, 'rb'))
netStats_1.eval()
netStats_2.eval()
netStats_3.eval()
# netStats_4.eval()
# netStats_5.eval()
if opt.cuda:
netStats_1.cuda()
netStats_2.cuda()
netStats_3.cuda()
# netStats_4.cuda()
# netStats_5.cuda()
##################################################################################
predict_lst = np.array([], dtype=np.float)
#prob_lst = np.array([], dtype=np.float)
labels_lst = np.array([], dtype=np.float)
for img_data, labels_data in dataloader_val:
img_label = labels_data.numpy().astype(np.float)
features_lst = np.array([], dtype=np.float).reshape(0,384)
subimages = extract_subimages(img_data, opt.imageSize)
n_sub_imgs = len(subimages)
if (opt.random_sample > 0):
if n_sub_imgs > opt.random_sample:
np.random.shuffle(subimages)
n_sub_imgs = opt.random_sample
img_tmp = torch.FloatTensor([]).view(0, 3, opt.imageSize, opt.imageSize)
for i in range(n_sub_imgs):
img_tmp = torch.cat((img_tmp, subimages[i]), dim=0)
if opt.cuda:
img_tmp = img_tmp.cuda()
input_v = Variable(img_tmp, requires_grad = False)
vgg_output = vgg_1(input_v)
input_v = Variable(vgg_output.detach().data, requires_grad = False)
output_1 = netStats_1(input_v).data.cpu().numpy()
vgg_output = vgg_2(vgg_output)
input_v = Variable(vgg_output.detach().data, requires_grad = False)
output_2 = netStats_2(input_v).data.cpu().numpy()
vgg_output = vgg_3(vgg_output)
input_v = Variable(vgg_output.detach().data, requires_grad = False)
output_3 = netStats_3(input_v).data.cpu().numpy()
# vgg_output = vgg_4(vgg_output)
# input_v = Variable(vgg_output.detach().data, requires_grad = False)
# output_4 = netStats_4(input_v).data.cpu().numpy()
# vgg_output = vgg_5(vgg_output)
# input_v = Variable(vgg_output.detach().data, requires_grad = False)
# output_5 = netStats_5(input_v).data.cpu().numpy()
output_t = np.concatenate((output_1, output_2, output_3), axis=1)
features_lst = np.vstack((features_lst, output_t))
else:
batchSize = 50
steps = int(math.ceil(n_sub_imgs*1.0/batchSize))
output_pred = np.array([], dtype=np.float).reshape(0,2)
for i in range(steps):
img_tmp = torch.FloatTensor([]).view(0, 3, opt.imageSize, opt.imageSize)
end = (i + 1)*batchSize
if end > n_sub_imgs:
end = n_sub_imgs - i*batchSize
else:
end = batchSize
for j in range(end):
img_tmp = torch.cat((img_tmp, subimages[i*batchSize + j]), dim=0)
if opt.cuda:
img_tmp = img_tmp.cuda()
input_v = Variable(img_tmp, requires_grad = False)
vgg_output = vgg_1(input_v)
input_v = Variable(vgg_output.detach().data, requires_grad = False)
output_1 = netStats_1(input_v).data.cpu().numpy()
vgg_output = vgg_2(vgg_output)
input_v = Variable(vgg_output.detach().data, requires_grad = False)
output_2 = netStats_2(input_v).data.cpu().numpy()
vgg_output = vgg_3(vgg_output)
input_v = Variable(vgg_output.detach().data, requires_grad = False)
output_3 = netStats_3(input_v).data.cpu().numpy()
# vgg_output = vgg_4(vgg_output)
# input_v = Variable(vgg_output.detach().data, requires_grad = False)
# output_4 = netStats_4(input_v).data.cpu().numpy()
# vgg_output = vgg_5(vgg_output)
# input_v = Variable(vgg_output.detach().data, requires_grad = False)
# output_5 = netStats_5(input_v).data.cpu().numpy()
output_t = np.concatenate((output_1, output_2, output_3), axis=1)
features_lst = np.vstack((features_lst, output_t))
output_pred = clf.predict_proba(features_lst)
output_pred = output_pred.mean(0)
if output_pred[1] >= output_pred[0]:
pred = 1.0
else:
pred = 0.0
print('%d - %d' %(pred, img_label))
text_test.write('%d,%.2f\n' % (img_label, output_pred[1]))
predict_lst = np.concatenate((predict_lst, np.array([pred])), axis=0)
#prob_lst = np.concatenate((prob_lst, output_pred[1]), axis=0)
labels_lst = np.concatenate((labels_lst, img_label), axis=0)
acc = metrics.accuracy_score(labels_lst, predict_lst)
print(len(predict_lst))
print('%d\t%.4f' % (model_id, acc))
text_test.flush()
text_test.close()
| [
"torch.nn.ReLU",
"torchvision.models.vgg19",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"argparse.ArgumentParser",
"torch.mean",
"numpy.vstack",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torchvision.transforms.Normalize",
"torc... | [((863, 888), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (886, 888), False, 'import argparse\n'), ((1901, 1933), 'os.path.join', 'os.path.join', (['opt.outf', 'opt.name'], {}), '(opt.outf, opt.name)\n', (1913, 1933), False, 'import os\n'), ((3101, 3130), 'torchvision.models.vgg19', 'models.vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3113, 3130), True, 'import torchvision.models as models\n'), ((5747, 5807), 'os.path.abspath', 'os.path.abspath', (["('%s/lda_%d.pickle' % (model_path, model_id))"], {}), "('%s/lda_%d.pickle' % (model_path, model_id))\n", (5762, 5807), False, 'import os\n'), ((6318, 6346), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (6326, 6346), True, 'import numpy as np\n'), ((6411, 6439), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (6419, 6439), True, 'import numpy as np\n'), ((10856, 10903), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['labels_lst', 'predict_lst'], {}), '(labels_lst, predict_lst)\n', (10878, 10903), False, 'from sklearn import metrics\n'), ((1855, 1880), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1878, 1880), False, 'import torch\n'), ((1956, 2001), 'os.path.join', 'os.path.join', (['model_path', '"""test_lda_360p.csv"""'], {}), "(model_path, 'test_lda_360p.csv')\n", (1968, 2001), False, 'import os\n'), ((2274, 2299), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2297, 2299), False, 'import torch\n'), ((5164, 5220), 'torch.load', 'torch.load', (["('%s/stats_1_%d.pth' % (model_path, model_id))"], {}), "('%s/stats_1_%d.pth' % (model_path, model_id))\n", (5174, 5220), False, 'import torch\n'), ((5289, 5345), 'torch.load', 'torch.load', (["('%s/stats_2_%d.pth' % (model_path, model_id))"], {}), "('%s/stats_2_%d.pth' % (model_path, model_id))\n", (5299, 5345), False, 'import torch\n'), ((5414, 5470), 'torch.load', 
'torch.load', (["('%s/stats_3_%d.pth' % (model_path, model_id))"], {}), "('%s/stats_3_%d.pth' % (model_path, model_id))\n", (5424, 5470), False, 'import torch\n'), ((10793, 10840), 'numpy.concatenate', 'np.concatenate', (['(labels_lst, img_label)'], {'axis': '(0)'}), '((labels_lst, img_label), axis=0)\n', (10807, 10840), True, 'import numpy as np\n'), ((2152, 2173), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2171, 2173), True, 'import torchvision.transforms as transforms\n'), ((2184, 2250), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (2204, 2250), True, 'import torchvision.transforms as transforms\n'), ((2475, 2514), 'os.path.join', 'os.path.join', (['opt.dataset', 'opt.test_set'], {}), '(opt.dataset, opt.test_set)\n', (2487, 2514), False, 'import os\n'), ((4337, 4353), 'torch.mean', 'torch.mean', (['y', '(2)'], {}), '(y, 2)\n', (4347, 4353), False, 'import torch\n'), ((4373, 4388), 'torch.std', 'torch.std', (['y', '(2)'], {}), '(y, 2)\n', (4382, 4388), False, 'import torch\n'), ((4411, 4436), 'torch.cat', 'torch.cat', (['(mean, std)', '(1)'], {}), '((mean, std), 1)\n', (4420, 4436), False, 'import torch\n'), ((7199, 7237), 'torch.autograd.Variable', 'Variable', (['img_tmp'], {'requires_grad': '(False)'}), '(img_tmp, requires_grad=False)\n', (7207, 7237), False, 'from torch.autograd import Variable\n'), ((8225, 8279), 'numpy.concatenate', 'np.concatenate', (['(output_1, output_2, output_3)'], {'axis': '(1)'}), '((output_1, output_2, output_3), axis=1)\n', (8239, 8279), True, 'import numpy as np\n'), ((8308, 8343), 'numpy.vstack', 'np.vstack', (['(features_lst, output_t)'], {}), '((features_lst, output_t))\n', (8317, 8343), True, 'import numpy as np\n'), ((3917, 3952), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.depth', '(128)', '(3)', '(1)', '(1)'], {}), '(self.depth, 128, 3, 1, 1)\n', (3926, 3952), True, 
'import torch.nn as nn\n'), ((3971, 3990), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3985, 3990), True, 'import torch.nn as nn\n'), ((4009, 4018), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4016, 4018), True, 'import torch.nn as nn\n'), ((4037, 4064), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)', '(1)', '(1)'], {}), '(128, 64, 3, 1, 1)\n', (4046, 4064), True, 'import torch.nn as nn\n'), ((4083, 4101), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (4097, 4101), True, 'import torch.nn as nn\n'), ((4120, 4129), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4127, 4129), True, 'import torch.nn as nn\n'), ((6576, 6604), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (6584, 6604), True, 'import numpy as np\n'), ((6827, 6855), 'numpy.random.shuffle', 'np.random.shuffle', (['subimages'], {}), '(subimages)\n', (6844, 6855), True, 'import numpy as np\n'), ((7062, 7103), 'torch.cat', 'torch.cat', (['(img_tmp, subimages[i])'], {'dim': '(0)'}), '((img_tmp, subimages[i]), dim=0)\n', (7071, 7103), False, 'import torch\n'), ((8414, 8453), 'math.ceil', 'math.ceil', (['(n_sub_imgs * 1.0 / batchSize)'], {}), '(n_sub_imgs * 1.0 / batchSize)\n', (8423, 8453), False, 'import math\n'), ((9079, 9117), 'torch.autograd.Variable', 'Variable', (['img_tmp'], {'requires_grad': '(False)'}), '(img_tmp, requires_grad=False)\n', (9087, 9117), False, 'from torch.autograd import Variable\n'), ((10169, 10223), 'numpy.concatenate', 'np.concatenate', (['(output_1, output_2, output_3)'], {'axis': '(1)'}), '((output_1, output_2, output_3), axis=1)\n', (10183, 10223), True, 'import numpy as np\n'), ((10256, 10291), 'numpy.vstack', 'np.vstack', (['(features_lst, output_t)'], {}), '((features_lst, output_t))\n', (10265, 10291), True, 'import numpy as np\n'), ((10672, 10688), 'numpy.array', 'np.array', (['[pred]'], {}), '([pred])\n', (10680, 10688), True, 'import numpy as np\n'), ((6929, 6950), 
'torch.FloatTensor', 'torch.FloatTensor', (['[]'], {}), '([])\n', (6946, 6950), False, 'import torch\n'), ((8480, 8508), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (8488, 8508), True, 'import numpy as np\n'), ((8916, 8973), 'torch.cat', 'torch.cat', (['(img_tmp, subimages[i * batchSize + j])'], {'dim': '(0)'}), '((img_tmp, subimages[i * batchSize + j]), dim=0)\n', (8925, 8973), False, 'import torch\n'), ((8589, 8610), 'torch.FloatTensor', 'torch.FloatTensor', (['[]'], {}), '([])\n', (8606, 8610), False, 'import torch\n')] |
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 Nagoya University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Calculate MCD using converted waveform.
import argparse
import fnmatch
import multiprocessing as mp
import os
import numpy as np
import pysptk
import pyworld as pw
import scipy
from fastdtw import fastdtw
from scipy.io import wavfile
from scipy.signal import firwin, lfilter
def find_files(root_dir, query="*.wav", include_root_dir=True):
    """Recursively collect filenames under *root_dir* that match *query*.

    Args:
        root_dir (str): Root directory to search (symlinks are followed).
        query (str): Glob pattern matched against filenames.
        include_root_dir (bool): When False, the root_dir prefix is stripped
            from every returned path.

    Returns:
        list: List of matched file paths, in os.walk order.
    """
    matched = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in os.walk(root_dir, followlinks=True)
        for fname in fnmatch.filter(fnames, query)
    ]
    if include_root_dir:
        return matched
    prefix = root_dir + "/"
    return [path.replace(prefix, "") for path in matched]
def low_cut_filter(x, fs, cutoff=70):
"""FUNCTION TO APPLY LOW CUT FILTER
Args:
x (ndarray): Waveform sequence
fs (int): Sampling frequency
cutoff (float): Cutoff frequency of low cut filter
Return:
(ndarray): Low cut filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def spc2npow(spectrogram):
    """Calculate a normalized power sequence from a spectrogram.

    Parameters
    ----------
    spectrogram : array, shape (T, `fftlen / 2 + 1`)
        Array of spectrum envelope

    Return
    ------
    npow : array, shape (`T`, `1`)
        Normalized power sequence
    """
    # Per-frame power, then normalize by the utterance mean and convert to dB.
    frame_power = np.apply_along_axis(_spvec2pow, 1, spectrogram)
    mean_power = np.mean(frame_power)
    return 10.0 * np.log10(frame_power / mean_power)
def _spvec2pow(specvec):
"""Convert a spectrum envelope into a power
Parameters
----------
specvec : vector, shape (`fftlen / 2 + 1`)
Vector of specturm envelope |H(w)|^2
Return
------
power : scala,
Power of a frame
"""
# set FFT length
fftl2 = len(specvec) - 1
fftl = fftl2 * 2
# specvec is not amplitude spectral |H(w)| but power spectral |H(w)|^2
power = specvec[0] + specvec[fftl2]
for k in range(1, fftl2):
power += 2.0 * specvec[k]
power /= fftl
return power
def extfrm(data, npow, power_threshold=-20):
    """Extract frames whose power exceeds the threshold.

    Parameters
    ----------
    data: array, shape (`T`, `dim`)
        Array of input data
    npow : array, shape (`T`)
        Vector of normalized power sequence.
    power_threshold : float, optional
        Value of power threshold [dB]
        Default set to -20

    Returns
    -------
    data: array, shape (`T_ext`, `dim`)
        Remaining data after extracting frame
        `T_ext` <= `T`

    Raises
    ------
    ValueError
        If `data` and `npow` have different lengths.
    """
    T = data.shape[0]
    if T != len(npow):
        # Fix: the original `raise ("...")` raised a bare string, which is a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError("Length of two vectors is different.")
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T
    return extdata
def world_extract(wav_path, args):
    """Extract WORLD features and mel-cepstrum from a waveform file.

    Args:
        wav_path (str): Path to the input wav file.
        args (Namespace): Analysis settings (f0min, f0max, shiftms, fftl,
            mcep_dim, mcep_alpha).

    Returns:
        dict: Spectral envelope ("sp"), mel-cepstrum ("mcep"), aperiodicity
            ("ap"), F0 contour ("f0") and normalized power ("npow").
    """
    fs, waveform = wavfile.read(wav_path)
    waveform = low_cut_filter(np.array(waveform, dtype=np.float64), fs)
    # WORLD analysis: F0 contour, spectral envelope and aperiodicity.
    f0, time_axis = pw.harvest(
        waveform, fs, f0_floor=args.f0min, f0_ceil=args.f0max, frame_period=args.shiftms
    )
    sp = pw.cheaptrick(waveform, f0, time_axis, fs, fft_size=args.fftl)
    ap = pw.d4c(waveform, f0, time_axis, fs, fft_size=args.fftl)
    return {
        "sp": sp,
        "mcep": pysptk.sp2mc(sp, args.mcep_dim, args.mcep_alpha),
        "ap": ap,
        "f0": f0,
        "npow": spc2npow(sp),
    }
def get_basename(path):
    """Return the filename of *path* without directory or extension."""
    filename = os.path.split(path)[-1]
    return os.path.splitext(filename)[0]
def calculate(file_list, gt_file_list, args, MCD):
    """Compute MCD for each converted utterance against its ground truth.

    For every converted waveform, the matching ground-truth file is located
    by basename, WORLD features are extracted from both, silent frames are
    dropped by a power threshold, the mel-cepstra are aligned with DTW, and
    the resulting mel-cepstral distortion is appended to the shared list.

    Args:
        file_list (list): Converted-waveform paths handled by this worker.
        gt_file_list (list): Paths of all ground-truth waveforms.
        args (Namespace): Analysis settings from the argument parser.
        MCD (list): Shared output list; one MCD value is appended per file.
    """
    for i, cvt_path in enumerate(file_list):
        # A ground-truth file matches when its basename appears inside the
        # converted file's path; exactly one match is expected.
        corresponding_list = list(
            filter(lambda gt_path: get_basename(gt_path) in cvt_path, gt_file_list)
        )
        assert len(corresponding_list) == 1
        gt_path = corresponding_list[0]
        gt_basename = get_basename(gt_path)
        # extract ground truth and converted features
        gt_feats = world_extract(gt_path, args)
        cvt_feats = world_extract(cvt_path, args)
        # VAD & DTW based on power: drop silent frames, then align the
        # converted mel-cepstra to the ground truth with fastdtw.
        gt_mcep_nonsil_pow = extfrm(gt_feats["mcep"], gt_feats["npow"])
        cvt_mcep_nonsil_pow = extfrm(cvt_feats["mcep"], cvt_feats["npow"])
        _, path = fastdtw(
            cvt_mcep_nonsil_pow,
            gt_mcep_nonsil_pow,
            dist=scipy.spatial.distance.euclidean,
        )
        # twf_pow[0]/twf_pow[1] are the aligned frame indices for the
        # converted and ground-truth sequences respectively.
        twf_pow = np.array(path).T
        # MCD using power-based DTW:
        # mcd = mean over frames of (10 / ln 10) * sqrt(2 * sum(diff^2))
        cvt_mcep_dtw_pow = cvt_mcep_nonsil_pow[twf_pow[0]]
        gt_mcep_dtw_pow = gt_mcep_nonsil_pow[twf_pow[1]]
        diff2sum = np.sum((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2, 1)
        mcd = np.mean(10.0 / np.log(10.0) * np.sqrt(2 * diff2sum), 0)
        print("{} {}".format(gt_basename, mcd))
        MCD.append(mcd)
def get_parser():
    """Build the argument parser for the MCD calculation script.

    Returns:
        argparse.ArgumentParser: Parser with I/O directories, WORLD/mcep
            analysis settings, F0 search range and parallelism options.
    """
    parser = argparse.ArgumentParser(description="calculate MCD.")
    parser.add_argument(
        "--wavdir",
        required=True,
        type=str,
        help="path of directory for converted waveforms",
    )
    parser.add_argument(
        "--gtwavdir",
        required=True,
        type=str,
        help="path of directory for ground truth waveforms",
    )
    # analysis related
    parser.add_argument(
        "--mcep_dim", default=41, type=int, help="dimension of mel cepstrum coefficient"
    )
    # Fix: the all-pass constant is fractional (default 0.41); the original
    # type=int would reject any value passed on the command line.
    parser.add_argument(
        "--mcep_alpha", default=0.41, type=float, help="all pass constant"
    )
    parser.add_argument("--fftl", default=1024, type=int, help="fft length")
    parser.add_argument("--shiftms", default=5, type=int, help="frame shift (ms)")
    parser.add_argument(
        "--f0min", required=True, type=int, help="fo search range (min)"
    )
    parser.add_argument(
        "--f0max", required=True, type=int, help="fo search range (max)"
    )
    parser.add_argument(
        "--n_jobs", default=40, type=int, help="number of parallel jobs"
    )
    return parser
def main():
    """Entry point: compute MCD for every converted/ground-truth pair.

    Splits the converted-file list into ``n_jobs`` chunks and evaluates them
    in parallel worker processes, collecting per-utterance MCD values in a
    manager-backed shared list, then prints the mean MCD.
    """
    args = get_parser().parse_args()
    # find files
    converted_files = sorted(find_files(args.wavdir))
    gt_files = sorted(find_files(args.gtwavdir))
    # Get and divide list
    print("number of utterances = %d" % len(converted_files))
    file_lists = np.array_split(converted_files, args.n_jobs)
    file_lists = [f_list.tolist() for f_list in file_lists]
    # multi processing
    with mp.Manager() as manager:
        MCD = manager.list()  # shared list; each worker appends its MCD values
        processes = []
        for f in file_lists:
            p = mp.Process(target=calculate, args=(f, gt_files, args, MCD))
            p.start()
            processes.append(p)
        # wait for all process
        for p in processes:
            p.join()
        mMCD = np.mean(np.array(MCD))
        print("Mean MCD: {:.2f}".format(mMCD))
if __name__ == "__main__":
    main()
| [
"numpy.log10",
"numpy.sqrt",
"multiprocessing.Process",
"numpy.log",
"numpy.array_split",
"numpy.array",
"os.walk",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.where",
"os.path.split",
"pyworld.d4c",
"scipy.signal.firwin",
"pyworld.harvest",
"scipy.io.wavfile.read",
"multiprocessin... | [((805, 840), 'os.walk', 'os.walk', (['root_dir'], {'followlinks': '(True)'}), '(root_dir, followlinks=True)\n', (812, 840), False, 'import os\n'), ((1464, 1505), 'scipy.signal.firwin', 'firwin', (['(255)', 'norm_cutoff'], {'pass_zero': '(False)'}), '(255, norm_cutoff, pass_zero=False)\n', (1470, 1505), False, 'from scipy.signal import firwin, lfilter\n'), ((1518, 1536), 'scipy.signal.lfilter', 'lfilter', (['fil', '(1)', 'x'], {}), '(fil, 1, x)\n', (1525, 1536), False, 'from scipy.signal import firwin, lfilter\n'), ((1905, 1952), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_spvec2pow', '(1)', 'spectrogram'], {}), '(_spvec2pow, 1, spectrogram)\n', (1924, 1952), True, 'import numpy as np\n'), ((1968, 1981), 'numpy.mean', 'np.mean', (['npow'], {}), '(npow)\n', (1975, 1981), True, 'import numpy as np\n'), ((3232, 3264), 'numpy.where', 'np.where', (['(npow > power_threshold)'], {}), '(npow > power_threshold)\n', (3240, 3264), True, 'import numpy as np\n'), ((3399, 3421), 'scipy.io.wavfile.read', 'wavfile.read', (['wav_path'], {}), '(wav_path)\n', (3411, 3421), False, 'from scipy.io import wavfile\n'), ((3430, 3459), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (3438, 3459), True, 'import numpy as np\n'), ((3534, 3624), 'pyworld.harvest', 'pw.harvest', (['x', 'fs'], {'f0_floor': 'args.f0min', 'f0_ceil': 'args.f0max', 'frame_period': 'args.shiftms'}), '(x, fs, f0_floor=args.f0min, f0_ceil=args.f0max, frame_period=\n args.shiftms)\n', (3544, 3624), True, 'import pyworld as pw\n'), ((3643, 3698), 'pyworld.cheaptrick', 'pw.cheaptrick', (['x', 'f0', 'time_axis', 'fs'], {'fft_size': 'args.fftl'}), '(x, f0, time_axis, fs, fft_size=args.fftl)\n', (3656, 3698), True, 'import pyworld as pw\n'), ((3708, 3756), 'pyworld.d4c', 'pw.d4c', (['x', 'f0', 'time_axis', 'fs'], {'fft_size': 'args.fftl'}), '(x, f0, time_axis, fs, fft_size=args.fftl)\n', (3714, 3756), True, 'import pyworld as pw\n'), ((3768, 3816), 
'pysptk.sp2mc', 'pysptk.sp2mc', (['sp', 'args.mcep_dim', 'args.mcep_alpha'], {}), '(sp, args.mcep_dim, args.mcep_alpha)\n', (3780, 3816), False, 'import pysptk\n'), ((5323, 5376), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""calculate MCD."""'}), "(description='calculate MCD.')\n", (5346, 5376), False, 'import argparse\n'), ((6700, 6744), 'numpy.array_split', 'np.array_split', (['converted_files', 'args.n_jobs'], {}), '(converted_files, args.n_jobs)\n', (6714, 6744), True, 'import numpy as np\n'), ((866, 898), 'fnmatch.filter', 'fnmatch.filter', (['filenames', 'query'], {}), '(filenames, query)\n', (880, 898), False, 'import fnmatch\n'), ((2000, 2024), 'numpy.log10', 'np.log10', (['(npow / meanpow)'], {}), '(npow / meanpow)\n', (2008, 2024), True, 'import numpy as np\n'), ((4751, 4843), 'fastdtw.fastdtw', 'fastdtw', (['cvt_mcep_nonsil_pow', 'gt_mcep_nonsil_pow'], {'dist': 'scipy.spatial.distance.euclidean'}), '(cvt_mcep_nonsil_pow, gt_mcep_nonsil_pow, dist=scipy.spatial.\n distance.euclidean)\n', (4758, 4843), False, 'from fastdtw import fastdtw\n'), ((5093, 5145), 'numpy.sum', 'np.sum', (['((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2)', '(1)'], {}), '((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2, 1)\n', (5099, 5145), True, 'import numpy as np\n'), ((6838, 6850), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (6848, 6850), True, 'import multiprocessing as mp\n'), ((4904, 4918), 'numpy.array', 'np.array', (['path'], {}), '(path)\n', (4912, 4918), True, 'import numpy as np\n'), ((6960, 7019), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'calculate', 'args': '(f, gt_files, args, MCD)'}), '(target=calculate, args=(f, gt_files, args, MCD))\n', (6970, 7019), True, 'import multiprocessing as mp\n'), ((7179, 7192), 'numpy.array', 'np.array', (['MCD'], {}), '(MCD)\n', (7187, 7192), True, 'import numpy as np\n'), ((925, 953), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (937, 953), 
False, 'import os\n'), ((4013, 4032), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (4026, 4032), False, 'import os\n'), ((5190, 5211), 'numpy.sqrt', 'np.sqrt', (['(2 * diff2sum)'], {}), '(2 * diff2sum)\n', (5197, 5211), True, 'import numpy as np\n'), ((5175, 5187), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (5181, 5187), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import logging
import os
import pickle
import shutil
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from lightautoml.automl.presets.text_presets import TabularNLPAutoML
from lightautoml.tasks import Task
def test_nlp_preset():
    """End-to-end smoke test of TabularNLPAutoML on the Avito demo dataset.

    Fits the NLP preset on a regression task with several text features,
    reports OOF and holdout MSE, then verifies the fitted pipeline survives
    a pickle round trip by re-predicting with the reloaded object. Cleans up
    the pickle file and the ``./models`` directory on exit.
    """
    np.random.seed(42)
    logging.basicConfig(format='[%(asctime)s] (%(levelname)s): %(message)s', level=logging.DEBUG)
    data = pd.read_csv('../example_data/test_data_files/avito1k_train.csv')
    # Hold out 500 rows for testing; fixed seed keeps the split reproducible.
    train, test = train_test_split(data, test_size=500, random_state=42)
    # Column roles passed to the preset: regression target, a grouping
    # column, and the text columns to be featurized.
    roles = {
        'target': 'deal_probability',
        'group': ['user_id'],
        'text': ['description', 'title', 'param_1', 'param_2', 'param_3']
    }
    task = Task('reg', )
    automl = TabularNLPAutoML(task=task, timeout=600)
    oof_pred = automl.fit_predict(train, roles=roles)
    test_pred = automl.predict(test)
    # Rows whose OOF prediction is NaN are excluded from the OOF score.
    not_nan = np.any(~np.isnan(oof_pred.data), axis=1)
    logging.debug('Check scores...')
    print('OOF score: {}'.format(mean_squared_error(train[roles['target']].values[not_nan], oof_pred.data[not_nan][:, 0])))
    print('TEST score: {}'.format(mean_squared_error(test[roles['target']].values, test_pred.data[:, 0])))
    # Pickle round trip: serialize, reload and predict again.
    print('Pickle automl')
    with open('automl.pickle', 'wb') as f:
        pickle.dump(automl, f)
    logging.debug('Load pickled automl')
    with open('automl.pickle', 'rb') as f:
        automl = pickle.load(f)
    logging.debug('Predict loaded automl')
    test_pred = automl.predict(test)
    print('TEST score, loaded: {}'.format(mean_squared_error(test['deal_probability'].values, test_pred.data[:, 0])))
    # Clean up artifacts produced by the run.
    os.remove('automl.pickle')
    shutil.rmtree('./models', ignore_errors=True)
if __name__ == '__main__':
    test_nlp_preset()
| [
"logging.basicConfig",
"lightautoml.automl.presets.text_presets.TabularNLPAutoML",
"lightautoml.tasks.Task",
"pickle.dump",
"logging.debug",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pickle.load",
"sklearn.metrics.mean_squared_error",
"numpy.isnan",
"numpy.random.seed",
"... | [((365, 383), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (379, 383), True, 'import numpy as np\n'), ((388, 485), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s] (%(levelname)s): %(message)s"""', 'level': 'logging.DEBUG'}), "(format='[%(asctime)s] (%(levelname)s): %(message)s',\n level=logging.DEBUG)\n", (407, 485), False, 'import logging\n'), ((494, 558), 'pandas.read_csv', 'pd.read_csv', (['"""../example_data/test_data_files/avito1k_train.csv"""'], {}), "('../example_data/test_data_files/avito1k_train.csv')\n", (505, 558), True, 'import pandas as pd\n'), ((578, 632), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(500)', 'random_state': '(42)'}), '(data, test_size=500, random_state=42)\n', (594, 632), False, 'from sklearn.model_selection import train_test_split\n'), ((808, 819), 'lightautoml.tasks.Task', 'Task', (['"""reg"""'], {}), "('reg')\n", (812, 819), False, 'from lightautoml.tasks import Task\n'), ((836, 876), 'lightautoml.automl.presets.text_presets.TabularNLPAutoML', 'TabularNLPAutoML', ([], {'task': 'task', 'timeout': '(600)'}), '(task=task, timeout=600)\n', (852, 876), False, 'from lightautoml.automl.presets.text_presets import TabularNLPAutoML\n'), ((1028, 1060), 'logging.debug', 'logging.debug', (['"""Check scores..."""'], {}), "('Check scores...')\n", (1041, 1060), False, 'import logging\n'), ((1399, 1435), 'logging.debug', 'logging.debug', (['"""Load pickled automl"""'], {}), "('Load pickled automl')\n", (1412, 1435), False, 'import logging\n'), ((1516, 1554), 'logging.debug', 'logging.debug', (['"""Predict loaded automl"""'], {}), "('Predict loaded automl')\n", (1529, 1554), False, 'import logging\n'), ((1715, 1741), 'os.remove', 'os.remove', (['"""automl.pickle"""'], {}), "('automl.pickle')\n", (1724, 1741), False, 'import os\n'), ((1746, 1791), 'shutil.rmtree', 'shutil.rmtree', (['"""./models"""'], {'ignore_errors': '(True)'}), "('./models', 
ignore_errors=True)\n", (1759, 1791), False, 'import shutil\n'), ((1371, 1393), 'pickle.dump', 'pickle.dump', (['automl', 'f'], {}), '(automl, f)\n', (1382, 1393), False, 'import pickle\n'), ((1496, 1510), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1507, 1510), False, 'import pickle\n'), ((990, 1013), 'numpy.isnan', 'np.isnan', (['oof_pred.data'], {}), '(oof_pred.data)\n', (998, 1013), True, 'import numpy as np\n'), ((1094, 1187), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (["train[roles['target']].values[not_nan]", 'oof_pred.data[not_nan][:, 0]'], {}), "(train[roles['target']].values[not_nan], oof_pred.data[\n not_nan][:, 0])\n", (1112, 1187), False, 'from sklearn.metrics import mean_squared_error\n'), ((1219, 1289), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (["test[roles['target']].values", 'test_pred.data[:, 0]'], {}), "(test[roles['target']].values, test_pred.data[:, 0])\n", (1237, 1289), False, 'from sklearn.metrics import mean_squared_error\n'), ((1634, 1707), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (["test['deal_probability'].values", 'test_pred.data[:, 0]'], {}), "(test['deal_probability'].values, test_pred.data[:, 0])\n", (1652, 1707), False, 'from sklearn.metrics import mean_squared_error\n')] |
import tensorflow as tf
import cv2
import time
import argparse
import torch
from omegaconf import OmegaConf
from models.networks.LSTM import LSTM
import numpy as np
import posenet
# writing results to CSV
import csv
import pprint
# Command-line options: PoseNet model variant, camera/video source and
# capture size, and the PoseNet input scale factor.
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--cam_id', type=int, default=0)
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera")
args = parser.parse_args()
# Load the LSTM classifier configuration and build the network.
cfg = OmegaConf.load('./configs/project/default.yaml')
print(cfg)
# cfg.model.initial_ckpt = "./model.pth"
# cfg.model.embedder.initial_ckpt = "./embedder.pth"
model = LSTM(cfg)
def main():
    """Run PoseNet on camera/video frames and classify motion with the LSTM.

    Streams frames, estimates 2-D keypoints with PoseNet, converts them into
    a normalized sliding window of the last 10 frames (51 features each:
    score, x, y for 17 keypoints), feeds the window to the LSTM, and prints
    a movement command ("go forward" / "go forward a little" / "stop").
    Press 'q' in the display window to quit.
    """
    with tf.Session() as sess:
        # make csv — NOTE(review): opened in 'w' mode only, which truncates
        # ./data/sample.csv; the writer created here is otherwise unused.
        with open("./data/sample.csv", 'w') as f:
            writer = csv.writer(f)
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']
        leftShoulder_list = [0]*10
        rightShoulder_list = [0]*10
        leftElbow_list = [0]*10
        rightElbow_list = [0]*10
        leftWrist_list = [0]*10
        rightWrist_list = [0]*10
        leftHip_list = [0]*10
        rightHip_list = [0]*10
        if args.file is not None:
            cap = cv2.VideoCapture(args.file)
        else:
            cap = cv2.VideoCapture(args.cam_id)
        cap.set(3, args.cam_width)
        cap.set(4, args.cam_height)
        start = time.time()
        frame_count = 0
        # H holds the last 10 frames x 51 features; H_score flags whether
        # each of those frames yielded a usable (nonzero-height) pose.
        H = [0] * 510
        H_score = [0] * 10
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=args.scale_factor, output_stride=output_stride)
            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )
            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)
            keypoint_coords *= output_scale
            # TODO this isn't particularly fast, use GL for drawing and display someday...
            overlay_image = posenet.draw_skel_and_kp(
                display_image, pose_scores, keypoint_scores, keypoint_coords,
                min_pose_score=0.15, min_part_score=0.1)
            cv2.imshow('posenet', overlay_image)
            # Relativize: origin at the midpoint of keypoints 5 and 6
            # (presumably the shoulders in PoseNet's ordering — confirm).
            x0 = (keypoint_coords[0,5,:][1] + keypoint_coords[0,6,:][1]) / 2
            y0 = (keypoint_coords[0,5,:][0] + keypoint_coords[0,6,:][0]) / 2
            # Normalize by vertical extent: keypoint 0 down to the midpoint
            # of keypoints 15/16.
            y_min = keypoint_coords[0,0,:][0]
            y_max = (keypoint_coords[0,15,:][0] + keypoint_coords[0,16,:][0]) / 2
            y_diff = int(y_max - y_min)
            h = []
            # Per-keypoint features: score, normalized x, normalized y
            # (y axis flipped). A degenerate pose (y_diff == 0) yields zeros.
            for i in range(17):
                if y_diff != 0:
                    h.append((keypoint_scores[0, :][i]))
                    h.append((keypoint_coords[0,i,:][1] - x0) / y_diff)
                    h.append((- keypoint_coords[0,i,:][0] + y0) / y_diff)
                else:
                    h = [0] * 51
            if y_diff != 0:
                H_score.append(1)
            else:
                H_score.append(0)
            # Slide the 10-frame window: append the new frame, drop the oldest.
            H.extend(h)
            H[0:51] = []
            H_score[0:1] = []
            # print(len(H))
            # for demo
            x = []
            x.append([H[i * 51 : (i+1) * 51] for i in range(10)])
            x = torch.from_numpy(np.array(x)).float()
            outputs = model.network(x)
            # print(outputs)
            # Pick the movement command from the largest of the three logits.
            if outputs[0][1] <= outputs[0][0] and outputs[0][2] <= outputs[0][0]:
                print("go forward")
            elif outputs[0][0] <= outputs[0][1] and outputs[0][2] <= outputs[0][1]:
                print("go forward a little")
            else:
                print("stop")
            #H_csv = H
            #H_csv.append(0)
            #vel = 0
            #print(H_score)
            #print(len(H))
            #print(H[31])
            #print(H_csv)
            # print("len(H)")
            # print(len(H))
            #print("len(H_csv)")
            #rint(len(H_csv))
            """
            #csvデータの作成
            H.append(2)
            with open("./data/sample.csv", 'a') as f:
                writer = csv.writer(f)
                writer.writerow(H)
            del H[-1]
            print(H)
            print(len(H))
            """
            """
            # プロトタイプ
            if sum(H_score) == 5:
                for i in range(4):
                    x_vec = H[i * 51 + 31] - H[(i + 1) * 51 + 31]
                    y_vec = H[i * 51 + 32] - H[(i + 1) * 51 + 32]
                    vel = (x_vec ** 2 + y_vec ** 2) ** 0.5
                    #print(vel)
                    if vel >= 0.08:
                        print("go forward")
                    elif vel >= 0.02:
                        print("go forward a little")
                    else:
                        print("stop")
                    #print(vel)
            else:
                print("stop")
            #for time in range(4):
            """
            frame_count += 1
            #print('Average FPS: ', frame_count / (time.time() - start))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        #print('Average FPS: ', frame_count / (time.time() - start))
if __name__ == "__main__":
main() | [
"argparse.ArgumentParser",
"posenet.draw_skel_and_kp",
"tensorflow.Session",
"csv.writer",
"posenet.read_cap",
"omegaconf.OmegaConf.load",
"cv2.imshow",
"posenet.load_model",
"cv2.waitKey",
"numpy.array",
"cv2.VideoCapture",
"models.networks.LSTM.LSTM",
"time.time"
] | [((228, 253), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (251, 253), False, 'import argparse\n'), ((695, 743), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['"""./configs/project/default.yaml"""'], {}), "('./configs/project/default.yaml')\n", (709, 743), False, 'from omegaconf import OmegaConf\n'), ((857, 866), 'models.networks.LSTM.LSTM', 'LSTM', (['cfg'], {}), '(cfg)\n', (861, 866), False, 'from models.networks.LSTM import LSTM\n'), ((890, 902), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (900, 902), True, 'import tensorflow as tf\n'), ((1051, 1087), 'posenet.load_model', 'posenet.load_model', (['args.model', 'sess'], {}), '(args.model, sess)\n', (1069, 1087), False, 'import posenet\n'), ((1632, 1643), 'time.time', 'time.time', ([], {}), '()\n', (1641, 1643), False, 'import time\n'), ((1001, 1014), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1011, 1014), False, 'import csv\n'), ((1454, 1481), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.file'], {}), '(args.file)\n', (1470, 1481), False, 'import cv2\n'), ((1514, 1543), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.cam_id'], {}), '(args.cam_id)\n', (1530, 1543), False, 'import cv2\n'), ((1792, 1879), 'posenet.read_cap', 'posenet.read_cap', (['cap'], {'scale_factor': 'args.scale_factor', 'output_stride': 'output_stride'}), '(cap, scale_factor=args.scale_factor, output_stride=\n output_stride)\n', (1808, 1879), False, 'import posenet\n'), ((2698, 2829), 'posenet.draw_skel_and_kp', 'posenet.draw_skel_and_kp', (['display_image', 'pose_scores', 'keypoint_scores', 'keypoint_coords'], {'min_pose_score': '(0.15)', 'min_part_score': '(0.1)'}), '(display_image, pose_scores, keypoint_scores,\n keypoint_coords, min_pose_score=0.15, min_part_score=0.1)\n', (2722, 2829), False, 'import posenet\n'), ((2872, 2908), 'cv2.imshow', 'cv2.imshow', (['"""posenet"""', 'overlay_image'], {}), "('posenet', overlay_image)\n", (2882, 2908), False, 'import cv2\n'), ((5766, 5780), 
'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5777, 5780), False, 'import cv2\n'), ((3973, 3984), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3981, 3984), True, 'import numpy as np\n')] |
import numpy as np
from neural import LSTM_WindTurbine
from neural import LSTM_PvSystem
from neural import LSTM_LedSystem
from neural import LSTM_LoadStationSystem
from neural.example import LSTM
# WIND TURBINE SYSTEM functions
def predictPw(forecastHours, visualize):
    """Forecast wind-turbine power output with the LSTM model."""
    return LSTM_WindTurbine.LSTM_RUN(
        "data/dummy/WindTurbine.csv", forecastHours, visualize
    )
def getPw(forecastHours):
    """Run the wind-turbine LSTM on the dummy dataset.

    NOTE(review): ``forecastHours`` is currently unused — confirm intended.
    """
    return LSTM_WindTurbine.LSTM_RUN("data/dummy/WindTurbine.csv")
# PHOTOVOLTAIC SYSTEM functions
def predictPpv(forecastHours, visualize):
    """Forecast photovoltaic power output with the LSTM model."""
    return LSTM_PvSystem.LSTM_RUN(
        "data/dummy/PvSystem.csv", forecastHours, visualize
    )
def getPpv(forecastHours):
    """Run the photovoltaic LSTM on the dummy dataset.

    NOTE(review): ``forecastHours`` is currently unused — confirm intended.
    """
    return LSTM_PvSystem.LSTM_RUN("data/dummy/PvSystem.csv")
# LED SYSTEM functions
def predictPl(forecastHours, visualize):
    """Forecast LED-system power consumption with the LSTM model."""
    return LSTM_LedSystem.LSTM_RUN(
        "data/dummy/LedSystem.csv", forecastHours, visualize
    )
def getPl(forecastHours):
    """Run the LED-system LSTM on the dummy dataset.

    NOTE(review): ``forecastHours`` is currently unused — confirm intended.
    """
    return LSTM_LedSystem.LSTM_RUN("data/dummy/LedSystem.csv")
# POWER CONSUMPTION SYSTEM functions
def predictPls(forecastHours, visualize):
    """Forecast load-station power consumption with the LSTM model."""
    return LSTM_LoadStationSystem.LSTM_RUN(
        "data/dummy/LoadStationSystem.csv", forecastHours, visualize
    )
def getPls(forecastHours):
    """Run the load-station LSTM on the dummy dataset.

    NOTE(review): ``forecastHours`` is currently unused — confirm intended.
    """
    return LSTM_LoadStationSystem.LSTM_RUN("data/dummy/LoadStationSystem.csv")
# Example and Dummy
def example(forecastHours):
    """Run the example LSTM on the pollution dataset.

    NOTE(review): ``forecastHours`` is currently unused — confirm intended.
    """
    return LSTM.LSTM_RUN("data/example/pollution.csv")
def dummyResult(forecastHours):
    """Return a random placeholder forecast value in [0, 100)."""
    # Keep the original RNG draw order: rand first, then randint.
    sample = np.random.rand(1) * np.random.randint(1, 100)
    return sample[0]
def predictNeurals(forecastHours, visualize):
    """Run every subsystem forecast and return the four predictions.

    Returns:
        tuple: (wind turbine, photovoltaic, LED, load station) forecasts.
    """
    return (
        predictPw(forecastHours, visualize),
        predictPpv(forecastHours, visualize),
        predictPl(forecastHours, visualize),
        predictPls(forecastHours, visualize),
    )
| [
"numpy.random.rand",
"neural.example.LSTM.LSTM_RUN",
"neural.LSTM_PvSystem.LSTM_RUN",
"neural.LSTM_LoadStationSystem.LSTM_RUN",
"neural.LSTM_LedSystem.LSTM_RUN",
"numpy.random.randint",
"neural.LSTM_WindTurbine.LSTM_RUN"
] | [((281, 366), 'neural.LSTM_WindTurbine.LSTM_RUN', 'LSTM_WindTurbine.LSTM_RUN', (['"""data/dummy/WindTurbine.csv"""', 'forecastHours', 'visualize'], {}), "('data/dummy/WindTurbine.csv', forecastHours,\n visualize)\n", (306, 366), False, 'from neural import LSTM_WindTurbine\n'), ((430, 485), 'neural.LSTM_WindTurbine.LSTM_RUN', 'LSTM_WindTurbine.LSTM_RUN', (['"""data/dummy/WindTurbine.csv"""'], {}), "('data/dummy/WindTurbine.csv')\n", (455, 485), False, 'from neural import LSTM_WindTurbine\n'), ((587, 662), 'neural.LSTM_PvSystem.LSTM_RUN', 'LSTM_PvSystem.LSTM_RUN', (['"""data/dummy/PvSystem.csv"""', 'forecastHours', 'visualize'], {}), "('data/dummy/PvSystem.csv', forecastHours, visualize)\n", (609, 662), False, 'from neural import LSTM_PvSystem\n'), ((731, 780), 'neural.LSTM_PvSystem.LSTM_RUN', 'LSTM_PvSystem.LSTM_RUN', (['"""data/dummy/PvSystem.csv"""'], {}), "('data/dummy/PvSystem.csv')\n", (753, 780), False, 'from neural import LSTM_PvSystem\n'), ((872, 949), 'neural.LSTM_LedSystem.LSTM_RUN', 'LSTM_LedSystem.LSTM_RUN', (['"""data/dummy/LedSystem.csv"""', 'forecastHours', 'visualize'], {}), "('data/dummy/LedSystem.csv', forecastHours, visualize)\n", (895, 949), False, 'from neural import LSTM_LedSystem\n'), ((1017, 1068), 'neural.LSTM_LedSystem.LSTM_RUN', 'LSTM_LedSystem.LSTM_RUN', (['"""data/dummy/LedSystem.csv"""'], {}), "('data/dummy/LedSystem.csv')\n", (1040, 1068), False, 'from neural import LSTM_LedSystem\n'), ((1189, 1286), 'neural.LSTM_LoadStationSystem.LSTM_RUN', 'LSTM_LoadStationSystem.LSTM_RUN', (['"""data/dummy/LoadStationSystem.csv"""', 'forecastHours', 'visualize'], {}), "('data/dummy/LoadStationSystem.csv',\n forecastHours, visualize)\n", (1220, 1286), False, 'from neural import LSTM_LoadStationSystem\n'), ((1351, 1418), 'neural.LSTM_LoadStationSystem.LSTM_RUN', 'LSTM_LoadStationSystem.LSTM_RUN', (['"""data/dummy/LoadStationSystem.csv"""'], {}), "('data/dummy/LoadStationSystem.csv')\n", (1382, 1418), False, 'from neural import 
LSTM_LoadStationSystem\n'), ((1495, 1538), 'neural.example.LSTM.LSTM_RUN', 'LSTM.LSTM_RUN', (['"""data/example/pollution.csv"""'], {}), "('data/example/pollution.csv')\n", (1508, 1538), False, 'from neural.example import LSTM\n'), ((1585, 1602), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1599, 1602), True, 'import numpy as np\n'), ((1605, 1630), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (1622, 1630), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from audiomentations.augmentations.transforms import Mp3Compression
from audiomentations.core.composition import Compose
class TestMp3Compression(unittest.TestCase):
    """Round-trip tests for the Mp3Compression augmentation.

    Each scenario encodes white noise to MP3 and decodes it back, then
    checks that the output dtype is preserved and that the length only
    grows by the small padding the codec adds (< 2500 samples).
    """

    def _assert_mp3_roundtrip(self, sample_len, bitrate, backend):
        """Apply Mp3Compression to random noise and check the invariants.

        :param sample_len: number of input samples; the original tests
            always used the same value as the sample rate, so it is
            reused as the rate here as well
        :param bitrate: fixed bitrate in kbps (min_bitrate == max_bitrate)
        :param backend: encoder backend, "pydub" or "lameenc"
        """
        samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
        augmenter = Compose(
            [
                Mp3Compression(
                    p=1.0,
                    min_bitrate=bitrate,
                    max_bitrate=bitrate,
                    backend=backend,
                )
            ]
        )
        samples_out = augmenter(samples=samples_in, sample_rate=sample_len)
        self.assertEqual(samples_out.dtype, np.float32)
        # MP3 framing may append a little padding, but must never drop samples.
        self.assertGreaterEqual(len(samples_out), sample_len)
        self.assertLess(len(samples_out), sample_len + 2500)

    def test_apply_mp3_compression_pydub(self):
        self._assert_mp3_roundtrip(44100, 48, "pydub")

    def test_apply_mp3_compression_lameenc(self):
        self._assert_mp3_roundtrip(44100, 48, "lameenc")

    def test_apply_mp3_compression_low_bitrate_pydub(self):
        self._assert_mp3_roundtrip(16000, 8, "pydub")

    def test_apply_mp3_compression_low_bitrate_lameenc(self):
        self._assert_mp3_roundtrip(16000, 8, "lameenc")

    def test_invalid_argument_combination(self):
        """Out-of-range or inverted bitrate bounds must be rejected."""
        with self.assertRaises(AssertionError):
            _ = Mp3Compression(min_bitrate=400, max_bitrate=800)
        with self.assertRaises(AssertionError):
            _ = Mp3Compression(min_bitrate=2, max_bitrate=4)
        with self.assertRaises(AssertionError):
            _ = Mp3Compression(min_bitrate=64, max_bitrate=8)
| [
"audiomentations.augmentations.transforms.Mp3Compression",
"numpy.random.normal"
] | [((2606, 2654), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'min_bitrate': '(400)', 'max_bitrate': '(800)'}), '(min_bitrate=400, max_bitrate=800)\n', (2620, 2654), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n'), ((2720, 2764), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'min_bitrate': '(2)', 'max_bitrate': '(4)'}), '(min_bitrate=2, max_bitrate=4)\n', (2734, 2764), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n'), ((2830, 2875), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'min_bitrate': '(64)', 'max_bitrate': '(8)'}), '(min_bitrate=64, max_bitrate=8)\n', (2844, 2875), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n'), ((301, 340), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'sample_len'}), '(0, 1, size=sample_len)\n', (317, 340), True, 'import numpy as np\n'), ((430, 500), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'p': '(1.0)', 'min_bitrate': '(48)', 'max_bitrate': '(48)', 'backend': '"""pydub"""'}), "(p=1.0, min_bitrate=48, max_bitrate=48, backend='pydub')\n", (444, 500), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n'), ((868, 907), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'sample_len'}), '(0, 1, size=sample_len)\n', (884, 907), True, 'import numpy as np\n'), ((997, 1069), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'p': '(1.0)', 'min_bitrate': '(48)', 'max_bitrate': '(48)', 'backend': '"""lameenc"""'}), "(p=1.0, min_bitrate=48, max_bitrate=48, backend='lameenc')\n", (1011, 1069), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n'), ((1447, 1486), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'sample_len'}), '(0, 1, size=sample_len)\n', 
(1463, 1486), True, 'import numpy as np\n'), ((1576, 1644), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'p': '(1.0)', 'min_bitrate': '(8)', 'max_bitrate': '(8)', 'backend': '"""pydub"""'}), "(p=1.0, min_bitrate=8, max_bitrate=8, backend='pydub')\n", (1590, 1644), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n'), ((2024, 2063), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'sample_len'}), '(0, 1, size=sample_len)\n', (2040, 2063), True, 'import numpy as np\n'), ((2153, 2223), 'audiomentations.augmentations.transforms.Mp3Compression', 'Mp3Compression', ([], {'p': '(1.0)', 'min_bitrate': '(8)', 'max_bitrate': '(8)', 'backend': '"""lameenc"""'}), "(p=1.0, min_bitrate=8, max_bitrate=8, backend='lameenc')\n", (2167, 2223), False, 'from audiomentations.augmentations.transforms import Mp3Compression\n')] |
import numpy as np
import tensorflow as tf
class Model:
    """Base class for ODE-driven models.

    Subclasses provide an ``equations(state, t)`` method that returns a
    ``tf.stack`` of time derivatives; this class converts the initial
    conditions, integrates the system with the legacy
    ``tf.contrib.integrate.odeint``, and evaluates the result in a
    TensorFlow session.
    """

    def __init__(
            self,
            initial_conditions=None,
            model_parameters=None,
            final_time=None,
            time_steps=None):
        self.initial_conditions = initial_conditions
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def init_converter(self, arg1: np.array) -> tf.constant:
        """Wrap the initial-condition array in a float64 tf constant.

        :param arg1: initial conditions for the ODE system
        :return: constant tf.float64 tensor holding ``arg1``
        """
        return tf.constant(arg1, dtype=tf.float64)

    def ode_solver(self, arg1: tf.stack, arg2: np.ndarray) -> list:
        """Numerically integrate the ODE system.

        :param arg1: derivative function (the system of ODEs)
        :param arg2: initial conditions
        :return: ``[state_tensor, info_dict]`` as produced by odeint with
            ``full_output=True``
        """
        time_points = np.linspace(0, self.final_time, num=self.time_steps)
        solved = tf.contrib.integrate.odeint(
            arg1, self.init_converter(arg2), time_points, full_output=True)
        return list(solved)

    def tf_session(self, arg1: tf.stack, arg2: np.ndarray) -> np.ndarray:
        """Evaluate the solved trajectory in a TF session.

        :param arg1: derivative function (the system of ODEs)
        :param arg2: initial conditions
        :return: transpose of the solved state array (one row per variable)
        """
        session = tf.Session()
        solved_state, _info = session.run(self.ode_solver(arg1, arg2))
        return solved_state.T

    def solve(self):
        """Integrate ``self.equations`` from ``self.initial_conditions``.

        :return: the solved trajectory, also stored on ``self.solution``
        """
        self.solution = self.tf_session(self.equations, self.initial_conditions)
        return self.solution
class CoupledDampedSHM(Model):
    """Two damped harmonic oscillators coupled by a spring.

    Models e.g. two carts on a track, each attached to a wall and to the
    other cart by springs, with a common damping coefficient. State is
    (x, y, x1, y1) = (position, velocity) of each cart.
    """

    def __init__(
            self,
            initial_conditions=[0.5, 0.1, 0.1, 0.1],
            model_parameters=[0.007, 0.27, 0.027, 0.25],
            final_time=200,
            time_steps=1000):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return the four time derivatives as a tf.stack."""
        # parameter layout (from usage): [damping, wall spring, coupling spring, mass]
        params = self.model_parameters
        damping, spring, coupling, mass = params[0], params[1], params[2], params[3]
        pos_a, vel_a, pos_b, vel_b = tf.unstack(state)
        acc_a = -(spring / mass) * pos_a + (coupling / mass) * pos_b - (damping / mass) * vel_a
        acc_b = (coupling / mass) * pos_a - (spring / mass) * pos_b - (damping / mass) * vel_b
        return tf.stack([vel_a, acc_a, vel_b, acc_b])
class DampedSHM(Model):
    """Damped simple harmonic motion.

    State is (x, y) = (position, velocity); parameters are
    [damping coefficient, spring constant, mass] (layout inferred from
    the equation below).
    """

    def __init__(
            self,
            initial_conditions=None,
            model_parameters=None,
            final_time=50,
            time_steps=500):
        # FIX: the original used mutable list defaults, which are shared
        # across calls/instances; None sentinels preserve the documented
        # defaults without that hazard.
        if initial_conditions is None:
            initial_conditions = [0.1, 0.1]
        if model_parameters is None:
            model_parameters = [0.035, 0.5, 0.2]
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return (dx/dt, dy/dt) for the damped oscillator."""
        x, y = tf.unstack(state)
        dx = y
        # m * x'' = -c * x' - k * x   =>   y' = (-c*y - k*x) / m
        dy = (-self.model_parameters[0] * y - self.model_parameters[1] * x) / self.model_parameters[2]
        return tf.stack([dx, dy])
class FitzhughNagumo(Model):
    """FitzHugh-Nagumo two-variable point-neuron model.

    ``v`` is the fast voltage-like variable, ``w`` the slow recovery
    variable. Parameter layout (from usage): [a, b, tau, external current].
    """

    def __init__(
            self,
            initial_conditions=[0.01, 0.01],
            model_parameters=[0.75, 0.8, 3, -0.4],
            final_time=100,
            time_steps=500):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return (dv/dt, dw/dt) as a tf.stack."""
        params = self.model_parameters
        a, b, tau, current = params[0], params[1], params[2], params[3]
        v, w = tf.unstack(state)
        dv = tau * (v + w - v ** 3 / 3 + current)
        dw = -1 / tau * (v - a + b * w)
        return tf.stack([dv, dw])
class HindmarshRose(Model):
    """Hindmarsh-Rose three-variable bursting-neuron model.

    State is (x, y, z) = (membrane potential, fast spiking variable,
    slow adaptation variable). Parameter layout (from usage):
    [a, b, c, d, r, s, external current, resting x].
    """

    def __init__(
            self,
            initial_conditions=[0.1, 0.1, 0.1],
            model_parameters=[1., 3., 1., 5., 0.006, 4., 1.3, -1.5],
            final_time=100,
            time_steps=1000):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return (dx/dt, dy/dt, dz/dt) as a tf.stack."""
        p = self.model_parameters
        a, b, c, d = p[0], p[1], p[2], p[3]
        r, s, current, x_rest = p[4], p[5], p[6], p[7]
        x, y, z = tf.unstack(state)
        dx = y - a * x ** 3 + b * x ** 2 - z + current
        dy = c - d * x ** 2 - y
        dz = r * (s * (x - x_rest) - z)
        return tf.stack([dx, dy, dz])
class HodgkinHuxley(Model):
    """Hodgkin-Huxley point-neuron model.

    State is (i, n, m, h): a voltage-like variable plus the K+ activation,
    Na+ activation and Na+ inactivation gates. The rate constants below use
    the sign convention of the original 1952 formulation (voltage of the
    opposite sign to modern usage); ``solve`` flips the sign of the first
    trace accordingly.

    NOTE(review): reading the equations, model_parameters looks like
    [g_K, g_Na, g_L, E_K, E_Na, E_L, C_m, I_ext] — inferred from usage,
    not documented upstream; confirm before relying on it.
    """
    def __init__(
            self,
            initial_conditions=[0.1, 0.1, 0.1, 0.1],
            model_parameters=[36., 120., 0.3, 12., -115., -10.613, 1., -10.],
            final_time=100,
            time_steps=1000):
        # NOTE(review): mutable list defaults are shared between calls;
        # harmless as long as callers never mutate them in place.
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps
    def equations(self, state, t):
        """Return (di, dn, dm, dh) as a tf.stack.

        :param state: tensor holding (i, n, m, h)
        :param t: time (unused; required by the odeint call signature)
        """
        i, n, m, h = tf.unstack(state)
        # Alpha and beta functions for channel activation functions
        # (voltage-dependent opening/closing rates of each gate).
        alpha_n = (0.01 * (i + 10)) / (tf.exp((i + 10) / 10) - 1)
        beta_n = 0.125 * tf.exp(i / 80)
        alpha_m = (0.1 * (i + 25)) / (tf.exp((i + 25) / 10) - 1)
        beta_m = 4 * tf.exp(i / 18)
        alpha_h = (0.07 * tf.exp(i / 20))
        beta_h = 1 / (tf.exp((i + 30) / 10) + 1)
        # Differential Equations
        # Membrane equation: sum of K, Na and leak currents minus the
        # injected current, scaled by -1/C_m.
        di = (self.model_parameters[0] * (n ** 4) * (i - self.model_parameters[3])
            + self.model_parameters[1] * (m ** 3) * h * (i - self.model_parameters[4])
            + self.model_parameters[2] * (i - self.model_parameters[5])
            - self.model_parameters[7]) * (-1 / self.model_parameters[6])
        # First-order gate kinetics: dx/dt = alpha*(1 - x) - beta*x.
        dn = alpha_n * (1 - n) - beta_n * n
        dm = alpha_m * (1 - m) - beta_m * m
        dh = alpha_h * (1 - h) - beta_h * h
        return tf.stack([di, dn, dm, dh])
    def solve(self):
        """Integrate the system; the voltage trace is sign-flipped to the
        modern convention before being returned/stored."""
        i, n, m, h = self.tf_session(self.equations, self.initial_conditions)
        self.solution = -1*i, n, m, h
        return self.solution
class HIV(Model):
    """Three-compartment model of HIV dynamics in a T-cell population.

    State is (x1, x2, x3); given the equation structure this matches the
    standard (uninfected cells, infected cells, free virus) layout of
    3-state HIV models — interpretation inferred from usage, not
    documented upstream.
    """

    def __init__(
            self,
            initial_conditions=[1000, 0, 1],
            model_parameters=[10., 0.02, 0.24, 2.4, 2.4e-5, 100],
            final_time=500,
            time_steps=500):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return the time derivatives of the three compartments."""
        p = self.model_parameters
        supply, loss1, loss3 = p[0], p[1], p[2]
        loss2, interaction, production = p[3], p[4], p[5]
        x1, x2, x3 = tf.unstack(state)
        dx1 = -loss1 * x1 - interaction * x1 * x3 + supply
        dx2 = -loss2 * x2 + interaction * x1 * x3
        dx3 = production * x2 - loss3 * x3
        return tf.stack([dx1, dx2, dx3])
class Lorenz(Model):
    """Lorenz system for atmospheric convection.

    Parameter layout (from usage): [rho, sigma, beta]; state is (x, y, z).
    """

    def __init__(
            self,
            initial_conditions=None,
            model_parameters=None,
            final_time=50,
            time_steps=5000):
        # FIX: None sentinels instead of shared mutable list defaults.
        if initial_conditions is None:
            initial_conditions = [0, 2, 20]
        if model_parameters is None:
            model_parameters = [28., 10., 8. / 3.]
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps
        # FIX: the original ended with `self.state = tf.tensor()`, but
        # `tf.tensor` does not exist, so every construction raised an
        # AttributeError; the attribute was never used anywhere, so the
        # line is removed.

    def equations(self, state, t):
        """Return (dx/dt, dy/dt, dz/dt) for the Lorenz equations."""
        p = self.model_parameters
        rho, sigma, beta = p[0], p[1], p[2]
        x, y, z = tf.unstack(state)
        dx = sigma * (y - x)
        dy = x * (rho - z) - y
        dz = x * y - beta * z
        return tf.stack([dx, dy, dz])
class MorrisLecar(Model):
    """Morris-Lecar neuron model.

    Two-variable conductance-based point neuron: ``v`` is the membrane
    potential and ``n`` the K+ channel activation.

    NOTE(review): matching the equations against the standard
    Morris-Lecar form, model_parameters appears to be
    [E_K, g_K, E_Ca, g_Ca, E_L, g_L, phi, V1, V2, V3, V4, I_ext]
    (indices 0-11), with the membrane capacitance implicitly 1 — this
    interpretation is inferred from usage; confirm before relying on it.
    """
    def __init__(
            self,
            initial_conditions=[0.01, 0.01],
            model_parameters=[-84., 8., 130., 4.4, -60., 2., 0.04, -1.2, 18., 2., 30., 80.],
            final_time=500,
            time_steps=1000):
        # NOTE(review): mutable list defaults are shared between calls;
        # harmless as long as callers never mutate them in place.
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps
    def equations(self, state, t):
        """Return (dv/dt, dn/dt) as a tf.stack.

        :param state: tensor holding (v, n)
        :param t: time (unused; required by the odeint call signature)
        """
        v, n = tf.unstack(state)
        # dv: Ca current with instantaneous activation
        # m_inf(v) = 0.5*(1 + tanh((v - V1)/V2)), plus the K and leak
        # currents and the constant injected current (last parameter).
        dv = (-self.model_parameters[3]
              * (0.5 * (1 + tf.tanh((v - self.model_parameters[7]) / self.model_parameters[8])))
              * (v - self.model_parameters[2]) - self.model_parameters[1] * n
              * (v - self.model_parameters[0]) - self.model_parameters[5]
              * (v - self.model_parameters[4]) + self.model_parameters[11])
        # dn: relaxation of n towards n_inf(v) = 0.5*(1 + tanh((v - V3)/V4))
        # at rate phi / tau_n(v), where tau_n(v) = 1/cosh((v - V3)/(2*V4)).
        dn = (self.model_parameters[6]
              * ((0.5 * (1 + tf.tanh((v - self.model_parameters[9]) / self.model_parameters[10]))) - n)) \
              / (1 / tf.cosh((v - self.model_parameters[9]) / (2 * self.model_parameters[10])))
        return tf.stack([dv, dn])
class Vanderpol(Model):
    """Van der Pol oscillator.

    Classic nonlinearly damped oscillator, a common introductory system
    in the study of dynamical systems. State is (x, y) =
    (position, velocity); model_parameters[0] is the damping strength.
    """

    def __init__(
            self,
            initial_conditions=[0.01, 0.01],
            model_parameters=[-0.05],
            final_time=50,
            time_steps=250):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return (dx/dt, dy/dt) as a tf.stack."""
        mu = self.model_parameters[0]
        position, velocity = tf.unstack(state)
        d_position = velocity
        d_velocity = mu * velocity * (1 - position ** 2) - position
        return tf.stack([d_position, d_velocity])
| [
"tensorflow.unstack",
"tensorflow.tensor",
"tensorflow.tanh",
"tensorflow.Session",
"numpy.array",
"numpy.linspace",
"tensorflow.constant",
"tensorflow.cosh",
"tensorflow.exp",
"tensorflow.stack"
] | [((1080, 1115), 'tensorflow.constant', 'tf.constant', (['arg1'], {'dtype': 'tf.float64'}), '(arg1, dtype=tf.float64)\n', (1091, 1115), True, 'import tensorflow as tf\n'), ((2002, 2054), 'numpy.linspace', 'np.linspace', (['(0)', 'self.final_time'], {'num': 'self.time_steps'}), '(0, self.final_time, num=self.time_steps)\n', (2013, 2054), True, 'import numpy as np\n'), ((2938, 2950), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2948, 2950), True, 'import tensorflow as tf\n'), ((4045, 4073), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (4053, 4073), True, 'import numpy as np\n'), ((4256, 4273), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (4266, 4273), True, 'import tensorflow as tf\n'), ((4764, 4792), 'tensorflow.stack', 'tf.stack', (['[dx, dy, dx1, dy1]'], {}), '([dx, dy, dx1, dy1])\n', (4772, 4792), True, 'import tensorflow as tf\n'), ((5147, 5175), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (5155, 5175), True, 'import numpy as np\n'), ((5350, 5367), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (5360, 5367), True, 'import tensorflow as tf\n'), ((5501, 5519), 'tensorflow.stack', 'tf.stack', (['[dx, dy]'], {}), '([dx, dy])\n', (5509, 5519), True, 'import tensorflow as tf\n'), ((5944, 5972), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (5952, 5972), True, 'import numpy as np\n'), ((6147, 6164), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (6157, 6164), True, 'import tensorflow as tf\n'), ((6369, 6387), 'tensorflow.stack', 'tf.stack', (['[dv, dw]'], {}), '([dv, dw])\n', (6377, 6387), True, 'import tensorflow as tf\n'), ((6831, 6859), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (6839, 6859), True, 'import numpy as np\n'), ((7037, 7054), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (7047, 7054), True, 'import tensorflow as 
tf\n'), ((7392, 7414), 'tensorflow.stack', 'tf.stack', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (7400, 7414), True, 'import tensorflow as tf\n'), ((7872, 7900), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (7880, 7900), True, 'import numpy as np\n'), ((8081, 8098), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (8091, 8098), True, 'import tensorflow as tf\n'), ((8967, 8993), 'tensorflow.stack', 'tf.stack', (['[di, dn, dm, dh]'], {}), '([di, dn, dm, dh])\n', (8975, 8993), True, 'import tensorflow as tf\n'), ((9552, 9580), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (9560, 9580), True, 'import numpy as np\n'), ((9761, 9778), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (9771, 9778), True, 'import tensorflow as tf\n'), ((10061, 10086), 'tensorflow.stack', 'tf.stack', (['[dx1, dx2, dx3]'], {}), '([dx1, dx2, dx3])\n', (10069, 10086), True, 'import tensorflow as tf\n'), ((10475, 10503), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (10483, 10503), True, 'import numpy as np\n'), ((10648, 10659), 'tensorflow.tensor', 'tf.tensor', ([], {}), '()\n', (10657, 10659), True, 'import tensorflow as tf\n'), ((10714, 10731), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (10724, 10731), True, 'import tensorflow as tf\n'), ((10897, 10919), 'tensorflow.stack', 'tf.stack', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (10905, 10919), True, 'import tensorflow as tf\n'), ((11378, 11406), 'numpy.array', 'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (11386, 11406), True, 'import numpy as np\n'), ((11581, 11598), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (11591, 11598), True, 'import tensorflow as tf\n'), ((12219, 12237), 'tensorflow.stack', 'tf.stack', (['[dv, dn]'], {}), '([dv, dn])\n', (12227, 12237), True, 'import tensorflow as tf\n'), ((12670, 12698), 'numpy.array', 
'np.array', (['initial_conditions'], {}), '(initial_conditions)\n', (12678, 12698), True, 'import numpy as np\n'), ((12873, 12890), 'tensorflow.unstack', 'tf.unstack', (['state'], {}), '(state)\n', (12883, 12890), True, 'import tensorflow as tf\n'), ((12976, 12994), 'tensorflow.stack', 'tf.stack', (['[dx, dy]'], {}), '([dx, dy])\n', (12984, 12994), True, 'import tensorflow as tf\n'), ((8258, 8272), 'tensorflow.exp', 'tf.exp', (['(i / 80)'], {}), '(i / 80)\n', (8264, 8272), True, 'import tensorflow as tf\n'), ((8359, 8373), 'tensorflow.exp', 'tf.exp', (['(i / 18)'], {}), '(i / 18)\n', (8365, 8373), True, 'import tensorflow as tf\n'), ((8400, 8414), 'tensorflow.exp', 'tf.exp', (['(i / 20)'], {}), '(i / 20)\n', (8406, 8414), True, 'import tensorflow as tf\n'), ((8206, 8227), 'tensorflow.exp', 'tf.exp', (['((i + 10) / 10)'], {}), '((i + 10) / 10)\n', (8212, 8227), True, 'import tensorflow as tf\n'), ((8311, 8332), 'tensorflow.exp', 'tf.exp', (['((i + 25) / 10)'], {}), '((i + 25) / 10)\n', (8317, 8332), True, 'import tensorflow as tf\n'), ((8438, 8459), 'tensorflow.exp', 'tf.exp', (['((i + 30) / 10)'], {}), '((i + 30) / 10)\n', (8444, 8459), True, 'import tensorflow as tf\n'), ((12129, 12202), 'tensorflow.cosh', 'tf.cosh', (['((v - self.model_parameters[9]) / (2 * self.model_parameters[10]))'], {}), '((v - self.model_parameters[9]) / (2 * self.model_parameters[10]))\n', (12136, 12202), True, 'import tensorflow as tf\n'), ((12032, 12099), 'tensorflow.tanh', 'tf.tanh', (['((v - self.model_parameters[9]) / self.model_parameters[10])'], {}), '((v - self.model_parameters[9]) / self.model_parameters[10])\n', (12039, 12099), True, 'import tensorflow as tf\n'), ((11667, 11733), 'tensorflow.tanh', 'tf.tanh', (['((v - self.model_parameters[7]) / self.model_parameters[8])'], {}), '((v - self.model_parameters[7]) / self.model_parameters[8])\n', (11674, 11733), True, 'import tensorflow as tf\n')] |
import numpy as np
def load_synth_spectra(regridded=True, small=False, npca=10,
                       noise=False, SN=10, datapath=None,
                       wave_split=None, boss=False, hetsced=False,
                       bossnoise=False, test=False):
    """Load pre-generated synthetic quasar spectra from a .npy file.

    The flags select which file variant to read. The stored array has
    shape (n_spectra, n_pixels, n_columns): column 0 is the wavelength
    grid, 1 the continuum, 2 the flux, and — in the noisy variants —
    3 the smoothed flux and 4 the inverse variance.

    Returns (wave_grid, qso_cont, qso_flux) and, when ``noise`` is set,
    also flux_smooth (and ivar when ``hetsced`` is set as well).
    """
    if datapath is None:
        datapath = "/net/vdesk/data2/buiten/MRP2/pca-sdss-old/"

    # Build `filename` in every branch before the single np.load below.
    # FIX: the original loaded several variants inline without defining
    # `filename`, so the unconditional print("Filename:", filename)
    # raised a NameError on those paths. (Also `&` -> `and` on booleans.)
    if bossnoise and regridded:
        if test:
            print("Using test-only hybrid-grid spectra with BOSS noise.")
            filename = "{}forest_spectra_BOSSnoise_npca{}BOSS-regridded_test-only.npy".format(datapath, npca)
        else:
            print("Using bossnoise & regridded in load_synth_spectra")
            # this is the setting we'll most likely be using
            filename = "{}forest_spectra_BOSSnoise_npca{}BOSS-regridded.npy".format(datapath, npca)
    elif bossnoise:
        if test:
            print("using test-only uniform grid spectra with BOSS noise.")
            filename = "{}forest_spectra_BOSSnoise_npca{}BOSS-grid_test-only.npy".format(datapath, npca)
        else:
            filename = "{}forest_spectra_BOSSnoise_npca{}BOSS-grid.npy".format(datapath, npca)
    elif noise:
        if boss:
            if not hetsced and regridded:
                filename = datapath + "forest_spectra_with_noiseSN" + str(SN) + "_npca" + str(npca) + "BOSS-regridded.npy"
            elif not hetsced:
                filename = datapath + "forest_spectra_with_noiseSN" + str(SN) + "_npca" + str(npca) + "BOSS-grid.npy"
            elif regridded:
                filename = datapath + "forest_spectra_hetsced_noiseSN10-100_npca" + str(npca) + "BOSS-regridded.npy"
            else:
                filename = datapath + "forest_spectra_hetsced_noiseSN10-100_npca" + str(npca) + "BOSS-grid.npy"
        else:
            if regridded:
                if (wave_split is None) or (wave_split == 1216):
                    filename = datapath + "forest_spectra_with_noiseSN" + str(SN) + "_regridded_npca" + str(npca) + "smooth-window20.npy"
                else:
                    filename = datapath + "forest_spectra_with_noiseSN" + str(SN) + "_regridded_npca" + str(npca) + "smooth-window20_split" + str(int(wave_split)) + ".npy"
            else:
                filename = datapath + "forest_spectra_with_noiseSN" + str(SN) + "_npca" + str(npca) + "smooth-window20.npy"
    elif npca == 10:
        if regridded:
            if small:
                filename = datapath + "gen_spectrum_regridded_array.npy"
            else:
                filename = datapath + "gen_spectrum_regridded_big_array.npy"
        else:
            if small:
                filename = datapath + "gen_spectrum_nonregridded_array.npy"
            else:
                filename = datapath + "gen_spectrum_nonregridded_big_array.npy"
    else:
        if regridded:
            filename = datapath + "gen_spectrum_regridded_big_array_npca" + str(npca) + ".npy"
        else:
            filename = datapath + "gen_spectrum_nonregridded_big_array_npca" + str(npca) + ".npy"

    data = np.load(filename)
    print("Filename:", filename)

    wave_grid = data[0, :, 0]
    qso_cont = data[:, :, 1]
    qso_flux = data[:, :, 2]

    if noise:
        flux_smooth = data[:, :, 3]
        if not hetsced:
            return wave_grid, qso_cont, qso_flux, flux_smooth
        ivar = data[:, :, 4]
        return wave_grid, qso_cont, qso_flux, flux_smooth, ivar
    return wave_grid, qso_cont, qso_flux
def load_synth_noisy_cont(npca=10, smooth=False, window=20, homosced=True,
                          poisson=False, SN=10, datapath=None):
    """Load synthetic continua with added noise.

    ``qso_cont`` holds the clean continua, ``qso_flux`` the noisy
    versions. Returns (wave_grid, qso_cont, qso_flux) plus, when
    ``smooth`` is set, the smoothed noisy continua as a fourth element.
    """
    if datapath is None:
        datapath = "/net/vdesk/data2/buiten/MRP2/pca-sdss-old/"
    npca_str = str(npca)
    # Resolve the file name for the requested noise variant.
    if not smooth:
        fname = datapath + "continua_with_noise_regridded_npca" + npca_str + ".npy"
    elif homosced:
        fname = datapath + "continua_with_noiseSN" + str(SN) + "_regridded_npca" + npca_str + "smooth-window" + str(window) + ".npy"
    elif poisson:
        if SN == 10:
            fname = datapath + "continua_scaled-poisson-noise_regridded_npca" + npca_str + "smooth-window" + str(window) + ".npy"
        else:
            fname = datapath + "continua_scaled-poisson-noiseSN" + str(SN) + "_regridded_npca" + npca_str + "smooth-window" + str(window) + ".npy"
    else:
        fname = datapath + "continua_with_constSNnoise_regridded_npca" + npca_str + "smooth-window" + str(window) + ".npy"
    data = np.load(fname)
    wave_grid = data[0, :, 0]
    qso_cont = data[:, :, 1]
    qso_flux = data[:, :, 2]
    if smooth:
        return wave_grid, qso_cont, qso_flux, data[:, :, 3]
    return wave_grid, qso_cont, qso_flux
def load_paris_spectra(noise=False, version=2, datapath=None):
    """Load the regridded Paris hand-fit continua with a simulated
    Ly-alpha forest and optional noise.

    Returns (wave_grid, cont, flux, flux_smooth).
    """
    mainpath = "/net/vdesk/data2/buiten/MRP2/Data/" if datapath is None else datapath
    if not noise:
        filename = mainpath + "paris_noiselessflux_regridded.npy"
    elif version == 1:
        filename = mainpath + "paris_noisyflux_regridded.npy"
    else:
        filename = mainpath + "paris_noisyflux_regridded_v" + str(version) + ".npy"
    data = np.load(filename)
    wave_grid = data[0, :, 0]
    cont = data[:, :, 1]
    flux = data[:, :, 2]
    flux_smooth = data[:, :, 3]
    return wave_grid, cont, flux, flux_smooth
def split_data(attributes, targets, train_size=0.9, test_size=0.05):
    """Split (attributes, targets) into train/validation/test sets.

    ``train_size`` is the training fraction of the full set and
    ``test_size`` the test fraction of the full set; whatever remains
    becomes the validation set. Splits are deterministic
    (random_state=0).

    Returns X_train, X_valid, X_test, y_train, y_valid, y_test.
    """
    from sklearn.model_selection import train_test_split
    holdout_frac = 1 - train_size
    X_train, X_holdout, y_train, y_holdout = train_test_split(
        attributes, targets, test_size=holdout_frac, random_state=0)
    # The test fraction must be re-expressed relative to the holdout set.
    X_valid, X_test, y_valid, y_test = train_test_split(
        X_holdout, y_holdout, test_size=test_size / holdout_frac, random_state=0)
    return X_train, X_valid, X_test, y_train, y_valid, y_test
def normalise_spectra(wave_grid, flux, cont, windowmin=1270, windowmax=1290):
    """Normalise each spectrum by its median flux in a wavelength window.

    Parameters
    ----------
    wave_grid : array-like, 1d (n_pix,) or 2d (n_spec, n_pix)
        Wavelengths; if 2d, the first row is used (rows assumed to share
        one grid, as in the original).
    flux, cont : ndarray, shape (n_spec, n_pix)
        Flux and continuum; both are divided row-wise by the per-spectrum
        median flux inside (windowmin, windowmax).

    Returns
    -------
    (flux_norm, cont_norm) : ndarrays of the same shape as the inputs.
    """
    wave = np.asarray(wave_grid)
    # FIX: explicit dimensionality check instead of the original bare
    # try/except (which silently swallowed unrelated errors).
    wave_grid1d = wave[0, :] if wave.ndim > 1 else wave
    window = (wave_grid1d > windowmin) & (wave_grid1d < windowmax)
    flux_median_window = np.median(flux[:, window], axis=1)
    # Vectorised broadcast replaces the original per-row Python loop.
    scale = flux_median_window[:, np.newaxis]
    flux_norm = flux / scale
    cont_norm = cont / scale
    return flux_norm, cont_norm
def normalise_ivar(wave_grid, flux, ivar, windowmin=1270, windowmax=1290):
    """Normalise flux and inverse variance consistently.

    Flux is divided row-wise by the per-spectrum median flux inside
    (windowmin, windowmax); the inverse variance is multiplied by that
    median squared, so flux * sqrt(ivar) (the S/N) is unchanged.

    Parameters
    ----------
    wave_grid : array-like, 1d (n_pix,) or 2d (n_spec, n_pix)
        Wavelengths; if 2d, the first row is used.
    flux, ivar : ndarray, shape (n_spec, n_pix)
        Flux and inverse variance per spectrum.

    Returns
    -------
    (flux_norm, ivar_norm) : ndarrays of the same shape as the inputs.
    """
    wave = np.asarray(wave_grid)
    # FIX: explicit dimensionality check instead of the original bare
    # try/except; vectorised broadcast replaces the per-row Python loop.
    wave_grid1d = wave[0, :] if wave.ndim > 1 else wave
    window = (wave_grid1d > windowmin) & (wave_grid1d < windowmax)
    flux_median_window = np.median(flux[:, window], axis=1)
    scale = flux_median_window[:, np.newaxis]
    flux_norm = flux / scale
    ivar_norm = ivar * scale ** 2
    return flux_norm, ivar_norm
"sklearn.model_selection.train_test_split",
"numpy.median",
"numpy.load",
"numpy.zeros"
] | [((5769, 5786), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (5776, 5786), True, 'import numpy as np\n'), ((6135, 6209), 'sklearn.model_selection.train_test_split', 'train_test_split', (['attributes', 'targets'], {'test_size': 'rest_size', 'random_state': '(0)'}), '(attributes, targets, test_size=rest_size, random_state=0)\n', (6151, 6209), False, 'from sklearn.model_selection import train_test_split\n'), ((6408, 6485), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_rest', 'y_rest'], {'test_size': 'test_size_of_rest', 'random_state': '(0)'}), '(X_rest, y_rest, test_size=test_size_of_rest, random_state=0)\n', (6424, 6485), False, 'from sklearn.model_selection import train_test_split\n'), ((6926, 6960), 'numpy.median', 'np.median', (['flux[:, window]'], {'axis': '(1)'}), '(flux[:, window], axis=1)\n', (6935, 6960), True, 'import numpy as np\n'), ((6976, 6996), 'numpy.zeros', 'np.zeros', (['flux.shape'], {}), '(flux.shape)\n', (6984, 6996), True, 'import numpy as np\n'), ((7013, 7033), 'numpy.zeros', 'np.zeros', (['cont.shape'], {}), '(cont.shape)\n', (7021, 7033), True, 'import numpy as np\n'), ((7473, 7507), 'numpy.median', 'np.median', (['flux[:, window]'], {'axis': '(1)'}), '(flux[:, window], axis=1)\n', (7482, 7507), True, 'import numpy as np\n'), ((7523, 7543), 'numpy.zeros', 'np.zeros', (['flux.shape'], {}), '(flux.shape)\n', (7531, 7543), True, 'import numpy as np\n'), ((7560, 7580), 'numpy.zeros', 'np.zeros', (['ivar.shape'], {}), '(ivar.shape)\n', (7568, 7580), True, 'import numpy as np\n'), ((4829, 4905), 'numpy.load', 'np.load', (["(datapath + 'continua_with_noise_regridded_npca' + npca_str + '.npy')"], {}), "(datapath + 'continua_with_noise_regridded_npca' + npca_str + '.npy')\n", (4836, 4905), True, 'import numpy as np\n'), ((604, 621), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (611, 621), True, 'import numpy as np\n'), ((888, 905), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', 
(895, 905), True, 'import numpy as np\n'), ((1268, 1285), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1275, 1285), True, 'import numpy as np\n'), ((2623, 2677), 'numpy.load', 'np.load', (["(datapath + 'gen_spectrum_regridded_array.npy')"], {}), "(datapath + 'gen_spectrum_regridded_array.npy')\n", (2630, 2677), True, 'import numpy as np\n'), ((2717, 2775), 'numpy.load', 'np.load', (["(datapath + 'gen_spectrum_regridded_big_array.npy')"], {}), "(datapath + 'gen_spectrum_regridded_big_array.npy')\n", (2724, 2775), True, 'import numpy as np\n'), ((2833, 2890), 'numpy.load', 'np.load', (["(datapath + 'gen_spectrum_nonregridded_array.npy')"], {}), "(datapath + 'gen_spectrum_nonregridded_array.npy')\n", (2840, 2890), True, 'import numpy as np\n'), ((2930, 2991), 'numpy.load', 'np.load', (["(datapath + 'gen_spectrum_nonregridded_big_array.npy')"], {}), "(datapath + 'gen_spectrum_nonregridded_big_array.npy')\n", (2937, 2991), True, 'import numpy as np\n')] |
import argparse
import configparser
import torch
import os
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from model import ValueNetwork
from env import ENV
from train import run_one_episode
def visualize(model_config, env_config, weight_path, case, save):
    """
    Replay one test episode of the two-agent crossing task as a matplotlib
    animation, optionally saving it to 'data/output.mp4'.

    :param model_config: configparser with the 'model' section (state_dim, gamma)
    :param env_config: configparser with 'sim', 'visualization' and 'agent' sections
    :param weight_path: path to the ValueNetwork state_dict to load
    :param case: test-case index passed to the environment's reset()
    :param save: if True, also export the animation via the ffmpeg writer
    """
    # Model hyper-parameters.
    state_dim = model_config.getint('model', 'state_dim')
    gamma = model_config.getfloat('model', 'gamma')
    # Simulation boundary rectangle (drawn dashed below).
    bxmin = env_config.getfloat('sim', 'xmin')
    bxmax = env_config.getfloat('sim', 'xmax')
    bymin = env_config.getfloat('sim', 'ymin')
    bymax = env_config.getfloat('sim', 'ymax')
    # Axis limits for the figure (may be wider than the sim boundary).
    xmin = env_config.getfloat('visualization', 'xmin')
    xmax = env_config.getfloat('visualization', 'xmax')
    ymin = env_config.getfloat('visualization', 'ymin')
    ymax = env_config.getfloat('visualization', 'ymax')
    crossing_radius = env_config.getfloat('sim', 'crossing_radius')
    kinematic = env_config.getboolean('agent', 'kinematic')
    radius = env_config.getfloat('agent', 'radius')
    device = torch.device('cpu')
    test_env = ENV(config=env_config, phase='test')
    test_env.reset(case)
    model = ValueNetwork(state_dim=state_dim, fc_layers=[100, 100, 100], kinematic=kinematic)
    # map_location forces CPU loading regardless of where weights were saved.
    model.load_state_dict(torch.load(weight_path, map_location=lambda storage, loc: storage))
    # Roll out a single evaluation episode; only the per-agent state
    # sequences are needed for rendering.
    _, state_sequences, _, _ = run_one_episode(model, 'test', test_env, gamma, None, kinematic, device)
    # Per-frame render data: agent centers, face colors, step counter and
    # heading-indicator line segments.
    positions = list()
    colors = list()
    counter = list()
    line_positions = list()
    for i in range(len(state_sequences[0])):
        counter.append(i)
        if state_sequences[0][i] is None:
            # Agent 0 has no state for this step; reuse a recent recorded
            # position (4 frames back) and mark it red.
            # NOTE(review): presumably None means the agent finished — confirm
            # against run_one_episode.
            p0 = positions[-4][0]
            c0 = 'tab:red'
            h0 = 0
        else:
            p0 = (state_sequences[0][i].px, state_sequences[0][i].py)
            c0 = 'tab:blue'
            h0 = state_sequences[0][i].theta
        # Heading indicator: segment from the center along angle h0, length = radius.
        xdata0 = [p0[0], p0[0]+radius*np.cos(h0)]
        ydata0 = [p0[1], p0[1]+radius*np.sin(h0)]
        if state_sequences[1][i] is None:
            p1 = positions[-4][1]
            c1 = 'tab:red'
            h1 = 0
        else:
            p1 = (state_sequences[1][i].px, state_sequences[1][i].py)
            c1 = 'tab:gray'
            h1 = state_sequences[1][i].theta
        xdata1 = [p1[0], p1[0]+radius*np.cos(h1)]
        ydata1 = [p1[1], p1[1]+radius*np.sin(h1)]
        # Color both agents red on the final frame.
        if i == len(state_sequences[0])-1:
            c0 = c1 = 'tab:red'
        positions.append([p0, p1])
        colors.append([c0, c1])
        line_positions.append([[xdata0, ydata0], [xdata1, ydata1]])
    # Static scene: crossing circle (green) and sim boundary (dashed).
    fig, ax = plt.subplots(figsize=(7, 7))
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.add_artist(plt.Circle((0, 0), crossing_radius, fill=False, edgecolor='g', lw=1))
    ax.add_artist(plt.Rectangle((bxmin, bymin), bxmax-bxmin, bymax-bymin, fill=False, linestyle='dashed', lw=1))
    # Animated artists, initialized with frame 0 data.
    agent0 = plt.Circle(positions[0][0], radius, fill=True, color='b')
    agent1 = plt.Circle(positions[0][1], radius, fill=True, color='c')
    line0 = plt.Line2D(line_positions[0][0][0], line_positions[0][0][1], color='tab:red')
    line1 = plt.Line2D(line_positions[0][1][0], line_positions[0][1][1], color='tab:red')
    text = plt.text(0, 8, 'Step: {}'.format(counter[0]), fontsize=12)
    ax.add_artist(agent0)
    ax.add_artist(agent1)
    ax.add_artist(line0)
    ax.add_artist(line1)
    ax.add_artist(text)
    def update(frame_num):
        # FuncAnimation callback: mutate the artists in place for this frame.
        agent0.center = positions[frame_num][0]
        agent1.center = positions[frame_num][1]
        agent0.set_color(colors[frame_num][0])
        agent1.set_color(colors[frame_num][1])
        line0.set_xdata(line_positions[frame_num][0][0])
        line0.set_ydata(line_positions[frame_num][0][1])
        line1.set_xdata(line_positions[frame_num][1][0])
        line1.set_ydata(line_positions[frame_num][1][1])
        text.set_text('Step: {}'.format(counter[frame_num]))
    # 800 ms per frame.
    anim = animation.FuncAnimation(fig, update, frames=len(positions), interval=800)
    if save:
        # Requires ffmpeg to be installed and discoverable by matplotlib.
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800)
        output_file = 'data/output.mp4'
        anim.save(output_file, writer=writer)
    plt.show()
def main():
    """
    Command-line entry point: parse arguments, locate the configuration and
    weight files, and run the episode visualization.
    """
    parser = argparse.ArgumentParser('Parse configuration file')
    # Bug fix: the original code unconditionally overwrote args.output_dir
    # with "data/model/" AFTER parsing, so --output_dir was silently ignored.
    # Making it the argparse default preserves the old behavior when the flag
    # is omitted while letting the flag actually take effect.
    parser.add_argument('--output_dir', type=str, default='data/model/')
    parser.add_argument('--init', default=False, action='store_true')
    parser.add_argument('--case', default=0, type=int)
    parser.add_argument('--save', default=False, action='store_true')
    args = parser.parse_args()
    config_file = os.path.join(args.output_dir, 'model.config')
    # --init selects the untrained (initialized) weights, e.g. for a baseline.
    if args.init:
        weight_file = os.path.join(args.output_dir, 'initialized_model.pth')
    else:
        weight_file = os.path.join(args.output_dir, 'trained_model.pth')
    model_config = configparser.RawConfigParser()
    model_config.read(config_file)
    env_config = configparser.RawConfigParser()
    env_config.read('configs/env.config')
    visualize(model_config, env_config, weight_file, args.case, args.save)
if __name__ == '__main__':
    main()
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"train.run_one_episode",
"torch.load",
"numpy.sin",
"os.path.join",
"env.ENV",
"matplotlib.pyplot.Line2D",
"numpy.cos",
"model.ValueNetwork",
"configparser.RawConfigParser",
"mat... | [((1020, 1039), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1032, 1039), False, 'import torch\n'), ((1055, 1091), 'env.ENV', 'ENV', ([], {'config': 'env_config', 'phase': '"""test"""'}), "(config=env_config, phase='test')\n", (1058, 1091), False, 'from env import ENV\n'), ((1129, 1215), 'model.ValueNetwork', 'ValueNetwork', ([], {'state_dim': 'state_dim', 'fc_layers': '[100, 100, 100]', 'kinematic': 'kinematic'}), '(state_dim=state_dim, fc_layers=[100, 100, 100], kinematic=\n kinematic)\n', (1141, 1215), False, 'from model import ValueNetwork\n'), ((1336, 1408), 'train.run_one_episode', 'run_one_episode', (['model', '"""test"""', 'test_env', 'gamma', 'None', 'kinematic', 'device'], {}), "(model, 'test', test_env, gamma, None, kinematic, device)\n", (1351, 1408), False, 'from train import run_one_episode\n'), ((2556, 2584), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (2568, 2584), True, 'import matplotlib.pyplot as plt\n'), ((2855, 2912), 'matplotlib.pyplot.Circle', 'plt.Circle', (['positions[0][0]', 'radius'], {'fill': '(True)', 'color': '"""b"""'}), "(positions[0][0], radius, fill=True, color='b')\n", (2865, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2926, 2983), 'matplotlib.pyplot.Circle', 'plt.Circle', (['positions[0][1]', 'radius'], {'fill': '(True)', 'color': '"""c"""'}), "(positions[0][1], radius, fill=True, color='c')\n", (2936, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3073), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['line_positions[0][0][0]', 'line_positions[0][0][1]'], {'color': '"""tab:red"""'}), "(line_positions[0][0][0], line_positions[0][0][1], color='tab:red')\n", (3006, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3086, 3163), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['line_positions[0][1][0]', 'line_positions[0][1][1]'], {'color': '"""tab:red"""'}), "(line_positions[0][1][0], line_positions[0][1][1], color='tab:red')\n", 
(3096, 3163), True, 'import matplotlib.pyplot as plt\n'), ((4175, 4185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4183, 4185), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4264), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Parse configuration file"""'], {}), "('Parse configuration file')\n", (4236, 4264), False, 'import argparse\n'), ((4596, 4641), 'os.path.join', 'os.path.join', (['args.output_dir', '"""model.config"""'], {}), "(args.output_dir, 'model.config')\n", (4608, 4641), False, 'import os\n'), ((4840, 4870), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (4868, 4870), False, 'import configparser\n'), ((4923, 4953), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (4951, 4953), False, 'import configparser\n'), ((1237, 1303), 'torch.load', 'torch.load', (['weight_path'], {'map_location': '(lambda storage, loc: storage)'}), '(weight_path, map_location=lambda storage, loc: storage)\n', (1247, 1303), False, 'import torch\n'), ((2659, 2727), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', 'crossing_radius'], {'fill': '(False)', 'edgecolor': '"""g"""', 'lw': '(1)'}), "((0, 0), crossing_radius, fill=False, edgecolor='g', lw=1)\n", (2669, 2727), True, 'import matplotlib.pyplot as plt\n'), ((2747, 2848), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(bxmin, bymin)', '(bxmax - bxmin)', '(bymax - bymin)'], {'fill': '(False)', 'linestyle': '"""dashed"""', 'lw': '(1)'}), "((bxmin, bymin), bxmax - bxmin, bymax - bymin, fill=False,\n linestyle='dashed', lw=1)\n", (2760, 2848), True, 'import matplotlib.pyplot as plt\n'), ((4682, 4736), 'os.path.join', 'os.path.join', (['args.output_dir', '"""initialized_model.pth"""'], {}), "(args.output_dir, 'initialized_model.pth')\n", (4694, 4736), False, 'import os\n'), ((4769, 4819), 'os.path.join', 'os.path.join', (['args.output_dir', '"""trained_model.pth"""'], {}), "(args.output_dir, 'trained_model.pth')\n", (4781, 
4819), False, 'import os\n'), ((1890, 1900), 'numpy.cos', 'np.cos', (['h0'], {}), '(h0)\n', (1896, 1900), True, 'import numpy as np\n'), ((1940, 1950), 'numpy.sin', 'np.sin', (['h0'], {}), '(h0)\n', (1946, 1950), True, 'import numpy as np\n'), ((2269, 2279), 'numpy.cos', 'np.cos', (['h1'], {}), '(h1)\n', (2275, 2279), True, 'import numpy as np\n'), ((2319, 2329), 'numpy.sin', 'np.sin', (['h1'], {}), '(h1)\n', (2325, 2329), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue 26 March 18:44:45 2020
@author: Mnemosyne
Plots of the vocal learning model results.
"""
import os
import time
import glob
import pickle
import numpy as np
import matplotlib
import librosa
from matplotlib import rcParams, cm, colors
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import Axes3D
import scipy.io.wavfile as wav
csfont = {'fontname':'Times New Roman'}
from songbird_data_analysis import Song_functions
def magnitude(v):
    """
    Euclidean norm of a 3D cartesian vector.

    :param v: sequence (x, y, z) of cartesian coordinates
    :return: the magnitude sqrt(x**2 + y**2 + z**2)
    """
    x, y, z = v[0], v[1], v[2]
    return np.sqrt(x ** 2 + y ** 2 + z ** 2)
def polar_coord(v):
    """
    Convert 3D cartesian coordinates to spherical (polar) coordinates.

    :param v: sequence (x, y, z) of cartesian coordinates
    :return r, phi, theta: radius, azimuth arctan(y/x), and inclination
        arctan(sqrt(x**2 + y**2) / z)

    NOTE(review): np.arctan discards quadrant information (unlike
    np.arctan2); presumably inputs stay in the first octant — confirm
    with the callers before relying on the signs of phi/theta.
    """
    x, y, z = v[0], v[1], v[2]
    r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    phi = np.arctan(y / x)
    planar = np.sqrt(x ** 2 + y ** 2)
    theta = np.arctan(planar / z)
    return r, phi, theta
def arctan_coord(v):
    """
    Project a 3D vector onto two angular coordinates.

    :param v: sequence with at least three components
    :return: tuple (arctan(v[0]/v[1]), arctan(v[0]/v[2]))
    """
    numerator = v[0]
    first = np.arctan(numerator / v[1])
    second = np.arctan(numerator / v[2])
    return first, second
def arctan_distance(v, w):
    """
    Element-wise "angular" distance between two equally-sized vectors.

    Replaces the original Python-level element loop with a single vectorized
    numpy call: same values, one C-level pass instead of O(n) Python
    iterations, and plain sequences (lists/tuples) are now accepted as well
    as ndarrays.

    :param v, w: 1-D array-likes of the same size
    :return: float ndarray d with d[i] = arctan(v[i] - w[i])
    """
    return np.arctan(np.asarray(v, dtype=float) - np.asarray(w, dtype=float))
def create_sphere(cx, cy, cz, r, resolution=360):
    '''
    Sample the surface of a sphere with center (cx, cy, cz) and radius r.

    Returns a (3, 2*resolution, resolution) array stacking the x, y and z
    coordinate grids (azimuth varies along axis 1, polar angle along axis 2).
    '''
    azimuth = np.linspace(0, 2 * np.pi, 2 * resolution)
    polar = np.linspace(0, np.pi, resolution)
    polar_grid, azimuth_grid = np.meshgrid(polar, azimuth)
    # Radius of the horizontal circle at each polar angle.
    ring = r * np.sin(polar_grid)
    xs = cx + np.cos(azimuth_grid) * ring
    ys = cy + np.sin(azimuth_grid) * ring
    zs = cz + r * np.cos(polar_grid)
    return np.stack([xs, ys, zs])
def plot_auditory_activation(args):
    """
    Plot the results of the different auditory activation functions (results
    from the test function).

    For every simulation and every classifier, the exploration scores saved
    by each activation function are loaded from ``args.data_dir`` and
    rendered as one 4x4 grid of per-syllable histograms, saved under
    ``args.data_dir/args.output_dir``.

    Refactor: the original body repeated the same 12-line histogram-grid
    section 18 times, differing only in the data, x-label, axis limits and
    file-name suffix; those are now parameters of the ``_hist_grid`` helper.
    File names and axis limits are reproduced exactly.

    :param args: namespace providing data_dir, output_dir, classifier_name,
                 N_sim, beta and format.
    """
    # Repertoire
    classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']

    def _load(cl, sim_counter, suffix, allow_pickle=False):
        # Load one saved score array; 'suffix' carries the exact file-name
        # fragment (including any trailing underscore) used at save time.
        return np.load(args.data_dir + '/' + args.classifier_name[cl] + suffix + str(sim_counter) + '.npy',
                       allow_pickle=allow_pickle)

    def _hist_grid(scores, xlabel, fname, xlim, ylim=None):
        # One 4x4 grid of 15-bin histograms, one panel per syllable class.
        fig, ax = plt.subplots(4, 4, figsize=(10, 5))
        for i in range(0, 4):
            for j in range(0, 4):
                h, bins = np.histogram(scores[:, 4 * i + j], bins=15)
                ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
                ax[i, j].spines['top'].set_color('none')
                ax[i, j].spines['right'].set_color('none')
                ax[i, j].set_xlim(*xlim)
                if ylim is not None:
                    ax[i, j].set_ylim(*ylim)
                ax[i, j].set_xlabel(xlabel, fontsize=8)
                ax[i, j].set_title(classes[4 * i + j], fontsize=8)
        plt.tight_layout()
        plt.savefig(args.data_dir + '/' + args.output_dir + '/' + fname + '.' + args.format)

    for sim_counter in range(0, args.N_sim):
        for cl in range(0, len(args.classifier_name)):
            print(args.classifier_name[cl])
            # Arrays saved with a trailing '_' before the simulation index.
            softmax_sum_expl = _load(cl, sim_counter, '_softmax_sum_expl_')
            softmax_mean_expl = _load(cl, sim_counter, '_softmax_mean_expl_')
            raw_score_expl = _load(cl, sim_counter, '_raw_score_expl_')
            raw_mean_expl = _load(cl, sim_counter, '_mean_expl_')
            mean_norm_expl = _load(cl, sim_counter, '_mean_norm_expl_')
            logistic_expl = _load(cl, sim_counter, '_logistic_expl_')
            tanh_expl = _load(cl, sim_counter, '_tanh_expl_')
            minmax_expl = _load(cl, sim_counter, '_minmax_expl_')
            sign_minmax_expl = _load(cl, sim_counter, '_sign_minmax_expl_')
            sign_expl = _load(cl, sim_counter, '_sign_expl_')
            square_expl = _load(cl, sim_counter, '_square_expl_')
            arctg_expl = _load(cl, sim_counter, '_arctg_expl_')
            # Arrays saved without the trailing '_' and pickled.
            scaling_expl = _load(cl, sim_counter, '_scaling_expl', allow_pickle=True)
            scaling_softmax_expl = _load(cl, sim_counter, '_scaling_softmax_expl', allow_pickle=True)
            softmax_MAX_expl = _load(cl, sim_counter, '_softmax_MAX_expl', allow_pickle=True)
            max_expl = _load(cl, sim_counter, '_max_expl', allow_pickle=True)
            max_norm_expl = _load(cl, sim_counter, '_max_norm_expl', allow_pickle=True)
            p95_expl = _load(cl, sim_counter, '_p95_expl', allow_pickle=True)
            # Clamp p95 scores to the unit interval (they can overshoot 1).
            for i in range(0, np.shape(raw_score_expl)[0]):
                for j in range(0, len(classes)):
                    if p95_expl[i, j] > 1:
                        p95_expl[i, j] = 1
            prefix = args.classifier_name[cl]
            tail = str(sim_counter)
            # One histogram grid per activation function; the (xlim, ylim)
            # pairs and file-name suffixes reproduce the original sections.
            _hist_grid(raw_score_expl, 'MinMax score', prefix + '_raw_score_expl' + tail, (0, 1), (0, 1000))
            _hist_grid(p95_expl, 'p95 score', prefix + '_p95_expl_pw' + tail, (-0.1, 1), (0, 1500))
            _hist_grid(max_expl, 'Max score', prefix + '_max_expl' + tail, (0, 1), (0, 1000))
            _hist_grid(max_norm_expl, 'Max norm score', prefix + '_max_norm_expl' + tail, (0, 1), (0, 1000))
            _hist_grid(scaling_softmax_expl, 'Scaling softmax score', prefix + '_scaling_softmax_expl' + tail, (0, 1), (0, 1000))
            _hist_grid(softmax_MAX_expl, 'Softmax MAX score', prefix + '_softmax_MAX_expl' + tail, (0, 1), (0, 1000))
            _hist_grid(scaling_expl, 'Scaling score', prefix + '_scaling_expl' + tail, (-0.1, 1), (0, 1500))
            _hist_grid(arctg_expl, 'Arctg score', prefix + '_arctg_expl' + tail, (-1, 1))
            _hist_grid(square_expl, 'Square root score', prefix + '_square_expl' + tail, (-1, 1))
            _hist_grid(sign_expl, 'Sign score', prefix + '_sign_expl' + tail, (-1, 1))
            _hist_grid(minmax_expl, 'Minmax score', prefix + '_minmax_expl' + tail, (-1, 1))
            _hist_grid(sign_minmax_expl, 'Sign minmax score', prefix + '_sign_minmax_expl' + tail, (0, 1), (0, 800))
            _hist_grid(logistic_expl, 'Logistic score', prefix + '_logistic_expl_expl' + tail, (0, 1))
            _hist_grid(tanh_expl, 'Tanh score', prefix + '_tanh_expl_expl' + tail, (-1, 1))
            _hist_grid(raw_mean_expl, 'Raw mean score', prefix + '_raw_mean_expl' + tail, (0, 1))
            _hist_grid(mean_norm_expl, 'Mean score', prefix + '_mean_norm_expl' + tail, (0, 1))
            _hist_grid(softmax_sum_expl, 'Soft-max', prefix + '_softmax_sum_expl' + tail, (0, 1))
            # Release the figures above before the beta sweep (as before,
            # the beta-sweep figures themselves are left open).
            plt.close('all')
            # One grid per softmax inverse-temperature beta.
            for b in range(0, np.size(args.beta)):
                _hist_grid(softmax_mean_expl[b], 'Raw score',
                           prefix + '_softmax_mean_expl_beta_' + str(args.beta[b]) + '_' + tail, (0, 1))
    print('Done')
def plot_sensory(args):
    """
    Plots of the results obtained from the learning model (VLM function).

    For every (learning rate, classifier, simulation) triple this loads the
    saved p95 activation traces and produces: a 200-step "focus" view, the
    synaptic-weight evolution, the exploration activations, the learning
    activations, per-simulation overlays, the across-simulation mean, and a
    final learning-rate comparison. The mean array is also saved to disk.
    """
    # Colors (one per simulation / learning-rate condition).
    color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
    # Repertoire of syllable classes (one panel per class in the 4x4 grids).
    classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
    # Across-simulation mean per learning rate, filled below and saved at the end.
    p95_mean = np.zeros((len(args.learning_rate), args.n_points + 1, len(classes)))
    for lr in range(0, len(args.learning_rate)):
        print(args.learning_rate[lr])
        for cl in range(0, len(args.classifier_name)):
            print(args.classifier_name[cl])
            p95_all_sim = []
            for sim_counter in range(0, args.N_sim):
                p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_sim_' + str(sim_counter) + '.npy')
                p95_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_expl_' + str(sim_counter) + '.npy')
                # Focus on the first 200 time steps.
                p95_focus = p95[0:200, :]
                # Subsample the focus region (every 15th point within the
                # first 200 steps) then append the remaining trace - CHECK PLOT
                p95_begin = p95[0:200, :]
                p95_jump = np.zeros((args.n_points + 1, np.size(args.T_names)))
                p95_jump[0:14, :] = p95_begin[0::15, :]
                p95_jump[14::, :] = p95[200::, :]
                # Collect the subsampled trace of every simulation.
                p95_all_sim.append(p95_jump)
                # Time vectors for the three views below.
                x_time = np.linspace(0, args.MAX_trial, np.shape(p95_jump)[0])
                x_time_expl = np.linspace(0, np.shape(p95_expl)[0], np.shape(p95_expl)[0])
                x_time_focus = np.linspace(0, np.shape(p95_focus)[0], np.shape(p95_focus)[0])
                # 4x4 grid: activation over the 200-step focus window.
                fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                for i in range(0, 4):
                    for j in range(0, 4):
                        ax[i, j].plot(x_time_focus, p95_focus[:, 4 * i + j], 'b')
                        ax[i, j].set_ylim(0, 1)
                        ax[i, j].set_xlim(0, np.shape(p95_focus)[0])
                        ax[i, j].set_ylabel('Average A', fontsize=8)
                        ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                        ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                        ax[i, j].spines['top'].set_color('none')
                        ax[i, j].spines['right'].set_color('none')
                plt.tight_layout()
                plt.savefig(
                    args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                        args.learning_rate[lr]) + '_p95_FOCUS_sim' + str(
                        sim_counter) + '.' + args.format)
                W_p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_W_p95_sim_' + str(sim_counter) + '.npy')[0:args.MAX_trial, :, :]
                # Plot the evolution of the synaptic weights over trials
                # (only when the targets cover the whole repertoire).
                if np.size(args.T_names) == len(classes):
                    fig, ax = plt.subplots(4, 4, sharex='col', sharey='row', figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            # One curve per latent dimension k.
                            for k in range(0, args.wavegan_latent_dim):
                                ax[i, j].plot(x_time_expl, W_p95[:, k, 4 * i + j], color[k])
                            ax[i, j].set_ylabel('Weights', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i,j].set_ylim(-1,1)
                    plt.tight_layout()
                    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + 'Synaptic_weights_evolution_p95' + str(sim_counter) + '.' + args.format)
                # Plot activation of the exploration phase.
                fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                for i in range(0, 4):
                    for j in range(0, 4):
                        ax[i, j].plot(x_time_expl, p95_expl[:, 4 * i + j], 'b')
                        #ax[i, j].set_ylim(0, 1)
                        ax[i, j].set_ylabel('Average A', fontsize=8)
                        ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                        ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                plt.tight_layout()
                plt.savefig(
                    args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                        args.learning_rate[lr]) + '_p95_expl' + str(
                        sim_counter) + '.' + args.format)
                # Plot activation during learning (subsampled trace).
                fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                for i in range(0, 4):
                    for j in range(0, 4):
                        ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], 'b')
                        ax[i, j].set_ylim(0, 1)
                        ax[i, j].set_xlim(0, args.MAX_trial-1)
                        ax[i, j].set_ylabel('Average A', fontsize=8)
                        ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                        ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                        ax[i, j].spines['top'].set_color('none')
                        ax[i, j].spines['right'].set_color('none')
                plt.tight_layout()
                plt.savefig(
                    args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                        args.learning_rate[lr]) + '_p95_sim' + str(
                        sim_counter) + '.' + args.format)
                # [TODO] add comment here when I try this option
                # NOTE(review): the inner 'for lr in range(...)' below shadows
                # and clobbers the OUTER loop variable 'lr'; after this branch
                # runs, subsequent file names and the p95_mean assignment use
                # the wrong learning-rate index — confirm and rename if hit.
                if args.example == True:
                    if sim_counter == 1:
                        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
                        for lr in range(0, len(args.learning_rate)):
                            ax.plot(x_time, p95_all_sim[sim_counter][:, 14], 'b')
                            ax.spines['top'].set_color('none')
                            ax.spines['right'].set_color('none')
                            ax.set_xlim(0, args.MAX_trial)
                            ax.set_xlabel('Time (in number of time steps)', fontsize=15)
                            ax.set_ylabel('Activation', fontsize=15)
                        plt.savefig(
                            args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                                args.learning_rate[lr]) + '_R' + '.' + args.format)
                plt.close('all')
            # Average over multiple simulations.
            p95_mean_sim = np.mean(p95_all_sim, axis=0)
            p95_mean[lr, :, :] = p95_mean_sim
            # Overlay of every simulation's trace (one color per simulation).
            fig, ax = plt.subplots(4, 4, figsize=(10, 5))
            for sim_counter in range(0, args.N_sim):
                for i in range(0, 4):
                    for j in range(0, 4):
                        #ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
                        ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], c=color[sim_counter], alpha=.7)
                        ax[i, j].set_ylim(0, 1)
                        ax[i, j].set_xlim(0, args.MAX_trial)
                        ax[i, j].set_ylabel('Average A', fontsize=8)
                        ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                        ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                        ax[i, j].spines['top'].set_color('none')
                        ax[i, j].spines['right'].set_color('none')
            plt.tight_layout()
            plt.savefig(
                args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                    args.learning_rate[lr]) + '_p95_sim_ALL' + '.' + args.format)
            # Across-simulation mean (replotted once per simulation color).
            fig, ax = plt.subplots(4, 4, figsize=(10, 5))
            for sim_counter in range(0, args.N_sim):
                for i in range(0, 4):
                    for j in range(0, 4):
                        #ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
                        ax[i, j].plot(x_time, p95_mean_sim[:, 4 * i + j], c=color[sim_counter], alpha=.7)
                        ax[i, j].set_ylim(0, 1)
                        ax[i, j].set_xlim(0, args.MAX_trial)
                        ax[i, j].set_ylabel('Average A', fontsize=8)
                        ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                        ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                        ax[i, j].spines['top'].set_color('none')
                        ax[i, j].spines['right'].set_color('none')
            plt.tight_layout()
            plt.savefig(
                args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                    args.learning_rate[lr]) + '_p95_MEAN' + '.' + args.format)
    # Comparison between different learning rates.
    # NOTE(review): these legend labels look like they mean 1e-1 / 1e-2
    # ('10e-1' literally reads as 1.0) — confirm intended values.
    cfr_lr = ['10e-1', '10e-2']
    fig, ax = plt.subplots(4, 4, figsize=(12, 7))
    for lr in range(0, len(args.learning_rate)):
        for i in range(0, 4):
            for j in range(0, 4):
                ax[i, j].plot(x_time, p95_mean[lr,:, 4 * i + j], c=color[lr], alpha=.7, label=cfr_lr[lr])
                ax[i, j].set_ylim(0, 1)
                ax[i, j].set_xlim(0, args.MAX_trial)
                ax[0, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                ax[i, j].spines['top'].set_color('none')
                ax[i, j].spines['right'].set_color('none')
            ax[i, 0].set_ylabel('Average A', fontsize=8)
    ax[0, 0].legend(fontsize=5)
    plt.tight_layout()
    plt.savefig(
        args.data_dir + '/' + args.output_dir + '/' + '_p95_MEAN_all' + '.' + args.format)
    # Persist the per-learning-rate mean for later cross-condition comparison
    # (consumed by cfr_dim13).
    np.save(args.data_dir + '/' + 'p95_MEAN_lr_' + str(args.wavegan_latent_dim) + '.npy' ,p95_mean)
    plt.close('all')
    print('Done')
def cfr_dim13(p95_MEAN, colors, ld, args):
    """
    Compare average activation curves across latent-space dimension conditions.

    :param p95_MEAN: list of arrays (one per latent-space condition), each
        indexed as [learning-rate, time, syllable]
    :param colors: one plot color per latent-space condition
    :param ld: latent-space dimension labels used for the legend
    :param args: parsed command-line arguments (MAX_trial, learning_rate,
        data_dir, format)
    :return: None; saves one 4x4 comparison figure per learning-rate condition
    """
    time_axis = np.linspace(0, args.MAX_trial, 201)
    syllables = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2',
                 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
    for lr_idx, lr_value in enumerate(args.learning_rate):
        fig, ax = plt.subplots(4, 4, figsize=(12, 7))
        for row in range(4):
            for col in range(4):
                panel = ax[row, col]
                syll = 4 * row + col
                # One curve per latent-space condition, labelled by its dimension.
                for cond, data in enumerate(p95_MEAN):
                    panel.plot(time_axis, data[lr_idx, :, syll],
                               c=colors[cond], alpha=.7, label=str(ld[cond]))
                panel.set_ylim(0, 1)
                panel.set_xlim(0, args.MAX_trial)
                # Only the top row carries the x label (as in the sibling plots).
                ax[0, col].set_xlabel('Time (in number of time steps)', fontsize=8)
                panel.set_title(syllables[syll], fontsize=8)
                panel.spines['top'].set_color('none')
                panel.spines['right'].set_color('none')
            ax[row, 0].set_ylabel('Average A', fontsize=8)
        ax[0, 0].legend(fontsize=5)
        plt.tight_layout()
        plt.savefig(
            args.data_dir + '/' + '_p95_MEAN_lr_' + str(lr_value) + '.' + args.format)
    plt.close('all')
    print('Done')
def plot_sensory_test(args):
    """
    Plot the classifier/sensory traces recorded during learning simulations.

    For every simulation instance, classifier and learning-rate condition this
    loads the ``.npy`` traces saved during learning (soft-max activations, raw
    scores, max / max-norm / scaling scores, and their exploration-set
    counterparts) and saves 4x4 per-syllable figures for each of them, plus
    cross-learning-rate comparison figures and two single-syllable examples.

    :param args: parsed command-line arguments (data_dir, output_dir, N_sim,
        classifier_name, learning_rate, T_names, MAX_trial, time_limit, ns,
        format)
    :return: None; figures are written under ``args.data_dir/args.output_dir``
    """
    # Colors (one per condition being overlaid)
    color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
    # Repertoire
    classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
    for sim_counter in range(0, args.N_sim):
        # Per-classifier accumulators consumed by the comparison figures below.
        cfr_class_A_all = []
        cfr_class_A_expl_all = []
        cfr_class_raw_all = []
        cfr_class_expl_all = []
        conv = []  # NOTE(review): never used — candidate for removal
        for cl in range(0, len(args.classifier_name)):
            print(args.classifier_name[cl])
            cfr_class_A = []
            cfr_class_A_expl = []
            cfr_class_raw = []
            cfr_class_expl = []
            mean_spectrogram_env = []
            T = []
            for lr in range(0, len(args.learning_rate)):
                print(args.learning_rate[lr])
                # Traces recorded during learning (one file per condition).
                sensory_gen = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_sim_' + str(sim_counter) + '.npy')
                sensory_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_expl_' + str(sim_counter) + '.npy')
                sensory_expl_all = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_expl_all_' + str(sim_counter) + '.npy')
                raw_score = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_raw_score_sim_' + str(sim_counter) + '.npy')
                max_score = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_sim_' + str(sim_counter) + '.npy')
                max_norm = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_norm_sim_' + str(sim_counter) + '.npy')
                max_scaling = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_scaling_sim_' + str(sim_counter) + '.npy')
                raw_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_raw_score_expl_' + str(sim_counter) + '.npy')
                max_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_score_expl_' + str(sim_counter) + '.npy')
                max_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_norm_expl_' + str(sim_counter) + '.npy')
                max_scaling_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_scaling_expl_' + str(sim_counter) + '.npy')
                #cfr_class_A.append(sensory_gen)
                #cfr_class_A_expl.append(sensory_expl)
                # NOTE(review): the two appends above are commented out, so
                # cfr_class_A_all[cl] stays empty and the first comparison
                # figure below (indexing cfr_class_A_all[cl][lr]) would raise
                # IndexError if reached — confirm whether that path is stale.
                cfr_class_raw.append(raw_score)
                cfr_class_expl.append(raw_score_expl)
                # Time vector
                x_time = np.linspace(0, args.MAX_trial, np.shape(raw_score)[0])
                x_time_expl = np.linspace(0, np.shape(raw_score_expl)[0], np.shape(raw_score_expl)[0])
                # (A large block of commented-out spectrogram/weight plotting
                # code was removed here; mean_spectro() holds the maintained
                # version of that analysis.)
                # Full 16-syllable repertoire only: per-score 4x4 figures.
                if np.size(args.T_names) == len(classes):
                    # Max score on the exploration set
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time_expl, np.ones((np.shape(max_score_expl)[0], 1)), 'k')
                            ax[i, j].plot(x_time_expl, max_score_expl[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Max score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_max_score_expl' + str(
                            sim_counter) + '.' + args.format)
                    # Max-norm score on the exploration set
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time_expl, np.ones((np.shape(max_norm_expl)[0], 1)), 'k')
                            ax[i, j].plot(x_time_expl, max_norm_expl[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Max-norm score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_max_norm_expl' + str(
                            sim_counter) + '.' + args.format)
                    # Scaling score on the exploration set
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time_expl, np.ones((np.shape(max_scaling_expl)[0], 1)), 'k')
                            ax[i, j].plot(x_time_expl, max_scaling_expl[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Scaling score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_max_scaling_expl' + str(
                            sim_counter) + '.' + args.format)
                    # Max score over learning trials
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time, np.ones((np.shape(max_score)[0], 1)), 'k')
                            ax[i, j].plot(x_time, max_score[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_xlim(0, args.MAX_trial)
                            ax[i, j].set_ylabel('Max score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_max_sim' + str(
                            sim_counter) + '.' + args.format)
                    # Max-norm score over learning trials
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time, np.ones((np.shape(max_norm)[0], 1)), 'k')
                            ax[i, j].plot(x_time, max_norm[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_xlim(0, args.MAX_trial)
                            ax[i, j].set_ylabel('Max-norm score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_max_norm_sim' + str(
                            sim_counter) + '.' + args.format)
                    # Scaling score over learning trials
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time, np.ones((np.shape(max_scaling)[0], 1)), 'k')
                            ax[i, j].plot(x_time, max_scaling[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_xlim(0, args.MAX_trial)
                            ax[i, j].set_ylabel('Scaling score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_max_scaling_sim' + str(
                            sim_counter) + '.' + args.format)
                    # Sensory response raw score
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time, np.ones((np.shape(raw_score)[0], 1)), 'k')
                            ax[i, j].plot(x_time, raw_score[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_xlim(0, args.MAX_trial)
                            ax[i, j].set_ylabel('Raw score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_raw_score_sim' + str(
                            sim_counter) + '.' + args.format)
                    # Per-trial summed raw score (NOTE(review): computed but
                    # never used afterwards — confirm before removing).
                    raw_score_sum = np.zeros((args.time_limit,))
                    for t in range(0, args.time_limit):
                        raw_score_sum[t] = np.sum(raw_score[t, :])
                    # Raw score on the exploration set; also counts, per
                    # syllable, how often it exceeds 0.9.
                    aux_save_raw = []
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    print('Raw_score')
                    for i in range(0, 4):
                        for j in range(0, 4):
                            aux_save_raw.append(np.size(np.where(raw_score_expl[:, 4 * i + j] > 0.9)))
                            ax[i, j].plot(x_time_expl, np.ones((np.shape(raw_score_expl)[0], 1)), 'k')
                            ax[i, j].plot(x_time_expl, raw_score_expl[:, 4 * i + j], 'b')
                            ax[i, j].set_xlim(0, 300)
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Raw_score', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                        args.learning_rate[lr]) + '_raw_score_expl' + str(
                        sim_counter) + '.' + args.format)
                    # Only persist the cumulative counts for the lr=0.1 condition.
                    if args.learning_rate[lr] == 0.1:
                        np.save(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_cumulative_raw_score_expl.npy', aux_save_raw)
                    # Histogram of the exploration raw score, normalised to its mode.
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            h, bins = np.histogram(raw_score_expl[:, 4 * i + j], bins=15)
                            ax[i, j].bar(bins[:-1], h / np.max(h), width=0.05, color='b', alpha=0.6)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                            ax[i, j].set_xlim(0, 1)
                            ax[i, j].set_xlabel('Raw score', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    plt.tight_layout()
                    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                        args.learning_rate[lr]) + '_raw_score_expl_hist' + str(sim_counter) + '.' + args.format)
                    # Soft-max response over learning trials
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time, np.ones((np.shape(sensory_gen)[0], 1)), 'k')
                            ax[i, j].plot(x_time, sensory_gen[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Soft-max', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_Sensory_response_sim' + str(
                            sim_counter) + '.' + args.format)
                    # Soft-max response over the full exploration set
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            ax[i, j].plot(x_time_expl, np.ones((np.shape(sensory_expl_all)[0], 1)), 'k')
                            ax[i, j].plot(x_time_expl, sensory_expl_all[:, 4 * i + j], 'b')
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Soft-max', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    plt.tight_layout()
                    plt.savefig(
                        args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
                            args.learning_rate[lr]) + '_Sensory_response_expl_all' + str(
                            sim_counter) + '.' + args.format)
                    # Free the many figures created so far before continuing.
                    plt.close('all')
                    # Soft-max on the exploration set; also counts, per
                    # syllable, how often it exceeds 0.9.
                    aux_save_softmax = []
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    print('sensory_expl')
                    for i in range(0, 4):
                        for j in range(0, 4):
                            aux_save_softmax.append(np.size(np.where(sensory_expl[:, 4 * i + j] > 0.9)))
                            ax[i, j].plot(x_time_expl, np.ones((np.shape(sensory_expl)[0], 1)), 'k')
                            ax[i, j].plot(x_time_expl, sensory_expl[:, 4 * i + j], 'b')
                            ax[i, j].set_xlim(0,300)
                            ax[i, j].set_ylim(0, 1)
                            ax[i, j].set_ylabel('Soft-max', fontsize=8)
                            ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                    plt.tight_layout()
                    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_Sensory_response_expl' + str(sim_counter) + '.' + args.format)
                    if args.learning_rate[lr] == 0.1:
                        np.save(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_cumulative_softmax_expl.npy', aux_save_softmax)
                    # Histogram of the exploration soft-max, normalised to its mode.
                    fig, ax = plt.subplots(4, 4, figsize=(10, 5))
                    for i in range(0, 4):
                        for j in range(0, 4):
                            h, bins = np.histogram(sensory_expl[:, 4 * i + j], bins=15)
                            ax[i, j].bar(bins[:-1], h/np.max(h), width = 0.05, color='b', alpha=0.6)
                            ax[i, j].spines['top'].set_color('none')
                            ax[i, j].spines['right'].set_color('none')
                            ax[i, j].set_xlim(0, 1)
                            ax[i, j].set_xlabel('Soft-max', fontsize=8)
                            ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    plt.tight_layout()
                    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_Sensory_response_expl_hist' + str(sim_counter) + '.' + args.format)
            # Collect this classifier's per-learning-rate traces.
            cfr_class_A_all.append(cfr_class_A)
            cfr_class_A_expl_all.append(cfr_class_A_expl)
            cfr_class_raw_all.append(cfr_class_raw)
            cfr_class_expl_all.append(cfr_class_expl)
        # Legend labels for the two learning-rate conditions
        # (assumes args.learning_rate == [0.1, 0.01] — TODO confirm).
        cfr_lr = ['10e-1', '10e-2']
        # CFR classifier sensory response
        for cl in range(0, len(args.classifier_name)):
            fig, ax = plt.subplots(4, 4, figsize=(10, 5))
            for i in range(0, 4):
                for j in range(0, 4):
                    for lr in range(0, len(args.learning_rate)):
                        ax[i, j].plot(x_time, np.ones((np.shape(cfr_class_A_all[cl][lr])[0], 1)), 'k')
                        ax[i, j].plot(x_time, cfr_class_A_all[cl][lr][:, 4 * i + j], color=color[lr], label = cfr_lr[lr])
                    ax[i, j].set_ylim(0, 1)
                    ax[i, j].set_ylabel('Soft-max', fontsize=8)
                    ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                    ax[i, j].legend(loc='lower right', fontsize=5)
                    ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    ax[i, j].spines['top'].set_color('none')
                    ax[i, j].spines['right'].set_color('none')
            plt.tight_layout()
            plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_CFR_Sensory_response_sim' + str(sim_counter) + '.' + args.format)
            # CFR sensory response raw score
            fig, ax = plt.subplots(4, 4, figsize=(10, 5))
            for i in range(0, 4):
                for j in range(0, 4):
                    for lr in range(0, len(args.learning_rate)):
                        ax[i, j].plot(x_time, np.ones((np.shape(cfr_class_raw_all[cl][lr])[0], 1)), 'k')
                        ax[i, j].plot(x_time, cfr_class_raw_all[cl][lr][:, 4 * i + j], color=color[lr], label=cfr_lr[lr])
                    ax[i, j].set_ylim(0, 1)
                    ax[i, j].set_ylabel('Raw score', fontsize=8)
                    ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
                    ax[i, j].legend(loc='lower right', fontsize=5)
                    ax[i, j].set_title(classes[4 * i + j], fontsize=8)
                    ax[i, j].spines['top'].set_color('none')
                    ax[i, j].spines['right'].set_color('none')
            plt.tight_layout()
            plt.savefig(
                args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_CFR_raw_score_sim' + str(
                    sim_counter) + '.' + args.format)
        # Ex syllable B
        # NOTE(review): the hard-coded index [1] assumes at least two
        # classifiers; the default classifier_name has one — confirm.
        fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
        for lr in range(0, len(args.learning_rate)):
            axs[0].plot(x_time, np.ones((np.shape(cfr_class_expl_all[1][lr])[0], 1)), 'k')
            axs[0].plot(x_time, cfr_class_expl_all[1][lr][:, 1], 'b')
        axs[0].spines['top'].set_color('none')
        axs[0].spines['right'].set_color('none')
        axs[0].set_xlim(0, 300)
        #axs[0].set_xlabel('Time (in number of time steps)', fontsize=8)
        axs[0].legend(loc='lower right', fontsize=5)
        axs[0].set_ylabel('Raw score', fontsize=15)
        for lr in range(0, len(args.learning_rate)):
            axs[1].plot(x_time, np.ones((np.shape(cfr_class_raw_all[1][lr])[0], 1)), 'k')
            axs[1].plot(x_time, cfr_class_raw_all[1][lr][:, 1], color=color[lr], label = cfr_lr[lr])
        axs[1].spines['top'].set_color('none')
        axs[1].spines['right'].set_color('none')
        axs[1].set_xlim(0, 300)
        axs[1].set_xlabel('Time (in number of time steps)', fontsize=8)
        axs[1].legend(loc='lower right', fontsize=5)
        axs[1].set_ylabel('Raw score', fontsize=15)
        plt.tight_layout()
        plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'B1_realBIS.' + args.format)
        # Ex syllable C
        fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
        for lr in range(0, len(args.learning_rate)):
            axs[0].plot(x_time, np.ones((np.shape(cfr_class_expl_all[0][lr])[0], 3)), 'k')
            axs[0].plot(x_time, cfr_class_expl_all[0][lr][:, 3], 'b')
        axs[0].set_xlim(0, 300)
        axs[0].spines['top'].set_color('none')
        axs[0].spines['right'].set_color('none')
        #axs[0].set_xlabel('Time (in number of time steps)', fontsize=8)
        axs[0].legend(loc='lower right', fontsize=5)
        axs[0].set_ylabel('Raw score', fontsize=15)
        for lr in range(0, len(args.learning_rate)):
            axs[1].plot(x_time, np.ones((np.shape(cfr_class_raw_all[0][lr])[0], 3)), 'k')
            axs[1].plot(x_time, cfr_class_raw_all[0][lr][:, 3], color=color[lr], label = cfr_lr[lr])
        axs[1].set_xlim(0, 300)
        axs[1].spines['top'].set_color('none')
        axs[1].spines['right'].set_color('none')
        axs[1].set_xlabel('Time (in number of time steps)', fontsize=8)
        axs[1].legend(loc='lower right', fontsize=5)
        axs[1].set_ylabel('Raw score', fontsize=15)
        plt.tight_layout()
        plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'C_extBIS.' + args.format)
        # NOTE(review): input() blocks the script until Enter is pressed on
        # every simulation iteration — confirm this pause is intentional.
        input()
        # Reduced 3-syllable repertoire: one stacked panel per syllable.
        # (Uses sensory_gen/sensory_expl leaked from the last loop iteration.)
        if np.size(args.T_names) == 3:
            fig, ax = plt.subplots(args.ns, 1, figsize=(5, 10))
            for j in range(0, args.ns):
                ax.flat[j].plot(x_time, np.ones((np.shape(sensory_gen)[0], 1)))
                ax.flat[j].plot(x_time, sensory_gen[:,j], color[j], label='Syllable '+ args.T_names[j])
                ax[j].set_ylabel('Sensory response', fontsize=15)
                ax[j].set_xlabel('Time (in number of time steps)', fontsize=15)
            plt.legend(loc='lower right', fontsize=15)
            plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Sensory_response_sim' + str(sim_counter) + '.' + args.format)
            fig, ax = plt.subplots(args.ns, 1, figsize=(5, 10))
            for j in range(0, args.ns):
                ax.flat[j].plot(x_time, np.ones((np.shape(sensory_expl)[0], 1)))
                ax.flat[j].plot(x_time, sensory_expl[:, j], color[j], label='Syllable ' + args.T_names[j])
                ax[j].set_ylabel('Sensory response', fontsize=15)
                ax[j].set_xlabel('Time (in number of time steps)', fontsize=15)
            plt.legend(loc='lower right', fontsize=15)
            plt.savefig(
                args.data_dir + '/' + args.output_dir + '/' + 'Sensory_response_expl' + str(sim_counter) + '.' + args.format)
    print('Done')
def plot_syll(args):
    """
    Plot the spectrogram of example syllable recordings.

    Loads every ``*R.wav`` file found in ``args.data_dir`` (edit the glob
    pattern below to select a different syllable), trims silence, and saves a
    log-magnitude STFT spectrogram per file.

    :param args: parsed command-line arguments (data_dir, output_dir, N, H,
        color, format)
    :return: None; figures are written under ``args.data_dir/args.output_dir``
    """
    syllables = glob.glob(args.data_dir + '/' + '*R.wav')
    for counter, wav_path in enumerate(syllables):
        samples_aux, sr = librosa.load(wav_path, sr=16000)
        # np.float was removed in NumPy 1.24; the builtin float is the exact
        # equivalent of the old alias.
        trim = librosa.effects.trim(samples_aux.astype(float), top_db=20)
        samples_aux = trim[0]
        X = librosa.stft(samples_aux, n_fft=args.N, hop_length=args.H, win_length=args.N,
                         window='hann', pad_mode='constant', center=True)
        # Log-compressed power spectrogram.
        Y = np.log(1 + 100 * np.abs(X) ** 2)
        # Axis coordinates: frame times (s) and bin frequencies (Hz).
        T_coef = np.arange(X.shape[1]) * args.H / sr
        K = args.N // 2
        F_coef = np.arange(K + 1) * sr / args.N
        plt.figure(figsize=(4, 18))
        extent = [T_coef[0], T_coef[-1], F_coef[0], F_coef[-1]]
        plt.imshow(Y, aspect='auto', origin='lower', extent=extent, cmap=args.color,
                   norm=colors.PowerNorm(gamma=0.5))
        plt.xlabel('Time (seconds)')
        plt.ylabel('Frequency (Hz)')
        plt.title(str(counter))
        plt.tight_layout()
        # Include the file index in the name: the previous fixed name ('R.png')
        # was overwritten on every iteration, keeping only the last syllable.
        plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'R' + str(counter) + '.' + args.format)
    print('Done')
def mean_spectro(learning_rate, sim_counter, ths, args):
    """
    Average the spectrograms of productions whose activation exceeds a threshold.

    :param learning_rate: which learning-rate condition to load
    :param sim_counter: which simulation instance to load
    :param ths: activation threshold that defines "active"
    :param args: parsed command-line arguments (data_dir, output_dir, sim_name,
        classifier_name, T_names, n_points, N, H, color, format)
    :return: None; saves the 4x4 mean-spectrogram figure
    """
    # Load the per-trial activation traces.
    p95 = np.load(args.data_dir + '/' + args.classifier_name[0] + '_lr' + str(learning_rate) + '_p95_sim_' + str(sim_counter) + '.npy')
    # Remove focus (keep every 15th of the first 200 trials, then the rest).
    p95_begin = p95[0:200, :]
    p95_jump = np.zeros((args.n_points + 1, np.size(args.T_names)))
    p95_jump[0:14, :] = p95_begin[0::15, :]
    p95_jump[14::, :] = p95[200::, :]
    # Map each kept row of p95_jump back to its original trial number.
    # (Renamed from `list`, which shadowed the builtin.)
    trial_index = np.zeros((args.n_points + 1,))
    aux = np.linspace(0, 3000, 3000).astype(int)
    trial_index[0:200] = aux[0::15]
    trial_index[-1] = 3000
    mean_spectrogram_env = []
    T = []
    for c in range(0, np.size(args.T_names)):
        # Trials where the activation threshold is exceeded.
        loc = np.where(p95_jump[:, c] > ths)[0]
        # NOTE(review): if `loc` is empty for some syllable, T_coef below is
        # never assigned and T.append raises NameError — confirm whether that
        # can happen with the chosen threshold.
        spectrograms_envelope = []
        for sp in range(0, np.size(loc)):
            # The original if/else branches were identical (the else only held
            # the no-op `loc[sp] = loc[sp]`), so a single load suffices.
            samples_aux, sr = librosa.load(
                args.data_dir + '/' + args.sim_name + str(sim_counter) + '/' + args.classifier_name[
                    0] + '_lr' + str(learning_rate) + '_' + args.sim_name + str(sim_counter) + '_' + str(
                    int(trial_index[loc[sp]])) + '/' + '__condition_0_' + str(int(trial_index[loc[sp]])) + '/' + 'sensory_production_condition_0_' + args.T_names[c] + '.wav', sr=16000)
            # np.float was removed in NumPy 1.24; builtin float is equivalent.
            trim = librosa.effects.trim(samples_aux.astype(float), top_db=20)
            samples_aux = trim[0]
            # Pad short clips with centred silence up to 4000 * 16 samples.
            if samples_aux.size / 16 < 4000:
                aux_size = 4000 - samples_aux.size / 16
                silence = np.zeros((int(round(aux_size / 2) * 16)),)
                samples_aux = np.append(silence, samples_aux)
                samples_aux = np.append(samples_aux, silence)
            rawsong = samples_aux.astype(float)
            rawsong = rawsong.flatten()
            # Amplitude envelope used to locate sound onset.
            amp = Song_functions.smooth_data(rawsong, sr, freq_cutoffs=(500, 7999))
            # Align onset: drop everything before the envelope first rises,
            # then pad to a fixed 50000-sample length.
            new_song = rawsong[np.where(amp > 0.00001)[0][0]::]
            silence = np.zeros((50000 - np.size(new_song),))
            new_song = np.append(new_song, silence)
            X = librosa.stft(new_song, n_fft=args.N, hop_length=args.H, win_length=args.N,
                             window='hann', pad_mode='constant', center=True)
            T_coef = np.arange(X.shape[1]) * args.H / sr * 1000
            spectrograms_envelope.append(np.log(1 + 100 * np.abs(X ** 2)))
        mean_spectrogram_env.append(np.mean(spectrograms_envelope, axis=0))  # dimension 16
        T.append(T_coef)
    # Mean spectrogram after convergence (plot)
    fig, axs = plt.subplots(nrows=4, ncols=4, figsize=(10, 14), sharey=True, sharex=True)
    for i in range(0, 4):
        for j in range(0, 4):
            extent = [0, 300, 0, 8000]
            # A scalar nan (size 1) marks syllables with no active trials.
            if mean_spectrogram_env[4 * i + j].size > 1:
                axs[i, j].imshow(mean_spectrogram_env[4 * i + j], extent=extent, cmap=args.color,
                                 aspect='auto', origin='lower', norm=colors.PowerNorm(gamma=0.5))
            axs[i, j].set_title(args.T_names[4 * i + j], fontsize=15)
            axs[i, j].set_xlim(0, 20)
            axs[i, j].spines['top'].set_color('none')
            axs[i, j].spines['right'].set_color('none')
            axs[0, j].set_xlabel('Time (ms)', fontsize=15)
            axs[i, 3].set_ylabel('Frequency (Hz)', fontsize=15)
    plt.tight_layout()
    # Use the learning_rate argument in the file name; the original embedded
    # str(args.learning_rate), which stringifies the whole list of rates.
    plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[0] + '_' + str(sim_counter) + '_lr' + str(learning_rate) + 'Mean_spectrogram_envelope.' + args.format)
    print('Done')
if __name__ == '__main__':
    import argparse
    import glob
    import os  # used below for output-dir creation; it was not imported here before
    import sys
    # Example how to run it:
    #   python plotGAN.py --option sensory --data_dir experiment --output_dir plots
    # The output_dir will be created by default inside the data directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--option', type=str,
                        help='What do you want to see? Motor exploration or results after learning?',
                        choices=['sensory', 'activation_aud', 'syll', 'mean_spectro', 'cfr'])
    parser.add_argument('--data_dir', type=str,
                        help='Data directory where the data are saved',
                        default=None)
    parser.add_argument('--output_dir', type=str,
                        help='Output directory where to save the plots',
                        default=None)
    simulation_args = parser.add_argument_group('Simulation')
    simulation_args.add_argument('--MAX_trial', type=int,
                                 help='Maximal number of trials',
                                 default=3001)
    simulation_args.add_argument('--ns', type=int,
                                 help='number of syllables',
                                 default=16)
    simulation_args.add_argument('--N_sim', type=int, help='Number of instances', default=3)
    # NOTE(review): `type=list` splits a command-line string into single
    # characters; these options only work through their defaults. Switching to
    # nargs='+' would change the CLI, so it is only flagged here.
    simulation_args.add_argument('--T_names', type=list, help='Target syllables',
                                 default=['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V'])
    simulation_args.add_argument('--sim_name', type=str, help='Sub directory containing the generations per each simulation', default='sensory_prod_sim_')
    simulation_args.add_argument('--classifier_name', type=list, help='Which classifier model I want to use. Multiple classifier are allowed', default=['EXT'])
    simulation_args.add_argument('--learning_rate', type=list,
                                 help='Learning rate used during learning',
                                 default=[0.1, 0.01])
    simulation_args.add_argument('--beta', type=list, help='Type of auditory softmax activation',
                                 default=[0.01, 0.1, 1, 5])
    spectro_args = parser.add_argument_group('Spectorgram')
    spectro_args.add_argument('--N', type=int, help='Nftt spectrogram librosa', default=256)
    spectro_args.add_argument('--H', type=int, help='Hop length spectrogram librosa', default=64)
    spectro_args.add_argument('--color', type=str, help='Colormap', default='inferno')
    wavegan_args = parser.add_argument_group('WaveGAN')
    wavegan_args.add_argument('--wavegan_latent_dim', type=int,
                              help='Dimension of the latent space',
                              default=2)
    plot_args = parser.add_argument_group('Plots')
    plot_args.add_argument('--format', type=str, help='Saving format', default='png')
    plot_args.add_argument('--time_limit', type=int, help='Print only a certain time', default=100)
    plot_args.add_argument('--n_points', type=int, help='How many point to be plot in the figure (=to saved points)', default=200)
    plot_args.add_argument('--example', type=str, help='Figure of an example', default=True)
    args = parser.parse_args()
    # Make output dir (exist_ok replaces the previous isdir pre-check).
    if args.output_dir is not None:
        os.makedirs(args.data_dir + '/' + args.output_dir, exist_ok=True)
    # Dispatch on the requested analysis.
    if args.option == 'activation_aud':
        plot_auditory_activation(args)
    if args.option == 'sensory':
        plot_sensory(args)
    if args.option == 'syll':
        plot_syll(args)
    if args.option == 'mean_spectro':
        learning_rate = 0.01
        ths = 0.99
        sim_counter = 2
        mean_spectro(learning_rate, sim_counter, ths, args)
    if args.option == 'cfr':
        # Latent space conditions to compare, with one color each.
        ld = [1, 2, 3, 6]
        colors = ['r', 'b', 'gold', 'k']
        p95_MEAN = []
        for i in range(0, len(ld)):
            p95_MEAN.append(np.load(args.data_dir + '/' + 'p95_MEAN_lr_' + str(ld[i]) + '.npy'))
        cfr_dim13(p95_MEAN, colors, ld, args)
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.sin",
"numpy.arange",
"librosa.load",
"numpy.mean",
"numpy.histogram",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.pyplot.xlabel",
"songbird_data_analysis.Song_functions.smooth_data",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.... | [((661, 703), 'numpy.sqrt', 'np.sqrt', (['(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)'], {}), '(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)\n', (668, 703), True, 'import numpy as np\n'), ((859, 901), 'numpy.sqrt', 'np.sqrt', (['(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)'], {}), '(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)\n', (866, 901), True, 'import numpy as np\n'), ((906, 928), 'numpy.arctan', 'np.arctan', (['(v[1] / v[0])'], {}), '(v[1] / v[0])\n', (915, 928), True, 'import numpy as np\n'), ((1196, 1218), 'numpy.arctan', 'np.arctan', (['(v[0] / v[1])'], {}), '(v[0] / v[1])\n', (1205, 1218), True, 'import numpy as np\n'), ((1229, 1251), 'numpy.arctan', 'np.arctan', (['(v[0] / v[2])'], {}), '(v[0] / v[2])\n', (1238, 1251), True, 'import numpy as np\n'), ((1673, 1714), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(2 * resolution)'], {}), '(0, 2 * np.pi, 2 * resolution)\n', (1684, 1714), True, 'import numpy as np\n'), ((1723, 1756), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'resolution'], {}), '(0, np.pi, resolution)\n', (1734, 1756), True, 'import numpy as np\n'), ((1775, 1798), 'numpy.meshgrid', 'np.meshgrid', (['theta', 'phi'], {}), '(theta, phi)\n', (1786, 1798), True, 'import numpy as np\n'), ((1934, 1953), 'numpy.stack', 'np.stack', (['[x, y, z]'], {}), '([x, y, z])\n', (1942, 1953), True, 'import numpy as np\n'), ((29965, 30000), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(12, 7)'}), '(4, 4, figsize=(12, 7))\n', (29977, 30000), True, 'import matplotlib.pyplot as plt\n'), ((30920, 30936), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (30929, 30936), True, 'import matplotlib.pyplot as plt\n'), ((31246, 31281), 'numpy.linspace', 'np.linspace', (['(0)', 'args.MAX_trial', '(201)'], {}), '(0, args.MAX_trial, 201)\n', (31257, 31281), True, 'import numpy as np\n'), ((32317, 32333), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (32326, 32333), True, 'import matplotlib.pyplot 
as plt\n'), ((64059, 64100), 'glob.glob', 'glob.glob', (["(args.data_dir + '/' + '*R.wav')"], {}), "(args.data_dir + '/' + '*R.wav')\n", (64068, 64100), False, 'import glob\n'), ((65873, 65903), 'numpy.zeros', 'np.zeros', (['(args.n_points + 1,)'], {}), '((args.n_points + 1,))\n', (65881, 65903), True, 'import numpy as np\n'), ((68662, 68736), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'ncols': '(4)', 'figsize': '(10, 14)', 'sharey': '(True)', 'sharex': '(True)'}), '(nrows=4, ncols=4, figsize=(10, 14), sharey=True, sharex=True)\n', (68674, 68736), True, 'import matplotlib.pyplot as plt\n'), ((69422, 69440), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (69438, 69440), True, 'import matplotlib.pyplot as plt\n'), ((69947, 69972), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (69970, 69972), False, 'import argparse\n'), ((1477, 1487), 'numpy.size', 'np.size', (['v'], {}), '(v)\n', (1484, 1487), True, 'import numpy as np\n'), ((1505, 1527), 'numpy.arctan', 'np.arctan', (['(v[i] - w[i])'], {}), '(v[i] - w[i])\n', (1514, 1527), True, 'import numpy as np\n'), ((1813, 1826), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1819, 1826), True, 'import numpy as np\n'), ((30679, 30697), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30695, 30697), True, 'import matplotlib.pyplot as plt\n'), ((30706, 30804), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.data_dir + '/' + args.output_dir + '/' + '_p95_MEAN_all' + '.' + args\n .format)"], {}), "(args.data_dir + '/' + args.output_dir + '/' + '_p95_MEAN_all' +\n '.' 
+ args.format)\n", (30717, 30804), True, 'import matplotlib.pyplot as plt\n'), ((31449, 31484), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(12, 7)'}), '(4, 4, figsize=(12, 7))\n', (31461, 31484), True, 'import matplotlib.pyplot as plt\n'), ((32171, 32189), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (32187, 32189), True, 'import matplotlib.pyplot as plt\n'), ((59974, 60047), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'figsize': '(10, 5)', 'sharey': '(True)', 'sharex': '(True)'}), '(nrows=2, ncols=1, figsize=(10, 5), sharey=True, sharex=True)\n', (59986, 60047), True, 'import matplotlib.pyplot as plt\n'), ((61127, 61145), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (61143, 61145), True, 'import matplotlib.pyplot as plt\n'), ((61154, 61244), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.data_dir + '/' + args.output_dir + '/' + 'B1_realBIS.' + args.format)"], {}), "(args.data_dir + '/' + args.output_dir + '/' + 'B1_realBIS.' +\n args.format)\n", (61165, 61244), True, 'import matplotlib.pyplot as plt\n'), ((61285, 61358), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'figsize': '(10, 5)', 'sharey': '(True)', 'sharex': '(True)'}), '(nrows=2, ncols=1, figsize=(10, 5), sharey=True, sharex=True)\n', (61297, 61358), True, 'import matplotlib.pyplot as plt\n'), ((62438, 62456), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (62454, 62456), True, 'import matplotlib.pyplot as plt\n'), ((62465, 62553), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.data_dir + '/' + args.output_dir + '/' + 'C_extBIS.' + args.format)"], {}), "(args.data_dir + '/' + args.output_dir + '/' + 'C_extBIS.' 
+\n args.format)\n", (62476, 62553), True, 'import matplotlib.pyplot as plt\n'), ((64179, 64221), 'librosa.load', 'librosa.load', (['syllables[counter]'], {'sr': '(16000)'}), '(syllables[counter], sr=16000)\n', (64191, 64221), False, 'import librosa\n'), ((64342, 64473), 'librosa.stft', 'librosa.stft', (['samples_aux'], {'n_fft': 'args.N', 'hop_length': 'args.H', 'win_length': 'args.N', 'window': '"""hann"""', 'pad_mode': '"""constant"""', 'center': '(True)'}), "(samples_aux, n_fft=args.N, hop_length=args.H, win_length=args.\n N, window='hann', pad_mode='constant', center=True)\n", (64354, 64473), False, 'import librosa\n'), ((64648, 64675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 18)'}), '(figsize=(4, 18))\n', (64658, 64675), True, 'import matplotlib.pyplot as plt\n'), ((64867, 64895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (seconds)"""'], {}), "('Time (seconds)')\n", (64877, 64895), True, 'import matplotlib.pyplot as plt\n'), ((64904, 64932), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (64914, 64932), True, 'import matplotlib.pyplot as plt\n'), ((64973, 64991), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (64989, 64991), True, 'import matplotlib.pyplot as plt\n'), ((65000, 65087), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.data_dir + '/' + args.output_dir + '/' + 'R' + '.' + args.format)"], {}), "(args.data_dir + '/' + args.output_dir + '/' + 'R' + '.' 
+ args.\n format)\n", (65011, 65087), True, 'import matplotlib.pyplot as plt\n'), ((66066, 66087), 'numpy.size', 'np.size', (['args.T_names'], {}), '(args.T_names)\n', (66073, 66087), True, 'import numpy as np\n'), ((949, 979), 'numpy.sqrt', 'np.sqrt', (['(v[0] ** 2 + v[1] ** 2)'], {}), '(v[0] ** 2 + v[1] ** 2)\n', (956, 979), True, 'import numpy as np\n'), ((1441, 1451), 'numpy.size', 'np.size', (['v'], {}), '(v)\n', (1448, 1451), True, 'import numpy as np\n'), ((1840, 1851), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1846, 1851), True, 'import numpy as np\n'), ((1872, 1883), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1878, 1883), True, 'import numpy as np\n'), ((1908, 1921), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1914, 1921), True, 'import numpy as np\n'), ((5176, 5211), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (5188, 5211), True, 'import matplotlib.pyplot as plt\n'), ((5814, 5832), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5830, 5832), True, 'import matplotlib.pyplot as plt\n'), ((6049, 6084), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (6061, 6084), True, 'import matplotlib.pyplot as plt\n'), ((6680, 6698), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6696, 6698), True, 'import matplotlib.pyplot as plt\n'), ((6912, 6947), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (6924, 6947), True, 'import matplotlib.pyplot as plt\n'), ((7540, 7558), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7556, 7558), True, 'import matplotlib.pyplot as plt\n'), ((7769, 7804), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (7781, 7804), True, 'import matplotlib.pyplot as plt\n'), 
((8407, 8425), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8423, 8425), True, 'import matplotlib.pyplot as plt\n'), ((8641, 8676), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (8653, 8676), True, 'import matplotlib.pyplot as plt\n'), ((9293, 9311), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9309, 9311), True, 'import matplotlib.pyplot as plt\n'), ((9534, 9569), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (9546, 9569), True, 'import matplotlib.pyplot as plt\n'), ((10178, 10196), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10194, 10196), True, 'import matplotlib.pyplot as plt\n'), ((10415, 10450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (10427, 10450), True, 'import matplotlib.pyplot as plt\n'), ((11054, 11072), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11070, 11072), True, 'import matplotlib.pyplot as plt\n'), ((11287, 11322), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (11299, 11322), True, 'import matplotlib.pyplot as plt\n'), ((11874, 11892), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11890, 11892), True, 'import matplotlib.pyplot as plt\n'), ((12106, 12141), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (12118, 12141), True, 'import matplotlib.pyplot as plt\n'), ((12700, 12718), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12716, 12718), True, 'import matplotlib.pyplot as plt\n'), ((12932, 12967), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (12944, 12967), 
True, 'import matplotlib.pyplot as plt\n'), ((13517, 13535), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13533, 13535), True, 'import matplotlib.pyplot as plt\n'), ((13747, 13782), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (13759, 13782), True, 'import matplotlib.pyplot as plt\n'), ((14336, 14354), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14352, 14354), True, 'import matplotlib.pyplot as plt\n'), ((14568, 14603), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (14580, 14603), True, 'import matplotlib.pyplot as plt\n'), ((15210, 15228), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15226, 15228), True, 'import matplotlib.pyplot as plt\n'), ((15447, 15482), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (15459, 15482), True, 'import matplotlib.pyplot as plt\n'), ((16038, 16056), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16054, 16056), True, 'import matplotlib.pyplot as plt\n'), ((16277, 16312), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (16289, 16312), True, 'import matplotlib.pyplot as plt\n'), ((16862, 16880), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16878, 16880), True, 'import matplotlib.pyplot as plt\n'), ((17098, 17133), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (17110, 17133), True, 'import matplotlib.pyplot as plt\n'), ((17690, 17708), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17706, 17708), True, 'import matplotlib.pyplot as plt\n'), ((17924, 17959), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': 
'(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (17936, 17959), True, 'import matplotlib.pyplot as plt\n'), ((18513, 18531), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18529, 18531), True, 'import matplotlib.pyplot as plt\n'), ((18748, 18783), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (18760, 18783), True, 'import matplotlib.pyplot as plt\n'), ((19337, 19355), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19353, 19355), True, 'import matplotlib.pyplot as plt\n'), ((19564, 19580), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (19573, 19580), True, 'import matplotlib.pyplot as plt\n'), ((27558, 27586), 'numpy.mean', 'np.mean', (['p95_all_sim'], {'axis': '(0)'}), '(p95_all_sim, axis=0)\n', (27565, 27586), True, 'import numpy as np\n'), ((27656, 27691), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (27668, 27691), True, 'import matplotlib.pyplot as plt\n'), ((28781, 28816), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (28793, 28816), True, 'import matplotlib.pyplot as plt\n'), ((57728, 57763), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (57740, 57763), True, 'import matplotlib.pyplot as plt\n'), ((58594, 58612), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (58610, 58612), True, 'import matplotlib.pyplot as plt\n'), ((58846, 58881), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (58858, 58881), True, 'import matplotlib.pyplot as plt\n'), ((59715, 59733), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (59731, 59733), True, 'import matplotlib.pyplot as plt\n'), ((62579, 62600), 
'numpy.size', 'np.size', (['args.T_names'], {}), '(args.T_names)\n', (62586, 62600), True, 'import numpy as np\n'), ((62629, 62670), 'matplotlib.pyplot.subplots', 'plt.subplots', (['args.ns', '(1)'], {'figsize': '(5, 10)'}), '(args.ns, 1, figsize=(5, 10))\n', (62641, 62670), True, 'import matplotlib.pyplot as plt\n'), ((63053, 63095), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'fontsize': '(15)'}), "(loc='lower right', fontsize=15)\n", (63063, 63095), True, 'import matplotlib.pyplot as plt\n'), ((63252, 63293), 'matplotlib.pyplot.subplots', 'plt.subplots', (['args.ns', '(1)'], {'figsize': '(5, 10)'}), '(args.ns, 1, figsize=(5, 10))\n', (63264, 63293), True, 'import matplotlib.pyplot as plt\n'), ((63680, 63722), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'fontsize': '(15)'}), "(loc='lower right', fontsize=15)\n", (63690, 63722), True, 'import matplotlib.pyplot as plt\n'), ((65755, 65776), 'numpy.size', 'np.size', (['args.T_names'], {}), '(args.T_names)\n', (65762, 65776), True, 'import numpy as np\n'), ((65914, 65940), 'numpy.linspace', 'np.linspace', (['(0)', '(3000)', '(3000)'], {}), '(0, 3000, 3000)\n', (65925, 65940), True, 'import numpy as np\n'), ((66169, 66199), 'numpy.where', 'np.where', (['(p95_jump[:, c] > ths)'], {}), '(p95_jump[:, c] > ths)\n', (66177, 66199), True, 'import numpy as np\n'), ((66267, 66279), 'numpy.size', 'np.size', (['loc'], {}), '(loc)\n', (66274, 66279), True, 'import numpy as np\n'), ((67760, 67825), 'songbird_data_analysis.Song_functions.smooth_data', 'Song_functions.smooth_data', (['rawsong', 'sr'], {'freq_cutoffs': '(500, 7999)'}), '(rawsong, sr, freq_cutoffs=(500, 7999))\n', (67786, 67825), False, 'from songbird_data_analysis import Song_functions\n'), ((67975, 68003), 'numpy.append', 'np.append', (['new_song', 'silence'], {}), '(new_song, silence)\n', (67984, 68003), True, 'import numpy as np\n'), ((68021, 68148), 'librosa.stft', 'librosa.stft', (['new_song'], 
{'n_fft': 'args.N', 'hop_length': 'args.H', 'win_length': 'args.N', 'window': '"""hann"""', 'pad_mode': '"""constant"""', 'center': '(True)'}), "(new_song, n_fft=args.N, hop_length=args.H, win_length=args.N,\n window='hann', pad_mode='constant', center=True)\n", (68033, 68148), False, 'import librosa\n'), ((68321, 68359), 'numpy.mean', 'np.mean', (['spectrograms_envelope'], {'axis': '(0)'}), '(spectrograms_envelope, axis=0)\n', (68328, 68359), True, 'import numpy as np\n'), ((73308, 73360), 'os.path.isdir', 'os.path.isdir', (["(args.data_dir + '/' + args.output_dir)"], {}), "(args.data_dir + '/' + args.output_dir)\n", (73321, 73360), False, 'import os\n'), ((73374, 73424), 'os.makedirs', 'os.makedirs', (["(args.data_dir + '/' + args.output_dir)"], {}), "(args.data_dir + '/' + args.output_dir)\n", (73385, 73424), False, 'import os\n'), ((19612, 19630), 'numpy.size', 'np.size', (['args.beta'], {}), '(args.beta)\n', (19619, 19630), True, 'import numpy as np\n'), ((19659, 19694), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (19671, 19694), True, 'import matplotlib.pyplot as plt\n'), ((20293, 20311), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20309, 20311), True, 'import matplotlib.pyplot as plt\n'), ((22368, 22403), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (22380, 22403), True, 'import matplotlib.pyplot as plt\n'), ((23065, 23083), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23081, 23083), True, 'import matplotlib.pyplot as plt\n'), ((24647, 24682), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (24659, 24682), True, 'import matplotlib.pyplot as plt\n'), ((25142, 25160), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25158, 25160), True, 'import matplotlib.pyplot as 
plt\n'), ((25500, 25535), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (25512, 25535), True, 'import matplotlib.pyplot as plt\n'), ((26200, 26218), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26216, 26218), True, 'import matplotlib.pyplot as plt\n'), ((27465, 27481), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (27474, 27481), True, 'import matplotlib.pyplot as plt\n'), ((28518, 28536), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (28534, 28536), True, 'import matplotlib.pyplot as plt\n'), ((29631, 29649), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29647, 29649), True, 'import matplotlib.pyplot as plt\n'), ((64531, 64552), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (64540, 64552), True, 'import numpy as np\n'), ((64608, 64624), 'numpy.arange', 'np.arange', (['(K + 1)'], {}), '(K + 1)\n', (64617, 64624), True, 'import numpy as np\n'), ((64830, 64857), 'matplotlib.colors.PowerNorm', 'colors.PowerNorm', ([], {'gamma': '(0.5)'}), '(gamma=0.5)\n', (64846, 64857), True, 'import matplotlib.colors as colors\n'), ((67559, 67590), 'numpy.append', 'np.append', (['silence', 'samples_aux'], {}), '(silence, samples_aux)\n', (67568, 67590), True, 'import numpy as np\n'), ((67621, 67652), 'numpy.append', 'np.append', (['samples_aux', 'silence'], {}), '(samples_aux, silence)\n', (67630, 67652), True, 'import numpy as np\n'), ((4869, 4893), 'numpy.shape', 'np.shape', (['raw_score_expl'], {}), '(raw_score_expl)\n', (4877, 4893), True, 'import numpy as np\n'), ((5095, 5119), 'numpy.shape', 'np.shape', (['raw_score_expl'], {}), '(raw_score_expl)\n', (5103, 5119), True, 'import numpy as np\n'), ((5124, 5148), 'numpy.shape', 'np.shape', (['raw_score_expl'], {}), '(raw_score_expl)\n', (5132, 5148), True, 'import numpy as np\n'), ((5314, 5365), 'numpy.histogram', 'np.histogram', 
(['raw_score_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(raw_score_expl[:, 4 * i + j], bins=15)\n', (5326, 5365), True, 'import numpy as np\n'), ((6187, 6232), 'numpy.histogram', 'np.histogram', (['p95_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(p95_expl[:, 4 * i + j], bins=15)\n', (6199, 6232), True, 'import numpy as np\n'), ((7050, 7095), 'numpy.histogram', 'np.histogram', (['max_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(max_expl[:, 4 * i + j], bins=15)\n', (7062, 7095), True, 'import numpy as np\n'), ((7907, 7957), 'numpy.histogram', 'np.histogram', (['max_norm_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(max_norm_expl[:, 4 * i + j], bins=15)\n', (7919, 7957), True, 'import numpy as np\n'), ((8779, 8836), 'numpy.histogram', 'np.histogram', (['scaling_softmax_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(scaling_softmax_expl[:, 4 * i + j], bins=15)\n', (8791, 8836), True, 'import numpy as np\n'), ((9672, 9725), 'numpy.histogram', 'np.histogram', (['softmax_MAX_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(softmax_MAX_expl[:, 4 * i + j], bins=15)\n', (9684, 9725), True, 'import numpy as np\n'), ((10553, 10602), 'numpy.histogram', 'np.histogram', (['scaling_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(scaling_expl[:, 4 * i + j], bins=15)\n', (10565, 10602), True, 'import numpy as np\n'), ((11425, 11472), 'numpy.histogram', 'np.histogram', (['arctg_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(arctg_expl[:, 4 * i + j], bins=15)\n', (11437, 11472), True, 'import numpy as np\n'), ((12244, 12292), 'numpy.histogram', 'np.histogram', (['square_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(square_expl[:, 4 * i + j], bins=15)\n', (12256, 12292), True, 'import numpy as np\n'), ((13070, 13116), 'numpy.histogram', 'np.histogram', (['sign_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(sign_expl[:, 4 * i + j], bins=15)\n', (13082, 13116), True, 'import numpy as np\n'), ((13885, 13933), 'numpy.histogram', 'np.histogram', (['minmax_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(minmax_expl[:, 4 * i + j], 
bins=15)\n', (13897, 13933), True, 'import numpy as np\n'), ((14706, 14759), 'numpy.histogram', 'np.histogram', (['sign_minmax_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(sign_minmax_expl[:, 4 * i + j], bins=15)\n', (14718, 14759), True, 'import numpy as np\n'), ((15585, 15635), 'numpy.histogram', 'np.histogram', (['logistic_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(logistic_expl[:, 4 * i + j], bins=15)\n', (15597, 15635), True, 'import numpy as np\n'), ((16415, 16461), 'numpy.histogram', 'np.histogram', (['tanh_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(tanh_expl[:, 4 * i + j], bins=15)\n', (16427, 16461), True, 'import numpy as np\n'), ((17236, 17286), 'numpy.histogram', 'np.histogram', (['raw_mean_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(raw_mean_expl[:, 4 * i + j], bins=15)\n', (17248, 17286), True, 'import numpy as np\n'), ((18062, 18113), 'numpy.histogram', 'np.histogram', (['mean_norm_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(mean_norm_expl[:, 4 * i + j], bins=15)\n', (18074, 18113), True, 'import numpy as np\n'), ((18886, 18939), 'numpy.histogram', 'np.histogram', (['softmax_sum_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(softmax_sum_expl[:, 4 * i + j], bins=15)\n', (18898, 18939), True, 'import numpy as np\n'), ((23631, 23652), 'numpy.size', 'np.size', (['args.T_names'], {}), '(args.T_names)\n', (23638, 23652), True, 'import numpy as np\n'), ((23700, 23763), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'sharex': '"""col"""', 'sharey': '"""row"""', 'figsize': '(10, 5)'}), "(4, 4, sharex='col', sharey='row', figsize=(10, 5))\n", (23712, 23763), True, 'import matplotlib.pyplot as plt\n'), ((24332, 24350), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24348, 24350), True, 'import matplotlib.pyplot as plt\n'), ((42293, 42314), 'numpy.size', 'np.size', (['args.T_names'], {}), '(args.T_names)\n', (42300, 42314), True, 'import numpy as np\n'), ((42362, 42397), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', 
'(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (42374, 42397), True, 'import matplotlib.pyplot as plt\n'), ((42997, 43015), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (43013, 43015), True, 'import matplotlib.pyplot as plt\n'), ((43331, 43366), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (43343, 43366), True, 'import matplotlib.pyplot as plt\n'), ((43969, 43987), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (43985, 43987), True, 'import matplotlib.pyplot as plt\n'), ((44302, 44337), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (44314, 44337), True, 'import matplotlib.pyplot as plt\n'), ((44945, 44963), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (44961, 44963), True, 'import matplotlib.pyplot as plt\n'), ((45281, 45316), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (45293, 45316), True, 'import matplotlib.pyplot as plt\n'), ((46101, 46119), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (46117, 46119), True, 'import matplotlib.pyplot as plt\n'), ((46428, 46463), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (46440, 46463), True, 'import matplotlib.pyplot as plt\n'), ((47251, 47269), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (47267, 47269), True, 'import matplotlib.pyplot as plt\n'), ((47583, 47618), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (47595, 47618), True, 'import matplotlib.pyplot as plt\n'), ((48411, 48429), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (48427, 48429), True, 'import matplotlib.pyplot as plt\n'), ((48795, 
48830), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (48807, 48830), True, 'import matplotlib.pyplot as plt\n'), ((49615, 49633), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (49631, 49633), True, 'import matplotlib.pyplot as plt\n'), ((49954, 49982), 'numpy.zeros', 'np.zeros', (['(args.time_limit,)'], {}), '((args.time_limit,))\n', (49962, 49982), True, 'import numpy as np\n'), ((50175, 50210), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (50187, 50210), True, 'import matplotlib.pyplot as plt\n'), ((51273, 51291), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (51289, 51291), True, 'import matplotlib.pyplot as plt\n'), ((51849, 51884), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (51861, 51884), True, 'import matplotlib.pyplot as plt\n'), ((52528, 52546), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (52544, 52546), True, 'import matplotlib.pyplot as plt\n'), ((52809, 52844), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (52821, 52844), True, 'import matplotlib.pyplot as plt\n'), ((53567, 53585), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (53583, 53585), True, 'import matplotlib.pyplot as plt\n'), ((53877, 53912), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (53889, 53912), True, 'import matplotlib.pyplot as plt\n'), ((54515, 54533), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (54531, 54533), True, 'import matplotlib.pyplot as plt\n'), ((54850, 54866), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (54859, 54866), True, 'import matplotlib.pyplot as 
plt\n'), ((54940, 54975), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (54952, 54975), True, 'import matplotlib.pyplot as plt\n'), ((55948, 55966), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (55964, 55966), True, 'import matplotlib.pyplot as plt\n'), ((56452, 56487), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(10, 5)'}), '(4, 4, figsize=(10, 5))\n', (56464, 56487), True, 'import matplotlib.pyplot as plt\n'), ((57128, 57146), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (57144, 57146), True, 'import matplotlib.pyplot as plt\n'), ((19809, 19866), 'numpy.histogram', 'np.histogram', (['softmax_mean_expl[b][:, 4 * i + j]'], {'bins': '(15)'}), '(softmax_mean_expl[b][:, 4 * i + j], bins=15)\n', (19821, 19866), True, 'import numpy as np\n'), ((21837, 21858), 'numpy.size', 'np.size', (['args.T_names'], {}), '(args.T_names)\n', (21844, 21858), True, 'import numpy as np\n'), ((22133, 22151), 'numpy.shape', 'np.shape', (['p95_jump'], {}), '(p95_jump)\n', (22141, 22151), True, 'import numpy as np\n'), ((22201, 22219), 'numpy.shape', 'np.shape', (['p95_expl'], {}), '(p95_expl)\n', (22209, 22219), True, 'import numpy as np\n'), ((22224, 22242), 'numpy.shape', 'np.shape', (['p95_expl'], {}), '(p95_expl)\n', (22232, 22242), True, 'import numpy as np\n'), ((22293, 22312), 'numpy.shape', 'np.shape', (['p95_focus'], {}), '(p95_focus)\n', (22301, 22312), True, 'import numpy as np\n'), ((22317, 22336), 'numpy.shape', 'np.shape', (['p95_focus'], {}), '(p95_focus)\n', (22325, 22336), True, 'import numpy as np\n'), ((26662, 26735), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 5)', 'sharey': '(True)', 'sharex': '(True)'}), '(nrows=1, ncols=1, figsize=(10, 5), sharey=True, sharex=True)\n', (26674, 26735), True, 'import matplotlib.pyplot as plt\n'), ((35329, 35348), 
'numpy.shape', 'np.shape', (['raw_score'], {}), '(raw_score)\n', (35337, 35348), True, 'import numpy as np\n'), ((35398, 35422), 'numpy.shape', 'np.shape', (['raw_score_expl'], {}), '(raw_score_expl)\n', (35406, 35422), True, 'import numpy as np\n'), ((35427, 35451), 'numpy.shape', 'np.shape', (['raw_score_expl'], {}), '(raw_score_expl)\n', (35435, 35451), True, 'import numpy as np\n'), ((50082, 50105), 'numpy.sum', 'np.sum', (['raw_score[t, :]'], {}), '(raw_score[t, :])\n', (50088, 50105), True, 'import numpy as np\n'), ((64498, 64507), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (64504, 64507), True, 'import numpy as np\n'), ((67931, 67948), 'numpy.size', 'np.size', (['new_song'], {}), '(new_song)\n', (67938, 67948), True, 'import numpy as np\n'), ((68166, 68187), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (68175, 68187), True, 'import numpy as np\n'), ((69023, 69050), 'matplotlib.colors.PowerNorm', 'colors.PowerNorm', ([], {'gamma': '(0.5)'}), '(gamma=0.5)\n', (69039, 69050), True, 'import matplotlib.colors as colors\n'), ((52011, 52062), 'numpy.histogram', 'np.histogram', (['raw_score_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(raw_score_expl[:, 4 * i + j], bins=15)\n', (52023, 52062), True, 'import numpy as np\n'), ((56614, 56663), 'numpy.histogram', 'np.histogram', (['sensory_expl[:, 4 * i + j]'], {'bins': '(15)'}), '(sensory_expl[:, 4 * i + j], bins=15)\n', (56626, 56663), True, 'import numpy as np\n'), ((60142, 60177), 'numpy.shape', 'np.shape', (['cfr_class_expl_all[1][lr]'], {}), '(cfr_class_expl_all[1][lr])\n', (60150, 60177), True, 'import numpy as np\n'), ((60663, 60697), 'numpy.shape', 'np.shape', (['cfr_class_raw_all[1][lr]'], {}), '(cfr_class_raw_all[1][lr])\n', (60671, 60697), True, 'import numpy as np\n'), ((61453, 61488), 'numpy.shape', 'np.shape', (['cfr_class_expl_all[0][lr]'], {}), '(cfr_class_expl_all[0][lr])\n', (61461, 61488), True, 'import numpy as np\n'), ((61974, 62008), 'numpy.shape', 'np.shape', 
(['cfr_class_raw_all[0][lr]'], {}), '(cfr_class_raw_all[0][lr])\n', (61982, 62008), True, 'import numpy as np\n'), ((67858, 67879), 'numpy.where', 'np.where', (['(amp > 1e-05)'], {}), '(amp > 1e-05)\n', (67866, 67879), True, 'import numpy as np\n'), ((68267, 68281), 'numpy.abs', 'np.abs', (['(X ** 2)'], {}), '(X ** 2)\n', (68273, 68281), True, 'import numpy as np\n'), ((22659, 22678), 'numpy.shape', 'np.shape', (['p95_focus'], {}), '(p95_focus)\n', (22667, 22678), True, 'import numpy as np\n'), ((62760, 62781), 'numpy.shape', 'np.shape', (['sensory_gen'], {}), '(sensory_gen)\n', (62768, 62781), True, 'import numpy as np\n'), ((63383, 63405), 'numpy.shape', 'np.shape', (['sensory_expl'], {}), '(sensory_expl)\n', (63391, 63405), True, 'import numpy as np\n'), ((50394, 50438), 'numpy.where', 'np.where', (['(raw_score_expl[:, 4 * i + j] > 0.9)'], {}), '(raw_score_expl[:, 4 * i + j] > 0.9)\n', (50402, 50438), True, 'import numpy as np\n'), ((52119, 52128), 'numpy.max', 'np.max', (['h'], {}), '(h)\n', (52125, 52128), True, 'import numpy as np\n'), ((55166, 55208), 'numpy.where', 'np.where', (['(sensory_expl[:, 4 * i + j] > 0.9)'], {}), '(sensory_expl[:, 4 * i + j] > 0.9)\n', (55174, 55208), True, 'import numpy as np\n'), ((56718, 56727), 'numpy.max', 'np.max', (['h'], {}), '(h)\n', (56724, 56727), True, 'import numpy as np\n'), ((57956, 57989), 'numpy.shape', 'np.shape', (['cfr_class_A_all[cl][lr]'], {}), '(cfr_class_A_all[cl][lr])\n', (57964, 57989), True, 'import numpy as np\n'), ((59074, 59109), 'numpy.shape', 'np.shape', (['cfr_class_raw_all[cl][lr]'], {}), '(cfr_class_raw_all[cl][lr])\n', (59082, 59109), True, 'import numpy as np\n'), ((42550, 42574), 'numpy.shape', 'np.shape', (['max_score_expl'], {}), '(max_score_expl)\n', (42558, 42574), True, 'import numpy as np\n'), ((43519, 43542), 'numpy.shape', 'np.shape', (['max_norm_expl'], {}), '(max_norm_expl)\n', (43527, 43542), True, 'import numpy as np\n'), ((44490, 44516), 'numpy.shape', 'np.shape', 
(['max_scaling_expl'], {}), '(max_scaling_expl)\n', (44498, 44516), True, 'import numpy as np\n'), ((45464, 45483), 'numpy.shape', 'np.shape', (['max_score'], {}), '(max_score)\n', (45472, 45483), True, 'import numpy as np\n'), ((46611, 46629), 'numpy.shape', 'np.shape', (['max_norm'], {}), '(max_norm)\n', (46619, 46629), True, 'import numpy as np\n'), ((47766, 47787), 'numpy.shape', 'np.shape', (['max_scaling'], {}), '(max_scaling)\n', (47774, 47787), True, 'import numpy as np\n'), ((48978, 48997), 'numpy.shape', 'np.shape', (['raw_score'], {}), '(raw_score)\n', (48986, 48997), True, 'import numpy as np\n'), ((50632, 50656), 'numpy.shape', 'np.shape', (['raw_score_expl'], {}), '(raw_score_expl)\n', (50640, 50656), True, 'import numpy as np\n'), ((52992, 53013), 'numpy.shape', 'np.shape', (['sensory_gen'], {}), '(sensory_gen)\n', (53000, 53013), True, 'import numpy as np\n'), ((54065, 54091), 'numpy.shape', 'np.shape', (['sensory_expl_all'], {}), '(sensory_expl_all)\n', (54073, 54091), True, 'import numpy as np\n'), ((55313, 55335), 'numpy.shape', 'np.shape', (['sensory_expl'], {}), '(sensory_expl)\n', (55321, 55335), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 17:13:10 2018
@author: quinn
"""
import h5py
import json
import numpy as np
from model import model
## Section on reading weights
def read_weights(weights):
    """Recursively convert an HDF5 weights group into nested dicts of arrays.

    A Dataset leaf becomes a numpy array; a group becomes a dict keyed by
    its child names.
    """
    if isinstance(weights, h5py.Dataset):
        return np.asarray(weights)
    return {key: read_weights(weights[key]) for key in weights.keys()}
## Section on reading config
def read_config(config):
    """Parse a serialized Keras model config into ([layer names], [layer configs]).

    :param config: the JSON model config as bytes or str.  h5py >= 3 returns
        string attributes already decoded, older versions return bytes; the
        original only handled bytes and crashed with AttributeError on str.
    :return: a two-element list [names, configs]
    """
    out = [[],[]]
    if isinstance(config, bytes):
        config = config.decode('utf-8')
    con = json.loads(config)
    if isinstance(con['config'],list):
        # Sequential models store the layer list directly.
        return read_layers(con['config'])
    # Functional models nest the layers under 'layers'.
    m = con['config']['layers']
    for layer in m:
        a, b = read_model(layer)
        out[0] += a
        out[1] += b
    return out
def read_model(m):
    """Collect ([names], [configs]) from a single nested model entry."""
    names, configs = read_layers(m['config'])
    return [list(names), list(configs)]
def read_layers(layers):
    """Flatten a layer description (dict) or list of them into ([names], [configs])."""
    if isinstance(layers, dict):
        return read_layer(layers)
    names, configs = [], []
    if isinstance(layers, list):
        for entry in layers:
            sub_names, sub_configs = read_layers(entry)
            names.extend(sub_names)
            configs.extend(sub_configs)
    return [names, configs]
def read_layer(layer):
    """Extract ([name], [config]) from a single layer description dict.

    Accepts either the wrapped form {'config': {...}} or a bare config
    that carries its own 'name' key.
    """
    if 'config' in layer:
        cfg = layer['config']
        return [[cfg['name']], [cfg]]
    if 'name' in layer:
        return [[layer['name']], [layer]]
    raise Exception(" unable to parse layer ")
## Section on building model
def build_model(filename, name='NONE'):
    """Load a Keras HDF5 file and assemble an internal `model` from it.

    :param filename: path to the Keras .h5/.hdf5 file
    :param name: name for the generated model (used as the C prefix)
    :return: populated `model` instance
    :raises Exception: if the file holds no weights or no model config
    """
    m = model(name)
    # Context manager guarantees the HDF5 handle is closed even when an
    # exception is raised mid-parse (the original leaked it in that case).
    with h5py.File(filename) as file:
        w_index, layers, c_index = None, None, None
        for k in file.keys():
            if 'model_weights' in k:
                w_index = read_weights(file[k])
        if w_index is None:
            raise Exception("no model weights read")
        if 'model_config' in file.attrs.keys():
            layers, c_index = read_config(file.attrs['model_config'])
        if layers is None:
            # Previously this fell through to enumerate(None) -> TypeError.
            raise Exception("no model config read")
        for i, n in enumerate(layers):
            if n in w_index:
                print(n)
                m.add_layer(n, c_index[i], w_index[n])
            else:
                m.add_layer(n, c_index[i], None)
    return m
def convert_model(filename, name='NONE', path='./', verbose=True):
    """Convert a Keras HDF5 model into a C source/header file pair.

    :param filename: path to the Keras model file
    :param name: prefix for the generated function calls and file names
    :param path: output directory for the .c and .h files
    :param verbose: if True, echo the generated code sections to stdout
    """
    m = build_model(filename, name)
    if verbose:
        print(m.p_def())
        print(m.p_func_call())
        print(m.p_header())
    # `with` closes the output files even if a write fails; the original
    # leaked both handles on exception.
    with open(path + name + '.c', 'w') as c_file:
        c_file.write("""/**This file was auto generated using the NN 2 CMSIS library
 * More information can be found at github.com/quinnabrvau/Keras_2_CMSIS
**/\n""")
        c_file.write('#include "' + name + '.h"\n')
        c_file.write(m.p_def())
        c_file.write(m.p_func_call())
    with open(path + name + '.h', 'w') as h_file:
        h_file.write(m.p_header())
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="program to takes keras files and converts them to strive C, H files")
    parser.add_argument('model', help="path to source model")
    # nargs='?' is required for the defaults to take effect: argparse
    # ignores `default=` on required positional arguments, so these two
    # were previously mandatory despite declaring defaults.
    parser.add_argument('out_name', nargs='?', default='_no_name_', help="prefix for function calls and file name")
    parser.add_argument('out_path', nargs='?', default='./', help="path to put files")
    args = parser.parse_args()
    print("saving output in", args.out_path + args.out_name + '.c',
          ' and ', args.out_path + args.out_name + '.h')
    convert_model(args.model,
                  path=args.out_path,
                  name=args.out_name,
                  verbose=True)
| [
"numpy.asarray",
"model.model",
"argparse.ArgumentParser",
"h5py.File"
] | [((1473, 1484), 'model.model', 'model', (['name'], {}), '(name)\n', (1478, 1484), False, 'from model import model\n'), ((1496, 1515), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (1505, 1515), False, 'import h5py\n'), ((2758, 2869), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""program to takes keras files and converts them to strive C, H files"""'}), "(description=\n 'program to takes keras files and converts them to strive C, H files')\n", (2781, 2869), False, 'import argparse\n'), ((306, 325), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (316, 325), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
#
# This file is part of S4D.
#
# SD4 is a python package for speaker diarization based on SIDEKIT.
# S4D home page: http://www-lium.univ-lemans.fr/s4d/
# SIDEKIT home page: http://www-lium.univ-lemans.fr/sidekit/
#
# S4D is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# S4D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with SIDEKIT. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'meignier'
import sys
import os
from sidekit.features_extractor import FeaturesExtractor
from sidekit.features_server import FeaturesServer
import logging
import re
import numpy
def str2str_normalize(name):
    """
    Lower-case *name*, fold accented Latin characters to their ASCII base,
    and map apostrophes and hyphens to underscores; runs of underscores are
    collapsed to a single one.

    :param name: the string to normalize
    :return: the normalized string
    """
    # One translation table handles accent folding plus "'" and "-" -> "_"
    # in a single pass (the three mappings are independent of each other,
    # so merging the original's three translate() calls preserves behavior).
    table = str.maketrans(
        'ÀÁÂÃÄÅàáâãäåÒÓÔÕÖØòóôõöøÈÉÊËèéêëÇçÌÍÎÏìíîïÙÚÛÜùúûüÿÑñ' + "'-",
        'AAAAAAaaaaaaOOOOOOooooooEEEEeeeeCcIIIIiiiiUUUUuuuuyNn' + '__')
    name = name.translate(table).lower()
    return re.sub('_+', '_', name)
def path_show_ext(fullpath, shortext=False):
    """
    Split a full file path into directory, basename and extension.

    :param fullpath: the path to split
    :param shortext: if False (default), the extension is the full chain of
        suffixes (e.g. '.tar.gz'); if True, only the last suffix ('.gz')
    :return: (path, base, ext); path is '.' when *fullpath* has no directory
    """
    p, ext = os.path.splitext(fullpath)
    if not shortext:
        # Keep stripping suffixes so 'a.tar.gz' yields ext='.tar.gz'.
        tmp = os.path.splitext(p)
        while tmp[1] != '':
            ext = tmp[1] + ext
            p = tmp[0]
            tmp = os.path.splitext(p)
    path = os.path.dirname(p)
    if path == '':
        path = '.'
    base = os.path.basename(p)
    return path, base, ext
def levenshtein_distance(s1, s2):
    """Return the edit (Levenshtein) distance between the two sequences."""
    # Keep s1 as the shorter sequence so the rolling row stays small.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    row = list(range(len(s1) + 1))
    for j, c2 in enumerate(s2):
        next_row = [j + 1]
        for i, c1 in enumerate(s1):
            if c1 == c2:
                next_row.append(row[i])
            else:
                # min of substitution, deletion, insertion costs
                next_row.append(1 + min(row[i], row[i + 1], next_row[-1]))
        row = next_row
    return row[-1]
def hms(s):
    """
    Format a duration given in seconds as 'hours:minutes:seconds.ss'.

    :param s: duration in seconds (int or float)
    :return: formatted string, seconds shown with two decimals
    """
    hours = int(s) // 3600
    s %= 3600
    minutes = int(s) // 60
    s %= 60
    return f'{hours:d}:{minutes:d}:{s:.2f}'
def get_feature_extractor(audio_filename_structure, type_feature_extractor):
    """Build a sidekit FeaturesExtractor preset selected by name.

    :param audio_filename_structure: sidekit-style template used to locate
        the audio files
    :param type_feature_extractor: one of 'sid', 'sid8k', '8k', '8kcms',
        '8ksns' or 'basic'; any other value logs an error and returns None
    :return: a configured FeaturesExtractor, or None for unknown presets
    """
    if type_feature_extractor == 'sid':
        # 16 kHz speaker-id preset: 40 log filter banks, 13 cepstra,
        # percentile-based VAD.
        fe = FeaturesExtractor(audio_filename_structure=audio_filename_structure,
                               feature_filename_structure=None,
                               sampling_frequency=16000,
                               lower_frequency=133.3333,
                               higher_frequency=6855.4976,
                               filter_bank="log",
                               filter_bank_size=40,
                               window_size=0.025,
                               shift=0.01,
                               ceps_number=13,
                               pre_emphasis=0.97,
                               keep_all_features=True,
                               vad='percentil',
                               #vad=None,
                               save_param=["energy", "cep", "vad"]
                               )
    elif type_feature_extractor == 'sid8k':
        # 8 kHz speaker-id preset: 24 filter banks, 12 cepstra, no VAD.
        fe = FeaturesExtractor(audio_filename_structure=audio_filename_structure,
                               feature_filename_structure=None,
                               sampling_frequency=8000,
                               lower_frequency=0,
                               higher_frequency=4000,
                               filter_bank="log",
                               filter_bank_size=24,
                               window_size=0.025,
                               shift=0.01,
                               ceps_number=12,
                               pre_emphasis=0.95,
                               keep_all_features=True,
                               #vad='percentil',
                               vad=None,
                               save_param=["energy", "cep", "vad"]
                               )
    elif type_feature_extractor == '8k' or type_feature_extractor == '8kcms'\
            or type_feature_extractor == '8ksns':
        # Shared 8 kHz preset (13 cepstra); the '8k*' variants differ only
        # in the FeaturesServer normalization applied downstream.
        fe = FeaturesExtractor(audio_filename_structure=audio_filename_structure,
                               feature_filename_structure=None,
                               sampling_frequency=8000,
                               lower_frequency=0,
                               higher_frequency=4000,
                               filter_bank="log",
                               filter_bank_size=24,
                               window_size=0.025,
                               shift=0.01,
                               ceps_number=13,
                               pre_emphasis=0.97,
                               keep_all_features=True,
                               #vad='percentil',
                               vad=None,
                               save_param=["energy", "cep", "vad"]
                               )
    elif type_feature_extractor == 'basic':
        # Same front-end as 'sid' but without any VAD.
        fe = FeaturesExtractor(audio_filename_structure=audio_filename_structure,
                               feature_filename_structure=None,
                               sampling_frequency=16000,
                               lower_frequency=133.3333,
                               higher_frequency=6855.4976,
                               filter_bank="log",
                               filter_bank_size=40,
                               window_size=0.025,
                               shift=0.01,
                               ceps_number=13,
                               pre_emphasis=0.97,
                               keep_all_features=True,
                               vad=None,
                               save_param=["energy", "cep", "vad"]
                               )
    else:
        logging.error('in get_feature_server, type_fe not found: ' + type_feature_extractor)
        return None
    return fe
def get_feature_server(filename_structure, feature_server_type):
    """Build a sidekit FeaturesServer preset selected by name.

    If *filename_structure* points at .h5/.hdf5 files, features are loaded
    directly from them; otherwise a FeaturesExtractor (same preset name) is
    built to compute features from audio.

    :param filename_structure: template for either feature (.h5/.hdf5) or
        audio files
    :param feature_server_type: one of 'basic', 'sns', 'sns_dnn', 'sid',
        'sid8k', '8k', '8ksns', '8kcms' or 'vad'; any other value logs an
        error and returns None
    :return: a configured FeaturesServer, or None for unknown presets
    """
    path, show, ext = path_show_ext(filename_structure)
    feature_filename_structure = None
    logging.info(path+' ## '+show+' ## '+ext)
    if ext.endswith('.h5') or ext.endswith('.hdf5'):
        # Pre-computed features: read them directly, no extractor needed.
        feature_extractor = None
        feature_filename_structure = filename_structure
        logging.info('feature extractor --> None')
    else:
        audio_filename_structure = filename_structure
        feature_extractor = get_feature_extractor(audio_filename_structure, type_feature_extractor=feature_server_type)
    logging.info('-'*20)
    logging.info(feature_extractor)
    logging.info('-'*20)
    # NOTE(review): in several presets below dataset_list=('cep') is the
    # string 'cep', not a 1-tuple — presumably sidekit accepts both forms;
    # confirm before "fixing".
    if feature_server_type == 'basic':
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('energy', 'cep'),
                                        keep_all_features=True)
    elif feature_server_type == 'sns':
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('cep'),
                                        delta=True,
                                        keep_all_features=True)
    elif feature_server_type == 'sns_dnn':
        # Adds a +/-31-frame context window for DNN input stacking.
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('cep'),
                                        delta=True,
                                        context=(31, 31),
                                        keep_all_features=True)
    elif feature_server_type == 'sid':
        # Full speaker-id pipeline: sliding CMVN plus delta/double-delta.
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('energy', 'cep'),
                                        feat_norm='cmvn_sliding',
                                        delta=True,
                                        double_delta=True,
                                        keep_all_features=True)
    elif feature_server_type == 'sid8k':
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('cep'),
                                        feat_norm='cmvn_sliding',
                                        delta=True,
                                        double_delta=False,
                                        keep_all_features=True)
    elif feature_server_type == '8k':
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('cep'),
                                        #delta=True,
                                        keep_all_features=True)
    elif feature_server_type == '8ksns':
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('cep'),
                                        delta=True,
                                        keep_all_features=True)
    elif feature_server_type == '8kcms':
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('cep'),
                                        feat_norm='cms',
                                        #delta=True,
                                        keep_all_features=True)
    elif feature_server_type == 'vad':
        # Energy-only server, used for voice activity detection.
        feature_server = FeaturesServer(features_extractor=feature_extractor,
                                        feature_filename_structure=feature_filename_structure,
                                        dataset_list=('energy'),
                                        keep_all_features=True)
    else:
        logging.error('in get_feature_server, feature_server_type not found: ' + feature_server_type)
        return None
    logging.info(feature_server)
    return feature_server
# def get_feature_server(input_dir='./{s}.h5', feature_server_type):
# logging.info('get_feature_server type: '+feature_server_type)
# if feature_server_type == 'diarization':
# return FeaturesServer_test(input_dir=input_dir,
# config='diar_16k')
# elif feature_server_type == 'sid':
# return FeaturesServer_test(input_dir=input_dir,
# config='diar_16k', log_e=True, delta=True,
# double_delta=True, feat_norm='cms_sliding')
# elif feature_server_type == 'sad':
# return FeaturesServer_test(input_dir=input_dir,
# config='diar_16k', log_e=False, delta=True, double_delta=False)
# else:
# logging.error('in get_feature_server, feature_server_type not found: ' + feature_server_type)
# return None
# def save_mfcc(diarization, audio_dir, mfcc_fn, feature_server_type):
# fh = h5py.File(mfcc_fn, "w")
# diar_out = diarization.copy_structure()
# shows = diarization.make_index(['show'])
#
# for show in shows:
# # logging.info('mfcc: '+ show)
# show_diar = shows[show]
# model_iv = ModelIV()
# feature_server = get_feature_server(audio_dir, feature_server_type=feature_server_type)
# model_iv.set_feature_server(feature_server)
# model_iv.set_diar(show_diar)
# if feature_server_type == 'sid':
# model_iv.vad()
# else:
# model_iv.diar_vad = show_diar
#
# cep_full, _ = feature_server.load(show)
# cluster_list = model_iv.diar_vad.make_index(['cluster'])
# index = model_iv.diar_vad.features_by_cluster(show=show, cep_len=cep_full.shape[0])
# for cluster in cluster_list:
# logging.info('mfcc: '+show+' '+cluster)
# mfcc_fn = show+'/'+cluster
# cep = cep_full[index[cluster], :]
# vad = numpy.ones(cep.shape[0])
# diar_out.append(show=mfcc_fn, start=0, stop=cep.shape[0], cluster=cluster)
# logging.info(cep.shape)
# write_hdf5(mfcc_fn, fh, cep, None, None, None, label=vad)
# return diar_out
# def save_mfcc(diarization, audio_dir='./', mfcc_fn='./out.h5', feature_server_type='sid'):
# fh = h5py.File(mfcc_fn, "w")
# shows = diarization.unique('show')
# diar_out = diarization.copy_structure()
# shows = diarization.make_index(['show'])
# for show in shows:
# # logging.info('mfcc: '+ show)
# show_diar = shows[show]
# model_iv = ModelIV()
# feature_server = get_feature_server(audio_dir, feature_server_type=feature_server_type)
# model_iv.set_feature_server(feature_server)
# model_iv.set_diar(show_diar)
# if feature_server_type == 'sid':
# model_iv.vad()
# else:
# model_iv.diar_vad = show_diar
# feature_server.load(show)
# cep_full = feature_server.cep[0]
# cluster_list = model_iv.diar_vad.make_index(['cluster'])
# index = model_iv.diar_vad.features_by_cluster(show=show, cep_len=cep_full.shape[0])
# for cluster in cluster_list:
# logging.info('mfcc: '+show+' '+cluster)
# mfcc_fn = show+'/'+cluster
# cep = cep_full[index[cluster], :]
# vad = numpy.ones(cep.shape[0])
# diar_out.append(show=mfcc_fn, start=0, stop=cep.shape[0], cluster=cluster)
# write_hdf5(mfcc_fn, fh, cep, label=vad)
# return diar_out
class FeatureServerFake(FeaturesServer):
    """Minimal FeaturesServer stand-in that always serves one fixed feature matrix."""

    def __init__(self, cep):
        # The features returned by every load() call, regardless of show.
        self.cep = cep

    def load(self, show, channel=0, input_feature_filename=None, label=None, start=None, stop=None):
        """Ignore all arguments; return the stored features with an all-True VAD."""
        vad = numpy.ones(self.cep.shape[0], dtype='bool')
        return self.cep, vad
class FeatureServerCache(FeaturesServer):
    """In-memory cache wrapped around another FeaturesServer.

    Features are cached per (show, label) key.  Only complete loads are
    cached, so that later start/stop requests can safely be served by
    slicing the cached matrix.
    """

    def __init__(self, featuresServer):
        # key -> full cepstral feature matrix
        self.shows = dict()
        self.featuresServer = featuresServer

    def load(self, show, channel=0, input_feature_filename=None, label=None, start=None, stop=None):
        """Return (features, vad_labels) for *show*, serving from cache when possible.

        On a cache hit the VAD labels are synthesized as all-True (matching
        the previous behavior); on a miss the underlying server's labels
        are returned unchanged.
        """
        key = show
        if label is not None:
            key += '##' + label
        if key in self.shows:
            cep = self.shows[key][start:stop, :]
            return cep, numpy.ones(cep.shape[0], dtype='bool')
        cep, lbl = self.featuresServer.load(show, label=label, start=start, stop=stop)
        # Only cache complete loads: caching a start/stop slice under the
        # plain key used to poison the cache, so a later hit sliced the
        # already-sliced matrix and returned the wrong frames.
        if start is None and stop is None:
            self.shows[key] = cep
        return cep, lbl
| [
"numpy.ones",
"os.path.splitext",
"sidekit.features_server.FeaturesServer",
"os.path.dirname",
"os.path.basename",
"re.sub",
"sidekit.features_extractor.FeaturesExtractor",
"logging.info",
"logging.error"
] | [((1528, 1551), 're.sub', 're.sub', (['"""_+"""', '"""_"""', 'name'], {}), "('_+', '_', name)\n", (1534, 1551), False, 'import re\n'), ((1764, 1790), 'os.path.splitext', 'os.path.splitext', (['fullpath'], {}), '(fullpath)\n', (1780, 1790), False, 'import os\n'), ((1981, 1999), 'os.path.dirname', 'os.path.dirname', (['p'], {}), '(p)\n', (1996, 1999), False, 'import os\n'), ((2049, 2068), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (2065, 2068), False, 'import os\n'), ((5868, 5917), 'logging.info', 'logging.info', (["(path + ' ## ' + show + ' ## ' + ext)"], {}), "(path + ' ## ' + show + ' ## ' + ext)\n", (5880, 5917), False, 'import logging\n'), ((6291, 6313), 'logging.info', 'logging.info', (["('-' * 20)"], {}), "('-' * 20)\n", (6303, 6313), False, 'import logging\n'), ((6316, 6347), 'logging.info', 'logging.info', (['feature_extractor'], {}), '(feature_extractor)\n', (6328, 6347), False, 'import logging\n'), ((6352, 6374), 'logging.info', 'logging.info', (["('-' * 20)"], {}), "('-' * 20)\n", (6364, 6374), False, 'import logging\n'), ((9395, 9423), 'logging.info', 'logging.info', (['feature_server'], {}), '(feature_server)\n', (9407, 9423), False, 'import logging\n'), ((3080, 3466), 'sidekit.features_extractor.FeaturesExtractor', 'FeaturesExtractor', ([], {'audio_filename_structure': 'audio_filename_structure', 'feature_filename_structure': 'None', 'sampling_frequency': '(16000)', 'lower_frequency': '(133.3333)', 'higher_frequency': '(6855.4976)', 'filter_bank': '"""log"""', 'filter_bank_size': '(40)', 'window_size': '(0.025)', 'shift': '(0.01)', 'ceps_number': '(13)', 'pre_emphasis': '(0.97)', 'keep_all_features': '(True)', 'vad': '"""percentil"""', 'save_param': "['energy', 'cep', 'vad']"}), "(audio_filename_structure=audio_filename_structure,\n feature_filename_structure=None, sampling_frequency=16000,\n lower_frequency=133.3333, higher_frequency=6855.4976, filter_bank='log',\n filter_bank_size=40, window_size=0.025, shift=0.01, 
ceps_number=13,\n pre_emphasis=0.97, keep_all_features=True, vad='percentil', save_param=\n ['energy', 'cep', 'vad'])\n", (3097, 3466), False, 'from sidekit.features_extractor import FeaturesExtractor\n'), ((6060, 6102), 'logging.info', 'logging.info', (['"""feature extractor --> None"""'], {}), "('feature extractor --> None')\n", (6072, 6102), False, 'import logging\n'), ((6437, 6609), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': "('energy', 'cep')", 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=(\n 'energy', 'cep'), keep_all_features=True)\n", (6451, 6609), False, 'from sidekit.features_server import FeaturesServer\n'), ((1895, 1914), 'os.path.splitext', 'os.path.splitext', (['p'], {}), '(p)\n', (1911, 1914), False, 'import os\n'), ((3709, 4075), 'sidekit.features_extractor.FeaturesExtractor', 'FeaturesExtractor', ([], {'audio_filename_structure': 'audio_filename_structure', 'feature_filename_structure': 'None', 'sampling_frequency': '(8000)', 'lower_frequency': '(0)', 'higher_frequency': '(4000)', 'filter_bank': '"""log"""', 'filter_bank_size': '(24)', 'window_size': '(0.025)', 'shift': '(0.01)', 'ceps_number': '(12)', 'pre_emphasis': '(0.95)', 'keep_all_features': '(True)', 'vad': 'None', 'save_param': "['energy', 'cep', 'vad']"}), "(audio_filename_structure=audio_filename_structure,\n feature_filename_structure=None, sampling_frequency=8000,\n lower_frequency=0, higher_frequency=4000, filter_bank='log',\n filter_bank_size=24, window_size=0.025, shift=0.01, ceps_number=12,\n pre_emphasis=0.95, keep_all_features=True, vad=None, save_param=[\n 'energy', 'cep', 'vad'])\n", (3726, 4075), False, 'from sidekit.features_extractor import FeaturesExtractor\n'), ((6716, 6888), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', 
([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""cep"""', 'delta': '(True)', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'cep', delta=True, keep_all_features=True)\n", (6730, 6888), False, 'from sidekit.features_server import FeaturesServer\n'), ((13175, 13218), 'numpy.ones', 'numpy.ones', (['self.cep.shape[0]'], {'dtype': '"""bool"""'}), "(self.cep.shape[0], dtype='bool')\n", (13185, 13218), False, 'import numpy\n'), ((4409, 4775), 'sidekit.features_extractor.FeaturesExtractor', 'FeaturesExtractor', ([], {'audio_filename_structure': 'audio_filename_structure', 'feature_filename_structure': 'None', 'sampling_frequency': '(8000)', 'lower_frequency': '(0)', 'higher_frequency': '(4000)', 'filter_bank': '"""log"""', 'filter_bank_size': '(24)', 'window_size': '(0.025)', 'shift': '(0.01)', 'ceps_number': '(13)', 'pre_emphasis': '(0.97)', 'keep_all_features': '(True)', 'vad': 'None', 'save_param': "['energy', 'cep', 'vad']"}), "(audio_filename_structure=audio_filename_structure,\n feature_filename_structure=None, sampling_frequency=8000,\n lower_frequency=0, higher_frequency=4000, filter_bank='log',\n filter_bank_size=24, window_size=0.025, shift=0.01, ceps_number=13,\n pre_emphasis=0.97, keep_all_features=True, vad=None, save_param=[\n 'energy', 'cep', 'vad'])\n", (4426, 4775), False, 'from sidekit.features_extractor import FeaturesExtractor\n'), ((7018, 7208), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""cep"""', 'delta': '(True)', 'context': '(31, 31)', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'cep', delta=True, context=(31, 31), 
keep_all_features=True)\n", (7032, 7208), False, 'from sidekit.features_server import FeaturesServer\n'), ((13859, 13897), 'numpy.ones', 'numpy.ones', (['cep.shape[0]'], {'dtype': '"""bool"""'}), "(cep.shape[0], dtype='bool')\n", (13869, 13897), False, 'import numpy\n'), ((5025, 5404), 'sidekit.features_extractor.FeaturesExtractor', 'FeaturesExtractor', ([], {'audio_filename_structure': 'audio_filename_structure', 'feature_filename_structure': 'None', 'sampling_frequency': '(16000)', 'lower_frequency': '(133.3333)', 'higher_frequency': '(6855.4976)', 'filter_bank': '"""log"""', 'filter_bank_size': '(40)', 'window_size': '(0.025)', 'shift': '(0.01)', 'ceps_number': '(13)', 'pre_emphasis': '(0.97)', 'keep_all_features': '(True)', 'vad': 'None', 'save_param': "['energy', 'cep', 'vad']"}), "(audio_filename_structure=audio_filename_structure,\n feature_filename_structure=None, sampling_frequency=16000,\n lower_frequency=133.3333, higher_frequency=6855.4976, filter_bank='log',\n filter_bank_size=40, window_size=0.025, shift=0.01, ceps_number=13,\n pre_emphasis=0.97, keep_all_features=True, vad=None, save_param=[\n 'energy', 'cep', 'vad'])\n", (5042, 5404), False, 'from sidekit.features_extractor import FeaturesExtractor\n'), ((5584, 5672), 'logging.error', 'logging.error', (["('in get_feature_server, type_fe not found: ' + type_feature_extractor)"], {}), "('in get_feature_server, type_fe not found: ' +\n type_feature_extractor)\n", (5597, 5672), False, 'import logging\n'), ((7351, 7585), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': "('energy', 'cep')", 'feat_norm': '"""cmvn_sliding"""', 'delta': '(True)', 'double_delta': '(True)', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=(\n 'energy', 'cep'), feat_norm='cmvn_sliding', delta=True, 
double_delta=\n True, keep_all_features=True)\n", (7365, 7585), False, 'from sidekit.features_server import FeaturesServer\n'), ((7740, 7962), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""cep"""', 'feat_norm': '"""cmvn_sliding"""', 'delta': '(True)', 'double_delta': '(False)', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'cep', feat_norm='cmvn_sliding', delta=True, double_delta=False,\n keep_all_features=True)\n", (7754, 7962), False, 'from sidekit.features_server import FeaturesServer\n'), ((8117, 8277), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""cep"""', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'cep', keep_all_features=True)\n", (8131, 8277), False, 'from sidekit.features_server import FeaturesServer\n'), ((8418, 8590), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""cep"""', 'delta': '(True)', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'cep', delta=True, keep_all_features=True)\n", (8432, 8590), False, 'from sidekit.features_server import FeaturesServer\n'), ((8718, 8895), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""cep"""', 'feat_norm': '"""cms"""', 'keep_all_features': '(True)'}), 
"(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'cep', feat_norm='cms', keep_all_features=True)\n", (8732, 8895), False, 'from sidekit.features_server import FeaturesServer\n'), ((9051, 9214), 'sidekit.features_server.FeaturesServer', 'FeaturesServer', ([], {'features_extractor': 'feature_extractor', 'feature_filename_structure': 'feature_filename_structure', 'dataset_list': '"""energy"""', 'keep_all_features': '(True)'}), "(features_extractor=feature_extractor,\n feature_filename_structure=feature_filename_structure, dataset_list=\n 'energy', keep_all_features=True)\n", (9065, 9214), False, 'from sidekit.features_server import FeaturesServer\n'), ((9277, 9374), 'logging.error', 'logging.error', (["('in get_feature_server, feature_server_type not found: ' + feature_server_type\n )"], {}), "('in get_feature_server, feature_server_type not found: ' +\n feature_server_type)\n", (9290, 9374), False, 'import logging\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian
from a2c_ppo_acktr.utils import init
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Policy(nn.Module):
    """Actor-critic policy: a feature-extractor base plus an action-distribution head.

    The distribution type is chosen from the action space's class name:
    Discrete -> Categorical, Box -> DiagGaussian, MultiBinary -> Bernoulli.
    """

    def __init__(self, obs_shape, action_space, base=None, base_kwargs=None, use_pnn=False):
        """
        :param obs_shape: observation shape; 3-D selects CNNBase, 1-D MLPBase
        :param action_space: gym-style action space (inspected by class name)
        :param base: optional base-network class overriding the default choice
        :param base_kwargs: kwargs forwarded to the base constructor
        :param use_pnn: if True, force the PNNConvBase backbone
        """
        super(Policy, self).__init__()
        if base_kwargs is None:
            base_kwargs = {}
        if base is None:
            # Pick a default base network from the observation shape.
            if use_pnn:
                base = PNNConvBase
            elif len(obs_shape) == 3:
                base = CNNBase
            elif len(obs_shape) == 1:
                base = MLPBase
            else:
                raise NotImplementedError
        self.base = base(obs_shape[0], **base_kwargs)

        if action_space.__class__.__name__ == "Discrete":
            num_outputs = action_space.n
            self.dist = Categorical(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "Box":
            num_outputs = action_space.shape[0]
            self.dist = DiagGaussian(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "MultiBinary":
            num_outputs = action_space.shape[0]
            self.dist = Bernoulli(self.base.output_size, num_outputs)
        else:
            raise NotImplementedError

    @property
    def is_recurrent(self):
        return self.base.is_recurrent

    @property
    def recurrent_hidden_state_size(self):
        """Size of rnn_hx."""
        return self.base.recurrent_hidden_state_size

    def forward(self, inputs, rnn_hxs, masks):
        raise NotImplementedError

    def act(self, inputs, rnn_hxs, masks, deterministic=False):
        """Sample an action (or take the mode) and return the value estimate.

        :return: (value, action, action_log_probs, rnn_hxs)
        """
        value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)

        action = dist.mode() if deterministic else dist.sample()
        action_log_probs = dist.log_probs(action)
        # The entropy previously computed here was never used or returned,
        # so the dead computation has been removed.
        return value, action, action_log_probs, rnn_hxs

    def get_value(self, inputs, rnn_hxs, masks):
        """Return only the critic's value estimate."""
        value, _, _ = self.base(inputs, rnn_hxs, masks)
        return value

    def evaluate_actions(self, inputs, rnn_hxs, masks, action):
        """Evaluate a stored action for a PPO/A2C update.

        :return: (value, action_log_probs, mean entropy, rnn_hxs)
        """
        value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)

        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()
        return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
    """Common base for policy networks; optionally wraps a GRU for recurrence."""

    def __init__(self, recurrent, recurrent_input_size, hidden_size):
        """
        :param recurrent: if True, create a GRU over the extracted features
        :param recurrent_input_size: feature size fed to the GRU
        :param hidden_size: GRU hidden size, also reported as output_size
        """
        super(NNBase, self).__init__()

        self._hidden_size = hidden_size
        self._recurrent = recurrent

        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal weights and zero biases for the GRU parameters.
            for name, param in self.gru.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                elif 'weight' in name:
                    nn.init.orthogonal_(param)

    @property
    def is_recurrent(self):
        return self._recurrent

    @property
    def recurrent_hidden_state_size(self):
        # Size 1 is a placeholder so non-recurrent rollout storage still
        # allocates a (dummy) hidden-state buffer.
        if self._recurrent:
            return self._hidden_size
        return 1

    @property
    def output_size(self):
        return self._hidden_size

    def _forward_gru(self, x, hxs, masks):
        """Run the GRU over either a single step or a flattened rollout.

        If x has the same leading dimension as hxs it is a single step for N
        processes; otherwise x is a (T*N, -1) flattening of a (T, N, -1)
        rollout. masks is 0.0 at episode boundaries and is multiplied into
        the hidden state to reset it.
        """
        if x.size(0) == hxs.size(0):
            # Single-step path: mask resets terminated episodes' hidden state.
            x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = hxs.size(0)
            T = int(x.size(0) / N)

            # unflatten
            x = x.view(T, N, x.size(1))

            # Same deal with masks
            masks = masks.view(T, N)

            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = ((masks[1:] == 0.0) \
                            .any(dim=-1)
                            .nonzero()
                            .squeeze()
                            .cpu())

            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()

            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]

            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]

                # Reset hidden state at the chunk boundary, then run the
                # whole zero-free chunk through the GRU in one call.
                rnn_scores, hxs = self.gru(
                    x[start_idx:end_idx],
                    hxs * masks[start_idx].view(1, -1, 1))

                outputs.append(rnn_scores)

            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)

            # flatten
            x = x.view(T * N, -1)
            hxs = hxs.squeeze(0)

        return x, hxs
class CNNBase(NNBase):
    """Nature-CNN feature extractor with a linear critic head."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        def init_relu(module):
            # Orthogonal weights scaled for ReLU; zero biases.
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        nn.init.calculate_gain('relu'))

        def init_plain(module):
            # Orthogonal weights with unit gain; zero biases.
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0))

        self.main = nn.Sequential(
            init_relu(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
            init_relu(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            init_relu(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
            init_relu(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())
        self.critic_linear = init_plain(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        """Scale uint8 frames into [0, 1], extract features, and return
        (value, features, rnn_hxs)."""
        features = self.main(inputs / 255.0)
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        return self.critic_linear(features), features, rnn_hxs
class MLPBase(NNBase):
    """Two-hidden-layer MLP actor/critic base with tanh activations."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)
        if recurrent:
            # The GRU output (hidden_size wide) feeds the MLPs instead of raw obs.
            num_inputs = hidden_size

        def init_(module):
            # Orthogonal weights with gain sqrt(2); zero biases.
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        self.actor = nn.Sequential(
            init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
        self.critic = nn.Sequential(
            init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
        self.critic_linear = init_(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        """Return (value, actor_features, rnn_hxs)."""
        features = inputs
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        hidden_critic = self.critic(features)
        hidden_actor = self.actor(features)
        return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
##### Added for CCM #######
class ScaleLayer(nn.Module):
    """Multiplies its input by a single learnable scalar (PNN lateral adapter)."""

    def __init__(self, init_value=1e-3):
        super().__init__()
        initial = torch.FloatTensor([init_value])
        self.scale = nn.Parameter(initial)

    def forward(self, x):
        """Return x scaled element-wise by the learned scalar."""
        return self.scale * x
class PNNBase(NNBase):
    """One PNN column: three conv layers plus a fully-connected head.

    `t` holds [n_in, n_out, kernel, stride] for each conv layer and
    [n_in, n_out] for the FC layer.
    """

    def __init__(self, t, recurrent=False, hidden_size=512):
        super(PNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        def init_(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        nn.init.calculate_gain('relu'))

        c1, c2, c3, fc_spec = t
        self.conv1 = init_(nn.Conv2d(c1[0], c1[1], c1[2], stride=c1[3]))
        self.conv2 = init_(nn.Conv2d(c2[0], c2[1], c2[2], stride=c2[3]))
        self.conv3 = init_(nn.Conv2d(c3[0], c3[1], c3[2], stride=c3[3]))
        self.fc = init_(nn.Linear(fc_spec[0], fc_spec[1]))
        self.mp = None  # optional max-pool after conv1; set by subclasses
        self.relu = nn.ReLU()
        self.flatten = Flatten()
        # Kernel/stride of conv2 and conv3 plus the FC width; consumed when
        # wiring lateral connections between columns.
        self.topology = [
            [c2[2], c2[3]],
            [c3[2], c3[3]],
            fc_spec[1]
        ]
        self.output_shapes = [spec[1] for spec in t]
        self.input_shapes = [spec[0] for spec in t]

    def layers(self, i, x):
        """Apply layer i (0-3) to x; conv outputs pass through ReLU, and
        layer 0 is additionally max-pooled when self.mp is set."""
        if i == 0:
            out = self.relu(self.conv1(x))
            return self.mp(out) if self.mp else out
        if i == 1:
            return self.relu(self.conv2(x))
        if i == 2:
            return self.relu(self.conv3(x))
        if i == 3:
            return self.fc(self.flatten(x))

    def forward(self, x):
        """Return the activations of all four layers, in order."""
        activations = []
        for idx in range(4):
            x = self.layers(idx, x)
            activations.append(x)
        return activations
class PNNColumnAtari(PNNBase):
    """PNN column sized for Atari frames (standard Nature-CNN topology)."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        topology = [
            [num_inputs, 32, 8, 4],    # conv1: [n_in, n_out, kernel, stride]
            [32, 64, 4, 2],            # conv2
            [64, 32, 3, 1],            # conv3
            [32 * 7 * 7, hidden_size]  # fc:    [n_in, n_out]
        ]
        super(PNNColumnAtari, self).__init__(topology, recurrent, hidden_size)
class PNNColumnGrid(PNNBase):
    """PNN column sized for grid environments: smaller convs, max-pooled
    first layer, and an extra tanh bottleneck in front of the FC head."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        topology = [
            [num_inputs, 16, 2, 1],  # conv1
            [16, 32, 2, 1],          # conv2
            [32, 64, 2, 1],          # conv3
            [64, 64]                 # fc
        ]
        super(PNNColumnGrid, self).__init__(topology, recurrent, hidden_size)

        def init_(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        nn.init.calculate_gain('relu'))

        self.mp = nn.MaxPool2d((2, 2))
        # Wrap the base FC with an extra Linear+Tanh bottleneck.
        self.fc = nn.Sequential(
            init_(nn.Linear(hidden_size, 64)),
            nn.Tanh(),
            self.fc
        )
class PNNConvBase(NNBase):
    """Progressive Neural Network (PNN) base.

    Keeps one convolutional column per task (`new_task` adds one) plus the
    lateral adapter connections — alpha (learned scale), V (1x1 channel
    reduction) and U (projection) — that feed each earlier column's
    activations into the next column.
    """
    def __init__(self, num_inputs, recurrent=False, grid=False, hidden_size=512):
        super(PNNConvBase, self).__init__(recurrent, hidden_size, hidden_size)
        self.columns = nn.ModuleList([])
        self.num_inputs = num_inputs
        self.hidden_size = hidden_size
        self.recurrent = recurrent
        # Lateral adapters, one ModuleList entry per column pair (c-1 -> c).
        self.alpha = nn.ModuleList([])
        self.V = nn.ModuleList([])
        self.U = nn.ModuleList([])
        self.flatten = Flatten()
        self.grid = grid
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
            constant_(x, 0))
        if grid:
            self.critic_linear = nn.Sequential(
                init_(nn.Linear(self.hidden_size, 64)),
                nn.Tanh(),
                init_(nn.Linear(64, 1))
            )
        else:
            # Bug fix: was `nn.linear` (lowercase), which does not exist and
            # raised AttributeError as soon as grid=False was used.
            self.critic_linear = init_(nn.Linear(self.hidden_size, 1))
        self.train()
        self.n_layers = 4
    def forward(self, x, rnn_hxs, masks):
        """Run all columns layer by layer, mixing lateral activations from
        each previous column into the next one.

        Returns (value, features-of-last-column, rnn_hxs).
        """
        assert self.columns, 'PNN should at least have one column (missing call to `new_task` ?)'
        # x = (x / 255.0)
        inputs = [self.columns[i].layers(0, x) for i in range(len(self.columns))]
        for l in range(1, self.n_layers):
            outputs = [self.columns[0].layers(l, inputs[0])]
            for c in range(1, len(self.columns)):
                pre_col = inputs[c - 1]
                cur_out = self.columns[c].layers(l, inputs[c])
                # Lateral path: scale (alpha) -> reduce (V) -> project (U).
                a = self.alpha[c - 1][l - 1]
                a_h = F.relu(a(pre_col))
                V = self.V[c - 1][l - 1]
                V_a_h = F.relu(V(a_h))
                U = self.U[c - 1][l - 1]
                if l == self.n_layers - 1:
                    # The FC layer expects a flat input.
                    V_a_h = self.flatten(V_a_h)
                # (The original duplicated the next three lines across both
                # branches; they are identical, so merged here.)
                U_V_a_h = U(V_a_h)
                out = F.relu(cur_out + U_V_a_h)
                outputs.append(out)
            inputs = outputs
        x = inputs[-1]
        if self.is_recurrent:
            x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
        return self.critic_linear(x), x, rnn_hxs
    def new_task(self):
        """Append a fresh column; when it is not the first, also build the
        lateral adapters that connect the previous column to it."""
        if self.grid:
            new_column = PNNColumnGrid(self.num_inputs, self.recurrent, self.hidden_size)
        else:
            new_column = PNNColumnAtari(self.num_inputs, self.recurrent, self.hidden_size)
        self.columns.append(new_column)
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
            constant_(x, 0))
        if len(self.columns) > 1:
            pre_col, col = self.columns[-2], self.columns[-1]
            a_list = []
            V_list = []
            U_list = []
            for l in range(1, self.n_layers):
                a = ScaleLayer(0.01)
                # V halves the channel count of the previous column's map.
                map_in = pre_col.output_shapes[l - 1]
                map_out = int(map_in / 2)
                v = init_(nn.Conv2d(map_in, map_out, 1))
                if l != self.n_layers - 1:  # conv -> conv lateral connection
                    cur_out = col.output_shapes[l]
                    size, stride = pre_col.topology[l - 1]
                    u = init_(nn.Conv2d(map_out, cur_out, size, stride=stride))
                else:  # conv -> FC lateral connection (last layer)
                    input_size = int(col.input_shapes[-1] / 2)
                    hidden_size = self.hidden_size
                    u = init_(nn.Linear(input_size, hidden_size))
                a_list.append(a)
                V_list.append(v)
                U_list.append(u)
            self.alpha.append(nn.ModuleList(a_list))
            self.V.append(nn.ModuleList(V_list))
            self.U.append(nn.ModuleList(U_list))
    def freeze_columns(self, skip=None):
        """Disable gradients for every existing column except those in `skip`."""
        if skip is None:
            skip = []
        for i, c in enumerate(self.columns):
            if i not in skip:
                for params in c.parameters():
                    params.requires_grad = False
    def parameters(self, col=None):
        """All parameters, or only those of column `col` when given."""
        if col is None:
            return super(PNNConvBase, self).parameters()
        return self.columns[col].parameters()
| [
"torch.nn.ReLU",
"torch.nn.linear",
"numpy.sqrt",
"torch.nn.Tanh",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"a2c_ppo_acktr.distributions.Bernoulli",
"torch.nn.Conv2d",
"torch.nn.init.orthogonal_",
"a2c_ppo_acktr.distributions.Categorical",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"t... | [((8491, 8500), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8498, 8500), True, 'import torch.nn as nn\n'), ((10155, 10175), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (10167, 10175), True, 'import torch.nn as nn\n'), ((10523, 10540), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (10536, 10540), True, 'import torch.nn as nn\n'), ((10673, 10690), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (10686, 10690), True, 'import torch.nn as nn\n'), ((10708, 10725), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (10721, 10725), True, 'import torch.nn as nn\n'), ((10743, 10760), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (10756, 10760), True, 'import torch.nn as nn\n'), ((973, 1020), 'a2c_ppo_acktr.distributions.Categorical', 'Categorical', (['self.base.output_size', 'num_outputs'], {}), '(self.base.output_size, num_outputs)\n', (984, 1020), False, 'from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian\n'), ((2903, 2944), 'torch.nn.GRU', 'nn.GRU', (['recurrent_input_size', 'hidden_size'], {}), '(recurrent_input_size, hidden_size)\n', (2909, 2944), True, 'import torch.nn as nn\n'), ((5273, 5298), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(0)'}), '(outputs, dim=0)\n', (5282, 5298), False, 'import torch\n'), ((5831, 5840), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5838, 5840), True, 'import torch.nn as nn\n'), ((5893, 5902), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5900, 5902), True, 'import torch.nn as nn\n'), ((5955, 5964), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5962, 5964), True, 'import torch.nn as nn\n'), ((6032, 6041), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6039, 6041), True, 'import torch.nn as nn\n'), ((6202, 6227), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (6211, 6227), True, 'import torch.nn as nn\n'), ((6935, 6944), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', 
(6942, 6944), True, 'import torch.nn as nn\n'), ((7002, 7011), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7009, 7011), True, 'import torch.nn as nn\n'), ((7106, 7115), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7113, 7115), True, 'import torch.nn as nn\n'), ((7173, 7182), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7180, 7182), True, 'import torch.nn as nn\n'), ((7220, 7245), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (7229, 7245), True, 'import torch.nn as nn\n'), ((7743, 7774), 'torch.FloatTensor', 'torch.FloatTensor', (['[init_value]'], {}), '([init_value])\n', (7760, 7774), False, 'import torch\n'), ((8177, 8229), 'torch.nn.Conv2d', 'nn.Conv2d', (['t[0][0]', 't[0][1]', 't[0][2]'], {'stride': 't[0][3]'}), '(t[0][0], t[0][1], t[0][2], stride=t[0][3])\n', (8186, 8229), True, 'import torch.nn as nn\n'), ((8258, 8310), 'torch.nn.Conv2d', 'nn.Conv2d', (['t[1][0]', 't[1][1]', 't[1][2]'], {'stride': 't[1][3]'}), '(t[1][0], t[1][1], t[1][2], stride=t[1][3])\n', (8267, 8310), True, 'import torch.nn as nn\n'), ((8339, 8391), 'torch.nn.Conv2d', 'nn.Conv2d', (['t[2][0]', 't[2][1]', 't[2][2]'], {'stride': 't[2][3]'}), '(t[2][0], t[2][1], t[2][2], stride=t[2][3])\n', (8348, 8391), True, 'import torch.nn as nn\n'), ((8417, 8444), 'torch.nn.Linear', 'nn.Linear', (['t[3][0]', 't[3][1]'], {}), '(t[3][0], t[3][1])\n', (8426, 8444), True, 'import torch.nn as nn\n'), ((10269, 10278), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (10276, 10278), True, 'import torch.nn as nn\n'), ((14036, 14057), 'torch.nn.ModuleList', 'nn.ModuleList', (['a_list'], {}), '(a_list)\n', (14049, 14057), True, 'import torch.nn as nn\n'), ((14079, 14100), 'torch.nn.ModuleList', 'nn.ModuleList', (['V_list'], {}), '(V_list)\n', (14092, 14100), True, 'import torch.nn as nn\n'), ((14122, 14143), 'torch.nn.ModuleList', 'nn.ModuleList', (['U_list'], {}), '(U_list)\n', (14135, 14143), True, 'import torch.nn as nn\n'), ((1148, 1196), 
'a2c_ppo_acktr.distributions.DiagGaussian', 'DiagGaussian', (['self.base.output_size', 'num_outputs'], {}), '(self.base.output_size, num_outputs)\n', (1160, 1196), False, 'from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian\n'), ((5704, 5734), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (5726, 5734), True, 'import torch.nn as nn\n'), ((5790, 5828), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_inputs', '(32)', '(8)'], {'stride': '(4)'}), '(num_inputs, 32, 8, stride=4)\n', (5799, 5828), True, 'import torch.nn as nn\n'), ((5860, 5890), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(4)'], {'stride': '(2)'}), '(32, 64, 4, stride=2)\n', (5869, 5890), True, 'import torch.nn as nn\n'), ((5922, 5952), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(32)', '(3)'], {'stride': '(1)'}), '(64, 32, 3, stride=1)\n', (5931, 5952), True, 'import torch.nn as nn\n'), ((5995, 6029), 'torch.nn.Linear', 'nn.Linear', (['(32 * 7 * 7)', 'hidden_size'], {}), '(32 * 7 * 7, hidden_size)\n', (6004, 6029), True, 'import torch.nn as nn\n'), ((6831, 6841), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6838, 6841), True, 'import numpy as np\n'), ((6898, 6932), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'hidden_size'], {}), '(num_inputs, hidden_size)\n', (6907, 6932), True, 'import torch.nn as nn\n'), ((6964, 6999), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (6973, 6999), True, 'import torch.nn as nn\n'), ((7069, 7103), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'hidden_size'], {}), '(num_inputs, hidden_size)\n', (7078, 7103), True, 'import torch.nn as nn\n'), ((7135, 7170), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (7144, 7170), True, 'import torch.nn as nn\n'), ((8117, 8147), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (8139, 8147), True, 'import 
torch.nn as nn\n'), ((10104, 10134), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (10126, 10134), True, 'import torch.nn as nn\n'), ((10228, 10254), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(64)'], {}), '(hidden_size, 64)\n', (10237, 10254), True, 'import torch.nn as nn\n'), ((11080, 11089), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (11087, 11089), True, 'import torch.nn as nn\n'), ((11198, 11228), 'torch.nn.linear', 'nn.linear', (['self.hidden_size', '(1)'], {}), '(self.hidden_size, 1)\n', (11207, 11228), True, 'import torch.nn as nn\n'), ((1332, 1377), 'a2c_ppo_acktr.distributions.Bernoulli', 'Bernoulli', (['self.base.output_size', 'num_outputs'], {}), '(self.base.output_size, num_outputs)\n', (1341, 1377), False, 'from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian\n'), ((3060, 3087), 'torch.nn.init.constant_', 'nn.init.constant_', (['param', '(0)'], {}), '(param, 0)\n', (3077, 3087), True, 'import torch.nn as nn\n'), ((5647, 5670), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (5664, 5670), True, 'import torch.nn as nn\n'), ((6109, 6132), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (6126, 6132), True, 'import torch.nn as nn\n'), ((6774, 6797), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (6791, 6797), True, 'import torch.nn as nn\n'), ((8060, 8083), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (8077, 8083), True, 'import torch.nn as nn\n'), ((10047, 10070), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (10064, 10070), True, 'import torch.nn as nn\n'), ((10885, 10908), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (10902, 10908), True, 'import torch.nn as nn\n'), ((11030, 11061), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', '(64)'], {}), '(self.hidden_size, 
64)\n', (11039, 11061), True, 'import torch.nn as nn\n'), ((11113, 11129), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (11122, 11129), True, 'import torch.nn as nn\n'), ((12174, 12199), 'torch.nn.functional.relu', 'F.relu', (['(cur_out + U_V_a_h)'], {}), '(cur_out + U_V_a_h)\n', (12180, 12199), True, 'import torch.nn.functional as F\n'), ((12343, 12368), 'torch.nn.functional.relu', 'F.relu', (['(cur_out + U_V_a_h)'], {}), '(cur_out + U_V_a_h)\n', (12349, 12368), True, 'import torch.nn.functional as F\n'), ((12982, 13005), 'torch.nn.init.constant_', 'nn.init.constant_', (['x', '(0)'], {}), '(x, 0)\n', (12999, 13005), True, 'import torch.nn as nn\n'), ((13416, 13445), 'torch.nn.Conv2d', 'nn.Conv2d', (['map_in', 'map_out', '(1)'], {}), '(map_in, map_out, 1)\n', (13425, 13445), True, 'import torch.nn as nn\n'), ((3147, 3173), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['param'], {}), '(param)\n', (3166, 3173), True, 'import torch.nn as nn\n'), ((13660, 13708), 'torch.nn.Conv2d', 'nn.Conv2d', (['map_out', 'cur_out', 'size'], {'stride': 'stride'}), '(map_out, cur_out, size, stride=stride)\n', (13669, 13708), True, 'import torch.nn as nn\n'), ((13878, 13912), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (13887, 13912), True, 'import torch.nn as nn\n')] |
from enum import Enum
import os
import pygame
import pygame.gfxdraw
import pygame.ftfont
import pygame.image
import pygame.transform
import numpy as np
import game_logic as game
from square_rect import SquareRect
from config import config
# Pixel size of one board square; all layout is expressed in square units.
SQUARESIZE = 100
HALF_SQUARE = int(SQUARESIZE / 2)
# Radius of a game piece, slightly inset from a half square.
RADIUS = int(HALF_SQUARE - 5)
COLOR_BOARD = (137, 149, 155)
BLACK = (0, 0, 0)
# Player colors come from the configuration file.
COLOR_LEFT_PLAYER = config["players"]["left_player"]["color"]
COLOR_RIGHT_PLAYER = config["players"]["right_player"]["color"]
# Top-left corner of the board, in square units.
BOARD_OFFSET_X = 4.5
BOARD_OFFSET_Y = 3
# 16:9 display measured in squares.
screen_width = 16 * SQUARESIZE
screen_height = 9 * SQUARESIZE
size = (screen_width, screen_height)
pygame.ftfont.init()
# Fullscreen mode is opt-in via the FULLSCREEN environment variable.
if os.environ.get("FULLSCREEN"):
    screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
else:
    screen = pygame.display.set_mode(size)
class Fonts:
    """Pre-loaded fonts plus per-player metric tweaks.

    Each player uses a different typeface, so per-player offset factors are
    kept alongside the Font objects; call sites multiply them by the font
    height to line both typefaces up visually.
    """
    # Per-player vertical / horizontal offset factors.
    VOFFSET = {"left_player": 0.08 / SQUARESIZE, "right_player": -0.035 / SQUARESIZE}
    HOFFSET = {"left_player": 0, "right_player": 0.05 / SQUARESIZE}
    # Scoreboard fonts; the right player's is slightly smaller to match.
    SCORE = {
        "left_player": pygame.ftfont.Font(
            "fonts/WDRSansUL-ExtraBold.otf", int((SQUARESIZE / 4) * 3)
        ),
        "right_player": pygame.ftfont.Font(
            "fonts/Barlow-Bold.otf", int((SQUARESIZE / 4) * 2.9)
        ),
    }
    # Column-number labels above the board.
    NUMBERS = pygame.ftfont.Font(
        "fonts/WDRSansUL-ExtraBold.otf", int((SQUARESIZE / 4) * 3)
    )
    # The game-end banner reuses the scoreboard fonts.
    GAME_END = SCORE
    # Large per-player countdown digits.
    COUNTDOWN = {
        "left_player": pygame.ftfont.Font(
            "fonts/WDRSansUL-ExtraBold.otf", int(SQUARESIZE * 1.5)
        ),
        "right_player": pygame.ftfont.Font(
            "fonts/Barlow-Bold.otf", int(SQUARESIZE * 1.5)
        ),
    }
    # Status lines (e.g. "ist dran", "Keine Votes!").
    STATUS = {
        "left_player": pygame.ftfont.Font(
            "fonts/WDRSans-Bold.otf", int((SQUARESIZE / 5) * 3)
        ),
        "right_player": pygame.ftfont.Font(
            "fonts/Barlow-Bold.otf", int((SQUARESIZE / 5) * 3)
        ),
    }
    # Player-name display; the size shrinks with longer names so they fit.
    STATUS_LARGE = {
        "left_player": pygame.ftfont.Font(
            "fonts/WDRSansUL-ExtraBold.otf",
            int(
                (SQUARESIZE / 4)
                * 5
                * (5 / len(config["players"]["left_player"]["name"]))
            ),
        ),
        "right_player": pygame.ftfont.Font(
            "fonts/Barlow-Bold.otf",
            int(
                (SQUARESIZE / 4)
                * 5
                * (5 / len(config["players"]["right_player"]["name"]))
            ),
        ),
    }
class Images:
    """Player logos pre-scaled for the scoreboard and the status area."""
    # Raw per-player logo surfaces loaded from disk.
    LOGOS = {
        player: pygame.image.load(f"images/logo_{player}.png").convert_alpha()
        for player in config["players"]
    }
    # Scaled to exactly one square tall; width keeps the aspect ratio.
    SCORE_LOGOS = {
        player: pygame.transform.smoothscale(
            surf, (int(surf.get_width() * SQUARESIZE / surf.get_height()), SQUARESIZE)
        )
        for player, surf in LOGOS.items()
    }
    # Scaled to four squares wide; height keeps the aspect ratio.
    STATUS_LOGOS = {
        player: pygame.transform.smoothscale(
            surf,
            (
                int(4 * SQUARESIZE),
                int(surf.get_height() * 4 * SQUARESIZE / surf.get_width()),
            ),
        )
        for player, surf in LOGOS.items()
    }
class Positions:
    """Screen layout constants, expressed in board-square units (SquareRect)."""
    SCORE_HEIGHT = 1.0
    # Left edge of the current-player panel, per side of the board.
    CURRENT_PLAYER_LEFT_PLAYER_LEFT = 0.25
    CURRENT_PLAYER_RIGHT_PLAYER_LEFT = 11.75
    CURRENT_PLAYER = SquareRect(0, BOARD_OFFSET_Y - 1, 4, 3)
    GAME_END = SquareRect(0, 1, 16, 1)
    # Row above the board where the hovering vote piece is shown.
    CURRENT_VOTE = SquareRect(BOARD_OFFSET_X, 1, game.COLUMN_COUNT, 1)
    COUNTDOWN = SquareRect(0, 6, 4, 2)
class Align(Enum):
    """Horizontal text alignment options accepted by draw_text()."""
    CENTER = "center"
    LEFT = "left"
    RIGHT = "right"
def draw_erase(square_rect, color=BLACK):
    """Fill the given SquareRect area with a solid color (black by default)."""
    pygame.draw.rect(screen, color, square_rect.get_rect(SQUARESIZE))
def draw_text(text, color, font, square_rect, align=Align.CENTER):
    """Erase the target area, render `text`, and blit it with the given alignment.

    Compensates for glyphs that reach above the nominal font ascent (umlauts),
    which would otherwise push the text visually off-center.
    Returns the drawn area as a SquareRect.
    """
    rect = square_rect.get_rect(SQUARESIZE)
    draw_erase(square_rect)
    drawn_text = font.render(text, 1, color)
    # Difference between the font ascent and the tallest glyph actually used;
    # negative when a glyph (e.g. an umlaut) reaches above the ascent.
    if not text:
        height_offset_umlaut = 0
    elif len(text) == 1:
        height_offset_umlaut = font.get_ascent() - font.metrics(text)[0][3]
    else:
        height_offset_umlaut = font.get_ascent() - max(
            *[metric[3] for metric in font.metrics(text)]
        )
    height_offset_umlaut = min(0, height_offset_umlaut)
    text_rect = drawn_text.get_rect(
        center=(rect.left + int(rect.width / 2), rect.top + int(rect.height / 2))
    )
    # Shift by half the overshoot so tall glyphs appear vertically centered.
    text_rect.top += height_offset_umlaut / 2
    if align is Align.LEFT:
        text_rect.left = rect.left
    if align is Align.RIGHT:
        text_rect.right = rect.right
    screen.blit(drawn_text, text_rect)
    return SquareRect.from_rect(text_rect, SQUARESIZE)
def draw_hack_text(text, color, font, square_rect, align=Align.CENTER):
    """Draw text, then paint over the underline strip near the bottom.

    The WDRSansUL font is the only one with correct numbers but has weird
    underlines baked into the glyphs, so after drawing we erase a thin band
    (sized relative to the text height) that covers them.
    """
    text_rect = draw_text(text, color, font, square_rect, align=align)
    underline = text_rect.copy()
    underline.top = underline.bottom - 0.11 * text_rect.height
    underline.height = 0.07 * text_rect.height
    underline.left -= 0.05 * text_rect.height
    underline.width += 0.1 * text_rect.height
    draw_erase(underline)
    return text_rect
def draw_piece(left, top, color, scale=1):
    """Draw a filled, anti-aliased circle centered in the square at (left, top).

    Coordinates are in square units; `scale` shrinks the radius (e.g. for the
    small scoreboard pieces).
    """
    center_x = int(left * SQUARESIZE) + HALF_SQUARE
    center_y = int(top * SQUARESIZE) + HALF_SQUARE
    radius = int(RADIUS * scale)
    pygame.gfxdraw.filled_circle(screen, center_x, center_y, radius, color)
    # Stack two aacircle outlines to strengthen the anti-aliased edge.
    for _ in range(2):
        pygame.gfxdraw.aacircle(screen, center_x, center_y, radius, color)
def draw_image(source, rect, vertical_align="top", horizontal_align="left"):
    """Erase `rect`, blit `source` into it with the given alignment, and
    return the blitted area as a SquareRect."""
    draw_erase(rect)
    target = rect.get_rect(SQUARESIZE)
    v_slack = target.height - source.get_height()
    h_slack = target.width - source.get_width()
    if vertical_align == "center":
        target.top += int(v_slack / 2)
    elif vertical_align == "bottom":
        target.top += int(v_slack)
    if horizontal_align == "center":
        target.left += int(h_slack / 2)
    elif horizontal_align == "right":
        target.left += int(h_slack)
    blit_rect = screen.blit(source, target)
    return SquareRect.from_rect(blit_rect, SQUARESIZE)
def draw_board():
    """Redraw the whole board: one background square plus a piece (or an
    empty black circle) per cell."""
    # Row 0 of the game board is the bottom row, so flip for screen order.
    flipped = np.flip(game.board, 0)
    for col in range(game.COLUMN_COUNT):
        for row in range(game.ROW_COUNT):
            left = col + BOARD_OFFSET_X
            top = row + BOARD_OFFSET_Y
            pygame.draw.rect(
                screen,
                COLOR_BOARD,
                (int(left * SQUARESIZE), int(top * SQUARESIZE), SQUARESIZE, SQUARESIZE),
            )
            cell = flipped[row][col]
            if cell == 1:
                piece_color = COLOR_LEFT_PLAYER
            elif cell == 2:
                piece_color = COLOR_RIGHT_PLAYER
            else:
                piece_color = BLACK
            draw_piece(left, top, piece_color)
def draw_current_vote(vote, turn):
    """Show the currently voted column as a hovering piece above the board."""
    draw_erase(Positions.CURRENT_VOTE)
    draw_piece(
        BOARD_OFFSET_X + vote,
        Positions.CURRENT_VOTE.top,
        config["players"][turn]["color"],
    )
def draw_column_labels():
    """Number the board columns 1..N just above the top row."""
    for col in range(game.COLUMN_COUNT):
        label_rect = SquareRect(BOARD_OFFSET_X + col, BOARD_OFFSET_Y - 0.8, 1, 0.8)
        draw_hack_text(str(col + 1), COLOR_BOARD, Fonts.NUMBERS, label_rect)
def draw_game_end(turn, tie=False):
    """Announce the winner (or a tie) in a banner across the top."""
    if tie:
        color, text = COLOR_BOARD, "Unentschieden!".upper()
    else:
        color = config["players"][turn]["color"]
        text = f"{config['players'][turn]['name']} gewinnt!".upper()
    draw_hack_text(text, color, Fonts.GAME_END[turn], Positions.GAME_END)
def draw_current_player(turn):
    """Show the active player's logo with an "ist dran" label on their side,
    erasing the same panel on the opposite side.

    Parameters
    ----------
    turn : str
        Either "left_player" or "right_player".
    """
    color = config["players"][turn]["color"]
    # Fix: removed the unused local `text` (the player name is never drawn
    # here; only the logo plus the fixed "ist dran" label are shown).
    # Draw on the active player's side; erase the opposite side.
    if turn == "left_player":
        text_left = Positions.CURRENT_PLAYER_LEFT_PLAYER_LEFT
        erase_left = Positions.CURRENT_PLAYER_RIGHT_PLAYER_LEFT
    else:
        text_left = Positions.CURRENT_PLAYER_RIGHT_PLAYER_LEFT
        erase_left = Positions.CURRENT_PLAYER_LEFT_PLAYER_LEFT
    square_rect_logo = Positions.CURRENT_PLAYER.copy()
    square_rect_logo.left = text_left
    square_rect_erase = Positions.CURRENT_PLAYER.copy()
    square_rect_erase.left = erase_left
    draw_erase(square_rect_erase)
    draw_image(Images.STATUS_LOGOS[turn], square_rect_logo, vertical_align="center")
    # Font-specific vertical tweak so both typefaces line up.
    font = Fonts.STATUS[turn]
    font_voffset = font.get_height() * Fonts.VOFFSET[turn]
    square_rect_text = square_rect_logo.copy()
    square_rect_text.height = 1
    square_rect_erase.height = 1
    square_rect_text.top += 3 + font_voffset
    square_rect_erase.top += 3
    draw_erase(square_rect_erase)
    draw_text("ist dran", color, font, square_rect_text)
def draw_countdown(turn, time_left, no_votes_message):
    """Draw the remaining voting time on the active player's side and,
    when `no_votes_message` is truthy, a "Keine Votes!" notice below it.
    The same area on the opposite side is erased."""
    color = config["players"][turn]["color"]
    # Draw on the active player's side; erase the opposite side.
    if turn == "left_player":
        text_left = Positions.CURRENT_PLAYER_LEFT_PLAYER_LEFT
        erase_left = Positions.CURRENT_PLAYER_RIGHT_PLAYER_LEFT
    else:
        text_left = Positions.CURRENT_PLAYER_RIGHT_PLAYER_LEFT
        erase_left = Positions.CURRENT_PLAYER_LEFT_PLAYER_LEFT
    font = Fonts.COUNTDOWN[turn]
    # Font-specific vertical tweak so both typefaces line up.
    font_voffset = font.get_height() * Fonts.VOFFSET[turn]
    square_rect_text = Positions.COUNTDOWN.copy()
    square_rect_text.left = text_left
    square_rect_text.top += font_voffset
    square_rect_erase = Positions.COUNTDOWN.copy()
    square_rect_erase.left = erase_left
    draw_erase(square_rect_erase)
    square_rect_countdown = draw_text(str(time_left), color, font, square_rect_text)
    # Erase a thin band at the bottom of the digits (baked-in underline of
    # the countdown font; same trick as draw_hack_text).
    square_rect_countdown.top = square_rect_countdown.bottom - 0.15
    square_rect_countdown.height = 0.1
    draw_erase(square_rect_countdown)
    # No votes text
    font = Fonts.STATUS[turn]
    font_voffset = font.get_height() * Fonts.VOFFSET[turn]
    square_rect_text.top = 8 + font_voffset
    square_rect_text.height = 1
    draw_erase(square_rect_text, color=BLACK)
    if no_votes_message:
        draw_text("Keine Votes!", color, font, square_rect_text)
def draw_scoreboard(score):
    """Render "NAME <score> : <score> NAME" centered around the colon, with a
    small piece in each player's color next to their text."""
    player = "left_player"
    font = Fonts.SCORE[player]
    # Per-font offsets keep both typefaces visually aligned around the colon.
    font_voffset = font.get_height() * Fonts.VOFFSET[player]
    font_hoffset = font.get_height() * Fonts.HOFFSET[player]
    colon_rect = SquareRect(7.85, font_voffset, 0.3, Positions.SCORE_HEIGHT)
    draw_hack_text(":", COLOR_BOARD, font, colon_rect)
    # Left player's name + score, right-aligned against the colon.
    left_player_rect = SquareRect(
        0, font_voffset, colon_rect.left, Positions.SCORE_HEIGHT,
    )
    left_player_rect.right = colon_rect.left - font_hoffset
    left_text_rect = draw_hack_text(
        f"{config['players'][player]['name']} {score[player]}",
        COLOR_LEFT_PLAYER,
        Fonts.SCORE[player],
        left_player_rect,
        align=Align.RIGHT,
    )
    draw_piece(left_text_rect.left - 1, 0, COLOR_LEFT_PLAYER, scale=0.75)
    player = "right_player"
    font = Fonts.SCORE[player]
    font_voffset = font.get_height() * Fonts.VOFFSET[player]
    font_hoffset = font.get_height() * Fonts.HOFFSET[player]
    # Right player's score + name, left-aligned against the colon.
    right_player_rect = SquareRect(
        colon_rect.right + 0.01 + font_hoffset,
        font_voffset,
        colon_rect.left,
        Positions.SCORE_HEIGHT,
    )
    right_text_rect = draw_hack_text(
        f"{score[player]} {config['players'][player]['name']}",
        COLOR_RIGHT_PLAYER,
        font,
        right_player_rect,
        align=Align.LEFT,
    )
    draw_piece(right_text_rect.right, 0, COLOR_RIGHT_PLAYER, scale=0.75)
| [
"numpy.flip",
"pygame.display.set_mode",
"square_rect.SquareRect",
"os.environ.get",
"pygame.image.load",
"pygame.draw.rect",
"pygame.ftfont.init",
"square_rect.SquareRect.from_rect"
] | [((641, 661), 'pygame.ftfont.init', 'pygame.ftfont.init', ([], {}), '()\n', (659, 661), False, 'import pygame\n'), ((666, 694), 'os.environ.get', 'os.environ.get', (['"""FULLSCREEN"""'], {}), "('FULLSCREEN')\n", (680, 694), False, 'import os\n'), ((709, 757), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size', 'pygame.FULLSCREEN'], {}), '(size, pygame.FULLSCREEN)\n', (732, 757), False, 'import pygame\n'), ((777, 806), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (800, 806), False, 'import pygame\n'), ((3217, 3256), 'square_rect.SquareRect', 'SquareRect', (['(0)', '(BOARD_OFFSET_Y - 1)', '(4)', '(3)'], {}), '(0, BOARD_OFFSET_Y - 1, 4, 3)\n', (3227, 3256), False, 'from square_rect import SquareRect\n'), ((3272, 3295), 'square_rect.SquareRect', 'SquareRect', (['(0)', '(1)', '(16)', '(1)'], {}), '(0, 1, 16, 1)\n', (3282, 3295), False, 'from square_rect import SquareRect\n'), ((3315, 3366), 'square_rect.SquareRect', 'SquareRect', (['BOARD_OFFSET_X', '(1)', 'game.COLUMN_COUNT', '(1)'], {}), '(BOARD_OFFSET_X, 1, game.COLUMN_COUNT, 1)\n', (3325, 3366), False, 'from square_rect import SquareRect\n'), ((3383, 3405), 'square_rect.SquareRect', 'SquareRect', (['(0)', '(6)', '(4)', '(2)'], {}), '(0, 6, 4, 2)\n', (3393, 3405), False, 'from square_rect import SquareRect\n'), ((3579, 3616), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'color', 'rect'], {}), '(screen, color, rect)\n', (3595, 3616), False, 'import pygame\n'), ((4501, 4544), 'square_rect.SquareRect.from_rect', 'SquareRect.from_rect', (['text_rect', 'SQUARESIZE'], {}), '(text_rect, SQUARESIZE)\n', (4521, 4544), False, 'from square_rect import SquareRect\n'), ((6326, 6348), 'numpy.flip', 'np.flip', (['game.board', '(0)'], {}), '(game.board, 0)\n', (6333, 6348), True, 'import numpy as np\n'), ((10353, 10412), 'square_rect.SquareRect', 'SquareRect', (['(7.85)', 'font_voffset', '(0.3)', 'Positions.SCORE_HEIGHT'], {}), '(7.85, font_voffset, 0.3, 
Positions.SCORE_HEIGHT)\n', (10363, 10412), False, 'from square_rect import SquareRect\n'), ((10492, 10560), 'square_rect.SquareRect', 'SquareRect', (['(0)', 'font_voffset', 'colon_rect.left', 'Positions.SCORE_HEIGHT'], {}), '(0, font_voffset, colon_rect.left, Positions.SCORE_HEIGHT)\n', (10502, 10560), False, 'from square_rect import SquareRect\n'), ((11132, 11242), 'square_rect.SquareRect', 'SquareRect', (['(colon_rect.right + 0.01 + font_hoffset)', 'font_voffset', 'colon_rect.left', 'Positions.SCORE_HEIGHT'], {}), '(colon_rect.right + 0.01 + font_hoffset, font_voffset, colon_rect\n .left, Positions.SCORE_HEIGHT)\n', (11142, 11242), False, 'from square_rect import SquareRect\n'), ((7271, 7331), 'square_rect.SquareRect', 'SquareRect', (['(BOARD_OFFSET_X + c)', '(BOARD_OFFSET_Y - 0.8)', '(1)', '(0.8)'], {}), '(BOARD_OFFSET_X + c, BOARD_OFFSET_Y - 0.8, 1, 0.8)\n', (7281, 7331), False, 'from square_rect import SquareRect\n'), ((2461, 2507), 'pygame.image.load', 'pygame.image.load', (['f"""images/logo_{player}.png"""'], {}), "(f'images/logo_{player}.png')\n", (2478, 2507), False, 'import pygame\n')] |
# -*- coding: utf-8 -*-
""""
ニューラルネットワーク・サンプル
"""
import os
import sys
import numpy as np
def main():
    """Train a small feed-forward network on XOR, then report accuracy and
    plot the error curve."""
    # Make the bundled library importable when run from the project root.
    sys.path.append(os.path.join(os.getcwd(), 'src', 'lib'))
    from neuralnetwork import NeuralNetwork
    import utils

    seed_value = 8976
    # XOR training data (also reused for evaluation).
    inputs = np.array([
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1]
    ], dtype=np.float32)
    targets = np.array([
        [1, 0],
        [0, 1],
        [0, 1],
        [1, 0]
    ], dtype=np.float32)
    # Model hyper-parameters.
    learning_rate = 0.4
    epoch = 2000
    num_hidden_units = 4
    layer_model = [inputs.shape[1],
                   num_hidden_units,
                   targets.shape[1]]
    model = NeuralNetwork(layer_model, seed_value=seed_value)
    errors = model.train(learning_rate, epoch, inputs, targets, verbose=True)
    predictions = model.predict(inputs)
    utils.print_accuracy_rate(targets, predictions)
    utils.plot_error_log(errors, epoch, 'chart/error_chart_sample-xor.png')


if __name__ == '__main__':
    main()
| [
"os.getcwd",
"utils.plot_error_log",
"numpy.array",
"neuralnetwork.NeuralNetwork",
"utils.print_accuracy_rate"
] | [((309, 369), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)\n', (317, 369), True, 'import numpy as np\n'), ((428, 488), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 1], [1, 0]]'], {'dtype': 'np.float32'}), '([[1, 0], [0, 1], [0, 1], [1, 0]], dtype=np.float32)\n', (436, 488), True, 'import numpy as np\n'), ((745, 794), 'neuralnetwork.NeuralNetwork', 'NeuralNetwork', (['layer_model'], {'seed_value': 'seed_value'}), '(layer_model, seed_value=seed_value)\n', (758, 794), False, 'from neuralnetwork import NeuralNetwork\n'), ((924, 971), 'utils.print_accuracy_rate', 'utils.print_accuracy_rate', (['Y_train', 'predictions'], {}), '(Y_train, predictions)\n', (949, 971), False, 'import utils\n'), ((977, 1048), 'utils.plot_error_log', 'utils.plot_error_log', (['errors', 'epoch', '"""chart/error_chart_sample-xor.png"""'], {}), "(errors, epoch, 'chart/error_chart_sample-xor.png')\n", (997, 1048), False, 'import utils\n'), ((151, 162), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (160, 162), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from os import path as op
from .utils import _check_pytables
from .externals.six import string_types, text_type
##############################################################################
# WRITE
def write_hdf5(fname, data, overwrite=False):
    """Write a python object to an HDF5 file using Pytables.

    Parameters
    ----------
    fname : str
        Filename to use.
    data : object
        Object to write. Can be of any of these types:
        {ndarray, dict, list, tuple, int, float, str}.
        Note that dict objects must only have ``str`` keys.
    overwrite : bool
        If True, overwrite file (if it exists).
    """
    tables = _check_pytables()
    if not overwrite and op.isfile(fname):
        raise IOError('file "%s" exists, use overwrite=True to overwrite'
                      % fname)
    # Pytables renamed its camelCase API; prefer the modern snake_case names.
    if hasattr(tables, 'open_file'):
        opener = tables.open_file
    else:
        opener = tables.openFile
    with opener(fname, mode='w') as fid:
        if hasattr(fid, 'create_group'):
            makers = (fid.create_group, fid.create_table, fid.create_carray)
        else:
            makers = (fid.createGroup, fid.createTable, fid.createCArray)
        compression = tables.Filters(complib='zlib', complevel=5)
        _triage_write('mnepython', data, fid.root,
                      makers[0], makers[1], makers[2], compression)
def _triage_write(key, value, root, *write_params):
    """Recursively write one python object as a node under ``root``.

    Parameters
    ----------
    key : str
        Name of the node to create under ``root``.
    value : object
        Object to write ({ndarray, dict, list, tuple, int, float, str, None}).
    root : tables node
        Parent group to attach the new node to.
    *write_params : tuple
        ``(create_group, create_table, create_c_array, filters)`` as prepared
        by ``write_hdf5``.
    """
    tb = _check_pytables()
    create_group, create_table, create_c_array, filters = write_params
    if isinstance(value, dict):
        sub_root = create_group(root, key, 'dict')
        for key, sub_value in value.items():
            if not isinstance(key, string_types):
                raise TypeError('All dict keys must be strings')
            _triage_write('key_{0}'.format(key), sub_value, sub_root,
                          *write_params)
    elif isinstance(value, (list, tuple)):
        title = 'list' if isinstance(value, list) else 'tuple'
        sub_root = create_group(root, key, title)
        for vi, sub_value in enumerate(value):
            _triage_write('idx_{0}'.format(vi), sub_value, sub_root,
                          *write_params)
    elif isinstance(value, type(None)):
        # None is stored as a length-1 bool array; the 'None' title tags it
        # so _triage_read can restore it.
        atom = tb.BoolAtom()
        s = create_c_array(root, key, atom, (1,), title='None',
                           filters=filters)
        s[:] = False
    elif isinstance(value, (int, float)):
        if isinstance(value, int):
            title = 'int'
        else:  # isinstance(value, float):
            title = 'float'
        value = np.atleast_1d(value)
        atom = tb.Atom.from_dtype(value.dtype)
        s = create_c_array(root, key, atom, (1,),
                           title=title, filters=filters)
        s[:] = value
    elif isinstance(value, string_types):
        atom = tb.UInt8Atom()
        # Strings are stored as raw byte arrays. np.frombuffer replaces the
        # deprecated np.fromstring for binary input (identical values).
        if isinstance(value, text_type):  # unicode
            value = np.frombuffer(value.encode('utf-8'), np.uint8)
            title = 'unicode'
        else:
            value = np.frombuffer(value.encode('ASCII'), np.uint8)
            title = 'ascii'
        s = create_c_array(root, key, atom, (len(value),), title=title,
                           filters=filters)
        s[:] = value
    elif isinstance(value, np.ndarray):
        atom = tb.Atom.from_dtype(value.dtype)
        s = create_c_array(root, key, atom, value.shape,
                           title='ndarray', filters=filters)
        s[:] = value
    else:
        raise TypeError('unsupported type %s' % type(value))
##############################################################################
# READ
def read_hdf5(fname):
    """Read back a python object written by ``write_hdf5``.

    Parameters
    ----------
    fname : str
        File to load.

    Returns
    -------
    data : object
        The loaded data. Can be of any type supported by ``write_hdf5``.
    """
    tables = _check_pytables()
    if not op.isfile(fname):
        raise IOError('file "%s" not found' % fname)
    # Support both the modern and the legacy (camelCase) Pytables API.
    if hasattr(tables, 'open_file'):
        opener = tables.open_file
    else:
        opener = tables.openFile
    with opener(fname, mode='r') as fid:
        root = fid.root
        if not hasattr(root, 'mnepython'):
            raise TypeError('no mne-python data found')
        return _triage_read(root.mnepython)
def _triage_read(node):
    """Recursively rebuild the python object stored at ``node``."""
    tb = _check_pytables()
    type_str = node._v_title
    if isinstance(node, tb.Group):
        if type_str == 'dict':
            out = dict()
            for child in node:
                # Child names look like 'key_<k>'; strip the 4-char prefix.
                out[child._v_name[4:]] = _triage_read(child)
            return out
        if type_str in ('list', 'tuple'):
            items = list()
            idx = 0
            # Children are named 'idx_0', 'idx_1', ...; walk until a gap.
            while True:
                child = getattr(node, 'idx_{0}'.format(idx), None)
                if child is None:
                    break
                items.append(_triage_read(child))
                idx += 1
            assert len(items) == idx
            return tuple(items) if type_str == 'tuple' else items
        raise NotImplementedError('Unknown group type: {0}'
                                  ''.format(type_str))
    if type_str == 'ndarray':
        return np.array(node)
    if type_str in ('int', 'float'):
        cast = int if type_str == 'int' else float
        return cast(np.array(node)[0])
    if type_str in ('unicode', 'ascii'):
        decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
        cast = text_type if type_str == 'unicode' else str
        return cast(np.array(node).tostring().decode(decoder))
    if type_str == 'None':
        return None
    raise TypeError('Unknown node type: {0}'.format(type_str))
| [
"os.path.isfile",
"numpy.array",
"numpy.atleast_1d"
] | [((780, 796), 'os.path.isfile', 'op.isfile', (['fname'], {}), '(fname)\n', (789, 796), True, 'from os import path as op\n'), ((4036, 4052), 'os.path.isfile', 'op.isfile', (['fname'], {}), '(fname)\n', (4045, 4052), True, 'from os import path as op\n'), ((5339, 5353), 'numpy.array', 'np.array', (['node'], {}), '(node)\n', (5347, 5353), True, 'import numpy as np\n'), ((2676, 2696), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (2689, 2696), True, 'import numpy as np\n'), ((5529, 5543), 'numpy.array', 'np.array', (['node'], {}), '(node)\n', (5537, 5543), True, 'import numpy as np\n'), ((5734, 5748), 'numpy.array', 'np.array', (['node'], {}), '(node)\n', (5742, 5748), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# Regression test for mrcal.match_feature(): warp an image with a known
# homography, then check that the matcher recovers the correct pixel mapping.

import sys
import numpy as np
import numpysane as nps
import os

testdir = os.path.dirname(os.path.realpath(__file__))

# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils

import cv2

image = cv2.imread(f'{testdir}/data/figueroa-overpass-looking-S.0.downsampled.jpg',
                   cv2.IMREAD_GRAYSCALE)

# some made-up homography with scaling, rotating, skewing and translating
H01 = np.array((( 0.7, 0.3, 1.),
                ( -0.4, 0.95, 2.),
                ( 3e-5, 4e-6, 1.),),
               dtype=np.float32)
H10 = np.linalg.inv(H01)

# The feature I'm going to test with. This is the corner of one of the towers
q0 = np.array((294,159), dtype=np.float32)

# The transformed image. The matcher should end-up reversing this
# transformation, since it will be given the homography.
#
# shape (H,W,2)
image1 = \
    mrcal.transform_image(
        image,
        mrcal.apply_homography(
            H01,
            nps.glue(*[ nps.dummy(arr, -1) for arr in \
                        np.meshgrid( np.arange(500),
                                     np.arange(600))],
                     axis=-1).astype(np.float32) ))

# I have the source images and the "right" homography and the "right" matching
# pixels coords. Run the matcher, and compare
templatesize = (30,20)
search_radius = 50
# NOTE(review): 'search_radius' above is never used — the calls below pass
# literal radii (50 / 1000). Verify whether the variable was meant to be used.

# Shift the homography so the matcher has to actually search for the feature.
H10_shifted = H10.copy()
H10_shifted[0,2] += 10.2
H10_shifted[1,2] -= 20.4

q1_matched, diagnostics = \
    mrcal.match_feature( image, image1,
                         q0,
                         H10            = H10_shifted,
                         search_radius1 = 50,
                         template_size1 = templatesize,
                         method         = cv2.TM_CCOEFF_NORMED)

testutils.confirm_equal( q1_matched,
                         mrcal.apply_homography(H10, q0),
                         worstcase = True,
                         eps       = 0.1,
                         msg=f'match_feature(method=TM_CCOEFF_NORMED) reports the correct pixel coordinate')

q1_matched, diagnostics = \
    mrcal.match_feature( image, image1,
                         q0,
                         H10            = H10_shifted,
                         search_radius1 = 50,
                         template_size1 = templatesize,
                         method         = cv2.TM_SQDIFF_NORMED)

testutils.confirm_equal( q1_matched,
                         mrcal.apply_homography(H10, q0),
                         worstcase = True,
                         eps       = 0.1,
                         msg=f'match_feature(method=TM_SQDIFF_NORMED) reports the correct pixel coordinate')

q1_matched, diagnostics = \
    mrcal.match_feature( image, image1,
                         q0,
                         H10            = H10_shifted,
                         search_radius1 = 1000,
                         template_size1 = templatesize,
                         method         = cv2.TM_CCOEFF_NORMED)

testutils.confirm_equal( q1_matched,
                         mrcal.apply_homography(H10, q0),
                         worstcase = True,
                         eps       = 0.1,
                         msg=f'out-of-bounds search_radius works ok')

templatesize_hw = np.array((templatesize[-1],templatesize[-2]))
testutils.confirm_equal( diagnostics['matchoutput_image'].shape,
                         image1.shape - templatesize_hw + 1,
                         msg = 'out-of-bounds search radius looks at the whole image')

# A zeroed-out source image has no texture, so the correlation must fail.
q1_matched, diagnostics = \
    mrcal.match_feature( image*0, image1,
                         q0,
                         H10            = H10_shifted,
                         search_radius1 = 50,
                         template_size1 = templatesize,
                         method         = cv2.TM_CCOEFF_NORMED)

testutils.confirm_equal( q1_matched, None,
                         msg = 'failing correlation returns None')

# A template bigger than the image cannot be extracted; must raise.
try:
    mrcal.match_feature( image*0, image1,
                         q0,
                         H10            = H10_shifted,
                         search_radius1 = 50,
                         template_size1 = (5000, 5000),
                         method         = cv2.TM_CCOEFF_NORMED)
except:
    testutils.confirm(True, msg='Too-big template size throws an exception')
else:
    testutils.confirm(False, msg='Too-big template size throws an exception')

testutils.finish()
| [
"testutils.finish",
"os.path.realpath",
"numpy.array",
"testutils.confirm",
"numpy.linalg.inv",
"numpysane.dummy",
"mrcal.match_feature",
"testutils.confirm_equal",
"cv2.imread",
"mrcal.apply_homography",
"numpy.arange"
] | [((280, 381), 'cv2.imread', 'cv2.imread', (['f"""{testdir}/data/figueroa-overpass-looking-S.0.downsampled.jpg"""', 'cv2.IMREAD_GRAYSCALE'], {}), "(f'{testdir}/data/figueroa-overpass-looking-S.0.downsampled.jpg',\n cv2.IMREAD_GRAYSCALE)\n", (290, 381), False, 'import cv2\n'), ((478, 568), 'numpy.array', 'np.array', (['((0.7, 0.3, 1.0), (-0.4, 0.95, 2.0), (3e-05, 4e-06, 1.0))'], {'dtype': 'np.float32'}), '(((0.7, 0.3, 1.0), (-0.4, 0.95, 2.0), (3e-05, 4e-06, 1.0)), dtype=\n np.float32)\n', (486, 568), True, 'import numpy as np\n'), ((617, 635), 'numpy.linalg.inv', 'np.linalg.inv', (['H01'], {}), '(H01)\n', (630, 635), True, 'import numpy as np\n'), ((720, 758), 'numpy.array', 'np.array', (['(294, 159)'], {'dtype': 'np.float32'}), '((294, 159), dtype=np.float32)\n', (728, 758), True, 'import numpy as np\n'), ((1497, 1633), 'mrcal.match_feature', 'mrcal.match_feature', (['image', 'image1', 'q0'], {'H10': 'H10_shifted', 'search_radius1': '(50)', 'template_size1': 'templatesize', 'method': 'cv2.TM_CCOEFF_NORMED'}), '(image, image1, q0, H10=H10_shifted, search_radius1=50,\n template_size1=templatesize, method=cv2.TM_CCOEFF_NORMED)\n', (1516, 1633), False, 'import mrcal\n'), ((2099, 2235), 'mrcal.match_feature', 'mrcal.match_feature', (['image', 'image1', 'q0'], {'H10': 'H10_shifted', 'search_radius1': '(50)', 'template_size1': 'templatesize', 'method': 'cv2.TM_SQDIFF_NORMED'}), '(image, image1, q0, H10=H10_shifted, search_radius1=50,\n template_size1=templatesize, method=cv2.TM_SQDIFF_NORMED)\n', (2118, 2235), False, 'import mrcal\n'), ((2701, 2839), 'mrcal.match_feature', 'mrcal.match_feature', (['image', 'image1', 'q0'], {'H10': 'H10_shifted', 'search_radius1': '(1000)', 'template_size1': 'templatesize', 'method': 'cv2.TM_CCOEFF_NORMED'}), '(image, image1, q0, H10=H10_shifted, search_radius1=1000,\n template_size1=templatesize, method=cv2.TM_CCOEFF_NORMED)\n', (2720, 2839), False, 'import mrcal\n'), ((3251, 3297), 'numpy.array', 'np.array', (['(templatesize[-1], 
templatesize[-2])'], {}), '((templatesize[-1], templatesize[-2]))\n', (3259, 3297), True, 'import numpy as np\n'), ((3297, 3466), 'testutils.confirm_equal', 'testutils.confirm_equal', (["diagnostics['matchoutput_image'].shape", '(image1.shape - templatesize_hw + 1)'], {'msg': '"""out-of-bounds search radius looks at the whole image"""'}), "(diagnostics['matchoutput_image'].shape, image1.\n shape - templatesize_hw + 1, msg=\n 'out-of-bounds search radius looks at the whole image')\n", (3320, 3466), False, 'import testutils\n'), ((3543, 3684), 'mrcal.match_feature', 'mrcal.match_feature', (['(image * 0)', 'image1', 'q0'], {'H10': 'H10_shifted', 'search_radius1': '(50)', 'template_size1': 'templatesize', 'method': 'cv2.TM_CCOEFF_NORMED'}), '(image * 0, image1, q0, H10=H10_shifted, search_radius1=\n 50, template_size1=templatesize, method=cv2.TM_CCOEFF_NORMED)\n', (3562, 3684), False, 'import mrcal\n'), ((3831, 3917), 'testutils.confirm_equal', 'testutils.confirm_equal', (['q1_matched', 'None'], {'msg': '"""failing correlation returns None"""'}), "(q1_matched, None, msg=\n 'failing correlation returns None')\n", (3854, 3917), False, 'import testutils\n'), ((4409, 4427), 'testutils.finish', 'testutils.finish', ([], {}), '()\n', (4425, 4427), False, 'import testutils\n'), ((111, 137), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (127, 137), False, 'import os\n'), ((1845, 1876), 'mrcal.apply_homography', 'mrcal.apply_homography', (['H10', 'q0'], {}), '(H10, q0)\n', (1867, 1876), False, 'import mrcal\n'), ((2447, 2478), 'mrcal.apply_homography', 'mrcal.apply_homography', (['H10', 'q0'], {}), '(H10, q0)\n', (2469, 2478), False, 'import mrcal\n'), ((3051, 3082), 'mrcal.apply_homography', 'mrcal.apply_homography', (['H10', 'q0'], {}), '(H10, q0)\n', (3073, 3082), False, 'import mrcal\n'), ((3951, 4092), 'mrcal.match_feature', 'mrcal.match_feature', (['(image * 0)', 'image1', 'q0'], {'H10': 'H10_shifted', 'search_radius1': '(50)', 
'template_size1': '(5000, 5000)', 'method': 'cv2.TM_CCOEFF_NORMED'}), '(image * 0, image1, q0, H10=H10_shifted, search_radius1=\n 50, template_size1=(5000, 5000), method=cv2.TM_CCOEFF_NORMED)\n', (3970, 4092), False, 'import mrcal\n'), ((4334, 4407), 'testutils.confirm', 'testutils.confirm', (['(False)'], {'msg': '"""Too-big template size throws an exception"""'}), "(False, msg='Too-big template size throws an exception')\n", (4351, 4407), False, 'import testutils\n'), ((4251, 4323), 'testutils.confirm', 'testutils.confirm', (['(True)'], {'msg': '"""Too-big template size throws an exception"""'}), "(True, msg='Too-big template size throws an exception')\n", (4268, 4323), False, 'import testutils\n'), ((1026, 1044), 'numpysane.dummy', 'nps.dummy', (['arr', '(-1)'], {}), '(arr, -1)\n', (1035, 1044), True, 'import numpysane as nps\n'), ((1095, 1109), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (1104, 1109), True, 'import numpy as np\n'), ((1148, 1162), 'numpy.arange', 'np.arange', (['(600)'], {}), '(600)\n', (1157, 1162), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Transformer for infer."""
import math
import copy
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from .beam_search import BeamSearchDecoder, TileBeam
from .embedding import EmbeddingLookup
from .positional_embedding import PositionalEmbedding
from .components import SaturateCast
from .create_attn_mask import CreateAttentionMaskFromInputMask
from .decoder import TransformerDecoder
from .encoder import TransformerEncoder
class PredLogProbs(nn.Cell):
    """
    Compute log-probabilities over the output vocabulary.

    Args:
        batch_size (int): Batch size of input dataset.
        seq_length (int): The length of sequences.
        width (int): Number of parameters of a layer.
        compute_type (int): Type used for the internal computation.
        dtype (int): Type of MindSpore output type.
    """

    def __init__(self,
                 batch_size,
                 seq_length,
                 width,
                 compute_type=mstype.float32,
                 dtype=mstype.float32):
        super(PredLogProbs, self).__init__()
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.width = width
        self.compute_type = compute_type
        self.dtype = dtype
        # Flatten (batch, seq) into a single leading dimension for the matmul.
        self.shape_flat_sequence_tensor = (self.batch_size * self.seq_length, self.width)
        self.reshape = P.Reshape()
        self.matmul = P.MatMul(transpose_b=True)
        self.log_softmax = nn.LogSoftmax(axis=-1)
        self.cast = P.Cast()

    def construct(self, input_tensor, output_weights):
        """
        Calculate the log_softmax.

        Inputs:
            input_tensor (Tensor): A batch of sentences with shape (N, T).
            output_weights (Tensor): A batch of masks with shape (N, T).

        Returns:
            Tensor, the prediction probability with shape (N, T').
        """
        flat = self.reshape(input_tensor, self.shape_flat_sequence_tensor)
        flat = self.cast(flat, self.compute_type)
        weights = self.cast(output_weights, self.compute_type)
        # Project onto the (transposed) embedding table, then normalize.
        logits = self.matmul(flat, weights)
        logits = self.cast(logits, self.dtype)
        return self.log_softmax(logits)
class TransformerDecoderStep(nn.Cell):
    """
    Multi-layer transformer decoder step.

    Args:
        config (TransformerConfig): The config of Transformer.
        num_hidden_layers (int): The numbers of hidden layers.
        attn_embed_dim (int): Dimensions of attention weights.
        num_attn_heads=12 (int): Heads number.
        seq_length (int): The length of a sequence.
        intermediate_size: Hidden size in FFN.
        attn_dropout_prob (float): Dropout rate in attention. Default: 0.1.
        initializer_range (float): Initial range.
        hidden_dropout_prob (float): Dropout rate in FFN.
        hidden_act (str): Activation function in FFN.
        compute_type (mstype): Mindspore data type. Default: mstype.float32.
        embedding_lookup (function): Embeddings lookup operation. Default: None.
        positional_embedding (function): Position Embedding operation. Default: None.
        projection (function): Function to get log probs. Default: None.
    """

    def __init__(self,
                 config,
                 num_hidden_layers,
                 attn_embed_dim,
                 num_attn_heads=12,
                 seq_length=64,
                 intermediate_size=3072,
                 attn_dropout_prob=0.1,
                 initializer_range=0.02,
                 hidden_dropout_prob=0.1,
                 hidden_act="relu",
                 compute_type=mstype.float32,
                 embedding_lookup=None,
                 positional_embedding=None,
                 projection=None):
        super(TransformerDecoderStep, self).__init__(auto_prefix=False)
        self.embedding_lookup = embedding_lookup
        self.positional_embedding = positional_embedding
        self.projection = projection
        self.seq_length = seq_length
        self.decoder = TransformerDecoder(
            attn_embed_dim=attn_embed_dim,
            num_attn_heads=num_attn_heads,
            decoder_layers=num_hidden_layers,
            intermediate_size=intermediate_size,
            attn_dropout_prob=attn_dropout_prob,
            initializer_range=initializer_range,
            dropout_prob=hidden_dropout_prob,
            hidden_act=hidden_act,
            compute_type=compute_type)
        self.ones_like = P.OnesLike()
        self.shape = P.Shape()
        self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)
        self.expand = P.ExpandDims()
        self.multiply = P.Mul()
        # Lower-triangular (causal) mask: position i may only attend to <= i.
        ones = np.ones(shape=(seq_length, seq_length))
        self.future_mask = Tensor(np.tril(ones), dtype=mstype.float32)
        self.cast_compute_type = SaturateCast(dst_type=compute_type)
        # Standard transformer embedding scaling by sqrt(d_model).
        self.scale = Tensor([math.sqrt(float(attn_embed_dim))], dtype=mstype.float32)

    def construct(self, input_ids, enc_states, enc_attention_mask):
        """
        Get log probs.

        Args:
            input_ids: [batch_size * beam_width, m]
            enc_states: [batch_size * beam_width, T, D]
            enc_attention_mask: [batch_size * beam_width, T, D]

        Returns:
            Tensor, the log_probs. [batch_size * beam_width, 1, Vocabulary_Dimension]
        """
        # process embedding. input_embedding: [batch_size * beam_width, m, D], embedding_tables: [V, D]
        input_embedding, embedding_tables = self.embedding_lookup(input_ids)
        input_embedding = self.multiply(input_embedding, self.scale)
        input_embedding = self.positional_embedding(input_embedding)
        input_embedding = self.cast_compute_type(input_embedding)
        input_shape = self.shape(input_ids)
        input_len = input_shape[1]
        # Crop the precomputed causal mask to the current decode length: [m,m]
        future_mask = self.future_mask[0:input_len:1, 0:input_len:1]
        # [batch_size * beam_width, m]
        input_mask = self.ones_like(input_ids)
        # [batch_size * beam_width, m, m]
        input_mask = self._create_attention_mask_from_input_mask(input_mask)
        # Combine padding mask with causal mask: [batch_size * beam_width, m, m]
        input_mask = self.multiply(input_mask, self.expand(future_mask, 0))
        input_mask = self.cast_compute_type(input_mask)
        # [batch_size * beam_width, m, D]
        enc_attention_mask = enc_attention_mask[::, 0:input_len:1, ::]
        # call TransformerDecoder: [batch_size * beam_width, m, D]
        decoder_output = self.decoder(input_embedding, input_mask, enc_states, enc_attention_mask)
        # take the last step, [batch_size * beam_width, 1, D]
        decoder_output = decoder_output[::, input_len - 1:input_len:1, ::]
        # projection and log_prob
        log_probs = self.projection(decoder_output, embedding_tables)
        # [batch_size * beam_width, 1, vocabulary_size]
        return log_probs
class TransformerInferModel(nn.Cell):
    """
    Transformer Infer.

    Args:
        config (TransformerConfig): The config of Transformer.
        use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.
    """

    def __init__(self,
                 config,
                 use_one_hot_embeddings=False):
        super(TransformerInferModel, self).__init__()
        config = copy.deepcopy(config)
        # Inference only: disable all dropout.
        config.hidden_dropout_prob = 0.0
        config.attention_dropout_prob = 0.0
        self.batch_size = config.batch_size
        self.seq_length = config.seq_length
        self.hidden_size = config.hidden_size
        self.num_hidden_layers = config.num_hidden_layers
        self.embedding_size = config.hidden_size
        self.attn_embed_dim = config.hidden_size
        self.num_layers = config.num_hidden_layers
        self.last_idx = self.num_hidden_layers - 1
        self.embedding_lookup = EmbeddingLookup(
            vocab_size=config.vocab_size,
            embed_dim=self.embedding_size,
            use_one_hot_embeddings=use_one_hot_embeddings)
        self.positional_embedding = PositionalEmbedding(
            embedding_size=self.embedding_size,
            max_position_embeddings=config.max_position_embeddings)
        # Projection used at inference time; seq_length=1 because beam search
        # decodes one step at a time.
        self.projection = PredLogProbs(
            batch_size=config.batch_size * config.beam_width,
            seq_length=1,
            width=self.hidden_size,
            compute_type=config.compute_type)
        self.encoder = TransformerEncoder(
            attn_embed_dim=self.attn_embed_dim,
            encoder_layers=self.num_layers,
            num_attn_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            attention_dropout_prob=config.attention_dropout_prob,
            initializer_range=config.initializer_range,
            hidden_dropout_prob=config.hidden_dropout_prob,
            hidden_act=config.hidden_act,
            compute_type=config.compute_type)
        decoder_cell = TransformerDecoderStep(
            config=config,
            num_hidden_layers=config.num_hidden_layers,
            attn_embed_dim=self.attn_embed_dim,
            seq_length=config.seq_length,
            num_attn_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            hidden_dropout_prob=config.hidden_dropout_prob,
            compute_type=config.compute_type,
            initializer_range=config.initializer_range,
            hidden_act="relu",
            embedding_lookup=self.embedding_lookup,
            positional_embedding=self.positional_embedding,
            attn_dropout_prob=config.attention_dropout_prob,
            projection=self.projection
        )
        # link beam_search after decoder
        self.decoder = BeamSearchDecoder(
            batch_size=config.batch_size,
            seq_length=config.seq_length,
            vocab_size=config.vocab_size,
            decoder=decoder_cell,
            beam_width=config.beam_width,
            length_penalty_weight=config.length_penalty_weight,
            max_decode_length=config.max_decode_length)
        self.cast = P.Cast()
        self.dtype = config.dtype
        self.cast_compute_type = SaturateCast(dst_type=config.compute_type)
        self.expand = P.ExpandDims()
        self.multiply = P.Mul()
        self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)
        # Used at inference time: replicates tensors across the beam width.
        self.tile_beam = TileBeam(beam_width=config.beam_width)
        ones = np.ones(shape=(config.batch_size, config.max_decode_length))
        self.encode_mask = Tensor(ones, dtype=mstype.float32)
        # Standard transformer embedding scaling by sqrt(d_model).
        self.scale = Tensor([math.sqrt(float(self.embedding_size))],
                            dtype=mstype.float32)
        self.reshape = P.Reshape()

    def construct(self, source_ids, source_mask, target_ids=None, target_mask=None):
        """
        Process source sentence

        Inputs:
            source_ids (Tensor): Source sentences with shape (N, T).
            source_mask (Tensor): Source sentences padding mask with shape (N, T),
                where 0 indicates padding position.

        Returns:
            Tensor, Predictions with shape (N, T').
        """
        # word_embeddings
        src_embeddings, _ = self.embedding_lookup(source_ids)
        src_embeddings = self.multiply(src_embeddings, self.scale)
        # position_embeddings
        src_embeddings = self.positional_embedding(src_embeddings)
        # attention mask, [batch_size, seq_length, seq_length]
        enc_attention_mask = self._create_attention_mask_from_input_mask(source_mask)
        # encode
        encoder_output = self.encoder(self.cast_compute_type(src_embeddings),
                                      self.cast_compute_type(enc_attention_mask))
        # beam search: tile the encoder output across the beam width
        beam_encoder_output = self.tile_beam(encoder_output)
        # [batch_size, T, D]
        enc_attention_mask = self.multiply(
            enc_attention_mask[::, 0:1:1, ::],
            self.expand(self.encode_mask, -1))
        # [N*batch_size, T, D]
        beam_enc_attention_mask = self.tile_beam(enc_attention_mask)
        beam_enc_attention_mask = self.cast_compute_type(beam_enc_attention_mask)
        predicted_ids, predicted_probs = self.decoder(beam_encoder_output, beam_enc_attention_mask)
        predicted_ids = self.reshape(predicted_ids, (self.batch_size, -1))
        return predicted_ids, predicted_probs
| [
"copy.deepcopy",
"mindspore.ops.operations.Mul",
"numpy.ones",
"mindspore.nn.LogSoftmax",
"mindspore.ops.operations.MatMul",
"mindspore.ops.operations.Shape",
"mindspore.ops.operations.OnesLike",
"mindspore.ops.operations.Reshape",
"mindspore.common.tensor.Tensor",
"numpy.tril",
"mindspore.ops.o... | [((1983, 1994), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (1992, 1994), True, 'from mindspore.ops import operations as P\n'), ((2017, 2043), 'mindspore.ops.operations.MatMul', 'P.MatMul', ([], {'transpose_b': '(True)'}), '(transpose_b=True)\n', (2025, 2043), True, 'from mindspore.ops import operations as P\n'), ((2071, 2093), 'mindspore.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2084, 2093), True, 'import mindspore.nn as nn\n'), ((2204, 2212), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (2210, 2212), True, 'from mindspore.ops import operations as P\n'), ((5246, 5258), 'mindspore.ops.operations.OnesLike', 'P.OnesLike', ([], {}), '()\n', (5256, 5258), True, 'from mindspore.ops import operations as P\n'), ((5280, 5289), 'mindspore.ops.operations.Shape', 'P.Shape', ([], {}), '()\n', (5287, 5289), True, 'from mindspore.ops import operations as P\n'), ((5408, 5422), 'mindspore.ops.operations.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (5420, 5422), True, 'from mindspore.ops import operations as P\n'), ((5447, 5454), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (5452, 5454), True, 'from mindspore.ops import operations as P\n'), ((5471, 5510), 'numpy.ones', 'np.ones', ([], {'shape': '(seq_length, seq_length)'}), '(shape=(seq_length, seq_length))\n', (5478, 5510), True, 'import numpy as np\n'), ((8106, 8127), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (8119, 8127), False, 'import copy\n'), ((10899, 10907), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (10905, 10907), True, 'from mindspore.ops import operations as P\n'), ((11040, 11054), 'mindspore.ops.operations.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (11052, 11054), True, 'from mindspore.ops import operations as P\n'), ((11079, 11086), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (11084, 11086), True, 'from mindspore.ops import operations as P\n'), 
((11287, 11347), 'numpy.ones', 'np.ones', ([], {'shape': '(config.batch_size, config.max_decode_length)'}), '(shape=(config.batch_size, config.max_decode_length))\n', (11294, 11347), True, 'import numpy as np\n'), ((11375, 11409), 'mindspore.common.tensor.Tensor', 'Tensor', (['ones'], {'dtype': 'mstype.float32'}), '(ones, dtype=mstype.float32)\n', (11381, 11409), False, 'from mindspore.common.tensor import Tensor\n'), ((11553, 11564), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (11562, 11564), True, 'from mindspore.ops import operations as P\n'), ((5545, 5558), 'numpy.tril', 'np.tril', (['ones'], {}), '(ones)\n', (5552, 5558), True, 'import numpy as np\n')] |
import os
import sys
sys.path.append('.')
import cv2
import numpy as np
import time
from Utilities import drawRegion, drawBox, col_rgb, CVConstants
try:
import pyMTF
mtf_available = 1
except ImportError as e:
print('MTF unavailable: {}'.format(e))
mtf_available = 0
from siamfc.SiamFC import SiamFC, SiamFCParams
from SiamMask.SiamMask import SiamMask, SiamMaskParams
from DaSiamRPN.DaSiamRPN import DaSiamRPN, DaSiamRPNParams
siamfc_available = 1
# try:
# from siamfc.SiamFC import SiamFC
#
# siamfc_available = 1
# except ImportError as e:
# print('Siamese FC import error: {}'.format(e))
# siamfc_available = 0
class PatchTrackerParams:
    """
    Configuration for PatchTracker: tracker backend selection plus
    visualization/saving options.

    :type tracker_type: str
    :type mtf_cfg_dir: str
    :type cv_tracker_type: int
    :type show: int | bool
    :type save: int | bool
    :type box_color: str
    :type text_fmt: tuple(str, int, float, int, int)
    :type save_fmt: tuple(str, str, int)

    :param SiamFCParams siam_fc:
    :param SiamMaskParams siam_mask:
    :param DaSiamRPNParams da_siam_rpn:
    """

    def __init__(self):
        # NOTE: tracker_type is a string code ('0'..'4' or a name); PatchTracker
        # compares it against string values like '0' and 'cv'.
        self.tracker_type = '2'
        self.mtf_cfg_dir = 'tracking/cfg/mtf'
        self.cv_tracker_type = 0

        self.show = 1
        self.convert_to_rgb = 0
        self.thickness = 2
        self.box_color = 'red'
        self.gt_color = 'green'
        self.resize_factor = 1.0
        self.show_text = 1
        # (color, location, font, font_size, thickness); see 'help' below.
        self.text_fmt = ('green', 0, 5, 1.0, 1)
        self.save = 0
        self.save_fmt = ('avi', 'XVID', 30)
        self.save_dir = 'videos'
        self.pause_after_frame = 0

        # Per-backend parameter bundles.
        self.siam_fc = SiamFCParams()
        self.siam_mask = SiamMaskParams()
        self.da_siam_rpn = DaSiamRPNParams()

        self.help = {
            'tracker_type': 'type of tracker to use:'
                            '0: OpenCV 3'
                            '1: MTF '
                            '2: Siamese FC '
                            '3: SiamMask '
                            '4: DaSiamRPN ',
            'mtf_cfg_dir': 'directory containing the cfg files for MTF',
            'cv_tracker_type': 'tracker type to use if use_mtf is disabled',
            'siam_fc': 'SiamFC tracker params',
            'siam_mask': 'SiamMask tracker params',
            'da_siam_rpn': 'DaSiamRPN tracker params',
            'show': 'show the tracked object location drawn on the input image',
            'convert_to_rgb': 'convert the image to RGB before showing it; this is sometimes needed if the raw frame is'
                              ' in BGR format so that it does not show correctly (blue and red channels are '
                              'interchanged)',
            'thickness': 'thickness of the bounding box lines drawn on the image',
            'box_color': 'color of the bounding box used to represent the tracked object location',
            'resize_factor': 'multiplicative factor by which the images are resized before being shown or saved',
            'show_text': 'write text in the top left corner of the image to indicate the frame number and FPS',
            'text_fmt': '(color, location, font, font_size, thickness) of the text used to '
                        'indicate the frame number and FPS; '
                        'Available fonts: '
                        '0: cv2.FONT_HERSHEY_SIMPLEX, '
                        '1: cv2.FONT_HERSHEY_PLAIN, '
                        '2: cv2.FONT_HERSHEY_DUPLEX, '
                        '3: cv2.FONT_HERSHEY_COMPLEX, '
                        '4: cv2.FONT_HERSHEY_TRIPLEX, '
                        '5: cv2.FONT_HERSHEY_COMPLEX_SMALL, '
                        '6: cv2.FONT_HERSHEY_SCRIPT_SIMPLEX ,'
                        '7: cv2.FONT_HERSHEY_SCRIPT_COMPLEX; '
                        'Locations: 0: top left, 1: top right, 2: bottom right, 3: bottom left',
            'save': 'save the visualization result with tracked object location drawn on the'
                    ' input image as a video file',
            'save_fmt': '(extension, encoder, FPS) of the saved video',
            'save_dir': 'directory where to save the video',
        }
class PatchTracker:
def __init__(self, params, logger, target_id, label='generic', show_only=False):
"""
:type params: PatchTrackerParams
:type logger: logging.logger
:type target_id: int
:rtype None
"""
self._params = params
self._logger = logger
self.target_id = target_id
self.label = label
self.mtf_id = 0
self.show_only = show_only
self.is_created = False
self.is_terminated = False
self.is_initialized = False
self.cv_tracker = None
self.siamfc_tracker = None
self.siam_mask_tracker = None
self.da_siam_rpn_tracker = None
self.tracker_type = None
self.box_color = col_rgb[self._params.box_color]
self.gt_color = col_rgb[self._params.gt_color]
self.text_color = col_rgb[self._params.text_fmt[0]]
self.text_font = CVConstants.fonts[self._params.text_fmt[2]]
self.text_font_size = self._params.text_fmt[3]
self.text_thickness = self._params.text_fmt[4]
self.text_location = (5, 15)
if cv2.__version__.startswith('2'):
self.text_line_type = cv2.CV_AA
else:
self.text_line_type = cv2.LINE_AA
if not self.show_only:
if self._params.tracker_type in ('0', 'cv'):
self.tracker_type = 0
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
print('major_ver: {}'.format(major_ver))
print('minor_ver: {}'.format(minor_ver))
if int(major_ver) < 3:
self._logger.error('OpenCV trackers are not available')
return
tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
tracker_type = tracker_types[self._params.cv_tracker_type]
self._logger.info('Using OpenCV {:s} tracker'.format(tracker_type))
if int(minor_ver) < 3:
self.cv_tracker = cv2.Tracker_create(tracker_type)
else:
if tracker_type == 'BOOSTING':
self.cv_tracker = cv2.TrackerBoosting_create()
if tracker_type == 'MIL':
self.cv_tracker = cv2.TrackerMIL_create()
if tracker_type == 'KCF':
self.cv_tracker = cv2.TrackerKCF_create()
if tracker_type == 'TLD':
self.cv_tracker = cv2.TrackerTLD_create()
if tracker_type == 'MEDIANFLOW':
self.cv_tracker = cv2.TrackerMedianFlow_create()
if tracker_type == 'GOTURN':
self.cv_tracker = cv2.TrackerGOTURN_create()
elif self._params.tracker_type in ('1', 'mtf'):
self.tracker_type = 1
if not mtf_available:
self._logger.error('MTF is not available')
return
self._logger.info('Using MTF tracker')
elif self._params.tracker_type in ('2', 'siamfc'):
self.tracker_type = 2
if not siamfc_available:
self._logger.error('Siamese FC tracker is not available')
return
self._logger.info('Using Siamese FC tracker')
self.siamfc_tracker = SiamFC(self._params.siam_fc, label=self.label, target_id=self.target_id)
elif self._params.tracker_type in ('3', 'siam_mask'):
self.tracker_type = 3
self._logger.info('Using SiamMask tracker')
self.siam_mask_tracker = SiamMask(self._params.siam_mask, label=self.label, target_id=self.target_id)
elif self._params.tracker_type in ('4', 'da_siam_rpn'):
self.tracker_type = 4
self._logger.info('Using DaSiamRPN tracker')
self.da_siam_rpn_tracker = DaSiamRPN(self._params.da_siam_rpn, self._logger,
label=self.label, target_id=self.target_id)
else:
raise IOError('Invalid tracker_type: {}'.format(self._params.tracker_type))
self.window_name = 'Target {:d} : Press space/esc to stop tracking, s/S to toggle pause'.format(self.target_id)
if self._params.show:
# window for displaying the tracking result
cv2.namedWindow(self.window_name)
self.curr_corners = np.zeros((2, 4), dtype=np.float64)
self.out_bbox = None
self.curr_mask_cropped = None
self.curr_mask = None
# self.curr_mask_pts = None
self.score = 1
self.is_created = True
self.video_writer = None
self.pause_after_frame = self._params.pause_after_frame
def createTracker(self, init_frame, xmin, ymin, xmax, ymax):
if self.tracker_type == 0:
width = xmax - xmin + 1
height = ymax - ymin + 1
roi = (xmin, ymin, width, height)
ok = self.cv_tracker.init(init_frame, roi)
if not ok:
self._logger.error('Tracker initialization was unsuccessful')
return
elif self.tracker_type == 1:
# if len(init_frame.shape) == 3:
# init_frame_gs = cv2.cvtColor(init_frame, cv2.COLOR_BGR2GRAY)
# else:
# init_frame_gs = init_frame
init_corners = [
[xmin, ymin],
[xmax, ymin],
[xmax, ymax],
[xmin, ymax],
]
init_corners = np.array(init_corners).T
try:
# initialize tracker with the first frame and the initial corners
self.mtf_id = pyMTF.create(init_frame.astype(np.uint8), init_corners.astype(np.float64),
self._params.mtf_cfg_dir)
# print('mtf_id: ', self.mtf_id)
# print('type(mtf_id): ', type(self.mtf_id))
if not self.mtf_id:
tracker_created = False
else:
tracker_created = True
except:
tracker_created = False
if not tracker_created:
self._logger.error('MTF tracker creation was unsuccessful')
return
elif self.tracker_type == 2:
w = xmax - xmin
h = ymax - ymin
cx = xmin + w / 2.0
cy = ymin + h / 2.0
bbox = [cx, cy, w, h]
self.siamfc_tracker.initialize(init_frame, bbox)
elif self.tracker_type == 3:
w = xmax - xmin
h = ymax - ymin
cx = xmin + w / 2.0
cy = ymin + h / 2.0
bbox = [cx, cy, w, h]
self.siam_mask_tracker.initialize(init_frame, bbox)
elif self.tracker_type == 4:
w = xmax - xmin
h = ymax - ymin
cx = xmin + w / 2.0
cy = ymin + h / 2.0
bbox = [cx, cy, w, h]
self.da_siam_rpn_tracker.initialize(init_frame, bbox)
def initialize(self, init_frame, init_bbox):
# extract the true corners in the first frame and place them into a 2x4 array
xmin = init_bbox['xmin']
xmax = init_bbox['xmax']
ymin = init_bbox['ymin']
ymax = init_bbox['ymax']
shape = init_frame.shape
# print('init_frame.shape: ', init_frame.shape)
if len(shape) == 3:
n_rows, n_cols, n_ch = shape
else:
n_rows, n_cols = shape
if self._params.text_fmt[1] == 1:
self.text_location = (n_cols - 100, 15)
elif self._params.text_fmt[1] == 2:
self.text_location = (n_cols - 100, n_rows - 15)
elif self._params.text_fmt[1] == 3:
self.text_location = (5, n_rows - 15)
else:
self.text_location = (5, 15)
if not self.show_only:
self.createTracker(init_frame, xmin, ymin, xmax, ymax)
if self._params.save:
time_str = time.strftime("%y%m%d_%H%M", time.localtime())
save_fname = 'target_{:d}_{:s}.{:s}'.format(self.target_id, time_str, self._params.save_fmt[0])
save_path = os.path.join(self._params.save_dir, save_fname)
if not os.path.exists(self._params.save_dir):
os.makedirs(self._params.save_dir)
frame_size = (init_frame.shape[1], init_frame.shape[0])
if self._params.resize_factor != 1:
frame_size = (int(frame_size[0] * self._params.resize_factor),
int(frame_size[1] * self._params.resize_factor))
self.video_writer = cv2.VideoWriter()
if cv2.__version__.startswith('3'):
self.video_writer.open(filename=save_path, apiPreference=cv2.CAP_FFMPEG,
fourcc=cv2.VideoWriter_fourcc(*self._params.save_fmt[1]),
fps=int(self._params.save_fmt[2]), frameSize=frame_size)
else:
self.video_writer.open(filename=save_path, fourcc=cv2.cv.CV_FOURCC(*self._params.save_fmt[1]),
fps=self._params.save_fmt[2], frameSize=frame_size)
if not self.video_writer.isOpened():
self._logger.error('Video file {:s} could not be opened'.format(save_path))
return
print('Saving tracking output to {:s}'.format(save_path))
self.is_initialized = True
def update(self, frame, frame_id, file_path=None, gt_bbox=None):
start_time = time.clock()
if gt_bbox is not None:
gt_xmin = gt_bbox['xmin']
gt_xmax = gt_bbox['xmax']
gt_ymin = gt_bbox['ymin']
gt_ymax = gt_bbox['ymax']
gt_corners = np.zeros((2, 4), dtype=np.float64)
gt_corners[:, 0] = (gt_xmin, gt_ymin)
gt_corners[:, 1] = (gt_xmax, gt_ymin)
gt_corners[:, 2] = (gt_xmax, gt_ymax)
gt_corners[:, 3] = (gt_xmin, gt_ymax)
else:
gt_corners = None
if self.tracker_type == 0:
ok, bbox = self.cv_tracker.update(frame)
if not ok:
self._logger.error('Tracker update was unsuccessful')
self.out_bbox = None
self.is_terminated = True
return
xmin, ymin, width, height = bbox
xmax = xmin + width - 1
ymax = ymin + height - 1
self.curr_corners[:, 0] = (xmin, ymin)
self.curr_corners[:, 1] = (xmax, ymin)
self.curr_corners[:, 2] = (xmax, ymax)
self.curr_corners[:, 3] = (xmin, ymax)
elif self.tracker_type == 1:
# if len(frame.shape) == 3:
# frame_gs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# else:
# frame_gs = frame
success = pyMTF.getRegion(frame, self.curr_corners, self.mtf_id)
if not success:
self._logger.error('Tracker update was unsuccessful')
self.out_bbox = None
self.is_terminated = True
return
elif self.tracker_type == 2:
bbox = self.siamfc_tracker.update(frame, frame_id, file_path)
# cx, cy, w, h = bbox
# xmin = cx + w/2.0
# ymin = cy + h/2.0
xmin, ymin, w, h = bbox
xmax = xmin + w
ymax = ymin + h
self.curr_corners[:, 0] = (xmin, ymin)
self.curr_corners[:, 1] = (xmax, ymin)
self.curr_corners[:, 2] = (xmax, ymax)
self.curr_corners[:, 3] = (xmin, ymax)
elif self.tracker_type == 3:
bbox = self.siam_mask_tracker.update(frame)
xmin, ymin, w, h = bbox
xmax = xmin + w
ymax = ymin + h
self.curr_corners[:, 0] = (xmin, ymin)
self.curr_corners[:, 1] = (xmax, ymin)
self.curr_corners[:, 2] = (xmax, ymax)
self.curr_corners[:, 3] = (xmin, ymax)
mask = self.siam_mask_tracker.mask
# self.curr_mask_pts = self.siam_mask_tracker.mask_pts
cv2.imshow('mask', mask)
if gt_bbox is not None:
mask_cropped = mask[gt_ymin:gt_ymax, gt_xmin:gt_xmax, ...]
cv2.imshow('mask_cropped', mask_cropped)
else:
mask_cropped = mask[int(ymin):int(ymax), int(xmin):int(xmax), ...]
cv2.imshow('mask_cropped', mask_cropped)
self.curr_mask = (mask * 255).astype(np.uint8)
self.curr_mask_cropped = mask_cropped
# self.curr_mask = (mask_cropped * 255).astype(np.uint8)
self.score = self.siam_mask_tracker.score
elif self.tracker_type == 4:
bbox = self.da_siam_rpn_tracker.update(frame)
xmin, ymin, w, h = bbox
xmax = xmin + w
ymax = ymin + h
self.curr_corners[:, 0] = (xmin, ymin)
self.curr_corners[:, 1] = (xmax, ymin)
self.curr_corners[:, 2] = (xmax, ymax)
self.curr_corners[:, 3] = (xmin, ymax)
end_time = time.clock()
# compute the tracking fps
fps = 1.0 / (end_time - start_time)
if self._params.show:
self.show(frame, self.curr_corners, frame_id, fps,
gt_corners=gt_corners,
# mask_img=self.curr_mask
)
# print('curr_corners: ', curr_corners)
xmin = int(self.curr_corners[0, 0])
ymin = int(self.curr_corners[1, 0])
xmax = int(self.curr_corners[0, 2])
ymax = int(self.curr_corners[1, 2])
self.out_bbox = dict(
xmin=xmin,
ymin=ymin,
xmax=xmax,
ymax=ymax,
)
return fps
# print('out_bbox: ', self.out_bbox)
def show(self, frame, corners, frame_id, fps=None, remote_fps=None, gt_corners=None, mask_img=None):
if self._params.convert_to_rgb:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# draw the tracker location
kw_args = {
'color': self.box_color,
'thickness': self._params.thickness,
'mask_img': mask_img,
}
# drawBox(frame, corners, **kw_args)
drawRegion(frame, corners, **kw_args)
if self._params.show_text:
# write statistics (error and fps) to the image
header_text = "frame {:d}".format(frame_id)
if fps is not None:
header_text = "{:s} {:5.2f} fps".format(header_text, fps)
if remote_fps is not None:
header_text = "{:s} {:5.2f} remote_fps".format(header_text, remote_fps)
if gt_corners is not None:
xmin, ymin = corners[:, 0]
xmax, ymax = corners[:, 2]
bb = [xmin, ymin, xmax, ymax]
xmin, ymin = gt_corners[:, 0]
xmax, ymax = gt_corners[:, 2]
bb_gt = [xmin, ymin, xmax, ymax]
bi = [max(bb[0], bb_gt[0]), max(bb[1], bb_gt[1]), min(bb[2], bb_gt[2]),
min(bb[3], bb_gt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw <= 0 or ih <= 0:
iou = 0
else:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bb_gt[2] - bb_gt[0]
+ 1) * (
bb_gt[3] - bb_gt[1] + 1) - iw * ih
iou = iw * ih / ua
header_text = "{:s} {:5.2f} iou".format(header_text, iou)
drawRegion(frame, gt_corners, self.gt_color, self._params.thickness)
if self._params.show_text:
cv2.putText(frame, header_text, self.text_location,
self.text_font, self.text_font_size, self.text_color, self.text_thickness,
self.text_line_type)
if self._params.resize_factor != 1:
frame = cv2.resize(frame, (0, 0), fx=self._params.resize_factor,
fy=self._params.resize_factor)
# display the image
cv2.imshow(self.window_name, frame)
if self.video_writer is not None:
self.video_writer.write(frame)
key = cv2.waitKey(1 - self.pause_after_frame)
if key == 27 or key == 32:
self.is_terminated = True
if key == ord('s') or key == ord('S'):
self.pause_after_frame = 1 - self.pause_after_frame
def close(self):
if self._params.show:
cv2.destroyWindow(self.window_name)
if self.tracker_type == 3:
cv2.destroyWindow('mask')
cv2.destroyWindow('mask_cropped')
if self.video_writer is not None:
self.video_writer.release()
self.video_writer = None
if not self.show_only:
if self.tracker_type == 1:
pyMTF.remove(self.mtf_id)
elif self.tracker_type == 2:
self.siamfc_tracker.close()
elif self.tracker_type == 3:
self.siam_mask_tracker.close()
elif self.tracker_type == 4:
self.da_siam_rpn_tracker.close()
| [
"cv2.TrackerGOTURN_create",
"siamfc.SiamFC.SiamFC",
"time.clock",
"cv2.TrackerKCF_create",
"cv2.imshow",
"numpy.array",
"siamfc.SiamFC.SiamFCParams",
"sys.path.append",
"cv2.TrackerMedianFlow_create",
"cv2.__version__.split",
"os.path.exists",
"pyMTF.getRegion",
"DaSiamRPN.DaSiamRPN.DaSiamRP... | [((22, 42), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (37, 42), False, 'import sys\n'), ((1618, 1632), 'siamfc.SiamFC.SiamFCParams', 'SiamFCParams', ([], {}), '()\n', (1630, 1632), False, 'from siamfc.SiamFC import SiamFC, SiamFCParams\n'), ((1658, 1674), 'SiamMask.SiamMask.SiamMaskParams', 'SiamMaskParams', ([], {}), '()\n', (1672, 1674), False, 'from SiamMask.SiamMask import SiamMask, SiamMaskParams\n'), ((1702, 1719), 'DaSiamRPN.DaSiamRPN.DaSiamRPNParams', 'DaSiamRPNParams', ([], {}), '()\n', (1717, 1719), False, 'from DaSiamRPN.DaSiamRPN import DaSiamRPN, DaSiamRPNParams\n'), ((5257, 5288), 'cv2.__version__.startswith', 'cv2.__version__.startswith', (['"""2"""'], {}), "('2')\n", (5283, 5288), False, 'import cv2\n'), ((8681, 8715), 'numpy.zeros', 'np.zeros', (['(2, 4)'], {'dtype': 'np.float64'}), '((2, 4), dtype=np.float64)\n', (8689, 8715), True, 'import numpy as np\n'), ((13888, 13900), 'time.clock', 'time.clock', ([], {}), '()\n', (13898, 13900), False, 'import time\n'), ((17498, 17510), 'time.clock', 'time.clock', ([], {}), '()\n', (17508, 17510), False, 'import time\n'), ((18668, 18705), 'Utilities.drawRegion', 'drawRegion', (['frame', 'corners'], {}), '(frame, corners, **kw_args)\n', (18678, 18705), False, 'from Utilities import drawRegion, drawBox, col_rgb, CVConstants\n'), ((20606, 20641), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'frame'], {}), '(self.window_name, frame)\n', (20616, 20641), False, 'import cv2\n'), ((20743, 20782), 'cv2.waitKey', 'cv2.waitKey', (['(1 - self.pause_after_frame)'], {}), '(1 - self.pause_after_frame)\n', (20754, 20782), False, 'import cv2\n'), ((8618, 8651), 'cv2.namedWindow', 'cv2.namedWindow', (['self.window_name'], {}), '(self.window_name)\n', (8633, 8651), False, 'import cv2\n'), ((12495, 12542), 'os.path.join', 'os.path.join', (['self._params.save_dir', 'save_fname'], {}), '(self._params.save_dir, save_fname)\n', (12507, 12542), False, 'import os\n'), 
((12958, 12975), 'cv2.VideoWriter', 'cv2.VideoWriter', ([], {}), '()\n', (12973, 12975), False, 'import cv2\n'), ((12991, 13022), 'cv2.__version__.startswith', 'cv2.__version__.startswith', (['"""3"""'], {}), "('3')\n", (13017, 13022), False, 'import cv2\n'), ((14111, 14145), 'numpy.zeros', 'np.zeros', (['(2, 4)'], {'dtype': 'np.float64'}), '((2, 4), dtype=np.float64)\n', (14119, 14145), True, 'import numpy as np\n'), ((18390, 18428), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (18402, 18428), False, 'import cv2\n'), ((20073, 20141), 'Utilities.drawRegion', 'drawRegion', (['frame', 'gt_corners', 'self.gt_color', 'self._params.thickness'], {}), '(frame, gt_corners, self.gt_color, self._params.thickness)\n', (20083, 20141), False, 'from Utilities import drawRegion, drawBox, col_rgb, CVConstants\n'), ((20190, 20342), 'cv2.putText', 'cv2.putText', (['frame', 'header_text', 'self.text_location', 'self.text_font', 'self.text_font_size', 'self.text_color', 'self.text_thickness', 'self.text_line_type'], {}), '(frame, header_text, self.text_location, self.text_font, self.\n text_font_size, self.text_color, self.text_thickness, self.text_line_type)\n', (20201, 20342), False, 'import cv2\n'), ((20451, 20543), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': 'self._params.resize_factor', 'fy': 'self._params.resize_factor'}), '(frame, (0, 0), fx=self._params.resize_factor, fy=self._params.\n resize_factor)\n', (20461, 20543), False, 'import cv2\n'), ((21031, 21066), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.window_name'], {}), '(self.window_name)\n', (21048, 21066), False, 'import cv2\n'), ((5576, 5602), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (5597, 5602), False, 'import cv2\n'), ((12345, 12361), 'time.localtime', 'time.localtime', ([], {}), '()\n', (12359, 12361), False, 'import time\n'), ((12562, 12599), 'os.path.exists', 'os.path.exists', 
(['self._params.save_dir'], {}), '(self._params.save_dir)\n', (12576, 12599), False, 'import os\n'), ((12617, 12651), 'os.makedirs', 'os.makedirs', (['self._params.save_dir'], {}), '(self._params.save_dir)\n', (12628, 12651), False, 'import os\n'), ((15220, 15274), 'pyMTF.getRegion', 'pyMTF.getRegion', (['frame', 'self.curr_corners', 'self.mtf_id'], {}), '(frame, self.curr_corners, self.mtf_id)\n', (15235, 15274), False, 'import pyMTF\n'), ((21122, 21147), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""mask"""'], {}), "('mask')\n", (21139, 21147), False, 'import cv2\n'), ((21164, 21197), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""mask_cropped"""'], {}), "('mask_cropped')\n", (21181, 21197), False, 'import cv2\n'), ((21405, 21430), 'pyMTF.remove', 'pyMTF.remove', (['self.mtf_id'], {}), '(self.mtf_id)\n', (21417, 21430), False, 'import pyMTF\n'), ((6191, 6223), 'cv2.Tracker_create', 'cv2.Tracker_create', (['tracker_type'], {}), '(tracker_type)\n', (6209, 6223), False, 'import cv2\n'), ((9816, 9838), 'numpy.array', 'np.array', (['init_corners'], {}), '(init_corners)\n', (9824, 9838), True, 'import numpy as np\n'), ((6339, 6367), 'cv2.TrackerBoosting_create', 'cv2.TrackerBoosting_create', ([], {}), '()\n', (6365, 6367), False, 'import cv2\n'), ((6456, 6479), 'cv2.TrackerMIL_create', 'cv2.TrackerMIL_create', ([], {}), '()\n', (6477, 6479), False, 'import cv2\n'), ((6568, 6591), 'cv2.TrackerKCF_create', 'cv2.TrackerKCF_create', ([], {}), '()\n', (6589, 6591), False, 'import cv2\n'), ((6680, 6703), 'cv2.TrackerTLD_create', 'cv2.TrackerTLD_create', ([], {}), '()\n', (6701, 6703), False, 'import cv2\n'), ((6799, 6829), 'cv2.TrackerMedianFlow_create', 'cv2.TrackerMedianFlow_create', ([], {}), '()\n', (6827, 6829), False, 'import cv2\n'), ((6921, 6947), 'cv2.TrackerGOTURN_create', 'cv2.TrackerGOTURN_create', ([], {}), '()\n', (6945, 6947), False, 'import cv2\n'), ((7576, 7648), 'siamfc.SiamFC.SiamFC', 'SiamFC', (['self._params.siam_fc'], {'label': 'self.label', 
'target_id': 'self.target_id'}), '(self._params.siam_fc, label=self.label, target_id=self.target_id)\n', (7582, 7648), False, 'from siamfc.SiamFC import SiamFC, SiamFCParams\n'), ((13159, 13208), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*self._params.save_fmt[1]'], {}), '(*self._params.save_fmt[1])\n', (13181, 13208), False, 'import cv2\n'), ((13390, 13433), 'cv2.cv.CV_FOURCC', 'cv2.cv.CV_FOURCC', (['*self._params.save_fmt[1]'], {}), '(*self._params.save_fmt[1])\n', (13406, 13433), False, 'import cv2\n'), ((16500, 16524), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (16510, 16524), False, 'import cv2\n'), ((7854, 7930), 'SiamMask.SiamMask.SiamMask', 'SiamMask', (['self._params.siam_mask'], {'label': 'self.label', 'target_id': 'self.target_id'}), '(self._params.siam_mask, label=self.label, target_id=self.target_id)\n', (7862, 7930), False, 'from SiamMask.SiamMask import SiamMask, SiamMaskParams\n'), ((16653, 16693), 'cv2.imshow', 'cv2.imshow', (['"""mask_cropped"""', 'mask_cropped'], {}), "('mask_cropped', mask_cropped)\n", (16663, 16693), False, 'import cv2\n'), ((16811, 16851), 'cv2.imshow', 'cv2.imshow', (['"""mask_cropped"""', 'mask_cropped'], {}), "('mask_cropped', mask_cropped)\n", (16821, 16851), False, 'import cv2\n'), ((8141, 8238), 'DaSiamRPN.DaSiamRPN.DaSiamRPN', 'DaSiamRPN', (['self._params.da_siam_rpn', 'self._logger'], {'label': 'self.label', 'target_id': 'self.target_id'}), '(self._params.da_siam_rpn, self._logger, label=self.label,\n target_id=self.target_id)\n', (8150, 8238), False, 'from DaSiamRPN.DaSiamRPN import DaSiamRPN, DaSiamRPNParams\n')] |
from abc import abstractmethod
from typing import Optional, Tuple
from pylidar_slam.common.modules import _with_cv2
if _with_cv2:
import cv2
import numpy as np
from omegaconf import DictConfig
from pylidar_slam.common.utils import check_sizes, assert_debug
class ImageBased2DRegistration:
"""
Scan registration method using feature based Image Alignment.
"""
def __init__(self, config: DictConfig):
super().__init__()
self.config = config
# OpenCV algorithms
features = config.get("features", "orb")
assert_debug(features in ["orb", "akaze"])
if features == "akaze":
self.orb: cv2.Feature2D = cv2.AKAZE_create()
else:
self.orb: cv2.Feature2D = cv2.ORB_create()
self.matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
# Image Construction Parameters
self.H = self.config.get("im_height", 400)
self.W = self.config.get("im_width", 400)
self.inlier_threshold: int = self.config.get("inlier_threshold", 50)
self._distance_threshold: float = self.config.get("distance_threshold", 2.0)
@abstractmethod
def build_image(self, pc: np.ndarray):
"""Builds the image from the pointcloud (which will be matched by 2D feature based alignment)"""
raise NotImplementedError("")
@abstractmethod
def compute_transorm(self, ref_2d_pts, tgt_2d_pts, ref_img, tgt_img):
"""Computes the 3D transform from the aligned points"""
raise NotImplementedError("")
def compute_features(self, pc: np.ndarray):
"""Projects the pc into the image plane, and compute features and descriptors"""
image = self.build_image(pc)
# Extract KeyPoints and descriptors
kpts, desc = self.orb.detectAndCompute(image, None)
return image, kpts, desc
def compute_inliers(self, ref_pts, tgt_pts):
"""
Aligns the images using corresponding pair of points (with potentially many outliers)
By default, the best homography is found
"""
check_sizes(ref_pts, [-1, 2])
check_sizes(tgt_pts, [ref_pts.shape[0], 2])
h, inliers = cv2.findHomography(ref_pts, tgt_pts, cv2.RANSAC, self._distance_threshold)
return inliers
def align_2d(self, ref_kpts, ref_desc, tgt_kpts, tgt_desc, ref_img, tgt_img) -> \
Tuple[Optional[np.ndarray], np.ndarray, list]:
"""
Attempts to align the target onto the reference, and if enough inliers are found,
If succesful, returns the planar transforming the target keypoints into the reference keypoints
Otherwise, returns None
"""
matches = self.matcher.match(ref_desc, tgt_desc)
if len(matches) == 0:
return None, np.array([], dtype=np.int64), []
ref_pts = np.array([ref_kpts[m.queryIdx].pt for m in matches])
tgt_pts = np.array([tgt_kpts[m.trainIdx].pt for m in matches])
# Find homography to determine the matched pair of points
inliers = self.compute_inliers(ref_pts, tgt_pts)
n = inliers.shape[0]
inliers_indices = np.arange(0, n)[inliers[:, 0].astype(np.bool)]
inlier_matches = [matches[idx] for idx in inliers_indices]
ref_pts = ref_pts[inliers_indices]
tgt_pts = tgt_pts[inliers_indices]
points = np.concatenate([ref_pts.reshape(-1, 1, 2), tgt_pts.reshape(-1, 1, 2)], axis=1)
num_inliers = len(inlier_matches)
if num_inliers < self.inlier_threshold:
return None, points, inlier_matches
transform = self.compute_transorm(ref_pts, tgt_pts, ref_img, tgt_img)
return transform, points, inlier_matches
# ------------------------------------------------------------------------------------------------------------------
class ElevationImageRegistration(ImageBased2DRegistration):
"""2D Feature based registration which estimates the planar motion (x, y, yaw)
Only relevant for a sensor having "mainly 2D" motion, and can serve as good initialization of this motion
"""
def __init__(self, config: DictConfig):
super().__init__(config)
self.pixel_size: int = self.config.get("pixel_size", 0.4)
self.z_min: float = self.config.get("z_min", 0.0)
self.z_max: float = self.config.get("z_max", 5)
self.sigma: float = self.config.get("sigma", 0.1)
color_map: str = self.config.get("color_map", "jet")
from matplotlib import cm
self.color_map = cm.get_cmap(color_map)
def build_image(self, pc: np.ndarray):
"""Builds an elevation image"""
image = np.ones((self.H, self.W), dtype=np.float32) * self.z_min
pc_x = np.round(pc[:, 0] / self.pixel_size + self.H // 2).astype(np.int64)
pc_y = np.round(pc[:, 1] / self.pixel_size + self.W // 2).astype(np.int64)
pc_z = pc[:, 2]
_filter = (0 <= pc_x) * (pc_x < self.H) * (0 <= pc_y) * (pc_y < self.W)
pc_x = pc_x[_filter]
pc_y = pc_y[_filter]
pc_z = pc_z[_filter]
pc_z = np.clip(pc_z, self.z_min, self.z_max)
indices = np.argsort(pc_z)
pc_z = pc_z[indices]
pc_x = pc_x[indices]
pc_y = pc_y[indices]
pixels = np.concatenate([pc_x.reshape(-1, 1), pc_y.reshape(-1, 1)], axis=-1)
thetas = ((pc_z - self.z_min) / (self.z_max - self.z_min)).reshape(-1)
image[pixels[:, 0], pixels[:, 1]] = thetas
image = self.color_map(image)[:, :, :3] * 255.0
image = image.astype(np.uint8)
return image
def compute_transorm(self, ref_2d_pts, tgt_2d_pts, ref_img, tgt_img):
"""Computes the 3D Rigid transform associated to feature based 2D alignment"""
# Estimate the 2D transform best matching the pair of points
ref_2d_pts[:, 0] -= self.W // 2
ref_2d_pts[:, 1] -= self.H // 2
ref_2d_pts *= self.pixel_size
ref_2d_pts = ref_2d_pts[:, [1, 0]] # (row, col) for OpenCV corresponds to (y, x) for pointcloud params
tgt_2d_pts[:, 0] -= self.W // 2
tgt_2d_pts[:, 1] -= self.H // 2
tgt_2d_pts *= self.pixel_size
tgt_2d_pts = tgt_2d_pts[:, [1, 0]]
ref_mean = ref_2d_pts.mean(axis=0)
tgt_mean = tgt_2d_pts.mean(axis=0)
ref_centered = ref_2d_pts - ref_mean
tgt_centered = tgt_2d_pts - tgt_mean
sigma = tgt_centered.T.dot(ref_centered)
u, d, vt = np.linalg.svd(sigma)
# Compute The 2D Rotation and translation
rot2d = vt.T.dot(u.T)
tr2d = ref_mean - rot2d.dot(tgt_mean)
# Convert to 3D Relative Pose
tr = np.eye(4, dtype=np.float32)
tr[:2, :2] = rot2d
tr[:2, 3] = tr2d
return tr
| [
"numpy.clip",
"cv2.BFMatcher",
"numpy.eye",
"numpy.ones",
"cv2.findHomography",
"numpy.round",
"pylidar_slam.common.utils.assert_debug",
"cv2.AKAZE_create",
"numpy.argsort",
"numpy.array",
"cv2.ORB_create",
"pylidar_slam.common.utils.check_sizes",
"numpy.linalg.svd",
"matplotlib.cm.get_cma... | [((619, 661), 'pylidar_slam.common.utils.assert_debug', 'assert_debug', (["(features in ['orb', 'akaze'])"], {}), "(features in ['orb', 'akaze'])\n", (631, 661), False, 'from pylidar_slam.common.utils import check_sizes, assert_debug\n'), ((863, 906), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_L2'], {'crossCheck': '(True)'}), '(cv2.NORM_L2, crossCheck=True)\n', (876, 906), False, 'import cv2\n'), ((2254, 2283), 'pylidar_slam.common.utils.check_sizes', 'check_sizes', (['ref_pts', '[-1, 2]'], {}), '(ref_pts, [-1, 2])\n', (2265, 2283), False, 'from pylidar_slam.common.utils import check_sizes, assert_debug\n'), ((2296, 2339), 'pylidar_slam.common.utils.check_sizes', 'check_sizes', (['tgt_pts', '[ref_pts.shape[0], 2]'], {}), '(tgt_pts, [ref_pts.shape[0], 2])\n', (2307, 2339), False, 'from pylidar_slam.common.utils import check_sizes, assert_debug\n'), ((2365, 2439), 'cv2.findHomography', 'cv2.findHomography', (['ref_pts', 'tgt_pts', 'cv2.RANSAC', 'self._distance_threshold'], {}), '(ref_pts, tgt_pts, cv2.RANSAC, self._distance_threshold)\n', (2383, 2439), False, 'import cv2\n'), ((3073, 3125), 'numpy.array', 'np.array', (['[ref_kpts[m.queryIdx].pt for m in matches]'], {}), '([ref_kpts[m.queryIdx].pt for m in matches])\n', (3081, 3125), True, 'import numpy as np\n'), ((3148, 3200), 'numpy.array', 'np.array', (['[tgt_kpts[m.trainIdx].pt for m in matches]'], {}), '([tgt_kpts[m.trainIdx].pt for m in matches])\n', (3156, 3200), True, 'import numpy as np\n'), ((4872, 4894), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['color_map'], {}), '(color_map)\n', (4883, 4894), False, 'from matplotlib import cm\n'), ((5472, 5509), 'numpy.clip', 'np.clip', (['pc_z', 'self.z_min', 'self.z_max'], {}), '(pc_z, self.z_min, self.z_max)\n', (5479, 5509), True, 'import numpy as np\n'), ((5533, 5549), 'numpy.argsort', 'np.argsort', (['pc_z'], {}), '(pc_z)\n', (5543, 5549), True, 'import numpy as np\n'), ((6944, 6964), 'numpy.linalg.svd', 'np.linalg.svd', (['sigma'], 
{}), '(sigma)\n', (6957, 6964), True, 'import numpy as np\n'), ((7164, 7191), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (7170, 7191), True, 'import numpy as np\n'), ((740, 758), 'cv2.AKAZE_create', 'cv2.AKAZE_create', ([], {}), '()\n', (756, 758), False, 'import cv2\n'), ((819, 835), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (833, 835), False, 'import cv2\n'), ((3397, 3412), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (3406, 3412), True, 'import numpy as np\n'), ((5007, 5050), 'numpy.ones', 'np.ones', (['(self.H, self.W)'], {'dtype': 'np.float32'}), '((self.H, self.W), dtype=np.float32)\n', (5014, 5050), True, 'import numpy as np\n'), ((3017, 3045), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (3025, 3045), True, 'import numpy as np\n'), ((5084, 5134), 'numpy.round', 'np.round', (['(pc[:, 0] / self.pixel_size + self.H // 2)'], {}), '(pc[:, 0] / self.pixel_size + self.H // 2)\n', (5092, 5134), True, 'import numpy as np\n'), ((5171, 5221), 'numpy.round', 'np.round', (['(pc[:, 1] / self.pixel_size + self.W // 2)'], {}), '(pc[:, 1] / self.pixel_size + self.W // 2)\n', (5179, 5221), True, 'import numpy as np\n')] |
"""
tricks:
1.torch-optimizer:实现了最新的一些优化器.
2.numba:import numba as nb,纯python或numpy加速,加@nb.njit或@nb.jit(nopython=True)
3.swifter:df.apply()→·df.swifter.apply(),加速pandas
4.cupy:1000万以上数据更快
5.modin:import modin.pandas as mdpd,用mdpd代替pd即可,加速pandas,加载数据和查询数据更快,统计方法pandas更快
"""
import os
import sys
import argparse
import time
import random
import wandb
from tqdm import tqdm
import numpy as np
import numba as nb
import pandas as pd
import torch
import hiddenlayer as hl
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from models.module import Model
from data.custom_dataset import MyDataset
def test():
last = time.time()
torch.cuda.empty_cache()
test_losses = []
model.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_data_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
test_losses.append(loss.item())
val_loss = np.mean(np.mean(test_losses))
if __name__ == "__main__":
# #取每个 GPU 的剩余显存数,并存放到 tmp 文件中
# os.system("nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp")
# memory_gpu = [int(x.split()[2]) for x in open("tmp", "r").readlines()]
# torch.cuda.set_device(np.argmax(memory_gpu))
# os.system("rm tmp") # 删除临时生成的 tmp 文件
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
##命令行执行
# CUDA_VISIBLE_DEVICES=0,1 python train.py
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
# argparse for additional flags for experiment
parser = argparse.ArgumentParser(description="Train a network for ...")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--epochs", type=int, default=1000)
parser.add_argument("--resume", type=bool, default=False)
parser.add_argument("--path_to_checkpoint", type=str, default="../checkpoint")
opt = parser.parse_args()
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
wandb.init(project="my-project")
wandb.config.xxx = opt.xxx
# 准备数据
test_dataset = MyDataset("test_dataset_path") # 定义的数据集
test_data_loader = data.DataLoader(
test_dataset, batch_size=128, shuffle=True, drop_last=True
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device_ids = [0, 1]
model = Model(opt)
ckpt = torch.load(
opt.path_to_checkpoint + "lowest_val_loss_model.pt"
) # custom method for loading last checkpoint
model.load_state_dict(ckpt["model_state_dict"])
model.to(device)
# 并行运算,如果需要的话
# model = nn.DataParallel(model, device_ids=device_ids).to(device)
# summary(model, input_size=(channels, H, W))
# hl.build_graph(model, torch.zeros([1, 2, 3]))
# loss function, 比如交叉熵
criterion = nn.CrossEntropyLoss()
criterion.to(device)
wandb.watch(model, criterion)
writer = SummaryWriter("runs/models")
test()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"data.custom_dataset.MyDataset",
"torch.load",
"wandb.init",
"wandb.watch",
"torch.cuda.is_available",
"numpy.random.seed",
"torch... | [((729, 740), 'time.time', 'time.time', ([], {}), '()\n', (738, 740), False, 'import time\n'), ((745, 769), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (767, 769), False, 'import torch\n'), ((1817, 1879), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a network for ..."""'}), "(description='Train a network for ...')\n", (1840, 1879), False, 'import argparse\n'), ((2175, 2199), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (2189, 2199), True, 'import numpy as np\n'), ((2204, 2231), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (2221, 2231), False, 'import torch\n'), ((2236, 2272), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.seed'], {}), '(opt.seed)\n', (2262, 2272), False, 'import torch\n'), ((2278, 2310), 'wandb.init', 'wandb.init', ([], {'project': '"""my-project"""'}), "(project='my-project')\n", (2288, 2310), False, 'import wandb\n'), ((2372, 2402), 'data.custom_dataset.MyDataset', 'MyDataset', (['"""test_dataset_path"""'], {}), "('test_dataset_path')\n", (2381, 2402), False, 'from data.custom_dataset import MyDataset\n'), ((2436, 2511), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_dataset'], {'batch_size': '(128)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(test_dataset, batch_size=128, shuffle=True, drop_last=True)\n', (2451, 2511), False, 'from torch.utils import data\n'), ((2640, 2650), 'models.module.Model', 'Model', (['opt'], {}), '(opt)\n', (2645, 2650), False, 'from models.module import Model\n'), ((2662, 2725), 'torch.load', 'torch.load', (["(opt.path_to_checkpoint + 'lowest_val_loss_model.pt')"], {}), "(opt.path_to_checkpoint + 'lowest_val_loss_model.pt')\n", (2672, 2725), False, 'import torch\n'), ((3094, 3115), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3113, 3115), True, 'import torch.nn as nn\n'), ((3145, 3174), 'wandb.watch', 'wandb.watch', 
(['model', 'criterion'], {}), '(model, criterion)\n', (3156, 3174), False, 'import wandb\n'), ((3189, 3217), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""runs/models"""'], {}), "('runs/models')\n", (3202, 3217), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((817, 832), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (830, 832), False, 'import torch\n'), ((1126, 1146), 'numpy.mean', 'np.mean', (['test_losses'], {}), '(test_losses)\n', (1133, 1146), True, 'import numpy as np\n'), ((2563, 2588), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2586, 2588), False, 'import torch\n')] |
from yo_fluq_ds__tests.common import *
import numpy as np
class MiscMethodsTests(TestCase):
def test_pairwise(self):
result = Query.args(1,2,3).feed(fluq.pairwise()).to_list()
self.assertListEqual([(1,2),(2,3)],result)
def test_strjoin(self):
result = Query.args(1,2,3).feed(fluq.strjoin(','))
self.assertEqual("1,2,3",result)
def test_countby(self):
result = Query.args(1,1,1,2,2,3).feed(fluq.count_by(lambda z: z)).to_series()
self.assertListEqual([1,2,3],list(result.index))
self.assertListEqual([3,2,1],list(result))
def test_shuffle(self):
arg = Query.en(range(5)).feed(fluq.shuffle(1)).to_list()
self.assertListEqual([2,1,4,0,3],arg)
def test_shuffle_rstate(self):
arg = Query.en(range(5)).feed(fluq.shuffle(np.random.RandomState(1))).to_list()
self.assertListEqual([2,1,4,0,3],arg)
def test_shuffle_true(self):
arg = Query.en(range(5)).feed(fluq.shuffle(True)).to_set()
self.assertSetEqual({0,1,2,3,4}, arg)
self.assertEqual(5,len(arg))
def test_shuffle_false(self):
res = Query.en(range(5)).feed(fluq.shuffle(False)).to_list()
self.assertListEqual([0,1,2,3,4],res)
def test_shuffle_none(self):
res = Query.en(range(5)).feed(fluq.shuffle(None)).to_list()
self.assertListEqual([0,1,2,3,4],res)
def test_shuffle_raises(self):
self.assertRaises(
TypeError,
lambda: Query.en(range(5)).feed(fluq.shuffle('a')).to_list()
)
| [
"numpy.random.RandomState"
] | [((819, 843), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (840, 843), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.