code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from dataclasses import dataclass
import typing
import numpy as np
from arrus.ops.operation import Operation
@dataclass(frozen=True)
class Pulse:
    """
    A definition of the pulse that can be triggered by the us4r device.
    :param center_frequency: pulse center frequency [Hz]
    :param n_periods: number of periods of the generated pulse, possible values: 0.5, 1, 1.5, ...
    :param inverse: true if the signal amplitude should be reversed, false otherwise
    """
    center_frequency: float  # pulse center frequency [Hz]
    n_periods: float  # number of pulse periods; half-period resolution (0.5, 1, 1.5, ...)
    inverse: bool  # True -> amplitude is inverted (180 degree phase shift)
@dataclass(frozen=True)
class Tx(Operation):
    """
    Single atomic operation of a signal transmit.
    :param aperture: a set of TX channels that should be enabled - a binary \
        mask, where 1 at location i means that the channel should be turned \
        on, 0 means that the channel should be turned off
    :param pulse: an excitation to perform
    :param delays: an array of delays to set to active elements. Should have the \
        shape (n_a,), where n_a is a number of active elements determined by \
        tx aperture. When None, firings are performed with no delay (delays=0) [s]\
        When provided, the stored value is always of type numpy.ndarray.
    """
    aperture: np.ndarray
    excitation: Pulse
    delays: typing.Optional[np.ndarray] = None

    def __post_init__(self):
        # Bug fix: the previous code unconditionally ran
        # ``np.asarray(self.delays)``, which turns ``None`` into a 0-d object
        # array. That value is no longer ``is None``, so the shape check
        # below always raised for the documented ``delays=None`` case.
        # Only convert delays when a value was actually provided.
        if self.delays is not None:
            object.__setattr__(self, "delays", np.asarray(self.delays))
        object.__setattr__(self, "aperture", np.asarray(self.aperture))
        if self.delays is not None and len(self.delays.shape) != 1:
            raise ValueError("The array of delays should be a vector of "
                             "shape (number of active elements,)")
        if self.delays is not None \
                and self.delays.shape[0] != np.sum(self.aperture):
            raise ValueError(f"The array of delays should have the size equal "
                             f"to the number of active elements of aperture "
                             f"({self.aperture.shape})")
@dataclass(frozen=True)
class Rx(Operation):
    """
    Single atomic operation of echo data reception.
    :param aperture: binary mask of RX channels to enable; a 1 at position i \
        turns channel i on, a 0 turns it off. The stored value is always of \
        type numpy.ndarray.
    :param sample_range: a range of samples to acquire [start, end), starts from 0
    :param downsampling_factor: a sampling frequency divider. For example, if \
        nominal sampling frequency (fs) is equal to 65e6 Hz, ``fs_divider=1``,\
        means to use the nominal fs, ``fs_divider=2`` means to use 32.5e6 Hz, \
        etc.
    :param padding: a pair (left, right) giving how many zero-channels to add
        on each side of the aperture; this keeps the output a regular ndarray
        when a sequence of Rxs has a non-constant aperture size (e.g.
        classical beamforming).
    """
    aperture: np.ndarray
    sample_range: tuple
    downsampling_factor: int = 1
    padding: tuple = (0, 0)

    def __post_init__(self):
        # Frozen dataclass: use object.__setattr__ to normalize the aperture
        # into an ndarray regardless of the input container type.
        object.__setattr__(self, "aperture", np.asarray(self.aperture))

    def get_n_samples(self):
        """Return the number of samples acquired per channel."""
        first, last = self.sample_range
        return last - first
@dataclass(frozen=True)
class TxRx:
    """
    Single atomic operation of pulse transmit and signal data reception.
    :param tx: signal transmit to perform
    :param rx: signal reception to perform
    :param pri: pulse repetition interval [s] - time to next event
    """
    tx: Tx  # the transmit half of the event
    rx: Rx  # the receive half of the event
    pri: float  # pulse repetition interval: time until the next event [s]
@dataclass(frozen=True)
class TxRxSequence:
    """
    A sequence of tx/rx operations to perform.
    :param ops: sequence of TX/RX operations to perform
    :param tgc_curve: TGC curve samples [dB]
    :param sri: sequence repetition interval - the time between consecutive RF \
        frames. When None, the time between consecutive RF frames is \
        determined by the total pri only. [s]
    """
    ops: typing.List[TxRx]
    tgc_curve: np.ndarray
    sri: typing.Optional[float] = None

    def __post_init__(self):
        # Frozen dataclass: bypass immutability to normalize the TGC curve
        # to an ndarray regardless of the input container type.
        object.__setattr__(self, "tgc_curve", np.asarray(self.tgc_curve))

    def get_n_samples(self):
        """
        Returns a set of number of samples that the Tx/Rx sequence defines.
        """
        return {op.rx.get_n_samples() for op in self.ops}
@dataclass(frozen=True)
class DataBufferSpec:
    """
    Output data buffer specification.
    :param n_elements: number of elements the buffer should consists of
    :param type: type of a buffer, available values: "FIFO"
    """
    n_elements: int  # capacity of the buffer, in elements
    type: str  # buffer kind; currently only "FIFO" is listed as available
@dataclass(frozen=True)
class Scheme:
    """
    A scheme to load on the us4r device.
    :param tx_rx_sequence: a sequence of tx/rx parameters to perform
    :param rx_buffer_size: number of elements the rx buffer (allocated on \
        us4r ddr internal memory) should consists of
    :param output_buffer: specification of the output buffer
    :param work_mode: determines the system work mode, available values: 'ASYNC', 'HOST'
    :param processing: data processing to perform on the raw channel RF data \
        currently only arrus.utils.imaging is supported
    """
    tx_rx_sequence: TxRxSequence
    rx_buffer_size: int = 2
    # Shared default instance is safe: DataBufferSpec is a frozen dataclass.
    output_buffer: DataBufferSpec = DataBufferSpec(type="FIFO", n_elements=4)
    work_mode: str = "HOST"
    processing: object = None  # None -> no host-side processing
| [
"numpy.asarray",
"numpy.sum",
"dataclasses.dataclass"
] | [((112, 134), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (121, 134), False, 'from dataclasses import dataclass\n'), ((547, 569), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (556, 569), False, 'from dataclasses import dataclass\n'), ((2015, 2037), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2024, 2037), False, 'from dataclasses import dataclass\n'), ((3371, 3393), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3380, 3393), False, 'from dataclasses import dataclass\n'), ((3688, 3710), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3697, 3710), False, 'from dataclasses import dataclass\n'), ((4471, 4493), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4480, 4493), False, 'from dataclasses import dataclass\n'), ((4740, 4762), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4749, 4762), False, 'from dataclasses import dataclass\n'), ((1386, 1409), 'numpy.asarray', 'np.asarray', (['self.delays'], {}), '(self.delays)\n', (1396, 1409), True, 'import numpy as np\n'), ((1456, 1481), 'numpy.asarray', 'np.asarray', (['self.aperture'], {}), '(self.aperture)\n', (1466, 1481), True, 'import numpy as np\n'), ((3247, 3272), 'numpy.asarray', 'np.asarray', (['self.aperture'], {}), '(self.aperture)\n', (3257, 3272), True, 'import numpy as np\n'), ((4252, 4278), 'numpy.asarray', 'np.asarray', (['self.tgc_curve'], {}), '(self.tgc_curve)\n', (4262, 4278), True, 'import numpy as np\n'), ((1774, 1795), 'numpy.sum', 'np.sum', (['self.aperture'], {}), '(self.aperture)\n', (1780, 1795), True, 'import numpy as np\n')] |
import numpy as np
def evaluate_voxel_prediction(preds, gt, thresh):
    """Score a one-hot voxel occupancy prediction against ground truth.

    Channel 1 of the last axis is the "occupied" score/label and channel 0
    the "empty" label, as established by the thresholding and the
    intersection/union terms below.

    Parameters
    ----------
    preds : ndarray, shape (d0, d1, d2, 2)
        Predicted per-voxel scores; a voxel counts as occupied when its
        channel-1 score is >= ``thresh``.
    gt : ndarray, shape (d0, d1, d2, 2)
        One-hot ground-truth occupancy.
    thresh : float
        Occupancy decision threshold applied to ``preds[..., 1]``.

    Returns
    -------
    ndarray
        ``[diff, intersection, union, num_fp, num_fn]``.
    """
    preds_occupy = preds[:, :, :, 1] >= thresh
    gt_occupy = gt[:, :, :, 1]
    diff = np.sum(np.logical_xor(preds_occupy, gt_occupy))
    intersection = np.sum(np.logical_and(preds_occupy, gt_occupy))
    union = np.sum(np.logical_or(preds_occupy, gt_occupy))
    num_fp = np.sum(np.logical_and(preds_occupy, gt[:, :, :, 0]))  # false positive
    # Bug fix: a false negative is a voxel predicted empty whose ground truth
    # is occupied (channel 1). The previous code intersected with channel 0
    # (empty), which actually counted true negatives.
    num_fn = np.sum(
        np.logical_and(np.logical_not(preds_occupy), gt_occupy))  # false negative
    return np.array([diff, intersection, union, num_fp, num_fn])
| [
"numpy.logical_and",
"numpy.logical_not",
"numpy.logical_xor",
"numpy.array",
"numpy.logical_or"
] | [((513, 566), 'numpy.array', 'np.array', (['[diff, intersection, union, num_fp, num_fn]'], {}), '([diff, intersection, union, num_fp, num_fn])\n', (521, 566), True, 'import numpy as np\n'), ((136, 180), 'numpy.logical_xor', 'np.logical_xor', (['preds_occupy', 'gt[:, :, :, 1]'], {}), '(preds_occupy, gt[:, :, :, 1])\n', (150, 180), True, 'import numpy as np\n'), ((208, 252), 'numpy.logical_and', 'np.logical_and', (['preds_occupy', 'gt[:, :, :, 1]'], {}), '(preds_occupy, gt[:, :, :, 1])\n', (222, 252), True, 'import numpy as np\n'), ((273, 316), 'numpy.logical_or', 'np.logical_or', (['preds_occupy', 'gt[:, :, :, 1]'], {}), '(preds_occupy, gt[:, :, :, 1])\n', (286, 316), True, 'import numpy as np\n'), ((338, 382), 'numpy.logical_and', 'np.logical_and', (['preds_occupy', 'gt[:, :, :, 0]'], {}), '(preds_occupy, gt[:, :, :, 0])\n', (352, 382), True, 'import numpy as np\n'), ((437, 465), 'numpy.logical_not', 'np.logical_not', (['preds_occupy'], {}), '(preds_occupy)\n', (451, 465), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Heat map plots."""
import os
from absl import app
from absl import flags
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from caltrain.run_calibration import estimate_ece
# Headless backend: figures are only written to disk, never shown.
matplotlib.use('Agg')
font = {'size': 26}
matplotlib.rc('font', **font)
FLAGS = flags.FLAGS
flags.DEFINE_string('plot_dir', './caltrain/plots', 'location to write plots')
flags.DEFINE_string('data_dir', './caltrain/data',
                    'location of the source data')
def plot_imshow(ce_type):
  """Plot bias and variance heat map for given ce type.

  For every (num_bins, num_samples) grid cell, the bias and variance of the
  requested calibration-error estimator are computed via ``estimate_ece`` on
  the synthetic 'polynomial' dataset, then rendered as two annotated heat
  maps and written to ``<plot_dir>/heat`` as PDFs.

  Args:
    ce_type: calibration-error estimator name, e.g. 'ew_ece_bin'
      (equal-width) or 'em_ece_bin' (equal-mass) binned ECE.
  """
  n_samples = [200, 400, 800, 1600, 3200, 6400]
  num_bins = [2, 4, 8, 16, 32, 64]
  # Fixed synthetic-dataset configuration shared by every grid cell.
  config = {}
  config['num_reps'] = 1000
  config['split'] = ''
  config['norm'] = 2
  config['calibration_method'] = 'no_calibration'
  config['bin_method'] = ''
  config['ce_type'] = ce_type
  config['dataset'] = 'polynomial'
  config['a'] = 1.1
  config['b'] = 0.1
  config['d'] = 2
  config['alpha'] = 1.1
  config['beta'] = 0.1
  # Row index = bin-count choice, column index = sample-count choice.
  ece_bias = np.zeros((len(num_bins), len(n_samples)))
  ece_var = np.zeros((len(num_bins), len(n_samples)))
  np.random.seed(2379587)  # fixed seed so the figures are reproducible
  for i, num_samples in enumerate(n_samples):
    for j, num_bin in enumerate(num_bins):
      config['num_samples'] = num_samples
      config['num_bins'] = num_bin
      mean, var, _ = estimate_ece(config, FLAGS.data_dir)
      ece_bias[j, i] = mean
      ece_var[j, i] = var
  # p == 0 -> bias figure, p == 1 -> variance figure.
  for p in [0, 1]:
    fig, ax = plt.subplots(figsize=(10, 10))
    # NOTE(review): for p == 1 this plots the raw ``var`` returned by
    # estimate_ece while the labels call it Sqrt(Variance) — presumably
    # estimate_ece already returns a square-rooted value; confirm.
    cur_data = ece_bias if p == 0 else ece_var
    ax.imshow(np.abs(cur_data), cmap='Reds', vmin=0, vmax=9.0)
    ax.set_xticks(np.arange(len(n_samples)))
    ax.set_yticks(np.arange(len(num_bins)))
    ax.set_xticklabels(n_samples)
    ax.set_ylabel('# Bins')
    ax.set_yticklabels(num_bins)
    ax.set_xlabel('# Samples')
    cur_data_type = 'Bias' if p == 0 else 'Sqrt(Variance)'
    cur_title_data_type = 'Bias' if p == 0 else r'$\sqrt{\mathrm{Variance}}$'
    cur_title_ce_type = r'Equal Width $\mathrm{ECE}_\mathrm{BIN}$' if ce_type == 'ew_ece_bin' else r'Equal Mass $\mathrm{ECE}_\mathrm{BIN}$'
    ax.set_title('{} in {}'.format(cur_title_data_type, cur_title_ce_type))
    # Annotate every cell with its numeric value.
    for i in range(cur_data.shape[0]):
      for j in range(cur_data.shape[1]):
        ax.text(
            j,
            i,
            '%.2f' % (cur_data[i, j]),
            ha='center',
            va='center',
            color='#000000')
    plt.tight_layout()
    save_dir = os.path.join(FLAGS.plot_dir, 'heat')
    os.makedirs(save_dir, exist_ok=True)
    fig.savefig(
        os.path.join(
            save_dir,
            '{}_{}_alpha_{}_beta_{}.pdf'.format(ce_type, cur_data_type,
                                                config['alpha'],
                                                config['beta'])),
        dpi='figure',
        bbox_inches='tight')
def main(_):
  """Generate heat-map figures for both binned ECE variants."""
  plot_imshow('em_ece_bin')
  plot_imshow('ew_ece_bin')
if __name__ == '__main__':
  app.run(main)
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.rc",
"caltrain.run_calibration.estimate_ece",
"numpy.random.seed",
"os.path.join",
"os.makedirs",
"numpy.abs",
"absl.flags.DEFINE_string",
"matplotlib.use",
"absl.app.run",
"matplotlib.pyplot.subplots"
] | [((806, 827), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (820, 827), False, 'import matplotlib\n'), ((848, 877), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (861, 877), False, 'import matplotlib\n'), ((899, 977), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""plot_dir"""', '"""./caltrain/plots"""', '"""location to write plots"""'], {}), "('plot_dir', './caltrain/plots', 'location to write plots')\n", (918, 977), False, 'from absl import flags\n'), ((978, 1063), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_dir"""', '"""./caltrain/data"""', '"""location of the source data"""'], {}), "('data_dir', './caltrain/data',\n 'location of the source data')\n", (997, 1063), False, 'from absl import flags\n'), ((1698, 1721), 'numpy.random.seed', 'np.random.seed', (['(2379587)'], {}), '(2379587)\n', (1712, 1721), True, 'import numpy as np\n'), ((3526, 3539), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (3533, 3539), False, 'from absl import app\n'), ((2035, 2065), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2047, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3015), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3013, 3015), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3067), 'os.path.join', 'os.path.join', (['FLAGS.plot_dir', '"""heat"""'], {}), "(FLAGS.plot_dir, 'heat')\n", (3043, 3067), False, 'import os\n'), ((3072, 3108), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (3083, 3108), False, 'import os\n'), ((1910, 1946), 'caltrain.run_calibration.estimate_ece', 'estimate_ece', (['config', 'FLAGS.data_dir'], {}), '(config, FLAGS.data_dir)\n', (1922, 1946), False, 'from caltrain.run_calibration import estimate_ece\n'), ((2127, 2143), 'numpy.abs', 'np.abs', (['cur_data'], {}), '(cur_data)\n', (2133, 2143), True, 'import 
numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Random Interval Spectral Forest (RISE).
# TODO move or remove RandomIntervalSpectralForest in v0.10.0
This classifier has been refactored to have the correct name. The incorrectly named
algorithm will be depreciated.
"""
__author__ = ["TonyBagnall", "<NAME>"]
__all__ = [
"RandomIntervalSpectralEnsemble",
"RandomIntervalSpectralForest",
"acf",
"matrix_acf",
"ps",
]
import numpy as np
from deprecated.sphinx import deprecated
from joblib import Parallel, delayed
from numba import int64, jit, prange
from sklearn.base import clone
from sklearn.ensemble._base import _partition_estimators
from sklearn.ensemble._forest import ForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.validation import check_random_state
from sktime.classification.base import BaseClassifier
from sktime.utils.validation.panel import check_X, check_X_y
def _transform(X, interval, lag):
    """Build per-instance power-spectrum + ACF features for one interval.

    Each row of ``X`` is sliced to ``interval[0]:interval[1]``; the slice is
    turned into ``ps`` features (length rounded to the nearest power of two)
    followed by ``lag`` autocorrelation terms.
    """
    n_cases = X.shape[0]
    start, end = interval[0], interval[1]
    ps_len = _round_to_nearest_power_of_two(end - start)
    # Pre-allocate one feature row per instance.
    acf_feats = np.empty(shape=(n_cases, lag))
    ps_feats = np.empty(shape=(n_cases, ps_len))
    for idx in range(n_cases):
        window = X[idx, start:end]
        acf_feats[idx] = acf(window, lag)
        ps_feats[idx] = ps(window, n=ps_len * 2)
    # Power-spectrum features first, then the ACF terms.
    return np.concatenate((ps_feats, acf_feats), axis=1)
def _parallel_build_trees(X, y, tree, interval, lag, acf_min_values):
    """Fit one ensemble member on interval features (joblib worker).

    The requested ``lag`` is clipped so that at least ``acf_min_values``
    observations remain available within the interval; a negative result is
    replaced by 1. Returns the lag actually used and the fitted tree.
    """
    width = interval[1] - interval[0]
    adjusted_lag = min(lag, width - acf_min_values)
    if adjusted_lag < 0:
        adjusted_lag = 1
    adjusted_lag = int(adjusted_lag)
    tree.fit(_transform(X, interval, adjusted_lag), y)
    return adjusted_lag, tree
def _predict_proba_for_estimator(X, estimator, interval, lag):
    """Class-probability estimates of one ensemble member (joblib worker)."""
    return estimator.predict_proba(_transform(X, interval, lag))
def _make_estimator(base_estimator, random_state=None):
    """Return a clone of ``base_estimator`` seeded with ``random_state``.

    Warning: This method should be used to properly instantiate new
    sub-estimators.
    """
    new_estimator = clone(base_estimator)
    new_estimator.set_params(random_state=random_state)
    return new_estimator
def _select_interval(min_interval, max_interval, series_length, rng, method=3):
"""Private function used to select an interval for a single tree."""
interval = np.empty(2, dtype=int)
if method == 0:
interval[0] = rng.randint(series_length - min_interval)
interval[1] = rng.randint(interval[0] + min_interval, series_length)
else:
if rng.randint(2):
interval[0] = rng.randint(series_length - min_interval)
interval_range = min(series_length - interval[0], max_interval)
length = rng.randint(min_interval, interval_range)
interval[1] = interval[0] + length
else:
interval[1] = rng.randint(min_interval, series_length)
interval_range = min(interval[1], max_interval)
length = (
3
if interval_range == min_interval
else rng.randint(min_interval, interval_range)
)
interval[0] = interval[1] - length
return interval
def _produce_intervals(
    n_estimators, min_interval, max_interval, series_length, rng, method=3
):
    """Private function used to produce intervals for all trees.

    Vectorized counterpart of ``_select_interval``: draws one (start, end)
    row per tree in a single pass over the ensemble.
    """
    intervals = np.empty((n_estimators, 2), dtype=int)
    if method == 0:
        # just keep it as a backup, untested
        intervals[:, 0] = rng.randint(series_length - min_interval, size=n_estimators)
        intervals[:, 1] = rng.randint(
            intervals[:, 0] + min_interval, series_length, size=n_estimators
        )
    elif method == 3:
        # Per-tree coin flip: 1 -> anchor the interval start, 0 -> anchor the end.
        bools = rng.randint(2, size=n_estimators)
        true = np.where(bools == 1)[0]
        intervals[true, 0] = rng.randint(series_length - min_interval, size=true.size)
        interval_range = np.fmin(series_length - intervals[true, 0], max_interval)
        length = rng.randint(min_interval, interval_range)
        intervals[true, 1] = intervals[true, 0] + length
        false = np.where(bools == 0)[0]
        intervals[false, 1] = rng.randint(min_interval, series_length, size=false.size)
        interval_range = np.fmin(intervals[false, 1], max_interval)
        min_mask = interval_range == min_interval
        # Degenerate range: fall back to a fixed length of 3 (mirrors
        # _select_interval); otherwise draw a length per tree.
        length = np.empty(false.size)
        length[min_mask] = 3
        length[~min_mask] = rng.randint(min_interval, interval_range[~min_mask])
        intervals[false, 0] = intervals[false, 1] - length
    # NOTE(review): for any ``method`` other than 0 or 3 the array is
    # returned uninitialized (np.empty) — callers must only pass 0 or 3.
    return intervals
@deprecated(
    version="0.8.1",
    reason="RandomIntervalSpectralForest will be moved or removed in v0.10.0, "
    "to be replaced by the correctly named RandomIntervalSpectralEnsemble",
    category=FutureWarning,
)
class RandomIntervalSpectralForest(ForestClassifier, BaseClassifier):
    """Random Interval Spectral Forest (RISE).

    Input: n series length m
    for each tree
        sample a random intervals
        take the ACF and PS over this interval, and concatenate features
        build tree on new features
    ensemble the trees through averaging probabilities.

    Parameters
    ----------
    n_estimators : int, optional (default=200)
        The number of trees in the forest.
    min_interval : int, optional (default=16)
        The minimum width of an interval.
    acf_lag : int, optional (default=100)
        The maximum number of autocorrelation terms to use.
    acf_min_values : int, optional (default=4)
        Never use fewer than this number of terms to find a correlation.
    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel for both `fit` and `predict`.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    n_classes : int
        The number of classes, extracted from the data.
    n_estimators : array of shape = [n_estimators] of DecisionTree classifiers
    intervals : array of shape = [n_estimators][2]
        Stores indexes of start and end points for all classifiers.

    Notes
    -----
    ..[1] <NAME>, <NAME> and <NAME>, "Time Series Classification
    with HIVE-COTE: The Hierarchical Vote Collective of Transformation-Based Ensembles",
    ACM Transactions on Knowledge and Data Engineering, 12(5): 2018
    https://dl.acm.org/doi/10.1145/3182382

    Java implementation
    https://github.com/uea-machine-learning/tsml/blob/master/src/main/java/tsml/
    classifiers/frequency_based/RISE.java
    """

    # Capability tags
    capabilities = {
        "multivariate": False,
        "unequal_length": False,
        "missing_values": False,
        "train_estimate": False,
        "contractable": False,
    }

    # TO DO: handle missing values, unequal length series and multivariate
    # problems

    def __init__(
        self,
        n_estimators=500,
        max_interval=0,
        min_interval=16,
        acf_lag=100,
        acf_min_values=4,
        n_jobs=None,
        random_state=None,
    ):
        super(RandomIntervalSpectralForest, self).__init__(
            base_estimator=DecisionTreeClassifier(random_state=random_state),
            n_estimators=n_estimators,
        )
        self.n_estimators = n_estimators
        self.max_interval = max_interval
        self.min_interval = min_interval
        self.acf_lag = acf_lag
        self.acf_min_values = acf_min_values
        self.n_jobs = n_jobs
        self.random_state = random_state

        # We need to add is-fitted state when inheriting from scikit-learn
        self._is_fitted = False

    @property
    def feature_importances_(self):
        """Feature importance not supported for the RISE classifier."""
        raise NotImplementedError(
            "The impurity-based feature importances of "
            "RandomIntervalSpectralForest is currently not supported."
        )

    def fit(self, X, y):
        """Build a forest of trees from the training set (X, y).

        using random intervals and spectral features.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_instances,
        series_length] or shape = [n_instances,n_columns]
            The training input samples. If a Pandas data frame is passed it
            must have a single column (i.e., univariate classification).
            RISE has no bespoke method for multivariate classification as yet.
        y : array-like, shape = [n_instances]
            The class labels.

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y, enforce_univariate=True, coerce_to_numpy=True)
        X = X.squeeze(1)

        n_instances, self.series_length = X.shape
        # Clamp the configured interval bounds to the usable range of the
        # series; out-of-range values fall back to defaults.
        self.min_interval_, self.max_interval_ = self.min_interval, self.max_interval
        if self.max_interval_ not in range(1, self.series_length):
            self.max_interval_ = self.series_length
        if self.min_interval_ not in range(1, self.series_length + 1):
            self.min_interval_ = self.series_length // 2

        rng = check_random_state(self.random_state)

        self.estimators_ = []
        self.n_classes = np.unique(y).shape[0]
        self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
        # self.intervals = _produce_intervals(
        #     self.n_estimators,
        #     self.min_interval,
        #     self.max_interval,
        #     self.series_length,
        #     rng
        # )
        # One independently drawn (start, end) pair per tree.
        self.intervals = np.empty((self.n_estimators, 2), dtype=int)
        self.intervals[:] = [
            _select_interval(
                self.min_interval_, self.max_interval_, self.series_length, rng
            )
            for _ in range(self.n_estimators)
        ]

        # Check lag against global properties
        self.acf_lag_ = self.acf_lag
        if self.acf_lag > self.series_length - self.acf_min_values:
            self.acf_lag_ = self.series_length - self.acf_min_values
        # NOTE(review): this tests the user-supplied ``acf_lag`` rather than
        # the adjusted ``acf_lag_``; if the clamp above produced a negative
        # lag it is never reset to 1 — presumably ``self.acf_lag_ < 0`` was
        # intended. Confirm against upstream before changing.
        if self.acf_lag < 0:
            self.acf_lag_ = 1

        self.lags = np.zeros(self.n_estimators, dtype=int)
        # Each tree is a freshly seeded clone of the base decision tree.
        trees = [
            _make_estimator(
                self.base_estimator, random_state=rng.randint(np.iinfo(np.int32).max)
            )
            for _ in range(self.n_estimators)
        ]

        # Parallel loop
        worker_rets = Parallel(n_jobs=self.n_jobs)(
            delayed(_parallel_build_trees)(
                X,
                y,
                tree,
                self.intervals[i],
                self.acf_lag_,
                self.acf_min_values,
            )
            for i, tree in enumerate(trees)
        )

        # Collect lags and newly grown trees
        for i, (lag, tree) in enumerate(worker_rets):
            self.lags[i] = lag
            self.estimators_.append(tree)

        self._is_fitted = True
        return self

    def predict(self, X):
        """Find predictions for all cases in X.

        Built on top of `predict_proba`.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_instances, n_columns]
            The input samples. If a Pandas data frame is passed it must have a
            single column (i.e., univariate classification). RISE has no
            bespoke method for multivariate classification as yet.

        Returns
        -------
        y : array of shape = [n_instances]
            The predicted classes.
        """
        # Pick the class with the highest averaged probability per case.
        proba = self.predict_proba(X)
        return np.asarray([self.classes_[np.argmax(prob)] for prob in proba])

    def predict_proba(self, X):
        """Find probability estimates for each class for all cases in X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_instances, n_columns]
            The input samples. If a Pandas data frame is passed it must have a
            single column (i.e., univariate classification). RISE has no
            bespoke method for multivariate classification as yet.

        Attributes
        ----------
        n_instances : int
            Number of cases to classify.
        n_columns : int
            Number of attributes in X, must match `series_length` determined
            in `fit`.

        Returns
        -------
        output : array of shape = [n_instances, n_classes]
            The class probabilities of all cases.
        """
        # Check data
        self.check_is_fitted()
        X = check_X(X, enforce_univariate=True, coerce_to_numpy=True)
        X = X.squeeze(1)

        n_instances, n_columns = X.shape
        # NOTE(review): raises TypeError here, while the replacement
        # RandomIntervalSpectralEnsemble raises ValueError for the same check.
        if n_columns != self.series_length:
            raise TypeError(
                "ERROR number of attributes in the train does not match "
                "that in the test data."
            )

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs)(
            delayed(_predict_proba_for_estimator)(
                X,
                self.estimators_[i],
                self.intervals[i],
                self.lags[i],
            )
            for i in range(self.n_estimators)
        )

        # Average the per-tree probability estimates.
        return np.sum(all_proba, axis=0) / self.n_estimators
class RandomIntervalSpectralEnsemble(BaseClassifier):
    """Random Interval Spectral Ensemble (RISE).

    Input: n series length m
    For each tree
        - sample a random intervals
        - take the ACF and PS over this interval, and concatenate features
        - build tree on new features
    Ensemble the trees through averaging probabilities.

    Parameters
    ----------
    n_estimators : int, default=200
        The number of trees in the forest.
    min_interval : int, default=16
        The minimum width of an interval.
    acf_lag : int, default=100
        The maximum number of autocorrelation terms to use.
    acf_min_values : int, default=4
        Never use fewer than this number of terms to find a correlation.
    n_jobs : int, default=1
        The number of jobs to run in parallel for both `fit` and `predict`.
        ``-1`` means using all processors.
    random_state : int, RandomState instance or None, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    n_classes_ : int
        The number of classes.
    classes_ : list
        The classes labels.
    intervals_ : array of shape = [n_estimators][2]
        Stores indexes of start and end points for all classifiers.

    Notes
    -----
    For the Java version, see
    `TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java/tsml/
    classifiers/interval_based/RISE.java>`_.

    References
    ----------
    .. [1] <NAME>, <NAME> and <NAME>, "Time Series Classification
       with HIVE-COTE: The Hierarchical Vote Collective of Transformation-Based
       Ensembles", ACM Transactions on Knowledge and Data Engineering, 12(5): 2018

    Examples
    --------
    >>> from sktime.classification.interval_based import RandomIntervalSpectralEnsemble
    >>> from sktime.datasets import load_unit_test
    >>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
    >>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
    >>> clf = RandomIntervalSpectralEnsemble(n_estimators=10)
    >>> clf.fit(X_train, y_train)
    RandomIntervalSpectralEnsemble(...)
    >>> y_pred = clf.predict(X_test)
    """

    _tags = {
        "capability:multithreading": True,
    }

    def __init__(
        self,
        n_estimators=500,
        max_interval=0,
        min_interval=16,
        acf_lag=100,
        acf_min_values=4,
        n_jobs=1,
        random_state=None,
    ):
        self.n_estimators = n_estimators
        self.max_interval = max_interval
        self.min_interval = min_interval
        self.acf_lag = acf_lag
        self.acf_min_values = acf_min_values
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.intervals_ = []

        self.base_estimator = DecisionTreeClassifier(random_state=random_state)

        super(RandomIntervalSpectralEnsemble, self).__init__()

    @property
    def feature_importances_(self):
        """Feature importance not supported for the RISE classifier."""
        raise NotImplementedError(
            "The impurity-based feature importances of "
            "RandomIntervalSpectralForest is currently not supported."
        )

    def _fit(self, X, y):
        """Build a forest of trees from the training set (X, y).

        using random intervals and spectral features.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_instances,
        series_length] or shape = [n_instances,n_columns]
            The training input samples. If a Pandas data frame is passed it
            must have a single column (i.e., univariate classification).
            RISE has no bespoke method for multivariate classification as yet.
        y : array-like, shape = [n_instances]
            The class labels.

        Returns
        -------
        self : object
        """
        X = X.squeeze(1)

        n_instances, self.series_length = X.shape
        # Clamp the configured interval bounds to the usable range of the
        # series; out-of-range values fall back to defaults.
        self.min_interval_, self.max_interval_ = self.min_interval, self.max_interval
        if self.max_interval_ not in range(1, self.series_length):
            self.max_interval_ = self.series_length
        if self.min_interval_ not in range(1, self.series_length + 1):
            self.min_interval_ = self.series_length // 2

        rng = check_random_state(self.random_state)

        self.estimators_ = []
        # self.intervals = _produce_intervals(
        #     self.n_estimators,
        #     self.min_interval,
        #     self.max_interval,
        #     self.series_length,
        #     rng
        # )
        # One independently drawn (start, end) pair per tree.
        self.intervals_ = np.empty((self.n_estimators, 2), dtype=int)
        self.intervals_[:] = [
            _select_interval(
                self.min_interval_, self.max_interval_, self.series_length, rng
            )
            for _ in range(self.n_estimators)
        ]

        # Check lag against global properties
        self.acf_lag_ = self.acf_lag
        if self.acf_lag > self.series_length - self.acf_min_values:
            self.acf_lag_ = self.series_length - self.acf_min_values
        # NOTE(review): this tests the user-supplied ``acf_lag`` rather than
        # the adjusted ``acf_lag_``; if the clamp above produced a negative
        # lag it is never reset to 1 — presumably ``self.acf_lag_ < 0`` was
        # intended. Confirm against upstream before changing.
        if self.acf_lag < 0:
            self.acf_lag_ = 1

        self.lags = np.zeros(self.n_estimators, dtype=int)
        # Each tree is a freshly seeded clone of the base decision tree.
        trees = [
            _make_estimator(
                self.base_estimator, random_state=rng.randint(np.iinfo(np.int32).max)
            )
            for _ in range(self.n_estimators)
        ]

        # Parallel loop
        # ``_threads_to_use`` is presumably derived from ``n_jobs`` by the
        # BaseClassifier multithreading machinery — not set in this class.
        worker_rets = Parallel(n_jobs=self._threads_to_use)(
            delayed(_parallel_build_trees)(
                X,
                y,
                tree,
                self.intervals_[i],
                self.acf_lag_,
                self.acf_min_values,
            )
            for i, tree in enumerate(trees)
        )

        # Collect lags and newly grown trees
        for i, (lag, tree) in enumerate(worker_rets):
            self.lags[i] = lag
            self.estimators_.append(tree)

        return self

    def _predict(self, X):
        """Find predictions for all cases in X.

        Built on top of `predict_proba`.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_instances, n_columns]
            The input samples. If a Pandas data frame is passed it must have a
            single column (i.e., univariate classification). RISE has no
            bespoke method for multivariate classification as yet.

        Returns
        -------
        y : array of shape = [n_instances]
            The predicted classes.
        """
        # Pick the class with the highest averaged probability per case.
        proba = self._predict_proba(X)
        return np.asarray([self.classes_[np.argmax(prob)] for prob in proba])

    def _predict_proba(self, X):
        """Find probability estimates for each class for all cases in X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_instances, n_columns]
            The input samples. If a Pandas data frame is passed it must have a
            single column (i.e., univariate classification). RISE has no
            bespoke method for multivariate classification as yet.

        Attributes
        ----------
        n_instances : int
            Number of cases to classify.
        n_columns : int
            Number of attributes in X, must match `series_length` determined
            in `fit`.

        Returns
        -------
        output : array of shape = [n_instances, n_classes]
            The class probabilities of all cases.
        """
        X = X.squeeze(1)

        n_instances, n_columns = X.shape
        if n_columns != self.series_length:
            raise ValueError(
                "ERROR number of attributes in the train does not match "
                "that in the test data."
            )

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self._threads_to_use)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs)(
            delayed(_predict_proba_for_estimator)(
                X,
                self.estimators_[i],
                self.intervals_[i],
                self.lags[i],
            )
            for i in range(self.n_estimators)
        )

        # Average the per-tree probability estimates.
        return np.sum(all_proba, axis=0) / self.n_estimators
@jit(parallel=True, cache=True, nopython=True)
def acf(x, max_lag):
    """Autocorrelation function transform.
    currently calculated using standard stats method. We could use inverse of power
    spectrum, especially given we already have found it, worth testing for speed and
    correctness. HOWEVER, for long series, it may not give much benefit, as we do not
    use that many ACF terms.
    Parameters
    ----------
    x : array-like shape = [interval_width]
    max_lag: int
        The number of ACF terms to find.
    Returns
    -------
    y : array-like shape = [max_lag]
    """
    y = np.empty(max_lag)
    length = len(x)
    # Each lag is independent of the others, so numba may run the iterations
    # of this prange loop in parallel.
    for lag in prange(1, max_lag + 1):
        # Do it ourselves to avoid zero variance warnings
        lag_length = length - lag
        # x1: leading window, x2: window shifted forward by `lag`.
        x1, x2 = x[:-lag], x[lag:]
        s1 = np.sum(x1)
        s2 = np.sum(x2)
        m1 = s1 / lag_length
        m2 = s2 / lag_length
        ss1 = np.sum(x1 * x1)
        ss2 = np.sum(x2 * x2)
        # ss - s*m == sum((xi - m)^2), i.e. lag_length times the biased variance.
        v1 = ss1 - s1 * m1
        v2 = ss2 - s2 * m2
        v1_is_zero, v2_is_zero = v1 <= 1e-9, v2 <= 1e-9
        if v1_is_zero and v2_is_zero:  # Both zero variance,
            # so must be 100% correlated
            y[lag - 1] = 1
        elif v1_is_zero or v2_is_zero:  # One zero variance
            # the other not
            y[lag - 1] = 0
        else:
            # Pearson correlation of the two lagged windows.
            y[lag - 1] = np.sum((x1 - m1) * (x2 - m2)) / np.sqrt(v1 * v2)
        # _x = np.vstack((x[:-lag], x[lag:]))
        # s = np.sum(_x, axis=1)
        # ss = np.sum(_x * _x, axis=1)
        # v = ss - s * s / l
        # zero_variances = v <= 1e-9
        # i = lag - 1
        # if np.all(zero_variances):  # Both zero variance,
        #     # so must be 100% correlated
        #     y[i] = 1
        # elif np.any(zero_variances):  # One zero variance
        #     # the other not
        #     y[i] = 0
        # else:
        #     m = _x - s.reshape(2, 1) / l
        #     y[i] = (m[0] @ m[1]) / np.sqrt(np.prod(v))
    return y
# y[lag - 1] = np.corrcoef(x[lag:], x[:-lag])[0][1]
# if np.isnan(y[lag - 1]) or np.isinf(y[lag-1]):
# y[lag-1]=0
# @jit(parallel=True, cache=True, nopython=True)
# def _acf(x, max_lag):
# y = np.empty(max_lag)
# length = len(x)
# n = length - np.arange(1, max_lag + 1)
# # _x = np.array([x[:-1], x[:0:-1]])
# # from_end_to_lag = slice(-1, -max_lag - 1, -1)
# # cs = np.cumsum(_x, axis=1)[:, from_end_to_lag]
# # cm = cs / n
# # css = np.cumsum(_x * _x, axis=1)[:, from_end_to_lag]
# # cv = css - cs
#
# a, b = x[:-1], x[:0:-1]
# from_end_to_lag = slice(-1, -max_lag - 1, -1)
# cs1 = np.cumsum(a)[from_end_to_lag] / n
# cs2 = np.cumsum(b)[from_end_to_lag] / n
# css1 = np.cumsum(a * a)[from_end_to_lag] / n
# css2 = np.cumsum(b * b)[from_end_to_lag] / n
# cv1 = css1 - cs1 * cs1
# cv2 = css2 - cs2 * cs2
# covar = cv1 * cv2
#
# for lag in prange(1, max_lag + 1):
# idx = lag - 1
# m1, m2, l = cs1[idx], cs2[idx], n[idx]
# y[idx] = np.sum((x[:-lag] - m1) * (x[lag:] - m2)) / l
# # both_zero = (cv1 <= 1e-9) & (cv2 <= 1e-9)
# # one_zero = (cv1 <= 1e-9) ^ (cv2 <= 1e-9)
# cv1_is_zero, cv2_is_zero = cv1 <= 1e-9, cv2 <= 1e-9
# non_zero = ~cv1_is_zero & ~cv2_is_zero
# y[cv1_is_zero & cv2_is_zero] = 1 # Both zero variance,
# # so must be 100% correlated
# y[cv1_is_zero ^ cv2_is_zero] = 0 # One zero variance
# # the other not
# y[non_zero] /= np.sqrt(covar[non_zero])
#
# return y
# @jit(parallel=True, cache=True, nopython=True)
def matrix_acf(x, num_cases, max_lag):
    """Autocorrelation function transform, vectorized over cases.

    Calculated using standard stats method. We could use inverse of power
    spectrum, especially given we already have found it, worth testing for speed and
    correctness. HOWEVER, for long series, it may not give much benefit, as we do not
    use that many ACF terms.

    Parameters
    ----------
    x : array-like shape = [num_cases, interval_width]
    num_cases : int
        Number of rows (cases) in x.
    max_lag: int
        The number of ACF terms to find.

    Returns
    -------
    y : array-like shape = [num_cases,max_lag]
    """
    y = np.empty(shape=(num_cases, max_lag))
    length = x.shape[1]
    # This function is NOT numba-jitted (decorator commented out above), so
    # numba.prange would degenerate to a plain serial loop here anyway; use
    # the builtin range and avoid the needless numba coupling. `range` is
    # equally valid in nopython mode if the decorator is ever re-enabled.
    for lag in range(1, max_lag + 1):
        lag_length = length - lag
        # x1: leading windows, x2: windows shifted forward by `lag` (per row).
        x1, x2 = x[:, :-lag], x[:, lag:]
        s1 = np.sum(x1, axis=1)
        s2 = np.sum(x2, axis=1)
        m1 = s1 / lag_length
        m2 = s2 / lag_length
        s12 = np.sum(x1 * x2, axis=1)
        ss1 = np.sum(x1 * x1, axis=1)
        ss2 = np.sum(x2 * x2, axis=1)
        # ss - s*m = lag_length * variance; s12 - s1*m2 = lag_length * covariance.
        v1 = ss1 - s1 * m1
        v2 = ss2 - s2 * m2
        v12 = s12 - s1 * m2
        v1_is_zero, v2_is_zero = v1 <= 1e-9, v2 <= 1e-9
        non_zero = ~v1_is_zero & ~v2_is_zero
        y[v1_is_zero & v2_is_zero, lag - 1] = 1  # Both zero variance,
        # so must be 100% correlated
        y[v1_is_zero ^ v2_is_zero, lag - 1] = 0  # One zero variance
        # the other not
        var = (v1 * v2)[non_zero]
        y[non_zero, lag - 1] = v12[non_zero] / np.sqrt(var)
    return y
def ps(x, sign=1, n=None, pad="mean"):
    """Power spectrum transformer.

    Power spectrum transform, currently calculated using np function.
    It would be worth looking at ff implementation, see difference in speed
    to java.

    Parameters
    ----------
    x : array-like shape = [interval_width]
    sign : {-1, 1}, default = 1
        Negative values request the inverse-style scaling (divide by n
        before the FFT).
    n : int, default=None
        FFT length; when None and len(x) is not a power of 2, the nearest
        power of two is used.
    pad : str or function, default='mean'
        controls the mode of the pad function
        see numpy.pad for more details
        https://numpy.org/doc/stable/reference/generated/numpy.pad.html

    Returns
    -------
    y : array-like shape = [len(x)/2]
    """
    x_len = x.shape[-1]
    x_is_1d = x.ndim == 1
    # pad or slice series if length is not of power of 2 or n is specified
    if x_len & (x_len - 1) != 0 or n:
        # round n (or the length of x) to next power of 2
        # when n is not specified
        if not n:
            n = _round_to_nearest_power_of_two(x_len)
        # pad series up to n when n is larger otherwise slice series up to n
        if n > x_len:
            pad_length = (0, n - x_len) if x_is_1d else ((0, 0), (0, n - x_len))
            x_in_power_2 = np.pad(x, pad_length, mode=pad)
        else:
            x_in_power_2 = x[:n] if x_is_1d else x[:, :n]
    else:
        x_in_power_2 = x
    # use sign to determine inverse or normal fft
    # using the norm in numpy fft function
    # backward = normal fft, forward = inverse fft (divide by n after fft)
    # note: use the following code when upgrade numpy to 1.20
    # norm = "backward" if sign > 0 else "forward"
    # fft = np.fft.rfft(x_in_power_2, norm=norm)
    if sign < 0:
        # BUGFIX: divide out-of-place. `x_in_power_2` may alias the caller's
        # array (or be a view of it when sliced), so the previous in-place
        # `/=` silently mutated the input. Fall back to the actual FFT
        # length when `n` was never set (previously `/= None` raised).
        x_in_power_2 = x_in_power_2 / (n if n else x_in_power_2.shape[-1])
    fft = np.fft.rfft(x_in_power_2)
    fft = fft[:-1] if x_is_1d else fft[:, :-1]
    return np.abs(fft)
@jit("int64(int64)", cache=True, nopython=True)
def _round_to_nearest_power_of_two(n):
    # 2 ** round(log2(n)): the power of two nearest to n in log space.
    # Used by `ps` to pick an FFT length when none is given.
    return int64(1 << round(np.log2(n)))
| [
"numpy.fft.rfft",
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"numpy.empty",
"numpy.iinfo",
"sklearn.tree.DecisionTreeClassifier",
"numba.prange",
"sklearn.base.clone",
"numpy.unique",
"numpy.pad",
"sktime.utils.validation.panel.check_X",
"sklearn.ensemble._base._partition_estimators",
"nump... | [((5121, 5327), 'deprecated.sphinx.deprecated', 'deprecated', ([], {'version': '"""0.8.1"""', 'reason': '"""RandomIntervalSpectralForest will be moved or removed in v0.10.0, to be replaced by the correctly named RandomIntervalSpectralEnsemble"""', 'category': 'FutureWarning'}), "(version='0.8.1', reason=\n 'RandomIntervalSpectralForest will be moved or removed in v0.10.0, to be replaced by the correctly named RandomIntervalSpectralEnsemble'\n , category=FutureWarning)\n", (5131, 5327), False, 'from deprecated.sphinx import deprecated\n'), ((22685, 22730), 'numba.jit', 'jit', ([], {'parallel': '(True)', 'cache': '(True)', 'nopython': '(True)'}), '(parallel=True, cache=True, nopython=True)\n', (22688, 22730), False, 'from numba import int64, jit, prange\n'), ((30028, 30074), 'numba.jit', 'jit', (['"""int64(int64)"""'], {'cache': '(True)', 'nopython': '(True)'}), "('int64(int64)', cache=True, nopython=True)\n", (30031, 30074), False, 'from numba import int64, jit, prange\n'), ((1116, 1150), 'numpy.empty', 'np.empty', ([], {'shape': '(n_instances, lag)'}), '(shape=(n_instances, lag))\n', (1124, 1150), True, 'import numpy as np\n'), ((1233, 1270), 'numpy.empty', 'np.empty', ([], {'shape': '(n_instances, ps_len)'}), '(shape=(n_instances, ps_len))\n', (1241, 1270), True, 'import numpy as np\n'), ((1610, 1647), 'numpy.concatenate', 'np.concatenate', (['(ps_x, acf_x)'], {'axis': '(1)'}), '((ps_x, acf_x), axis=1)\n', (1624, 1647), True, 'import numpy as np\n'), ((2617, 2638), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (2622, 2638), False, 'from sklearn.base import clone\n'), ((2889, 2911), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': 'int'}), '(2, dtype=int)\n', (2897, 2911), True, 'import numpy as np\n'), ((3928, 3966), 'numpy.empty', 'np.empty', (['(n_estimators, 2)'], {'dtype': 'int'}), '((n_estimators, 2), dtype=int)\n', (3936, 3966), True, 'import numpy as np\n'), ((23291, 23308), 'numpy.empty', 'np.empty', (['max_lag'], 
{}), '(max_lag)\n', (23299, 23308), True, 'import numpy as np\n'), ((23344, 23366), 'numba.prange', 'prange', (['(1)', '(max_lag + 1)'], {}), '(1, max_lag + 1)\n', (23350, 23366), False, 'from numba import int64, jit, prange\n'), ((26866, 26902), 'numpy.empty', 'np.empty', ([], {'shape': '(num_cases, max_lag)'}), '(shape=(num_cases, max_lag))\n', (26874, 26902), True, 'import numpy as np\n'), ((26942, 26964), 'numba.prange', 'prange', (['(1)', '(max_lag + 1)'], {}), '(1, max_lag + 1)\n', (26948, 26964), False, 'from numba import int64, jit, prange\n'), ((29929, 29954), 'numpy.fft.rfft', 'np.fft.rfft', (['x_in_power_2'], {}), '(x_in_power_2)\n', (29940, 29954), True, 'import numpy as np\n'), ((30013, 30024), 'numpy.abs', 'np.abs', (['fft'], {}), '(fft)\n', (30019, 30024), True, 'import numpy as np\n'), ((9530, 9592), 'sktime.utils.validation.panel.check_X_y', 'check_X_y', (['X', 'y'], {'enforce_univariate': '(True)', 'coerce_to_numpy': '(True)'}), '(X, y, enforce_univariate=True, coerce_to_numpy=True)\n', (9539, 9592), False, 'from sktime.utils.validation.panel import check_X, check_X_y\n'), ((10017, 10054), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (10035, 10054), False, 'from sklearn.utils.validation import check_random_state\n'), ((10447, 10490), 'numpy.empty', 'np.empty', (['(self.n_estimators, 2)'], {'dtype': 'int'}), '((self.n_estimators, 2), dtype=int)\n', (10455, 10490), True, 'import numpy as np\n'), ((11001, 11039), 'numpy.zeros', 'np.zeros', (['self.n_estimators'], {'dtype': 'int'}), '(self.n_estimators, dtype=int)\n', (11009, 11039), True, 'import numpy as np\n'), ((13398, 13455), 'sktime.utils.validation.panel.check_X', 'check_X', (['X'], {'enforce_univariate': '(True)', 'coerce_to_numpy': '(True)'}), '(X, enforce_univariate=True, coerce_to_numpy=True)\n', (13405, 13455), False, 'from sktime.utils.validation.panel import check_X, check_X_y\n'), ((13789, 13842), 
'sklearn.ensemble._base._partition_estimators', '_partition_estimators', (['self.n_estimators', 'self.n_jobs'], {}), '(self.n_estimators, self.n_jobs)\n', (13810, 13842), False, 'from sklearn.ensemble._base import _partition_estimators\n'), ((17221, 17270), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (17243, 17270), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((18735, 18772), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (18753, 18772), False, 'from sklearn.utils.validation import check_random_state\n'), ((19040, 19083), 'numpy.empty', 'np.empty', (['(self.n_estimators, 2)'], {'dtype': 'int'}), '((self.n_estimators, 2), dtype=int)\n', (19048, 19083), True, 'import numpy as np\n'), ((19595, 19633), 'numpy.zeros', 'np.zeros', (['self.n_estimators'], {'dtype': 'int'}), '(self.n_estimators, dtype=int)\n', (19603, 19633), True, 'import numpy as np\n'), ((22244, 22306), 'sklearn.ensemble._base._partition_estimators', '_partition_estimators', (['self.n_estimators', 'self._threads_to_use'], {}), '(self.n_estimators, self._threads_to_use)\n', (22265, 22306), False, 'from sklearn.ensemble._base import _partition_estimators\n'), ((23508, 23518), 'numpy.sum', 'np.sum', (['x1'], {}), '(x1)\n', (23514, 23518), True, 'import numpy as np\n'), ((23532, 23542), 'numpy.sum', 'np.sum', (['x2'], {}), '(x2)\n', (23538, 23542), True, 'import numpy as np\n'), ((23615, 23630), 'numpy.sum', 'np.sum', (['(x1 * x1)'], {}), '(x1 * x1)\n', (23621, 23630), True, 'import numpy as np\n'), ((23645, 23660), 'numpy.sum', 'np.sum', (['(x2 * x2)'], {}), '(x2 * x2)\n', (23651, 23660), True, 'import numpy as np\n'), ((27269, 27287), 'numpy.sum', 'np.sum', (['x1'], {'axis': '(1)'}), '(x1, axis=1)\n', (27275, 27287), True, 'import numpy as np\n'), ((27301, 27319), 'numpy.sum', 'np.sum', (['x2'], {'axis': '(1)'}), '(x2, 
axis=1)\n', (27307, 27319), True, 'import numpy as np\n'), ((27392, 27415), 'numpy.sum', 'np.sum', (['(x1 * x2)'], {'axis': '(1)'}), '(x1 * x2, axis=1)\n', (27398, 27415), True, 'import numpy as np\n'), ((27430, 27453), 'numpy.sum', 'np.sum', (['(x1 * x1)'], {'axis': '(1)'}), '(x1 * x1, axis=1)\n', (27436, 27453), True, 'import numpy as np\n'), ((27468, 27491), 'numpy.sum', 'np.sum', (['(x2 * x2)'], {'axis': '(1)'}), '(x2 * x2, axis=1)\n', (27474, 27491), True, 'import numpy as np\n'), ((4468, 4525), 'numpy.fmin', 'np.fmin', (['(series_length - intervals[true, 0])', 'max_interval'], {}), '(series_length - intervals[true, 0], max_interval)\n', (4475, 4525), True, 'import numpy as np\n'), ((4796, 4838), 'numpy.fmin', 'np.fmin', (['intervals[false, 1]', 'max_interval'], {}), '(intervals[false, 1], max_interval)\n', (4803, 4838), True, 'import numpy as np\n'), ((4906, 4926), 'numpy.empty', 'np.empty', (['false.size'], {}), '(false.size)\n', (4914, 4926), True, 'import numpy as np\n'), ((11291, 11319), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (11299, 11319), False, 'from joblib import Parallel, delayed\n'), ((13888, 13911), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (13896, 13911), False, 'from joblib import Parallel, delayed\n'), ((14171, 14196), 'numpy.sum', 'np.sum', (['all_proba'], {'axis': '(0)'}), '(all_proba, axis=0)\n', (14177, 14196), True, 'import numpy as np\n'), ((19885, 19922), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self._threads_to_use'}), '(n_jobs=self._threads_to_use)\n', (19893, 19922), False, 'from joblib import Parallel, delayed\n'), ((22352, 22375), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (22360, 22375), False, 'from joblib import Parallel, delayed\n'), ((22636, 22661), 'numpy.sum', 'np.sum', (['all_proba'], {'axis': '(0)'}), '(all_proba, axis=0)\n', (22642, 22661), True, 'import numpy as np\n'), ((28049, 28061), 
'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (28056, 28061), True, 'import numpy as np\n'), ((29407, 29438), 'numpy.pad', 'np.pad', (['x', 'pad_length'], {'mode': 'pad'}), '(x, pad_length, mode=pad)\n', (29413, 29438), True, 'import numpy as np\n'), ((4332, 4352), 'numpy.where', 'np.where', (['(bools == 1)'], {}), '(bools == 1)\n', (4340, 4352), True, 'import numpy as np\n'), ((4659, 4679), 'numpy.where', 'np.where', (['(bools == 0)'], {}), '(bools == 0)\n', (4667, 4679), True, 'import numpy as np\n'), ((8063, 8112), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (8085, 8112), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((10111, 10123), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (10120, 10123), True, 'import numpy as np\n'), ((30142, 30152), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (30149, 30152), True, 'import numpy as np\n'), ((11333, 11363), 'joblib.delayed', 'delayed', (['_parallel_build_trees'], {}), '(_parallel_build_trees)\n', (11340, 11363), False, 'from joblib import Parallel, delayed\n'), ((12474, 12489), 'numpy.argmax', 'np.argmax', (['prob'], {}), '(prob)\n', (12483, 12489), True, 'import numpy as np\n'), ((13925, 13962), 'joblib.delayed', 'delayed', (['_predict_proba_for_estimator'], {}), '(_predict_proba_for_estimator)\n', (13932, 13962), False, 'from joblib import Parallel, delayed\n'), ((19936, 19966), 'joblib.delayed', 'delayed', (['_parallel_build_trees'], {}), '(_parallel_build_trees)\n', (19943, 19966), False, 'from joblib import Parallel, delayed\n'), ((21049, 21064), 'numpy.argmax', 'np.argmax', (['prob'], {}), '(prob)\n', (21058, 21064), True, 'import numpy as np\n'), ((22389, 22426), 'joblib.delayed', 'delayed', (['_predict_proba_for_estimator'], {}), '(_predict_proba_for_estimator)\n', (22396, 22426), False, 'from joblib import Parallel, delayed\n'), ((24054, 24083), 'numpy.sum', 'np.sum', (['((x1 - m1) * (x2 - 
m2))'], {}), '((x1 - m1) * (x2 - m2))\n', (24060, 24083), True, 'import numpy as np\n'), ((24086, 24102), 'numpy.sqrt', 'np.sqrt', (['(v1 * v2)'], {}), '(v1 * v2)\n', (24093, 24102), True, 'import numpy as np\n'), ((10176, 10189), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (10186, 10189), True, 'import numpy as np\n'), ((11150, 11168), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (11158, 11168), True, 'import numpy as np\n'), ((19744, 19762), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (19752, 19762), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# --------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import PointStamped
import tf
from numpy import array, vstack, delete
from functions import gridValue, informationGain
from sklearn.cluster import MeanShift
from rrt_exploration.msg import PointArray
# Subscribers' callbacks------------------------------
# Shared module state, written by the subscriber callbacks below and read by
# the main loop in node().
mapData = OccupancyGrid()  # latest merged map received on the map topic
frontiers = []  # accumulated detected frontier points, in the global map frame
globalmaps = []  # one global costmap message per robot, indexed by robot number
def callBack(data, args):
    """Accumulate a detected frontier point published on the goals topic.

    The point is transformed into the global map frame and appended to the
    module-level `frontiers` array.

    Parameters
    ----------
    data : geometry_msgs/PointStamped
        Candidate frontier point.
    args : list
        ``[tf_listener, target_frame_id]`` as registered via
        ``callback_args`` in node().
    """
    # BUGFIX: dropped the stray `min_distance` from the global statement; it
    # is never defined or used anywhere in this file.
    global frontiers
    transformedPoint = args[0].transformPoint(args[1], data)
    x = [array([transformedPoint.point.x, transformedPoint.point.y])]
    if len(frontiers) > 0:
        frontiers = vstack((frontiers, x))
    else:
        frontiers = x
def mapCallBack(data):
    """Cache the latest occupancy grid received on the map topic."""
    global mapData
    mapData = data
def globalMap(data):
    """Store the latest global costmap, indexed by the robot that sent it.

    With multiple robots the index is recovered from the publishing topic's
    name; with a single robot index 0 is used.
    """
    global global1, globalmaps, litraIndx, namespace_init_count, n_robots
    global1 = data
    if n_robots > 1:
        # The single character of the topic name right after the namespace
        # prefix (position litraIndx == len(namespace)) is assumed to be the
        # robot number -- NOTE(review): this only works for single-digit
        # robot ids; confirm against the launch-file topic naming.
        indx = int(data._connection_header['topic']
                   [litraIndx])-namespace_init_count
    elif n_robots == 1:
        indx = 0
    # NOTE(review): if n_robots < 1, `indx` is never assigned and this raises.
    globalmaps[indx] = data
# Node----------------------------------------------
def node():
    """Main entry point of the frontier-filter node.

    Subscribes to the map, each robot's global costmap and the detected
    frontier points; in a loop, clusters the accumulated frontiers with
    MeanShift, discards centroids that sit on costmap obstacles or carry
    almost no information gain, and publishes the survivors both as RViz
    markers and as a PointArray for downstream assignment.
    """
    global frontiers, mapData, global1, global2, global3, globalmaps, litraIndx, n_robots, namespace_init_count
    rospy.init_node('filter', anonymous=False)
    # fetching all parameters
    map_topic = rospy.get_param('~map_topic', '/map')
    # costmap cells above this occupancy value are treated as obstacles
    threshold = rospy.get_param('~costmap_clearing_threshold', 70)
    # this can be smaller than the laser scanner range, >> smaller >>less computation time>> too small is not good, info gain won't be accurate
    info_radius = rospy.get_param('~info_radius', 1.0)
    goals_topic = rospy.get_param('~goals_topic', '/detected_points')
    n_robots = rospy.get_param('~n_robots', 1)
    namespace = rospy.get_param('~namespace', '')
    namespace_init_count = rospy.get_param('namespace_init_count', 1)
    rateHz = rospy.get_param('~rate', 100)
    global_costmap_topic = rospy.get_param(
        '~global_costmap_topic', '/move_base/global_costmap/costmap')
    robot_frame = rospy.get_param('~robot_frame', 'base_link')
    litraIndx = len(namespace)
    rate = rospy.Rate(rateHz)
    # -------------------------------------------
    rospy.Subscriber(map_topic, OccupancyGrid, mapCallBack)
    # ---------------------------------------------------------------------------------------------------------------
    # One costmap placeholder and subscription per robot; topics are
    # namespaced per robot when a namespace prefix is configured.
    for i in range(0, n_robots):
        globalmaps.append(OccupancyGrid())
    if len(namespace) > 0:
        for i in range(0, n_robots):
            rospy.Subscriber(namespace+str(i+namespace_init_count) +
                             global_costmap_topic, OccupancyGrid, globalMap)
    elif len(namespace) == 0:
        rospy.Subscriber(global_costmap_topic, OccupancyGrid, globalMap)
    # wait if map is not received yet
    while (len(mapData.data) < 1):
        rospy.loginfo('Waiting for the map')
        rospy.sleep(0.1)
        pass
    # wait if any of robots' global costmap map is not received yet
    for i in range(0, n_robots):
        while (len(globalmaps[i].data) < 1):
            rospy.loginfo('Waiting for the global costmap')
            rospy.sleep(0.1)
            pass
    global_frame = "/"+mapData.header.frame_id
    tfLisn = tf.TransformListener()
    # Block until the transform from every robot's base frame to the global
    # map frame is available.
    if len(namespace) > 0:
        for i in range(0, n_robots):
            tfLisn.waitForTransform(global_frame[1:], namespace+str(
                i+namespace_init_count)+'/'+robot_frame, rospy.Time(0), rospy.Duration(10.0))
    elif len(namespace) == 0:
        tfLisn.waitForTransform(
            global_frame[1:], '/'+robot_frame, rospy.Time(0), rospy.Duration(10.0))
    rospy.Subscriber(goals_topic, PointStamped, callback=callBack,
                     callback_args=[tfLisn, global_frame[1:]])
    pub = rospy.Publisher('frontiers', Marker, queue_size=10)
    pub2 = rospy.Publisher('centroids', Marker, queue_size=10)
    filterpub = rospy.Publisher('filtered_points', PointArray, queue_size=10)
    rospy.loginfo("the map and global costmaps are received")
    # wait if no frontier is received yet
    # NOTE(review): busy-wait with no sleep -- burns a CPU core until the
    # first frontier arrives; consider rospy.sleep here.
    while len(frontiers) < 1:
        pass
    points = Marker()
    points_clust = Marker()
    # Set the frame ID and timestamp. See the TF tutorials for information on these.
    points.header.frame_id = mapData.header.frame_id
    points.header.stamp = rospy.Time.now()
    points.ns = "markers2"
    points.id = 0
    points.type = Marker.POINTS
    # Set the marker action for latched frontiers. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
    points.action = Marker.ADD
    points.pose.orientation.w = 1.0
    points.scale.x = 0.2
    points.scale.y = 0.2
    points.color.r = 255.0/255.0
    points.color.g = 255.0/255.0
    points.color.b = 0.0/255.0
    points.color.a = 1
    points.lifetime = rospy.Duration()
    p = Point()
    p.z = 0
    pp = []
    pl = []  # NOTE(review): never used afterwards; candidate for removal
    points_clust.header.frame_id = mapData.header.frame_id
    points_clust.header.stamp = rospy.Time.now()
    points_clust.ns = "markers3"
    points_clust.id = 4
    points_clust.type = Marker.POINTS
    # Set the marker action for centroids. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
    points_clust.action = Marker.ADD
    points_clust.pose.orientation.w = 1.0
    points_clust.scale.x = 0.2
    points_clust.scale.y = 0.2
    points_clust.color.r = 0.0/255.0
    points_clust.color.g = 255.0/255.0
    points_clust.color.b = 0.0/255.0
    points_clust.color.a = 1
    points_clust.lifetime = rospy.Duration()
    temppoint = PointStamped()
    temppoint.header.frame_id = mapData.header.frame_id
    temppoint.header.stamp = rospy.Time(0)
    temppoint.point.z = 0.0
    arraypoints = PointArray()
    tempPoint = Point()
    tempPoint.z = 0.0
    # -------------------------------------------------------------------------
    # --------------------- Main Loop -------------------------------
    # -------------------------------------------------------------------------
    while not rospy.is_shutdown():
        # -------------------------------------------------------------------------
        # Clustering frontier points
        centroids = []
        front = copy(frontiers)
        if len(front) > 1:
            ms = MeanShift(bandwidth=0.3)
            ms.fit(front)
            centroids = ms.cluster_centers_  # centroids array is the centers of each cluster
        # if there is only one frontier no need for clustering, i.e. centroids=frontiers
        if len(front) == 1:
            centroids = front
        frontiers = copy(centroids)
        # -------------------------------------------------------------------------
        # clearing old frontiers
        z = 0
        while z < len(centroids):
            cond = False
            temppoint.point.x = centroids[z][0]
            temppoint.point.y = centroids[z][1]
            for i in range(0, n_robots):
                # Reject the centroid if ANY robot's costmap marks its cell
                # above the clearing threshold.
                transformedPoint = tfLisn.transformPoint(
                    globalmaps[i].header.frame_id, temppoint)
                x = array([transformedPoint.point.x, transformedPoint.point.y])
                cond = (gridValue(globalmaps[i], x) > threshold) or cond
            # ...or if exploring it would yield almost no new information.
            if (cond or (informationGain(mapData, [centroids[z][0], centroids[z][1]], info_radius*0.5)) < 0.2):
                centroids = delete(centroids, (z), axis=0)
                # Stay at the same index after the deletion shifted entries left.
                z = z-1
            z += 1
        # -------------------------------------------------------------------------
        # publishing
        arraypoints.points = []
        for i in centroids:
            tempPoint.x = i[0]
            tempPoint.y = i[1]
            arraypoints.points.append(copy(tempPoint))
        filterpub.publish(arraypoints)
        pp = []
        for q in range(0, len(frontiers)):
            p.x = frontiers[q][0]
            p.y = frontiers[q][1]
            pp.append(copy(p))
        points.points = pp
        pp = []
        for q in range(0, len(centroids)):
            p.x = centroids[q][0]
            p.y = centroids[q][1]
            pp.append(copy(p))
        points_clust.points = pp
        pub.publish(points)
        pub2.publish(points_clust)
        rate.sleep()
# -------------------------------------------------------------------------
# Entry point: run the filter node until ROS shuts it down; a Ctrl-C /
# shutdown-triggered ROSInterruptException exits quietly.
if __name__ == '__main__':
    try:
        node()
    except rospy.ROSInterruptException:
        pass
| [
"rospy.Subscriber",
"rospy.Time",
"rospy.Duration",
"rospy.Time.now",
"rospy.Rate",
"rospy.is_shutdown",
"rospy.init_node",
"rrt_exploration.msg.PointArray",
"functions.gridValue",
"nav_msgs.msg.OccupancyGrid",
"sklearn.cluster.MeanShift",
"rospy.loginfo",
"visualization_msgs.msg.Marker",
... | [((505, 520), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (518, 520), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((1406, 1448), 'rospy.init_node', 'rospy.init_node', (['"""filter"""'], {'anonymous': '(False)'}), "('filter', anonymous=False)\n", (1421, 1448), False, 'import rospy\n'), ((1496, 1533), 'rospy.get_param', 'rospy.get_param', (['"""~map_topic"""', '"""/map"""'], {}), "('~map_topic', '/map')\n", (1511, 1533), False, 'import rospy\n'), ((1550, 1600), 'rospy.get_param', 'rospy.get_param', (['"""~costmap_clearing_threshold"""', '(70)'], {}), "('~costmap_clearing_threshold', 70)\n", (1565, 1600), False, 'import rospy\n'), ((1763, 1799), 'rospy.get_param', 'rospy.get_param', (['"""~info_radius"""', '(1.0)'], {}), "('~info_radius', 1.0)\n", (1778, 1799), False, 'import rospy\n'), ((1818, 1869), 'rospy.get_param', 'rospy.get_param', (['"""~goals_topic"""', '"""/detected_points"""'], {}), "('~goals_topic', '/detected_points')\n", (1833, 1869), False, 'import rospy\n'), ((1885, 1916), 'rospy.get_param', 'rospy.get_param', (['"""~n_robots"""', '(1)'], {}), "('~n_robots', 1)\n", (1900, 1916), False, 'import rospy\n'), ((1933, 1966), 'rospy.get_param', 'rospy.get_param', (['"""~namespace"""', '""""""'], {}), "('~namespace', '')\n", (1948, 1966), False, 'import rospy\n'), ((1994, 2036), 'rospy.get_param', 'rospy.get_param', (['"""namespace_init_count"""', '(1)'], {}), "('namespace_init_count', 1)\n", (2009, 2036), False, 'import rospy\n'), ((2050, 2079), 'rospy.get_param', 'rospy.get_param', (['"""~rate"""', '(100)'], {}), "('~rate', 100)\n", (2065, 2079), False, 'import rospy\n'), ((2107, 2184), 'rospy.get_param', 'rospy.get_param', (['"""~global_costmap_topic"""', '"""/move_base/global_costmap/costmap"""'], {}), "('~global_costmap_topic', '/move_base/global_costmap/costmap')\n", (2122, 2184), False, 'import rospy\n'), ((2212, 2256), 'rospy.get_param', 'rospy.get_param', (['"""~robot_frame"""', '"""base_link"""'], {}), 
"('~robot_frame', 'base_link')\n", (2227, 2256), False, 'import rospy\n'), ((2300, 2318), 'rospy.Rate', 'rospy.Rate', (['rateHz'], {}), '(rateHz)\n', (2310, 2318), False, 'import rospy\n'), ((2369, 2424), 'rospy.Subscriber', 'rospy.Subscriber', (['map_topic', 'OccupancyGrid', 'mapCallBack'], {}), '(map_topic, OccupancyGrid, mapCallBack)\n', (2385, 2424), False, 'import rospy\n'), ((3394, 3416), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (3414, 3416), False, 'import tf\n'), ((3796, 3904), 'rospy.Subscriber', 'rospy.Subscriber', (['goals_topic', 'PointStamped'], {'callback': 'callBack', 'callback_args': '[tfLisn, global_frame[1:]]'}), '(goals_topic, PointStamped, callback=callBack,\n callback_args=[tfLisn, global_frame[1:]])\n', (3812, 3904), False, 'import rospy\n'), ((3932, 3983), 'rospy.Publisher', 'rospy.Publisher', (['"""frontiers"""', 'Marker'], {'queue_size': '(10)'}), "('frontiers', Marker, queue_size=10)\n", (3947, 3983), False, 'import rospy\n'), ((3995, 4046), 'rospy.Publisher', 'rospy.Publisher', (['"""centroids"""', 'Marker'], {'queue_size': '(10)'}), "('centroids', Marker, queue_size=10)\n", (4010, 4046), False, 'import rospy\n'), ((4063, 4124), 'rospy.Publisher', 'rospy.Publisher', (['"""filtered_points"""', 'PointArray'], {'queue_size': '(10)'}), "('filtered_points', PointArray, queue_size=10)\n", (4078, 4124), False, 'import rospy\n'), ((4130, 4187), 'rospy.loginfo', 'rospy.loginfo', (['"""the map and global costmaps are received"""'], {}), "('the map and global costmaps are received')\n", (4143, 4187), False, 'import rospy\n'), ((4288, 4296), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (4294, 4296), False, 'from visualization_msgs.msg import Marker\n'), ((4316, 4324), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (4322, 4324), False, 'from visualization_msgs.msg import Marker\n'), ((4486, 4502), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4500, 4502), False, 'import rospy\n'), ((4956, 
4972), 'rospy.Duration', 'rospy.Duration', ([], {}), '()\n', (4970, 4972), False, 'import rospy\n'), ((4982, 4989), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (4987, 4989), False, 'from geometry_msgs.msg import Point\n'), ((5120, 5136), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5134, 5136), False, 'import rospy\n'), ((5651, 5667), 'rospy.Duration', 'rospy.Duration', ([], {}), '()\n', (5665, 5667), False, 'import rospy\n'), ((5685, 5699), 'geometry_msgs.msg.PointStamped', 'PointStamped', ([], {}), '()\n', (5697, 5699), False, 'from geometry_msgs.msg import PointStamped\n'), ((5785, 5798), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (5795, 5798), False, 'import rospy\n'), ((5846, 5858), 'rrt_exploration.msg.PointArray', 'PointArray', ([], {}), '()\n', (5856, 5858), False, 'from rrt_exploration.msg import PointArray\n'), ((5875, 5882), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (5880, 5882), False, 'from geometry_msgs.msg import Point\n'), ((685, 744), 'numpy.array', 'array', (['[transformedPoint.point.x, transformedPoint.point.y]'], {}), '([transformedPoint.point.x, transformedPoint.point.y])\n', (690, 744), False, 'from numpy import array, vstack, delete\n'), ((793, 815), 'numpy.vstack', 'vstack', (['(frontiers, x)'], {}), '((frontiers, x))\n', (799, 815), False, 'from numpy import array, vstack, delete\n'), ((3009, 3045), 'rospy.loginfo', 'rospy.loginfo', (['"""Waiting for the map"""'], {}), "('Waiting for the map')\n", (3022, 3045), False, 'import rospy\n'), ((3054, 3070), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (3065, 3070), False, 'import rospy\n'), ((6147, 6166), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6164, 6166), False, 'import rospy\n'), ((6328, 6343), 'copy.copy', 'copy', (['frontiers'], {}), '(frontiers)\n', (6332, 6343), False, 'from copy import copy\n'), ((6701, 6716), 'copy.copy', 'copy', (['centroids'], {}), '(centroids)\n', (6705, 6716), False, 'from copy import 
copy\n'), ((2601, 2616), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (2614, 2616), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((2867, 2931), 'rospy.Subscriber', 'rospy.Subscriber', (['global_costmap_topic', 'OccupancyGrid', 'globalMap'], {}), '(global_costmap_topic, OccupancyGrid, globalMap)\n', (2883, 2931), False, 'import rospy\n'), ((3238, 3285), 'rospy.loginfo', 'rospy.loginfo', (['"""Waiting for the global costmap"""'], {}), "('Waiting for the global costmap')\n", (3251, 3285), False, 'import rospy\n'), ((3298, 3314), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (3309, 3314), False, 'import rospy\n'), ((6388, 6412), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': '(0.3)'}), '(bandwidth=0.3)\n', (6397, 6412), False, 'from sklearn.cluster import MeanShift\n'), ((3607, 3620), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (3617, 3620), False, 'import rospy\n'), ((3622, 3642), 'rospy.Duration', 'rospy.Duration', (['(10.0)'], {}), '(10.0)\n', (3636, 3642), False, 'import rospy\n'), ((3754, 3767), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (3764, 3767), False, 'import rospy\n'), ((3769, 3789), 'rospy.Duration', 'rospy.Duration', (['(10.0)'], {}), '(10.0)\n', (3783, 3789), False, 'import rospy\n'), ((7171, 7230), 'numpy.array', 'array', (['[transformedPoint.point.x, transformedPoint.point.y]'], {}), '([transformedPoint.point.x, transformedPoint.point.y])\n', (7176, 7230), False, 'from numpy import array, vstack, delete\n'), ((7444, 7472), 'numpy.delete', 'delete', (['centroids', 'z'], {'axis': '(0)'}), '(centroids, z, axis=0)\n', (7450, 7472), False, 'from numpy import array, vstack, delete\n'), ((7767, 7782), 'copy.copy', 'copy', (['tempPoint'], {}), '(tempPoint)\n', (7771, 7782), False, 'from copy import copy\n'), ((7972, 7979), 'copy.copy', 'copy', (['p'], {}), '(p)\n', (7976, 7979), False, 'from copy import copy\n'), ((8157, 8164), 'copy.copy', 'copy', (['p'], {}), '(p)\n', (8161, 8164), False, 
'from copy import copy\n'), ((7329, 7408), 'functions.informationGain', 'informationGain', (['mapData', '[centroids[z][0], centroids[z][1]]', '(info_radius * 0.5)'], {}), '(mapData, [centroids[z][0], centroids[z][1]], info_radius * 0.5)\n', (7344, 7408), False, 'from functions import gridValue, informationGain\n'), ((7255, 7282), 'functions.gridValue', 'gridValue', (['globalmaps[i]', 'x'], {}), '(globalmaps[i], x)\n', (7264, 7282), False, 'from functions import gridValue, informationGain\n')] |
"""
Conditional VAE
"""
from __future__ import absolute_import
from __future__ import print_function
from six.moves import xrange
import numpy as np
from keras import backend as K
from keras import optimizers
from keras import objectives
from keras.layers import Input, Lambda, Concatenate, Reshape
from keras.models import Model
from .. import objectives as hyp_obj
from ..layers import *
from .tied_vae_qyqz import TiedVAE_qYqZ
class TiedCVAE_qYqZ(TiedVAE_qYqZ):
    """Tied conditional VAE with approximate posteriors q(y) and q(z).

    Extends TiedVAE_qYqZ so that both the encoder and the decoder receive an
    extra conditioning sequence ``r`` (shape (max_seq_length, r_dim)); the
    model is p(x | y, z, r) with approximate posterior q(y, z | x, r).
    """
    def __init__(self, encoder_net, decoder_net,
                 px_cond_form='diag_normal',
                 qy_form='diag_normal',
                 qz_form='diag_normal',
                 py_prior_form='std_normal',
                 pz_prior_form='std_normal',
                 min_kl=0.2):
        # All distribution-form handling lives in the unconditional base
        # class; the only new state here is the conditioning dimension r_dim.
        super(TiedCVAE_qYqZ,self).__init__(
            encoder_net, decoder_net, px_cond_form=px_cond_form,
            qy_form=qy_form, qz_form=qz_form,
            py_prior_form=py_prior_form, pz_prior_form=pz_prior_form,
            min_kl=min_kl)
        self.r_dim=0  # set for real in build() from the encoder input shape
    def build(self, num_samples=1, max_seq_length=None):
        """Infer dimensions from the encoder/decoder nets and assemble the model.

        :param num_samples: number of latent samples drawn per input sequence
        :param max_seq_length: sequence length; taken from the encoder input
            shape when None
        """
        # NOTE(review): internal_input_shapes is an old Keras (1.x) Model
        # attribute; shapes look like (batch, seq, feat), so [-1] is the
        # feature dimension and [-2] the sequence length — confirm against
        # the Keras version pinned by this project.
        self.x_dim=self.encoder_net.internal_input_shapes[0][-1]
        self.r_dim=self.encoder_net.internal_input_shapes[1][-1]
        self.y_dim=self.decoder_net.internal_input_shapes[0][-1]
        self.z_dim=self.decoder_net.internal_input_shapes[1][-1]
        if max_seq_length is None:
            self.max_seq_length=self.encoder_net.internal_input_shapes[0][-2]
        else:
            self.max_seq_length = max_seq_length
        # Encoder and decoder must agree on the conditioning dimension.
        assert(self.r_dim==self.decoder_net.internal_input_shapes[2][-1])
        self.num_samples = num_samples
        self._build_model()
        self._build_loss()
        self.is_compiled = False
    def _build_model(self):
        """Wire encoder -> samplers -> decoder into self.model ([x, r] -> x params)."""
        x=Input(shape=(self.max_seq_length, self.x_dim,))
        r=Input(shape=(self.max_seq_length, self.r_dim,))
        qyz_param=self.encoder_net([x, r])
        # Encoder emits q(y) parameters followed by q(z) parameters; how many
        # tensors each takes depends on the distribution form (2 for
        # diag_normal: mean + logvar; 3 otherwise).
        if self.qz_form == 'diag_normal':
            self.qy_param=qyz_param[:2]
            self.qz_param=qyz_param[2:]
            z = DiagNormalSampler(num_samples=self.num_samples)(self.qz_param)
        else:
            self.qy_param=qyz_param[:3]
            self.qz_param=qyz_param[3:]
            z = NormalSampler(num_samples=self.num_samples)(self.qz_param)
        # y is tied across the sequence: one sample per sequence, broadcast
        # over the time axis by the seq-level samplers.
        if self.qy_form == 'diag_normal':
            y = DiagNormalSamplerFromSeqLevel(seq_length=self.max_seq_length,
                                              num_samples=self.num_samples)(self.qy_param)
        else:
            y = NormalSamplerFromSeqLevel(seq_length=self.max_seq_length,
                                          num_samples=self.num_samples)(self.qy_param)
        # Samplers replicate the batch num_samples times; r must match.
        if self.num_samples > 1:
            r_rep = Repeat(self.num_samples, axis=0)(r)
        else:
            r_rep = r
        x_dec_param=self.decoder_net([y, z, r_rep])
        # hack for keras to work: merge the decoder outputs into one tensor
        # so the model exposes a single output for the custom loss.
        if self.px_cond_form != 'bernoulli':
            if self.px_cond_form == 'normal':
                # Flatten the (x_dim, x_dim) factor per time step before concat.
                x_chol = Reshape((self.max_seq_length, self.x_dim**2))(x_dec_param[2])
                x_dec_param = [x_dec_param[0], x_dec_param[1], x_chol]
            x_dec_param=Concatenate(axis=-1)(x_dec_param)
        self.model=Model([x, r], x_dec_param)
    def fit(self, x_train, r_train, x_val=None, r_val=None,
            optimizer=None,
            sample_weight_train=None, sample_weight_val=None,
            **kwargs):
        """Train on (x, r) pairs; validation data must also provide r."""
        if not self.is_compiled:
            self.compile(optimizer)
        if x_val is not None:
            assert(r_val is not None)
            if sample_weight_val is None:
                x_val = ([x_val, r_val], x_val)
            else:
                x_val = ([x_val, r_val], x_val, sample_weight_val)
        return self.model.fit([x_train, r_train], x_train,
                              sample_weight=sample_weight_train,
                              validation_data=x_val, **kwargs)
    def compute_qyz_x(self, x, r, batch_size):
        """Return the q(y|x,r) and q(z|x,r) parameters for input x."""
        return self.encoder_net.predict([x, r], batch_size=batch_size)
    def compute_px_yz(self, y, z, r, batch_size):
        """Return the p(x|y,z,r) parameters for latent samples (y, z)."""
        return self.decoder_net.predict([y, z, r], batch_size=batch_size)
    def decode_yz(self, y, z, r, batch_size, sample_x=True):
        """Decode latents to x: either the decoder parameters or a sampled x."""
        if y.ndim == 2:
            y=np.expand_dims(y, axis=1)
        if y.shape[1]==1:
            # Tile the per-sequence y over the time axis to match z.
            y=np.tile(y, (1, z.shape[1],1))
        if not sample_x:
            x_param=self.decoder_net.predict([y, z, r], batch_size=batch_size)
            if self.px_cond_form=='bernoulli':
                return x_param
            # Non-Bernoulli outputs were concatenated in _build_model;
            # the first x_dim features are the mean.
            return x_param[:,:,:self.x_dim]
        # Build a small sampling graph on top of the decoder.
        y_input = Input(shape=(self.max_seq_length, self.y_dim,))
        z_input = Input(shape=(self.max_seq_length, self.z_dim,))
        r_input = Input(shape=(self.max_seq_length, self.r_dim,))
        x_param = self.decoder_net([y_input, z_input, r_input])
        if self.px_cond_form == 'bernoulli' :
            x_sampled = BernoulliSampler()(x_param)
        elif self.px_cond_form == 'diag_normal' :
            x_sampled = DiagNormalSampler()(x_param)
        elif self.px_cond_form == 'normal' :
            x_sampled = NormalSampler()(x_param)
        else:
            raise ValueError()
        generator = Model([y_input, z_input, r_input], x_sampled)
        return generator.predict([y, z, r], batch_size=batch_size)
    def generate(self, r, batch_size,sample_x=True):
        """Generate x given conditioning r, sampling y and z from N(0, I)."""
        num_seqs=r.shape[0]
        num_samples=r.shape[1]
        y=np.random.normal(loc=0.,scale=1.,size=(num_seqs, 1, self.y_dim))
        z=np.random.normal(loc=0.,scale=1.,size=(num_seqs, num_samples,self.z_dim))
        return self.decode_yz(y, z, r, batch_size,sample_x)
    def generate_x_g_y(self, y, r, batch_size,sample_x=True):
        """Generate x given y and conditioning r, sampling z from N(0, I)."""
        num_seqs=r.shape[0]
        num_samples=r.shape[1]
        z=np.random.normal(loc=0.,scale=1.,size=(num_seqs, num_samples, self.z_dim))
        return self.decode_yz(y, z, r, batch_size, sample_x)
| [
"numpy.expand_dims",
"keras.models.Model",
"keras.layers.Concatenate",
"numpy.tile",
"numpy.random.normal",
"keras.layers.Input",
"keras.layers.Reshape"
] | [((1795, 1841), 'keras.layers.Input', 'Input', ([], {'shape': '(self.max_seq_length, self.x_dim)'}), '(shape=(self.max_seq_length, self.x_dim))\n', (1800, 1841), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n'), ((1853, 1899), 'keras.layers.Input', 'Input', ([], {'shape': '(self.max_seq_length, self.r_dim)'}), '(shape=(self.max_seq_length, self.r_dim))\n', (1858, 1899), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n'), ((3249, 3275), 'keras.models.Model', 'Model', (['[x, r]', 'x_dec_param'], {}), '([x, r], x_dec_param)\n', (3254, 3275), False, 'from keras.models import Model\n'), ((4673, 4719), 'keras.layers.Input', 'Input', ([], {'shape': '(self.max_seq_length, self.y_dim)'}), '(shape=(self.max_seq_length, self.y_dim))\n', (4678, 4719), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n'), ((4739, 4785), 'keras.layers.Input', 'Input', ([], {'shape': '(self.max_seq_length, self.z_dim)'}), '(shape=(self.max_seq_length, self.z_dim))\n', (4744, 4785), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n'), ((4805, 4851), 'keras.layers.Input', 'Input', ([], {'shape': '(self.max_seq_length, self.r_dim)'}), '(shape=(self.max_seq_length, self.r_dim))\n', (4810, 4851), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n'), ((5279, 5324), 'keras.models.Model', 'Model', (['[y_input, z_input, r_input]', 'x_sampled'], {}), '([y_input, z_input, r_input], x_sampled)\n', (5284, 5324), False, 'from keras.models import Model\n'), ((5516, 5584), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(num_seqs, 1, self.y_dim)'}), '(loc=0.0, scale=1.0, size=(num_seqs, 1, self.y_dim))\n', (5532, 5584), True, 'import numpy as np\n'), ((5591, 5669), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(num_seqs, num_samples, self.z_dim)'}), '(loc=0.0, scale=1.0, size=(num_seqs, num_samples, 
self.z_dim))\n', (5607, 5669), True, 'import numpy as np\n'), ((5858, 5936), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(num_seqs, num_samples, self.z_dim)'}), '(loc=0.0, scale=1.0, size=(num_seqs, num_samples, self.z_dim))\n', (5874, 5936), True, 'import numpy as np\n'), ((4331, 4356), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4345, 4356), True, 'import numpy as np\n'), ((4397, 4427), 'numpy.tile', 'np.tile', (['y', '(1, z.shape[1], 1)'], {}), '(y, (1, z.shape[1], 1))\n', (4404, 4427), True, 'import numpy as np\n'), ((3195, 3215), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (3206, 3215), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n'), ((3038, 3085), 'keras.layers.Reshape', 'Reshape', (['(self.max_seq_length, self.x_dim ** 2)'], {}), '((self.max_seq_length, self.x_dim ** 2))\n', (3045, 3085), False, 'from keras.layers import Input, Lambda, Concatenate, Reshape\n')] |
import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
import random
import math
import scipy.special
def expr_math_ops(ip,port):
    """Validate H2O frame elementwise math operators against numpy/scipy/math."""
    def rand_grid(lo, hi):
        # 10x10 nested list of uniform random values in [lo, hi].
        return [[random.uniform(lo,hi) for r in range(10)] for c in range(10)]

    trig_data  = rand_grid(-10, 10)
    inv_data   = rand_grid(-1, 1)
    acosh_data = rand_grid(1, 10)
    abs_data   = rand_grid(-100000, 0)

    h2o_trig  = h2o.H2OFrame(python_obj=trig_data)
    h2o_inv   = h2o.H2OFrame(python_obj=inv_data)
    h2o_acosh = h2o.H2OFrame(python_obj=acosh_data)
    h2o_abs   = h2o.H2OFrame(python_obj=abs_data)

    # Shift/scale both sides identically so values stay in each op's domain.
    h2o_data1 = h2o_trig + 2
    h2o_data2 = h2o_inv / 1.01
    h2o_data3 = h2o_acosh * 1.5
    h2o_data4 = h2o_abs - 1.5

    np_data1 = np.array(trig_data) + 2
    np_data2 = np.array(inv_data) / 1.01
    np_data3 = np.array(acosh_data) * 1.5
    np_data4 = np.array(abs_data) - 1.5

    # Elementwise checks: (h2o expression, numpy reference), compared to 10 digits.
    elementwise_checks = [
        (h2o_data1.cos(),   np.cos(np_data1)),
        (h2o_data1.sin(),   np.sin(np_data1)),
        (h2o_data1.tan(),   np.tan(np_data1)),
        (h2o_data2.acos(),  np.arccos(np_data2)),
        (h2o_data2.asin(),  np.arcsin(np_data2)),
        (h2o_data1.atan(),  np.arctan(np_data1)),
        (h2o_data1.cosh(),  np.cosh(np_data1)),
        (h2o_data1.sinh(),  np.sinh(np_data1)),
        (h2o_data1.tanh(),  np.tanh(np_data1)),
        (h2o_data3.acosh(), np.arccosh(np_data3)),
        (h2o_data1.asinh(), np.arcsinh(np_data1)),
        (h2o_data2.atanh(), np.arctanh(np_data2)),
        ((h2o_data2/math.pi).cospi(), np.cos(np_data2)),
        ((h2o_data2/math.pi).sinpi(), np.sin(np_data2)),
        ((h2o_data2/math.pi).tanpi(), np.tan(np_data2)),
        (h2o_data4.abs(),   np.fabs(np_data4)),
        (h2o_data2.sign(),  np.sign(np_data2)),
        (h2o_data3.sqrt(),  np.sqrt(np_data3)),
        (h2o_data3.trunc(), np.trunc(np_data3)),
        (h2o_data3.ceil(),  np.ceil(np_data3)),
        (h2o_data3.floor(), np.floor(np_data3)),
        (h2o_data3.log(),   np.log(np_data3)),
        (h2o_data3.log10(), np.log10(np_data3)),
        (h2o_data3.log1p(), np.log1p(np_data3)),
        (h2o_data3.log2(),  np.log2(np_data3)),
        (h2o_data3.exp(),   np.exp(np_data3)),
        (h2o_data3.expm1(), np.expm1(np_data3)),
    ]
    for h2o_result, np_result in elementwise_checks:
        h2o.np_comparison_check(h2o_result, np_result, 10)

    def check_scalar(h2o_val, num_val, op_name):
        # Relative-tolerance check mirroring the original per-op assertions.
        assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
            "check unsuccessful! h2o computed {0} and math computed {1}. expected equal {2} values between h2o and " \
            "math".format(h2o_val, num_val, op_name)

    # Gamma-family ops are only checked at one cell against math/scipy.
    check_scalar(h2o_data3.gamma()[5,5],    math.gamma(h2o_data3[5,5]),    "gamma")
    check_scalar(h2o_data3.lgamma()[5,5],   math.lgamma(h2o_data3[5,5]),   "lgamma")
    check_scalar(h2o_data3.digamma()[5,5],  scipy.special.polygamma(0,h2o_data3[5,5]), "digamma")
    check_scalar(h2o_data3.trigamma()[5,5], float(scipy.special.polygamma(1,h2o_data3[5,5])), "trigamma")
if __name__ == "__main__":
    # Standalone entry point: h2o.run_test presumably connects to an H2O
    # cluster and invokes expr_math_ops(ip, port) — see h2o test utilities.
    h2o.run_test(sys.argv, expr_math_ops)
| [
"numpy.floor",
"numpy.arccosh",
"numpy.sin",
"numpy.exp",
"h2o.run_test",
"numpy.trunc",
"numpy.arctanh",
"numpy.arcsin",
"numpy.expm1",
"math.gamma",
"numpy.tan",
"numpy.arcsinh",
"numpy.fabs",
"numpy.log10",
"numpy.arccos",
"numpy.log1p",
"numpy.tanh",
"numpy.ceil",
"numpy.log2... | [((11, 42), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../"""'], {}), "(1, '../../../')\n", (26, 42), False, 'import sys\n'), ((546, 613), 'h2o.H2OFrame', 'h2o.H2OFrame', ([], {'python_obj': 'sin_cos_tan_atan_sinh_cosh_tanh_asinh_data'}), '(python_obj=sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)\n', (558, 613), False, 'import h2o\n'), ((632, 677), 'h2o.H2OFrame', 'h2o.H2OFrame', ([], {'python_obj': 'asin_acos_atanh_data'}), '(python_obj=asin_acos_atanh_data)\n', (644, 677), False, 'import h2o\n'), ((696, 731), 'h2o.H2OFrame', 'h2o.H2OFrame', ([], {'python_obj': 'acosh_data'}), '(python_obj=acosh_data)\n', (708, 731), False, 'import h2o\n'), ((750, 783), 'h2o.H2OFrame', 'h2o.H2OFrame', ([], {'python_obj': 'abs_data'}), '(python_obj=abs_data)\n', (762, 783), False, 'import h2o\n'), ((800, 852), 'numpy.array', 'np.array', (['sin_cos_tan_atan_sinh_cosh_tanh_asinh_data'], {}), '(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)\n', (808, 852), True, 'import numpy as np\n'), ((868, 898), 'numpy.array', 'np.array', (['asin_acos_atanh_data'], {}), '(asin_acos_atanh_data)\n', (876, 898), True, 'import numpy as np\n'), ((914, 934), 'numpy.array', 'np.array', (['acosh_data'], {}), '(acosh_data)\n', (922, 934), True, 'import numpy as np\n'), ((950, 968), 'numpy.array', 'np.array', (['abs_data'], {}), '(abs_data)\n', (958, 968), True, 'import numpy as np\n'), ((3187, 3214), 'math.gamma', 'math.gamma', (['h2o_data3[5, 5]'], {}), '(h2o_data3[5, 5])\n', (3197, 3214), False, 'import math\n'), ((3500, 3528), 'math.lgamma', 'math.lgamma', (['h2o_data3[5, 5]'], {}), '(h2o_data3[5, 5])\n', (3511, 3528), False, 'import math\n'), ((4494, 4531), 'h2o.run_test', 'h2o.run_test', (['sys.argv', 'expr_math_ops'], {}), '(sys.argv, expr_math_ops)\n', (4506, 4531), False, 'import h2o\n'), ((1271, 1287), 'numpy.cos', 'np.cos', (['np_data1'], {}), '(np_data1)\n', (1277, 1287), True, 'import numpy as np\n'), ((1338, 1354), 'numpy.sin', 'np.sin', (['np_data1'], {}), 
'(np_data1)\n', (1344, 1354), True, 'import numpy as np\n'), ((1405, 1421), 'numpy.tan', 'np.tan', (['np_data1'], {}), '(np_data1)\n', (1411, 1421), True, 'import numpy as np\n'), ((1473, 1492), 'numpy.arccos', 'np.arccos', (['np_data2'], {}), '(np_data2)\n', (1482, 1492), True, 'import numpy as np\n'), ((1544, 1563), 'numpy.arcsin', 'np.arcsin', (['np_data2'], {}), '(np_data2)\n', (1553, 1563), True, 'import numpy as np\n'), ((1615, 1634), 'numpy.arctan', 'np.arctan', (['np_data1'], {}), '(np_data1)\n', (1624, 1634), True, 'import numpy as np\n'), ((1686, 1703), 'numpy.cosh', 'np.cosh', (['np_data1'], {}), '(np_data1)\n', (1693, 1703), True, 'import numpy as np\n'), ((1755, 1772), 'numpy.sinh', 'np.sinh', (['np_data1'], {}), '(np_data1)\n', (1762, 1772), True, 'import numpy as np\n'), ((1824, 1841), 'numpy.tanh', 'np.tanh', (['np_data1'], {}), '(np_data1)\n', (1831, 1841), True, 'import numpy as np\n'), ((1894, 1914), 'numpy.arccosh', 'np.arccosh', (['np_data3'], {}), '(np_data3)\n', (1904, 1914), True, 'import numpy as np\n'), ((1967, 1987), 'numpy.arcsinh', 'np.arcsinh', (['np_data1'], {}), '(np_data1)\n', (1977, 1987), True, 'import numpy as np\n'), ((2040, 2060), 'numpy.arctanh', 'np.arctanh', (['np_data2'], {}), '(np_data2)\n', (2050, 2060), True, 'import numpy as np\n'), ((2123, 2139), 'numpy.cos', 'np.cos', (['np_data2'], {}), '(np_data2)\n', (2129, 2139), True, 'import numpy as np\n'), ((2202, 2218), 'numpy.sin', 'np.sin', (['np_data2'], {}), '(np_data2)\n', (2208, 2218), True, 'import numpy as np\n'), ((2281, 2297), 'numpy.tan', 'np.tan', (['np_data2'], {}), '(np_data2)\n', (2287, 2297), True, 'import numpy as np\n'), ((2348, 2365), 'numpy.fabs', 'np.fabs', (['np_data4'], {}), '(np_data4)\n', (2355, 2365), True, 'import numpy as np\n'), ((2417, 2434), 'numpy.sign', 'np.sign', (['np_data2'], {}), '(np_data2)\n', (2424, 2434), True, 'import numpy as np\n'), ((2486, 2503), 'numpy.sqrt', 'np.sqrt', (['np_data3'], {}), '(np_data3)\n', (2493, 2503), True, 
'import numpy as np\n'), ((2556, 2574), 'numpy.trunc', 'np.trunc', (['np_data3'], {}), '(np_data3)\n', (2564, 2574), True, 'import numpy as np\n'), ((2626, 2643), 'numpy.ceil', 'np.ceil', (['np_data3'], {}), '(np_data3)\n', (2633, 2643), True, 'import numpy as np\n'), ((2696, 2714), 'numpy.floor', 'np.floor', (['np_data3'], {}), '(np_data3)\n', (2704, 2714), True, 'import numpy as np\n'), ((2765, 2781), 'numpy.log', 'np.log', (['np_data3'], {}), '(np_data3)\n', (2771, 2781), True, 'import numpy as np\n'), ((2834, 2852), 'numpy.log10', 'np.log10', (['np_data3'], {}), '(np_data3)\n', (2842, 2852), True, 'import numpy as np\n'), ((2905, 2923), 'numpy.log1p', 'np.log1p', (['np_data3'], {}), '(np_data3)\n', (2913, 2923), True, 'import numpy as np\n'), ((2975, 2992), 'numpy.log2', 'np.log2', (['np_data3'], {}), '(np_data3)\n', (2982, 2992), True, 'import numpy as np\n'), ((3043, 3059), 'numpy.exp', 'np.exp', (['np_data3'], {}), '(np_data3)\n', (3049, 3059), True, 'import numpy as np\n'), ((3112, 3130), 'numpy.expm1', 'np.expm1', (['np_data3'], {}), '(np_data3)\n', (3120, 3130), True, 'import numpy as np\n'), ((211, 234), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (225, 234), False, 'import random\n'), ((303, 324), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (317, 324), False, 'import random\n'), ((383, 404), 'random.uniform', 'random.uniform', (['(1)', '(10)'], {}), '(1, 10)\n', (397, 404), False, 'import random\n'), ((461, 487), 'random.uniform', 'random.uniform', (['(-100000)', '(0)'], {}), '(-100000, 0)\n', (475, 487), False, 'import random\n')] |
# --- For cmd.py
from __future__ import division, print_function
import os
import subprocess
import multiprocessing
import collections
import glob
import pandas as pd
import numpy as np
import distutils.dir_util
import shutil
import stat
import re
def spanwiseBD(tsAvg,vr,R,postprofile=None, IR=None):
    """Gather BeamDyn per-node tip-deflection channels into a spanwise table.

    tsAvg       : time-averaged output channels (pandas Series-like)
    vr          : radial positions of the BeamDyn output nodes
    R           : rotor radius used for normalization
    postprofile : optional csv path; when given the table is written there
    IR          : optional node-index mapping forwarded to the harmonizer
    returns     : DataFrame with one row per radial station (or None)
    """
    # Collect the translational deflection channels for each blade.
    spanCols = []
    for blade in ['B1','B2','B3']:
        for comp in ['TDxr','TDyr','TDzr']:
            pattern = '^'+blade+'N(\d)'+comp+'_\[m\]'
            spanCols.append(extractSpanTSReg(tsAvg, pattern, blade+comp+'_[m]'))
    dfRad, nrMax, ValidRow = _HarmonizeSpanwiseData('BeamDyn', spanCols, vr, R, IR=IR)
    # Optionally export the radial table to a tab-separated file.
    if dfRad is not None and postprofile is not None:
        dfRad.to_csv(postprofile,sep='\t',index=False)
    return dfRad
def spanwiseED(tsAvg,vr,R,postprofile=None, IR=None):
    """Gather ElastoDyn per-node blade channels into a spanwise table.

    tsAvg       : time-averaged output channels (pandas Series-like)
    vr          : radial positions of the ElastoDyn output nodes
    R           : rotor radius used for normalization
    postprofile : optional csv path; when given the table is written there
    IR          : optional node-index mapping forwarded to the harmonizer
    returns     : DataFrame with one row per radial station (or None)
    """
    nr=len(vr)  # kept from the original (currently unused)
    # (channel in regex, channel in output name, unit in regex, unit in output)
    # NOTE(review): the MLy->MLx / MLx->MLy cross-mapping reproduces the
    # original code's convention — confirm it is intentional.
    chanMap = [
        ('ALx', 'ALx', '\[m/s^2\]', '[m/s^2]'),
        ('ALy', 'ALy', '\[m/s^2\]', '[m/s^2]'),
        ('ALz', 'ALz', '\[m/s^2\]', '[m/s^2]'),
        ('TDx', 'TDx', '\[m\]',     '[m]'),
        ('TDy', 'TDy', '\[m\]',     '[m]'),
        ('TDz', 'TDz', '\[m\]',     '[m]'),
        ('RDx', 'RDx', '\[deg\]',   '[deg]'),
        ('RDy', 'RDy', '\[deg\]',   '[deg]'),
        ('RDz', 'RDz', '\[deg\]',   '[deg]'),
        ('FLx', 'FLx', '\[kN\]',    '[kN]'),
        ('FLy', 'FLy', '\[kN\]',    '[kN]'),
        ('FLz', 'FLz', '\[kN\]',    '[kN]'),
        ('MLy', 'MLx', '\[kN-m\]',  '[kN-m]'),
        ('MLx', 'MLy', '\[kN-m\]',  '[kN-m]'),
        ('MLz', 'MLz', '\[kN-m\]',  '[kN-m]'),
    ]
    spanCols = []
    for sB in ['b1','b2','b3']:
        SB = sB.upper()
        for reChan, outChan, reUnit, outUnit in chanMap:
            spanCols.append(extractSpanTSReg(tsAvg, '^Spn(\d)'+reChan+sB+'_'+reUnit, SB+outChan+'_'+outUnit))
    dfRad, nrMax, ValidRow = _HarmonizeSpanwiseData('ElastoDyn', spanCols, vr, R, IR=IR)
    # Optionally export the radial table to a tab-separated file.
    if dfRad is not None and postprofile is not None:
        dfRad.to_csv(postprofile,sep='\t',index=False)
    return dfRad
def spanwiseAD(tsAvg,vr=None,rho=None,R=None,nB=None,chord=None,postprofile=None,IR=None):
    """Gather AeroDyn (v14/v15) per-node channels into a spanwise table.

    tsAvg       : time-averaged output channels (pandas Series-like)
    vr          : radial positions of the AD output nodes (full span), or None
    rho         : air density, used for the thrust-coefficient estimate
    R           : rotor radius used for normalization
    nB          : number of blades
    chord       : chord distribution over the full span, or None
    postprofile : optional csv path; when given the table is written there
    IR          : optional node-index mapping for the AD14-style channels
    returns     : DataFrame with one row per radial station (or None)
    """
    # --- Extract radial data (AD15 per-blade channels)
    Columns=[]
    for sB in ['B1','B2','B3']:
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Alpha_\[deg\]',sB+'Alpha_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AOA_\[deg\]'  ,sB+'Alpha_[deg]')) # DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AxInd_\[-\]'  ,sB+'AxInd_[-]'  ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)TnInd_\[-\]'  ,sB+'TnInd_[-]'  ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AIn_\[deg\]'  ,sB+'AxInd_[-]'  )) # DBGOuts NOTE BUG
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)ApI_\[deg\]'  ,sB+'TnInd_[-]'  )) # DBGOuts NOTE BUG
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AIn_\[-\]'    ,sB+'AxInd_[-]'  )) # DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)ApI_\[-\]'    ,sB+'TnInd_[-]'  )) # DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cl_\[-\]'     ,sB+'Cl_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cd_\[-\]'     ,sB+'Cd_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cm_\[-\]'     ,sB+'Cm_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cx_\[-\]'     ,sB+'Cx_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cy_\[-\]'     ,sB+'Cy_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cn_\[-\]'     ,sB+'Cn_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Ct_\[-\]'     ,sB+'Ct_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Re_\[-\]'     ,sB+'Re_[-]'     ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vrel_\[m/s\]' ,sB+'Vrel_[m/s]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Theta_\[deg\]',sB+'Theta_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Phi_\[deg\]'  ,sB+'Phi_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Twst_\[deg\]' ,sB+'Twst_[deg]')) #DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Curve_\[deg\]',sB+'Curve_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vindx_\[m/s\]',sB+'Vindx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vindy_\[m/s\]',sB+'Vindy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fx_\[N/m\]'   ,sB+'Fx_[N/m]'   ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fy_\[N/m\]'   ,sB+'Fy_[N/m]'   ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fl_\[N/m\]'   ,sB+'Fl_[N/m]'   ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fd_\[N/m\]'   ,sB+'Fd_[N/m]'   ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fn_\[N/m\]'   ,sB+'Fn_[N/m]'   ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Ft_\[N/m\]'   ,sB+'Ft_[N/m]'   ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VUndx_\[m/s\]',sB+'VUndx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VUndy_\[m/s\]',sB+'VUndy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VUndz_\[m/s\]',sB+'VUndz_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VDisx_\[m/s\]',sB+'VDisx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VDisy_\[m/s\]',sB+'VDisy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VDisz_\[m/s\]',sB+'VDisz_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vx_\[m/s\]'   ,sB+'Vx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vy_\[m/s\]'   ,sB+'Vy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vz_\[m/s\]'   ,sB+'Vz_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)DynP_\[Pa\]'  ,sB+'DynP_[Pa]'  ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)M_\[-\]'      ,sB+'M_[-]'      ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Mm_\[N-m/m\]' ,sB+'Mm_[N-m/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Gam_\['       ,sB+'Gam_[m^2/s]')) #DBGOuts
    # --- AD 14 channel names (no per-blade prefix, node index in the name)
    Columns.append(extractSpanTSReg(tsAvg,'^Alpha(\d*)_\[deg\]'   ,'Alpha_[deg]'  , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^DynPres(\d*)_\[Pa\]'  ,'DynPres_[Pa]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CLift(\d*)_\[-\]'     ,'CLift_[-]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CDrag(\d*)_\[-\]'     ,'CDrag_[-]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CNorm(\d*)_\[-\]'     ,'CNorm_[-]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CTang(\d*)_\[-\]'     ,'CTang_[-]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CMomt(\d*)_\[-\]'     ,'CMomt_[-]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^Pitch(\d*)_\[deg\]'   ,'Pitch_[deg]'  , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^AxInd(\d*)_\[-\]'     ,'AxInd_[-]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^TanInd(\d*)_\[-\]'    ,'TanInd_[-]'   , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^ForcN(\d*)_\[N\]'     ,'ForcN_[N]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^ForcT(\d*)_\[N\]'     ,'ForcT_[N]'    , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^Pmomt(\d*)_\[N-m\]'   ,'Pmomt_[N-N]'  , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^ReNum(\d*)_\[x10^6\]' ,'ReNum_[x10^6]', IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^Gamma(\d*)_\[m^2/s\]' ,'Gamma_[m^2/s]', IR=IR))

    dfRad, nrMax, ValidRow = _HarmonizeSpanwiseData('AeroDyn', Columns, vr, R, IR=IR)
    # --- Compute additional values (AD15 only)
    if chord is not None:
        r = None
        if vr is not None:
            # Trim chord and radial positions to the harmonized/valid radial
            # stations so they line up with the rows of dfRad.
            chord = chord[0:nrMax]
            chord = chord[ValidRow[0]]
            r     = vr[0:nrMax][ValidRow[0]]
        for sB in ['B1','B2','B3']:
            try:
                # Local thrust coefficient: dT = Ct * 1/2 rho U0^2 * 2 pi r dr
                # BUGFIX: the original referenced an undefined name `r`, so
                # this whole branch always failed silently; use the trimmed
                # radial positions derived from vr instead.
                vr_bar = r/R
                Fx = dfRad[sB+'Fx_[N/m]']
                U0 = tsAvg['Wind1VelX_[m/s]']
                Ct = nB*Fx/(0.5 * rho * 2 * U0**2 * np.pi * r)
                Ct[r<0.01*R] = 0  # avoid the singularity at the root
                dfRad[sB+'Ct_[-]'] = Ct
                # Rotor-averaged CT by integrating over the normalized span.
                CT = 2*np.trapz(vr_bar*Ct, vr_bar)
                dfRad[sB+'CtAvg_[-]'] = CT*np.ones(r.shape)
            except:
                # Best effort: missing channels/inputs simply skip this quantity.
                pass
            try:
                # Circulation from Kutta-Joukowski: Gamma = 1/2 c Vrel Cl
                dfRad[sB+'Gamma_[m^2/s]'] = 1/2 * chord * dfRad[sB+'Vrel_[m/s]'] * dfRad[sB+'Cl_[-]']
            except:
                pass
            try:
                # Reconstruct induced velocities from induction factors when absent.
                if not sB+'Vindx_[m/s]' in dfRad.columns:
                    dfRad[sB+'Vindx_[m/s]']= -dfRad[sB+'AxInd_[-]'].values * dfRad[sB+'Vx_[m/s]'].values
                    dfRad[sB+'Vindy_[m/s]']=  dfRad[sB+'TnInd_[-]'].values * dfRad[sB+'Vy_[m/s]'].values
            except:
                pass
    # --- Export to csv
    if postprofile is not None and dfRad is not None:
        dfRad.to_csv(postprofile,sep='\t',index=False)
    return dfRad
def spanwisePostProLegacy(FST_In=None,avgMethod='constantwindow',avgParam=5,out_ext='.outb',postprofile=None,df=None):
    """
    Postprocess FAST radial (spanwise) output data.
    INPUTS:
       - FST_In: FAST .fst input file, used to locate the output file
             (FST_In with out_ext substituted) and to read geometry/air density
       - avgMethod='periods', avgParam=2: average over 2 last periods, Needs Azimuth sensors!!!
       - avgMethod='constantwindow', avgParam=5: average over 5s of simulation
       - postprofile: outputfile to write radial data
       - df: optional preloaded output DataFrame; when given, the output file
             is not read and the raw df is not part of the return value
    RETURNS:
       (dfRad_ED, dfRad_AD, dfRad_BD) — plus df when it was loaded here
    """
    # --- Opens Fast output and performs averaging
    if df is None:
        # NOTE(review): `weio` is assumed to be imported elsewhere in this
        # module (not visible here) — it reads FAST output into a DataFrame.
        df = weio.read(FST_In.replace('.fst',out_ext)).toDataFrame()
        returnDF=True
    else:
        returnDF=False
    # NOTE: spanwise script doest not support duplicate columns
    df = df.loc[:,~df.columns.duplicated()]
    dfAvg = averageDF(df,avgMethod=avgMethod ,avgParam=avgParam) # NOTE: average 5 last seconds
    # --- Extract info (e.g. radial positions) from Fast input file
    # We don't have a .fst input file, so we'll rely on some default values for "r"
    rho = 1.225
    chord = None
    # --- Extract radial positions of output channels
    r_AD, r_ED, r_BD, IR_AD, IR_ED, IR_BD, R, r_hub, fst = FASTRadialOutputs(FST_In, OutputCols=df.columns.values)
    if R is None: 
        # Fall back to unit radius so normalized quantities still compute.
        R=1
    try:
        chord  = fst.AD.Bld1['BldAeroNodes'][:,5] # Full span
    except:
        pass
    try:
        # AeroDyn input key differs between versions; try both spellings.
        rho = fst.AD['Rho']
    except:
        rho = fst.AD['AirDens']
    print('r_AD:', r_AD)
    print('r_ED:', r_ED)
    print('r_BD:', r_BD)
    #print('I_AD:', IR_AD)
    #print('I_ED:', IR_ED)
    #print('I_BD:', IR_BD)
    # --- Extract radial data and export to csv if needed
    dfRad_AD = None
    dfRad_ED = None
    dfRad_BD = None
    dfRad_AD = spanwiseAD(dfAvg.iloc[0], r_AD, rho , R=R, nB=3, chord=chord, postprofile=postprofile, IR=IR_AD)
    if r_ED is not None:
        dfRad_ED = spanwiseED(dfAvg.iloc[0], r_ED, R=R, IR=IR_ED, postprofile=postprofile)
    if r_BD is not None:
        dfRad_BD = spanwiseBD(dfAvg.iloc[0], r_BD, R=R, IR=IR_BD, postprofile=postprofile)
    if returnDF:
        return dfRad_ED , dfRad_AD, dfRad_BD, df
    else:
        return dfRad_ED , dfRad_AD, dfRad_BD
| [
"numpy.trapz",
"numpy.ones"
] | [((8850, 8879), 'numpy.trapz', 'np.trapz', (['(vr_bar * Ct)', 'vr_bar'], {}), '(vr_bar * Ct, vr_bar)\n', (8858, 8879), True, 'import numpy as np\n'), ((8915, 8931), 'numpy.ones', 'np.ones', (['r.shape'], {}), '(r.shape)\n', (8922, 8931), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""
Convert depreciated VGG16 snapshots to the ones that support tensorflow format
It will check the specific snapshot at the vgg16_depre folder, and copy it to the same location at vgg16 folder
See experimental/scripts/convert_vgg16.sh for how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import _init_paths
import lib.datasets as datasets
from lib.model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from lib.model.train_val import filter_roidb, get_training_roidb
from lib.datasets.factory import get_imdb
import lib.datasets.imdb
import argparse
import pprint
import numpy as np
import sys
import os
import os.path as osp
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from lib.nets.vgg16 import vgg16
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    With no command-line arguments at all, print usage and exit(1).
    """
    ap = argparse.ArgumentParser(description='Convert an old VGG16 snapshot to new format')
    # (flags, add_argument keyword arguments) — registration order defines
    # the order options appear in the help text.
    option_specs = [
        (('--cfg',),      dict(dest='cfg_file', help='optional config file',
                               default=None, type=str)),
        (('--snapshot',), dict(dest='snapshot', help='vgg snapshot prefix',
                               type=str)),
        (('--imdb',),     dict(dest='imdb_name', help='dataset to train on',
                               default='voc_2007_trainval', type=str)),
        (('--iters',),    dict(dest='max_iters', help='number of iterations to train',
                               default=70000, type=int)),
        (('--tag',),      dict(dest='tag', help='tag of the model',
                               default=None, type=str)),
        (('--set',),      dict(dest='set_cfgs', help='set config keys',
                               default=None, nargs=argparse.REMAINDER)),
    ]
    for flags, kwargs in option_specs:
        ap.add_argument(*flags, **kwargs)

    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(1)

    return ap.parse_args()
def combined_roidb(imdb_names):
    """Load one or more '+'-separated imdbs and return (imdb, combined roidb)."""
    def load_single(name):
        # Fetch the dataset wrapper and prepare its training roidb.
        dataset = get_imdb(name)
        print('Loaded dataset `{:s}` for training'.format(dataset.name))
        dataset.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        return get_training_roidb(dataset)

    names = imdb_names.split('+')
    per_dataset = [load_single(n) for n in names]
    roidb = per_dataset[0]
    if len(per_dataset) > 1:
        # Concatenate the remaining roidbs onto the first one.
        for extra in per_dataset[1:]:
            roidb.extend(extra)
        # Multi-dataset case: wrap in a generic imdb carrying the class list
        # taken from the second dataset.
        template = get_imdb(names[1])
        imdb = datasets.imdb.imdb(imdb_names, template.classes)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb
def get_variables_in_checkpoint_file(file_name):
    """Return {variable_name: shape} read from a TF checkpoint, or None on failure."""
    try:
        ckpt_reader = pywrap_tensorflow.NewCheckpointReader(file_name)
        return ckpt_reader.get_variable_to_shape_map()
    except Exception as e:  # pylint: disable=broad-except
        # Surface the error but do not raise; callers treat None as "no map".
        print(str(e))
        if "corrupted compressed block contents" in str(e):
            print("It's likely that your checkpoint file has been compressed "
                  "with SNAPPY.")
def convert_names(name):
    """Map an old-style ('vgg_16/...') variable name to the new snapshot scheme."""
    # Strip the tensor output suffix TF appends to variable names (':0').
    name = name.replace(':0', '')
    # Re-root under the new scope and singularize the parameter names.
    renames = [
        ('vgg_16/', 'vgg16_default/'),
        ('/biases', '/bias'),
        ('/weights', '/weight'),
    ]
    for old, new in renames:
        name = name.replace(old, new)
    # Old snapshots nested conv layers one level deeper (e.g. conv1/conv1_1);
    # drop that extra directory level for blocks conv1..conv5.
    for block_idx in range(1, 6):
        name = name.replace('/conv%d/' % block_idx, '/')
    return name
# Just build the graph, load the weights/statistics, and save them
def convert_from_depre(net, imdb, input_dir, output_dir, snapshot, max_iters):
    """Re-save a snapshot stored under the deprecated variable naming scheme.

    Rebuilds the training graph for ``net``, restores the checkpoint from
    ``input_dir`` through a name-translation map (see ``convert_names``) and
    writes it back to ``output_dir`` under the current variable names.  The
    accompanying ``.pkl`` sidecar file is copied verbatim.

    NOTE(review): ``max_iters`` is accepted but not used in this function.
    """
    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    # allow TF to fall back to CPU placement and grow GPU memory on demand
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    num_classes = imdb.num_classes
    with sess.graph.as_default():
        tf.set_random_seed(cfg.RNG_SEED)
        # build the full training architecture so every variable exists
        layers = net.create_architecture(sess, 'TRAIN', num_classes, tag='default',
                                         anchor_scales=cfg.ANCHOR_SCALES,
                                         anchor_ratios=cfg.ANCHOR_RATIOS)
        loss = layers['total_loss']
        # Learning rate should be reduced already
        lr = tf.Variable(cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA, trainable=False)
        momentum = cfg.TRAIN.MOMENTUM
        optimizer = tf.train.MomentumOptimizer(lr, momentum)
        gvs = optimizer.compute_gradients(loss)
        # rebuild the optimizer/train_op, presumably so the graph (incl.
        # optimizer slot variables) matches the one the snapshot was saved
        # from -- TODO confirm
        if cfg.TRAIN.DOUBLE_BIAS:
            final_gvs = []
            with tf.variable_scope('Gradient_Mult') as scope:
                for grad, var in gvs:
                    scale = 1.
                    if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
                        scale *= 2.
                    if not np.allclose(scale, 1.0):
                        grad = tf.multiply(grad, scale)
                    final_gvs.append((grad, var))
            train_op = optimizer.apply_gradients(final_gvs)
        else:
            train_op = optimizer.apply_gradients(gvs)
        checkpoint = osp.join(input_dir, snapshot + '.ckpt')
        variables = tf.global_variables()
        # map old checkpoint names -> current graph variables
        name2var = {convert_names(v.name): v for v in variables}
        target_names = get_variables_in_checkpoint_file(checkpoint)
        restorer = tf.train.Saver(name2var)
        saver = tf.train.Saver()
        print('Importing...')
        restorer.restore(sess, checkpoint)
        checkpoint = osp.join(output_dir, snapshot + '.ckpt')
        print('Exporting...')
        saver.save(sess, checkpoint)
        # also copy the pkl file
        index = osp.join(input_dir, snapshot + '.pkl')
        outdex = osp.join(output_dir, snapshot + '.pkl')
        shutil.copy(index, outdex)
    sess.close()
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    # optional config overrides from a file and/or --set key/value pairs
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    pprint.pprint(cfg)
    np.random.seed(cfg.RNG_SEED)
    # train set
    imdb, _ = combined_roidb(args.imdb_name)
    # output directory where the snapshot will be exported
    output_dir = get_output_dir(imdb, args.tag)
    print('Output will be exported to `{:s}`'.format(output_dir))
    # input directory where the snapshot will be imported
    input_dir = output_dir.replace('/vgg16/', '/vgg16_depre/')
    print('Input will be imported from `{:s}`'.format(input_dir))
    net = vgg16()
    convert_from_depre(net, imdb, input_dir, output_dir, args.snapshot, args.max_iters)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.allclose",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"lib.nets.vgg16.vgg16",
"tensorflow.Variable",
"pprint.pprint",
"lib.model.config.cfg_from_list",
"tensorflow.multiply",
"os.path.join",
"lib.datasets.factory.get_imdb",
... | [((1219, 1306), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert an old VGG16 snapshot to new format"""'}), "(description=\n 'Convert an old VGG16 snapshot to new format')\n", (1242, 1306), False, 'import argparse\n'), ((4265, 4306), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (4279, 4306), True, 'import tensorflow as tf\n'), ((4363, 4390), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tfconfig'}), '(config=tfconfig)\n', (4373, 4390), True, 'import tensorflow as tf\n'), ((6631, 6649), 'pprint.pprint', 'pprint.pprint', (['cfg'], {}), '(cfg)\n', (6644, 6649), False, 'import pprint\n'), ((6655, 6683), 'numpy.random.seed', 'np.random.seed', (['cfg.RNG_SEED'], {}), '(cfg.RNG_SEED)\n', (6669, 6683), True, 'import numpy as np\n'), ((6823, 6853), 'lib.model.config.get_output_dir', 'get_output_dir', (['imdb', 'args.tag'], {}), '(imdb, args.tag)\n', (6837, 6853), False, 'from lib.model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\n'), ((7119, 7126), 'lib.nets.vgg16.vgg16', 'vgg16', ([], {}), '()\n', (7124, 7126), False, 'from lib.nets.vgg16 import vgg16\n'), ((2293, 2304), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2301, 2304), False, 'import sys\n'), ((2477, 2496), 'lib.datasets.factory.get_imdb', 'get_imdb', (['imdb_name'], {}), '(imdb_name)\n', (2485, 2496), False, 'from lib.datasets.factory import get_imdb\n'), ((2720, 2744), 'lib.model.train_val.get_training_roidb', 'get_training_roidb', (['imdb'], {}), '(imdb)\n', (2738, 2744), False, 'from lib.model.train_val import filter_roidb, get_training_roidb\n'), ((2993, 3036), 'lib.datasets.imdb.imdb', 'datasets.imdb.imdb', (['imdb_names', 'tmp.classes'], {}), '(imdb_names, tmp.classes)\n', (3011, 3036), True, 'import lib.datasets as datasets\n'), ((3062, 3082), 'lib.datasets.factory.get_imdb', 'get_imdb', (['imdb_names'], {}), '(imdb_names)\n', (3070, 3082), False, 'from 
lib.datasets.factory import get_imdb\n'), ((3183, 3231), 'tensorflow.python.pywrap_tensorflow.NewCheckpointReader', 'pywrap_tensorflow.NewCheckpointReader', (['file_name'], {}), '(file_name)\n', (3220, 3231), False, 'from tensorflow.python import pywrap_tensorflow\n'), ((4193, 4215), 'os.path.exists', 'osp.exists', (['output_dir'], {}), '(output_dir)\n', (4203, 4215), True, 'import os.path as osp\n'), ((4225, 4248), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4236, 4248), False, 'import os\n'), ((4469, 4501), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['cfg.RNG_SEED'], {}), '(cfg.RNG_SEED)\n', (4487, 4501), True, 'import tensorflow as tf\n'), ((4833, 4904), 'tensorflow.Variable', 'tf.Variable', (['(cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA)'], {'trainable': '(False)'}), '(cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA, trainable=False)\n', (4844, 4904), True, 'import tensorflow as tf\n'), ((4963, 5003), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lr', 'momentum'], {}), '(lr, momentum)\n', (4989, 5003), True, 'import tensorflow as tf\n'), ((5661, 5700), 'os.path.join', 'osp.join', (['input_dir', "(snapshot + '.ckpt')"], {}), "(input_dir, snapshot + '.ckpt')\n", (5669, 5700), True, 'import os.path as osp\n'), ((5721, 5742), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5740, 5742), True, 'import tensorflow as tf\n'), ((5895, 5919), 'tensorflow.train.Saver', 'tf.train.Saver', (['name2var'], {}), '(name2var)\n', (5909, 5919), True, 'import tensorflow as tf\n'), ((5936, 5952), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5950, 5952), True, 'import tensorflow as tf\n'), ((6048, 6088), 'os.path.join', 'osp.join', (['output_dir', "(snapshot + '.ckpt')"], {}), "(output_dir, snapshot + '.ckpt')\n", (6056, 6088), True, 'import os.path as osp\n'), ((6206, 6244), 'os.path.join', 'osp.join', (['input_dir', "(snapshot + '.pkl')"], {}), "(input_dir, snapshot + '.pkl')\n", (6214, 
6244), True, 'import os.path as osp\n'), ((6262, 6301), 'os.path.join', 'osp.join', (['output_dir', "(snapshot + '.pkl')"], {}), "(output_dir, snapshot + '.pkl')\n", (6270, 6301), True, 'import os.path as osp\n'), ((6310, 6336), 'shutil.copy', 'shutil.copy', (['index', 'outdex'], {}), '(index, outdex)\n', (6321, 6336), False, 'import shutil\n'), ((6499, 6527), 'lib.model.config.cfg_from_file', 'cfg_from_file', (['args.cfg_file'], {}), '(args.cfg_file)\n', (6512, 6527), False, 'from lib.model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\n'), ((6570, 6598), 'lib.model.config.cfg_from_list', 'cfg_from_list', (['args.set_cfgs'], {}), '(args.set_cfgs)\n', (6583, 6598), False, 'from lib.model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\n'), ((5130, 5164), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Gradient_Mult"""'], {}), "('Gradient_Mult')\n", (5147, 5164), True, 'import tensorflow as tf\n'), ((5380, 5403), 'numpy.allclose', 'np.allclose', (['scale', '(1.0)'], {}), '(scale, 1.0)\n', (5391, 5403), True, 'import numpy as np\n'), ((5436, 5460), 'tensorflow.multiply', 'tf.multiply', (['grad', 'scale'], {}), '(grad, scale)\n', (5447, 5460), True, 'import tensorflow as tf\n')] |
import numpy as np
from nnweaver import *
def test_regularizers():
    """Check L1L2Regularizer penalty values and gradients on a tiny network.

    NOTE(review): the expected values suggest the penalty is averaged over
    the number of output units (an L1 sum of 1 over two units gives 0.5) --
    confirm against the L1L2Regularizer implementation.
    """
    nn = NN(2)
    nn.add_layer(Layer(1))
    # pure L1 regularization (l1=1, l2=0)
    reg = L1L2Regularizer(1, 0)
    nn.layers[0].weights = np.matrix([[1, 0]])
    nn.layers[0].bias = np.matrix([[0]])
    assert reg(nn) == 1
    nn.layers[0].weights = np.matrix([[1, 0], [0, 0]])
    nn.layers[0].bias = np.matrix([[0], [0]])
    assert reg(nn) == 0.5
    nn.layers[0].weights = np.matrix([[1, 0], [0, 0]])
    nn.layers[0].bias = np.matrix([[0], [1]])
    assert reg(nn) == 1
    # the L1 gradient is the sign of each parameter (for l1=1)
    nn.layers[0].weights = np.matrix([[3, -2]])
    nn.layers[0].bias = np.matrix([[1]])
    g_weights, g_bias = reg.gradient(nn.layers[0])
    np.testing.assert_array_equal([[1, -1]], g_weights)
    np.testing.assert_array_equal([[1]], g_bias)
    # pure L2 regularization (l1=0, l2=1)
    reg = L1L2Regularizer(0, 1)
    nn.layers[0].weights = np.matrix([[1, 0]])
    nn.layers[0].bias = np.matrix([[0]])
    assert reg(nn) == 0.5
    nn.layers[0].weights = np.matrix([[1, 0], [0, 0]])
    nn.layers[0].bias = np.matrix([[0], [0]])
    assert reg(nn) == 0.25
    nn.layers[0].weights = np.matrix([[1, 0], [0, 0]])
    nn.layers[0].bias = np.matrix([[0], [1]])
    assert reg(nn) == 0.5
    # the L2 gradient (for l2=1) equals the parameters themselves
    g_weights, g_bias = reg.gradient(nn.layers[0])
    np.testing.assert_array_equal(nn.layers[0].weights, g_weights)
    np.testing.assert_array_equal(nn.layers[0].bias, g_bias)
| [
"numpy.matrix",
"numpy.testing.assert_array_equal"
] | [((172, 191), 'numpy.matrix', 'np.matrix', (['[[1, 0]]'], {}), '([[1, 0]])\n', (181, 191), True, 'import numpy as np\n'), ((216, 232), 'numpy.matrix', 'np.matrix', (['[[0]]'], {}), '([[0]])\n', (225, 232), True, 'import numpy as np\n'), ((284, 311), 'numpy.matrix', 'np.matrix', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (293, 311), True, 'import numpy as np\n'), ((336, 357), 'numpy.matrix', 'np.matrix', (['[[0], [0]]'], {}), '([[0], [0]])\n', (345, 357), True, 'import numpy as np\n'), ((411, 438), 'numpy.matrix', 'np.matrix', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (420, 438), True, 'import numpy as np\n'), ((463, 484), 'numpy.matrix', 'np.matrix', (['[[0], [1]]'], {}), '([[0], [1]])\n', (472, 484), True, 'import numpy as np\n'), ((536, 556), 'numpy.matrix', 'np.matrix', (['[[3, -2]]'], {}), '([[3, -2]])\n', (545, 556), True, 'import numpy as np\n'), ((581, 597), 'numpy.matrix', 'np.matrix', (['[[1]]'], {}), '([[1]])\n', (590, 597), True, 'import numpy as np\n'), ((653, 704), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[[1, -1]]', 'g_weights'], {}), '([[1, -1]], g_weights)\n', (682, 704), True, 'import numpy as np\n'), ((709, 753), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[[1]]', 'g_bias'], {}), '([[1]], g_bias)\n', (738, 753), True, 'import numpy as np\n'), ((814, 833), 'numpy.matrix', 'np.matrix', (['[[1, 0]]'], {}), '([[1, 0]])\n', (823, 833), True, 'import numpy as np\n'), ((858, 874), 'numpy.matrix', 'np.matrix', (['[[0]]'], {}), '([[0]])\n', (867, 874), True, 'import numpy as np\n'), ((928, 955), 'numpy.matrix', 'np.matrix', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (937, 955), True, 'import numpy as np\n'), ((980, 1001), 'numpy.matrix', 'np.matrix', (['[[0], [0]]'], {}), '([[0], [0]])\n', (989, 1001), True, 'import numpy as np\n'), ((1056, 1083), 'numpy.matrix', 'np.matrix', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (1065, 1083), True, 'import numpy as 
np\n'), ((1108, 1129), 'numpy.matrix', 'np.matrix', (['[[0], [1]]'], {}), '([[0], [1]])\n', (1117, 1129), True, 'import numpy as np\n'), ((1211, 1273), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['nn.layers[0].weights', 'g_weights'], {}), '(nn.layers[0].weights, g_weights)\n', (1240, 1273), True, 'import numpy as np\n'), ((1278, 1334), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['nn.layers[0].bias', 'g_bias'], {}), '(nn.layers[0].bias, g_bias)\n', (1307, 1334), True, 'import numpy as np\n')] |
"""Functions to plot EEG sensor montages or digitizer montages."""
from copy import deepcopy
import numpy as np
from ..utils import check_version, logger, _check_option
from . import plot_sensors
def plot_montage(montage, scale_factor=20, show_names=True, kind='topomap',
                 show=True):
    """Plot a montage.
    Parameters
    ----------
    montage : instance of Montage or DigMontage
        The montage to visualize.
    scale_factor : float
        Determines the size of the points.
    show_names : bool
        Whether to show the channel names.
    kind : str
        Whether to plot the montage as '3d' or 'topomap' (default).
    show : bool
        Show figure if True.
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure object.
    """
    # imports kept local, presumably to avoid circular imports -- do not move
    from scipy.spatial.distance import cdist
    from ..channels import Montage, DigMontage
    from .. import create_info
    if isinstance(montage, Montage):
        ch_names = montage.ch_names
        title = montage.kind
    elif isinstance(montage, DigMontage):
        ch_names = montage._point_names
        title = None
    else:
        raise TypeError("montage must be an instance of "
                        "mne.channels.montage.Montage or"
                        "mne.channels.montage.DigMontage")
    _check_option('kind', kind, ['topomap', '3d'])
    if isinstance(montage, Montage):  # check for duplicate labels
        # pairwise distances between electrode positions; ~0 distance means
        # two entries share the same location
        dists = cdist(montage.pos, montage.pos)
        # only consider upper triangular part by setting the rest to np.nan
        dists[np.tril_indices(dists.shape[0])] = np.nan
        dupes = np.argwhere(np.isclose(dists, 0))
        if dupes.any():
            # work on a copy so the caller's montage is left untouched
            montage = deepcopy(montage)
            n_chans = montage.pos.shape[0]
            n_dupes = dupes.shape[0]
            # keep only the first occurrence of each duplicated position
            idx = np.setdiff1d(montage.selection, dupes[:, 1]).tolist()
            logger.info("{} duplicate electrode labels found:".format(n_dupes))
            logger.info(", ".join([ch_names[d[0]] + "/" + ch_names[d[1]]
                                   for d in dupes]))
            logger.info("Plotting {} unique labels.".format(n_chans - n_dupes))
            montage.ch_names = [montage.ch_names[i] for i in idx]
            ch_names = montage.ch_names
            montage.pos = montage.pos[idx, :]
            montage.selection = np.arange(n_chans - n_dupes)
    # build a minimal Info object so plot_sensors can do the drawing
    info = create_info(ch_names, sfreq=256, ch_types="eeg", montage=montage)
    fig = plot_sensors(info, kind=kind, show_names=show_names, show=show,
                       title=title)
    # resize the scatter points; very old matplotlib lacks set_sizes
    collection = fig.axes[0].collections[0]
    if check_version("matplotlib", "1.4"):
        collection.set_sizes([scale_factor])
    else:
        collection._sizes = [scale_factor]
    return fig
| [
"scipy.spatial.distance.cdist",
"numpy.tril_indices",
"copy.deepcopy",
"numpy.setdiff1d",
"numpy.isclose",
"numpy.arange"
] | [((1456, 1487), 'scipy.spatial.distance.cdist', 'cdist', (['montage.pos', 'montage.pos'], {}), '(montage.pos, montage.pos)\n', (1461, 1487), False, 'from scipy.spatial.distance import cdist\n'), ((1578, 1609), 'numpy.tril_indices', 'np.tril_indices', (['dists.shape[0]'], {}), '(dists.shape[0])\n', (1593, 1609), True, 'import numpy as np\n'), ((1648, 1668), 'numpy.isclose', 'np.isclose', (['dists', '(0)'], {}), '(dists, 0)\n', (1658, 1668), True, 'import numpy as np\n'), ((1716, 1733), 'copy.deepcopy', 'deepcopy', (['montage'], {}), '(montage)\n', (1724, 1733), False, 'from copy import deepcopy\n'), ((2356, 2384), 'numpy.arange', 'np.arange', (['(n_chans - n_dupes)'], {}), '(n_chans - n_dupes)\n', (2365, 2384), True, 'import numpy as np\n'), ((1832, 1876), 'numpy.setdiff1d', 'np.setdiff1d', (['montage.selection', 'dupes[:, 1]'], {}), '(montage.selection, dupes[:, 1])\n', (1844, 1876), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- <NAME>
For COPYING and LICENSE details, please refer to the LICENSE file
"""
# good introduction into packing can be found in
# https://python-packaging-user-guide.readthedocs.org/en/latest/index.html
from setuptools import setup
from distutils.core import setup as setup_dist # todo use only one setup
import os
import glob
# the setuptools are supposed to be used as a standard. Thats why we ommit
# usage of distutils here
# example of setup.py can be found here:
# https://github.com/pypa/sampleproject/blob/master/setup.py
# a small example how to build dependencies is given here:
# http://stackoverflow.com/questions/11010151/distributing-a-shared-library-and-some-c-code-with-a-cython-extension-module
import os
import numpy as np
import json
from setuptools import setup, Extension
from setuptools import find_packages # Always prefer setuptools over distutils
from Cython.Distutils import build_ext
def get_current_version():
    """Return the version information stored in pycmbs/version.json.

    The file is located relative to this setup.py so the version can be read
    without importing (and thus building) the package itself.
    """
    ppath = os.path.dirname(os.path.realpath(__file__))
    version_file = os.path.join(ppath, 'pycmbs', 'version.json')
    # use a context manager so the file handle is closed deterministically
    # (the previous json.load(open(...)) leaked the handle)
    with open(version_file) as fh:
        return json.load(fh)
# requires scipy:
# http://stackoverflow.com/questions/11128070/cannot-import-minimize-in-scipy
install_requires = ["numpy>0.1", "cdo>1.2", "netCDF4", "pytz",
"matplotlib", 'shapely', 'cartopy', 'cython', 'scipy']
#~ ext_polygon_utils = Extension('polygon_utils',
#~ sources=['.' + os.sep + 'pycmbs' +
#~ os.sep + 'polygon_utils.pyx'],
#~ # this is needed to get proper information on
#~ # numpy headers
#~ include_dirs=[np.get_include()]
#~ )
# in case of a compilation error, one should look that the name is the same as the extension sources
# see here:
# http://stackoverflow.com/questions/8024805/cython-compiled-c-extension-importerror-dynamic-module-does-not-define-init-fu
ext_variogramm = Extension('variogram_base',
sources=['.' + os.sep + 'pycmbs' + os.sep +
'geostatistic' + os.sep + 'variogram_base.pyx'],
# this is needed to get proper information on numpy
# headers
include_dirs=[np.get_include()]
)
def old_get_packages():
    """Return the hard-coded package list (superseded by find_packages)."""
    subpackages = [
        'benchmarking',
        'tests',
        'benchmarking/logo',
        'examples',
        'diagnostic',
        'colormaps',
        'plots',
    ]
    return ['pycmbs'] + ['pycmbs/' + sub for sub in subpackages]
def get_packages():
    """Return the package list discovered automatically by setuptools."""
    #find_packages(exclude=['contrib', 'docs', 'tests*']),
    return find_packages()
setup(name='pycmbs',
version=get_current_version(),
description='pyCMBS - python Climate Model Benchmarking Suite',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=get_packages(),
package_dir={'pycmbs': 'pycmbs'},
package_data={'pycmbs': ['benchmarking/configuration/*',
'benchmarking/logo/*', 'version.json']},
author="<NAME>",
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
url='https://github.com/pygeo/pycmbs',
long_description='The pyCMBS project is a suite of tools to \
process, analyze, visualize and benchmark \
scientific model output against each other or \
against observational data. It is in particular \
useful for analyzing in an efficient way output \
from climate model simulations.',
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=install_requires,
keywords=["data", "science", "climate", "meteorology",
"model evaluation", "benchmarking", "metrics"],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target
# platform.
entry_points={
'console_scripts': [
'pycmbs_benchmarking = pycmbs_benchmarking:main'
]},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 'Development Status :: 4 - beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Scientific/Engineering :: Visualization',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7'
],
ext_modules=[ext_variogramm],
cmdclass={'build_ext': build_ext}
)
########################################################################
# Some useful information on shipping packages
########################################################################
# PIP
#~ python setup.py register
#~ python setup.py sdist
#~ python setup.py upload
| [
"os.path.realpath",
"numpy.get_include",
"setuptools.find_packages"
] | [((2747, 2762), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2760, 2762), False, 'from setuptools import find_packages\n'), ((1045, 1071), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1061, 1071), False, 'import os\n'), ((2405, 2421), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (2419, 2421), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import os
import numpy as np
from torchvision.datasets import ImageFolder
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
data_root = r'D:\TJU\GBDB\set113\set113_images\test1' # path to the test set
test_weights_path = r"C:\Users\admin\Desktop\fsdownload\epoch_0278_top1_70.565_'checkpoint.pth.tar'" # pre-trained model parameters
num_class = 113 # number of classes
gpu = "cuda:0"
# mean=[0.948078, 0.93855226, 0.9332005], var=[0.14589554, 0.17054074, 0.18254866]
def test(model, test_path):
    """Evaluate ``model`` on the test set and plot per-class and averaged ROC curves.

    Loads the checkpoint from ``test_path``, runs inference over the test
    images, computes per-class, micro- and macro-averaged ROC/AUC with
    sklearn and saves the resulting figure to 'set113_roc.jpg'.
    """
    # load the test set and the pre-trained model parameters
    test_dir = os.path.join(data_root, 'test_images')
    class_list = list(os.listdir(test_dir))
    class_list.sort()
    # NOTE(review): get_transform_for_test is defined elsewhere in the project
    transform_test = get_transform_for_test(mean=[0.948078, 0.93855226, 0.9332005],
                                           var=[0.14589554, 0.17054074, 0.18254866])
    test_dataset = ImageFolder(test_dir, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=1, shuffle=False, drop_last=False, pin_memory=True, num_workers=1)
    checkpoint = torch.load(test_path)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    score_list = []  # raw prediction scores
    label_list = []  # ground-truth labels
    for i, (inputs, labels) in enumerate(test_loader):
        inputs = inputs.cuda()
        labels = labels.cuda()
        outputs = model(inputs)
        # prob_tmp = torch.nn.Softmax(dim=1)(outputs) # (batchsize, nclass)
        score_tmp = outputs  # (batchsize, nclass)
        score_list.extend(score_tmp.detach().cpu().numpy())
        label_list.extend(labels.cpu().numpy())
    score_array = np.array(score_list)
    # convert the integer labels to one-hot form
    label_tensor = torch.tensor(label_list)
    label_tensor = label_tensor.reshape((label_tensor.shape[0], 1))
    label_onehot = torch.zeros(label_tensor.shape[0], num_class)
    label_onehot.scatter_(dim=1, index=label_tensor, value=1)
    label_onehot = np.array(label_onehot)
    print("score_array:", score_array.shape)  # (batchsize, classnum)
    print("label_onehot:", label_onehot.shape)  # torch.Size([batchsize, classnum])
    # use sklearn to compute the fpr and tpr for every class
    fpr_dict = dict()
    tpr_dict = dict()
    roc_auc_dict = dict()
    for i in range(num_class):
        fpr_dict[i], tpr_dict[i], _ = roc_curve(label_onehot[:, i], score_array[:, i])
        roc_auc_dict[i] = auc(fpr_dict[i], tpr_dict[i])
    # micro
    fpr_dict["micro"], tpr_dict["micro"], _ = roc_curve(label_onehot.ravel(), score_array.ravel())
    roc_auc_dict["micro"] = auc(fpr_dict["micro"], tpr_dict["micro"])
    # macro
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr_dict[i] for i in range(num_class)]))
    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(num_class):
        mean_tpr += interp(all_fpr, fpr_dict[i], tpr_dict[i])
    # Finally average it and compute AUC
    mean_tpr /= num_class
    fpr_dict["macro"] = all_fpr
    tpr_dict["macro"] = mean_tpr
    roc_auc_dict["macro"] = auc(fpr_dict["macro"], tpr_dict["macro"])
    # plot the averaged and per-class ROC curves
    plt.figure()
    lw = 2
    plt.plot(fpr_dict["micro"], tpr_dict["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc_dict["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr_dict["macro"], tpr_dict["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc_dict["macro"]),
             color='navy', linestyle=':', linewidth=4)
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(range(num_class), colors):
        plt.plot(fpr_dict[i], tpr_dict[i], color=color, lw=lw,
                 label='ROC curve of class {0} (area = {1:0.2f})'
                       ''.format(i, roc_auc_dict[i]))
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")
    plt.savefig('set113_roc.jpg')
    plt.show()
if __name__ == '__main__':
    # build the model and move it to the configured device
    # NOTE(review): FineTuneSEResnet50 is defined elsewhere in the project
    seresnet = FineTuneSEResnet50(num_class=num_class)
    device = torch.device(gpu)
    seresnet = seresnet.to(device)
test(seresnet, test_weights_path) | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"torch.device",
"itertools.cycle",
"os.path.join",
"numpy.zeros_like",
"torch.utils.data.DataLoader",
"torch.load",
"torch.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"torchvision.datasets.Imag... | [((701, 739), 'os.path.join', 'os.path.join', (['data_root', '"""test_images"""'], {}), "(data_root, 'test_images')\n", (713, 739), False, 'import os\n'), ((995, 1042), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['test_dir'], {'transform': 'transform_test'}), '(test_dir, transform=transform_test)\n', (1006, 1042), False, 'from torchvision.datasets import ImageFolder\n'), ((1061, 1184), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)', 'num_workers': '(1)'}), '(test_dataset, batch_size=1, shuffle=False,\n drop_last=False, pin_memory=True, num_workers=1)\n', (1088, 1184), False, 'import torch\n'), ((1207, 1228), 'torch.load', 'torch.load', (['test_path'], {}), '(test_path)\n', (1217, 1228), False, 'import torch\n'), ((1764, 1784), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (1772, 1784), True, 'import numpy as np\n'), ((1828, 1852), 'torch.tensor', 'torch.tensor', (['label_list'], {}), '(label_list)\n', (1840, 1852), False, 'import torch\n'), ((1940, 1985), 'torch.zeros', 'torch.zeros', (['label_tensor.shape[0]', 'num_class'], {}), '(label_tensor.shape[0], num_class)\n', (1951, 1985), False, 'import torch\n'), ((2067, 2089), 'numpy.array', 'np.array', (['label_onehot'], {}), '(label_onehot)\n', (2075, 2089), True, 'import numpy as np\n'), ((2663, 2704), 'sklearn.metrics.auc', 'auc', (["fpr_dict['micro']", "tpr_dict['micro']"], {}), "(fpr_dict['micro'], tpr_dict['micro'])\n", (2666, 2704), False, 'from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score\n'), ((2914, 2936), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (2927, 2936), True, 'import numpy as np\n'), ((3190, 3231), 'sklearn.metrics.auc', 'auc', (["fpr_dict['macro']", "tpr_dict['macro']"], {}), "(fpr_dict['macro'], tpr_dict['macro'])\n", (3193, 3231), 
False, 'from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score\n'), ((3258, 3270), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3268, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3745, 3792), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (3750, 3792), False, 'from itertools import cycle\n'), ((4031, 4069), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': 'lw'}), "([0, 1], [0, 1], 'k--', lw=lw)\n", (4039, 4069), True, 'import matplotlib.pyplot as plt\n'), ((4074, 4094), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (4082, 4094), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4120), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (4107, 4120), True, 'import matplotlib.pyplot as plt\n'), ((4125, 4158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (4135, 4158), True, 'import matplotlib.pyplot as plt\n'), ((4163, 4195), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (4173, 4195), True, 'import matplotlib.pyplot as plt\n'), ((4200, 4279), 'matplotlib.pyplot.title', 'plt.title', (['"""Some extension of Receiver operating characteristic to multi-class"""'], {}), "('Some extension of Receiver operating characteristic to multi-class')\n", (4209, 4279), True, 'import matplotlib.pyplot as plt\n'), ((4284, 4313), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4294, 4313), True, 'import matplotlib.pyplot as plt\n'), ((4318, 4347), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""set113_roc.jpg"""'], {}), "('set113_roc.jpg')\n", (4329, 4347), True, 'import matplotlib.pyplot as plt\n'), ((4352, 4362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (4360, 4362), True, 'import matplotlib.pyplot as plt\n'), ((4471, 4488), 'torch.device', 'torch.device', (['gpu'], {}), '(gpu)\n', (4483, 4488), False, 'import torch\n'), ((762, 782), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (772, 782), False, 'import os\n'), ((2419, 2467), 'sklearn.metrics.roc_curve', 'roc_curve', (['label_onehot[:, i]', 'score_array[:, i]'], {}), '(label_onehot[:, i], score_array[:, i])\n', (2428, 2467), False, 'from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score\n'), ((2494, 2523), 'sklearn.metrics.auc', 'auc', (['fpr_dict[i]', 'tpr_dict[i]'], {}), '(fpr_dict[i], tpr_dict[i])\n', (2497, 2523), False, 'from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score\n'), ((2988, 3029), 'scipy.interp', 'interp', (['all_fpr', 'fpr_dict[i]', 'tpr_dict[i]'], {}), '(all_fpr, fpr_dict[i], tpr_dict[i])\n', (2994, 3029), False, 'from scipy import interp\n')] |
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# daal4py low order moments example for streaming on shared memory systems
import daal4py as d4p
import numpy as np
import os
from daal4py.oneapi import sycl_context, sycl_buffer
# let's use a generator for getting stream from file (defined in stream.py)
import sys
sys.path.insert(0, '..')
from stream import read_next
# At this moment with sycl we are working only with numpy arrays
def to_numpy(data):
    """Convert *data* to a plain numpy representation.

    pandas DataFrames become a C-contiguous array of their values and scipy
    CSR matrices are densified; anything else is returned unchanged.  The
    imports are done lazily so pandas/scipy remain optional dependencies.
    """
    try:
        from pandas import DataFrame
        if isinstance(data, DataFrame):
            # ensure a C-contiguous copy (required downstream by sycl_buffer)
            return np.ascontiguousarray(data.values)
    except ImportError:
        # pandas not installed -- fall through to the other conversions.
        # (The previous bare "except:" silently swallowed *all* errors,
        # including KeyboardInterrupt/SystemExit.)
        pass
    try:
        from scipy.sparse import csr_matrix
        if isinstance(data, csr_matrix):
            return data.toarray()
    except ImportError:
        pass
    return data
def main(readcsv=None, method='defaultDense'):
    """Run streaming low-order moments three ways (classic CPU, SYCL GPU,
    SYCL CPU) on the same CSV stream and assert all results agree.

    readcsv: optional chunk-reader passed through to read_next
    method: kept for interface compatibility (unused by the algorithm here)
    Returns the classic (non-SYCL) result object.
    """
    infile = os.path.join('..', 'data', 'batch', 'covcormoments_dense.csv')

    def _stream_moments(wrap_sycl):
        # one streaming pass over the file; chunks are optionally wrapped
        # in a sycl_buffer for device execution
        algo = d4p.low_order_moments(streaming=True)
        for chunk in read_next(infile, 55, readcsv):
            if wrap_sycl:
                algo.compute(sycl_buffer(to_numpy(chunk)))
            else:
                algo.compute(chunk)
        return algo.finalize()

    # classic computation on the host
    result_classic = _stream_moments(False)
    # same computation routed through SYCL on GPU, then CPU
    with sycl_context('gpu'):
        result_gpu = _stream_moments(True)
    with sycl_context('cpu'):
        result_cpu = _stream_moments(True)

    # every statistic must match across all three execution paths
    for name in ['minimum', 'maximum', 'sum', 'sumSquares', 'sumSquaresCentered', 'mean',
                 'secondOrderRawMoment', 'variance', 'standardDeviation', 'variation']:
        assert np.allclose(getattr(result_classic, name), getattr(result_gpu, name))
        assert np.allclose(getattr(result_classic, name), getattr(result_cpu, name))
    return result_classic
# Script entry point: run all three computations and print every statistic
# exposed by the daal4py low-order-moments result object.
if __name__ == "__main__":
    res = main()
    # print results
    print("\nMinimum:\n", res.minimum)
    print("\nMaximum:\n", res.maximum)
    print("\nSum:\n", res.sum)
    print("\nSum of squares:\n", res.sumSquares)
    print("\nSum of squared difference from the means:\n", res.sumSquaresCentered)
    print("\nMean:\n", res.mean)
    print("\nSecond order raw moment:\n", res.secondOrderRawMoment)
    print("\nVariance:\n", res.variance)
    print("\nStandard deviation:\n", res.standardDeviation)
    print("\nVariation:\n", res.variation)
    print('All looks good!')
| [
"numpy.ascontiguousarray",
"daal4py.low_order_moments",
"sys.path.insert",
"stream.read_next",
"daal4py.oneapi.sycl_context",
"os.path.join"
] | [((1083, 1107), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (1098, 1107), False, 'import sys\n'), ((1645, 1707), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""batch"""', '"""covcormoments_dense.csv"""'], {}), "('..', 'data', 'batch', 'covcormoments_dense.csv')\n", (1657, 1707), False, 'import os\n'), ((1830, 1867), 'daal4py.low_order_moments', 'd4p.low_order_moments', ([], {'streaming': '(True)'}), '(streaming=True)\n', (1851, 1867), True, 'import daal4py as d4p\n'), ((1927, 1957), 'stream.read_next', 'read_next', (['infile', '(55)', 'readcsv'], {}), '(infile, 55, readcsv)\n', (1936, 1957), False, 'from stream import read_next\n'), ((2189, 2208), 'daal4py.oneapi.sycl_context', 'sycl_context', (['"""gpu"""'], {}), "('gpu')\n", (2201, 2208), False, 'from daal4py.oneapi import sycl_context, sycl_buffer\n'), ((2286, 2323), 'daal4py.low_order_moments', 'd4p.low_order_moments', ([], {'streaming': '(True)'}), '(streaming=True)\n', (2307, 2323), True, 'import daal4py as d4p\n'), ((2391, 2421), 'stream.read_next', 'read_next', (['infile', '(55)', 'readcsv'], {}), '(infile, 55, readcsv)\n', (2400, 2421), False, 'from stream import read_next\n'), ((2728, 2747), 'daal4py.oneapi.sycl_context', 'sycl_context', (['"""cpu"""'], {}), "('cpu')\n", (2740, 2747), False, 'from daal4py.oneapi import sycl_context, sycl_buffer\n'), ((2825, 2862), 'daal4py.low_order_moments', 'd4p.low_order_moments', ([], {'streaming': '(True)'}), '(streaming=True)\n', (2846, 2862), True, 'import daal4py as d4p\n'), ((2930, 2960), 'stream.read_next', 'read_next', (['infile', '(55)', 'readcsv'], {}), '(infile, 55, readcsv)\n', (2939, 2960), False, 'from stream import read_next\n'), ((1329, 1362), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data.values'], {}), '(data.values)\n', (1349, 1362), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import argparse
def evaluate_explanation_score(x, y, w, h, saliency_map):
    """Score how well a saliency map overlaps a ground-truth bounding box.

    Builds a binary mask for the box (x, y, w, h) — coordinates are in the
    saliency map's pixel space and are truncated to ints — and returns the
    mean of the element-wise product of mask and map, i.e. the average
    saliency inside the box normalized by total map area.

    :param x: left coordinate of the box
    :param y: top coordinate of the box
    :param w: box width
    :param h: box height
    :param saliency_map: 2-D array of saliency values
    :return: float score (higher = more saliency mass inside the box)
    """
    bbox = np.zeros_like(saliency_map)
    bbox[int(y):int(y + h), int(x):int(x + w)] = 1
    # element-wise product zeroes out saliency outside the box; the mean is
    # taken over the whole map so box size affects the score.
    # (removed dead commented-out matplotlib preview code)
    return np.mean(bbox * saliency_map)
def get_parser():
    """Build the argparse parser for the chest-xray saliency-map run.

    Returns an argparse.ArgumentParser with data, training, and
    saliency-window options; defaults mirror the original configuration.
    """
    p = argparse.ArgumentParser()
    # data / model selection
    p.add_argument("--dataset_path", default="/home/feiyi/workspace/git-repo/PANDAmodels/datasets/chestxray/", type=str)
    p.add_argument("--model", default="densenet", type=str)
    p.add_argument("--optimizer", default="adam", type=str)
    # optimization hyper-parameters
    p.add_argument("--lr", default=0.0001, type=float)
    p.add_argument("--weight_decay", default=0.0, type=float)
    p.add_argument("--drop_rate", default=0.0, type=float)
    p.add_argument("--epochs", default=0, type=int)
    p.add_argument("--batch_size", default=16, type=int)
    p.add_argument("--workers", default=8, type=int)
    p.add_argument("--seed", default=123456, type=int)
    p.add_argument("--tag", default="relabeled", type=str)
    # NOTE: store_false, so --toy defaults to True and the flag disables it
    p.add_argument("--toy", action="store_false")### true
    p.add_argument("--save_path", default="saliencymap_densenet", type=str)
    p.add_argument("--scale", default=512, type=int)
    # boolean switches (default False)
    p.add_argument("--horizontal_flip", action="store_true")
    p.add_argument("--verbose", action="store_true")
    p.add_argument("--scratch", action="store_true")
    p.add_argument("--train_weighted", action="store_true")
    p.add_argument("--valid_weighted", action="store_true")
    p.add_argument("--size", default=None, type=str)
    # sliding-window geometry for the saliency evaluation
    p.add_argument("--kernel_x", default=64, type=int)
    p.add_argument("--kernel_y", default=64, type=int)
    p.add_argument("--stride_x", default=32, type=int)
    p.add_argument("--stride_y", default=32, type=int)
    return p
| [
"numpy.zeros_like",
"numpy.multiply",
"argparse.ArgumentParser"
] | [((374, 401), 'numpy.zeros_like', 'np.zeros_like', (['saliency_map'], {}), '(saliency_map)\n', (387, 401), True, 'import numpy as np\n'), ((643, 668), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (666, 668), False, 'import argparse\n'), ((560, 591), 'numpy.multiply', 'np.multiply', (['bbox', 'saliency_map'], {}), '(bbox, saliency_map)\n', (571, 591), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mAP.py
# Copy from:
# https://github.com/huanghoujing/person-reid-triplet-loss-baseline/blob/master/tri_loss/utils/metric.py#L107
import sklearn
from sklearn.metrics import average_precision_score
import numpy as np
def mean_ap(
        distmat,
        query_ids=None,
        gallery_ids=None,
        query_cams=None,
        gallery_cams=None,
        average=True):
    """Compute mean Average Precision for person re-identification.

    Args:
        distmat: numpy array with shape [num_query, num_gallery], the
            pairwise distance between query and gallery samples
        query_ids: numpy array with shape [num_query]
        gallery_ids: numpy array with shape [num_gallery]
        query_cams: numpy array with shape [num_query]
        gallery_cams: numpy array with shape [num_gallery]
        average: whether to average the results across queries
    Returns:
        If `average` is `False`:
            ret: numpy array with shape [num_query]
            is_valid_query: numpy array with shape [num_query], containing 0's and
                1's, whether each query is valid or not
        If `average` is `True`:
            a scalar
    Raises:
        RuntimeError: if `average` is True and no query has any valid match.
    """
    # -------------------------------------------------------------------------
    # The behavior of `sklearn.metrics.average_precision_score` changed in
    # version 0.19; 0.18.1 matches the reference Matlab evaluation code used
    # by the person re-ranking and Market-1501 benchmarks, so warn when the
    # installed version differs.
    cur_version = sklearn.__version__
    required_version = '0.18.1'
    if cur_version != required_version:
        print('User Warning: Version {} is required for package scikit-learn, '
              'your current version is {}. '
              'As a result, the mAP score may not be totally correct. '
              'You can try `pip uninstall scikit-learn` '
              'and then `pip install scikit-learn=={}`'.format(
            required_version, cur_version, required_version))
    # -------------------------------------------------------------------------
    # Ensure numpy array
    assert isinstance(distmat, np.ndarray)
    assert isinstance(query_ids, np.ndarray)
    assert isinstance(gallery_ids, np.ndarray)
    assert isinstance(query_cams, np.ndarray)
    assert isinstance(gallery_cams, np.ndarray)
    m, n = distmat.shape
    # Sort and find correct matches
    indices = np.argsort(distmat, axis=1)
    matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
    # Compute AP for each query
    aps = np.zeros(m)
    is_valid_query = np.zeros(m)
    for i in range(m):
        # Filter out the same id and same camera
        valid = ((gallery_ids[indices[i]] != query_ids[i]) |
                 (gallery_cams[indices[i]] != query_cams[i]))
        y_true = matches[i, valid]
        y_score = -distmat[i][indices[i]][valid]
        # a query with no positive gallery match contributes nothing
        if not np.any(y_true): continue
        is_valid_query[i] = 1
        aps[i] = average_precision_score(y_true, y_score)
    if average:
        # FIX: the old guard `len(aps) == 0` could never fire (aps always has
        # m entries), so an all-invalid query set silently yielded 0/0 = NaN.
        # Detect that failure mode explicitly before dividing.
        valid_count = np.sum(is_valid_query)
        if valid_count == 0:
            raise RuntimeError("No valid query")
        return float(np.sum(aps)) / valid_count
    return aps, is_valid_query
| [
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"numpy.any",
"sklearn.metrics.average_precision_score"
] | [((2538, 2565), 'numpy.argsort', 'np.argsort', (['distmat'], {'axis': '(1)'}), '(distmat, axis=1)\n', (2548, 2565), True, 'import numpy as np\n'), ((2667, 2678), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (2675, 2678), True, 'import numpy as np\n'), ((2698, 2709), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (2706, 2709), True, 'import numpy as np\n'), ((3042, 3082), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (3065, 3082), False, 'from sklearn.metrics import average_precision_score\n'), ((2978, 2992), 'numpy.any', 'np.any', (['y_true'], {}), '(y_true)\n', (2984, 2992), True, 'import numpy as np\n'), ((3190, 3212), 'numpy.sum', 'np.sum', (['is_valid_query'], {}), '(is_valid_query)\n', (3196, 3212), True, 'import numpy as np\n'), ((3175, 3186), 'numpy.sum', 'np.sum', (['aps'], {}), '(aps)\n', (3181, 3186), True, 'import numpy as np\n')] |
import PIL.Image
import random
import numpy as np
class RandomResizeLong():
    """Resize a PIL image so its longer side equals a random length drawn
    uniformly from [min_long, max_long]; aspect ratio is preserved."""

    def __init__(self, min_long, max_long):
        self.min_long = min_long
        self.max_long = max_long

    def __call__(self, img):
        long_side = random.randint(self.min_long, self.max_long)
        w, h = img.size
        # scale the shorter side proportionally, rounding to whole pixels
        if w < h:
            new_size = (int(round(w * long_side / h)), long_side)
        else:
            new_size = (long_side, int(round(h * long_side / w)))
        return img.resize(new_size, resample=PIL.Image.CUBIC)
class RandomCrop():
    """Randomly crop (or zero-pad, if the image is smaller) an HWC array to a
    square of side `cropsize`; output is always float32."""

    def __init__(self, cropsize):
        self.cropsize = cropsize

    def __call__(self, imgarr):
        h, w, _ = imgarr.shape
        side = self.cropsize
        copy_h = min(side, h)
        copy_w = min(side, w)
        slack_w = w - side
        slack_h = h - side
        # horizontal offset is drawn first, then vertical — each uniformly
        # over the available crop (positive slack) or pad (negative) range
        if slack_w > 0:
            dst_left, src_left = 0, random.randrange(slack_w + 1)
        else:
            dst_left, src_left = random.randrange(-slack_w + 1), 0
        if slack_h > 0:
            dst_top, src_top = 0, random.randrange(slack_h + 1)
        else:
            dst_top, src_top = random.randrange(-slack_h + 1), 0
        out = np.zeros((side, side, imgarr.shape[-1]), np.float32)
        out[dst_top:dst_top + copy_h, dst_left:dst_left + copy_w] = \
            imgarr[src_top:src_top + copy_h, src_left:src_left + copy_w]
        return out
def get_random_crop_box(imgsize, cropsize):
    """Sample a random crop/pad placement for an image of size `imgsize`.

    Returns an 8-tuple
    (cont_top0, cont_top1, cont_left0, cont_left1,
     img_top0, img_top1, img_left0, img_left1)
    describing where to paste inside a cropsize container and which region
    of the image to copy.
    """
    h, w = imgsize
    copy_h = min(cropsize, h)
    copy_w = min(cropsize, w)
    slack_w = w - cropsize
    slack_h = h - cropsize
    # positive slack => crop (random source offset); negative => pad
    # (random destination offset). Horizontal draw happens first.
    if slack_w > 0:
        cont_left, img_left = 0, random.randrange(slack_w + 1)
    else:
        cont_left, img_left = random.randrange(-slack_w + 1), 0
    if slack_h > 0:
        cont_top, img_top = 0, random.randrange(slack_h + 1)
    else:
        cont_top, img_top = random.randrange(-slack_h + 1), 0
    return (cont_top, cont_top + copy_h, cont_left, cont_left + copy_w,
            img_top, img_top + copy_h, img_left, img_left + copy_w)
def crop_with_box(img, box):
    """Copy the image region described by `box` (as produced by
    get_random_crop_box) into a zero-filled container and return it."""
    ct0, ct1, cl0, cl1, it0, it1, il0, il1 = box
    # container is sized to hold whichever span is larger (crop vs pad case)
    height = max(ct1 - ct0, it0 - it1)
    width = max(cl1 - cl0, il0 - il1)
    if len(img.shape) == 3:
        out = np.zeros((height, width, img.shape[-1]), dtype=img.dtype)
    else:
        out = np.zeros((height, width), dtype=img.dtype)
    out[ct0:ct1, cl0:cl1] = img[it0:it1, il0:il1]
    return out
def random_crop(images, cropsize, fills):
    """Apply a single shared random crop/pad to several aligned images.

    images: list of PIL images and/or numpy arrays, all spatially aligned
    fills: per-image fill value used for padding numpy arrays (ignored for
        PIL images, which are pasted onto a blank canvas)
    Returns the list of cropped images in the same order.
    """
    # spatial size comes from the first image; PIL stores (w, h) so reverse it
    if isinstance(images[0], PIL.Image.Image):
        imgsize = images[0].size[::-1]
    else:
        imgsize = images[0].shape[:2]
    box = get_random_crop_box(imgsize, cropsize)
    out = []
    for img, fill in zip(images, fills):
        if isinstance(img, PIL.Image.Image):
            patch = img.crop((box[6], box[4], box[7], box[5]))
            canvas = PIL.Image.new(img.mode, (cropsize, cropsize))
            canvas.paste(patch, (box[2], box[0]))
            out.append(canvas)
        else:
            if len(img.shape) == 3:
                canvas = np.ones((cropsize, cropsize, img.shape[2]), img.dtype) * fill
            else:
                canvas = np.ones((cropsize, cropsize), img.dtype) * fill
            canvas[box[0]:box[1], box[2]:box[3]] = img[box[4]:box[5], box[6]:box[7]]
            out.append(canvas)
    return out
class AvgPool2d():
    """Mean-pool the two spatial dimensions of an HWC array in ksize x ksize
    blocks; the channel dimension is left untouched."""

    def __init__(self, ksize):
        self.ksize = ksize

    def __call__(self, img):
        import skimage.measure
        k = self.ksize
        return skimage.measure.block_reduce(img, (k, k, 1), np.mean)
class RandomHorizontalFlip():
    """Flip an array left-right with probability 0.5 (one random bit)."""

    def __init__(self):
        return

    def __call__(self, img):
        flip = bool(random.getrandbits(1))
        # .copy() so the flipped result owns its memory (fliplr is a view)
        return np.fliplr(img).copy() if flip else img
class CenterCrop():
    """Crop (or pad with `default_value`) an array to a centered square of
    side `cropsize`; works on 2-D (H, W) and 3-D (H, W, C) inputs."""

    def __init__(self, cropsize, default_value=0):
        self.cropsize = cropsize
        self.default_value = default_value

    def __call__(self, npimg):
        h, w = npimg.shape[:2]
        side = self.cropsize
        copy_h = min(side, h)
        copy_w = min(side, w)
        slack_h = h - side
        slack_w = w - side
        # positive slack => crop from the image center;
        # negative slack => paste into the container center
        if slack_w > 0:
            dst_left, src_left = 0, int(round(slack_w / 2))
        else:
            dst_left, src_left = int(round(-slack_w / 2)), 0
        if slack_h > 0:
            dst_top, src_top = 0, int(round(slack_h / 2))
        else:
            dst_top, src_top = int(round(-slack_h / 2)), 0
        if len(npimg.shape) == 2:
            out = np.ones((side, side), npimg.dtype) * self.default_value
        else:
            out = np.ones((side, side, npimg.shape[2]), npimg.dtype) * self.default_value
        out[dst_top:dst_top + copy_h, dst_left:dst_left + copy_w] = \
            npimg[src_top:src_top + copy_h, src_left:src_left + copy_w]
        return out
def HWC_to_CHW(img):
    """Reorder a height-width-channel array to channel-height-width."""
    return np.transpose(img, axes=(2, 0, 1))
class RescaleNearest():
    """Rescale an image by the same factor in both axes using
    nearest-neighbour interpolation (suitable for label masks)."""

    def __init__(self, scale):
        self.scale = scale

    def __call__(self, npimg):
        import cv2
        factor = self.scale
        return cv2.resize(npimg, None, fx=factor, fy=factor,
                          interpolation=cv2.INTER_NEAREST)
def crf_inference(img, probs, t=10, scale_factor=1, labels=21):
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral
if len(img.shape) == 2: img = img[:,:,None]
h, w = img.shape[:2]
n_labels = labels
d = dcrf.DenseCRF2D(w, h, n_labels)
unary = unary_from_softmax(probs)
unary = np.ascontiguousarray(unary)
d.setUnaryEnergy(unary)
d.addPairwiseGaussian(sxy=3/scale_factor, compat=3)
#d.addPairwiseBilateral(sxy=80/scale_factor, srgb=13, rgbim=np.copy(img), compat=10)
energy = create_pairwise_bilateral(sdims=(10,10), schan=0.01, img=np.copy(img), chdim=2)
d.addPairwiseEnergy(energy, compat=10)
Q = d.inference(t)
return np.array(Q).reshape((n_labels, h, w)) | [
"random.randint",
"numpy.copy",
"numpy.transpose",
"numpy.zeros",
"numpy.ones",
"numpy.fliplr",
"random.randrange",
"pydensecrf.utils.unary_from_softmax",
"numpy.array",
"random.getrandbits",
"pydensecrf.densecrf.DenseCRF2D",
"numpy.ascontiguousarray",
"cv2.resize"
] | [((5009, 5037), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (5021, 5037), True, 'import numpy as np\n'), ((5565, 5596), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['w', 'h', 'n_labels'], {}), '(w, h, n_labels)\n', (5580, 5596), True, 'import pydensecrf.densecrf as dcrf\n'), ((5610, 5635), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['probs'], {}), '(probs)\n', (5628, 5635), False, 'from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral\n'), ((5648, 5675), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['unary'], {}), '(unary)\n', (5668, 5675), True, 'import numpy as np\n'), ((242, 286), 'random.randint', 'random.randint', (['self.min_long', 'self.max_long'], {}), '(self.min_long, self.max_long)\n', (256, 286), False, 'import random\n'), ((1282, 1352), 'numpy.zeros', 'np.zeros', (['(self.cropsize, self.cropsize, imgarr.shape[-1])', 'np.float32'], {}), '((self.cropsize, self.cropsize, imgarr.shape[-1]), np.float32)\n', (1290, 1352), True, 'import numpy as np\n'), ((1742, 1771), 'random.randrange', 'random.randrange', (['(w_space + 1)'], {}), '(w_space + 1)\n', (1758, 1771), False, 'import random\n'), ((1802, 1832), 'random.randrange', 'random.randrange', (['(-w_space + 1)'], {}), '(-w_space + 1)\n', (1818, 1832), False, 'import random\n'), ((1914, 1943), 'random.randrange', 'random.randrange', (['(h_space + 1)'], {}), '(h_space + 1)\n', (1930, 1943), False, 'import random\n'), ((1973, 2003), 'random.randrange', 'random.randrange', (['(-h_space + 1)'], {}), '(-h_space + 1)\n', (1989, 2003), False, 'import random\n'), ((5188, 5279), 'cv2.resize', 'cv2.resize', (['npimg', 'None'], {'fx': 'self.scale', 'fy': 'self.scale', 'interpolation': 'cv2.INTER_NEAREST'}), '(npimg, None, fx=self.scale, fy=self.scale, interpolation=cv2.\n INTER_NEAREST)\n', (5198, 5279), False, 'import cv2\n'), ((951, 980), 'random.randrange', 'random.randrange', (['(w_space + 1)'], {}), '(w_space + 
1)\n', (967, 980), False, 'import random\n'), ((1017, 1047), 'random.randrange', 'random.randrange', (['(-w_space + 1)'], {}), '(-w_space + 1)\n', (1033, 1047), False, 'import random\n'), ((1143, 1172), 'random.randrange', 'random.randrange', (['(h_space + 1)'], {}), '(h_space + 1)\n', (1159, 1172), False, 'import random\n'), ((1208, 1238), 'random.randrange', 'random.randrange', (['(-h_space + 1)'], {}), '(-h_space + 1)\n', (1224, 1238), False, 'import random\n'), ((3794, 3815), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (3812, 3815), False, 'import random\n'), ((5920, 5932), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (5927, 5932), True, 'import numpy as np\n'), ((6026, 6037), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (6034, 6037), True, 'import numpy as np\n'), ((4622, 4674), 'numpy.ones', 'np.ones', (['(self.cropsize, self.cropsize)', 'npimg.dtype'], {}), '((self.cropsize, self.cropsize), npimg.dtype)\n', (4629, 4674), True, 'import numpy as np\n'), ((4732, 4800), 'numpy.ones', 'np.ones', (['(self.cropsize, self.cropsize, npimg.shape[2])', 'npimg.dtype'], {}), '((self.cropsize, self.cropsize, npimg.shape[2]), npimg.dtype)\n', (4739, 4800), True, 'import numpy as np\n'), ((3165, 3219), 'numpy.ones', 'np.ones', (['(cropsize, cropsize, img.shape[2])', 'img.dtype'], {}), '((cropsize, cropsize, img.shape[2]), img.dtype)\n', (3172, 3219), True, 'import numpy as np\n'), ((3263, 3303), 'numpy.ones', 'np.ones', (['(cropsize, cropsize)', 'img.dtype'], {}), '((cropsize, cropsize), img.dtype)\n', (3270, 3303), True, 'import numpy as np\n'), ((3836, 3850), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (3845, 3850), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
import configparser
from dateutil.parser import parse
import os
from sklearn.metrics import roc_auc_score, f1_score, precision_score,\
recall_score, classification_report, accuracy_score
import logging
logger = logging.getLogger(__name__)
# NOTE: `print` is deliberately rebound to logger.info so the many print(...)
# calls below are routed through the logging framework instead of stdout.
print = logger.info
def multilabel_from_tags(tag_list):
    """
    Build a binary multilabel DataFrame from raw tag strings.

    tag_list: the raw list of tags from input. Each row is "tag1, tag2, tag3..."
    Returns a pd.DataFrame with one 0/1 column per unique tag, named "Tag_<tag>".
    """
    # turn tag list strings into list for each row
    tag_list = [[tag.strip() for tag in tag_text.split(',')] for tag_text in tag_list]
    # obtain unique tags
    unique_tags = list(set(tag for tags in tag_list for tag in tags))
    try:
        # rows with no tags contribute an '' pseudo-tag; drop it if present
        unique_tags.remove('')
    except ValueError:
        # FIX: was a bare `except:` that would also swallow KeyboardInterrupt
        # and SystemExit; list.remove only raises ValueError when '' is absent.
        print("Unique tags does not have empty situations")
    # one binary membership column per tag
    tag_dict = {}
    for tag in unique_tags:
        tag_dict[f"Tag_{tag}"] = [1 if tag in tags else 0 for tags in tag_list]
    return pd.DataFrame(tag_dict)
def create_tag_columns(train_df, tag_col='Tags'):
    """Append one binary Tag_* column per unique tag found in `tag_col`.

    train_df: pd.DataFrame of training text and tags
    tag_col: str. Column holding the comma-separated multilabel tag strings
    Returns the DataFrame with the binary tag columns concatenated on.
    """
    tag_df = multilabel_from_tags(train_df[tag_col].to_list())
    return pd.concat([train_df, tag_df], axis=1)
def binary_tag_to_tags(text_df, tag_values):
    """
    Collapse binary tag columns into one comma-separated "Tags" column.

    +++INPUT+++
    text_df: dataframe with binary tags, fillna with 0
    tag_values: numpy array of tag column names
               example: tag_values = text_df.columns[2:].values
    +++OUTPUT+++
    text_df: with Tags column added containing tags
    """
    # FIX: the original iterated `range(len(text_df))` and indexed with .loc,
    # which raises KeyError (or selects wrong rows) whenever the DataFrame has
    # a non-default index. Select the tag columns once and walk them
    # positionally instead.
    binary_matrix = text_df.loc[:, list(tag_values)].values.astype(bool)
    tags_list = [", ".join(tag_values[row_mask]) for row_mask in binary_matrix]
    text_df['Tags'] = tags_list
    return text_df
def df_to_json_form(sample_df, tag_col='Tags', ui_dir='../input/',
                    ui_filename='text_tags.json'):
    """
    function to save a sampled text df to directory for human tags
    sample_df: pd.DataFrame. Has "Text" and "UID" columns
    tag_col: str. The expected name of the tags column. Blank fields will be
        populated for human input
    ui_dir: str. directory of the human input json form
    ui_filename: str. file name for the human input. should be in json
    """
    # NOTE(review): validation via assert + broad except + exit(1) terminates
    # the whole process on a malformed frame; asserts also vanish under -O.
    try:
        assert "Text" in sample_df.columns
        assert "UID" in sample_df.columns
    except:
        print("Make sure the DF has Text and UID columns!")
        exit(1)
    # add an empty tag column for coders to fill in when one is missing
    if tag_col not in sample_df.columns:
        print(f"Column {tag_col} not in columns. Adding empty column for it.")
        sample_df[tag_col] = ''
    # keep only the columns the tagging UI needs, in a fixed order
    sample_df = sample_df.loc[:, ['Text', 'UID', tag_col]]
    print("Saving the sampled texts as JSON for human tags")
    Path(ui_dir).mkdir(parents=True, exist_ok=True)
    sample_df.to_json(f'{ui_dir}{ui_filename}', orient='records', indent=2)
    print("Done")
def kmeans_from_proba(scored_df, tsne_fig_name, score_col_prefix='proba_', random_state=0):
    """Cluster records by their per-tag probability scores and save a tSNE plot.

    scored_df: pd.DataFrame whose `proba_*` columns hold per-tag model scores
    tsne_fig_name: path for the saved scatter-plot figure
    score_col_prefix: prefix identifying the score columns
    random_state: seed forwarded to KMeans
    Returns (scored_df with a new 'cluster_id' column, fitted KMeans model).
    """
    print("Extracting tag scores and training KMeans for clusters")
    # extract tag scores into np.array
    proba_scores = scored_df.loc[:, scored_df.columns.str.startswith(score_col_prefix)].values
    # fit and extract kmeans clusters
    # (n_tags + 1 clusters: one per tag plus a spare for the "none" group)
    kmeans = KMeans(n_clusters=proba_scores.shape[1] + 1, random_state=random_state)
    kmeans.fit(proba_scores)
    clusters = kmeans.predict(proba_scores).reshape((-1, 1))
    print("Visualizing tag score-based KMeans clusters with tSNE")
    # visualize the clusters using tsne (2-D projection of the score space)
    tsne_xy = TSNE(n_components=2).fit_transform(proba_scores)
    visualize_df = pd.DataFrame(
        np.concatenate((tsne_xy, clusters), axis=1), columns=['tsne_1', 'tsne_2', 'cluster_id'])
    plt.figure(figsize=(10, 6))
    sns.scatterplot(data=visualize_df,x='tsne_1',y='tsne_2',hue='cluster_id',
                    legend="full",alpha=0.5, palette='pastel')
    plt.title("KMeans Cluster on TSNE 2D Transformation")
    plt.savefig(tsne_fig_name, bbox_inches='tight')
    plt.close()
    # save cluster info back to scored_df
    print("Saving cluster information back to dataframe")
    scored_df['cluster_id'] = clusters
    return scored_df, kmeans
def sample_by_cluster(scored_df, sample_size, cluster_col='cluster_id', row_key='UID'):
    """Sample records evenly from each cluster, topping up at random.

    scored_df: pd.DataFrame with a cluster column and a unique row key
    sample_size: int total number of records to sample
    cluster_col: str name of the cluster-id column
    row_key: str name of the unique row identifier column
    Returns a new DataFrame of `sample_size` rows with an empty 'Tags' column.
    """
    print("Sampling records based on cluster information...")
    per_cluster = sample_size // scored_df[cluster_col].nunique()
    sample_df = scored_df.groupby(cluster_col).apply(
        lambda grp: grp.sample(n=per_cluster)).reset_index(drop=True)
    shortfall = sample_size - sample_df.shape[0]
    print(f"A total of {sample_df.shape[0]:,} records were sampled based on clusters.")
    if shortfall > 0:
        print(f"{shortfall:,} remaining records are to be sampled from total population.")
        # FIX: previously compared on the hard-coded `.UID` attribute, which
        # broke (or silently mis-sampled) whenever row_key != 'UID'.
        unsampled_ids = scored_df[row_key][~np.isin(scored_df[row_key], sample_df[row_key])]
        additional_ids = np.random.choice(unsampled_ids, shortfall, replace=False)
        additional_df = scored_df.loc[np.isin(scored_df[row_key], additional_ids), :]
        sample_df = pd.concat([sample_df, additional_df], ignore_index=True)
    sample_df['Tags'] = ''
    return sample_df
def sample_by_random(scored_df, sample_size, cluster_col='cluster_id', row_key='UID'):
    """Uniformly sample `sample_size` rows by their `row_key` identifier.

    cluster_col is accepted for interface parity with sample_by_cluster but
    is not used here. Returns a fresh DataFrame with an empty 'Tags' column.
    """
    print("Sampling records based on pure randomness...")
    print(f"{sample_size:,} records are to be sampled from total population.")
    chosen_ids = np.random.choice(scored_df[row_key], sample_size, replace=False)
    keep_mask = np.isin(scored_df[row_key], chosen_ids)
    sample_df = scored_df.loc[keep_mask, :].reset_index(drop=True)
    sample_df['Tags'] = ''
    return sample_df
def coder_sim(samples_df, answers_df):
    """Simulate human coders: copy known Tags from answers_df onto
    samples_df, matching rows on UID. Mutates and returns samples_df."""
    assert "UID" in samples_df.columns
    assert "UID" in answers_df.columns
    assert "Tags" in samples_df.columns
    assert "Tags" in answers_df.columns
    answer_lookup = answers_df.set_index("UID")
    samples_df['Tags'] = answer_lookup.loc[samples_df.UID, ['Tags']].values.flatten()
    print("Samples have been tagged using the provided answers dataframe")
    return samples_df
class MetaProject(object):
    """Loads an active-learning project's config, log, and answer file and
    produces a high-level textual description of the run."""

    def __init__(self, project_path, rundir='./wrapper_al/'):
        """
        Simple MetaProject class to analyze project output
        project_path: path to the project folder of the active learning run
        rundir: the path where the active learning ran, default './wrapper_al/'
        """
        print(">>> Instantiate MetaProject class...")
        self.project_path = project_path
        self.rundir = rundir
        self.cfg_path = os.path.abspath(f'{self.project_path}orchestration_record.cfg')
        self.log_path = os.path.abspath(f'{self.project_path}orchestration_log.log')
        self._load_config()
        self.total_rounds = int(self.config.get('active_learning', 'total_rounds'))
        self.round_sample = int(self.config.get('sampling', 'sample_size'))
        self.total_sample = self.total_rounds * self.round_sample
        # get abspath of the answer file since the exec path of project is different from analytics path
        self.answer_file = os.path.abspath(os.path.join(
            self.rundir, self.config.get('coder_sim', 'answer_file')))
        print(self.answer_file)
        self.max_tags = int(self.config.get('training', 'max_tags'))
        self.run_sim = int(self.config.get('active_learning', 'run_sim'))
        self.run_time = self._parse_log(self.log_path)
        # FIX: previously called self._gen_tag_sum_df(self.answer_file), which
        # passed the answer-file PATH as the `tag_col` column prefix — no
        # "Tag_*" column could ever match, so the tag summary was always empty.
        self._gen_tag_sum_df()

    def _load_config(self):
        # parse the orchestration config with ${section:key} interpolation
        print(">>> Loading project orchestration config")
        self.config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
        self.config.read(self.cfg_path)

    def _parse_log(self, log_path):
        """
        Method to parse orchestration log file to obtain run duration in seconds
        """
        print(">>> Parsing project execution run time")
        with open(log_path, 'r') as logfile:
            first_line = logfile.readline()
            for last_line in logfile:
                pass
        try:
            # timestamps occupy the first 23 characters of each log line
            start_time = parse(first_line[:23])
            end_time = parse(last_line[:23])
            run_time = (end_time - start_time).seconds
        except:
            # broad except is load-bearing: it also covers the NameError when
            # the log has fewer than two lines (last_line never bound)
            print(">>> Project did not run successfully based on log records!")
            run_time = -1
        return run_time

    def _gen_tag_sum_df(self, tag_col='Tag_'):
        """
        Method to generate tag positive ratios of a given DF (stored in JSON format)
        """
        print(">>> Reading full dataset...")
        df = pd.read_json(self.answer_file, orient='records')
        df = create_tag_columns(df)
        self.df = df
        self.total_records = df.shape[0]
        if self.run_sim == 1:
            print(">>> Project ran as simulation...")
            # positive count and rate per Tag_* column, highest first
            self.answer_tag_sum_df = df.loc[:, df.columns.str.startswith(tag_col)].sum().sort_values(
                ascending=False).reset_index().rename(
                {'index':'Tag_Name', 0: 'Pos_Count'}, axis=1)
            self.answer_tag_sum_df['Pos_Rate'] = self.answer_tag_sum_df.Pos_Count / df.shape[0]
        else:
            print(">>> Project ran in real time with manual coders...")
            self.answer_tag_sum_df = None

    def describe(self):
        """
        Method to describe the project with Meta Cfg and Logs
        method only loads attributes of the object
        """
        print(">>> Composing project high level description...")
        self.stmts = []
        self.stmts.append('INTRO\n-------')
        self.stmts.append(f"\nThis Active Learning Run has a round count of {self.total_rounds:,},")
        self.stmts.append(f"and a total of {self.total_sample:,} samples are included for model training.")
        if self.run_sim == 1:
            self.stmts.append("This run is a simulation with known tags already available.")
        else:
            self.stmts.append("This run is an actual application with manual coder input for tags on the fly.")
        self.stmts.append(f"In each round, {int(self.config.get('sampling', 'sample_size')):,} samples are selected as additional training data.")
        self.stmts.append(f"While the first round always runs random sampling to gather the samples,")
        self.stmts.append(f"the second and beyond rounds use {self.config.get('sampling', 'sampling_method')} method.")
        self.stmts.append('\n\nDATA\n-------')
        self.stmts.append(f'\nThe input dataframe has a total of {self.total_records:,} records.')
        if self.answer_tag_sum_df is not None:
            self.stmts.append('The positive rates of each tag in the full answer dataset:')
            self.stmts.append("\n" + self.answer_tag_sum_df.to_string())
        self.stmts.append('\n\nMODELING\n-------')
        self.stmts.append("\nThe training config for each round's Bi-Directional LSTM modeling is as below:")
        for key, value in dict(self.config['training']).items():
            self.stmts.append(f"\n\t{key}: {value}")
        if self.config.get('training', 'random_embed') == 'True':
            self.stmts.append('\nThe text embeddings are randomly initiated 300-length via Tensorflow 2.')
        else:
            self.stmts.append('\nThe text embeddings are GloVe 300-length text embeddings loaded via Spacy.')
        self.stmts.append('\n\nRUNTIME\n-------')
        if self.run_time > 0:
            self.stmts.append(f"\nExecution of the run took {self.run_time / 60:,.2f} minutes to complete")
        else:
            self.stmts.append("Program log file indicates that this run was not successfully executed...")
        self.description = " ".join(self.stmts)
        print(">>> Displaying the description:")
        print(self.description)
class RoundResult(object):
    """Loader/evaluator for the outputs of a single active-learning round.

    Reads the round's training set, the scored (model-predicted) output and
    the full answer file, then computes per-tag classification metrics for
    both the training rows and the held-out (test) rows.

    :param round_path: round folder, relative to ``rundir``
    :param answer_file: full answer JSON file, relative to ``rundir``
    :param proba_cutoff: probability threshold used to binarize model scores
    :param rundir: root folder where the active-learning run executed,
        default './wrapper_al/'
    """
    def __init__(self, round_path, answer_file, proba_cutoff, rundir='./wrapper_al/'):
        self.round_path = os.path.abspath(os.path.join(rundir, round_path))
        print(self.round_path)
        # Standard sub-folders produced by the orchestration for every round.
        self.config_dir = f"{self.round_path.rstrip('/')}/config/"
        self.sample_dir = f"{self.round_path.rstrip('/')}/sample/"
        self.label_dir = f"{self.round_path.rstrip('/')}/label/"
        self.input_dir = f"{self.round_path.rstrip('/')}/input/"
        self.output_dir = f"{self.round_path.rstrip('/')}/output/"
        self.train_file = f"{self.output_dir.rstrip('/')}/train_df.csv"
        self.scored_file = f"{self.output_dir.rstrip('/')}/scored/scored_output.json"
        # Resolve against rundir: the analytics process may run from a
        # different working directory than the active-learning run itself.
        self.answer_file = os.path.abspath(os.path.join(rundir, answer_file))
        self.proba_cutoff = proba_cutoff
        self.load_outputs()
    def load_outputs(self, proba_prefix='proba_', tag_prefix='Tag_', row_key='UID'):
        """Load the round datasets and compute per-tag train/test metrics.

        Populates ``self.round_outputs`` (raw y_true/y_pred/y_proba arrays per
        tag and split) and ``self.round_results`` (scalar metrics plus text
        classification reports).

        :param proba_prefix: prefix of the probability columns in the scored file
        :param tag_prefix: prefix of the ground-truth tag columns in the answers
        :param row_key: unique row identifier column shared by all datasets
        """
        # read the round related datasets
        train_df = pd.read_csv(self.train_file)
        scored_df = pd.read_json(self.scored_file, orient='records')
        answer_df = pd.read_json(self.answer_file, orient='records')
        answer_df = create_tag_columns(answer_df)
        # prepare col selectors
        proba_cols = scored_df.columns[scored_df.columns.str.startswith(proba_prefix)].tolist()
        tag_names = [proba_col.replace(proba_prefix, '').strip() for proba_col in proba_cols]
        true_tag_cols = [f"{tag_prefix}{tag}" for tag in tag_names]
        # prepare row selectors: test rows are the ones never used in training
        all_ids = answer_df[row_key].unique()
        train_ids = train_df[row_key].unique()
        test_ids = [uid for uid in all_ids if uid not in train_ids]
        # create 2 dicts for round outputs and results
        round_outputs = {}
        round_results = {}
        for tag_name, proba_col, true_tag_col in zip(tag_names, proba_cols, true_tag_cols):
            round_outputs[tag_name] = {}
            round_results[tag_name] = {}
            # save the y_true, y_pred, and y_proba for the train and test splits
            for split, split_ids in (('train', train_ids), ('test', test_ids)):
                y_proba = scored_df.loc[scored_df[row_key].isin(split_ids), proba_col].values
                y_pred = (y_proba >= self.proba_cutoff).astype(int)
                y_true = answer_df.loc[answer_df[row_key].isin(split_ids), true_tag_col].values
                round_outputs[tag_name][f'{split}_y_proba'] = y_proba
                round_outputs[tag_name][f'{split}_y_pred'] = y_pred
                round_outputs[tag_name][f'{split}_y_true'] = y_true
                round_results[tag_name].update(
                    self._split_metrics(split, y_true, y_pred, y_proba))
        self.round_outputs = round_outputs
        self.round_results = round_results
    @staticmethod
    def _split_metrics(split, y_true, y_pred, y_proba):
        """Compute the metric dict for one split ('train' or 'test').

        BUG FIX: the original code computed ``f1_score`` and then immediately
        overwrote ``<split>_f1`` with a second ``roc_auc_score`` call on the
        hard predictions, so the value reported as "F1" was actually an AUC.
        The redundant overwrite is removed; ``<split>_roc_auc`` already holds
        the AUC computed on the probabilities.
        """
        return {
            f'{split}_roc_auc': roc_auc_score(y_true, y_proba),
            f'{split}_f1': f1_score(y_true, y_pred, zero_division=0),
            f'{split}_precision': precision_score(y_true, y_pred, zero_division=0),
            f'{split}_recall': recall_score(y_true, y_pred, zero_division=0),
            f'{split}_cr': classification_report(y_true, y_pred, zero_division=0),
            # positive-class prevalence in this split
            f'{split}_pos_rate': y_true.sum() / y_true.shape[0],
        }
    def describe_round_metrics(self):
        """Print a formatted per-tag summary of the round's metrics."""
        self.stmts = []
        for tag in self.round_results.keys():
            self.stmts.append(f"==========Tag - {tag.upper()}==========")
            self.stmts.append(f"\n>>> Pos Rate: Train - {self.round_results[tag]['train_pos_rate'] * 100:.2f}%; Test - {self.round_results[tag]['test_pos_rate'] * 100:.2f}%")
            self.stmts.append(f"\n>>> ROC AUC: Train - {self.round_results[tag]['train_roc_auc']:.3f}; Test - {self.round_results[tag]['test_roc_auc']:.3f}\n")
            self.stmts.append("\n>>> Classification Reports:")
            self.stmts.append(f"\n>>> Train:\n {self.round_results[tag]['train_cr']}")
            self.stmts.append(f"\n>>> Test:\n {self.round_results[tag]['test_cr']}")
            self.stmts.append('\n======================================\n')
        self.description = " ".join(self.stmts)
        print(">>> Displaying the description:")
        print(self.description)
class MetaProjectWithRounds(object):
    """Meta-project loader that also loads the results of every round.

    Parses the orchestration config and log, summarizes the answer dataset,
    instantiates a ``RoundResult`` per round, and flattens all per-round
    metrics into a single DataFrame (``self.flatten_result_df``).
    """
    def __init__(self, project_path, rundir='./wrapper_al/', alternative_cutoff=None):
        """
        Comprehensive Meta Project loader that also loads results of each round
        project_path: path to the project folder of the active learning run
        rundir: the path where the active learning ran, default './wrapper_al/'
        alternative_cutoff: optional probability cutoff overriding the config's
            'scoring'/'clf_threshold' value
        """
        print(">>> Instantiate MetaProject class...")
        self.project_path = project_path
        self.rundir = rundir
        self.cfg_path = os.path.abspath(f'{self.project_path}orchestration_record.cfg')
        self.log_path = os.path.abspath(f'{self.project_path}orchestration_log.log')
        self._load_config()
        self.total_rounds = int(self.config.get('active_learning', 'total_rounds'))
        self.round_sample = int(self.config.get('sampling', 'sample_size'))
        self.total_sample = self.total_rounds * self.round_sample
        # get abspath of the answer file since the exec path of project is different from analytics path
        self.answer_file = os.path.abspath(os.path.join(
            self.rundir, self.config.get('coder_sim', 'answer_file')))
        if alternative_cutoff is not None:
            self.proba_cutoff = alternative_cutoff
        else:
            self.proba_cutoff = float(self.config.get('scoring', 'clf_threshold'))
        self.max_tags = int(self.config.get('training', 'max_tags'))
        self.run_sim = int(self.config.get('active_learning', 'run_sim'))
        self.run_time = self._parse_log(self.log_path)
        # BUG FIX: this was previously called as _gen_tag_sum_df(self.answer_file),
        # which landed the file *path* in the tag_col column-prefix parameter, so
        # the tag-column selection never matched anything. The method reads
        # self.answer_file itself; call it with its default prefix.
        self._gen_tag_sum_df()
        self._load_rounds()
    def _load_config(self):
        """Read the orchestration config with extended interpolation enabled."""
        print(">>> Loading project orchestration config")
        self.config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
        self.config.read(self.cfg_path)
    def _parse_log(self, log_path):
        """
        Method to parse orchestration log file to obtain run duration in seconds

        Returns -1 when the log cannot be parsed (run did not finish).
        """
        print(">>> Parsing project execution run time")
        with open(log_path, 'r') as logfile:
            first_line = logfile.readline()
            for last_line in logfile:
                pass
        try:
            start_time = parse(first_line[:23])
            end_time = parse(last_line[:23])
            run_time = (end_time - start_time).seconds
        # Narrowed from a bare except; still covers parse errors and the
        # NameError raised when the log has fewer than two lines.
        except Exception:
            print(">>> Project did not run successfully based on log records!")
            run_time = -1
        return run_time
    def _gen_tag_sum_df(self, tag_col='Tag_'):
        """
        Method to generate tag positive ratios of a given DF (stored in JSON format)

        :param tag_col: prefix identifying the one-hot tag columns
        """
        print(">>> Reading full dataset...")
        df = pd.read_json(self.answer_file, orient='records')
        df = create_tag_columns(df)
        self.df = df
        self.total_records = df.shape[0]
        if self.run_sim == 1:
            # Simulation runs have full ground truth, so positive rates are known.
            print(">>> Project ran as simulation...")
            self.answer_tag_sum_df = df.loc[:, df.columns.str.startswith(tag_col)].sum().sort_values(
                ascending=False).reset_index().rename(
                {'index': 'Tag_Name', 0: 'Pos_Count'}, axis=1)
            self.answer_tag_sum_df['Pos_Rate'] = self.answer_tag_sum_df.Pos_Count / df.shape[0]
        else:
            print(">>> Project ran in real time with manual coders...")
            self.answer_tag_sum_df = None
    def _load_rounds(self):
        """Instantiate a RoundResult per round and collect its outputs/results."""
        print(">>> Loading results for each round...")
        self.rounds = {}
        self.round_results = {}
        self.round_outputs = {}
        self.round_descriptions = {}
        for round_id in range(self.total_rounds):
            config_project_path = self.config.get('active_learning', 'project_path')
            round_path = f"{config_project_path.rstrip('/')}/round_{round_id + 1}/"
            self.rounds[round_id + 1] = RoundResult(
                round_path=round_path, answer_file=self.answer_file,
                proba_cutoff=self.proba_cutoff)
            self.round_results[round_id + 1] = self.rounds[round_id + 1].round_results
            self.round_outputs[round_id + 1] = self.rounds[round_id + 1].round_outputs
        self._flatten_results()
    def _flatten_results(self):
        """Flatten the {round -> tag -> metric} nesting into one DataFrame."""
        self.flatten_result_dict = {'round_id': [], 'tag_name': []}
        for round_id, round_result in self.round_results.items():
            for tag_name, model_scores in round_result.items():
                self.flatten_result_dict['round_id'].append(round_id)
                self.flatten_result_dict['tag_name'].append(tag_name)
                for metric_name, metric_value in model_scores.items():
                    # setdefault keeps exactly one list per metric across all rows
                    self.flatten_result_dict.setdefault(metric_name, []).append(metric_value)
        self.flatten_result_df = pd.DataFrame(self.flatten_result_dict)
    def describe(self):
        """
        Method to describe the project with Meta Cfg and Logs
        method only loads attributes of the object
        """
        print(">>> Composing project high level description...")
        self.stmts = []
        self.stmts.append('INTRO\n-------')
        self.stmts.append(f"\nThis Active Learning Run has a round count of {self.total_rounds:,},")
        self.stmts.append(f"and a total of {self.total_sample:,} samples are included for model training.")
        if self.run_sim == 1:
            self.stmts.append("This run is a simulation with known tags already available.")
        else:
            self.stmts.append("This run is an actual application with manual coder input for tags on the fly.")
        self.stmts.append(f"In each round, {int(self.config.get('sampling', 'sample_size')):,} samples are selected as additional training data.")
        self.stmts.append(f"While the first round always runs random sampling to gather the samples,")
        self.stmts.append(f"the second and beyond rounds use {self.config.get('sampling', 'sampling_method')} method.")
        self.stmts.append('\n\nDATA\n-------')
        self.stmts.append(f'\nThe input dataframe has a total of {self.total_records:,} records.')
        if self.answer_tag_sum_df is not None:
            self.stmts.append('The positive rates of each tag in the full answer dataset:')
            self.stmts.append("\n" + self.answer_tag_sum_df.to_string())
        self.stmts.append('\n\nMODELING\n-------')
        self.stmts.append("\nThe training config for each round's Bi-Directional LSTM modeling is as below:")
        for key, value in dict(self.config['training']).items():
            self.stmts.append(f"\n\t{key}: {value}")
        if self.config.get('training', 'random_embed') == 'True':
            self.stmts.append('\nThe text embeddings are randomly initiated 300-length via Tensorflow 2.')
        else:
            self.stmts.append('\nThe text embeddings are GloVe 300-length text embeddings loaded via Spacy.')
        self.stmts.append('\n\nRUNTIME\n-------')
        if self.run_time > 0:
            self.stmts.append(f"\nExecution of the run took {self.run_time / 60:,.2f} minutes to complete")
        else:
            self.stmts.append("Program log file indicates that this run was not successfully executed...")
        self.description = " ".join(self.stmts)
        print(">>> Displaying the description:")
        print(self.description)
| [
"matplotlib.pyplot.title",
"numpy.isin",
"pandas.read_csv",
"logging.getLogger",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"pathlib.Path",
"sklearn.metrics.f1_score",
"os.path.join",
"pandas.DataFrame",
"os.path.abspath",
"sklearn.cluster.KMeans",
"matplotlib.pyplo... | [((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((1178, 1200), 'pandas.DataFrame', 'pd.DataFrame', (['tag_dict'], {}), '(tag_dict)\n', (1190, 1200), True, 'import pandas as pd\n'), ((1579, 1616), 'pandas.concat', 'pd.concat', (['[train_df, tag_df]'], {'axis': '(1)'}), '([train_df, tag_df], axis=1)\n', (1588, 1616), True, 'import pandas as pd\n'), ((3701, 3772), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(proba_scores.shape[1] + 1)', 'random_state': 'random_state'}), '(n_clusters=proba_scores.shape[1] + 1, random_state=random_state)\n', (3707, 3772), False, 'from sklearn.cluster import KMeans\n'), ((4167, 4194), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4177, 4194), True, 'import matplotlib.pyplot as plt\n'), ((4199, 4323), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'visualize_df', 'x': '"""tsne_1"""', 'y': '"""tsne_2"""', 'hue': '"""cluster_id"""', 'legend': '"""full"""', 'alpha': '(0.5)', 'palette': '"""pastel"""'}), "(data=visualize_df, x='tsne_1', y='tsne_2', hue='cluster_id',\n legend='full', alpha=0.5, palette='pastel')\n", (4214, 4323), True, 'import seaborn as sns\n'), ((4340, 4393), 'matplotlib.pyplot.title', 'plt.title', (['"""KMeans Cluster on TSNE 2D Transformation"""'], {}), "('KMeans Cluster on TSNE 2D Transformation')\n", (4349, 4393), True, 'import matplotlib.pyplot as plt\n'), ((4398, 4445), 'matplotlib.pyplot.savefig', 'plt.savefig', (['tsne_fig_name'], {'bbox_inches': '"""tight"""'}), "(tsne_fig_name, bbox_inches='tight')\n", (4409, 4445), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4461), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4459, 4461), True, 'import matplotlib.pyplot as plt\n'), ((5859, 5923), 'numpy.random.choice', 'np.random.choice', (['scored_df[row_key]', 'sample_size'], {'replace': '(False)'}), '(scored_df[row_key], 
sample_size, replace=False)\n', (5875, 5923), True, 'import numpy as np\n'), ((4074, 4117), 'numpy.concatenate', 'np.concatenate', (['(tsne_xy, clusters)'], {'axis': '(1)'}), '((tsne_xy, clusters), axis=1)\n', (4088, 4117), True, 'import numpy as np\n'), ((5341, 5404), 'numpy.random.choice', 'np.random.choice', (['unsampled_ids', 'unsampled_count'], {'replace': '(False)'}), '(unsampled_ids, unsampled_count, replace=False)\n', (5357, 5404), True, 'import numpy as np\n'), ((5511, 5567), 'pandas.concat', 'pd.concat', (['[sample_df, additional_df]'], {'ignore_index': '(True)'}), '([sample_df, additional_df], ignore_index=True)\n', (5520, 5567), True, 'import pandas as pd\n'), ((6943, 7006), 'os.path.abspath', 'os.path.abspath', (['f"""{self.project_path}orchestration_record.cfg"""'], {}), "(f'{self.project_path}orchestration_record.cfg')\n", (6958, 7006), False, 'import os\n'), ((7031, 7091), 'os.path.abspath', 'os.path.abspath', (['f"""{self.project_path}orchestration_log.log"""'], {}), "(f'{self.project_path}orchestration_log.log')\n", (7046, 7091), False, 'import os\n'), ((8995, 9043), 'pandas.read_json', 'pd.read_json', (['self.answer_file'], {'orient': '"""records"""'}), "(self.answer_file, orient='records')\n", (9007, 9043), True, 'import pandas as pd\n'), ((13183, 13211), 'pandas.read_csv', 'pd.read_csv', (['self.train_file'], {}), '(self.train_file)\n', (13194, 13211), True, 'import pandas as pd\n'), ((13232, 13280), 'pandas.read_json', 'pd.read_json', (['self.scored_file'], {'orient': '"""records"""'}), "(self.scored_file, orient='records')\n", (13244, 13280), True, 'import pandas as pd\n'), ((13301, 13349), 'pandas.read_json', 'pd.read_json', (['self.answer_file'], {'orient': '"""records"""'}), "(self.answer_file, orient='records')\n", (13313, 13349), True, 'import pandas as pd\n'), ((19114, 19177), 'os.path.abspath', 'os.path.abspath', (['f"""{self.project_path}orchestration_record.cfg"""'], {}), "(f'{self.project_path}orchestration_record.cfg')\n", (19129, 
19177), False, 'import os\n'), ((19202, 19262), 'os.path.abspath', 'os.path.abspath', (['f"""{self.project_path}orchestration_log.log"""'], {}), "(f'{self.project_path}orchestration_log.log')\n", (19217, 19262), False, 'import os\n'), ((21353, 21401), 'pandas.read_json', 'pd.read_json', (['self.answer_file'], {'orient': '"""records"""'}), "(self.answer_file, orient='records')\n", (21365, 21401), True, 'import pandas as pd\n'), ((23650, 23688), 'pandas.DataFrame', 'pd.DataFrame', (['self.flatten_result_dict'], {}), '(self.flatten_result_dict)\n', (23662, 23688), True, 'import pandas as pd\n'), ((3212, 3224), 'pathlib.Path', 'Path', (['ui_dir'], {}), '(ui_dir)\n', (3216, 3224), False, 'from pathlib import Path\n'), ((3984, 4004), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (3988, 4004), False, 'from sklearn.manifold import TSNE\n'), ((12327, 12359), 'os.path.join', 'os.path.join', (['rundir', 'round_path'], {}), '(rundir, round_path)\n', (12339, 12359), False, 'import os\n'), ((12924, 12957), 'os.path.join', 'os.path.join', (['rundir', 'answer_file'], {}), '(rundir, answer_file)\n', (12936, 12957), False, 'import os\n'), ((15090, 15191), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (["round_outputs[tag_name]['train_y_true']", "round_outputs[tag_name]['train_y_proba']"], {}), "(round_outputs[tag_name]['train_y_true'], round_outputs[\n tag_name]['train_y_proba'])\n", (15103, 15191), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((15254, 15366), 'sklearn.metrics.f1_score', 'f1_score', (["round_outputs[tag_name]['train_y_true']", "round_outputs[tag_name]['train_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['train_y_true'], round_outputs[tag_name][\n 'train_y_pred'], zero_division=0)\n", (15262, 15366), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, 
accuracy_score\n'), ((15436, 15555), 'sklearn.metrics.precision_score', 'precision_score', (["round_outputs[tag_name]['train_y_true']", "round_outputs[tag_name]['train_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['train_y_true'], round_outputs[\n tag_name]['train_y_pred'], zero_division=0)\n", (15451, 15555), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((15622, 15738), 'sklearn.metrics.recall_score', 'recall_score', (["round_outputs[tag_name]['train_y_true']", "round_outputs[tag_name]['train_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['train_y_true'], round_outputs[\n tag_name]['train_y_pred'], zero_division=0)\n", (15634, 15738), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((15801, 15925), 'sklearn.metrics.classification_report', 'classification_report', (["round_outputs[tag_name]['train_y_true']", "round_outputs[tag_name]['train_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['train_y_true'],\n round_outputs[tag_name]['train_y_pred'], zero_division=0)\n", (15822, 15925), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((15989, 16089), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (["round_outputs[tag_name]['train_y_true']", "round_outputs[tag_name]['train_y_pred']"], {}), "(round_outputs[tag_name]['train_y_true'], round_outputs[\n tag_name]['train_y_pred'])\n", (16002, 16089), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((16369, 16468), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (["round_outputs[tag_name]['test_y_true']", "round_outputs[tag_name]['test_y_proba']"], {}), "(round_outputs[tag_name]['test_y_true'], round_outputs[\n 
tag_name]['test_y_proba'])\n", (16382, 16468), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((16530, 16640), 'sklearn.metrics.f1_score', 'f1_score', (["round_outputs[tag_name]['test_y_true']", "round_outputs[tag_name]['test_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['test_y_true'], round_outputs[tag_name][\n 'test_y_pred'], zero_division=0)\n", (16538, 16640), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((16709, 16826), 'sklearn.metrics.precision_score', 'precision_score', (["round_outputs[tag_name]['test_y_true']", "round_outputs[tag_name]['test_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['test_y_true'], round_outputs[\n tag_name]['test_y_pred'], zero_division=0)\n", (16724, 16826), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((16892, 17006), 'sklearn.metrics.recall_score', 'recall_score', (["round_outputs[tag_name]['test_y_true']", "round_outputs[tag_name]['test_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['test_y_true'], round_outputs[tag_name\n ]['test_y_pred'], zero_division=0)\n", (16904, 17006), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((17068, 17191), 'sklearn.metrics.classification_report', 'classification_report', (["round_outputs[tag_name]['test_y_true']", "round_outputs[tag_name]['test_y_pred']"], {'zero_division': '(0)'}), "(round_outputs[tag_name]['test_y_true'], round_outputs\n [tag_name]['test_y_pred'], zero_division=0)\n", (17089, 17191), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((17253, 17351), 'sklearn.metrics.roc_auc_score', 
'roc_auc_score', (["round_outputs[tag_name]['test_y_true']", "round_outputs[tag_name]['test_y_pred']"], {}), "(round_outputs[tag_name]['test_y_true'], round_outputs[\n tag_name]['test_y_pred'])\n", (17266, 17351), False, 'from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, classification_report, accuracy_score\n'), ((5277, 5314), 'numpy.isin', 'np.isin', (['scored_df.UID', 'sample_df.UID'], {}), '(scored_df.UID, sample_df.UID)\n', (5284, 5314), True, 'import numpy as np\n'), ((5443, 5486), 'numpy.isin', 'np.isin', (['scored_df[row_key]', 'additional_ids'], {}), '(scored_df[row_key], additional_ids)\n', (5450, 5486), True, 'import numpy as np\n'), ((8013, 8049), 'configparser.ExtendedInterpolation', 'configparser.ExtendedInterpolation', ([], {}), '()\n', (8047, 8049), False, 'import configparser\n'), ((8483, 8505), 'dateutil.parser.parse', 'parse', (['first_line[:23]'], {}), '(first_line[:23])\n', (8488, 8505), False, 'from dateutil.parser import parse\n'), ((8533, 8554), 'dateutil.parser.parse', 'parse', (['last_line[:23]'], {}), '(last_line[:23])\n', (8538, 8554), False, 'from dateutil.parser import parse\n'), ((20371, 20407), 'configparser.ExtendedInterpolation', 'configparser.ExtendedInterpolation', ([], {}), '()\n', (20405, 20407), False, 'import configparser\n'), ((20841, 20863), 'dateutil.parser.parse', 'parse', (['first_line[:23]'], {}), '(first_line[:23])\n', (20846, 20863), False, 'from dateutil.parser import parse\n'), ((20891, 20912), 'dateutil.parser.parse', 'parse', (['last_line[:23]'], {}), '(last_line[:23])\n', (20896, 20912), False, 'from dateutil.parser import parse\n'), ((5954, 5993), 'numpy.isin', 'np.isin', (['scored_df[row_key]', 'sample_ids'], {}), '(scored_df[row_key], sample_ids)\n', (5961, 5993), True, 'import numpy as np\n')] |
import datetime
import math
import os
import fire
import logging
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.keras.callbacks import ModelCheckpoint
from fastprogress.fastprogress import master_bar, progress_bar
from mozhi.bin.urn.datasets_urn import DATASET_OBJ_MAP
from mozhi.bin.urn.models_urn import TF_MODEL_OBJECT_MAP
from mozhi.bin.urn.preprocessor_urn import PREPROCESSOR_OBJ_MAP
from mozhi.config.config import settings
from seqeval.metrics import precision_score, recall_score, f1_score, classification_report
from sklearn_crfsuite.metrics import flat_classification_report
# https://medium.com/analytics-vidhya/ner-tensorflow-2-2-0-9f10dcf5a0a
# https://github.com/bhuvanakundumani/NER_tensorflow2.2.0
from mozhi.utils.pretty_print import print_info, print_error
def plot_history(history):
    """Render side-by-side accuracy and loss curves from a Keras History."""
    plt.style.use('ggplot')
    metrics = history.history
    epochs_axis = range(1, len(metrics['accuracy']) + 1)
    plt.figure(figsize=(12, 5))
    # Left panel: training vs. validation accuracy.
    plt.subplot(1, 2, 1)
    plt.plot(epochs_axis, metrics['accuracy'], 'b', label='Training acc')
    plt.plot(epochs_axis, metrics['val_accuracy'], 'r', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    # Right panel: training vs. validation loss.
    plt.subplot(1, 2, 2)
    plt.plot(epochs_axis, metrics['loss'], 'b', label='Training loss')
    plt.plot(epochs_axis, metrics['val_loss'], 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    # plt.waitforbuttonpress()
def pred2label(pred, preprocessor):
    """Convert per-token class-probability vectors into label strings.

    :param pred: iterable of sequences; each sequence is a list of per-token
        probability vectors over the label classes
    :param preprocessor: object exposing ``id2label(class_id)``
    :return: list of label-string sequences mirroring the structure of ``pred``
    """
    return [
        [preprocessor.id2label(np.argmax(token_probs)) for token_probs in sequence]
        for sequence in pred
    ]
def main(dataset_name,
         preprocessor_name,
         model_name):
    """Train a Bi-LSTM NER tagger, persist it, reload it and report test metrics.

    :param dataset_name: key into ``DATASET_OBJ_MAP`` / ``settings.datasets``
    :param preprocessor_name: key into ``PREPROCESSOR_OBJ_MAP`` / ``settings.preprocessor``
    :param model_name: key into ``TF_MODEL_OBJECT_MAP`` / ``settings.models``
    """
    epochs = 10
    batch_size = 128
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y ',
                        level=logging.INFO)
    # Resolve dataset / preprocessor / model implementations from the registries.
    dataset = DATASET_OBJ_MAP[dataset_name](**settings.datasets[dataset_name])
    preprocessor = PREPROCESSOR_OBJ_MAP[preprocessor_name].init_vf(dataset_info=dataset.get_dataset_info(),
                                                                **settings.preprocessor[preprocessor_name])
    model = TF_MODEL_OBJECT_MAP[model_name].init_vf(preprocessor_data_info=preprocessor.get_preprocessor_info(),
                                                **settings.models[model_name])
    optimizer = model.optimizer
    loss_fn = model.loss
    # Build tf.data pipelines for the three splits.
    train_dataset = preprocessor.get_tf_data_iterator(sentences=dataset.train_tuple_pairs)
    valid_dataset = preprocessor.get_tf_data_iterator(sentences=dataset.val_tuple_pairs)
    test_dataset = preprocessor.get_tf_data_iterator(sentences=dataset.test_tuple_pairs)
    shuffled_train_dataset = train_dataset.shuffle(buffer_size=len(dataset.train_tuple_pairs),
                                                   reshuffle_each_iteration=True)
    batched_train_dataset = shuffled_train_dataset.batch(batch_size,
                                                         drop_remainder=True)
    batched_valid_dataset = valid_dataset.batch(batch_size,
                                                drop_remainder=True)
    batched_test_dataset = test_dataset.batch(batch_size,
                                              drop_remainder=True)
    # TensorBoard summary writers and the profiler.
    train_log_dir = os.path.expanduser(f"~/.mozhi/logs/{str(model.NAME).lower()}/train")
    valid_log_dir = os.path.expanduser(f"~/.mozhi/logs/{str(model.NAME).lower()}/valid")
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    valid_summary_writer = tf.summary.create_file_writer(valid_log_dir)
    tf.summary.trace_on(graph=True)
    tf.profiler.experimental.start(train_log_dir)
    train_loss_metric = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
    valid_loss_metric = tf.keras.metrics.Mean('valid_loss', dtype=tf.float32)
    epoch_bar = master_bar(range(epochs))
    train_pb_max_len = math.ceil(float(len(dataset.train_tuple_pairs)) / float(batch_size))
    valid_pb_max_len = math.ceil(float(len(dataset.val_tuple_pairs)) / float(batch_size))
    test_pb_max_len = math.ceil(float(len(dataset.test_tuple_pairs)) / float(batch_size))

    def train_step_fn(sentences_batch, labels_batch):
        # One optimization step: forward pass, loss, backprop, weight update.
        with tf.GradientTape() as tape:
            logits = model(sentences_batch)  # batchsize, max_seq_len, num_labels
            loss = loss_fn(labels_batch, logits)  # batchsize, max_seq_len
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(
            (grad, var)
            for (grad, var) in zip(gradients, model.trainable_variables)
            if grad is not None
        )
        return loss, logits

    def valid_step_fn(sentences_batch, labels_batch):
        # Forward pass only; no gradient updates on validation data.
        logits = model(sentences_batch)
        loss = loss_fn(labels_batch, logits)
        return loss, logits

    for epoch in epoch_bar:
        with train_summary_writer.as_default():
            for sentences_batch, labels_batch in progress_bar(batched_train_dataset,
                                                             total=train_pb_max_len,
                                                             parent=epoch_bar):
                loss, logits = train_step_fn(sentences_batch, labels_batch)
                train_loss_metric(loss)
                epoch_bar.child.comment = f'training loss : {train_loss_metric.result()}'
            # Log the epoch-mean training loss, then reset for the next epoch.
            tf.summary.scalar('training loss', train_loss_metric.result(), step=epoch)
            train_loss_metric.reset_states()
        with valid_summary_writer.as_default():
            for sentences_batch, labels_batch in progress_bar(batched_valid_dataset, total=valid_pb_max_len,
                                                              parent=epoch_bar):
                loss, logits = valid_step_fn(sentences_batch, labels_batch)
                valid_loss_metric.update_state(loss)
                epoch_bar.child.comment = f'validation loss : {valid_loss_metric.result()}'
            # Logging after each Epoch !
            tf.summary.scalar('valid loss', valid_loss_metric.result(), step=epoch)
            valid_loss_metric.reset_states()
    # BUG FIX: the profiler was started above but never stopped, so the
    # collected profile was never exported. Stop it once training finishes.
    tf.profiler.experimental.stop()
    print_info(f"Saving model weights to {model.model_file_path}")
    tf.keras.models.save_model(model, f"{model.model_file_path}")
    print_info(f"Model weights saved")
    # Reload the persisted model to verify the saved artifact is usable.
    model = tf.keras.models.load_model(f"{model.model_file_path}")
    print_error("loaded")
    y_test = []
    y_pred = []
    for sentences_batch, labels_batch in progress_bar(batched_test_dataset,
                                                      total=test_pb_max_len):
        logits = model(sentences_batch)
        preds = tf.nn.softmax(logits)
        y_test.append(np.asarray(labels_batch))
        y_pred.append(np.asarray(preds))
    # Map class ids back to label strings so seqeval can score entity spans.
    y_pred = pred2label(pred=y_pred, preprocessor=preprocessor)
    y_test = pred2label(pred=y_test, preprocessor=preprocessor)
    print_info("*"*100)
    print_info("F1-score: {:.1%}".format(f1_score(y_test, y_pred)))
    print_info("*"*100)
    report = flat_classification_report(y_pred=y_pred, y_true=y_test)
    print_info(report)
if __name__ == '__main__':
    # Ref: https://www.tensorflow.org/guide/gpu
    physical_gpus = tf.config.list_physical_devices('GPU')
    if physical_gpus:
        try:
            # Memory growth must be configured identically for every GPU,
            # and before any of them has been initialized.
            for device in physical_gpus:
                tf.config.experimental.set_memory_growth(device, True)
            logical_devices = tf.config.experimental.list_logical_devices('GPU')
            print(len(physical_gpus), "Physical GPUs,", len(logical_devices), "Logical GPUs")
        except RuntimeError as err:
            # Raised when GPUs were already initialized before this point.
            print(err)
    fire.Fire(main)
| [
"matplotlib.pyplot.title",
"numpy.argmax",
"tensorflow.keras.metrics.Mean",
"fastprogress.fastprogress.progress_bar",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"mozhi.utils.pretty_print.print_error",
"tensorflow.nn.softmax",
"tensorflow.summary.trace_on",
"seqeval.metrics.f1_score... | [((865, 888), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (878, 888), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1130), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (1113, 1130), True, 'import matplotlib.pyplot as plt\n'), ((1135, 1155), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1146, 1155), True, 'import matplotlib.pyplot as plt\n'), ((1160, 1208), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'accuracy', '"""b"""'], {'label': '"""Training acc"""'}), "(x, accuracy, 'b', label='Training acc')\n", (1168, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1267), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accuracy', '"""r"""'], {'label': '"""Validation acc"""'}), "(x, val_accuracy, 'r', label='Validation acc')\n", (1221, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1317), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (1281, 1317), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1334), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1332, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1339, 1359), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1350, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1364, 1409), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'loss', '"""b"""'], {'label': '"""Training loss"""'}), "(x, loss, 'b', label='Training loss')\n", (1372, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1414, 1465), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_loss', '"""r"""'], {'label': '"""Validation loss"""'}), "(x, val_loss, 'r', label='Validation loss')\n", (1422, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1511), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and 
validation loss"""'], {}), "('Training and validation loss')\n", (1479, 1511), True, 'import matplotlib.pyplot as plt\n'), ((1516, 1528), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1526, 1528), True, 'import matplotlib.pyplot as plt\n'), ((1920, 2038), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y """', 'level': 'logging.INFO'}), "(format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y ', level=logging.INFO)\n", (1939, 2038), False, 'import logging\n'), ((3691, 3735), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['train_log_dir'], {}), '(train_log_dir)\n', (3720, 3735), True, 'import tensorflow as tf\n'), ((3763, 3807), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['valid_log_dir'], {}), '(valid_log_dir)\n', (3792, 3807), True, 'import tensorflow as tf\n'), ((3813, 3844), 'tensorflow.summary.trace_on', 'tf.summary.trace_on', ([], {'graph': '(True)'}), '(graph=True)\n', (3832, 3844), True, 'import tensorflow as tf\n'), ((3849, 3894), 'tensorflow.profiler.experimental.start', 'tf.profiler.experimental.start', (['train_log_dir'], {}), '(train_log_dir)\n', (3879, 3894), True, 'import tensorflow as tf\n'), ((3920, 3976), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""training_loss"""'], {'dtype': 'tf.float32'}), "('training_loss', dtype=tf.float32)\n", (3941, 3976), True, 'import tensorflow as tf\n'), ((4001, 4054), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""valid_loss"""'], {'dtype': 'tf.float32'}), "('valid_loss', dtype=tf.float32)\n", (4022, 4054), True, 'import tensorflow as tf\n'), ((6987, 7041), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['f"""{model.model_file_path}"""'], {}), "(f'{model.model_file_path}')\n", (7013, 7041), True, 'import tensorflow as tf\n'), ((7046, 7067), 'mozhi.utils.pretty_print.print_error', 
'print_error', (['"""loaded"""'], {}), "('loaded')\n", (7057, 7067), False, 'from mozhi.utils.pretty_print import print_info, print_error\n'), ((7143, 7200), 'fastprogress.fastprogress.progress_bar', 'progress_bar', (['batched_test_dataset'], {'total': 'test_pb_max_len'}), '(batched_test_dataset, total=test_pb_max_len)\n', (7155, 7200), False, 'from fastprogress.fastprogress import master_bar, progress_bar\n'), ((7556, 7577), 'mozhi.utils.pretty_print.print_info', 'print_info', (["('*' * 100)"], {}), "('*' * 100)\n", (7566, 7577), False, 'from mozhi.utils.pretty_print import print_info, print_error\n'), ((7648, 7669), 'mozhi.utils.pretty_print.print_info', 'print_info', (["('*' * 100)"], {}), "('*' * 100)\n", (7658, 7669), False, 'from mozhi.utils.pretty_print import print_info, print_error\n'), ((7681, 7737), 'sklearn_crfsuite.metrics.flat_classification_report', 'flat_classification_report', ([], {'y_pred': 'y_pred', 'y_true': 'y_test'}), '(y_pred=y_pred, y_true=y_test)\n', (7707, 7737), False, 'from sklearn_crfsuite.metrics import flat_classification_report\n'), ((7742, 7760), 'mozhi.utils.pretty_print.print_info', 'print_info', (['report'], {}), '(report)\n', (7752, 7760), False, 'from mozhi.utils.pretty_print import print_info, print_error\n'), ((7850, 7888), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (7881, 7888), True, 'import tensorflow as tf\n'), ((8379, 8394), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (8388, 8394), False, 'import fire\n'), ((6369, 6431), 'mozhi.utils.pretty_print.print_info', 'print_info', (['f"""Saving model weights to {model.model_file_path}"""'], {}), "(f'Saving model weights to {model.model_file_path}')\n", (6379, 6431), False, 'from mozhi.utils.pretty_print import print_info, print_error\n'), ((6594, 6655), 'tensorflow.keras.models.save_model', 'tf.keras.models.save_model', (['model', 'f"""{model.model_file_path}"""'], {}), "(model, 
f'{model.model_file_path}')\n", (6620, 6655), True, 'import tensorflow as tf\n'), ((6727, 6761), 'mozhi.utils.pretty_print.print_info', 'print_info', (['f"""Model weights saved"""'], {}), "(f'Model weights saved')\n", (6737, 6761), False, 'from mozhi.utils.pretty_print import print_info, print_error\n'), ((7312, 7333), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (7325, 7333), True, 'import tensorflow as tf\n'), ((1697, 1709), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (1706, 1709), True, 'import numpy as np\n'), ((4439, 4456), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4454, 4456), True, 'import tensorflow as tf\n'), ((5187, 5264), 'fastprogress.fastprogress.progress_bar', 'progress_bar', (['batched_train_dataset'], {'total': 'train_pb_max_len', 'parent': 'epoch_bar'}), '(batched_train_dataset, total=train_pb_max_len, parent=epoch_bar)\n', (5199, 5264), False, 'from fastprogress.fastprogress import master_bar, progress_bar\n'), ((5826, 5903), 'fastprogress.fastprogress.progress_bar', 'progress_bar', (['batched_valid_dataset'], {'total': 'valid_pb_max_len', 'parent': 'epoch_bar'}), '(batched_valid_dataset, total=valid_pb_max_len, parent=epoch_bar)\n', (5838, 5903), False, 'from fastprogress.fastprogress import master_bar, progress_bar\n'), ((7356, 7380), 'numpy.asarray', 'np.asarray', (['labels_batch'], {}), '(labels_batch)\n', (7366, 7380), True, 'import numpy as np\n'), ((7404, 7421), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (7414, 7421), True, 'import numpy as np\n'), ((7617, 7641), 'seqeval.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7625, 7641), False, 'from seqeval.metrics import precision_score, recall_score, f1_score, classification_report\n'), ((8111, 8161), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (8154, 8161), True, 'import tensorflow as tf\n'), 
((8032, 8083), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (8072, 8083), True, 'import tensorflow as tf\n')] |
"""Process simtel data for the training of the estimator models."""
import numpy as np
import astropy.units as u
from astropy.coordinates.angle_utilities import angular_separation
from sys import exit as sys_exit
from glob import glob
import signal
import tables as tb
import pandas as pd
from tqdm import tqdm
from ctapipe.utils import CutFlow
from protopipe.pipeline.temp import MySimTelEventSource
from protopipe.pipeline import EventPreparer
from protopipe.pipeline.io import load_config, load_models
from protopipe.pipeline.utils import (
make_argparser,
prod5N_array,
prod3b_array,
str2bool,
SignalHandler,
bcolors,
)
def main():
    """Produce DL1/DL2 training data for the estimator models.

    Loops over the configured simtel input files, prepares every event
    (image cleaning, Hillas parametrization, direction reconstruction)
    and, if requested, estimates the per-event energy with a previously
    trained regressor.  One PyTables table per camera type is written to
    the output HDF5 file.
    """
    # Argument parser
    parser = make_argparser()
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Print debugging information",
    )
    parser.add_argument(
        "--show_progress_bar",
        action="store_true",
        help="Show information about execution progress",
    )
    parser.add_argument(
        "--save_images",
        action="store_true",
        help="Save also all images",
    )
    parser.add_argument(
        "--estimate_energy",
        type=str2bool,
        default=False,
        help="Estimate the events' energy with a regressor from\
        protopipe.scripts.build_model",
    )
    parser.add_argument(
        "--regressor_dir", type=str, default="./", help="regressors directory"
    )
    parser.add_argument(
        "--regressor_config",
        type=str,
        default=None,
        help="Configuration file used to produce regressor model",
    )
    args = parser.parse_args()
    # Read configuration file
    cfg = load_config(args.config_file)
    # Check that the user specify site, array and production
    try:
        site = cfg["General"]["site"]
        array = cfg["General"]["array"]
        production = cfg["General"]["production"]
        assert all(len(x) > 0 for x in [site, array, production])
    except (KeyError, AssertionError):
        raise ValueError(
            bcolors.FAIL
            + """At least one of 'site', 'array' and 'production'
            are not properly defined in the analysis configuration
            file."""
            + bcolors.ENDC
        )
        sys_exit(-1)  # NOTE(review): unreachable — the raise above already aborts
    if args.infile_list:
        # Collect and sort all input files matching the provided patterns
        filenamelist = []
        for f in args.infile_list:
            filenamelist += glob("{}/{}".format(args.indir, f))
        filenamelist.sort()
    else:
        raise ValueError("don't know which input to use...")
    if not filenamelist:
        print("no files found; check indir: {}".format(args.indir))
        sys_exit(-1)
    else:
        print("found {} files".format(len(filenamelist)))
    # Get the IDs of the involved telescopes and associated cameras together
    # with the equivalent focal lengths from the first event
    if production == "Prod5N":
        allowed_tels, cams_and_foclens, subarray = prod5N_array(
            filenamelist[0], site, array
        )
    elif production == "Prod3b":
        allowed_tels, cams_and_foclens, subarray = prod3b_array(
            filenamelist[0], site, array
        )
    else:
        raise ValueError(bcolors.FAIL + "Unsupported production." + bcolors.ENDC)
        sys_exit(-1)  # NOTE(review): unreachable — dead code after the raise
    # keeping track of events and where they were rejected
    evt_cutflow = CutFlow("EventCutFlow")
    img_cutflow = CutFlow("ImageCutFlow")
    preper = EventPreparer(
        config=cfg,
        subarray=subarray,
        cams_and_foclens=cams_and_foclens,
        mode=args.mode,
        event_cutflow=evt_cutflow,
        image_cutflow=img_cutflow,
    )
    # catch ctr-c signal to exit current loop and still display results
    signal_handler = SignalHandler()
    signal.signal(signal.SIGINT, signal_handler)
    # Regressor information
    regressor_method = cfg["EnergyRegressor"]["method_name"]
    try:
        estimation_weight = cfg["EnergyRegressor"]["estimation_weight"]
    except KeyError:
        # Fall back to CTAMARS-style weighting when not configured
        estimation_weight = "CTAMARS"
    # wrapper for the scikit-learn regressor
    if args.estimate_energy is True:
        # Read configuration file
        regressor_config = load_config(args.regressor_config)
        log_10_target = regressor_config["Method"]["log_10_target"]
        regressor_files = args.regressor_dir + "/regressor_{cam_id}_{regressor}.pkl.gz"
        reg_file = regressor_files.format(
            **{
                "mode": args.mode,
                "wave_args": "mixed",  # ToDo, control
                "regressor": regressor_method,
                "cam_id": "{cam_id}",
            }
        )
        # One trained model per camera type
        regressors = load_models(reg_file, cam_id_list=cams_and_foclens.keys())
    # COLUMN DESCRIPTOR AS DICTIONARY
    # Column descriptor for the file containing output training data.
    DataTrainingOutput = dict(
        # ======================================================================
        # ARRAY
        obs_id=tb.Int16Col(dflt=1, pos=0),
        event_id=tb.Int32Col(dflt=1, pos=1),
        tel_id=tb.Int16Col(dflt=1, pos=2),
        N_LST=tb.Int16Col(dflt=1, pos=3),
        N_MST=tb.Int16Col(dflt=1, pos=4),
        N_SST=tb.Int16Col(dflt=1, pos=5),
        n_tel_reco=tb.FloatCol(dflt=1, pos=6),
        n_tel_discri=tb.FloatCol(dflt=1, pos=7),
        # ======================================================================
        # DL1
        hillas_intensity_reco=tb.Float32Col(dflt=1, pos=8),
        hillas_intensity=tb.Float32Col(dflt=1, pos=9),
        hillas_x_reco=tb.Float32Col(dflt=1, pos=10),
        hillas_y_reco=tb.Float32Col(dflt=1, pos=11),
        hillas_x=tb.Float32Col(dflt=1, pos=12),
        hillas_y=tb.Float32Col(dflt=1, pos=13),
        hillas_r_reco=tb.Float32Col(dflt=1, pos=14),
        hillas_r=tb.Float32Col(dflt=1, pos=15),
        hillas_phi_reco=tb.Float32Col(dflt=1, pos=16),
        hillas_phi=tb.Float32Col(dflt=1, pos=17),
        hillas_length_reco=tb.Float32Col(dflt=1, pos=18),
        hillas_length=tb.Float32Col(dflt=1, pos=19),
        hillas_width_reco=tb.Float32Col(dflt=1, pos=20),
        hillas_width=tb.Float32Col(dflt=1, pos=21),
        hillas_psi_reco=tb.Float32Col(dflt=1, pos=22),
        hillas_psi=tb.Float32Col(dflt=1, pos=23),
        hillas_skewness_reco=tb.Float32Col(dflt=1, pos=24),
        hillas_skewness=tb.Float32Col(dflt=1, pos=25),
        hillas_kurtosis=tb.Float32Col(dflt=1, pos=26),
        hillas_kurtosis_reco=tb.Float32Col(dflt=1, pos=27),
        leakage_intensity_width_1_reco=tb.Float32Col(dflt=np.nan, pos=28),
        leakage_intensity_width_2_reco=tb.Float32Col(dflt=np.nan, pos=29),
        leakage_intensity_width_1=tb.Float32Col(dflt=np.nan, pos=30),
        leakage_intensity_width_2=tb.Float32Col(dflt=np.nan, pos=31),
        concentration_cog=tb.Float32Col(dflt=np.nan, pos=32),
        concentration_core=tb.Float32Col(dflt=np.nan, pos=33),
        concentration_pixel=tb.Float32Col(dflt=np.nan, pos=34),
        # The following are missing from current ctapipe DL1 output
        # Not sure if it's worth to add them
        hillas_ellipticity_reco=tb.FloatCol(dflt=1, pos=35),
        hillas_ellipticity=tb.FloatCol(dflt=1, pos=36),
        max_signal_cam=tb.Float32Col(dflt=1, pos=37),
        pixels=tb.Int16Col(dflt=-1, pos=38),
        clusters=tb.Int16Col(dflt=-1, pos=39),
        # ======================================================================
        # DL2 - DIRECTION RECONSTRUCTION
        impact_dist=tb.Float32Col(dflt=1, pos=40),
        h_max=tb.Float32Col(dflt=1, pos=41),
        alt=tb.Float32Col(dflt=np.nan, pos=42),
        az=tb.Float32Col(dflt=np.nan, pos=43),
        err_est_pos=tb.Float32Col(dflt=1, pos=44),
        err_est_dir=tb.Float32Col(dflt=1, pos=45),
        xi=tb.Float32Col(dflt=np.nan, pos=46),
        offset=tb.Float32Col(dflt=np.nan, pos=47),
        mc_core_x=tb.FloatCol(dflt=1, pos=48),
        mc_core_y=tb.FloatCol(dflt=1, pos=49),
        reco_core_x=tb.FloatCol(dflt=1, pos=50),
        reco_core_y=tb.FloatCol(dflt=1, pos=51),
        mc_h_first_int=tb.FloatCol(dflt=1, pos=52),
        mc_x_max=tb.Float32Col(dflt=np.nan, pos=53),
        is_valid=tb.BoolCol(dflt=False, pos=54),
        good_image=tb.Int16Col(dflt=1, pos=55),
        true_az=tb.Float32Col(dflt=np.nan, pos=56),
        true_alt=tb.Float32Col(dflt=np.nan, pos=57),
        pointing_az=tb.Float32Col(dflt=np.nan, pos=58),
        pointing_alt=tb.Float32Col(dflt=np.nan, pos=59),
        # ======================================================================
        # DL2 - ENERGY ESTIMATION
        true_energy=tb.FloatCol(dflt=np.nan, pos=60),
        reco_energy=tb.FloatCol(dflt=np.nan, pos=61),
        reco_energy_tel=tb.Float32Col(dflt=np.nan, pos=62),
        # ======================================================================
        # DL1 IMAGES
        # this is optional data saved by the user
        # since these data declarations require to know how many pixels
        # each saved image will have,
        # we add them later on, right before creating the table
        # We list them here for reference
        # true_image=tb.Float32Col(shape=(1855), pos=56),
        # reco_image=tb.Float32Col(shape=(1855), pos=57),
        # cleaning_mask_reco=tb.BoolCol(shape=(1855), pos=58),  # not in ctapipe
        # =======================================================================
        # TEMP
        N_reco_LST=tb.Int16Col(dflt=-1, pos=63),
        N_reco_MST=tb.Int16Col(dflt=-1, pos=64),
        image_extraction=tb.Int16Col(dflt=-1, pos=65),
    )
    # Output file is flagged "incomplete" until the whole job finishes,
    # so partially-written files can be detected downstream.
    outfile = tb.open_file(args.outfile, mode="w")
    outfile.root._v_attrs["status"] = "incomplete"
    outTable = {}
    outData = {}
    # Configuration options for SimTelEventSource
    # Readout window integration correction
    try:
        calib_scale = cfg["Calibration"]["calib_scale"]
    except KeyError:
        # defaults for no calibscale applied
        calib_scale = 1.0
    for i, filename in enumerate(filenamelist):
        print("file: {} filename = {}".format(i, filename))
        source = MySimTelEventSource(
            input_url=filename,
            calib_scale=calib_scale,
            allowed_tels=allowed_tels,
            max_events=args.max_events,
        )
        # loop that cleans and parametrises the images and performs the
        # reconstruction for each event
        for (
            event,
            reco_image,
            cleaning_mask_reco,
            cleaning_mask_clusters,
            true_image,
            n_pixel_dict,
            hillas_dict,
            hillas_dict_reco,
            leakage_dict,
            concentration_dict,
            n_tels,
            n_tels_reco,
            max_signals,
            n_cluster_dict,
            reco_result,
            impact_dict,
            good_event,
            good_for_reco,
            image_extraction_status,
        ) in tqdm(
            preper.prepare_event(
                source, save_images=args.save_images, debug=args.debug
            ),
            desc=source.__class__.__name__,
            total=source.max_events,
            unit="event",
            disable=not args.show_progress_bar,
        ):
            # True direction
            true_az = event.simulation.shower.az
            true_alt = event.simulation.shower.alt
            # Array pointing in AltAz frame
            pointing_az = event.pointing.array_azimuth
            pointing_alt = event.pointing.array_altitude
            if good_event:
                # Angular distance true vs reconstructed direction
                xi = angular_separation(
                    event.simulation.shower.az,
                    event.simulation.shower.alt,
                    reco_result.az,
                    reco_result.alt,
                )
                # Angular distance array pointing vs reconstructed direction
                offset = angular_separation(
                    event.pointing.array_azimuth,
                    event.pointing.array_altitude,
                    reco_result.az,
                    reco_result.alt,
                )
                # Impact parameter
                reco_core_x = reco_result.core_x
                reco_core_y = reco_result.core_y
                # Height of shower maximum
                h_max = reco_result.h_max
                # Todo add conversion in number of radiation length,
                # need an atmosphere profile
                is_valid = True
            else:  # something went wrong and the shower's reconstruction failed
                xi = np.nan * u.deg
                offset = np.nan * u.deg
                reco_core_x = np.nan * u.m
                reco_core_y = np.nan * u.m
                h_max = np.nan * u.m
                reco_result.alt = np.nan * u.deg
                reco_result.az = np.nan * u.deg
                is_valid = False
            reco_energy = np.nan
            reco_energy_tel = dict()
            # Not optimal at all, two loop on tel!!!
            # For energy estimation
            # Estimate energy only if the shower was reconstructed
            if (args.estimate_energy is True) and is_valid:
                weight_tel = np.zeros(len(hillas_dict.keys()))
                weight_statistic_tel = np.zeros(len(hillas_dict.keys()))
                energy_tel = np.zeros(len(hillas_dict.keys()))
                for idx, tel_id in enumerate(hillas_dict.keys()):
                    # use only images that survived cleaning and
                    # parametrization
                    if not good_for_reco[tel_id]:
                        # bad images will get an undetermined energy
                        # this is a per-telescope energy
                        # NOT the estimated energy for the shower
                        reco_energy_tel[tel_id] = np.nan
                        continue
                    cam_id = source.subarray.tel[tel_id].camera.camera_name
                    moments = hillas_dict[tel_id]
                    model = regressors[cam_id]
                    ############################################################
                    # GET FEATURES
                    ############################################################
                    # Read feature list from model configutation file
                    features_basic = regressor_config["FeatureList"]["Basic"]
                    features_derived = regressor_config["FeatureList"]["Derived"]
                    features = features_basic + list(features_derived)
                    # Create a pandas Dataframe with basic quantities
                    # This is needed in order to connect the I/O system of the
                    # model inputs to the in-memory computation of this script
                    data = pd.DataFrame(
                        {
                            "hillas_intensity": [moments.intensity],
                            "hillas_width": [moments.width.to("deg").value],
                            "hillas_length": [moments.length.to("deg").value],
                            "hillas_x": [moments.x.to("deg").value],
                            "hillas_y": [moments.y.to("deg").value],
                            "hillas_phi": [moments.phi.to("deg").value],
                            "hillas_r": [moments.r.to("deg").value],
                            "leakage_intensity_width_1_reco": [
                                leakage_dict[tel_id]["leak1_reco"]
                            ],
                            "leakage_intensity_width_2_reco": [
                                leakage_dict[tel_id]["leak2_reco"]
                            ],
                            "leakage_intensity_width_1": [
                                leakage_dict[tel_id]["leak1"]
                            ],
                            "leakage_intensity_width_2": [
                                leakage_dict[tel_id]["leak2"]
                            ],
                            "concentration_cog": [
                                concentration_dict[tel_id]["concentration_cog"]
                            ],
                            "concentration_core": [
                                concentration_dict[tel_id]["concentration_core"]
                            ],
                            "concentration_pixel": [
                                concentration_dict[tel_id]["concentration_pixel"]
                            ],
                            "az": [reco_result.az.to("deg").value],
                            "alt": [reco_result.alt.to("deg").value],
                            "h_max": [h_max.value],
                            "impact_dist": [impact_dict[tel_id].to("m").value],
                        }
                    )
                    # Compute derived features and add them to the dataframe
                    for key, expression in features_derived.items():
                        data.eval(f"{key} = {expression}", inplace=True)
                    # features_img = np.array(
                    #     [
                    #         np.log10(moments.intensity),
                    #         np.log10(impact_dict[tel_id].value),
                    #         moments.width.value,
                    #         moments.length.value,
                    #         h_max.value,
                    #     ]
                    # )
                    # sort features_to_use alphabetically to ensure order
                    # preservation with model.fit in protopipe.mva
                    features = sorted(features)
                    # Select the values for the full set of features
                    features_values = data[features].to_numpy()
                    ############################################################
                    if estimation_weight == "CTAMARS":
                        # Get an array of trees
                        # NOTE(review): assumes the model exposes `estimators_`
                        # (i.e. a scikit-learn forest-type regressor) — confirm
                        predictions_trees = np.array(
                            [
                                tree.predict(features_values)
                                for tree in model.estimators_
                            ]
                        )
                        energy_tel[idx] = np.mean(predictions_trees, axis=0)
                        weight_statistic_tel[idx] = np.std(predictions_trees, axis=0)
                    else:
                        data.eval(
                            f"estimation_weight = {estimation_weight}", inplace=True
                        )
                        energy_tel[idx] = model.predict(features_values)
                        weight_tel[idx] = data["estimation_weight"]
                    if log_10_target:
                        # Model was trained on log10(E): convert back to linear
                        energy_tel[idx] = 10 ** energy_tel[idx]
                        weight_tel[idx] = 10 ** weight_tel[idx]
                        weight_statistic_tel[idx] = 10 ** weight_statistic_tel[idx]
                    if estimation_weight == "CTAMARS":
                        # in CTAMARS the average is done after converting
                        # energy and weight to linear energy scale
                        weight_tel[idx] = 1 / (weight_statistic_tel[idx] ** 2)
                    reco_energy_tel[tel_id] = energy_tel[idx]
                # Weighted average of the per-telescope energies
                reco_energy = np.sum(weight_tel * energy_tel) / sum(weight_tel)
            else:
                for idx, tel_id in enumerate(hillas_dict.keys()):
                    reco_energy_tel[tel_id] = np.nan
            for idx, tel_id in enumerate(hillas_dict.keys()):
                cam_id = source.subarray.tel[tel_id].camera.camera_name
                if cam_id not in outData:
                    if args.save_images is True:
                        # we define and save images content here, to make it
                        # adaptive to different cameras
                        n_pixels = source.subarray.tel[tel_id].camera.geometry.n_pixels
                        DataTrainingOutput["true_image"] = tb.Float32Col(
                            shape=(n_pixels), pos=56
                        )
                        DataTrainingOutput["reco_image"] = tb.Float32Col(
                            shape=(n_pixels), pos=57
                        )
                        DataTrainingOutput["cleaning_mask_reco"] = tb.BoolCol(
                            shape=(n_pixels), pos=58
                        )  # not in ctapipe
                        DataTrainingOutput["cleaning_mask_clusters"] = tb.BoolCol(
                            shape=(n_pixels), pos=58
                        )  # not in ctapipe
                    # One table per camera type, created lazily on first use
                    outTable[cam_id] = outfile.create_table(
                        "/",
                        cam_id,
                        DataTrainingOutput,
                    )
                    outData[cam_id] = outTable[cam_id].row
                moments = hillas_dict[tel_id]
                ellipticity = moments.width / moments.length
                # Write to file also the Hillas parameters that have been used
                # to calculate reco_results
                moments_reco = hillas_dict_reco[tel_id]
                ellipticity_reco = moments_reco.width / moments_reco.length
                outData[cam_id]["good_image"] = good_for_reco[tel_id]
                outData[cam_id]["is_valid"] = is_valid
                outData[cam_id]["impact_dist"] = impact_dict[tel_id].to("m").value
                outData[cam_id]["max_signal_cam"] = max_signals[tel_id]
                outData[cam_id]["hillas_intensity"] = moments.intensity
                outData[cam_id]["N_LST"] = n_tels["LST_LST_LSTCam"]
                # NOTE(review): N_MST/N_SST sum fixed telescope-type keys;
                # verify they cover all camera types of the chosen production
                outData[cam_id]["N_MST"] = (
                    n_tels["MST_MST_NectarCam"]
                    + n_tels["MST_MST_FlashCam"]
                    + n_tels["MST_SCT_SCTCam"]
                )
                outData[cam_id]["N_SST"] = (
                    n_tels["SST_1M_DigiCam"]
                    + n_tels["SST_ASTRI_ASTRICam"]
                    + n_tels["SST_GCT_CHEC"]
                )
                outData[cam_id]["N_reco_LST"] = n_tels_reco["LST_LST_LSTCam"]
                # NOTE(review): only NectarCam counted here — FlashCam/SCT
                # are excluded from N_reco_MST; confirm this is intended
                outData[cam_id]["N_reco_MST"] = n_tels_reco["MST_MST_NectarCam"]
                outData[cam_id]["hillas_width"] = moments.width.to("deg").value
                outData[cam_id]["hillas_length"] = moments.length.to("deg").value
                outData[cam_id]["hillas_psi"] = moments.psi.to("deg").value
                outData[cam_id]["hillas_skewness"] = moments.skewness
                outData[cam_id]["hillas_kurtosis"] = moments.kurtosis
                outData[cam_id]["h_max"] = h_max.to("m").value
                # Position/direction error estimates are not computed here
                outData[cam_id]["err_est_pos"] = np.nan
                outData[cam_id]["err_est_dir"] = np.nan
                outData[cam_id]["true_energy"] = event.simulation.shower.energy.to(
                    "TeV"
                ).value
                outData[cam_id]["true_az"] = true_az.to("deg").value
                outData[cam_id]["true_alt"] = true_alt.to("deg").value
                outData[cam_id]["pointing_az"] = pointing_az.to("deg").value
                outData[cam_id]["pointing_alt"] = pointing_alt.to("deg").value
                outData[cam_id]["hillas_x"] = moments.x.to("deg").value
                outData[cam_id]["hillas_y"] = moments.y.to("deg").value
                outData[cam_id]["hillas_phi"] = moments.phi.to("deg").value
                outData[cam_id]["hillas_r"] = moments.r.to("deg").value
                outData[cam_id]["pixels"] = n_pixel_dict[tel_id]
                outData[cam_id]["obs_id"] = event.index.obs_id
                outData[cam_id]["event_id"] = event.index.event_id
                outData[cam_id]["tel_id"] = tel_id
                outData[cam_id]["xi"] = xi.to("deg").value
                outData[cam_id]["reco_energy"] = reco_energy
                outData[cam_id]["hillas_ellipticity"] = ellipticity.value
                outData[cam_id]["clusters"] = n_cluster_dict[tel_id]
                outData[cam_id]["n_tel_discri"] = n_tels["GOOD images"]
                outData[cam_id]["mc_core_x"] = event.simulation.shower.core_x.to(
                    "m"
                ).value
                outData[cam_id]["mc_core_y"] = event.simulation.shower.core_y.to(
                    "m"
                ).value
                outData[cam_id]["reco_core_x"] = reco_core_x.to("m").value
                outData[cam_id]["reco_core_y"] = reco_core_y.to("m").value
                outData[cam_id][
                    "mc_h_first_int"
                ] = event.simulation.shower.h_first_int.to("m").value
                outData[cam_id]["offset"] = offset.to("deg").value
                outData[cam_id][
                    "mc_x_max"
                ] = event.simulation.shower.x_max.value  # g / cm2
                outData[cam_id]["alt"] = reco_result.alt.to("deg").value
                outData[cam_id]["az"] = reco_result.az.to("deg").value
                outData[cam_id]["reco_energy_tel"] = reco_energy_tel[tel_id]
                # Variables from hillas_dist_reco
                outData[cam_id]["n_tel_reco"] = n_tels["GOOD images"]
                outData[cam_id]["hillas_x_reco"] = moments_reco.x.to("deg").value
                outData[cam_id]["hillas_y_reco"] = moments_reco.y.to("deg").value
                outData[cam_id]["hillas_phi_reco"] = moments_reco.phi.to("deg").value
                outData[cam_id]["hillas_ellipticity_reco"] = ellipticity_reco.value
                outData[cam_id]["hillas_r_reco"] = moments_reco.r.to("deg").value
                outData[cam_id]["hillas_skewness_reco"] = moments_reco.skewness
                outData[cam_id]["hillas_kurtosis_reco"] = moments_reco.kurtosis
                outData[cam_id]["hillas_width_reco"] = moments_reco.width.to(
                    "deg"
                ).value
                outData[cam_id]["hillas_length_reco"] = moments_reco.length.to(
                    "deg"
                ).value
                outData[cam_id]["hillas_psi_reco"] = moments_reco.psi.to("deg").value
                outData[cam_id]["hillas_intensity_reco"] = moments_reco.intensity
                outData[cam_id]["leakage_intensity_width_1_reco"] = leakage_dict[
                    tel_id
                ]["leak1_reco"]
                outData[cam_id]["leakage_intensity_width_2_reco"] = leakage_dict[
                    tel_id
                ]["leak2_reco"]
                outData[cam_id]["leakage_intensity_width_1"] = leakage_dict[tel_id][
                    "leak1"
                ]
                outData[cam_id]["leakage_intensity_width_2"] = leakage_dict[tel_id][
                    "leak2"
                ]
                outData[cam_id]["concentration_cog"] = concentration_dict[tel_id][
                    "concentration_cog"
                ]
                outData[cam_id]["concentration_core"] = concentration_dict[tel_id][
                    "concentration_core"
                ]
                outData[cam_id]["concentration_pixel"] = concentration_dict[tel_id][
                    "concentration_pixel"
                ]
                outData[cam_id]["image_extraction"] = image_extraction_status[tel_id]
                # =======================
                # IMAGES INFORMATION
                # =======================
                if args.save_images is True:
                    # we define and save images content here, to make it
                    # adaptive to different cameras
                    outData[cam_id]["true_image"] = true_image[tel_id]
                    outData[cam_id]["reco_image"] = reco_image[tel_id]
                    outData[cam_id]["cleaning_mask_reco"] = cleaning_mask_reco[tel_id]
                    outData[cam_id]["cleaning_mask_clusters"] = cleaning_mask_clusters[
                        tel_id
                    ]
                # =======================
                outData[cam_id].append()
                if signal_handler.stop:
                    break
            # NOTE(review): SIGINT only breaks the event loop here — the outer
            # file loop continues with the next file; confirm this is intended
            if signal_handler.stop:
                break
    # make sure that all the events are properly stored
    for table in outTable.values():
        table.flush()
    print(
        bcolors.BOLD
        + "\n\n==================================================\n"
        + "Statistical summary of processed events and images\n"
        + "==================================================\n"
        # + bcolors.ENDC
    )
    evt_cutflow()
    # Catch specific cases
    triggered_events = evt_cutflow.cuts["min2Tels trig"][1]
    reconstructed_events = evt_cutflow.cuts["min2Tels reco"][1]
    if triggered_events == 0:
        print(
            "\033[93mWARNING: No events have been triggered"
            " by the selected telescopes! \033[0m"
        )
    else:
        print("\n")
    img_cutflow()
    if reconstructed_events == 0:
        print(
            "\033[93m WARNING: None of the triggered events have been "
            "properly reconstructed by the selected telescopes!\n"
            "DL1 file will be empty! \033[0m"
        )
    print(bcolors.ENDC)
    # Conclude by writing some metadata
    # NOTE(review): `source` here refers to the last file processed
    outfile.root._v_attrs["status"] = "complete"
    outfile.root._v_attrs["num_showers"] = source.simulation_config.num_showers
    outfile.root._v_attrs["shower_reuse"] = source.simulation_config.shower_reuse
    outfile.close()
    print("Job done!")
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"numpy.sum",
"tables.Float32Col",
"numpy.mean",
"ctapipe.utils.CutFlow",
"numpy.std",
"tables.Int32Col",
"tables.BoolCol",
"protopipe.pipeline.utils.SignalHandler",
"astropy.coordinates.angle_utilities.angular_separation",
"protopipe.pipeline.utils.make_argparser",
"signal.signal",
"tables.ope... | [((701, 717), 'protopipe.pipeline.utils.make_argparser', 'make_argparser', ([], {}), '()\n', (715, 717), False, 'from protopipe.pipeline.utils import make_argparser, prod5N_array, prod3b_array, str2bool, SignalHandler, bcolors\n'), ((1679, 1708), 'protopipe.pipeline.io.load_config', 'load_config', (['args.config_file'], {}), '(args.config_file)\n', (1690, 1708), False, 'from protopipe.pipeline.io import load_config, load_models\n'), ((3336, 3359), 'ctapipe.utils.CutFlow', 'CutFlow', (['"""EventCutFlow"""'], {}), "('EventCutFlow')\n", (3343, 3359), False, 'from ctapipe.utils import CutFlow\n'), ((3378, 3401), 'ctapipe.utils.CutFlow', 'CutFlow', (['"""ImageCutFlow"""'], {}), "('ImageCutFlow')\n", (3385, 3401), False, 'from ctapipe.utils import CutFlow\n'), ((3416, 3574), 'protopipe.pipeline.EventPreparer', 'EventPreparer', ([], {'config': 'cfg', 'subarray': 'subarray', 'cams_and_foclens': 'cams_and_foclens', 'mode': 'args.mode', 'event_cutflow': 'evt_cutflow', 'image_cutflow': 'img_cutflow'}), '(config=cfg, subarray=subarray, cams_and_foclens=\n cams_and_foclens, mode=args.mode, event_cutflow=evt_cutflow,\n image_cutflow=img_cutflow)\n', (3429, 3574), False, 'from protopipe.pipeline import EventPreparer\n'), ((3715, 3730), 'protopipe.pipeline.utils.SignalHandler', 'SignalHandler', ([], {}), '()\n', (3728, 3730), False, 'from protopipe.pipeline.utils import make_argparser, prod5N_array, prod3b_array, str2bool, SignalHandler, bcolors\n'), ((3735, 3779), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (3748, 3779), False, 'import signal\n'), ((9554, 9590), 'tables.open_file', 'tb.open_file', (['args.outfile'], {'mode': '"""w"""'}), "(args.outfile, mode='w')\n", (9566, 9590), True, 'import tables as tb\n'), ((2629, 2641), 'sys.exit', 'sys_exit', (['(-1)'], {}), '(-1)\n', (2637, 2641), True, 'from sys import exit as sys_exit\n'), ((2931, 2973), 'protopipe.pipeline.utils.prod5N_array', 
'prod5N_array', (['filenamelist[0]', 'site', 'array'], {}), '(filenamelist[0], site, array)\n', (2943, 2973), False, 'from protopipe.pipeline.utils import make_argparser, prod5N_array, prod3b_array, str2bool, SignalHandler, bcolors\n'), ((4155, 4189), 'protopipe.pipeline.io.load_config', 'load_config', (['args.regressor_config'], {}), '(args.regressor_config)\n', (4166, 4189), False, 'from protopipe.pipeline.io import load_config, load_models\n'), ((10057, 10180), 'protopipe.pipeline.temp.MySimTelEventSource', 'MySimTelEventSource', ([], {'input_url': 'filename', 'calib_scale': 'calib_scale', 'allowed_tels': 'allowed_tels', 'max_events': 'args.max_events'}), '(input_url=filename, calib_scale=calib_scale,\n allowed_tels=allowed_tels, max_events=args.max_events)\n', (10076, 10180), False, 'from protopipe.pipeline.temp import MySimTelEventSource\n'), ((2264, 2276), 'sys.exit', 'sys_exit', (['(-1)'], {}), '(-1)\n', (2272, 2276), True, 'from sys import exit as sys_exit\n'), ((3080, 3122), 'protopipe.pipeline.utils.prod3b_array', 'prod3b_array', (['filenamelist[0]', 'site', 'array'], {}), '(filenamelist[0], site, array)\n', (3092, 3122), False, 'from protopipe.pipeline.utils import make_argparser, prod5N_array, prod3b_array, str2bool, SignalHandler, bcolors\n'), ((3245, 3257), 'sys.exit', 'sys_exit', (['(-1)'], {}), '(-1)\n', (3253, 3257), True, 'from sys import exit as sys_exit\n'), ((4941, 4967), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(1)', 'pos': '(0)'}), '(dflt=1, pos=0)\n', (4952, 4967), True, 'import tables as tb\n'), ((4986, 5012), 'tables.Int32Col', 'tb.Int32Col', ([], {'dflt': '(1)', 'pos': '(1)'}), '(dflt=1, pos=1)\n', (4997, 5012), True, 'import tables as tb\n'), ((5029, 5055), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(1)', 'pos': '(2)'}), '(dflt=1, pos=2)\n', (5040, 5055), True, 'import tables as tb\n'), ((5071, 5097), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(1)', 'pos': '(3)'}), '(dflt=1, pos=3)\n', (5082, 5097), True, 'import 
tables as tb\n'), ((5113, 5139), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(1)', 'pos': '(4)'}), '(dflt=1, pos=4)\n', (5124, 5139), True, 'import tables as tb\n'), ((5155, 5181), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(1)', 'pos': '(5)'}), '(dflt=1, pos=5)\n', (5166, 5181), True, 'import tables as tb\n'), ((5202, 5228), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(6)'}), '(dflt=1, pos=6)\n', (5213, 5228), True, 'import tables as tb\n'), ((5251, 5277), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(7)'}), '(dflt=1, pos=7)\n', (5262, 5277), True, 'import tables as tb\n'), ((5404, 5432), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(8)'}), '(dflt=1, pos=8)\n', (5417, 5432), True, 'import tables as tb\n'), ((5459, 5487), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(9)'}), '(dflt=1, pos=9)\n', (5472, 5487), True, 'import tables as tb\n'), ((5511, 5540), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(10)'}), '(dflt=1, pos=10)\n', (5524, 5540), True, 'import tables as tb\n'), ((5564, 5593), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(11)'}), '(dflt=1, pos=11)\n', (5577, 5593), True, 'import tables as tb\n'), ((5612, 5641), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(12)'}), '(dflt=1, pos=12)\n', (5625, 5641), True, 'import tables as tb\n'), ((5660, 5689), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(13)'}), '(dflt=1, pos=13)\n', (5673, 5689), True, 'import tables as tb\n'), ((5713, 5742), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(14)'}), '(dflt=1, pos=14)\n', (5726, 5742), True, 'import tables as tb\n'), ((5761, 5790), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(15)'}), '(dflt=1, pos=15)\n', (5774, 5790), True, 'import tables as tb\n'), ((5816, 5845), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(16)'}), '(dflt=1, 
pos=16)\n', (5829, 5845), True, 'import tables as tb\n'), ((5866, 5895), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(17)'}), '(dflt=1, pos=17)\n', (5879, 5895), True, 'import tables as tb\n'), ((5924, 5953), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(18)'}), '(dflt=1, pos=18)\n', (5937, 5953), True, 'import tables as tb\n'), ((5977, 6006), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(19)'}), '(dflt=1, pos=19)\n', (5990, 6006), True, 'import tables as tb\n'), ((6034, 6063), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(20)'}), '(dflt=1, pos=20)\n', (6047, 6063), True, 'import tables as tb\n'), ((6086, 6115), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(21)'}), '(dflt=1, pos=21)\n', (6099, 6115), True, 'import tables as tb\n'), ((6141, 6170), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(22)'}), '(dflt=1, pos=22)\n', (6154, 6170), True, 'import tables as tb\n'), ((6191, 6220), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(23)'}), '(dflt=1, pos=23)\n', (6204, 6220), True, 'import tables as tb\n'), ((6251, 6280), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(24)'}), '(dflt=1, pos=24)\n', (6264, 6280), True, 'import tables as tb\n'), ((6306, 6335), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(25)'}), '(dflt=1, pos=25)\n', (6319, 6335), True, 'import tables as tb\n'), ((6361, 6390), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(26)'}), '(dflt=1, pos=26)\n', (6374, 6390), True, 'import tables as tb\n'), ((6421, 6450), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(27)'}), '(dflt=1, pos=27)\n', (6434, 6450), True, 'import tables as tb\n'), ((6491, 6525), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(28)'}), '(dflt=np.nan, pos=28)\n', (6504, 6525), True, 'import tables as tb\n'), ((6566, 6600), 
'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(29)'}), '(dflt=np.nan, pos=29)\n', (6579, 6600), True, 'import tables as tb\n'), ((6636, 6670), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(30)'}), '(dflt=np.nan, pos=30)\n', (6649, 6670), True, 'import tables as tb\n'), ((6706, 6740), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(31)'}), '(dflt=np.nan, pos=31)\n', (6719, 6740), True, 'import tables as tb\n'), ((6768, 6802), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(32)'}), '(dflt=np.nan, pos=32)\n', (6781, 6802), True, 'import tables as tb\n'), ((6831, 6865), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(33)'}), '(dflt=np.nan, pos=33)\n', (6844, 6865), True, 'import tables as tb\n'), ((6895, 6929), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(34)'}), '(dflt=np.nan, pos=34)\n', (6908, 6929), True, 'import tables as tb\n'), ((7076, 7103), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(35)'}), '(dflt=1, pos=35)\n', (7087, 7103), True, 'import tables as tb\n'), ((7132, 7159), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(36)'}), '(dflt=1, pos=36)\n', (7143, 7159), True, 'import tables as tb\n'), ((7184, 7213), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(37)'}), '(dflt=1, pos=37)\n', (7197, 7213), True, 'import tables as tb\n'), ((7230, 7258), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(-1)', 'pos': '(38)'}), '(dflt=-1, pos=38)\n', (7241, 7258), True, 'import tables as tb\n'), ((7277, 7305), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(-1)', 'pos': '(39)'}), '(dflt=-1, pos=39)\n', (7288, 7305), True, 'import tables as tb\n'), ((7449, 7478), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(40)'}), '(dflt=1, pos=40)\n', (7462, 7478), True, 'import tables as tb\n'), ((7494, 7523), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 
'(1)', 'pos': '(41)'}), '(dflt=1, pos=41)\n', (7507, 7523), True, 'import tables as tb\n'), ((7537, 7571), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(42)'}), '(dflt=np.nan, pos=42)\n', (7550, 7571), True, 'import tables as tb\n'), ((7584, 7618), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(43)'}), '(dflt=np.nan, pos=43)\n', (7597, 7618), True, 'import tables as tb\n'), ((7640, 7669), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(44)'}), '(dflt=1, pos=44)\n', (7653, 7669), True, 'import tables as tb\n'), ((7691, 7720), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': '(1)', 'pos': '(45)'}), '(dflt=1, pos=45)\n', (7704, 7720), True, 'import tables as tb\n'), ((7733, 7767), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(46)'}), '(dflt=np.nan, pos=46)\n', (7746, 7767), True, 'import tables as tb\n'), ((7784, 7818), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(47)'}), '(dflt=np.nan, pos=47)\n', (7797, 7818), True, 'import tables as tb\n'), ((7838, 7865), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(48)'}), '(dflt=1, pos=48)\n', (7849, 7865), True, 'import tables as tb\n'), ((7885, 7912), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(49)'}), '(dflt=1, pos=49)\n', (7896, 7912), True, 'import tables as tb\n'), ((7934, 7961), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(50)'}), '(dflt=1, pos=50)\n', (7945, 7961), True, 'import tables as tb\n'), ((7983, 8010), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(51)'}), '(dflt=1, pos=51)\n', (7994, 8010), True, 'import tables as tb\n'), ((8035, 8062), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': '(1)', 'pos': '(52)'}), '(dflt=1, pos=52)\n', (8046, 8062), True, 'import tables as tb\n'), ((8081, 8115), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(53)'}), '(dflt=np.nan, pos=53)\n', (8094, 8115), True, 
'import tables as tb\n'), ((8134, 8164), 'tables.BoolCol', 'tb.BoolCol', ([], {'dflt': '(False)', 'pos': '(54)'}), '(dflt=False, pos=54)\n', (8144, 8164), True, 'import tables as tb\n'), ((8185, 8212), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(1)', 'pos': '(55)'}), '(dflt=1, pos=55)\n', (8196, 8212), True, 'import tables as tb\n'), ((8230, 8264), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(56)'}), '(dflt=np.nan, pos=56)\n', (8243, 8264), True, 'import tables as tb\n'), ((8283, 8317), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(57)'}), '(dflt=np.nan, pos=57)\n', (8296, 8317), True, 'import tables as tb\n'), ((8339, 8373), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(58)'}), '(dflt=np.nan, pos=58)\n', (8352, 8373), True, 'import tables as tb\n'), ((8396, 8430), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(59)'}), '(dflt=np.nan, pos=59)\n', (8409, 8430), True, 'import tables as tb\n'), ((8567, 8599), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': 'np.nan', 'pos': '(60)'}), '(dflt=np.nan, pos=60)\n', (8578, 8599), True, 'import tables as tb\n'), ((8621, 8653), 'tables.FloatCol', 'tb.FloatCol', ([], {'dflt': 'np.nan', 'pos': '(61)'}), '(dflt=np.nan, pos=61)\n', (8632, 8653), True, 'import tables as tb\n'), ((8679, 8713), 'tables.Float32Col', 'tb.Float32Col', ([], {'dflt': 'np.nan', 'pos': '(62)'}), '(dflt=np.nan, pos=62)\n', (8692, 8713), True, 'import tables as tb\n'), ((9399, 9427), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(-1)', 'pos': '(63)'}), '(dflt=-1, pos=63)\n', (9410, 9427), True, 'import tables as tb\n'), ((9448, 9476), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(-1)', 'pos': '(64)'}), '(dflt=-1, pos=64)\n', (9459, 9476), True, 'import tables as tb\n'), ((9503, 9531), 'tables.Int16Col', 'tb.Int16Col', ([], {'dflt': '(-1)', 'pos': '(65)'}), '(dflt=-1, pos=65)\n', (9514, 9531), True, 'import tables as tb\n'), ((11514, 11626), 
'astropy.coordinates.angle_utilities.angular_separation', 'angular_separation', (['event.simulation.shower.az', 'event.simulation.shower.alt', 'reco_result.az', 'reco_result.alt'], {}), '(event.simulation.shower.az, event.simulation.shower.alt,\n reco_result.az, reco_result.alt)\n', (11532, 11626), False, 'from astropy.coordinates.angle_utilities import angular_separation\n'), ((11748, 11865), 'astropy.coordinates.angle_utilities.angular_separation', 'angular_separation', (['event.pointing.array_azimuth', 'event.pointing.array_altitude', 'reco_result.az', 'reco_result.alt'], {}), '(event.pointing.array_azimuth, event.pointing.\n array_altitude, reco_result.az, reco_result.alt)\n', (11766, 11865), False, 'from astropy.coordinates.angle_utilities import angular_separation\n'), ((19150, 19181), 'numpy.sum', 'np.sum', (['(weight_tel * energy_tel)'], {}), '(weight_tel * energy_tel)\n', (19156, 19181), True, 'import numpy as np\n'), ((18095, 18129), 'numpy.mean', 'np.mean', (['predictions_trees'], {'axis': '(0)'}), '(predictions_trees, axis=0)\n', (18102, 18129), True, 'import numpy as np\n'), ((18182, 18215), 'numpy.std', 'np.std', (['predictions_trees'], {'axis': '(0)'}), '(predictions_trees, axis=0)\n', (18188, 18215), True, 'import numpy as np\n'), ((19846, 19883), 'tables.Float32Col', 'tb.Float32Col', ([], {'shape': 'n_pixels', 'pos': '(56)'}), '(shape=n_pixels, pos=56)\n', (19859, 19883), True, 'import tables as tb\n'), ((19999, 20036), 'tables.Float32Col', 'tb.Float32Col', ([], {'shape': 'n_pixels', 'pos': '(57)'}), '(shape=n_pixels, pos=57)\n', (20012, 20036), True, 'import tables as tb\n'), ((20160, 20194), 'tables.BoolCol', 'tb.BoolCol', ([], {'shape': 'n_pixels', 'pos': '(58)'}), '(shape=n_pixels, pos=58)\n', (20170, 20194), True, 'import tables as tb\n'), ((20340, 20374), 'tables.BoolCol', 'tb.BoolCol', ([], {'shape': 'n_pixels', 'pos': '(58)'}), '(shape=n_pixels, pos=58)\n', (20350, 20374), True, 'import tables as tb\n')] |
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import glob as globlib
import logging
import os
import sys
import time
import math
import numpy as np
import pandas as pd
import requests
from bravado.exception import BravadoConnectionError, BravadoTimeoutError, HTTPForbidden, \
HTTPInternalServerError, HTTPServerError, HTTPUnauthorized, HTTPServiceUnavailable, HTTPRequestTimeout, \
HTTPGatewayTimeout, HTTPBadGateway
from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError
from neptune.exceptions import InvalidNotebookPath, FileNotFound, NotAFile
from neptune.git_info import GitInfo
# Module-wide logger, named after this module.
_logger = logging.getLogger(__name__)
# True only on Windows: 'getwindowsversion' exists solely in Windows builds of CPython.
IS_WINDOWS = hasattr(sys, 'getwindowsversion')
def map_values(f_value, dictionary):
    """Return a new dict with ``f_value`` applied to every value of *dictionary*."""
    return {key: f_value(value) for key, value in dictionary.items()}
def map_keys(f_key, dictionary):
    """Return a new dict with ``f_key`` applied to every key of *dictionary*."""
    return {f_key(key): value for key, value in dictionary.items()}
def as_list(value):
    """Wrap *value* in a one-element list unless it is already a list or None."""
    if isinstance(value, list) or value is None:
        return value
    return [value]
def validate_notebook_path(path):
if not path.endswith(".ipynb"):
raise InvalidNotebookPath(path)
if not os.path.exists(path):
raise FileNotFound(path)
if not os.path.isfile(path):
raise NotAFile(path)
def align_channels_on_x(dataframe):
    """Outer-join every channel sub-frame of *dataframe* on the shared 'x' column."""
    channel_dfs, common_x = _split_df_by_stems(dataframe)
    frames = [common_x]
    frames.extend(channel_dfs)
    return merge_dataframes(frames, on='x', how='outer')
def get_channel_name_stems(columns):
    """Return the unique column-name stems, i.e. names with their 2-char prefix
    (used as 'x_'/'y_' elsewhere in this module) removed. Order is unspecified."""
    return list({column[2:] for column in columns})
def merge_dataframes(dataframes, on, how='outer'):
    """Fold *dataframes* into a single frame by successive pairwise pd.merge on *on*."""
    def _merge_pair(left, right):
        return pd.merge(left, right, on=on, how=how)
    return functools.reduce(_merge_pair, dataframes)
def is_float(value):
    """Return True when *value* converts to float, False when that raises ValueError.

    Other conversion errors (e.g. TypeError for None) propagate, as before.
    """
    try:
        float(value)
    except ValueError:
        return False
    return True
def is_nan_or_inf(value):
    """Return True when *value* is NaN or +/-infinity (i.e. not a finite number)."""
    return not math.isfinite(value)
def file_contains(filename, text):
    """Return True if any line of *filename* contains *text*, else False.

    Fix: the original iterated ``open(filename)`` without ever closing it,
    leaking the file handle; the ``with`` block guarantees closure even when
    iteration raises.
    """
    with open(filename) as lines:
        return any(text in line for line in lines)
def in_docker():
    """Best-effort check whether the current process runs inside a Docker container.

    Fix: Docker creates its marker file at the container root as '/.dockerenv';
    the previous relative path './dockerenv' could never match it. Falls back
    to scanning /proc/self/cgroup for a 'docker' entry.
    """
    cgroup_file = '/proc/self/cgroup'
    return os.path.exists('/.dockerenv') or (
        os.path.exists(cgroup_file) and file_contains(cgroup_file, text='docker'))
def is_notebook():
    """Return True when executed inside an IPython/Jupyter session.

    IPython injects ``get_ipython`` into the builtins; outside such a session
    the bare name lookup raises and we report False.
    """
    try:
        # pylint: disable=pointless-statement,undefined-variable
        get_ipython
    except Exception:
        return False
    return True
def _split_df_by_stems(df):
    """Split *df* into one two-column frame ['x', stem] per channel stem.

    Returns (channel_dfs, common_x): the per-channel frames with NaN rows
    dropped, and a single-column float frame holding every unique x value
    observed across all channels.
    """
    channel_dfs = []
    all_x_values = []
    for stem in get_channel_name_stems(df.columns):
        sub_df = df[['x_{}'.format(stem), 'y_{}'.format(stem)]]
        sub_df.columns = ['x', stem]
        sub_df = sub_df.dropna()
        channel_dfs.append(sub_df)
        all_x_values.extend(sub_df['x'].tolist())
    common_x = pd.DataFrame({'x': np.unique(all_x_values)}, dtype=float)
    return channel_dfs, common_x
def discover_git_repo_location():
    """Return the directory of the executed script (``__main__``), or None when
    there is no ``__file__`` (e.g. interactive sessions)."""
    # pylint:disable=bad-option-value,import-outside-toplevel
    import __main__
    try:
        main_file = __main__.__file__
    except AttributeError:
        return None
    return os.path.dirname(os.path.abspath(main_file))
def update_session_proxies(session, proxies):
    """Merge *proxies* into ``session.proxies``; no-op for None.

    Raises ValueError when *proxies* is not dict-like.
    """
    if proxies is None:
        return
    try:
        session.proxies.update(proxies)
    except (TypeError, ValueError):
        raise ValueError("Wrong proxies format: {}".format(proxies))
def get_git_info(repo_path=None):
    """Retrieve information about a git repository.

    If the attempt fails for any reason, ``None`` is returned.

    Args:
        repo_path (:obj:`str`, optional, default is ``None``):
            | Path to the repository from which to extract git information.
            | If ``None`` is passed, calling ``get_git_info`` is equivalent to calling
              ``git.Repo(search_parent_directories=True)``.
              Check `GitPython <https://gitpython.readthedocs.io/en/stable/reference.html#git.repo.base.Repo>`_
              docs for more information.

    Returns:
        :class:`~neptune.git_info.GitInfo` - An object representing information about git repository.

    Examples:
        .. code:: python3

            # Get git info from the current directory
            git_info = get_git_info('.')
    """
    try:
        # GitPython is an optional dependency; import lazily so the rest of
        # this module works without it.
        # pylint:disable=bad-option-value,import-outside-toplevel
        import git
        repo = git.Repo(repo_path, search_parent_directories=True)
        commit = repo.head.commit
        active_branch = ""
        try:
            active_branch = repo.active_branch.name
        except TypeError as e:
            # GitPython raises TypeError with this exact message prefix when
            # HEAD is detached; report it explicitly instead of a branch name.
            if str(e.args[0]).startswith("HEAD is a detached symbolic reference as it points to"):
                active_branch = "Detached HEAD"
        remote_urls = [remote.url for remote in repo.remotes]
        return GitInfo(
            commit_id=commit.hexsha,
            message=commit.message,
            author_name=commit.author.name,
            author_email=commit.author.email,
            commit_date=commit.committed_datetime,
            repository_dirty=repo.is_dirty(untracked_files=True),
            active_branch=active_branch,
            remote_urls=remote_urls
        )
    except:  # pylint: disable=bare-except
        # Deliberately broad: any failure (no git binary, not inside a repo,
        # GitPython missing, ...) must degrade gracefully to None.
        return None
def with_api_exceptions_handler(func):
    """Decorator mapping transport/HTTP failures onto neptune API exceptions.

    Transient failures (connection errors, timeouts, 502/503/504) are retried
    up to 11 attempts with exponential back-off (2**retry seconds); a warning
    is logged from the 7th attempt on. Non-transient HTTP errors are mapped to
    ServerError / Unauthorized / Forbidden / SSLError. If all retries are
    exhausted, ConnectionLost is raised.

    Fix: ``functools.wraps`` is applied so the wrapper preserves the wrapped
    function's ``__name__``, ``__doc__`` and other metadata (the module already
    imports functools).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        for retry in range(0, 11):
            try:
                return func(*args, **kwargs)
            except requests.exceptions.SSLError:
                raise SSLError()
            except (BravadoConnectionError, BravadoTimeoutError,
                    requests.exceptions.ConnectionError, requests.exceptions.Timeout,
                    HTTPRequestTimeout, HTTPServiceUnavailable, HTTPGatewayTimeout, HTTPBadGateway):
                # Transient transport failure: warn (late retries only) and back off.
                if retry >= 6:
                    _logger.warning('Experiencing connection interruptions. Reestablishing communication with Neptune.')
                time.sleep(2 ** retry)
                continue
            except HTTPServerError:
                raise ServerError()
            except HTTPUnauthorized:
                raise Unauthorized()
            except HTTPForbidden:
                raise Forbidden()
            except requests.exceptions.RequestException as e:
                if e.response is None:
                    raise
                status_code = e.response.status_code
                if status_code in (
                        HTTPBadGateway.status_code,
                        HTTPServiceUnavailable.status_code,
                        HTTPGatewayTimeout.status_code):
                    # Same transient-failure treatment for gateway-style statuses.
                    if retry >= 6:
                        _logger.warning(
                            'Experiencing connection interruptions. Reestablishing communication with Neptune.')
                    time.sleep(2 ** retry)
                    continue
                elif status_code >= HTTPInternalServerError.status_code:
                    raise ServerError()
                elif status_code == HTTPUnauthorized.status_code:
                    raise Unauthorized()
                elif status_code == HTTPForbidden.status_code:
                    raise Forbidden()
                else:
                    raise
        # All retry attempts used up without success.
        raise ConnectionLost()
    return wrapper
def glob(pathname):
    """Wrapper around ``glob.glob`` enabling recursive '**' matching on Python >= 3.5.

    Older interpreters do not accept the ``recursive`` keyword, so it is only
    passed when supported.
    """
    # pylint: disable=unexpected-keyword-arg
    supports_recursive = sys.version_info >= (3, 5)
    if supports_recursive:
        return globlib.glob(pathname, recursive=True)
    return globlib.glob(pathname)
def is_ipython():
    """Return True when an IPython interactive shell is currently active."""
    try:
        # pylint:disable=bad-option-value,import-outside-toplevel
        import IPython
        shell = IPython.core.getipython.get_ipython()
    except ImportError:
        return False
    return shell is not None
| [
"math.isinf",
"neptune.exceptions.NotAFile",
"os.path.isfile",
"glob.glob",
"numpy.unique",
"os.path.abspath",
"neptune.api_exceptions.ConnectionLost",
"pandas.merge",
"os.path.exists",
"math.isnan",
"git.Repo",
"time.sleep",
"neptune.api_exceptions.ServerError",
"neptune.api_exceptions.SS... | [((1216, 1243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1233, 1243), False, 'import logging\n'), ((1731, 1756), 'neptune.exceptions.InvalidNotebookPath', 'InvalidNotebookPath', (['path'], {}), '(path)\n', (1750, 1756), False, 'from neptune.exceptions import InvalidNotebookPath, FileNotFound, NotAFile\n'), ((1769, 1789), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1783, 1789), False, 'import os\n'), ((1805, 1823), 'neptune.exceptions.FileNotFound', 'FileNotFound', (['path'], {}), '(path)\n', (1817, 1823), False, 'from neptune.exceptions import InvalidNotebookPath, FileNotFound, NotAFile\n'), ((1836, 1856), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1850, 1856), False, 'import os\n'), ((1872, 1886), 'neptune.exceptions.NotAFile', 'NotAFile', (['path'], {}), '(path)\n', (1880, 1886), False, 'from neptune.exceptions import InvalidNotebookPath, FileNotFound, NotAFile\n'), ((2535, 2552), 'math.isnan', 'math.isnan', (['value'], {}), '(value)\n', (2545, 2552), False, 'import math\n'), ((2556, 2573), 'math.isinf', 'math.isinf', (['value'], {}), '(value)\n', (2566, 2573), False, 'import math\n'), ((2777, 2806), 'os.path.exists', 'os.path.exists', (['"""./dockerenv"""'], {}), "('./dockerenv')\n", (2791, 2806), False, 'import os\n'), ((4965, 5016), 'git.Repo', 'git.Repo', (['repo_path'], {'search_parent_directories': '(True)'}), '(repo_path, search_parent_directories=True)\n', (4973, 5016), False, 'import git\n'), ((7807, 7823), 'neptune.api_exceptions.ConnectionLost', 'ConnectionLost', ([], {}), '()\n', (7821, 7823), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((8025, 8047), 'glob.glob', 'globlib.glob', (['pathname'], {}), '(pathname)\n', (8037, 8047), True, 'import glob as globlib\n'), ((8073, 8111), 'glob.glob', 'globlib.glob', (['pathname'], {'recursive': '(True)'}), '(pathname, 
recursive=True)\n', (8085, 8111), True, 'import glob as globlib\n'), ((8248, 8285), 'IPython.core.getipython.get_ipython', 'IPython.core.getipython.get_ipython', ([], {}), '()\n', (8283, 8285), False, 'import IPython\n'), ((2293, 2330), 'pandas.merge', 'pd.merge', (['left', 'right'], {'on': 'on', 'how': 'how'}), '(left, right, on=on, how=how)\n', (2301, 2330), True, 'import pandas as pd\n'), ((2811, 2838), 'os.path.exists', 'os.path.exists', (['cgroup_file'], {}), '(cgroup_file)\n', (2825, 2838), False, 'import os\n'), ((3450, 3467), 'numpy.unique', 'np.unique', (['x_vals'], {}), '(x_vals)\n', (3459, 3467), True, 'import numpy as np\n'), ((3704, 3738), 'os.path.abspath', 'os.path.abspath', (['__main__.__file__'], {}), '(__main__.__file__)\n', (3719, 3738), False, 'import os\n'), ((6085, 6095), 'neptune.api_exceptions.SSLError', 'SSLError', ([], {}), '()\n', (6093, 6095), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((6516, 6538), 'time.sleep', 'time.sleep', (['(2 ** retry)'], {}), '(2 ** retry)\n', (6526, 6538), False, 'import time\n'), ((6622, 6635), 'neptune.api_exceptions.ServerError', 'ServerError', ([], {}), '()\n', (6633, 6635), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((6695, 6709), 'neptune.api_exceptions.Unauthorized', 'Unauthorized', ([], {}), '()\n', (6707, 6709), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((6766, 6777), 'neptune.api_exceptions.Forbidden', 'Forbidden', ([], {}), '()\n', (6775, 6777), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((7372, 7394), 'time.sleep', 'time.sleep', (['(2 ** retry)'], {}), '(2 ** retry)\n', (7382, 7394), False, 'import time\n'), ((7523, 7536), 'neptune.api_exceptions.ServerError', 'ServerError', ([], {}), '()\n', (7534, 7536), False, 'from 
neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((7629, 7643), 'neptune.api_exceptions.Unauthorized', 'Unauthorized', ([], {}), '()\n', (7641, 7643), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n'), ((7733, 7744), 'neptune.api_exceptions.Forbidden', 'Forbidden', ([], {}), '()\n', (7742, 7744), False, 'from neptune.api_exceptions import ConnectionLost, Forbidden, ServerError, Unauthorized, SSLError\n')] |
"""
Production of corner plots.
Modified from a fork of https://github.com/dfm/corner.py .
Original code: Copyright (c) 2013-2020 Dan Foreman-Mackey
Full license: https://github.com/dfm/corner.py/blob/main/LICENSE
This modified version:
- Add the observed quantities to the corner plots
- Colours for the plots
- Add KDE to non-contour panels
- Cleaned
"""
import logging
import colorsys
import numpy as np
import matplotlib
from scipy.stats import gaussian_kde
from scipy.ndimage import gaussian_filter
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
import matplotlib.colors as mc
# Select a non-interactive backend so figures can be rendered without a
# display; this must happen before pyplot is imported.
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Shared font settings used for axis labels and panel titles.
fontdic = {"size": 12}
# Public API of this module.
__all__ = ["corner", "hist2d"]
def corner(
    xs,
    smooth=None,
    smooth1d="kde",
    labels=None,
    label_kwargs=fontdic,
    show_titles=False,
    title_fmt=".3f",
    title_kwargs=fontdic,
    truth_color="#4682b4",
    scale_hist=False,
    quantiles=None,
    max_n_ticks=5,
    use_math_text=False,
    reverse=False,
    plotin=None,
    plotout=None,
    autobins=True,
    binrule_fallback="scott",
    uncert="quantiles",
    kde_points=250,
    kde_method="silverman",
    **hist2d_kwargs
):
    """
    Make a corner plot showing the projections of a data set in a multi-dimensional
    space. kwargs are passed to hist2d() or used for `matplotlib` styling.

    Fixes relative to the previous revision:
    - ``hist2d`` is called with its actual keyword ``prange`` (the old
      ``range=`` keyword was silently swallowed by ``**hist2d_kwargs``, so the
      per-panel axis ranges never reached hist2d).
    - When ``autobins`` is a sequence of per-dimension bin counts, it is now
      actually used (previously ``bins`` was left unassigned in that branch,
      causing a NameError later).

    Parameters
    ----------
    xs : array_like[nsamples, ndim]
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.
    smooth: float, optional
        The standard deviation for Gaussian kernel passed to
        `scipy.ndimage.gaussian_filter` to smooth the 2-D histograms. If `None`
        (default), no smoothing is applied.
    smooth1d: str or float, optional
        If "kde", a Kernel Density Estimate (KDE) is used in the 1D histograms.
        Otherwise, as ``smooth`` above, but for the 1D histograms.
    labels : None or iterable (ndim,), optional
        A list of names for the dimensions.
    label_kwargs : dict, optional
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.
    show_titles : bool, optional
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.
    title_fmt : string, optional
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)
    title_kwargs : dict, optional
        Any extra keyword arguments to send to the `set_title` command.
    truth_color : str or dict, optional
        A ``matplotlib`` style color for the truths makers or a dict with the colors
        with keys being the labels.
    scale_hist : bool, optional
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?
    quantiles : iterable, optional
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.
    max_n_ticks: int, optional
        Maximum number of ticks to try to use
    use_math_text : bool, optional
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.
    reverse : bool, optional
        If true, plot the corner plot starting in the upper-right corner instead
        of the usual bottom-left corner
    plotin : iterable (ndim,), optional
        A list of reference input values to indicate on the plots.
    plotout : iterable (3 * ndim,), optional
        A list of reference output values (value, +err, -err per dimension)
        to indicate on the plots. NOTE: the 1-D panels index into this
        unconditionally, so in practice it must be provided.
    autobins : bool or int or array_like[ndim,] optional
        If True, automatically determine bin edges. Otherwise, the number of bins to use
        in histograms, either as a fixed value for all dimensions, or as a list of
        integers for each dimension.
    binrule_fallback : str, optional
        In case auto-binning fails for the posterior distribution (usually due to too
        many zeros, which causes a memory leak), use this rule for posterior binning
        instead.
    uncert : str, optional
        If uncertainties are given in terms of 'quantiles' or 'std' (standard
        deviation), included here to change formatting when reporting inferred
        quantities in titles.
    kde_points : float, optional
        Number of points to sample the KDE on. The higher number of points, the smoother
        the KDE, but the longer computation time.
    kde_method : str, optional
        Method used to select the bandwidth in the gaussian KDE. Passed directly to
        the routine in SciPy. Default is Scott's rule.
    **hist2d_kwargs, optional
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.
    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = {}
    if label_kwargs is None:
        label_kwargs = {}
    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], (
        "I don't believe that you want more " "dimensions than samples!"
    )
    # Parse the parameter ranges.
    # --> Set dummy ranges [v-1, v+1] for parameters that never change..
    mins = np.array([x.min() for x in xs])
    maxs = np.array([x.max() for x in xs])
    m = mins == maxs
    mins[m] -= 1
    maxs[m] += 1
    prange = np.transpose((mins, maxs)).tolist()
    if len(prange) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")
    # Parse the bin specifications.
    if isinstance(autobins, bool) and autobins:
        bins = []
        for i, x in enumerate(xs):
            try:
                xbin = np.histogram_bin_edges(x, bins="auto", range=np.sort(prange[i]))
            except MemoryError:
                print(
                    "WARNING! Using 'auto' as bin-rule causes a memory crash! "
                    "Switching to '{0}'".format(binrule_fallback),
                    "for the parameter '{0}'!".format(labels[i]),
                )
                xbin = np.histogram_bin_edges(
                    x, bins=binrule_fallback, range=np.sort(prange[i])
                )
            bins.append(xbin)
    else:
        try:
            bins = [int(autobins) for _ in prange]
        except TypeError:
            if len(autobins) != len(prange):
                raise ValueError("Dimension mismatch between bins and range")
            # FIX: a valid per-dimension sequence was previously discarded,
            # leaving `bins` unassigned (NameError further down).
            bins = list(autobins)
    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0  # size of one side of one panel
    if reverse:
        lbdim = 0.2 * factor  # size of left/bottom margin
        trdim = 0.5 * factor  # size of top/right margin
    else:
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.0) * whspace
    dim = lbdim + plotdim + trdim
    # Create a new figure
    fig, axes = plt.subplots(K, K, figsize=(dim, dim))
    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(
        left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
    )
    # Set up the default histogram keywords.
    color = "k"
    hist_kwargs = {"color": color}
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()
        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            if reverse:
                ax = axes[K - i - 1, K - i - 1]
            else:
                ax = axes[i, i]
        if isinstance(truth_color, str):
            tcolor = truth_color
        else:
            tcolor = lighten_color(truth_color[i], 0.5)
        # Plot the histograms.
        if smooth1d is None:
            n, _, _ = ax.hist(x, bins=bins[i], range=np.sort(prange[i]), **hist_kwargs)
        else:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            n, b = np.histogram(x, bins=bins[i], range=np.sort(prange[i]))
            if smooth1d != "kde":
                n = gaussian_filter(n, smooth1d)
                x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
                y0 = np.array(list(zip(n, n))).flatten()
            else:
                try:
                    kernel = gaussian_kde(x, bw_method=kde_method)
                except np.linalg.LinAlgError:
                    print("WARNING! Unable to create KDE. Skipping plot...")
                    raise
                x0 = np.linspace(np.amin(x), np.amax(x), num=kde_points)
                y0 = kernel(x0)
                y0 /= np.amax(y0)
                n = gaussian_filter(n, 1)
                x0_hist = np.array(list(zip(b[:-1], b[1:]))).flatten()
                y0_hist = np.array(list(zip(n, n))).flatten() / np.amax(n)
                ax.fill_between(
                    x0_hist, y0_hist, y2=-1, interpolate=True, color=tcolor, alpha=0.15
                )
            ax.plot(x0, y0, **hist_kwargs)
            ax.fill_between(x0, y0, y2=-1, interpolate=True, color=tcolor, alpha=0.15)
        # Plot quantiles: plotout packs (value, +err, -err) per dimension.
        q = plotout[3 * i]
        p = plotout[3 * i + 1]
        m = plotout[3 * i + 2]
        ax.axvline(q, ls="solid", color=color)
        ax.axvline(q + p, ls="dashed", color=color)
        ax.axvline(q - m, ls="dashed", color=color)
        # Plot input parameters when they are given (-9999 marks "missing").
        if plotin is not None:
            if plotin[2 * i] != -9999:
                inx = plotin[2 * i]
                instd = plotin[2 * i + 1]
                ax.axvline(inx, ls="dashdot", color="0.4")
                ax.axvline(inx - instd, ls="dotted", color="0.4")
                ax.axvline(inx + instd, ls="dotted", color="0.4")
        if show_titles:
            title = None
            if title_fmt is not None:
                # Format the quantile display.
                fmt = "{{0:{0}}}".format(title_fmt).format
                if uncert == "quantiles":
                    title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                    title = title.format(fmt(q), fmt(m), fmt(p))
                else:
                    title = r"${{{0}}}\pm{{{1}}}$"
                    title = title.format(fmt(q), fmt(p))
                # Add in the column name if it's given.
                if labels is not None:
                    title = "{0} = {1}".format(labels[i], title)
            elif labels is not None:
                title = "{0}".format(labels[i])
            if title is not None:
                if reverse:
                    ax.set_xlabel(title, **title_kwargs)
                else:
                    ax.set_title(title, **title_kwargs)
        # Set up the axes.
        ax.set_xlim(prange[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.05 * maxn)
        elif smooth1d == "kde":
            maxn = np.amax(y0)
            ax.set_ylim(-0.1 * maxn, 1.05 * maxn)
        else:
            ax.set_ylim(0, 1.05 * np.max(n))
        ax.set_yticklabels([])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())
        if i < K - 1:
            ax.set_xticklabels([])
        else:
            if reverse:
                ax.xaxis.tick_top()
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                if reverse:
                    ax.set_title(labels[i], y=1.25, **label_kwargs)
                else:
                    ax.set_xlabel(labels[i], **label_kwargs)
            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=use_math_text))
        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                if reverse:
                    ax = axes[K - i - 1, K - j - 1]
                else:
                    ax = axes[i, j]
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue
            if isinstance(truth_color, str):
                tcolor = truth_color
            else:
                tcolor = lighten_color(truth_color[j], 0.5)
            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()
            hist2d(
                y,
                x,
                ax=ax,
                # FIX: hist2d's parameter is named `prange`; the previous
                # `range=` keyword fell into **hist2d_kwargs and was ignored.
                prange=[prange[j], prange[i]],
                color=tcolor,
                smooth=smooth,
                bins=[bins[j], bins[i]],
                **hist2d_kwargs
            )
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            if i < K - 1:
                ax.set_xticklabels([])
            else:
                if reverse:
                    ax.xaxis.tick_top()
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    if reverse:
                        ax.xaxis.set_label_coords(0.5, 1.4)
                    else:
                        ax.xaxis.set_label_coords(0.5, -0.35)
                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=use_math_text))
            if j > 0:
                ax.set_yticklabels([])
            else:
                if reverse:
                    ax.yaxis.tick_right()
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    if reverse:
                        ax.set_ylabel(labels[i], rotation=-90, **label_kwargs)
                        ax.yaxis.set_label_coords(1.3, 0.5)
                    else:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.35, 0.5)
                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=use_math_text))
    return fig
def hist2d(
    x,
    y,
    bins=20,
    prange=None,
    weights=None,
    levels=None,
    smooth=None,
    ax=None,
    color=None,
    plot_datapoints=True,
    plot_density=True,
    plot_contours=True,
    no_fill_contours=True,
    fill_contours=True,
    contour_kwargs=None,
    contourf_kwargs=None,
    data_kwargs=None,
    **kwargs
):
    """
    Plot a 2-D histogram of samples.

    Parameters
    ----------
    x : array_like[nsamples,]
        The samples.
    y : array_like[nsamples,]
        The samples.
    bins : int or array_like
        Bin specification forwarded to ``np.histogram2d``.
    prange : array_like[2, 2], optional
        Plot extent ``[[xmin, xmax], [ymin, ymax]]``; defaults to the data
        range of ``x`` and ``y``.
    weights : array_like[nsamples,], optional
        Sample weights forwarded to ``np.histogram2d``.
    levels : array_like
        The contour levels to draw.  Defaults to the 0.5/1.0/1.5/2.0 "sigma"
        mass fractions (see the corner documentation on sigmas).
    smooth : float, optional
        Standard deviation of a Gaussian kernel applied to the histogram
        (requires scipy).
    ax : matplotlib.Axes
        A axes instance on which to add the 2-D histogram.
    color : str or tuple, optional
        Base color for the data points, density map and contours
        (defaults to black, ``"k"``).
    plot_datapoints : bool
        Draw the individual data points.
    plot_density : bool
        Draw the density colormap.
    plot_contours : bool
        Draw the contours.
    no_fill_contours : bool
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).
    fill_contours : bool
        Fill the contours.
    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.
    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    **kwargs
        Accepted for call-site compatibility; not used in the body.
    """
    if ax is None:
        ax = plt.gca()
    # Set the default range based on the data range if not provided.
    if prange is None:
        prange = [[x.min(), x.max()], [y.min(), y.max()]]
    # Set up the default plotting arguments.
    if color is None:
        color = "k"
    # Choose the default "sigma" contour levels,
    # https://corner.readthedocs.io/en/latest/pages/sigmas.html
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list(
        "density_cmap", [color, (1, 1, 1, 0)]
    )
    # This color map is used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list(
        "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2
    )
    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    # Ramp the fill alpha so that outer (lower-index) levels are more transparent.
    for i, l in enumerate(levels):
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)
    # We'll make the 2D histogram to directly estimate the density.
    H, X, Y = np.histogram2d(
        x.flatten(),
        y.flatten(),
        bins=bins,
        range=list(map(np.sort, prange)),
        weights=weights,
    )
    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)
    # Compute the density levels.
    # Skipped entirely when either coordinate is constant (degenerate data).
    if not (np.all(x == x[0]) or np.all(y == y[0])):
        if plot_contours or plot_density:
            Hflat = H.flatten()
            inds = np.argsort(Hflat)[::-1]
            Hflat = Hflat[inds]
            sm = np.cumsum(Hflat)
            sm /= sm[-1]
            # V[i] = histogram value whose super-level set encloses mass fraction v0.
            V = np.empty(len(levels))
            for i, v0 in enumerate(levels):
                try:
                    V[i] = Hflat[sm <= v0][-1]
                except Exception:
                    V[i] = Hflat[0]
            V.sort()
            # Nudge apart any duplicate levels so contouring stays valid.
            m = np.diff(V) == 0
            if np.any(m):
                logging.warning("Too few points to create valid contours")
            while np.any(m):
                V[np.where(m)[0][0]] *= 1.0 - 1e-4
                m = np.diff(V) == 0
            V.sort()
            # Compute the bin centers.
            X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
            # Extend the array for the sake of the contours at the plot edges.
            H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
            H2[2:-2, 2:-2] = H
            H2[2:-2, 1] = H[:, 0]
            H2[2:-2, -2] = H[:, -1]
            H2[1, 2:-2] = H[0]
            H2[-2, 2:-2] = H[-1]
            H2[1, 1] = H[0, 0]
            H2[1, -2] = H[0, -1]
            H2[-2, 1] = H[-1, 0]
            H2[-2, -2] = H[-1, -1]
            X2 = np.concatenate(
                [
                    X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
                    X1,
                    X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
                ]
            )
            Y2 = np.concatenate(
                [
                    Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
                    Y1,
                    Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
                ]
            )
            if plot_datapoints:
                if data_kwargs is None:
                    data_kwargs = dict()
                data_kwargs["color"] = data_kwargs.get("color", color)
                data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
                data_kwargs["mec"] = data_kwargs.get("mec", "none")
                data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
                ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
    # Plot the base fill to hide the densest data points.
    if not (np.all(x == x[0]) or np.all(y == y[0])):
        if (plot_contours or plot_density) and not no_fill_contours:
            ax.contourf(
                X2, Y2, H2.T, [V.min(), H.max()], cmap=white_cmap, antialiased=False
            )
        if plot_contours and fill_contours:
            if contourf_kwargs is None:
                contourf_kwargs = {}
            contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
            contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased", False)
            ax.contourf(
                X2,
                Y2,
                H2.T,
                np.concatenate([[0], V, [H.max() * (1 + 1e-4)]]),
                **contourf_kwargs
            )
        # Plot the density map. This can't be plotted at the same time as the
        # contour fills.
        elif plot_density:
            ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
        # Plot the contour edge colors.
        if plot_contours:
            if contour_kwargs is None:
                contour_kwargs = dict()
            contour_kwargs["colors"] = contour_kwargs.get("colors", color)
            ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
    ax.set_xlim(prange[0])
    ax.set_ylim(prange[1])
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.

    NOTE(review): the implementation scales the HLS lightness by ``amount``
    directly (``amount * l``), which darkens for ``amount < 1`` rather than
    applying the ``1 - amount * (1 - l)`` form the summary suggests — confirm
    intended semantics before changing.

    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    try:
        # Named matplotlib colors ('g', 'red', ...) resolve via the name table.
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # Not a registered name (hex string raises KeyError, an RGB list/tuple
        # raises TypeError/KeyError): pass the value straight to to_rgb below.
        # A bare `except:` previously hid unrelated errors here.
        c = color
    # Convert to HLS, scale the lightness channel (clamped to [0, 1]), convert back.
    h, l, s = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(h, max(0, min(1, amount * l)), s)
| [
"numpy.amin",
"numpy.shape",
"numpy.argsort",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.atleast_2d",
"logging.warning",
"scipy.ndimage.gaussian_filter",
"matplotlib.ticker.MaxNLocator",
"numpy.tra... | [((702, 723), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (716, 723), False, 'import matplotlib\n'), ((5385, 5402), 'numpy.atleast_1d', 'np.atleast_1d', (['xs'], {}), '(xs)\n', (5398, 5402), True, 'import numpy as np\n'), ((7555, 7593), 'matplotlib.pyplot.subplots', 'plt.subplots', (['K', 'K'], {'figsize': '(dim, dim)'}), '(K, K, figsize=(dim, dim))\n', (7567, 7593), True, 'import matplotlib.pyplot as plt\n'), ((17429, 17501), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""density_cmap"""', '[color, (1, 1, 1, 0)]'], {}), "('density_cmap', [color, (1, 1, 1, 0)])\n", (17462, 17501), False, 'from matplotlib.colors import LinearSegmentedColormap, colorConverter\n'), ((17609, 17685), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""white_cmap"""', '[(1, 1, 1), (1, 1, 1)]'], {'N': '(2)'}), "('white_cmap', [(1, 1, 1), (1, 1, 1)], N=2)\n", (17642, 17685), False, 'from matplotlib.colors import LinearSegmentedColormap, colorConverter\n'), ((17820, 17849), 'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['color'], {}), '(color)\n', (17842, 17849), False, 'from matplotlib.colors import LinearSegmentedColormap, colorConverter\n'), ((5443, 5460), 'numpy.atleast_2d', 'np.atleast_2d', (['xs'], {}), '(xs)\n', (5456, 5460), True, 'import numpy as np\n'), ((16831, 16840), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16838, 16840), True, 'import matplotlib.pyplot as plt\n'), ((18391, 18417), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['H', 'smooth'], {}), '(H, smooth)\n', (18406, 18417), False, 'from scipy.ndimage import gaussian_filter\n'), ((5950, 5976), 'numpy.transpose', 'np.transpose', (['(mins, maxs)'], {}), '((mins, maxs))\n', (5962, 5976), True, 'import numpy as np\n'), ((11548, 11557), 'numpy.max', 'np.max', (['n'], {}), '(n)\n', (11554, 11557), True, 'import numpy as np\n'), ((18465, 18482), 
'numpy.all', 'np.all', (['(x == x[0])'], {}), '(x == x[0])\n', (18471, 18482), True, 'import numpy as np\n'), ((18486, 18503), 'numpy.all', 'np.all', (['(y == y[0])'], {}), '(y == y[0])\n', (18492, 18503), True, 'import numpy as np\n'), ((18672, 18688), 'numpy.cumsum', 'np.cumsum', (['Hflat'], {}), '(Hflat)\n', (18681, 18688), True, 'import numpy as np\n'), ((19002, 19011), 'numpy.any', 'np.any', (['m'], {}), '(m)\n', (19008, 19011), True, 'import numpy as np\n'), ((19106, 19115), 'numpy.any', 'np.any', (['m'], {}), '(m)\n', (19112, 19115), True, 'import numpy as np\n'), ((20728, 20745), 'numpy.all', 'np.all', (['(x == x[0])'], {}), '(x == x[0])\n', (20734, 20745), True, 'import numpy as np\n'), ((20749, 20766), 'numpy.all', 'np.all', (['(y == y[0])'], {}), '(y == y[0])\n', (20755, 20766), True, 'import numpy as np\n'), ((22419, 22431), 'matplotlib.colors.to_rgb', 'mc.to_rgb', (['c'], {}), '(c)\n', (22428, 22431), True, 'import matplotlib.colors as mc\n'), ((8121, 8133), 'numpy.shape', 'np.shape', (['xs'], {}), '(xs)\n', (8129, 8133), True, 'import numpy as np\n'), ((8850, 8878), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['n', 'smooth1d'], {}), '(n, smooth1d)\n', (8865, 8878), False, 'from scipy.ndimage import gaussian_filter\n'), ((9384, 9395), 'numpy.amax', 'np.amax', (['y0'], {}), '(y0)\n', (9391, 9395), True, 'import numpy as np\n'), ((9416, 9437), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['n', '(1)'], {}), '(n, 1)\n', (9431, 9437), False, 'from scipy.ndimage import gaussian_filter\n'), ((11659, 11670), 'numpy.amax', 'np.amax', (['y0'], {}), '(y0)\n', (11666, 11670), True, 'import numpy as np\n'), ((11879, 11892), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (11890, 11892), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((11933, 11946), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (11944, 11946), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((12001, 
12040), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (['max_n_ticks'], {'prune': '"""lower"""'}), "(max_n_ticks, prune='lower')\n", (12012, 12040), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((12081, 12094), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (12092, 12094), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((12589, 12631), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {'useMathText': 'use_math_text'}), '(useMathText=use_math_text)\n', (12604, 12631), False, 'from matplotlib.ticker import ScalarFormatter\n'), ((18599, 18616), 'numpy.argsort', 'np.argsort', (['Hflat'], {}), '(Hflat)\n', (18609, 18616), True, 'import numpy as np\n'), ((18971, 18981), 'numpy.diff', 'np.diff', (['V'], {}), '(V)\n', (18978, 18981), True, 'import numpy as np\n'), ((19029, 19087), 'logging.warning', 'logging.warning', (['"""Too few points to create valid contours"""'], {}), "('Too few points to create valid contours')\n", (19044, 19087), False, 'import logging\n'), ((19440, 19482), 'numpy.zeros', 'np.zeros', (['(H.shape[0] + 4, H.shape[1] + 4)'], {}), '((H.shape[0] + 4, H.shape[1] + 4))\n', (19448, 19482), True, 'import numpy as np\n'), ((8560, 8578), 'numpy.sort', 'np.sort', (['prange[i]'], {}), '(prange[i])\n', (8567, 8578), True, 'import numpy as np\n'), ((8776, 8794), 'numpy.sort', 'np.sort', (['prange[i]'], {}), '(prange[i])\n', (8783, 8794), True, 'import numpy as np\n'), ((9070, 9107), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['x'], {'bw_method': 'kde_method'}), '(x, bw_method=kde_method)\n', (9082, 9107), False, 'from scipy.stats import gaussian_kde\n'), ((9290, 9300), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (9297, 9300), True, 'import numpy as np\n'), ((9302, 9312), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (9309, 9312), True, 'import numpy as np\n'), ((9573, 9583), 'numpy.amax', 'np.amax', (['n'], {}), '(n)\n', (9580, 9583), True, 'import numpy as np\n'), ((12684, 12696), 
'numpy.shape', 'np.shape', (['xs'], {}), '(xs)\n', (12692, 12696), True, 'import numpy as np\n'), ((13722, 13735), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (13733, 13735), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((13780, 13793), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (13791, 13793), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((13856, 13895), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (['max_n_ticks'], {'prune': '"""lower"""'}), "(max_n_ticks, prune='lower')\n", (13867, 13895), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((13940, 13979), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (['max_n_ticks'], {'prune': '"""lower"""'}), "(max_n_ticks, prune='lower')\n", (13951, 13979), False, 'from matplotlib.ticker import MaxNLocator, NullLocator\n'), ((14572, 14614), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {'useMathText': 'use_math_text'}), '(useMathText=use_math_text)\n', (14587, 14614), False, 'from matplotlib.ticker import ScalarFormatter\n'), ((15288, 15330), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {'useMathText': 'use_math_text'}), '(useMathText=use_math_text)\n', (15303, 15330), False, 'from matplotlib.ticker import ScalarFormatter\n'), ((19188, 19198), 'numpy.diff', 'np.diff', (['V'], {}), '(V)\n', (19195, 19198), True, 'import numpy as np\n'), ((6318, 6336), 'numpy.sort', 'np.sort', (['prange[i]'], {}), '(prange[i])\n', (6325, 6336), True, 'import numpy as np\n'), ((11769, 11778), 'numpy.max', 'np.max', (['n'], {}), '(n)\n', (11775, 11778), True, 'import numpy as np\n'), ((17254, 17278), 'numpy.arange', 'np.arange', (['(0.5)', '(2.1)', '(0.5)'], {}), '(0.5, 2.1, 0.5)\n', (17263, 17278), True, 'import numpy as np\n'), ((6722, 6740), 'numpy.sort', 'np.sort', (['prange[i]'], {}), '(prange[i])\n', (6729, 6740), True, 'import numpy as np\n'), ((19135, 19146), 'numpy.where', 'np.where', (['m'], 
{}), '(m)\n', (19143, 19146), True, 'import numpy as np\n'), ((19859, 19877), 'numpy.array', 'np.array', (['[-2, -1]'], {}), '([-2, -1])\n', (19867, 19877), True, 'import numpy as np\n'), ((19880, 19895), 'numpy.diff', 'np.diff', (['X1[:2]'], {}), '(X1[:2])\n', (19887, 19895), True, 'import numpy as np\n'), ((19950, 19966), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (19958, 19966), True, 'import numpy as np\n'), ((19969, 19985), 'numpy.diff', 'np.diff', (['X1[-2:]'], {}), '(X1[-2:])\n', (19976, 19985), True, 'import numpy as np\n'), ((20098, 20116), 'numpy.array', 'np.array', (['[-2, -1]'], {}), '([-2, -1])\n', (20106, 20116), True, 'import numpy as np\n'), ((20119, 20134), 'numpy.diff', 'np.diff', (['Y1[:2]'], {}), '(Y1[:2])\n', (20126, 20134), True, 'import numpy as np\n'), ((20189, 20205), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (20197, 20205), True, 'import numpy as np\n'), ((20208, 20224), 'numpy.diff', 'np.diff', (['Y1[-2:]'], {}), '(Y1[-2:])\n', (20215, 20224), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pandas as pd
import numpy as np
## it can be more general allowing the user to pass
## the name of the columns
def getMcSnowTable(mcSnowPath):
    """
    Read a McSnow output table.

    Parameters
    ----------
    mcSnowPath: path to the headerless CSV file produced by McSnow

    Returns
    -------
    pandas.DataFrame whose columns follow the local ``columns`` list,
    extended with a 'radii' column (half of 'dia') and a density column
    'sRho' added by calcRho().  The velocity is sign-flipped so that it
    is negative towards the ground.
    """
    columns = ['time', 'mTot', 'sHeight', 'vel', 'dia',
               'area', 'sMice', 'sVice', 'sPhi', 'sRhoIce',
               'igf', 'sMultpl', 'sMrime', 'sVrime']
    table = pd.read_csv(mcSnowPath, header=None, names=columns).copy()
    # Flip the sign convention: falling particles get negative velocity.
    table['vel'] *= -1.
    table['radii'] = table['dia'] / 2.
    return calcRho(table)
def calcRho(mcTable):
    """
    Calculate the density of each super particle.

    Parameters
    ----------
    mcTable: DataFrame from getMcSnowTable(); must provide the columns
        'time', 'dia', 'mTot' and the aspect ratio 'sPhi'.

    Returns
    -------
    The same DataFrame (modified in place) with an additional column
    'sRho'.  The spheroid volume is computed separately for aspect
    ratio < 1 and for aspect ratio >= 1.  The factors 1e2 on 'dia' and
    1e3 on 'mTot' suggest m->cm and kg->g conversions (i.e. g/cm^3) —
    verify against the McSnow output units.
    """
    mcTable['sRho'] = np.ones_like(mcTable['time'])*np.nan
    # Use .loc for the masked assignments: the original chained form
    # mcTable['sRho'][mask] = ... raises SettingWithCopyWarning and is a
    # silent no-op under pandas copy-on-write.
    # AR < 1: V = (pi/6) * D^3 * phi
    oblate = mcTable['sPhi'] < 1
    tmpVol = (np.pi/6.) * (mcTable.loc[oblate, 'dia']*1e2)**3 * mcTable.loc[oblate, 'sPhi']
    mcTable.loc[oblate, 'sRho'] = (mcTable.loc[oblate, 'mTot']*1e3) / tmpVol
    # AR >= 1: V = (pi/6) * D^3 * phi^2
    prolate = mcTable['sPhi'] >= 1
    tmpVol = (np.pi/6.) * (mcTable.loc[prolate, 'dia']*1e2)**3 * mcTable.loc[prolate, 'sPhi']**2
    mcTable.loc[prolate, 'sRho'] = (mcTable.loc[prolate, 'mTot']*1e3) / tmpVol
    return mcTable
def creatZeCols(mcTable, wls):
    """
    Add empty per-particle reflectivity columns.

    Parameters
    ----------
    mcTable: output from getMcSnowTable()
    wls: wavelengths (iterable) [mm]

    Returns
    -------
    mcTable with NaN-initialised columns 'sZeH_<wl>' and 'sZeV_<wl>'
    (one pair per wavelength, labelled in '%.2e' notation) for storing
    Ze_H and Ze_V of a single particle.
    """
    for wl in wls:
        label = '{:.2e}'.format(wl)
        for pol in ('H', 'V'):
            mcTable['sZe{0}_{1}'.format(pol, label)] = np.nan * np.ones_like(mcTable['time'])
    return mcTable
def creatKdpCols(mcTable, wls):
    """
    Add empty per-particle KDP columns.

    Parameters
    ----------
    mcTable: output from getMcSnowTable()
    wls: wavelengths (iterable) [mm]

    Returns
    -------
    mcTable with one NaN-initialised column 'sKDP_<wl>' per wavelength
    (labelled in '%.2e' notation) for storing the calculated KDP.
    """
    for wl in wls:
        colName = 'sKDP_{:.2e}'.format(wl)
        mcTable[colName] = np.nan * np.ones_like(mcTable['time'])
    return mcTable
| [
"pandas.read_csv",
"numpy.ones_like"
] | [((804, 853), 'pandas.read_csv', 'pd.read_csv', (['mcSnowPath'], {'header': 'None', 'names': 'names'}), '(mcSnowPath, header=None, names=names)\n', (815, 853), True, 'import pandas as pd\n'), ((1476, 1505), 'numpy.ones_like', 'np.ones_like', (["mcTable['time']"], {}), "(mcTable['time'])\n", (1488, 1505), True, 'import numpy as np\n'), ((2465, 2494), 'numpy.ones_like', 'np.ones_like', (["mcTable['time']"], {}), "(mcTable['time'])\n", (2477, 2494), True, 'import numpy as np\n'), ((2546, 2575), 'numpy.ones_like', 'np.ones_like', (["mcTable['time']"], {}), "(mcTable['time'])\n", (2558, 2575), True, 'import numpy as np\n'), ((3035, 3064), 'numpy.ones_like', 'np.ones_like', (["mcTable['time']"], {}), "(mcTable['time'])\n", (3047, 3064), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 15:50:17 2017
https://github.com/ChangdeDu/DGMM
@author: duchangde
"""
import os
# Environment flag set before any theano-backed code could be imported.
os.environ['THEANO_FLAGS'] = "device=gpu"
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import savemat, loadmat
from sklearn import preprocessing
from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape
from tensorflow.keras.layers import Conv2D, Conv2DTranspose
from tensorflow.keras.models import Model
from tensorflow.keras import backend
from numpy import random
from tensorflow.keras import optimizers
import matlab.engine
# Start a MATLAB session; `eng.calculateS(...)` is called later to build
# the similarity matrix S.
eng=matlab.engine.start_matlab()
from tensorflow.keras import metrics
from tensorflow.python.framework.ops import disable_eager_execution
# NOTE(review): presumably required for the symbolic Keras loss used below
# (graph-mode tensors) — confirm against the TF version in use.
disable_eager_execution()
# In[]: Load dataset
handwriten_69=loadmat('digit69_28x28.mat')
# fMRI responses: 90 training rows and 10 test rows, each row has 3092 voxels
Y_train = handwriten_69['fmriTrn'].astype('float32')
Y_test = handwriten_69['fmriTest'].astype('float32')
# stimulus images (all of them)
X_train = handwriten_69['stimTrn']# 90 images, one per row, 784 columns each
X_test = handwriten_69['stimTest']# 10 images, one per row, 784 columns each
# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
# In[]: inspect the contents; when run, the rendered result looks poor
stim0=np.reshape(X_test[0],(28,28)).T
stim1=np.reshape(X_test[1],(28,28)).T
stim2=np.reshape(X_test[2],(28,28)).T
stim3=np.reshape(X_test[3],(28,28)).T
stimtrain0=np.reshape(X_train[0],(28,28)).T
stimtrain1=np.reshape(X_train[1],(28,28)).T
stimtrain2=np.reshape(X_train[2],(28,28)).T
stimtrain3=np.reshape(X_train[3],(28,28)).T
# In[]: X holds the stimulus images; the 28x28 = 784 pixels were previously
# flattened one image per row; single channel (same layout as Miyawaki data)
resolution = 28
# channels-first variant (kept for reference):
#X_train = X_train.reshape([X_train.shape[0], 1, resolution, resolution])
#X_test = X_test.reshape([X_test.shape[0], 1, resolution, resolution])
# channels-last (edit rolly); the trailing 1 means grayscale
X_train = X_train.reshape([X_train.shape[0], resolution, resolution, 1])
X_test = X_test.reshape([X_test.shape[0], resolution, resolution, 1])
# In[]: min-max normalize the fMRI signals so values lie in [0, 1] only
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
Y_train = min_max_scaler.fit_transform(Y_train)
Y_test = min_max_scaler.transform(Y_test)
print ('X_train.shape : ')
print (X_train.shape)
print ('Y_train.shape')
print (Y_train.shape)
print ('X_test.shape')
print (X_test.shape)
print ('Y_test.shape')
print (Y_test.shape)
numTrn=X_train.shape[0]# 90 training samples
numTest=X_test.shape[0]# 10 test samples
# In[]: Set the model parameters and hyper-parameters
maxiter = 200       # number of outer training iterations (see training loop)
nb_epoch = 1        # Keras epochs per outer iteration
batch_size = 10
resolution = 28
D1 = X_train.shape[1]*X_train.shape[2]*X_train.shape[3]  # image dimensionality (28*28*1)
D2 = Y_train.shape[1]  # number of voxels
K = 6                  # latent dimensionality (size of Z)
C = 5                  # dimensionality of the residual factors R (numTrn x C) and H (C x D2)
intermediate_dim = 128
#hyper-parameters
# Gamma-style hyper-parameters: the point estimates below are alpha/beta ratios.
tau_alpha = 1
tau_beta = 1
eta_alpha = 1
eta_beta = 1
gamma_alpha = 1
gamma_beta = 1
Beta = 1 # Beta-VAE for Learning Disentangled Representations
rho=0.1 # posterior regularization parameter
k=10 # k-nearest neighbors
t = 10.0 # kernel parameter in similarity measure
L = 100 # Monte-Carlo sampling
np.random.seed(1000)
numTrn=X_train.shape[0]
numTest=X_test.shape[0]
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
filters = 64
# convolution kernel size
num_conv = 3
if backend.image_data_format() == 'channels_first': # or 'channels_last'
    original_img_size = (img_chns, img_rows, img_cols)#1,28, 28
else:
    original_img_size = (img_rows, img_cols, img_chns)#28, 28, 1
# In[]: Building the architechture
# Encoder inputs: the image X plus the fMRI signal Y and its current
# variational parameters (Y_mu, Y_lsgms), which enter only through the loss.
X = Input(shape=original_img_size)
Y = Input(shape=(D2,))
Y_mu = Input(shape=(D2,))
Y_lsgms = Input(shape=(D2,))
# Convolutional encoder: image -> conv stack -> dense -> Gaussian posterior
# parameters (Z_mu, Z_lsgms) of the K-dimensional latent code.
conv_1 = Conv2D(img_chns,
                kernel_size=(2, 2),
                padding='same', activation='relu', name='en_conv_1')(X)
conv_2 = Conv2D(filters,
                kernel_size=(2, 2),
                padding='same', activation='relu',
                strides=(2, 2), name='en_conv_2')(conv_1)
conv_3 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same', activation='relu',
                strides=1, name='en_conv_3')(conv_2)
conv_4 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same', activation='relu',
                strides=1, name='en_conv_4')(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu', name='en_dense_5')(flat)
Z_mu = Dense(K, name='en_mu')(hidden)
Z_lsgms = Dense(K, name='en_var')(hidden)
def sampling(args):
    """Reparameterization trick: draw z = mu + exp(lsgms) * eps with eps ~ N(0, 1).

    NOTE(review): lsgms is exponentiated directly (not 0.5 * lsgms), i.e. it is
    treated as a log standard deviation here — confirm against the KL term.
    """
    mu, lsgms = args
    noise = backend.random_normal(shape=(backend.shape(mu)[0], K), mean=0., stddev=1.0)
    return mu + noise * backend.exp(lsgms)
# Draw a latent sample via the reparameterization layer.
Z = Lambda(sampling, output_shape=(K,))([Z_mu, Z_lsgms])
# In[]: we instantiate these layers separately so as to reuse them later
# (the same decoder weights serve both the training graph and the
# stand-alone generator model built further down).
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(filters * 14 * 14, activation='relu')
if backend.image_data_format() == 'channels_first':
    output_shape = (batch_size, filters, 14, 14)
else:
    output_shape = (batch_size, 14, 14, filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
                                   kernel_size=num_conv,
                                   padding='same',
                                   strides=1,
                                   activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters,
                                   kernel_size=num_conv,
                                   padding='same',
                                   strides=1,
                                   activation='relu')
if backend.image_data_format() == 'channels_first':
    output_shape = (batch_size, filters, 29, 29)
else:
    output_shape = (batch_size, 29, 29, filters)
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
                                          kernel_size=(3, 3),
                                          strides=(2, 2),
                                          padding='valid',
                                          activation='relu')
# Output heads: sigmoid head for the pixel means, tanh head for log-variances.
decoder_mean_squash_mu = Conv2D(img_chns,
                             kernel_size=2,
                             padding='valid',
                             activation='sigmoid')
decoder_mean_squash_lsgms= Conv2D(img_chns,
                             kernel_size=2,
                             padding='valid',
                             activation='tanh')
# Wire the decoder layers onto the sampled latent Z for the training graph.
hid_decoded = decoder_hid(Z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
X_mu = decoder_mean_squash_mu (x_decoded_relu)
X_lsgms = decoder_mean_squash_lsgms (x_decoded_relu)
logc = np.log(2 * np.pi).astype(np.float32)
def X_normal_logpdf(x, mu, lsgms):
lsgms = backend.flatten(lsgms)
return backend.mean(-(0.5 * logc + 0.5 * lsgms) - 0.5 * ((x - mu)**2 / backend.exp(lsgms)), axis=-1)
def Y_normal_logpdf(y, mu, lsgms):
return backend.mean(-(0.5 * logc + 0.5 * lsgms) - 0.5 * ((y - mu)**2 / backend.exp(lsgms)), axis=-1)
def obj(X, X_mu):
X = backend.flatten(X)
X_mu = backend.flatten(X_mu)
Lp = 0.5 * backend.mean( 1 + Z_lsgms - backend.square(Z_mu) - backend.exp(Z_lsgms), axis=-1)
Lx = - metrics.binary_crossentropy(X, X_mu) # Pixels have a Bernoulli distribution
Ly = Y_normal_logpdf(Y, Y_mu, Y_lsgms) # Voxels have a Gaussian distribution
lower_bound = backend.mean(Lp + 10000 * Lx + Ly)
cost = - lower_bound
return cost
# Full training model: all four inputs feed the custom loss `obj`.
DGMM = Model(inputs=[X, Y, Y_mu, Y_lsgms], outputs=X_mu)
opt_method = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
DGMM.compile(optimizer = opt_method, loss = obj)
DGMM.summary()
# build a model to project inputs on the latent space
encoder = Model(inputs=X, outputs=[Z_mu,Z_lsgms])
# build a model to project inputs on the output space
imagepredict = Model(inputs=X, outputs=[X_mu,X_lsgms])
# build a digit generator that can sample from the learned distribution
Z_predict = Input(shape=(K,))
_hid_decoded = decoder_hid(Z_predict)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
X_mu_predict = decoder_mean_squash_mu(_x_decoded_relu)
# NOTE(review): this also uses decoder_mean_squash_mu rather than
# decoder_mean_squash_lsgms — likely a copy-paste slip, though the value is
# never used downstream; confirm before changing.
X_lsgms_predict = decoder_mean_squash_mu(_x_decoded_relu)
imagereconstruct = Model(inputs=Z_predict, outputs=X_mu_predict)
# In[]: Initialization
# Random initial values for the variational factors: latent codes Z,
# latent-to-voxel mapping B, residual factors R and H, and their covariances.
Z_mu = np.mat(random.random(size=(numTrn,K))).astype(np.float32)
B_mu = np.mat(random.random(size=(K,D2))).astype(np.float32)
R_mu = np.mat(random.random(size=(numTrn,C))).astype(np.float32)
sigma_r = np.mat(np.eye((C))).astype(np.float32)
H_mu = np.mat(random.random(size=(C,D2))).astype(np.float32)
sigma_h = np.mat(np.eye((C))).astype(np.float32)
# Precision point estimates start at their prior means (alpha/beta).
tau_mu = tau_alpha / tau_beta
eta_mu = eta_alpha / eta_beta
gamma_mu = gamma_alpha / gamma_beta
Y_mu = np.array(Z_mu * B_mu + R_mu * H_mu).astype(np.float32)
Y_lsgms = np.log(1 / gamma_mu * np.ones((numTrn, D2))).astype(np.float32)
# Export the fMRI data for MATLAB; eng.calculateS presumably reads
# 'data.mat' to build the k-NN similarity matrix S — verify in the .m file.
savemat('data.mat', {'Y_train':Y_train,'Y_test':Y_test})
S=np.mat(eng.calculateS(float(k), float(t))).astype(np.float32)
# In[]: Loop training
# Alternates between (a) a Keras gradient step that refits the encoder/decoder
# given the current Y_mu/Y_lsgms, and (b) closed-form updates of the linear
# factors B, H, R and the precision estimates tau, eta, gamma.
for l in range(maxiter):
    print ('************************************** iter= ', l)
    # update Z
    DGMM.fit([X_train, Y_train, Y_mu, Y_lsgms], X_train,
           shuffle=True,
           verbose=2,
           epochs=nb_epoch,
           batch_size=batch_size)
    [Z_mu,Z_lsgms] = encoder.predict(X_train)
    Z_mu = np.mat(Z_mu)
    # update B (latent-to-voxel mapping): Gaussian posterior with covariance
    # sigma_b; temp1 carries the per-dimension latent variances.
    temp1 = np.exp(Z_lsgms)
    temp2 = Z_mu.T * Z_mu + np.mat(np.diag(temp1.sum(axis=0)))
    temp3 = tau_mu * np.mat(np.eye(K))
    sigma_b = (gamma_mu * temp2 + temp3).I
    B_mu = sigma_b * gamma_mu * Z_mu.T * (np.mat(Y_train) - R_mu * H_mu)
    # update H (residual basis over voxels)
    RTR_mu = R_mu.T * R_mu + numTrn * sigma_r
    sigma_h = (eta_mu * np.mat(np.eye(C)) + gamma_mu * RTR_mu).I
    H_mu = sigma_h * gamma_mu * R_mu.T * (np.mat(Y_train) - Z_mu * B_mu)
    # update R (per-sample residual coefficients)
    HHT_mu = H_mu * H_mu.T + D2 * sigma_h
    sigma_r = (np.mat(np.eye(C)) + gamma_mu * HHT_mu).I
    R_mu = (sigma_r * gamma_mu * H_mu * (np.mat(Y_train) - Z_mu * B_mu).T).T
    # update tau (precision on B); point estimate = alpha_new / beta_new
    tau_alpha_new = tau_alpha + 0.5 * K * D2
    tau_beta_new = tau_beta + 0.5 * ((np.diag(B_mu.T * B_mu)).sum() + D2 * sigma_b.trace())
    tau_mu = tau_alpha_new / tau_beta_new
    tau_mu = tau_mu[0,0]
    # update eta (precision on H)
    eta_alpha_new = eta_alpha + 0.5 * C * D2
    eta_beta_new = eta_beta + 0.5 * ((np.diag(H_mu.T * H_mu)).sum() + D2 * sigma_h.trace())
    eta_mu = eta_alpha_new / eta_beta_new
    eta_mu = eta_mu[0,0]
    # update gamma (observation noise precision) from the squared residual
    gamma_alpha_new = gamma_alpha + 0.5 * numTrn * D2
    gamma_temp = np.mat(Y_train) - Z_mu * B_mu - R_mu * H_mu
    gamma_temp = np.multiply(gamma_temp, gamma_temp)
    gamma_temp = gamma_temp.sum(axis=0)
    gamma_temp = gamma_temp.sum(axis=1)
    gamma_beta_new = gamma_beta + 0.5 * gamma_temp
    gamma_mu = gamma_alpha_new / gamma_beta_new
    gamma_mu = gamma_mu[0,0]
    # calculate Y_mu (reconstructed voxel means fed back into the next fit)
    Y_mu = np.array(Z_mu * B_mu + R_mu * H_mu)
    Y_lsgms = np.log(1 / gamma_mu * np.ones((numTrn, D2)))
# In[]: reconstruct X (image) from Y (fmri)
# For each test fMRI pattern, compute the latent posterior (z_mu_test,
# z_sigma_test) in closed form — regularized by the similarity column s from
# the MATLAB-built matrix S — then Monte-Carlo average L decoded images.
X_reconstructed_mu = np.zeros((numTest, img_chns, img_rows, img_cols))
HHT = H_mu * H_mu.T + D2 * sigma_h
Temp = gamma_mu * np.mat(np.eye(D2)) - (gamma_mu**2) * (H_mu.T * (np.mat(np.eye(C)) + gamma_mu * HHT).I * H_mu)
for i in range(numTest):
    s=S[:,i]
    z_sigma_test = (B_mu * Temp * B_mu.T + (1 + rho * s.sum(axis=0)[0,0]) * np.mat(np.eye(K)) ).I
    z_mu_test = (z_sigma_test * (B_mu * Temp * (np.mat(Y_test)[i,:]).T + rho * np.mat(Z_mu).T * s )).T
    temp_mu = np.zeros((1,img_chns, img_rows, img_cols))#1,1,28,28
    epsilon_std = 1
    for l in range(L):
        epsilon=np.random.normal(0,epsilon_std,1)
        z_test = z_mu_test + np.sqrt(np.diag(z_sigma_test))*epsilon
        x_reconstructed_mu = imagereconstruct.predict(z_test, batch_size=1)#1,28,28,1
        #edit rolly: move the channel axis to position 1 (channels-first)
        x_reconstructed_mu=np.moveaxis(x_reconstructed_mu,-1,1)
        temp_mu = temp_mu + x_reconstructed_mu # careful: this just accumulates the samples
    x_reconstructed_mu = temp_mu / L
    X_reconstructed_mu[i,:,:,:] = x_reconstructed_mu
# In[]: visualize the reconstructed images
# Top row: true test stimuli; bottom row: reconstructions from fMRI.
n = 10
for j in range(1):
    plt.figure(figsize=(12, 2))
    for i in range(n):
        # display original images
        ax = plt.subplot(2, n, i +j*n*2 + 1)
        plt.imshow(np.rot90(np.fliplr(X_test[i+j*n].reshape(resolution ,resolution ))),cmap='hot')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # display reconstructed images
        ax = plt.subplot(2, n, i + n + j*n*2 + 1)
        plt.imshow(np.rot90(np.fliplr(X_reconstructed_mu[i+j*n].reshape(resolution ,resolution ))),cmap='hot')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
plt.show()
| [
"tensorflow.python.framework.ops.disable_eager_execution",
"numpy.moveaxis",
"numpy.random.seed",
"tensorflow.keras.layers.Reshape",
"scipy.io.loadmat",
"tensorflow.keras.layers.Dense",
"sklearn.preprocessing.MinMaxScaler",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.random.nor... | [((766, 791), 'tensorflow.python.framework.ops.disable_eager_execution', 'disable_eager_execution', ([], {}), '()\n', (789, 791), False, 'from tensorflow.python.framework.ops import disable_eager_execution\n'), ((829, 857), 'scipy.io.loadmat', 'loadmat', (['"""digit69_28x28.mat"""'], {}), "('digit69_28x28.mat')\n", (836, 857), False, 'from scipy.io import savemat, loadmat\n'), ((2284, 2332), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (2310, 2332), False, 'from sklearn import preprocessing\n'), ((3259, 3279), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (3273, 3279), True, 'import numpy as np\n'), ((3741, 3771), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'original_img_size'}), '(shape=original_img_size)\n', (3746, 3771), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((3776, 3794), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(D2,)'}), '(shape=(D2,))\n', (3781, 3794), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((3802, 3820), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(D2,)'}), '(shape=(D2,))\n', (3807, 3820), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((3831, 3849), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(D2,)'}), '(shape=(D2,))\n', (3836, 3849), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((5015, 5057), 'tensorflow.keras.layers.Dense', 'Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (5020, 5057), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((5077, 5120), 'tensorflow.keras.layers.Dense', 'Dense', (['(filters * 14 * 14)'], {'activation': '"""relu"""'}), "(filters * 14 * 14, activation='relu')\n", (5082, 
5120), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((5297, 5322), 'tensorflow.keras.layers.Reshape', 'Reshape', (['output_shape[1:]'], {}), '(output_shape[1:])\n', (5304, 5322), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((5342, 5438), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['filters'], {'kernel_size': 'num_conv', 'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(filters, kernel_size=num_conv, padding='same', strides=1,\n activation='relu')\n", (5357, 5438), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((5594, 5690), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['filters'], {'kernel_size': 'num_conv', 'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(filters, kernel_size=num_conv, padding='same', strides=1,\n activation='relu')\n", (5609, 5690), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((6009, 6110), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['filters'], {'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters, kernel_size=(3, 3), strides=(2, 2), padding=\n 'valid', activation='relu')\n", (6024, 6110), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((6299, 6369), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['img_chns'], {'kernel_size': '(2)', 'padding': '"""valid"""', 'activation': '"""sigmoid"""'}), "(img_chns, kernel_size=2, padding='valid', activation='sigmoid')\n", (6305, 6369), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((6485, 6552), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['img_chns'], {'kernel_size': '(2)', 'padding': '"""valid"""', 'activation': '"""tanh"""'}), "(img_chns, kernel_size=2, padding='valid', activation='tanh')\n", (6491, 6552), False, 'from tensorflow.keras.layers import 
Conv2D, Conv2DTranspose\n'), ((7939, 7988), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[X, Y, Y_mu, Y_lsgms]', 'outputs': 'X_mu'}), '(inputs=[X, Y, Y_mu, Y_lsgms], outputs=X_mu)\n', (7944, 7988), False, 'from tensorflow.keras.models import Model\n'), ((8002, 8095), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'learning_rate': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=\n 1e-08, decay=0.0)\n', (8017, 8095), False, 'from tensorflow.keras import optimizers\n'), ((8219, 8259), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'X', 'outputs': '[Z_mu, Z_lsgms]'}), '(inputs=X, outputs=[Z_mu, Z_lsgms])\n', (8224, 8259), False, 'from tensorflow.keras.models import Model\n'), ((8328, 8368), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'X', 'outputs': '[X_mu, X_lsgms]'}), '(inputs=X, outputs=[X_mu, X_lsgms])\n', (8333, 8368), False, 'from tensorflow.keras.models import Model\n'), ((8453, 8470), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(K,)'}), '(shape=(K,))\n', (8458, 8470), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((8906, 8951), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'Z_predict', 'outputs': 'X_mu_predict'}), '(inputs=Z_predict, outputs=X_mu_predict)\n', (8911, 8951), False, 'from tensorflow.keras.models import Model\n'), ((9561, 9620), 'scipy.io.savemat', 'savemat', (['"""data.mat"""', "{'Y_train': Y_train, 'Y_test': Y_test}"], {}), "('data.mat', {'Y_train': Y_train, 'Y_test': Y_test})\n", (9568, 9620), False, 'from scipy.io import savemat, loadmat\n'), ((11755, 11804), 'numpy.zeros', 'np.zeros', (['(numTest, img_chns, img_rows, img_cols)'], {}), '((numTest, img_chns, img_rows, img_cols))\n', (11763, 11804), True, 'import numpy as np\n'), ((1335, 1366), 'numpy.reshape', 'np.reshape', (['X_test[0]', '(28, 28)'], {}), '(X_test[0], (28, 
28))\n', (1345, 1366), True, 'import numpy as np\n'), ((1373, 1404), 'numpy.reshape', 'np.reshape', (['X_test[1]', '(28, 28)'], {}), '(X_test[1], (28, 28))\n', (1383, 1404), True, 'import numpy as np\n'), ((1411, 1442), 'numpy.reshape', 'np.reshape', (['X_test[2]', '(28, 28)'], {}), '(X_test[2], (28, 28))\n', (1421, 1442), True, 'import numpy as np\n'), ((1449, 1480), 'numpy.reshape', 'np.reshape', (['X_test[3]', '(28, 28)'], {}), '(X_test[3], (28, 28))\n', (1459, 1480), True, 'import numpy as np\n'), ((1493, 1525), 'numpy.reshape', 'np.reshape', (['X_train[0]', '(28, 28)'], {}), '(X_train[0], (28, 28))\n', (1503, 1525), True, 'import numpy as np\n'), ((1537, 1569), 'numpy.reshape', 'np.reshape', (['X_train[1]', '(28, 28)'], {}), '(X_train[1], (28, 28))\n', (1547, 1569), True, 'import numpy as np\n'), ((1581, 1613), 'numpy.reshape', 'np.reshape', (['X_train[2]', '(28, 28)'], {}), '(X_train[2], (28, 28))\n', (1591, 1613), True, 'import numpy as np\n'), ((1625, 1657), 'numpy.reshape', 'np.reshape', (['X_train[3]', '(28, 28)'], {}), '(X_train[3], (28, 28))\n', (1635, 1657), True, 'import numpy as np\n'), ((3493, 3520), 'tensorflow.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), '()\n', (3518, 3520), False, 'from tensorflow.keras import backend\n'), ((3859, 3952), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['img_chns'], {'kernel_size': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""en_conv_1"""'}), "(img_chns, kernel_size=(2, 2), padding='same', activation='relu',\n name='en_conv_1')\n", (3865, 3952), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((3993, 4101), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters'], {'kernel_size': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""', 'strides': '(2, 2)', 'name': '"""en_conv_2"""'}), "(filters, kernel_size=(2, 2), padding='same', activation='relu',\n strides=(2, 2), name='en_conv_2')\n", (3999, 4101), False, 'from 
tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((4163, 4268), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'num_conv', 'padding': '"""same"""', 'activation': '"""relu"""', 'strides': '(1)', 'name': '"""en_conv_3"""'}), "(filters, kernel_size=num_conv, padding='same', activation='relu',\n strides=1, name='en_conv_3')\n", (4169, 4268), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((4330, 4435), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'num_conv', 'padding': '"""same"""', 'activation': '"""relu"""', 'strides': '(1)', 'name': '"""en_conv_4"""'}), "(filters, kernel_size=num_conv, padding='same', activation='relu',\n strides=1, name='en_conv_4')\n", (4336, 4435), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose\n'), ((4495, 4504), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4502, 4504), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((4522, 4583), 'tensorflow.keras.layers.Dense', 'Dense', (['intermediate_dim'], {'activation': '"""relu"""', 'name': '"""en_dense_5"""'}), "(intermediate_dim, activation='relu', name='en_dense_5')\n", (4527, 4583), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((4598, 4620), 'tensorflow.keras.layers.Dense', 'Dense', (['K'], {'name': '"""en_mu"""'}), "(K, name='en_mu')\n", (4603, 4620), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((4639, 4662), 'tensorflow.keras.layers.Dense', 'Dense', (['K'], {'name': '"""en_var"""'}), "(K, name='en_var')\n", (4644, 4662), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), ((4874, 4909), 'tensorflow.keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(K,)'}), '(sampling, output_shape=(K,))\n', (4880, 4909), False, 'from tensorflow.keras.layers import Input, Dense, Lambda, Flatten, Reshape\n'), 
((5125, 5152), 'tensorflow.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), '()\n', (5150, 5152), False, 'from tensorflow.keras import backend\n'), ((5830, 5857), 'tensorflow.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), '()\n', (5855, 5857), False, 'from tensorflow.keras import backend\n'), ((7150, 7172), 'tensorflow.keras.backend.flatten', 'backend.flatten', (['lsgms'], {}), '(lsgms)\n', (7165, 7172), False, 'from tensorflow.keras import backend\n'), ((7454, 7472), 'tensorflow.keras.backend.flatten', 'backend.flatten', (['X'], {}), '(X)\n', (7469, 7472), False, 'from tensorflow.keras import backend\n'), ((7484, 7505), 'tensorflow.keras.backend.flatten', 'backend.flatten', (['X_mu'], {}), '(X_mu)\n', (7499, 7505), False, 'from tensorflow.keras import backend\n'), ((7833, 7867), 'tensorflow.keras.backend.mean', 'backend.mean', (['(Lp + 10000 * Lx + Ly)'], {}), '(Lp + 10000 * Lx + Ly)\n', (7845, 7867), False, 'from tensorflow.keras import backend\n'), ((10049, 10061), 'numpy.mat', 'np.mat', (['Z_mu'], {}), '(Z_mu)\n', (10055, 10061), True, 'import numpy as np\n'), ((10090, 10105), 'numpy.exp', 'np.exp', (['Z_lsgms'], {}), '(Z_lsgms)\n', (10096, 10105), True, 'import numpy as np\n'), ((11310, 11345), 'numpy.multiply', 'np.multiply', (['gamma_temp', 'gamma_temp'], {}), '(gamma_temp, gamma_temp)\n', (11321, 11345), True, 'import numpy as np\n'), ((11590, 11625), 'numpy.array', 'np.array', (['(Z_mu * B_mu + R_mu * H_mu)'], {}), '(Z_mu * B_mu + R_mu * H_mu)\n', (11598, 11625), True, 'import numpy as np\n'), ((12205, 12248), 'numpy.zeros', 'np.zeros', (['(1, img_chns, img_rows, img_cols)'], {}), '((1, img_chns, img_rows, img_cols))\n', (12213, 12248), True, 'import numpy as np\n'), ((12851, 12878), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 2)'}), '(figsize=(12, 2))\n', (12861, 12878), True, 'import matplotlib.pyplot as plt\n'), ((13456, 13466), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(13464, 13466), True, 'import matplotlib.pyplot as plt\n'), ((7066, 7083), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (7072, 7083), True, 'import numpy as np\n'), ((7630, 7666), 'tensorflow.keras.metrics.binary_crossentropy', 'metrics.binary_crossentropy', (['X', 'X_mu'], {}), '(X, X_mu)\n', (7657, 7666), False, 'from tensorflow.keras import metrics\n'), ((9431, 9466), 'numpy.array', 'np.array', (['(Z_mu * B_mu + R_mu * H_mu)'], {}), '(Z_mu * B_mu + R_mu * H_mu)\n', (9439, 9466), True, 'import numpy as np\n'), ((12317, 12352), 'numpy.random.normal', 'np.random.normal', (['(0)', 'epsilon_std', '(1)'], {}), '(0, epsilon_std, 1)\n', (12333, 12352), True, 'import numpy as np\n'), ((12562, 12600), 'numpy.moveaxis', 'np.moveaxis', (['x_reconstructed_mu', '(-1)', '(1)'], {}), '(x_reconstructed_mu, -1, 1)\n', (12573, 12600), True, 'import numpy as np\n'), ((12953, 12989), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'n', '(i + j * n * 2 + 1)'], {}), '(2, n, i + j * n * 2 + 1)\n', (12964, 12989), True, 'import matplotlib.pyplot as plt\n'), ((13220, 13260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'n', '(i + n + j * n * 2 + 1)'], {}), '(2, n, i + n + j * n * 2 + 1)\n', (13231, 13260), True, 'import matplotlib.pyplot as plt\n'), ((4838, 4858), 'tensorflow.keras.backend.exp', 'backend.exp', (['Z_lsgms'], {}), '(Z_lsgms)\n', (4849, 4858), False, 'from tensorflow.keras import backend\n'), ((8990, 9021), 'numpy.random.random', 'random.random', ([], {'size': '(numTrn, K)'}), '(size=(numTrn, K))\n', (9003, 9021), False, 'from numpy import random\n'), ((9055, 9082), 'numpy.random.random', 'random.random', ([], {'size': '(K, D2)'}), '(size=(K, D2))\n', (9068, 9082), False, 'from numpy import random\n'), ((9116, 9147), 'numpy.random.random', 'random.random', ([], {'size': '(numTrn, C)'}), '(size=(numTrn, C))\n', (9129, 9147), False, 'from numpy import random\n'), ((9184, 9193), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (9190, 9193), True, 
'import numpy as np\n'), ((9230, 9257), 'numpy.random.random', 'random.random', ([], {'size': '(C, D2)'}), '(size=(C, D2))\n', (9243, 9257), False, 'from numpy import random\n'), ((9294, 9303), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (9300, 9303), True, 'import numpy as np\n'), ((10197, 10206), 'numpy.eye', 'np.eye', (['K'], {}), '(K)\n', (10203, 10206), True, 'import numpy as np\n'), ((10293, 10308), 'numpy.mat', 'np.mat', (['Y_train'], {}), '(Y_train)\n', (10299, 10308), True, 'import numpy as np\n'), ((10492, 10507), 'numpy.mat', 'np.mat', (['Y_train'], {}), '(Y_train)\n', (10498, 10507), True, 'import numpy as np\n'), ((11249, 11264), 'numpy.mat', 'np.mat', (['Y_train'], {}), '(Y_train)\n', (11255, 11264), True, 'import numpy as np\n'), ((11663, 11684), 'numpy.ones', 'np.ones', (['(numTrn, D2)'], {}), '((numTrn, D2))\n', (11670, 11684), True, 'import numpy as np\n'), ((11865, 11875), 'numpy.eye', 'np.eye', (['D2'], {}), '(D2)\n', (11871, 11875), True, 'import numpy as np\n'), ((7577, 7597), 'tensorflow.keras.backend.exp', 'backend.exp', (['Z_lsgms'], {}), '(Z_lsgms)\n', (7588, 7597), False, 'from tensorflow.keras import backend\n'), ((9518, 9539), 'numpy.ones', 'np.ones', (['(numTrn, D2)'], {}), '((numTrn, D2))\n', (9525, 9539), True, 'import numpy as np\n'), ((10602, 10611), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (10608, 10611), True, 'import numpy as np\n'), ((4766, 4785), 'tensorflow.keras.backend.shape', 'backend.shape', (['Z_mu'], {}), '(Z_mu)\n', (4779, 4785), False, 'from tensorflow.keras import backend\n'), ((7251, 7269), 'tensorflow.keras.backend.exp', 'backend.exp', (['lsgms'], {}), '(lsgms)\n', (7262, 7269), False, 'from tensorflow.keras import backend\n'), ((7394, 7412), 'tensorflow.keras.backend.exp', 'backend.exp', (['lsgms'], {}), '(lsgms)\n', (7405, 7412), False, 'from tensorflow.keras import backend\n'), ((7554, 7574), 'tensorflow.keras.backend.square', 'backend.square', (['Z_mu'], {}), '(Z_mu)\n', (7568, 7574), False, 'from 
tensorflow.keras import backend\n'), ((10416, 10425), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (10422, 10425), True, 'import numpy as np\n'), ((10677, 10692), 'numpy.mat', 'np.mat', (['Y_train'], {}), '(Y_train)\n', (10683, 10692), True, 'import numpy as np\n'), ((12073, 12082), 'numpy.eye', 'np.eye', (['K'], {}), '(K)\n', (12079, 12082), True, 'import numpy as np\n'), ((12388, 12409), 'numpy.diag', 'np.diag', (['z_sigma_test'], {}), '(z_sigma_test)\n', (12395, 12409), True, 'import numpy as np\n'), ((10815, 10837), 'numpy.diag', 'np.diag', (['(B_mu.T * B_mu)'], {}), '(B_mu.T * B_mu)\n', (10822, 10837), True, 'import numpy as np\n'), ((11037, 11059), 'numpy.diag', 'np.diag', (['(H_mu.T * H_mu)'], {}), '(H_mu.T * H_mu)\n', (11044, 11059), True, 'import numpy as np\n'), ((11913, 11922), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (11919, 11922), True, 'import numpy as np\n'), ((12136, 12150), 'numpy.mat', 'np.mat', (['Y_test'], {}), '(Y_test)\n', (12142, 12150), True, 'import numpy as np\n'), ((12167, 12179), 'numpy.mat', 'np.mat', (['Z_mu'], {}), '(Z_mu)\n', (12173, 12179), True, 'import numpy as np\n')] |
import datetime
import os
import warnings
import xml.etree.ElementTree as ET
import zipfile
import numpy as np
import shapely
import isce3
from nisar.workflows.stage_dem import check_dateline
from s1reader.s1_burst_slc import Doppler, Sentinel1BurstSlc
# TODO evaluate if it make sense to combine below into a class
def as_datetime(t_str, fmt = "%Y-%m-%dT%H:%M:%S.%f"):
    '''Convert a time string into a datetime.datetime object.

    Parameters:
    ----------
    t_str : string
        Time string to be parsed. (e.g., "2021-12-10T12:00:0.0")
    fmt : string
        Format of string provided. Defaults to az time format found in annotation XML.
        (e.g., "%Y-%m-%dT%H:%M:%S.%f").

    Returns:
    ------
    _ : datetime.datetime
        datetime.datetime object parsed from given time string.
    '''
    parsed = datetime.datetime.strptime(t_str, fmt)
    return parsed
def parse_polynomial_element(elem, poly_name):
    '''Parse an annotation polynomial element into a (time, Poly1d) pair.

    Parameters
    ----------
    elem : Element
        Element containing coefficients.
    poly_name : string
        Name of element containing azimuth time and polynomial coefficients.

    Returns:
    ------
    _ : tuple
        Tuple of time and Poly1d constructed from azimuth time and coefficients.
    '''
    azimuth_time = as_datetime(elem.find('azimuthTime').text)
    # scale t0 (seconds) by c/2 so the polynomial origin is a range in meters
    half_speed = 0.5 * isce3.core.speed_of_light
    range_origin = float(elem.find('t0').text) * half_speed
    coefficients = [float(c) for c in elem.find(poly_name).text.split()]
    return (azimuth_time, isce3.core.Poly1d(coefficients, range_origin, half_speed))
def get_nearest_polynomial(t_mid, time_poly_pair):
    '''Find polynomial closest to given sensing mid and return associated poly1d.

    Parameters
    ----------
    t_mid : datetime.datetime
        Middle of the burst
    time_poly_pair: list(tuple)
        List of tuples of time and associated Poly1d

    Returns:
    ------
    nearest_poly: isce3.core.Poly1d
        Polynomial associated with the time nearest to t_mid.

    Raises:
    ------
    ValueError
        If time_poly_pair is empty.
    '''
    if not time_poly_pair:
        raise ValueError("time_poly_pair must not be empty")
    # Scan all pairs instead of breaking as soon as the time difference grows:
    # the early-break version assumed a time-sorted list and could return a
    # non-nearest polynomial for unsorted input. For sorted input the result
    # is identical, and both approaches are O(n).
    nearest = min(time_poly_pair,
                  key=lambda pair: abs((t_mid - pair[0]).total_seconds()))
    return nearest[1]
def doppler_poly1d_to_lut2d(doppler_poly1d, starting_slant_range,
                            slant_range_res, shape, az_time_interval):
    '''Convert doppler poly1d to LUT2d.

    Parameters
    ----------
    doppler_poly1d : poly1d
        Poly1d object to be convereted.
    starting_slant_range : float
        Starting slant range of the burst.
    slant_range_res : float
        Slant-range pixel spacing of the burst.
    shape : tuple
        Tuples holding number of lines and samples of the burst.
    az_time_interval : float
        Azimth time interval of the burst.

    Returns:
    ------
    _ : LUT2d
        LUT2d calculated from poly1d.
    '''
    n_lines, n_samples = shape
    # slant-range coordinate of every sample in the grid
    slant_ranges = starting_slant_range + slant_range_res * np.arange(n_samples)
    # the LUT2d needs azimuth coordinates even though the doppler has no
    # azimuth dependency; offset by +2 days in seconds (reference epoch)
    ref_epoch_offset = 2 * 24 * 3600
    az_times = ref_epoch_offset + np.array([0, n_lines * az_time_interval])
    # evaluate doppler frequency at every slant range
    doppler_freqs = np.array([doppler_poly1d.eval(r) for r in slant_ranges])
    # stack the same frequency row twice so both azimuth coords are covered
    return isce3.core.LUT2d(slant_ranges, az_times,
                            np.vstack((doppler_freqs, doppler_freqs)))
def get_burst_orbit(sensing_start, sensing_stop, osv_list: ET.Element):
    '''Init and return ISCE3 orbit covering the burst sensing window.

    Parameters:
    -----------
    sensing_start : datetime.datetime
        Sensing start of burst; taken from azimuth time
    sensing_stop : datetime.datetime
        Sensing stop of burst
    osv_list : xml.etree.ElementTree.Element
        ElementTree containing orbit state vectors

    Returns:
    --------
    _ : isce3.core.Orbit
        Orbit built from the state vectors falling inside a padded
        [sensing_start, sensing_stop] window.
    '''
    fmt = "UTC=%Y-%m-%dT%H:%M:%S.%f"
    orbit_sv = []
    # add start & end padding to ensure sufficient number of orbit points
    pad = datetime.timedelta(seconds=60)
    for osv in osv_list:
        t_orbit = datetime.datetime.strptime(osv[1].text, fmt)
        # assumes OSVs are time-sorted, so stop once past the padded window
        if t_orbit > sensing_stop + pad:
            break
        if t_orbit > sensing_start - pad:
            # children 4-6 hold position, 7-9 velocity
            pos = [float(osv[i].text) for i in range(4, 7)]
            vel = [float(osv[i].text) for i in range(7, 10)]
            orbit_sv.append(isce3.core.StateVector(isce3.core.DateTime(t_orbit),
                                                   pos, vel))
    # use list of stateVectors to init and return isce3.core.Orbit
    time_delta = datetime.timedelta(days=2)
    ref_epoch = isce3.core.DateTime(sensing_start - time_delta)
    return isce3.core.Orbit(orbit_sv, ref_epoch)
def calculate_centroid(lons, lats):
    '''Calculate burst centroid from boundary longitude/latitude points.

    Parameters:
    -----------
    lons : list
        Burst longitudes (degrees)
    lats : list
        Burst latitudes (degrees)

    Returns:
    --------
    _ : shapely.geometry.Point
        Burst center in degrees longitude and latitude
    '''
    geocent = isce3.core.Geocent()
    # boundary lon/lat (degrees) -> geocentric XYZ (radians in)
    xyz_pts = [geocent.forward([np.deg2rad(lon), np.deg2rad(lat), 0])
               for lon, lat in zip(lons, lats)]
    # centroid is the mean of the geocentric corner points
    mean_xyz = np.mean(np.array(xyz_pts), axis=0)
    # back to lon/lat; height component is discarded
    llh = [np.rad2deg(v) for v in geocent.inverse(mean_xyz)]
    return shapely.geometry.Point(llh[:2])
def get_burst_centers_and_boundaries(tree):
    '''Parse grid points list and calculate burst center lat and lon

    Parameters:
    -----------
    tree : Element
        Element containing geolocation grid points.

    Returns:
    --------
    center_pts : list
        List of burst centroids as shapely Points
    boundary_pts : list
        List of burst boundaries as shapely Polygons
    '''
    grid_pt_list = tree.find('geolocationGrid/geolocationGridPointList')
    n_grid_pts = int(grid_pt_list.attrib['count'])
    lines = np.empty(n_grid_pts)
    pixels = np.empty(n_grid_pts)
    lats = np.empty(n_grid_pts)
    lons = np.empty(n_grid_pts)
    for idx, grid_pt in enumerate(grid_pt_list):
        lines[idx] = int(grid_pt[2].text)
        # pixels are parsed for completeness but not used below
        pixels[idx] = int(grid_pt[3].text)
        lats[idx] = float(grid_pt[4].text)
        lons[idx] = float(grid_pt[5].text)
    # consecutive unique line numbers delimit individual bursts
    unique_line_indices = np.unique(lines)
    center_pts = []
    boundary_pts = []
    for ln0, ln1 in zip(unique_line_indices[:-1], unique_line_indices[1:]):
        # masks selecting the grid points on the burst's first/last line
        mask0 = lines == ln0
        mask1 = lines == ln1
        # reverse order of 2nd set of points so plots of boundaries
        # are not connected by a diagonal line
        burst_lons = np.concatenate((lons[mask0], lons[mask1][::-1]))
        burst_lats = np.concatenate((lats[mask0], lats[mask1][::-1]))
        center_pts.append(calculate_centroid(burst_lons, burst_lats))
        poly = shapely.geometry.Polygon(zip(burst_lons, burst_lats))
        boundary_pts.append(check_dateline(poly))
    return center_pts, boundary_pts
def burst_from_xml(annotation_path: str, orbit_path: str, tiff_path: str,
                   iw2_annotation_path: str, open_method=open):
    '''Parse bursts in Sentinel-1 annotation XML.

    Parameters:
    -----------
    annotation_path : str
        Path to Sentinel-1 annotation XML file of specific subswath and
        polarization.
    orbit_path : str
        Path the orbit file.
    tiff_path : str
        Path to tiff file holding Sentinel-1 SLCs.
    iw2_annotation_path : str
        Path to Sentinel-1 annotation XML file of IW2 subswath.
    open_method : function
        Function used to open annotation file.

    Returns:
    --------
    bursts : list
        List of Sentinel1BurstSlc objects found in annotation XML.
    '''
    # platform/subswath/polarization are encoded in the annotation file name
    # (hyphen-separated tokens, e.g. "s1a-iw1-slc-vv-...")
    _, tail = os.path.split(annotation_path)
    platform_id, subswath_id, _, pol = [x.upper() for x in tail.split('-')[:4]]
    # For IW mode, one burst has a duration of ~2.75 seconds and a burst
    # overlap of approximately ~0.4 seconds.
    # https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/product-types-processing-levels/level-1
    # Additional precision calculated from averaging the differences between
    # burst sensing starts in prototyping test data
    burst_interval = 2.758277
    # Nearly all metadata loaded here is common to all bursts in annotation XML
    with open_method(annotation_path, 'r') as f:
        tree = ET.parse(f)
    # radar and range-sampling parameters
    product_info_element = tree.find('generalAnnotation/productInformation')
    azimuth_steer_rate = np.radians(float(product_info_element.find('azimuthSteeringRate').text))
    radar_freq = float(product_info_element.find('radarFrequency').text)
    range_sampling_rate = float(product_info_element.find('rangeSamplingRate').text)
    orbit_direction = product_info_element.find('pass').text
    # image timing parameters
    image_info_element = tree.find('imageAnnotation/imageInformation')
    azimuth_time_interval = float(image_info_element.find('azimuthTimeInterval').text)
    slant_range_time = float(image_info_element.find('slantRangeTime').text)
    ascending_node_time = as_datetime(image_info_element.find('ascendingNodeTime').text)
    # downlink parameters
    downlink_element = tree.find('generalAnnotation/downlinkInformationList/downlinkInformation')
    prf_raw_data = float(downlink_element.find('prf').text)
    rank = int(downlink_element.find('downlinkValues/rank').text)
    range_chirp_ramp_rate = float(downlink_element.find('downlinkValues/txPulseRampRate').text)
    # burst grid dimensions
    n_lines = int(tree.find('swathTiming/linesPerBurst').text)
    n_samples = int(tree.find('swathTiming/samplesPerBurst').text)
    # azimuth FM rate and doppler centroid polynomial lists; the entry
    # nearest each burst's sensing mid is selected in the loop below
    az_rate_list_element = tree.find('generalAnnotation/azimuthFmRateList')
    poly_name = 'azimuthFmRatePolynomial'
    az_fm_rate_list = [parse_polynomial_element(x, poly_name) for x in az_rate_list_element]
    doppler_list_element = tree.find('dopplerCentroid/dcEstimateList')
    poly_name = 'dataDcPolynomial'
    doppler_list = [parse_polynomial_element(x, poly_name) for x in doppler_list_element]
    # range processing window parameters
    rng_processing_element = tree.find('imageAnnotation/processingInformation/swathProcParamsList/swathProcParams/rangeProcessing')
    rng_processing_bandwidth = float(rng_processing_element.find('processingBandwidth').text)
    range_window_type = str(rng_processing_element.find('windowType').text)
    range_window_coeff = float(rng_processing_element.find('windowCoefficient').text)
    # map the absolute orbit number onto 1-175 relative track numbering;
    # the offset differs between the S1A and S1B platforms
    orbit_number = int(tree.find('adsHeader/absoluteOrbitNumber').text)
    orbit_number_offset = 73 if platform_id == 'S1A' else 202
    track_number = (orbit_number - orbit_number_offset) % 175 + 1
    center_pts, boundary_pts = get_burst_centers_and_boundaries(tree)
    # derived radar geometry quantities
    wavelength = isce3.core.speed_of_light / radar_freq
    starting_range = slant_range_time * isce3.core.speed_of_light / 2
    range_pxl_spacing = isce3.core.speed_of_light / (2 * range_sampling_rate)
    # calculate the range at mid swath (mid of SM swath, mid of IW2 or mid of EW3)
    with open_method(iw2_annotation_path, 'r') as iw2_f:
        iw2_tree = ET.parse(iw2_f)
    iw2_slant_range_time = float(iw2_tree.find('imageAnnotation/imageInformation/slantRangeTime').text)
    iw2_n_samples = int(iw2_tree.find('swathTiming/samplesPerBurst').text)
    iw2_starting_range = iw2_slant_range_time * isce3.core.speed_of_light / 2
    iw2_mid_range = iw2_starting_range + 0.5 * iw2_n_samples * range_pxl_spacing
    # find orbit state vectors in 'Data_Block/List_of_OSVs'
    orbit_tree = ET.parse(orbit_path)
    osv_list = orbit_tree.find('Data_Block/List_of_OSVs')
    # load individual burst
    burst_list_elements = tree.find('swathTiming/burstList')
    n_bursts = int(burst_list_elements.attrib['count'])
    bursts = [[]] * n_bursts
    sensing_starts = [[]] * n_bursts
    sensing_times = [[]] * n_bursts
    for i, burst_list_element in enumerate(burst_list_elements):
        # get burst timing
        sensing_start = as_datetime(burst_list_element.find('azimuthTime').text)
        sensing_starts[i] = sensing_start
        sensing_times[i] = as_datetime(burst_list_element.find('sensingTime').text)
        dt = sensing_times[i] - ascending_node_time
        # burst index = elapsed time since ascending node crossing divided by
        # the burst interval.
        # NOTE(review): dt.seconds ignores whole days in the delta;
        # dt.total_seconds() may be intended -- confirm
        id_burst = int((dt.seconds + dt.microseconds / 1e6) // burst_interval)
        # choose nearest azimuth FM rate
        d_seconds = 0.5 * (n_lines - 1) * azimuth_time_interval
        sensing_mid = sensing_start + datetime.timedelta(seconds=d_seconds)
        az_fm_rate = get_nearest_polynomial(sensing_mid, az_fm_rate_list)
        # choose nearest doppler
        poly1d = get_nearest_polynomial(sensing_mid, doppler_list)
        lut2d = doppler_poly1d_to_lut2d(poly1d, starting_range,
                                        range_pxl_spacing, (n_lines, n_samples),
                                        azimuth_time_interval)
        doppler = Doppler(poly1d, lut2d)
        # get orbit from state vector list/element tree
        # NOTE(review): duration uses n_samples with the azimuth time
        # interval; n_lines would be expected for an azimuth duration -- confirm
        sensing_duration = datetime.timedelta(
            seconds=n_samples * azimuth_time_interval)
        orbit = get_burst_orbit(sensing_start, sensing_start + sensing_duration,
                                osv_list)
        # determine burst offset and dimensions
        # TODO move to own method
        first_valid_samples = [int(val) for val in burst_list_element.find('firstValidSample').text.split()]
        last_valid_samples = [int(val) for val in burst_list_element.find('lastValidSample').text.split()]
        # lines whose firstValidSample is negative are treated as invalid
        first_valid_line = [x >= 0 for x in first_valid_samples].index(True)
        n_valid_lines = [x >=0 for x in first_valid_samples].count(True)
        last_line = first_valid_line + n_valid_lines - 1
        # intersect the valid sample spans of the first and last valid lines
        first_valid_sample = max(first_valid_samples[first_valid_line],
                                 first_valid_samples[last_line])
        last_sample = min(last_valid_samples[first_valid_line],
                          last_valid_samples[last_line])
        burst_id = f't{track_number}_{subswath_id.lower()}_b{id_burst}'
        bursts[i] = Sentinel1BurstSlc(sensing_start, radar_freq, wavelength,
                                      azimuth_steer_rate, azimuth_time_interval,
                                      slant_range_time, starting_range, iw2_mid_range,
                                      range_sampling_rate, range_pxl_spacing,
                                      (n_lines, n_samples), az_fm_rate, doppler,
                                      rng_processing_bandwidth, pol, burst_id,
                                      platform_id, center_pts[i],
                                      boundary_pts[i], orbit, orbit_direction,
                                      tiff_path, i, first_valid_sample,
                                      last_sample, first_valid_line, last_line,
                                      range_window_type, range_window_coeff,
                                      rank, prf_raw_data, range_chirp_ramp_rate)
    return bursts
def _is_zip_annotation_xml(path: str, id_str: str) -> bool:
''' Check if path is annotation XMl and not calibration or rfi related
path : str
Path from SAFE zip to be checked
id_str : str
Subswath and polarization to be found. e.g. iw1_slc_vv
Returns:
--------
_ : bool
Whether or not given path is desired annotation XML
'''
# break path into tokens by '/'
tokens = path.split('/')
# check if 2nd to last path token, directory where file resides, is "annotation"
# check if last path token, file name, contains ID string
if tokens[-2] == 'annotation' and id_str in tokens[-1]:
return True
return False
def load_bursts(path: str, orbit_path: str, swath_num: int, pol: str='vv',
                burst_ids: list[str]=None):
    '''Find bursts in a Sentinel-1 zip file or a SAFE structured directory.

    Parameters:
    -----------
    path : str
        Path to Sentinel-1 zip file or SAFE directory
    orbit_path : str
        Path the orbit file.
    swath_num : int
        Integer of subswath of desired burst. {1, 2, 3}
    pol : str
        Polarization of desired burst. {hh, vv, hv, vh}
    burst_ids : list[str]
        List of burst IDs for which their Sentinel1BurstSlc objects will be
        returned. Default of None returns all bursts. Empty list returned if
        none of the burst IDs are found. If not all burst IDs are found, a list
        containing found bursts will be returned.

    Returns:
    --------
    bursts : list
        List of Sentinel1BurstSlc objects found in annotation XML.

    Raises:
    -------
    ValueError
        If swath_num or pol is invalid, or path is neither file nor directory.
    FileNotFoundError
        If path does not exist.
    '''
    if swath_num < 1 or swath_num > 3:
        # reworded: the old message "swath_num not <1 or >3" read as the
        # opposite of the actual condition
        raise ValueError(f"swath_num {swath_num} out of bounds; expected 1, 2 or 3")
    if burst_ids is None:
        burst_ids = []
    # ensure burst IDs is a list
    if not isinstance(burst_ids, list):
        burst_ids = [burst_ids]
    # lower case polarity to be consistent with file naming convention
    pol = pol.lower()
    pols = ['vv', 'vh', 'hh', 'hv']
    if pol not in pols:
        raise ValueError(f"polarization not in {pols}")
    id_str = f'iw{swath_num}-slc-{pol}'
    # dispatch on zip file vs SAFE directory
    if not os.path.exists(path):
        raise FileNotFoundError(f'{path} not found')
    elif os.path.isdir(path):
        bursts = _burst_from_safe_dir(path, id_str, orbit_path)
    elif os.path.isfile(path):
        bursts = _burst_from_zip(path, id_str, orbit_path)
    else:
        raise ValueError(f'{path} is unsupported')
    if burst_ids:
        # keep only requested bursts; warn about any that were not found
        bursts = [b for b in bursts if b.burst_id in burst_ids]
        burst_ids_found = {b.burst_id for b in bursts}
        warnings.simplefilter("always")
        set_burst_ids = set(burst_ids)
        if not burst_ids_found:
            # bug fix: original literal lacked the f-prefix, so the text
            # "{swath_num}" was emitted verbatim instead of the value
            warnings.warn(f"None of provided burst IDs found in sub-swath {swath_num}")
        elif burst_ids_found != set_burst_ids:
            diff = set_burst_ids.difference(burst_ids_found)
            warn_str = 'Not all burst IDs found. \n '
            warn_str += f'Not found: {diff} . \n'
            warn_str += f'Found bursts: {burst_ids_found}'
            warnings.warn(warn_str)
    return bursts
def _burst_from_zip(zip_path: str, id_str: str, orbit_path: str):
    '''Find bursts in a Sentinel-1 zip file.

    Parameters:
    -----------
    zip_path : str
        Path to zip file.
    id_str: str
        Identifcation of desired burst. Format: iw[swath_num]-slc-[pol]
    orbit_path : str
        Path the orbit file.

    Returns:
    --------
    bursts : list
        List of Sentinel1BurstSlc objects found in annotation XML.
    '''
    with zipfile.ZipFile(zip_path, 'r') as z_file:
        members = z_file.namelist()
        # annotation XML for the requested subswath/polarization
        annotation_matches = [m for m in members
                              if _is_zip_annotation_xml(m, id_str)]
        if not annotation_matches:
            raise ValueError(f"burst {id_str} not in SAFE: {zip_path}")
        f_annotation = annotation_matches[0]
        # annotation XML for IW2 (same pol as the requested ID)
        iw2_id_str = f'iw2-{id_str[4:]}'
        iw2_matches = [m for m in members
                       if _is_zip_annotation_xml(m, iw2_id_str)]
        if not iw2_matches:
            raise ValueError(f"burst {iw2_id_str} not in SAFE: {zip_path}")
        iw2_f_annotation = iw2_matches[0]
        # measurement tiff; /vsizip/ prefix allows reading without extraction
        tiff_matches = [m for m in members
                        if 'measurement' in m and id_str in m and 'tiff' in m]
        f_tiff = f'/vsizip/{zip_path}/{tiff_matches[0]}' if tiff_matches else ''
        # z_file.open lets burst_from_xml read annotation members in-place
        bursts = burst_from_xml(f_annotation, orbit_path, f_tiff,
                                iw2_f_annotation, z_file.open)
    return bursts
def _burst_from_safe_dir(safe_dir_path: str, id_str: str, orbit_path: str):
    '''Find bursts in a Sentinel-1 SAFE structured directory.

    Parameters:
    -----------
    safe_dir_path : str
        Path to the SAFE directory.
    id_str: str
        Identification of desired burst. Format: iw[swath_num]-slc-[pol]
    orbit_path : str
        Path to the orbit file.

    Returns:
    --------
    bursts : list
        List of Sentinel1BurstSlc objects found in annotation XML.
    '''
    # annotation XML for the requested sub-swath
    annotation_list = os.listdir(f'{safe_dir_path}/annotation')
    matches = [f for f in annotation_list if id_str in f]
    if not matches:
        raise ValueError(f"burst {id_str} not in SAFE: {safe_dir_path}")
    f_annotation = f'{safe_dir_path}/annotation/{matches[0]}'
    # annotation XML for sub-swath IW2
    iw2_id_str = f'iw2-{id_str[4:]}'
    iw2_matches = [f for f in annotation_list if iw2_id_str in f]
    if not iw2_matches:
        raise ValueError(f"burst {iw2_id_str} not in SAFE: {safe_dir_path}")
    iw2_f_annotation = f'{safe_dir_path}/annotation/{iw2_matches[0]}'
    # measurement GeoTIFF, only if the measurement directory exists
    f_tiff = ''
    if os.path.isdir(f'{safe_dir_path}/measurement'):
        measurement_list = os.listdir(f'{safe_dir_path}/measurement')
        tiffs = [f for f in measurement_list
                 if 'measurement' in f and id_str in f and 'tiff' in f]
        if tiffs:
            f_tiff = f'{safe_dir_path}/measurement/{tiffs[0]}'
    else:
        warnings.warn(f'measurement directory not found in {safe_dir_path}')
    return burst_from_xml(f_annotation, orbit_path, f_tiff, iw2_f_annotation)
| [
"numpy.empty",
"os.path.isfile",
"numpy.arange",
"numpy.unique",
"shapely.geometry.Point",
"warnings.simplefilter",
"os.path.exists",
"isce3.core.Geocent",
"nisar.workflows.stage_dem.check_dateline",
"datetime.timedelta",
"s1reader.s1_burst_slc.Doppler",
"isce3.core.Orbit",
"xml.etree.Elemen... | [((827, 865), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_str', 'fmt'], {}), '(t_str, fmt)\n', (853, 865), False, 'import datetime\n'), ((1560, 1597), 'isce3.core.Poly1d', 'isce3.core.Poly1d', (['coeffs', 'r0', 'half_c'], {}), '(coeffs, r0, half_c)\n', (1577, 1597), False, 'import isce3\n'), ((4634, 4664), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(60)'}), '(seconds=60)\n', (4652, 4664), False, 'import datetime\n'), ((5203, 5229), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (5221, 5229), False, 'import datetime\n'), ((5246, 5293), 'isce3.core.DateTime', 'isce3.core.DateTime', (['(sensing_start - time_delta)'], {}), '(sensing_start - time_delta)\n', (5265, 5293), False, 'import isce3\n'), ((5306, 5343), 'isce3.core.Orbit', 'isce3.core.Orbit', (['orbit_sv', 'ref_epoch'], {}), '(orbit_sv, ref_epoch)\n', (5322, 5343), False, 'import isce3\n'), ((5720, 5740), 'isce3.core.Geocent', 'isce3.core.Geocent', ([], {}), '()\n', (5738, 5740), False, 'import isce3\n'), ((6092, 6132), 'shapely.geometry.Point', 'shapely.geometry.Point', (['llh_centroid[:2]'], {}), '(llh_centroid[:2])\n', (6114, 6132), False, 'import shapely\n'), ((6722, 6742), 'numpy.empty', 'np.empty', (['n_grid_pts'], {}), '(n_grid_pts)\n', (6730, 6742), True, 'import numpy as np\n'), ((6756, 6776), 'numpy.empty', 'np.empty', (['n_grid_pts'], {}), '(n_grid_pts)\n', (6764, 6776), True, 'import numpy as np\n'), ((6788, 6808), 'numpy.empty', 'np.empty', (['n_grid_pts'], {}), '(n_grid_pts)\n', (6796, 6808), True, 'import numpy as np\n'), ((6820, 6840), 'numpy.empty', 'np.empty', (['n_grid_pts'], {}), '(n_grid_pts)\n', (6828, 6840), True, 'import numpy as np\n'), ((7078, 7094), 'numpy.unique', 'np.unique', (['lines'], {}), '(lines)\n', (7087, 7094), True, 'import numpy as np\n'), ((8741, 8771), 'os.path.split', 'os.path.split', (['annotation_path'], {}), '(annotation_path)\n', (8754, 8771), False, 'import os\n'), 
((12578, 12598), 'xml.etree.ElementTree.parse', 'ET.parse', (['orbit_path'], {}), '(orbit_path)\n', (12586, 12598), True, 'import xml.etree.ElementTree as ET\n'), ((21156, 21197), 'os.listdir', 'os.listdir', (['f"""{safe_dir_path}/annotation"""'], {}), "(f'{safe_dir_path}/annotation')\n", (21166, 21197), False, 'import os\n'), ((21809, 21854), 'os.path.isdir', 'os.path.isdir', (['f"""{safe_dir_path}/measurement"""'], {}), "(f'{safe_dir_path}/measurement')\n", (21822, 21854), False, 'import os\n'), ((3667, 3708), 'numpy.array', 'np.array', (['[0, n_lines * az_time_interval]'], {}), '([0, n_lines * az_time_interval])\n', (3675, 3708), True, 'import numpy as np\n'), ((3973, 4002), 'numpy.vstack', 'np.vstack', (['(freq_1d, freq_1d)'], {}), '((freq_1d, freq_1d))\n', (3982, 4002), True, 'import numpy as np\n'), ((4708, 4752), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['osv[1].text', 'fmt'], {}), '(osv[1].text, fmt)\n', (4734, 4752), False, 'import datetime\n'), ((5959, 5972), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (5967, 5972), True, 'import numpy as np\n'), ((6029, 6042), 'numpy.rad2deg', 'np.rad2deg', (['x'], {}), '(x)\n', (6039, 6042), True, 'import numpy as np\n'), ((7635, 7683), 'numpy.concatenate', 'np.concatenate', (['(lons[mask0], lons[mask1][::-1])'], {}), '((lons[mask0], lons[mask1][::-1]))\n', (7649, 7683), True, 'import numpy as np\n'), ((7705, 7753), 'numpy.concatenate', 'np.concatenate', (['(lats[mask0], lats[mask1][::-1])'], {}), '((lats[mask0], lats[mask1][::-1]))\n', (7719, 7753), True, 'import numpy as np\n'), ((7918, 7938), 'nisar.workflows.stage_dem.check_dateline', 'check_dateline', (['poly'], {}), '(poly)\n', (7932, 7938), False, 'from nisar.workflows.stage_dem import check_dateline\n'), ((9393, 9404), 'xml.etree.ElementTree.parse', 'ET.parse', (['f'], {}), '(f)\n', (9401, 9404), True, 'import xml.etree.ElementTree as ET\n'), ((12130, 12145), 'xml.etree.ElementTree.parse', 'ET.parse', (['iw2_f'], {}), '(iw2_f)\n', 
(12138, 12145), True, 'import xml.etree.ElementTree as ET\n'), ((13919, 13941), 's1reader.s1_burst_slc.Doppler', 'Doppler', (['poly1d', 'lut2d'], {}), '(poly1d, lut2d)\n', (13926, 13941), False, 'from s1reader.s1_burst_slc import Doppler, Sentinel1BurstSlc\n'), ((14026, 14087), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(n_samples * azimuth_time_interval)'}), '(seconds=n_samples * azimuth_time_interval)\n', (14044, 14087), False, 'import datetime\n'), ((15085, 15612), 's1reader.s1_burst_slc.Sentinel1BurstSlc', 'Sentinel1BurstSlc', (['sensing_start', 'radar_freq', 'wavelength', 'azimuth_steer_rate', 'azimuth_time_interval', 'slant_range_time', 'starting_range', 'iw2_mid_range', 'range_sampling_rate', 'range_pxl_spacing', '(n_lines, n_samples)', 'az_fm_rate', 'doppler', 'rng_processing_bandwidth', 'pol', 'burst_id', 'platform_id', 'center_pts[i]', 'boundary_pts[i]', 'orbit', 'orbit_direction', 'tiff_path', 'i', 'first_valid_sample', 'last_sample', 'first_valid_line', 'last_line', 'range_window_type', 'range_window_coeff', 'rank', 'prf_raw_data', 'range_chirp_ramp_rate'], {}), '(sensing_start, radar_freq, wavelength, azimuth_steer_rate,\n azimuth_time_interval, slant_range_time, starting_range, iw2_mid_range,\n range_sampling_rate, range_pxl_spacing, (n_lines, n_samples),\n az_fm_rate, doppler, rng_processing_bandwidth, pol, burst_id,\n platform_id, center_pts[i], boundary_pts[i], orbit, orbit_direction,\n tiff_path, i, first_valid_sample, last_sample, first_valid_line,\n last_line, range_window_type, range_window_coeff, rank, prf_raw_data,\n range_chirp_ramp_rate)\n', (15102, 15612), False, 'from s1reader.s1_burst_slc import Doppler, Sentinel1BurstSlc\n'), ((18144, 18164), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (18158, 18164), False, 'import os\n'), ((18228, 18247), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (18241, 18247), False, 'import os\n'), ((18617, 18648), 'warnings.simplefilter', 
'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (18638, 18648), False, 'import warnings\n'), ((19587, 19617), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (19602, 19617), False, 'import zipfile\n'), ((21883, 21925), 'os.listdir', 'os.listdir', (['f"""{safe_dir_path}/measurement"""'], {}), "(f'{safe_dir_path}/measurement')\n", (21893, 21925), False, 'import os\n'), ((22217, 22243), 'warnings.warn', 'warnings.warn', (['warning_str'], {}), '(warning_str)\n', (22230, 22243), False, 'import warnings\n'), ((3427, 3447), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3436, 3447), True, 'import numpy as np\n'), ((13480, 13517), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'd_seconds'}), '(seconds=d_seconds)\n', (13498, 13517), False, 'import datetime\n'), ((18322, 18342), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (18336, 18342), False, 'import os\n'), ((18732, 18806), 'warnings.warn', 'warnings.warn', (['"""None of provided burst IDs found in sub-swath {swath_num}"""'], {}), "('None of provided burst IDs found in sub-swath {swath_num}')\n", (18745, 18806), False, 'import warnings\n'), ((5811, 5826), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (5821, 5826), True, 'import numpy as np\n'), ((5828, 5843), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (5838, 5843), True, 'import numpy as np\n'), ((19090, 19113), 'warnings.warn', 'warnings.warn', (['warn_str'], {}), '(warn_str)\n', (19103, 19113), False, 'import warnings\n'), ((5026, 5054), 'isce3.core.DateTime', 'isce3.core.DateTime', (['t_orbit'], {}), '(t_orbit)\n', (5045, 5054), False, 'import isce3\n')] |
import os
import time
import numpy as np
from scipy import ndimage, misc
import numba as nb
from config.load_yaml import configs, configs_name
from sklearn.mixture import GaussianMixture
from utils import visualization
def get_packet_events(event_txt, mode='TimeFixed'):
    '''
    Split an event stream into packets.

    :param event_txt: The input events .txt file
    :param mode: 'TimeFixed', equal time interval in each packet
                 'EventNumFixed', equal event numbers in each packet
                 Default is 'TimeFixed'
    :return: packet_events: [packet_1, packet_2, ..., packet_i]
             packet_i: [[timestamp_1, x_1, y_1], ..., [timestamp_j, x_j, y_j]]
    '''
    # Load events as [timestamp, x, y]; layout depends on the active config.
    if configs_name in ['iros', 'insight'] or configs_name == 'rpg':
        events = np.genfromtxt(event_txt)[:, :3]
        # rebase timestamps to zero and convert the unit (e.g. to 'us')
        events[:, 0] = configs['time_transfer'] * (events[:, 0] - events[0, 0])
    elif configs_name == 'matlab':
        events = np.loadtxt(event_txt, delimiter=',')
        # matlab export stores [y, x, timestamp]; reorder in place
        events[:, [0, 1, 2]] = events[:, [2, 1, 0]]
        return [events]
    else:
        raise OSError
    if mode == 'EventNumFixed':
        raise NotImplementedError
    if mode != 'TimeFixed':
        return None
    # Slide a window of configs['packet_time'] over the timestamps; the last
    # window absorbs everything from win_start to the end of the stream.
    step = configs['packet_time'] * configs['sliding_time']
    last_ts = events[-1, 0]
    pkt_events = []
    win_start = 0
    win_end = configs['packet_time']
    while win_end < last_ts:
        in_window = (events[:, 0] >= win_start) & (events[:, 0] < win_end)
        pkt_events.append(events[in_window])
        win_start += step
        win_end += step
    pkt_events.append(events[events[:, 0] >= win_start])
    return pkt_events
def old_get_packet_events(event_txt):
    '''
    Deprecated packet splitter: bins events into fixed-time frames.

    :param event_txt: path to the events .txt file
    :return: events_coo_list: per-frame event coordinates; each element is a
                 2-D array of shape (num_events, 2), sorted by (x, y)
             coo_accu_list: per-frame accumulated events; each element is a
                 2-D array of shape (num_unique_coords, 3) holding
                 [x, y, occurrence_count] regardless of polarity
    '''
    events = np.genfromtxt(event_txt)
    # Quantize rebased timestamps into frame indices of configs['packet_time'].
    events[:, 0] = configs['time_transfer'] * (events[:, 0] - events[0, 0]) // configs['packet_time']
    events_coo_list = []
    coo_accu_list = []
    for i in range(int(events[-1, 0]) + 1):
        idx = np.where(events[:, 0] == i)
        events_coo = events[idx, 1:3].reshape(-1, 2)
        events_coo = numpy_multi_sort(events_coo)
        events_coo_list.append(events_coo)
        # Collapse sorted coordinates into [x, y, count] rows. The first row
        # seeds the accumulator with count 1, so iteration starts at the
        # second row. (The original iterated from row 0 and double-counted
        # the first coordinate.)
        coo_accu = np.array([[events_coo[0, 0], events_coo[0, 1], 1]])
        for coo in events_coo[1:]:
            if (coo[0] == coo_accu[-1, 0]) and (coo[1] == coo_accu[-1, 1]):
                coo_accu[-1, 2] += 1
            else:
                coo = np.append(coo, 1).reshape(1, 3)
                coo_accu = np.concatenate((coo_accu, coo))
        coo_accu_list.append(coo_accu)
    return events_coo_list, coo_accu_list
def numpy_multi_sort(array):
    """Sort the rows of an (n, 2) array lexicographically by (x, y).

    Values are cast to int (truncation toward zero) before sorting, and the
    result is returned as a fresh integer ndarray.
    """
    pairs = sorted((int(a), int(b)) for a, b in array)
    return np.array([list(p) for p in pairs])
def denoise(evt_vec, medfilt, pic_type):
    '''
    Rasterize an event packet into a 2-D picture and optionally denoise it.

    :param evt_vec: iterable of events, each [timestamp, x, y]
    :param medfilt: if truthy, median-filter and dilate the picture, then
                    rebuild the event vector from the surviving pixels
    :param pic_type: one of 'sae', 'intensity', 'bool'
        sae: each pixel stores the most recent timestamp
        intensity: each pixel stores the accumulated event count
        bool: each pixel stores whether any event occurred
    :return: (evt_vec, pic) - the (possibly rebuilt) event vector and picture
    :raises AttributeError: if pic_type is not one of the supported values
    '''
    # Validate up front. The original did `return AttributeError` inside the
    # loop, handing the exception *class* back to the caller instead of
    # raising - and only when evt_vec was non-empty.
    if pic_type not in ('sae', 'intensity', 'bool'):
        raise AttributeError(f'unsupported pic_type: {pic_type}')
    height, width = configs['height'], configs['width']
    pic = np.zeros([height, width])
    for item in evt_vec:
        if pic_type == 'sae':
            pic[int(item[2])][int(item[1])] = item[0]
        elif pic_type == 'intensity':
            pic[int(item[2])][int(item[1])] += 1
        else:  # 'bool'
            pic[int(item[2])][int(item[1])] = 1
    if medfilt:
        pic = ndimage.median_filter(pic, size=3)  # 4ms
        # NOTE(review): original comment said "closing operation", but only
        # a dilation is applied here - confirm intent
        pic = ndimage.binary_dilation(pic, iterations=5).astype(int)  # 2.6ms
        evt_vec = get_denoised_evt_vec(pic, height, width)  # 1.5ms
    return evt_vec, pic
@nb.njit
def get_denoised_evt_vec(pic, height, width):
    """Collect a [value, x, y] triple for every non-zero pixel, row-major."""
    out = []
    for row in range(height):
        for col in range(width):
            val = pic[row][col]
            if val != 0:
                out.append([val, col, row])
    return out
def cal_gmm_components(evt_xy, n_components=1):
    """Evaluate GMM BIC scores for a range of candidate component counts.

    Fits a GaussianMixture for each count in
    [n_components, n_components + 4] and records its BIC on the data
    (lower is better). The list is still printed for interactive use, but it
    is now also returned so callers can pick a count programmatically
    (the original discarded the scores and returned None).

    :param evt_xy: (n, 2) array of event coordinates to fit
    :param n_components: first candidate component count to try
    :return: list of five BIC scores, one per candidate count
    """
    score_list = []
    for i in range(n_components, n_components + 5):
        gmm = GaussianMixture(n_components=i, warm_start=True).fit(evt_xy)
        score_list.append(gmm.bic(evt_xy))
    print(score_list)
    return score_list
def gmm_cluster(evt_path):
    '''
    Per-frame GMM clustering of event packets with temporally consistent labels.

    Only the following transitions between consecutive frames are handled:
    1. the number of clusters stays the same
    2. exactly one new object enters the frame
    3. one or more objects exit the frame
    Not handled:
    1. several objects entering at once
    2. objects entering and exiting in the same frame
    :param evt_path: path to the events .txt file; its basename must be a key
                     in the project `configs` dictionary
    :return: (pic_list, used_label_list) - rasterized pictures per frame and
             per-frame label arrays of shape (n, 3) holding [x, y, label]
             (None for frames with fewer than 80 events)
    '''
    evt_fn = os.path.splitext(os.path.basename(evt_path))[0]
    try:
        configs[evt_fn]
    except KeyError:
        print('Not found the provided file')
        raise
    pkt_evts = get_packet_events(event_txt=evt_path)
    pic_list = []
    evt_xy_list = []
    gmm_list = []
    weights_list = []
    means_list = []
    n_components_list = [configs[evt_fn]['init_n_components']]
    nll_list = []
    used_label_list = []  # labels actually used, remapped for temporal consistency across video frames
    used_label_unique_list = []
    label_list = []
    label_means_list = []
    init = True
    for i, event_vector in enumerate(pkt_evts):
        if configs[evt_fn]['start_time'] <= i < configs[evt_fn]['end_time']:
            pkt_evt, pic = denoise(event_vector, medfilt=configs['medfilt'], pic_type='bool')
            pkt_evts[i] = pkt_evt
            pic_list.append(pic)
            # misc.imsave(f'{configs["image_dir"]}/{image_fn}_{i}.jpg', pic)
            if len(pkt_evt) >= 80:
                # print('-----------')
                # print(i)
                evt_xy = np.array(pkt_evt)[:, 1:]
                evt_xy[:, 1] = configs['height'] - evt_xy[:, 1]
                evt_xy_list.append(evt_xy)
                current_n_components = n_components_list[-1]
                # initialize the GMM on the first processed frame of the video
                if init:
                    # n_components = event_process.cal_gmm_components()
                    gmm = GaussianMixture(n_components=current_n_components, random_state=0).fit(evt_xy)
                    used_label = gmm.predict(evt_xy)
                    label = gmm.predict(evt_xy)
                    init = False
                # second and later frames
                else:
                    # 1. Outlier check: predict this frame with the GMM fitted on the
                    #    previous frame; a newly appeared cluster yields many low-probability
                    #    points (experiments show the low-probability points are not the new
                    #    points themselves, but a new cluster does add many of them)
                    label = gmm_list[-1].predict(evt_xy)
                    # outlier metric for object entrance
                    nll = -gmm_list[-1].score(evt_xy)
                    # outlier metric for object exit
                    each_label_num = np.array([np.sum(label == j) for j in range(n_components_list[-1])])
                    label_zero_num = np.sum(each_label_num == 0)
                    # no outliers: reuse the previous weights/means as initialization and refit
                    if abs(nll - nll_list[-1]) / nll_list[-1] < 0.2 and label_zero_num == 0:
                        gmm = GaussianMixture(n_components=current_n_components,
                                              weights_init=weights_list[-1],
                                              means_init=means_list[-1],
                                              random_state=42).fit(evt_xy)
                        # remap labels so each object keeps the label it had in the previous frame
                        label = gmm.predict(evt_xy)
                        used_label = np.zeros_like(label)
                        for j, c_mean in enumerate(gmm.means_):
                            used_label_idx = np.argmin(
                                [np.linalg.norm(c_mean - p_mean) for p_mean in label_means_list[-1]])
                            used_label[np.where(label == j)] = used_label_unique_list[-1][used_label_idx]
                    else:
                        # outlier case: one or more existing objects have exited
                        if label_zero_num != 0:
                            current_n_components -= label_zero_num
                            gmm = GaussianMixture(n_components=current_n_components, random_state=42).fit(evt_xy)  # 10ms
                            # remap labels so each object keeps the label it had in the previous frame
                            label = gmm.predict(evt_xy)
                            used_label = np.zeros_like(label)
                            for j, c_mean in enumerate(gmm.means_):
                                used_label_idx = np.argmin(
                                    [np.linalg.norm(c_mean - p_mean) for p_mean in label_means_list[-1]])
                                used_label[np.where(label == j)] = used_label_unique_list[-1][used_label_idx]
                        # outlier case: a new object has entered; only a single new object is
                        # supported, multiple simultaneous entrances are not handled yet
                        if abs(nll - nll_list[-1]) / nll_list[-1] >= 0.1:
                            current_n_components += 1
                            gmm = GaussianMixture(n_components=current_n_components, random_state=42).fit(evt_xy)  # 10ms
                            # remap labels so each object keeps the label it had in the previous frame
                            label = gmm.predict(evt_xy)
                            used_label = np.zeros_like(label)
                            # guard against a small object momentarily vanishing for a frame
                            # (in theory this should not need handling here)
                            def repair_small_obj_lost(lost_frame_num):
                                gmm_ = GaussianMixture(n_components=current_n_components,
                                                       weights_init=weights_list[-(lost_frame_num+1)],
                                                       means_init=means_list[-(lost_frame_num+1)],
                                                       random_state=0).fit(evt_xy)
                                label_ = gmm_.predict(evt_xy)
                                used_label_ = np.zeros_like(label_)
                                for j, c_mean_ in enumerate(gmm_.means_):
                                    used_label_idx_ = np.argmin(
                                        [np.linalg.norm(c_mean_ - p_mean_) for p_mean_ in label_means_list[-(lost_frame_num+1)]])
                                    used_label_[np.where(label_ == j)] = used_label_unique_list[-(lost_frame_num+1)][used_label_idx_]
                                return label_, used_label_
                            if current_n_components == n_components_list[-2]:
                                label, used_label = repair_small_obj_lost(1)
                            elif current_n_components == n_components_list[-3]:
                                label, used_label = repair_small_obj_lost(2)
                            # elif current_n_components == n_components_list[-4]:
                            #     label, used_label = repair_small_obj_lost(3)
                            else:
                                dist_min = []
                                for j, c_mean in enumerate(gmm.means_):
                                    dist = [np.linalg.norm(c_mean - p_mean) for p_mean in label_means_list[-1]]
                                    dist_min.append(np.min(dist))
                                existed_label_idx_list = np.where(dist_min != np.max(dist_min))[0]
                                new_idx_list = []
                                temp_idx = 0
                                for j in range(current_n_components):
                                    if j in existed_label_idx_list:
                                        new_idx = used_label_unique_list[-1][temp_idx]
                                        used_label[np.where(label == j)] = new_idx
                                        temp_idx += 1
                                    else:
                                        new_idx = np.max(used_label_unique_list[-1]) + 1
                                        used_label[np.where(label == j)] = new_idx
                                    new_idx_list.append(new_idx)
                                gmm.weights_[list(range(current_n_components))] = gmm.weights_[new_idx_list]
                                gmm.means_[list(range(current_n_components)), :] = gmm.means_[new_idx_list, :]
                                gmm = GaussianMixture(n_components=current_n_components,
                                                      weights_init=gmm.weights_,
                                                      means_init=gmm.means_,
                                                      random_state=0).fit(evt_xy)
                                label = gmm.predict(evt_xy)
                                used_label = gmm.predict(evt_xy)
                label_means = [np.mean(evt_xy[np.where(used_label == j)], axis=0) for j in np.unique(used_label)]
                gmm_list.append(gmm)
                n_components_list.append(current_n_components)
                weights_list.append(gmm.weights_)
                means_list.append(gmm.means_)
                nll_list.append(-gmm.score(evt_xy))
                used_label_unique_list.append(np.unique(used_label))
                used_label_with_evt = np.concatenate((evt_xy, used_label.reshape(-1, 1)), axis=1)
                used_label_list.append(used_label_with_evt)
                label_with_evt = np.concatenate((evt_xy, label.reshape(-1, 1)), axis=1)
                label_list.append(label_with_evt)
                label_means_list.append(label_means)
                # visualization.plot_gmm(evt_xy, used_label, f'{configs["cmap_dir"]}/{evt_fn}_{i}')
            else:
                used_label_list.append(None)
                label_list.append(None)
    return pic_list, used_label_list
| [
"scipy.ndimage.binary_dilation",
"numpy.sum",
"numpy.zeros_like",
"os.path.basename",
"numpy.unique",
"numpy.zeros",
"numpy.genfromtxt",
"sklearn.mixture.GaussianMixture",
"numpy.sort",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.loadtxt",
"numpy.linalg.norm",
"numpy.min",
"nu... | [((2650, 2674), 'numpy.genfromtxt', 'np.genfromtxt', (['event_txt'], {}), '(event_txt)\n', (2663, 2674), True, 'import numpy as np\n'), ((3693, 3720), 'numpy.array', 'np.array', (['value'], {'dtype': 'dtyp'}), '(value, dtype=dtyp)\n', (3701, 3720), True, 'import numpy as np\n'), ((3738, 3770), 'numpy.sort', 'np.sort', (['array'], {'order': "['x', 'y']"}), "(array, order=['x', 'y'])\n", (3745, 3770), True, 'import numpy as np\n'), ((4205, 4230), 'numpy.zeros', 'np.zeros', (['[height, width]'], {}), '([height, width])\n', (4213, 4230), True, 'import numpy as np\n'), ((2884, 2911), 'numpy.where', 'np.where', (['(events[:, 0] == i)'], {}), '(events[:, 0] == i)\n', (2892, 2911), True, 'import numpy as np\n'), ((3152, 3203), 'numpy.array', 'np.array', (['[[events_coo[0, 0], events_coo[0, 1], 1]]'], {}), '([[events_coo[0, 0], events_coo[0, 1], 1]])\n', (3160, 3203), True, 'import numpy as np\n'), ((4587, 4621), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['pic'], {'size': '(3)'}), '(pic, size=3)\n', (4608, 4621), False, 'from scipy import ndimage, misc\n'), ((741, 765), 'numpy.genfromtxt', 'np.genfromtxt', (['event_txt'], {}), '(event_txt)\n', (754, 765), True, 'import numpy as np\n'), ((5557, 5583), 'os.path.basename', 'os.path.basename', (['evt_path'], {}), '(evt_path)\n', (5573, 5583), False, 'import os\n'), ((987, 1011), 'numpy.genfromtxt', 'np.genfromtxt', (['event_txt'], {}), '(event_txt)\n', (1000, 1011), True, 'import numpy as np\n'), ((1152, 1188), 'numpy.loadtxt', 'np.loadtxt', (['event_txt'], {'delimiter': '""","""'}), "(event_txt, delimiter=',')\n", (1162, 1188), True, 'import numpy as np\n'), ((3462, 3493), 'numpy.concatenate', 'np.concatenate', (['(coo_accu, coo)'], {}), '((coo_accu, coo))\n', (3476, 3493), True, 'import numpy as np\n'), ((4658, 4700), 'scipy.ndimage.binary_dilation', 'ndimage.binary_dilation', (['pic'], {'iterations': '(5)'}), '(pic, iterations=5)\n', (4681, 4700), False, 'from scipy import ndimage, misc\n'), ((5196, 
5244), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'i', 'warm_start': '(True)'}), '(n_components=i, warm_start=True)\n', (5211, 5244), False, 'from sklearn.mixture import GaussianMixture\n'), ((1569, 1604), 'numpy.where', 'np.where', (['(events[:, 0] >= win_start)'], {}), '(events[:, 0] >= win_start)\n', (1577, 1604), True, 'import numpy as np\n'), ((1730, 1794), 'numpy.where', 'np.where', (['((events[:, 0] >= win_start) & (events[:, 0] < win_end))'], {}), '((events[:, 0] >= win_start) & (events[:, 0] < win_end))\n', (1738, 1794), True, 'import numpy as np\n'), ((6576, 6593), 'numpy.array', 'np.array', (['pkt_evt'], {}), '(pkt_evt)\n', (6584, 6593), True, 'import numpy as np\n'), ((7655, 7682), 'numpy.sum', 'np.sum', (['(each_label_num == 0)'], {}), '(each_label_num == 0)\n', (7661, 7682), True, 'import numpy as np\n'), ((13834, 13855), 'numpy.unique', 'np.unique', (['used_label'], {}), '(used_label)\n', (13843, 13855), True, 'import numpy as np\n'), ((3403, 3420), 'numpy.append', 'np.append', (['coo', '(1)'], {}), '(coo, 1)\n', (3412, 3420), True, 'import numpy as np\n'), ((8303, 8323), 'numpy.zeros_like', 'np.zeros_like', (['label'], {}), '(label)\n', (8316, 8323), True, 'import numpy as np\n'), ((13515, 13536), 'numpy.unique', 'np.unique', (['used_label'], {}), '(used_label)\n', (13524, 13536), True, 'import numpy as np\n'), ((6927, 6993), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'current_n_components', 'random_state': '(0)'}), '(n_components=current_n_components, random_state=0)\n', (6942, 6993), False, 'from sklearn.mixture import GaussianMixture\n'), ((7559, 7577), 'numpy.sum', 'np.sum', (['(label == j)'], {}), '(label == j)\n', (7565, 7577), True, 'import numpy as np\n'), ((9125, 9145), 'numpy.zeros_like', 'np.zeros_like', (['label'], {}), '(label)\n', (9138, 9145), True, 'import numpy as np\n'), ((9984, 10004), 'numpy.zeros_like', 'np.zeros_like', (['label'], {}), '(label)\n', (9997, 10004), 
True, 'import numpy as np\n'), ((13470, 13495), 'numpy.where', 'np.where', (['(used_label == j)'], {}), '(used_label == j)\n', (13478, 13495), True, 'import numpy as np\n'), ((7875, 8005), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'current_n_components', 'weights_init': 'weights_list[-1]', 'means_init': 'means_list[-1]', 'random_state': '(42)'}), '(n_components=current_n_components, weights_init=\n weights_list[-1], means_init=means_list[-1], random_state=42)\n', (7890, 8005), False, 'from sklearn.mixture import GaussianMixture\n'), ((8586, 8606), 'numpy.where', 'np.where', (['(label == j)'], {}), '(label == j)\n', (8594, 8606), True, 'import numpy as np\n'), ((10621, 10642), 'numpy.zeros_like', 'np.zeros_like', (['label_'], {}), '(label_)\n', (10634, 10642), True, 'import numpy as np\n'), ((8478, 8509), 'numpy.linalg.norm', 'np.linalg.norm', (['(c_mean - p_mean)'], {}), '(c_mean - p_mean)\n', (8492, 8509), True, 'import numpy as np\n'), ((8874, 8941), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'current_n_components', 'random_state': '(42)'}), '(n_components=current_n_components, random_state=42)\n', (8889, 8941), False, 'from sklearn.mixture import GaussianMixture\n'), ((9423, 9443), 'numpy.where', 'np.where', (['(label == j)'], {}), '(label == j)\n', (9431, 9443), True, 'import numpy as np\n'), ((9732, 9799), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'current_n_components', 'random_state': '(42)'}), '(n_components=current_n_components, random_state=42)\n', (9747, 9799), False, 'from sklearn.mixture import GaussianMixture\n'), ((9311, 9342), 'numpy.linalg.norm', 'np.linalg.norm', (['(c_mean - p_mean)'], {}), '(c_mean - p_mean)\n', (9325, 9342), True, 'import numpy as np\n'), ((10180, 10352), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'current_n_components', 'weights_init': 'weights_list[-(lost_frame_num + 1)]', 'means_init': 
'means_list[-(lost_frame_num + 1)]', 'random_state': '(0)'}), '(n_components=current_n_components, weights_init=\n weights_list[-(lost_frame_num + 1)], means_init=means_list[-(\n lost_frame_num + 1)], random_state=0)\n', (10195, 10352), False, 'from sklearn.mixture import GaussianMixture\n'), ((10960, 10981), 'numpy.where', 'np.where', (['(label_ == j)'], {}), '(label_ == j)\n', (10968, 10981), True, 'import numpy as np\n'), ((10823, 10856), 'numpy.linalg.norm', 'np.linalg.norm', (['(c_mean_ - p_mean_)'], {}), '(c_mean_ - p_mean_)\n', (10837, 10856), True, 'import numpy as np\n'), ((11777, 11808), 'numpy.linalg.norm', 'np.linalg.norm', (['(c_mean - p_mean)'], {}), '(c_mean - p_mean)\n', (11791, 11808), True, 'import numpy as np\n'), ((11897, 11909), 'numpy.min', 'np.min', (['dist'], {}), '(dist)\n', (11903, 11909), True, 'import numpy as np\n'), ((13007, 13128), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'current_n_components', 'weights_init': 'gmm.weights_', 'means_init': 'gmm.means_', 'random_state': '(0)'}), '(n_components=current_n_components, weights_init=gmm.\n weights_, means_init=gmm.means_, random_state=0)\n', (13022, 13128), False, 'from sklearn.mixture import GaussianMixture\n'), ((11990, 12006), 'numpy.max', 'np.max', (['dist_min'], {}), '(dist_min)\n', (11996, 12006), True, 'import numpy as np\n'), ((12383, 12403), 'numpy.where', 'np.where', (['(label == j)'], {}), '(label == j)\n', (12391, 12403), True, 'import numpy as np\n'), ((12561, 12595), 'numpy.max', 'np.max', (['used_label_unique_list[-1]'], {}), '(used_label_unique_list[-1])\n', (12567, 12595), True, 'import numpy as np\n'), ((12651, 12671), 'numpy.where', 'np.where', (['(label == j)'], {}), '(label == j)\n', (12659, 12671), True, 'import numpy as np\n')] |
#! /usr/bin/python
'''
Module of utility functions to drive ATP from Python and extract results
At this time, only steady-state result extraction is supported.
'''
from __future__ import print_function, unicode_literals
from math import sqrt
import pickle
import lineZ
import numpy as np
from numpy.linalg import inv
# Defining the data type may allow use of a smaller, faster data type
# if the default precision isn't necessary. Or it may allow going with
# a larger datatype if more precision is needed.
nbits = 64
fdtype = np.dtype('float'+str(nbits))
cdtype = np.dtype('complex'+str(2*nbits))
# To parse ATP files, the test_data_cards module is used.
# It is currently under development.
import text_data_cards as tdc
import itertools, copy, os
import subprocess, re, csv, codecs, shutil
ATP_path = 'C:\ATP\gigmingw'
ATP_exe = 'runATP_G.bat'
be_quiet = True
def run_ATP(ATP_file, quiet=None):
    '''
    Run ATP on the given .atp file using the configured ATP_path / ATP_exe.

    :param ATP_file: path of the .atp file to run
    :param quiet: if True, discard ATP's stdout; if False, show it; if None,
        fall back to the module-level be_quiet flag.
    :return: the subprocess return code from the ATP runner (the original
        assigned it to an unused local and discarded it).
    '''
    cmd = (os.path.join(ATP_path, ATP_exe), ATP_file)
    # an explicit quiet argument wins; otherwise defer to the module default
    if quiet if quiet is not None else be_quiet:
        # Close the devnull handle when done (the original leaked it).
        with open(os.devnull, 'w') as devnull:
            return subprocess.call(cmd, stdout=devnull)
    return subprocess.call(cmd)
def atp_basename(ATP_file):
    '''
    Returns the base filename corresponding to the ATP filename provided.
    It basically just removes the .atp (case-insensitive) from the end.

    :raises ValueError: if the filename does not end in .atp. (The original
        crashed with AttributeError on ``None.group`` in that case.)
    '''
    m = re.match(r'(.*)\.atp$', ATP_file, flags=re.I)
    if m is None:
        raise ValueError('not an .atp filename: %s' % ATP_file)
    return m.group(1)
def lis_filename(ATP_file):
    '''
    Returns the LIS filename corresponding to the ATP filename provided.

    This does not verify that the LIS file exists or that it is in the
    expected format.
    '''
    base = atp_basename(ATP_file)
    return '{0}.lis'.format(base)
def replace_text(ATP_file, old_text, new_text, outfile=None, n=None):
    '''
    Replaces some text in the specified ATP file with new text. Since ATP uses
    fixed width fields in the file, new text will be left padded with spaces if
    new text is shorter than old text or truncated on the right if new text is
    longer. Optional parameter n indicates that ONE instance of the old text
    should be replaced, and the instance replaced will be the nth instance.
    Optional parameter outfile provides a filename for the modified file to be
    saved to. If outfile is not provided, the input file is overwritten.
    It is assumed that the text would appear only once on any given line.
    If the old text is not found - or, when n is given, if fewer than n
    instances exist - ValueError is raised and the file is not written.
    (The original silently rewrote the file unchanged when instance n was
    missing but at least one instance existed.)
    '''
    # Pad on the left / truncate on the right so field widths are preserved.
    new_text_fixed = ' '*max(0, len(old_text)-len(new_text)) + \
                     new_text[:len(old_text)]
    i = 0
    with codecs.open(ATP_file, 'r', encoding='iso-8859-1', errors='replace') as f:
        infile = f.readlines()
    for ln, line in enumerate(infile):
        if old_text in line:
            i += 1
            if n is None or i == n:
                infile[ln] = line.replace(old_text, new_text_fixed)
                if n is not None:
                    break
    if i == 0:
        raise ValueError('Text to replace not found')
    if n is not None and i < n:
        # Some instances found, but not the requested nth one; refuse to
        # silently write an unchanged file.
        raise ValueError('Instance %d of text to replace not found' % n)
    with codecs.open(outfile if outfile is not None else ATP_file, 'w',
                     encoding='iso-8859-1', errors='replace') as f:
        f.write(''.join(infile))
def node_ph(node_name, ph):
    """Append the phase suffix to a node name; ground node TERRA gets none."""
    if node_name == "TERRA":
        return node_name
    return node_name + ph
def get_SS_results(LIS_file, RMS_scale=False):
    '''
    Extract steady-state results from LIS file. Results are returned as a
    tuple in the following structure:
    (<Node Voltages>, <Branch Currents>)
    <Node Voltages> has the following structure:
    { <Node Name>: <Node Voltage> }
    Where <Node Name> is a string of the node name and <Node Voltage> is a
    complex number representing the phasor node voltage.
    <Branch Current> has the following structure:
    { (<From Node>, <To Node>): <Branch Current> }
    Where <From Node> and <To Node> are strings of the node name and
    <Branch Current> is a complex number representing the phasor branch current.
    By default the phasor values returned are NOT scaled from the ATP output to
    convert from peak values to RMS values. They can be scaled down by a factor
    of sqrt(2) by passing RMS_scale as True.
    TODO: Detect if ATP throws an error and raise an exception
    '''
    s = 1./sqrt(2) if RMS_scale else 1.0 # set scaling factor
    node_voltages = {}
    branch_currents = {}
    with open(LIS_file, 'r') as f:
        iter_input = f # No pre-processing needed, but it could be done here
        # Section markers for the parts of the LIS file we parse.
        ss_res_start = re.compile('^Sinusoidal steady-state phasor solution, branch by branch')
        ss_node_end = re.compile('^ *Total network loss P-loss by summing injections')
        ss_sw_start = re.compile('^Output for steady-state phasor switch currents.')
        # Scan forward to the start of the phasor-solution section.
        for line in iter_input:
            if ss_res_start.match(line):
                break
        else:
            # NOTE(review): only a message is printed here; the next() calls
            # below will then raise StopIteration on the exhausted file.
            print('Steady-state phasor solution not found.')
        # Skip next four lines to get to where results start
        for _ in range(4):
            line = next(iter_input)
        # Steady state phasor solution column definitions
        # Since the output is fixed width text, it is most reliable to parse it
        # using the width of the columns. The columns will be defined using starting
        # column number. Column numbers are set 1-based to match a text
        # editor. Two blank spaces between columns are assumed.
        col_nums = [2, 13, 23, 40, 61, 78, 99, 116, 133]
        c = [slice(start-1, end-3) for start, end in zip(col_nums[:-1], col_nums[1:])]
        # Get steady state node voltages and currents.
        # Each branch is reported as a six-line record: blank, from-node
        # real parts, from-node imaginary parts, blank, to-node real parts,
        # to-node imaginary parts.
        while ss_node_end.match(line) is None:
            # Line 1: Blank
            line = next(iter_input)
            if ss_node_end.match(line) is not None:
                break
            # Line 2
            from_node = line[c[0]].strip()
            from_v_re = float(line[c[2]])
            from_i_re = float(line[c[4]])
            line = next(iter_input)
            # Line 3
            from_v_im = float(line[c[2]])
            from_i_im = float(line[c[4]])
            line = next(iter_input)
            # Line 4: Blank
            line = next(iter_input)
            # Line 5
            to_node = line[c[1]].strip()
            to_v_re = float(line[c[2]])
            to_i_re = float(line[c[4]])
            line = next(iter_input)
            # Line 6
            to_v_im = float(line[c[2]])
            to_i_im = float(line[c[4]])
            # Record the phasors; each branch current is stored under both
            # (from, to) and (to, from) with the value ATP reports for the
            # respective end.
            node_voltages[from_node] = complex(from_v_re, from_v_im)*s
            node_voltages[to_node] = complex(to_v_re, to_v_im)*s
            branch_currents[(from_node,to_node)] = complex(from_i_re, from_i_im)*s
            branch_currents[(to_node,from_node)] = complex(to_i_re, to_i_im)*s
            line = next(iter_input)
        # Skip down to switch currents (if any)
        # NOTE(review): if the file contains no switch-current section this
        # loop runs off the end of the file and raises StopIteration —
        # confirm all expected LIS files contain the section.
        while not ss_sw_start.match(line):
            line = next(iter_input)
        # See if switch currents were found
        # (always true here: the loop above exits only on a match)
        if ss_sw_start.match(line):
            # Eat the column header line
            line = next(iter_input)
            # Fixed-width column definitions for the switch-current table.
            col_nums = [7, 17, 30, 48, 66, 85, 97, 115, 133]
            c = [slice(start-1, end-3) for start, end in zip(col_nums[:-1], col_nums[1:])]
            line = next(iter_input)
            # One switch per line until the first column is blank.
            while len(line[c[0]].strip()) > 0:
                from_node = line[c[0]].strip()
                # A blank to-node means the switch goes to ground.
                to_node = line[c[1]].strip() if len(line[c[1]].strip()) > 0 else 'TERRA'
                if 'Open' in line[c[2]]:
                    from_i_re = 0.
                    from_i_im = 0.
                else:
                    from_i_re = float(line[c[2]])
                    from_i_im = float(line[c[3]])
                # Reverse direction gets the negated current.
                branch_currents[(from_node,to_node)] = complex(from_i_re, from_i_im)*s
                branch_currents[(to_node,from_node)] = complex(-1*from_i_re, -1*from_i_im)*s
                line = next(iter_input)
    return node_voltages, branch_currents
def output_ss_file(LIS_file, SS_file=None, pickle_file=None,
                   buses=None, branches=None,
                   phases=('A','B', 'C'), RMS_scale=False):
    '''
    Extract steady-state phasor results from LIS file and output them to a _ss.csv
    file in comma-separated format.

    LIS_file: ATP .lis output file to parse.
    SS_file: CSV output filename; defaults to <LIS basename>_ss.csv.
    pickle_file: pickle output filename; defaults to <LIS basename>_ss.p.
    buses: optional list of bus names; adds per-bus voltage tables.
    branches: optional list of (from_bus, to_bus) pairs; adds branch tables.
    phases: phase suffixes appended to bus names to form node names.
    RMS_scale: passed through to get_SS_results (peak -> RMS scaling).

    Returns the CSV filename that was written.
    '''
    data_list = {} # Data to save to pickle file
    node_voltages, branch_currents = get_SS_results(LIS_file, RMS_scale)
    data_list['node_voltages'] = node_voltages
    data_list['branch_currents'] = branch_currents
    if buses is not None:
        # Per-bus phase/sequence voltages and negative-sequence imbalance.
        ph_voltages, seq_voltages, neg_seq_imbalance = \
            process_SS_bus_voltages(LIS_file, buses, phases, RMS_scale)
        bus_data = {}
        for n, bus in enumerate(buses):
            bus_data[bus] = {}
            bus_data[bus]['ph_voltages'] = ph_voltages[:, n]
            bus_data[bus]['seq_voltages'] = seq_voltages[:, n]
            bus_data[bus]['neg_seq_imbalance'] = neg_seq_imbalance[n]
        data_list['bus_data'] = bus_data
    if branches is not None:
        # Per-branch phase/sequence currents and three-phase power.
        ph_br_currents, seq_br_currents, S_3ph = \
            process_SS_branch_currents(LIS_file, branches, phases, RMS_scale)
        branch_data = {}
        for n, branch in enumerate(branches):
            branch = tuple(branch) # convert to tuple for indexing
            branch_data[branch] = {}
            branch_data[branch]['ph_br_currents'] = ph_br_currents[:, n]
            branch_data[branch]['seq_br_currents'] = seq_br_currents[:, n]
            branch_data[branch]['S_3ph'] = S_3ph[n]
        data_list['branch_data'] = branch_data
    # Reorganize data_list to be easier to key into bus or branch data
    # NOTE(review): this reassignment appears to be dead code — bus_data is
    # never used again below.
    bus_data = {}
    # NOTE(review): the '\.' in these patterns is a non-raw escape (emits a
    # SyntaxWarning on newer Pythons); a raw string would be cleaner.
    if SS_file is None:
        SS_file = re.match('(.*)\.lis$',LIS_file, flags=re.I).group(1) + '_ss.csv'
    if pickle_file is None:
        pickle_file = re.match('(.*)\.lis$',LIS_file, flags=re.I).group(1) + '_ss.p'
    with open(pickle_file, 'wb') as binfile:
        pickle.dump(data_list, binfile)
    with open(SS_file, 'w') as csvfile:
        sswriter = csv.writer(csvfile, lineterminator='\n')
        if buses is not None:
            # Output bus voltage results
            # NOTE: '%%' in the final header is written literally (no
            # %-formatting is applied to that string).
            sswriter.writerow(list(itertools.chain(('Bus',),
                itertools.chain(*[('%s-phase Voltage (Real)' % ph,
                                   '%s-phase Voltage (Imag)' % ph) for ph in phases]),
                itertools.chain(*[('%s-sequence Voltage (Real)' % ph,
                                   '%s-sequence Voltage (Imag)' % ph) for ph in ('Zero', 'Positive', 'Negative')]),
                itertools.chain(*[('%s-phase Voltage (Mag)' % ph,
                                   '%s-phase Voltage (Ang)' % ph) for ph in phases]),
                itertools.chain(*[('%s-sequence Voltage (Mag)' % ph,
                                   '%s-sequence Voltage (Ang)' % ph) for ph in ('Zero', 'Positive', 'Negative')]),
                ('Neg. Seq. Unbalance Factor (%%)',))))
            for n, bus in enumerate(buses):
                # One row per bus: rectangular then polar forms.
                sswriter.writerow(list(itertools.chain((bus,),
                    itertools.chain(*zip(np.real(ph_voltages[:, n]),
                                         np.imag(ph_voltages[:,n]))),
                    itertools.chain(*zip(np.real(seq_voltages[:, n]),
                                         np.imag(seq_voltages[:,n]))),
                    itertools.chain(*zip(np.absolute(ph_voltages[:, n]),
                                         np.angle(ph_voltages[:, n], deg=True))),
                    itertools.chain(*zip(np.absolute(seq_voltages[:, n]),
                                         np.angle(seq_voltages[:, n], deg=True))),
                    (neg_seq_imbalance[n],))))
            # Section separator row.
            sswriter.writerow(['--------']*26)
        if branches is not None:
            # Output branch current results
            # NOTE(review): these header labels say "Voltage" although the
            # data rows below contain branch currents — looks like a
            # copy-paste from the bus section; confirm before relying on
            # the header text.
            sswriter.writerow(list(itertools.chain(('From Bus', 'To Bus',
                '3PH MW', '3PH Mvar'),
                itertools.chain(*[('%s-phase Voltage (Real)' % ph,
                                   '%s-phase Voltage (Imag)' % ph) for ph in phases]),
                itertools.chain(*[('%s-sequence Voltage (Real)' % ph,
                                   '%s-sequence Voltage (Imag)' % ph) for ph in ('Zero', 'Positive', 'Negative')]),
                itertools.chain(*[('%s-phase Voltage (Mag)' % ph,
                                   '%s-phase Voltage (Ang)' % ph) for ph in phases]),
                itertools.chain(*[('%s-sequence Voltage (Mag)' % ph,
                                   '%s-sequence Voltage (Ang)' % ph) for ph in ('Zero', 'Positive', 'Negative')]))))
            for n, branch in enumerate(branches):
                sswriter.writerow(list(itertools.chain(branch,
                    (S_3ph.real[n], S_3ph.imag[n]),
                    itertools.chain(*zip(np.real(ph_br_currents[:, n]),
                                         np.imag(ph_br_currents[:,n]))),
                    itertools.chain(*zip(np.real(seq_br_currents[:, n]),
                                         np.imag(seq_br_currents[:,n]))),
                    itertools.chain(*zip(np.absolute(ph_br_currents[:, n]),
                                         np.angle(ph_br_currents[:, n], deg=True))),
                    itertools.chain(*zip(np.absolute(seq_br_currents[:, n]),
                                         np.angle(seq_br_currents[:, n], deg=True))))))
            # Section separator row.
            sswriter.writerow(['--------']*28)
        # Output node voltage results
        sswriter.writerow(['Bus', 'Bus Voltage (Real)', 'Bus Voltage (Imag)'])
        for node, voltage in node_voltages.items():
            sswriter.writerow([node, voltage.real, voltage.imag])
        # Output file section separator
        sswriter.writerow([])
        sswriter.writerow(['--------']*4)
        sswriter.writerow([])
        # Output branch current results
        sswriter.writerow(['From Bus', 'To Bus', 'Branch Current (Real)', 'Branch Current (Imag)'])
        for nodes, current in branch_currents.items():
            sswriter.writerow([nodes[0], nodes[1], current.real, current.imag])
    return SS_file
def process_SS_bus_voltages(LIS_file, buses, phases=('A', 'B', 'C'), RMS_scale=False):
    '''
    Parse LIS_file for steady-state results and assemble per-bus voltage
    phasors.

    Returns (ph_voltages, seq_voltages, neg_seq_imbalance): the 2-D arrays
    have one column per bus (same order as `buses`), and the imbalance is
    the negative- to positive-sequence voltage magnitude ratio in percent.
    '''
    node_voltages, _branch_currents = get_SS_results(LIS_file, RMS_scale)
    # Node names are formed by appending the phase suffix to the bus name.
    by_bus = [[node_voltages[bus + ph] for ph in phases] for bus in buses]
    ph_voltages = np.array(by_bus).T
    seq_voltages = np.array(lineZ.ph_to_seq_v(ph_voltages))
    neg_seq_imbalance = 100 * np.abs(seq_voltages[2] / seq_voltages[1])
    return ph_voltages, seq_voltages, neg_seq_imbalance
def process_SS_branch_currents(LIS_file, branches, phases=('A', 'B', 'C'),
                               RMS_scale=False):
    '''
    Parse LIS_file for steady-state results and assemble per-branch current
    phasors.

    Returns (ph_br_currents, seq_br_currents, S_3ph): phase- and
    sequence-domain current columns ordered as `branches`, plus the complex
    power summed over phases at each branch's from-bus, scaled by 1e-6.
    '''
    node_voltages, branch_currents = get_SS_results(LIS_file, RMS_scale)
    # One column of per-phase currents for each (from, to) branch.
    current_cols = [[branch_currents[(fr + ph, to + ph)] for ph in phases]
                    for fr, to in branches]
    ph_br_currents = np.array(current_cols).T
    seq_br_currents = np.array(lineZ.ph_to_seq_v(ph_br_currents))
    # From-bus voltages for the power calculation.
    voltage_cols = [[node_voltages[fr + ph] for ph in phases]
                    for fr, to in branches]
    ph_voltages = np.array(voltage_cols).T
    S_3ph = np.sum(ph_voltages * np.conj(ph_br_currents), axis=0) / 1e6
    return ph_br_currents, seq_br_currents, S_3ph
def get_line_params_from_pch(atp_pch_folder, seg_list):
    """ Reads in line parameters from PCH files, saves the data, and combines
        data into an aggregate summary of several parameters. Returns two
        dicts as a tuple:
        seg_data_dict: {seg: params} with a LineConstPCHCards object for
            each segment in seg_list.
        summary_data_dict: parameters with the line segments combined into
            an equivalent:
            Zsum, Ysum: element-wise sums of the segment Z and Y matrices
            Zsum_s, Ysum_s: symmetrical components of Zsum & Ysum
            ABCD: transfer matrix in phase quantities
            Zeq, Yeq: equivalent Z and Y matrices derived from ABCD
            ABCD_s, Zeq_s, Yeq_s: symmetrical components of the prev. three."""
    seg_data_dict = {}
    for seg in seg_list:
        pch_path = os.path.join(atp_pch_folder, seg + '.pch')
        with open(pch_path) as pch_file:
            pch_lines = pch_file.readlines()
        params = LineConstPCHCards()
        params.read(pch_lines)
        seg_data_dict[seg] = params
    # Segment order follows seg_list (dict preserves insertion order).
    segments = list(seg_data_dict.values())
    summary_data_dict = {}
    summary_data_dict['Zsum'] = np.sum([p.Z for p in segments], axis=0)
    summary_data_dict['Ysum'] = np.sum([p.Y for p in segments], axis=0)
    summary_data_dict['Zsum_s'] = lineZ.ph_to_seq_m(summary_data_dict['Zsum'])
    summary_data_dict['Ysum_s'] = lineZ.ph_to_seq_m(summary_data_dict['Ysum'])
    # Cascade the per-segment transfer matrices into one equivalent.
    summary_data_dict['ABCD'] = lineZ.combine_ABCD([p.ABCD for p in segments])
    Z, Y1, Y2 = lineZ.ABCD_to_ZY(summary_data_dict['ABCD'])
    summary_data_dict['Zeq'] = Z
    summary_data_dict['Yeq'] = Y1 + Y2
    summary_data_dict['ABCD_s'] = lineZ.ph_to_seq_m(summary_data_dict['ABCD'])
    Z, Y1, Y2 = lineZ.ABCD_to_ZY(summary_data_dict['ABCD_s'])
    summary_data_dict['Zeq_s'] = Z
    summary_data_dict['Yeq_s'] = Y1 + Y2
    return seg_data_dict, summary_data_dict
def extract_ABCD(ATP_template, ATP_tmp, current_key, switch_key,
                 in_port, out_port,
                 test_current = 500., switch_close_t = '999.',
                 phases = ('A', 'B', 'C')):
    """Derive a two-port ABCD transfer matrix by running ATP test cases.

    For each phase, the template's current_key placeholders are replaced
    with test_current on the phase under test and test_current/1000 on the
    other phases; the switch_key placeholder selects output port shorted
    ('-1') or open ('1').  ATP is run, the steady-state voltages/currents
    at in_port and out_port are read back, and the 6x6 matrix
    [[A, B], [C, D]] is assembled from the open/short measurements.

    NOTE(review): switch_close_t is accepted but never used in this body.
    """
    # ATP_tmp should be in the same directory as ATP_template since most likely
    # the model will include includes of .lib files for line parameters.
    ATP_tmp_full = os.path.join(os.path.dirname(ATP_template), ATP_tmp)
    # Measurement matrices: suffix _s = output shorted, _o = output open.
    V1_s = np.zeros((3,3), dtype=np.complex128)
    V1_o = np.zeros((3,3), dtype=np.complex128)
    I1_s = np.zeros((3,3), dtype=np.complex128)
    I1_o = np.zeros((3,3), dtype=np.complex128)
    V2_o = np.zeros((3,3), dtype=np.complex128)
    I2_s = np.zeros((3,3), dtype=np.complex128)
    for out_port_short in (True, False):
        for n, ph in enumerate(phases):
            shutil.copyfile(ATP_template, ATP_tmp_full)
            # Find/replace code numbers in template ATP file and copy to new file
            # NOTE(review): each replace_text call replaces the (n+1)-th
            # *remaining* occurrence of current_key, so the mapping of
            # values to sources depends on replacement order — verify
            # against the template layout.
            for n2, ph2 in enumerate(phases):
                replace_text(ATP_tmp_full, current_key,
                             ('%6f.' % (test_current if n2==n else test_current/1000.)),
                             n=n+1)
            replace_text(ATP_tmp_full, switch_key, '-1' if out_port_short else '1')
            # Run ATP on new file
            run_ATP(ATP_tmp_full)
            # Extract steady-state results
            LIS_results = lis_filename(ATP_tmp_full)
            node_voltages, branch_currents = get_SS_results(LIS_results)
            if out_port_short:
                for n2, ph2 in enumerate(phases):
                    V1_s[n2, n] = node_voltages[node_ph(in_port[0], ph2)]
                    I1_s[n2, n] = branch_currents[(node_ph(in_port[0], ph2),
                                                   node_ph(in_port[1], ph2))]
                    I2_s[n2, n] = branch_currents[(node_ph(out_port[0], ph2),
                                                   node_ph(out_port[1], ph2))] # removed +ph2 because 'TERRA'
            else:
                for n2, ph2 in enumerate(phases):
                    V1_o[n2, n] = node_voltages[node_ph(in_port[0], ph2)]
                    I1_o[n2, n] = branch_currents[(node_ph(in_port[0], ph2), node_ph(in_port[1], ph2))]
                    V2_o[n2, n] = node_voltages[node_ph(out_port[0], ph2)]
    # Two-port relations: [V1; I1] = ABCD @ [V2; I2].
    A = V1_o.dot(inv(V2_o))
    B = V1_s.dot(inv(I2_s))
    C = I1_o.dot(inv(V2_o))
    D = I1_s.dot(inv(I2_s))
    ABCD = np.array(np.bmat([[A, B], [C, D]]))
    return ABCD
# Parsing function for reading / modifying line constant cards. See Rule Book
# Chapter 21. Below is an example of such a card for a three-phase line.
# (The bare string literal is illustrative only; it is not used at runtime.)
'''
BEGIN NEW DATA CASE
LINE CONSTANTS
$ERASE
BRANCH IN___AOUT__AIN___BOUT__BIN___COUT__C
ENGLISH
3 0.0 .1357 0 .3959 1.18 5. 42. 30.
1 0.0 .1357 0 .3959 1.18 -5. 49. 37.
2 0.0 .1357 0 .3959 1.18 5. 56. 44.
0 0.0 .6609 0 .4883 .551 -.5 65. 58.
BLANK CARD ENDING CONDUCTOR CARDS
50. 60. 000001 001000 0 5.98 0 44
$PUNCH
BLANK CARD ENDING FREQUENCY CARDS
BLANK CARD ENDING LINE CONSTANT
BEGIN NEW DATA CASE
BLANK CARD
'''
class LineConstCards(tdc.DataCardStack):
    ''' Stack of cards for a line constants case.
    This is Based on what ATPDraw creates.'''
    def __init__(self):
        # One fixed-width card per physical conductor.
        conductor_card = tdc.DataCard('(I3, F5.4, F8.5, I2, F8.5, F8.5, F8.3, F8.3, F8.3)',
            ['IP', 'SKIN', 'RESIS', 'IX', 'REACT', 'DIAM', 'HORIZ', 'VTOWER', 'VMID'])
        end_conductors = tdc.DataCardFixedText('BLANK CARD ENDING CONDUCTOR CARDS')
        # Conductor cards repeat until the fixed terminator card.
        conductor_cards = tdc.DataCardRepeat(conductor_card, end_record = end_conductors,
                                              name = 'conductors')
        tdc.DataCardStack.__init__(self,
            [tdc.DataCardFixedText('BEGIN NEW DATA CASE'),
             tdc.DataCardFixedText('LINE CONSTANTS'),
             tdc.DataCardFixedText('$ERASE'),
             # Branch card: the six node names (3 phases, in/out).
             tdc.DataCard('(A8,6A6)', ['branch_card', 'in1', 'out1', 'in2',
                                       'out2', 'in3', 'out3']),
             # Units selection line.
             tdc.DataCard('(A80)', ['units']),
             conductor_cards,
             # Frequency/output-option card (RHO, FREQ and output flags).
             tdc.DataCard('(F8.2, F10.2, A10, A1, 6I1, A1, 6I1, A1, I1, F8.3, A1, 4I1, I1, A7, I3)',
                          ['RHO', 'FREQ', 'FCAR', '_1',
                           'inv_C', 'inv_Ce', 'inv_Cs',
                           'C', 'Ce', 'Cs', '_2',
                           'Z', 'Ze', 'Zs',
                           'inv_Z', 'inv_Ze', 'inv_Zg', '_3',
                           'ICAP',
                           'DIST', '_4',
                           'pi_Y', 'pi_Ys', 'pi_Z', 'pi_Zs',
                           'ISEG', '_5',
                           'PUN']),
             tdc.DataCardFixedText('$PUNCH'),
             tdc.DataCardFixedText('BLANK CARD ENDING FREQUENCY CARDS'),
             tdc.DataCardFixedText('BLANK CARD ENDING LINE CONSTANT'),
             tdc.DataCardFixedText('BEGIN NEW DATA CASE'),
             tdc.DataCardFixedText('BLANK CARD')
             ])
# Reusable card templates shared by the PCH-file parsers below.
# 'C ' comment card: fixed two-character tag, then free text.
comment_card = tdc.DataCard('A2, A78', ['C ', 'Comment'], fixed_fields=(0,))
# '$VINTAGE,' flag card.
vintage_card = tdc.DataCard('A9, A71', ['$VINTAGE,', 'Flag'], fixed_fields=(0,))
# '$UNITS,' flag card.
units_card = tdc.DataCard('A7, A73', ['$UNITS,', 'Flag'], fixed_fields=(0,))
class LineConstPCHCards(tdc.DataCardStack):
    """ Stack of cards output by a line constants case, based on three-phase
    line and the way ATPDraw runs the case. NOTE: L and C parameters are
    assumed to be in the file at nominal frequency. For 60 Hz,
    there should be a $UNITS card with values 60., 60.. This object will
    still read the data otherwise, but matrices may have mH or uF instead of
    Ohms and microSiemens.
    """
    def __init__(self):
        # Layout: leading comment cards, optional $VINTAGE/$UNITS flags,
        # repeated RLC parameter cards, then a closing $UNITS card.
        tdc.DataCardStack.__init__(self,
            [tdc.DataCardRepeat(comment_card),
             tdc.DataCardOptional(vintage_card),
             tdc.DataCardOptional(units_card),
             tdc.DataCardRepeat(tdc.DataCard('I2, 4A6, 3E16.0',
                                            ['PH', 'BUS1', 'BUS2', 'BUS3', 'BUS4', 'R', 'L', 'C']),
                                vintage_card, name='RLC_params'),
             units_card],
            post_read_hook=self._get_ZY_and_ABCD)
        # Populated by get_ZY() (triggered through the post-read hook).
        self.Z = None
        self.Y = None

    @staticmethod
    def _get_ZY_and_ABCD(pch_card):
        """ Callback for reading: derive Z/Y and ABCD after parsing. """
        pch_card.get_ZY()
        pch_card.get_ABCD()

    def get_ZY(self):
        """ Convert R, L, C parameters to Z and Y matrices
        It is assumed that PI parameters are calculated at nominal
        frequency. """
        # First compute the number of phases
        n = len(self.data['RLC_params'])
        # n = (n_ph+1)*n_ph/2  (number of lower-triangular elements)
        # Solve for n_ph using quadratic formula
        n_ph = int((sqrt(1+8*n) - 1)/2)
        # NOTE(review): `cdtype` is defined elsewhere in this module (not
        # visible here) — presumably a complex dtype such as np.complex128.
        Z = np.zeros((n_ph, n_ph), dtype=cdtype)
        Y = np.zeros((n_ph, n_ph), dtype=cdtype)
        idx = 0
        # Cards list the lower triangle row by row; mirror into full
        # symmetric matrices.
        for r in range(n_ph):
            for c in range(r+1):
                # R & L assumed to be in Ohms. (Ref R.B. IV.B.3)
                Z[r, c] = Z[c, r] = self.data['RLC_params'][idx]['R'] \
                    + 1j*self.data['RLC_params'][idx]['L']
                # C assumed to be in microSiemens. (Ref R.B. IV.B.3)
                # C value is total capacitance, which ATP then divides by
                # two for the pi model.
                Y[r, c] = Y[c, r] = 1e-6j*self.data['RLC_params'][idx]['C']
                idx += 1
        self.Z = Z
        self.Y = Y
        return Z, Y

    def get_ABCD(self):
        # Build the transfer matrix from the pi-equivalent Z and Y.
        self.ABCD = lineZ.ZY_to_ABCD(self.Z, self.Y)
        return self.ABCD
# Quick and dirty hack to build lib files. Will only work for three-phase lines.
# Fixed header text for the generated .lib file (argument wiring tables).
ATPline_lib_head = ['''KARD 4 4 5 5 7 7
KARG 1 4 2 5 3 6
KBEG 3 9 3 9 3 9
KEND 8 14 8 14 8 14
KTEX 1 1 1 1 1 1
/BRANCH
''']
# Fixed footer text declaring the library's node-name arguments.
ATPline_lib_foot = ['''$EOF
ARG, IN___A, IN___B, IN___C, OUT__A, OUT__B, OUT__C
''']
def make_ATPline_lib(pch_file_lines):
    '''
    Build the lines of a .lib file from the lines of a $PUNCH output file.

    Leading comment cards (prefix 'C ') are skipped; the remaining card
    lines are wrapped in the fixed header and footer above.

    Raises ValueError if the punch output does not contain exactly the 10
    card lines expected for a three-phase line.
    '''
    line_idx = 0
    # Find line where data starts. Assume we just have to skip comments 'C '
    while pch_file_lines[line_idx][:2] == 'C ':
        line_idx += 1
    card_lines = pch_file_lines[line_idx:]
    # Guard the simplified three-phase assumption explicitly. (This was a
    # bare assert, which is silently stripped under `python -O`.)
    if len(card_lines) != 10:
        raise ValueError('Expected 10 punch-card lines for a three-phase '
                         'line, got %d' % len(card_lines))
    return ATPline_lib_head + card_lines + ATPline_lib_foot
def main():
    '''
    Command-line entry point.

    No functionality is implemented yet; the arguments are simply echoed.
    In the future this could be used to call ATP and automatically export
    the steady-state results or run other analysis.
    '''
    import sys
    argv = sys.argv
    print(argv)
    print('No functionality implemented at this time.')
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
| [
"numpy.absolute",
"lineZ.ph_to_seq_m",
"pickle.dump",
"numpy.abs",
"numpy.angle",
"text_data_cards.DataCard",
"numpy.imag",
"os.path.join",
"text_data_cards.DataCardOptional",
"codecs.open",
"lineZ.ABCD_to_ZY",
"os.path.dirname",
"numpy.bmat",
"lineZ.ph_to_seq_v",
"numpy.real",
"shutil... | [((23216, 23277), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""A2, A78"""', "['C ', 'Comment']"], {'fixed_fields': '(0,)'}), "('A2, A78', ['C ', 'Comment'], fixed_fields=(0,))\n", (23228, 23277), True, 'import text_data_cards as tdc\n'), ((23293, 23358), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""A9, A71"""', "['$VINTAGE,', 'Flag']"], {'fixed_fields': '(0,)'}), "('A9, A71', ['$VINTAGE,', 'Flag'], fixed_fields=(0,))\n", (23305, 23358), True, 'import text_data_cards as tdc\n'), ((23372, 23435), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""A7, A73"""', "['$UNITS,', 'Flag']"], {'fixed_fields': '(0,)'}), "('A7, A73', ['$UNITS,', 'Flag'], fixed_fields=(0,))\n", (23384, 23435), True, 'import text_data_cards as tdc\n'), ((17322, 17366), 'lineZ.ph_to_seq_m', 'lineZ.ph_to_seq_m', (["summary_data_dict['Zsum']"], {}), "(summary_data_dict['Zsum'])\n", (17339, 17366), False, 'import lineZ\n'), ((17401, 17445), 'lineZ.ph_to_seq_m', 'lineZ.ph_to_seq_m', (["summary_data_dict['Ysum']"], {}), "(summary_data_dict['Ysum'])\n", (17418, 17445), False, 'import lineZ\n'), ((17537, 17566), 'lineZ.combine_ABCD', 'lineZ.combine_ABCD', (['ABCD_list'], {}), '(ABCD_list)\n', (17555, 17566), False, 'import lineZ\n'), ((17583, 17626), 'lineZ.ABCD_to_ZY', 'lineZ.ABCD_to_ZY', (["summary_data_dict['ABCD']"], {}), "(summary_data_dict['ABCD'])\n", (17599, 17626), False, 'import lineZ\n'), ((17733, 17777), 'lineZ.ph_to_seq_m', 'lineZ.ph_to_seq_m', (["summary_data_dict['ABCD']"], {}), "(summary_data_dict['ABCD'])\n", (17750, 17777), False, 'import lineZ\n'), ((17794, 17839), 'lineZ.ABCD_to_ZY', 'lineZ.ABCD_to_ZY', (["summary_data_dict['ABCD_s']"], {}), "(summary_data_dict['ABCD_s'])\n", (17810, 17839), False, 'import lineZ\n'), ((18410, 18447), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.complex128'}), '((3, 3), dtype=np.complex128)\n', (18418, 18447), True, 'import numpy as np\n'), ((18458, 18495), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 
'np.complex128'}), '((3, 3), dtype=np.complex128)\n', (18466, 18495), True, 'import numpy as np\n'), ((18506, 18543), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.complex128'}), '((3, 3), dtype=np.complex128)\n', (18514, 18543), True, 'import numpy as np\n'), ((18554, 18591), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.complex128'}), '((3, 3), dtype=np.complex128)\n', (18562, 18591), True, 'import numpy as np\n'), ((18602, 18639), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.complex128'}), '((3, 3), dtype=np.complex128)\n', (18610, 18639), True, 'import numpy as np\n'), ((18650, 18687), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.complex128'}), '((3, 3), dtype=np.complex128)\n', (18658, 18687), True, 'import numpy as np\n'), ((2553, 2620), 'codecs.open', 'codecs.open', (['ATP_file', '"""r"""'], {'encoding': '"""iso-8859-1"""', 'errors': '"""replace"""'}), "(ATP_file, 'r', encoding='iso-8859-1', errors='replace')\n", (2564, 2620), False, 'import subprocess, re, csv, codecs, shutil\n'), ((3013, 3121), 'codecs.open', 'codecs.open', (['(outfile if outfile is not None else ATP_file)', '"""w"""'], {'encoding': '"""iso-8859-1"""', 'errors': '"""replace"""'}), "(outfile if outfile is not None else ATP_file, 'w', encoding=\n 'iso-8859-1', errors='replace')\n", (3024, 3121), False, 'import subprocess, re, csv, codecs, shutil\n'), ((4511, 4583), 're.compile', 're.compile', (['"""^Sinusoidal steady-state phasor solution, branch by branch"""'], {}), "('^Sinusoidal steady-state phasor solution, branch by branch')\n", (4521, 4583), False, 'import subprocess, re, csv, codecs, shutil\n'), ((4606, 4672), 're.compile', 're.compile', (['"""^ *Total network loss P-loss by summing injections"""'], {}), "('^ *Total network loss P-loss by summing injections')\n", (4616, 4672), False, 'import subprocess, re, csv, codecs, shutil\n'), ((4695, 4757), 're.compile', 're.compile', (['"""^Output for steady-state phasor switch currents."""'], {}), "('^Output 
for steady-state phasor switch currents.')\n", (4705, 4757), False, 'import subprocess, re, csv, codecs, shutil\n'), ((10132, 10163), 'pickle.dump', 'pickle.dump', (['data_list', 'binfile'], {}), '(data_list, binfile)\n', (10143, 10163), False, 'import pickle\n'), ((10224, 10264), 'csv.writer', 'csv.writer', (['csvfile'], {'lineterminator': '"""\n"""'}), "(csvfile, lineterminator='\\n')\n", (10234, 10264), False, 'import subprocess, re, csv, codecs, shutil\n'), ((14607, 14672), 'numpy.array', 'np.array', (['[[node_voltages[b + p] for p in phases] for b in buses]'], {}), '([[node_voltages[b + p] for p in phases] for b in buses])\n', (14615, 14672), True, 'import numpy as np\n'), ((14701, 14731), 'lineZ.ph_to_seq_v', 'lineZ.ph_to_seq_v', (['ph_voltages'], {}), '(ph_voltages)\n', (14718, 14731), False, 'import lineZ\n'), ((14757, 14798), 'numpy.abs', 'np.abs', (['(seq_voltages[2] / seq_voltages[1])'], {}), '(seq_voltages[2] / seq_voltages[1])\n', (14763, 14798), True, 'import numpy as np\n'), ((15336, 15432), 'numpy.array', 'np.array', (['[[branch_currents[fr_b + p, to_b + p] for p in phases] for fr_b, to_b in\n branches]'], {}), '([[branch_currents[fr_b + p, to_b + p] for p in phases] for fr_b,\n to_b in branches])\n', (15344, 15432), True, 'import numpy as np\n'), ((15482, 15515), 'lineZ.ph_to_seq_v', 'lineZ.ph_to_seq_v', (['ph_br_currents'], {}), '(ph_br_currents)\n', (15499, 15515), False, 'import lineZ\n'), ((15572, 15657), 'numpy.array', 'np.array', (['[[node_voltages[fr_b + p] for p in phases] for fr_b, to_b in branches]'], {}), '([[node_voltages[fr_b + p] for p in phases] for fr_b, to_b in branches]\n )\n', (15580, 15657), True, 'import numpy as np\n'), ((18358, 18387), 'os.path.dirname', 'os.path.dirname', (['ATP_template'], {}), '(ATP_template)\n', (18373, 18387), False, 'import itertools, copy, os\n'), ((20275, 20284), 'numpy.linalg.inv', 'inv', (['V2_o'], {}), '(V2_o)\n', (20278, 20284), False, 'from numpy.linalg import inv\n'), ((20303, 20312), 
'numpy.linalg.inv', 'inv', (['I2_s'], {}), '(I2_s)\n', (20306, 20312), False, 'from numpy.linalg import inv\n'), ((20331, 20340), 'numpy.linalg.inv', 'inv', (['V2_o'], {}), '(V2_o)\n', (20334, 20340), False, 'from numpy.linalg import inv\n'), ((20359, 20368), 'numpy.linalg.inv', 'inv', (['I2_s'], {}), '(I2_s)\n', (20362, 20368), False, 'from numpy.linalg import inv\n'), ((20391, 20416), 'numpy.bmat', 'np.bmat', (['[[A, B], [C, D]]'], {}), '([[A, B], [C, D]])\n', (20398, 20416), True, 'import numpy as np\n'), ((21333, 21478), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""(I3, F5.4, F8.5, I2, F8.5, F8.5, F8.3, F8.3, F8.3)"""', "['IP', 'SKIN', 'RESIS', 'IX', 'REACT', 'DIAM', 'HORIZ', 'VTOWER', 'VMID']"], {}), "('(I3, F5.4, F8.5, I2, F8.5, F8.5, F8.3, F8.3, F8.3)', ['IP',\n 'SKIN', 'RESIS', 'IX', 'REACT', 'DIAM', 'HORIZ', 'VTOWER', 'VMID'])\n", (21345, 21478), True, 'import text_data_cards as tdc\n'), ((21533, 21591), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""BLANK CARD ENDING CONDUCTOR CARDS"""'], {}), "('BLANK CARD ENDING CONDUCTOR CARDS')\n", (21554, 21591), True, 'import text_data_cards as tdc\n'), ((21618, 21703), 'text_data_cards.DataCardRepeat', 'tdc.DataCardRepeat', (['conductor_card'], {'end_record': 'end_conductors', 'name': '"""conductors"""'}), "(conductor_card, end_record=end_conductors, name='conductors'\n )\n", (21636, 21703), True, 'import text_data_cards as tdc\n'), ((24966, 25002), 'numpy.zeros', 'np.zeros', (['(n_ph, n_ph)'], {'dtype': 'cdtype'}), '((n_ph, n_ph), dtype=cdtype)\n', (24974, 25002), True, 'import numpy as np\n'), ((25015, 25051), 'numpy.zeros', 'np.zeros', (['(n_ph, n_ph)'], {'dtype': 'cdtype'}), '((n_ph, n_ph), dtype=cdtype)\n', (25023, 25051), True, 'import numpy as np\n'), ((25730, 25762), 'lineZ.ZY_to_ABCD', 'lineZ.ZY_to_ABCD', (['self.Z', 'self.Y'], {}), '(self.Z, self.Y)\n', (25746, 25762), False, 'import lineZ\n'), ((1071, 1102), 'os.path.join', 'os.path.join', (['ATP_path', 'ATP_exe'], {}), 
'(ATP_path, ATP_exe)\n', (1083, 1102), False, 'import itertools, copy, os\n'), ((1312, 1357), 're.match', 're.match', (['"""(.*)\\\\.atp$"""', 'ATP_file'], {'flags': 're.I'}), "('(.*)\\\\.atp$', ATP_file, flags=re.I)\n", (1320, 1357), False, 'import subprocess, re, csv, codecs, shutil\n'), ((4258, 4265), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (4262, 4265), False, 'from math import sqrt\n'), ((18781, 18824), 'shutil.copyfile', 'shutil.copyfile', (['ATP_template', 'ATP_tmp_full'], {}), '(ATP_template, ATP_tmp_full)\n', (18796, 18824), False, 'import subprocess, re, csv, codecs, shutil\n'), ((15706, 15729), 'numpy.conj', 'np.conj', (['ph_br_currents'], {}), '(ph_br_currents)\n', (15713, 15729), True, 'import numpy as np\n'), ((16774, 16816), 'os.path.join', 'os.path.join', (['atp_pch_folder', "(seg + '.pch')"], {}), "(atp_pch_folder, seg + '.pch')\n", (16786, 16816), False, 'import itertools, copy, os\n'), ((21799, 21843), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""BEGIN NEW DATA CASE"""'], {}), "('BEGIN NEW DATA CASE')\n", (21820, 21843), True, 'import text_data_cards as tdc\n'), ((21858, 21897), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""LINE CONSTANTS"""'], {}), "('LINE CONSTANTS')\n", (21879, 21897), True, 'import text_data_cards as tdc\n'), ((21912, 21943), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""$ERASE"""'], {}), "('$ERASE')\n", (21933, 21943), True, 'import text_data_cards as tdc\n'), ((21958, 22048), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""(A8,6A6)"""', "['branch_card', 'in1', 'out1', 'in2', 'out2', 'in3', 'out3']"], {}), "('(A8,6A6)', ['branch_card', 'in1', 'out1', 'in2', 'out2',\n 'in3', 'out3'])\n", (21970, 22048), True, 'import text_data_cards as tdc\n'), ((22098, 22130), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""(A80)"""', "['units']"], {}), "('(A80)', ['units'])\n", (22110, 22130), True, 'import text_data_cards as tdc\n'), ((22186, 22502), 
'text_data_cards.DataCard', 'tdc.DataCard', (['"""(F8.2, F10.2, A10, A1, 6I1, A1, 6I1, A1, I1, F8.3, A1, 4I1, I1, A7, I3)"""', "['RHO', 'FREQ', 'FCAR', '_1', 'inv_C', 'inv_Ce', 'inv_Cs', 'C', 'Ce', 'Cs',\n '_2', 'Z', 'Ze', 'Zs', 'inv_Z', 'inv_Ze', 'inv_Zg', '_3', 'ICAP',\n 'DIST', '_4', 'pi_Y', 'pi_Ys', 'pi_Z', 'pi_Zs', 'ISEG', '_5', 'PUN']"], {}), "(\n '(F8.2, F10.2, A10, A1, 6I1, A1, 6I1, A1, I1, F8.3, A1, 4I1, I1, A7, I3)',\n ['RHO', 'FREQ', 'FCAR', '_1', 'inv_C', 'inv_Ce', 'inv_Cs', 'C', 'Ce',\n 'Cs', '_2', 'Z', 'Ze', 'Zs', 'inv_Z', 'inv_Ze', 'inv_Zg', '_3', 'ICAP',\n 'DIST', '_4', 'pi_Y', 'pi_Ys', 'pi_Z', 'pi_Zs', 'ISEG', '_5', 'PUN'])\n", (22198, 22502), True, 'import text_data_cards as tdc\n'), ((22900, 22931), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""$PUNCH"""'], {}), "('$PUNCH')\n", (22921, 22931), True, 'import text_data_cards as tdc\n'), ((22946, 23004), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""BLANK CARD ENDING FREQUENCY CARDS"""'], {}), "('BLANK CARD ENDING FREQUENCY CARDS')\n", (22967, 23004), True, 'import text_data_cards as tdc\n'), ((23019, 23075), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""BLANK CARD ENDING LINE CONSTANT"""'], {}), "('BLANK CARD ENDING LINE CONSTANT')\n", (23040, 23075), True, 'import text_data_cards as tdc\n'), ((23090, 23134), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""BEGIN NEW DATA CASE"""'], {}), "('BEGIN NEW DATA CASE')\n", (23111, 23134), True, 'import text_data_cards as tdc\n'), ((23149, 23184), 'text_data_cards.DataCardFixedText', 'tdc.DataCardFixedText', (['"""BLANK CARD"""'], {}), "('BLANK CARD')\n", (23170, 23184), True, 'import text_data_cards as tdc\n'), ((23979, 24011), 'text_data_cards.DataCardRepeat', 'tdc.DataCardRepeat', (['comment_card'], {}), '(comment_card)\n', (23997, 24011), True, 'import text_data_cards as tdc\n'), ((24026, 24060), 'text_data_cards.DataCardOptional', 'tdc.DataCardOptional', 
(['vintage_card'], {}), '(vintage_card)\n', (24046, 24060), True, 'import text_data_cards as tdc\n'), ((24075, 24107), 'text_data_cards.DataCardOptional', 'tdc.DataCardOptional', (['units_card'], {}), '(units_card)\n', (24095, 24107), True, 'import text_data_cards as tdc\n'), ((9899, 9944), 're.match', 're.match', (['"""(.*)\\\\.lis$"""', 'LIS_file'], {'flags': 're.I'}), "('(.*)\\\\.lis$', LIS_file, flags=re.I)\n", (9907, 9944), False, 'import subprocess, re, csv, codecs, shutil\n'), ((10015, 10060), 're.match', 're.match', (['"""(.*)\\\\.lis$"""', 'LIS_file'], {'flags': 're.I'}), "('(.*)\\\\.lis$', LIS_file, flags=re.I)\n", (10023, 10060), False, 'import subprocess, re, csv, codecs, shutil\n'), ((24141, 24231), 'text_data_cards.DataCard', 'tdc.DataCard', (['"""I2, 4A6, 3E16.0"""', "['PH', 'BUS1', 'BUS2', 'BUS3', 'BUS4', 'R', 'L', 'C']"], {}), "('I2, 4A6, 3E16.0', ['PH', 'BUS1', 'BUS2', 'BUS3', 'BUS4', 'R',\n 'L', 'C'])\n", (24153, 24231), True, 'import text_data_cards as tdc\n'), ((24934, 24949), 'math.sqrt', 'sqrt', (['(1 + 8 * n)'], {}), '(1 + 8 * n)\n', (24938, 24949), False, 'from math import sqrt\n'), ((10413, 10519), 'itertools.chain', 'itertools.chain', (["*[('%s-phase Voltage (Real)' % ph, '%s-phase Voltage (Imag)' % ph) for ph in\n phases]"], {}), "(*[('%s-phase Voltage (Real)' % ph, \n '%s-phase Voltage (Imag)' % ph) for ph in phases])\n", (10428, 10519), False, 'import itertools, copy, os\n'), ((10552, 10694), 'itertools.chain', 'itertools.chain', (["*[('%s-sequence Voltage (Real)' % ph, '%s-sequence Voltage (Imag)' % ph) for\n ph in ('Zero', 'Positive', 'Negative')]"], {}), "(*[('%s-sequence Voltage (Real)' % ph, \n '%s-sequence Voltage (Imag)' % ph) for ph in ('Zero', 'Positive',\n 'Negative')])\n", (10567, 10694), False, 'import itertools, copy, os\n'), ((10723, 10826), 'itertools.chain', 'itertools.chain', (["*[('%s-phase Voltage (Mag)' % ph, '%s-phase Voltage (Ang)' % ph) for ph in\n phases]"], {}), "(*[('%s-phase Voltage (Mag)' % ph, '%s-phase 
Voltage (Ang)' %\n ph) for ph in phases])\n", (10738, 10826), False, 'import itertools, copy, os\n'), ((10858, 10998), 'itertools.chain', 'itertools.chain', (["*[('%s-sequence Voltage (Mag)' % ph, '%s-sequence Voltage (Ang)' % ph) for\n ph in ('Zero', 'Positive', 'Negative')]"], {}), "(*[('%s-sequence Voltage (Mag)' % ph, \n '%s-sequence Voltage (Ang)' % ph) for ph in ('Zero', 'Positive',\n 'Negative')])\n", (10873, 10998), False, 'import itertools, copy, os\n'), ((12077, 12183), 'itertools.chain', 'itertools.chain', (["*[('%s-phase Voltage (Real)' % ph, '%s-phase Voltage (Imag)' % ph) for ph in\n phases]"], {}), "(*[('%s-phase Voltage (Real)' % ph, \n '%s-phase Voltage (Imag)' % ph) for ph in phases])\n", (12092, 12183), False, 'import itertools, copy, os\n'), ((12214, 12356), 'itertools.chain', 'itertools.chain', (["*[('%s-sequence Voltage (Real)' % ph, '%s-sequence Voltage (Imag)' % ph) for\n ph in ('Zero', 'Positive', 'Negative')]"], {}), "(*[('%s-sequence Voltage (Real)' % ph, \n '%s-sequence Voltage (Imag)' % ph) for ph in ('Zero', 'Positive',\n 'Negative')])\n", (12229, 12356), False, 'import itertools, copy, os\n'), ((12383, 12486), 'itertools.chain', 'itertools.chain', (["*[('%s-phase Voltage (Mag)' % ph, '%s-phase Voltage (Ang)' % ph) for ph in\n phases]"], {}), "(*[('%s-phase Voltage (Mag)' % ph, '%s-phase Voltage (Ang)' %\n ph) for ph in phases])\n", (12398, 12486), False, 'import itertools, copy, os\n'), ((12518, 12658), 'itertools.chain', 'itertools.chain', (["*[('%s-sequence Voltage (Mag)' % ph, '%s-sequence Voltage (Ang)' % ph) for\n ph in ('Zero', 'Positive', 'Negative')]"], {}), "(*[('%s-sequence Voltage (Mag)' % ph, \n '%s-sequence Voltage (Ang)' % ph) for ph in ('Zero', 'Positive',\n 'Negative')])\n", (12533, 12658), False, 'import itertools, copy, os\n'), ((11218, 11244), 'numpy.real', 'np.real', (['ph_voltages[:, n]'], {}), '(ph_voltages[:, n])\n', (11225, 11244), True, 'import numpy as np\n'), ((11274, 11300), 'numpy.imag', 'np.imag', 
(['ph_voltages[:, n]'], {}), '(ph_voltages[:, n])\n', (11281, 11300), True, 'import numpy as np\n'), ((11348, 11375), 'numpy.real', 'np.real', (['seq_voltages[:, n]'], {}), '(seq_voltages[:, n])\n', (11355, 11375), True, 'import numpy as np\n'), ((11405, 11432), 'numpy.imag', 'np.imag', (['seq_voltages[:, n]'], {}), '(seq_voltages[:, n])\n', (11412, 11432), True, 'import numpy as np\n'), ((11480, 11510), 'numpy.absolute', 'np.absolute', (['ph_voltages[:, n]'], {}), '(ph_voltages[:, n])\n', (11491, 11510), True, 'import numpy as np\n'), ((11539, 11576), 'numpy.angle', 'np.angle', (['ph_voltages[:, n]'], {'deg': '(True)'}), '(ph_voltages[:, n], deg=True)\n', (11547, 11576), True, 'import numpy as np\n'), ((11625, 11656), 'numpy.absolute', 'np.absolute', (['seq_voltages[:, n]'], {}), '(seq_voltages[:, n])\n', (11636, 11656), True, 'import numpy as np\n'), ((11685, 11723), 'numpy.angle', 'np.angle', (['seq_voltages[:, n]'], {'deg': '(True)'}), '(seq_voltages[:, n], deg=True)\n', (11693, 11723), True, 'import numpy as np\n'), ((12885, 12914), 'numpy.real', 'np.real', (['ph_br_currents[:, n]'], {}), '(ph_br_currents[:, n])\n', (12892, 12914), True, 'import numpy as np\n'), ((12944, 12973), 'numpy.imag', 'np.imag', (['ph_br_currents[:, n]'], {}), '(ph_br_currents[:, n])\n', (12951, 12973), True, 'import numpy as np\n'), ((13021, 13051), 'numpy.real', 'np.real', (['seq_br_currents[:, n]'], {}), '(seq_br_currents[:, n])\n', (13028, 13051), True, 'import numpy as np\n'), ((13081, 13111), 'numpy.imag', 'np.imag', (['seq_br_currents[:, n]'], {}), '(seq_br_currents[:, n])\n', (13088, 13111), True, 'import numpy as np\n'), ((13159, 13192), 'numpy.absolute', 'np.absolute', (['ph_br_currents[:, n]'], {}), '(ph_br_currents[:, n])\n', (13170, 13192), True, 'import numpy as np\n'), ((13221, 13261), 'numpy.angle', 'np.angle', (['ph_br_currents[:, n]'], {'deg': '(True)'}), '(ph_br_currents[:, n], deg=True)\n', (13229, 13261), True, 'import numpy as np\n'), ((13310, 13344), 
'numpy.absolute', 'np.absolute', (['seq_br_currents[:, n]'], {}), '(seq_br_currents[:, n])\n', (13321, 13344), True, 'import numpy as np\n'), ((13373, 13414), 'numpy.angle', 'np.angle', (['seq_br_currents[:, n]'], {'deg': '(True)'}), '(seq_br_currents[:, n], deg=True)\n', (13381, 13414), True, 'import numpy as np\n')] |
import torch.nn as nn
from functools import partial
import torch # NOQA
from clab.torch.models.output_shape_for import OutputShapeFor
from collections import OrderedDict
class Conv3DBlock(nn.Module):
    """
    A 3D convolution block: ``n_conv`` Conv3d layers, each followed by a
    LeakyReLU, capped with a single MaxPool3d.

    >>> block = Conv3DBlock(in_channels=64, out_channels=128, n_conv=2)
    >>> block.output_shape_for([1, 3, 16, 112, 112])
    (1, 128, 8, 56, 56)
    >>> block = Conv3DBlock(in_channels=64, out_channels=128, conv_kernel=4, n_conv=1)
    >>> block.output_shape_for([1, 3, 16, 112, 112])
    (1, 128, 7, 55, 55)
    >>> block = Conv3DBlock(in_channels=64, out_channels=128, conv_kernel=4, n_conv=3)
    >>> block.output_shape_for([1, 3, 16, 112, 112])
    (1, 128, 6, 54, 54)
    """
    def __init__(self, in_channels, out_channels, n_conv=1,
                 conv_kernel=(3, 3, 3), conv_padding=1,
                 pool_kernel=(2, 2, 2), pool_stride=(2, 2, 2)):
        super(Conv3DBlock, self).__init__()
        assert n_conv >= 1
        make_nonlin = partial(nn.LeakyReLU, negative_slope=1e-2,
                              inplace=False)

        def make_conv(n_in):
            # All convolutions share kernel/padding; only the input width varies.
            return nn.Conv3d(in_channels=n_in,
                             out_channels=out_channels,
                             kernel_size=conv_kernel,
                             padding=conv_padding)

        # First convolution maps in_channels -> out_channels, the rest keep
        # out_channels; every convolution is followed by a nonlinearity.
        layers = [('conv0', make_conv(in_channels)), ('nonlin0', make_nonlin())]
        for idx in range(1, n_conv):
            layers.append(('conv' + str(idx), make_conv(out_channels)))
            layers.append(('nonlin' + str(idx), make_nonlin()))
        layers.append(('pool', nn.MaxPool3d(kernel_size=pool_kernel,
                                             stride=pool_stride)))
        self.sequence = nn.Sequential(OrderedDict(layers))

    def output_shape_for(self, input_shape):
        """Infer the block's output shape for a given input shape."""
        return OutputShapeFor(self.sequence)(input_shape)

    def forward(self, inputs):
        """Apply the conv / nonlinearity stack and the final pooling."""
        return self.sequence(inputs)
class FCBlock(nn.Module):
    """A single fully-connected layer followed by a LeakyReLU nonlinearity."""
    def __init__(self, n_inputs, out_channels):
        super(FCBlock, self).__init__()
        make_nonlin = partial(nn.LeakyReLU, negative_slope=1e-2,
                              inplace=False)
        named = OrderedDict()
        named['fc'] = nn.Linear(n_inputs, out_channels)
        named['nonlin'] = make_nonlin()
        self.sequence = nn.Sequential(named)

    def output_shape_for(self, input_shape):
        """Infer the block's output shape for a given input shape."""
        return OutputShapeFor(self.sequence)(input_shape)

    def forward(self, inputs):
        """Apply the linear layer and the nonlinearity."""
        return self.sequence(inputs)
class C3D(nn.Module):
    """
    The C3D network as described in [1].

    References:
        [1] <NAME>, et al. "Learning spatiotemporal features with 3d convolutional networks."
            Proceedings of the IEEE international conference on computer vision. 2015.

    Notes:
        * According to the findings in 2D ConvNet [37], small receptive fields
          of 3 x 3 convolution kernels with deeper architectures yield best
          results. Hence, for our architecture search study we fix the spatial
          receptive field to 3 x 3 and vary only the temporal depth of the 3D
          convolution kernels

    References:
        https://github.com/DavideA/c3d-pytorch/blob/master/C3D_model.py

    Example:
        >>> B, C, D, H, W = 1, 3, 16, 112, 112
        >>> # default input shape is
        >>> input_shape = [B, C, D, H, W]
        >>> inputs = torch.autograd.Variable(torch.randn(input_shape)).cpu()
        >>> #input_shape = [None, C, D, H, W]
        >>> self = C3D(input_shape)
        >>> outputs = self(inputs)
    """
    def __init__(self, input_shape):
        """
        Args:
            input_shape: expected (B, C, D, H, W) input shape; used to size
                the first fully-connected layer.
        """
        super(C3D, self).__init__()
        import numpy as np
        # Channel widths of the five convolutional stages (kernels are D, H, W).
        widths = [64, 128, 256, 512, 512]
        named_blocks = OrderedDict()
        # The first block pools only spatially so early temporal detail survives.
        named_blocks['block1'] = Conv3DBlock(in_channels=3, out_channels=widths[0], n_conv=1,
                                             pool_kernel=(1, 2, 2), pool_stride=(1, 2, 2))
        named_blocks['block2'] = Conv3DBlock(in_channels=widths[0], out_channels=widths[1], n_conv=1)
        named_blocks['block3'] = Conv3DBlock(in_channels=widths[1], out_channels=widths[2], n_conv=2)
        named_blocks['block4'] = Conv3DBlock(in_channels=widths[2], out_channels=widths[3], n_conv=2)
        named_blocks['block5'] = Conv3DBlock(in_channels=widths[3], out_channels=widths[4], n_conv=2)
        conv_blocks = nn.Sequential(named_blocks)

        output_shape = OutputShapeFor(conv_blocks)(input_shape)
        print('output_shape = {!r}'.format(output_shape))

        self.input_shape = input_shape
        self.conv_blocks = conv_blocks
        # Flattened feature count feeding the first fully-connected block.
        self.n_conv_output = int(np.prod(output_shape[1:]))
        self.block6 = FCBlock(self.n_conv_output, 4096)
        self.block7 = FCBlock(4096, 4096)
        self.softmax = nn.Softmax(dim=1)

    def debug(self, inputs):
        """Run each conv block and assert its output matches output_shape_for."""
        hidden = inputs
        for block in self.conv_blocks.children():
            expected = block.output_shape_for(hidden.shape)
            hidden = block(hidden)
            assert hidden.shape == expected

    def forward(self, inputs):
        """Return class probabilities for a batch of video clips."""
        feats = self.conv_blocks(inputs)
        flat = feats.view(-1, self.n_conv_output)
        fc_out = self.block7(self.block6(flat))
        return self.softmax(fc_out)
| [
"functools.partial",
"torch.nn.Conv3d",
"clab.torch.models.output_shape_for.OutputShapeFor",
"torch.nn.Softmax",
"torch.nn.Linear",
"collections.OrderedDict",
"torch.nn.MaxPool3d",
"numpy.prod"
] | [((946, 1003), 'functools.partial', 'partial', (['nn.LeakyReLU'], {'negative_slope': '(0.01)', 'inplace': '(False)'}), '(nn.LeakyReLU, negative_slope=0.01, inplace=False)\n', (953, 1003), False, 'from functools import partial\n'), ((2414, 2471), 'functools.partial', 'partial', (['nn.LeakyReLU'], {'negative_slope': '(0.01)', 'inplace': '(False)'}), '(nn.LeakyReLU, negative_slope=0.01, inplace=False)\n', (2421, 2471), False, 'from functools import partial\n'), ((5111, 5128), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (5121, 5128), True, 'import torch.nn as nn\n'), ((2075, 2100), 'collections.OrderedDict', 'OrderedDict', (['named_layers'], {}), '(named_layers)\n', (2086, 2100), False, 'from collections import OrderedDict\n'), ((2163, 2192), 'clab.torch.models.output_shape_for.OutputShapeFor', 'OutputShapeFor', (['self.sequence'], {}), '(self.sequence)\n', (2177, 2192), False, 'from clab.torch.models.output_shape_for import OutputShapeFor\n'), ((2723, 2752), 'clab.torch.models.output_shape_for.OutputShapeFor', 'OutputShapeFor', (['self.sequence'], {}), '(self.sequence)\n', (2737, 2752), False, 'from clab.torch.models.output_shape_for import OutputShapeFor\n'), ((4724, 4751), 'clab.torch.models.output_shape_for.OutputShapeFor', 'OutputShapeFor', (['conv_blocks'], {}), '(conv_blocks)\n', (4738, 4751), False, 'from clab.torch.models.output_shape_for import OutputShapeFor\n'), ((4962, 4987), 'numpy.prod', 'np.prod', (['output_shape[1:]'], {}), '(output_shape[1:])\n', (4969, 4987), True, 'import numpy as np\n'), ((1187, 1300), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'conv_kernel', 'padding': 'conv_padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n conv_kernel, padding=conv_padding)\n', (1196, 1300), True, 'import torch.nn as nn\n'), ((1966, 2023), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': 'pool_kernel', 'stride': 
'pool_stride'}), '(kernel_size=pool_kernel, stride=pool_stride)\n', (1978, 2023), True, 'import torch.nn as nn\n'), ((1615, 1729), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': 'conv_kernel', 'padding': 'conv_padding'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=\n conv_kernel, padding=conv_padding)\n', (1624, 1729), True, 'import torch.nn as nn\n'), ((2574, 2607), 'torch.nn.Linear', 'nn.Linear', (['n_inputs', 'out_channels'], {}), '(n_inputs, out_channels)\n', (2583, 2607), True, 'import torch.nn as nn\n')] |
'''
## Test ##
# Test a trained DQN. This can be run alongside training by running 'run_every_new_ckpt.sh'.
@author: <NAME> (<EMAIL>)
'''
import os
import sys
import argparse
import gym
import tensorflow as tf
import numpy as np
import scipy.stats as ss
from train import get_train_args
from utils.utils import preprocess_image, reset_env_and_state_buffer
from utils.state_buffer import StateBuffer
from utils.network import DeepQNetwork
def get_test_args(train_args):
    """Parse the command-line arguments used for testing a trained DQN.

    Args:
        train_args: parsed training arguments; the environment/frame settings
            must match those used in training, so their values become the
            defaults here.

    Returns:
        argparse.Namespace with the test configuration.
    """
    def str2bool(value):
        # Fix: argparse `type=bool` is broken — bool("False") is True because
        # any non-empty string is truthy, so `--render False` used to enable
        # rendering. Parse the common spellings explicitly instead.
        return str(value).lower() in ('true', '1', 'yes', 'y', 't')

    test_params = argparse.ArgumentParser()

    # Environment parameters (First 4 params must be same as those used in training)
    test_params.add_argument("--env", type=str, default=train_args.env, help="Environment to use (must have RGB image state space and discrete action space)")
    test_params.add_argument("--frame_width", type=int, default=train_args.frame_width, help="Frame width after resize.")
    test_params.add_argument("--frame_height", type=int, default=train_args.frame_height, help="Frame height after resize.")
    test_params.add_argument("--frames_per_state", type=int, default=train_args.frames_per_state, help="Sequence of frames which constitutes a single state.")

    test_params.add_argument("--render", type=str2bool, default=False, help="Whether or not to display the environment on the screen during testing")
    test_params.add_argument("--random_seed", type=int, default=4321, help="Random seed for reproducability")

    # Testing parameters
    test_params.add_argument("--num_eps_test", type=int, default=20, help="Number of episodes to test for")
    test_params.add_argument("--max_ep_length", type=int, default=2000, help="Maximum number of steps per episode")
    test_params.add_argument("--max_initial_random_steps", type=int, default=10, help="Maximum number of random steps to take at start of episode to ensure random starting point")

    # Files/directories
    test_params.add_argument("--ckpt_dir", type=str, default='./ckpts', help="Directory for saving/loading checkpoints")
    test_params.add_argument("--ckpt_file", type=str, default=None, help="Checkpoint file to load (if None, load latest ckpt)")
    test_params.add_argument("--log_dir", type=str, default='./logs/test', help="Directory for saving logs")
    test_params.add_argument("--results_dir", type=str, default='./test_results', help="Directory for saving txt file of results")
    test_params.add_argument("--results_file", type=str, default='results.txt', help="Text file of test results (if None, do not save results)")

    return test_params.parse_args()
def test(args):
    """Evaluate a trained DQN checkpoint over several episodes.

    Builds the environment and the DQN graph, restores weights from a
    checkpoint, runs ``args.num_eps_test`` episodes (each starting with a
    short random warm-up so episodes begin from varied states), then logs
    the average episode reward to TensorBoard and optionally appends it to
    a results text file.

    Args:
        args: parsed test arguments (see ``get_test_args``).
    """
    # Create environment
    env = gym.make(args.env)
    num_actions = env.action_space.n

    # Set random seeds for reproducability
    env.seed(args.random_seed)
    np.random.seed(args.random_seed)
    tf.set_random_seed(args.random_seed)

    # Initialise state buffer (holds the last `frames_per_state` frames)
    state_buf = StateBuffer(args)

    # Define input placeholders
    state_ph = tf.placeholder(tf.uint8, (None, args.frame_height, args.frame_width, args.frames_per_state))

    # Instantiate DQN network
    DQN = DeepQNetwork(num_actions, state_ph, scope='DQN_main')
    DQN_predict_op = DQN.predict()

    # Create session
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Load ckpt file (explicit file if given, otherwise the latest one)
    loader = tf.train.Saver()
    if args.ckpt_file is not None:
        ckpt = args.ckpt_dir + '/' + args.ckpt_file
    else:
        ckpt = tf.train.latest_checkpoint(args.ckpt_dir)
    loader.restore(sess, ckpt)
    sys.stdout.write('%s restored.\n\n' % ckpt)
    sys.stdout.flush()
    # Checkpoint names end in "-<training episode>"; recover that number so
    # results can be attributed to the right point in training.
    ckpt_split = ckpt.split('-')
    train_ep = ckpt_split[-1]

    # Create summary writer to write summaries to disk
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    summary_writer = tf.summary.FileWriter(args.log_dir, sess.graph)

    # Create summary op to save episode reward to Tensorboard log
    reward_var = tf.Variable(0.0, trainable=False)
    tf.summary.scalar("Average Test Reward", reward_var)
    summary_op = tf.summary.merge_all()

    ## Begin testing
    env.reset()
    rewards = []
    for test_ep in range(args.num_eps_test):
        # Reset environment and state buffer for next episode
        reset_env_and_state_buffer(env, state_buf, args)
        ep_reward = 0
        step = 0
        ep_done = False
        initial_steps = np.random.randint(1, args.max_initial_random_steps+1)

        sys.stdout.write('\n')
        sys.stdout.flush()

        while not ep_done:
            if args.render:
                env.render()
            else:
                env.render(mode='rgb_array')

            #Choose random action for initial steps to ensure every episode has a random start point. Then choose action with highest Q-value according to network's current policy.
            if step < initial_steps:
                test_action = env.action_space.sample()
            else:
                test_state = np.expand_dims(state_buf.get_state(), 0)
                test_action = sess.run(DQN_predict_op, {state_ph:test_state})

            test_frame, test_reward, test_ep_terminal, _ = env.step(test_action)

            test_frame = preprocess_image(test_frame, args.frame_width, args.frame_height)
            state_buf.add(test_frame)

            ep_reward += test_reward
            step += 1

            # Progress line is rewritten in place (\x1b[2K\r clears the line).
            sys.stdout.write('\x1b[2K\rTest episode {:d}/{:d} \t Steps = {:d} \t Reward = {:.2f}'.format(test_ep, args.num_eps_test, step, ep_reward))
            sys.stdout.flush()

            # Episode can finish either by reaching terminal state or max episode steps
            if test_ep_terminal or step == args.max_ep_length:
                rewards.append(ep_reward)
                ep_done = True

    mean_reward = np.mean(rewards)
    error_reward = ss.sem(rewards)

    sys.stdout.write('\n\nTesting complete \t Average reward = {:.2f} +/- {:.2f} /ep \n\n'.format(mean_reward, error_reward))
    sys.stdout.flush()

    # Log average episode reward for Tensorboard visualisation
    summary_str = sess.run(summary_op, {reward_var: mean_reward})
    summary_writer.add_summary(summary_str, train_ep)

    # Write results to file (append mode, so repeated runs accumulate)
    if args.results_file is not None:
        if not os.path.exists(args.results_dir):
            os.makedirs(args.results_dir)
        # NOTE(review): output_file is flushed but never closed; it relies on
        # interpreter exit / GC to release the handle.
        output_file = open(args.results_dir + '/' + args.results_file, 'a')
        output_file.write('Training Episode {}: \t Average reward = {:.2f} +/- {:.2f} /ep \n\n'.format(train_ep, mean_reward, error_reward))
        output_file.flush()
        sys.stdout.write('Results saved to file \n\n')
        sys.stdout.flush()

    env.close()
if __name__ == '__main__':
    # Entry point: reuse the training configuration as defaults for testing.
    test(get_test_args(get_train_args()))
| [
"sys.stdout.write",
"utils.network.DeepQNetwork",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.ConfigProto",
"tensorflow.Variable",
"sys.stdout.flush",
"numpy.mean",
"tensorflow.train.latest_checkpoint",
"numpy.random.randint",
"utils.utils.preprocess_image",
"os.path.exists",
... | [((515, 540), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (538, 540), False, 'import argparse\n'), ((2670, 2688), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (2678, 2688), False, 'import gym\n'), ((2814, 2846), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (2828, 2846), True, 'import numpy as np\n'), ((2852, 2888), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (2870, 2888), True, 'import tensorflow as tf\n'), ((2943, 2960), 'utils.state_buffer.StateBuffer', 'StateBuffer', (['args'], {}), '(args)\n', (2954, 2960), False, 'from utils.state_buffer import StateBuffer\n'), ((3020, 3117), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', '(None, args.frame_height, args.frame_width, args.frames_per_state)'], {}), '(tf.uint8, (None, args.frame_height, args.frame_width, args.\n frames_per_state))\n', (3034, 3117), True, 'import tensorflow as tf\n'), ((3161, 3214), 'utils.network.DeepQNetwork', 'DeepQNetwork', (['num_actions', 'state_ph'], {'scope': '"""DQN_main"""'}), "(num_actions, state_ph, scope='DQN_main')\n", (3173, 3214), False, 'from utils.network import DeepQNetwork\n'), ((3297, 3338), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (3311, 3338), True, 'import tensorflow as tf\n'), ((3395, 3420), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3405, 3420), True, 'import tensorflow as tf\n'), ((3482, 3498), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3496, 3498), True, 'import tensorflow as tf\n'), ((3707, 3750), 'sys.stdout.write', 'sys.stdout.write', (["('%s restored.\\n\\n' % ckpt)"], {}), "('%s restored.\\n\\n' % ckpt)\n", (3723, 3750), False, 'import sys\n'), ((3756, 3774), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3772, 3774), False, 'import sys\n'), ((4011, 
4058), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['args.log_dir', 'sess.graph'], {}), '(args.log_dir, sess.graph)\n', (4032, 4058), True, 'import tensorflow as tf\n'), ((4150, 4183), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (4161, 4183), True, 'import tensorflow as tf\n'), ((4189, 4241), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Average Test Reward"""', 'reward_var'], {}), "('Average Test Reward', reward_var)\n", (4206, 4241), True, 'import tensorflow as tf\n'), ((4260, 4282), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4280, 4282), True, 'import tensorflow as tf\n'), ((6200, 6216), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (6207, 6216), True, 'import numpy as np\n'), ((6237, 6252), 'scipy.stats.sem', 'ss.sem', (['rewards'], {}), '(rewards)\n', (6243, 6252), True, 'import scipy.stats as ss\n'), ((6399, 6417), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6415, 6417), False, 'import sys\n'), ((7236, 7252), 'train.get_train_args', 'get_train_args', ([], {}), '()\n', (7250, 7252), False, 'from train import get_train_args\n'), ((3621, 3662), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['args.ckpt_dir'], {}), '(args.ckpt_dir)\n', (3647, 3662), True, 'import tensorflow as tf\n'), ((3924, 3952), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (3938, 3952), False, 'import os\n'), ((3963, 3988), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (3974, 3988), False, 'import os\n'), ((4508, 4556), 'utils.utils.reset_env_and_state_buffer', 'reset_env_and_state_buffer', (['env', 'state_buf', 'args'], {}), '(env, state_buf, args)\n', (4534, 4556), False, 'from utils.utils import preprocess_image, reset_env_and_state_buffer\n'), ((4660, 4715), 'numpy.random.randint', 'np.random.randint', (['(1)', '(args.max_initial_random_steps + 1)'], {}), '(1, 
args.max_initial_random_steps + 1)\n', (4677, 4715), True, 'import numpy as np\n'), ((4733, 4755), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (4749, 4755), False, 'import sys\n'), ((4768, 4786), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4784, 4786), False, 'import sys\n'), ((7053, 7099), 'sys.stdout.write', 'sys.stdout.write', (['"""Results saved to file \n\n"""'], {}), "('Results saved to file \\n\\n')\n", (7069, 7099), False, 'import sys\n'), ((7109, 7127), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7125, 7127), False, 'import sys\n'), ((5545, 5610), 'utils.utils.preprocess_image', 'preprocess_image', (['test_frame', 'args.frame_width', 'args.frame_height'], {}), '(test_frame, args.frame_width, args.frame_height)\n', (5561, 5610), False, 'from utils.utils import preprocess_image, reset_env_and_state_buffer\n'), ((5908, 5926), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5924, 5926), False, 'import sys\n'), ((6719, 6751), 'os.path.exists', 'os.path.exists', (['args.results_dir'], {}), '(args.results_dir)\n', (6733, 6751), False, 'import os\n'), ((6766, 6795), 'os.makedirs', 'os.makedirs', (['args.results_dir'], {}), '(args.results_dir)\n', (6777, 6795), False, 'import os\n')] |
'''
在第七课的基础上,增加了TensorBoard功能。
'''
#import tensorflow as tf
import tensorflow.compat.v1 as tf
import numpy as np
import matplotlib.pyplot as plt
'''
定义网络结构
输入的参数:
该层输入,输入数据的大小,输出数据的大小,以及使用的激活函数,激活函数在默认情况下是None,即不适用激活函数
'''
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully-connected layer to the graph.

    Args:
        inputs: tensor fed into this layer.
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation applied to the affine
            output; when None (the default) the raw affine output is
            returned.

    Returns:
        The output tensor of the layer.
    """
    with tf.name_scope("layer"):
        with tf.name_scope("weights"):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name="Weight")
        with tf.name_scope("biase"):
            # Biases start slightly positive (0.1) rather than at zero.
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name="biase")
        with tf.name_scope("Wx_plus_b"):
            preactivation = tf.add(tf.matmul(inputs, Weights), biases)
        if activation_function is None:
            return preactivation
        return activation_function(preactivation)
# Generate training data.
# We create a 300x1 column vector x, add a little gaussian noise, and
# produce the targets via y = x^2 - 0.5 + noise.
x_data = np.linspace(-1,1,300)[:,np.newaxis]
# print(x_data)
noise = np.random.normal(0,0.05,x_data.shape).astype(np.float32)
y_data = np.square(x_data) - 0.5 + noise

# Build the network.
# A three-layer input/hidden/output architecture: the input and output
# layers have a single neuron each while the hidden layer has 10 neurons.
# The squared loss is minimised with plain gradient descent.
# `None` in the placeholder shape means any number of samples is accepted.
with tf.name_scope("inputs"):
    xs = tf.placeholder(tf.float32,[None,1], name="x_input")
    ys = tf.placeholder(tf.float32,[None,1], name="y_input")
# add hidden layer
l1 = add_layer(xs,1,10,activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1,10,1,activation_function=None)
# When computing the squared loss we first sum the per-sample loss with
# tf.reduce_sum; reduction_indices=[1] means the sum runs over axis 1.
with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                    reduction_indices=[1]), name="loss")
with tf.name_scope("train"):
    train_step = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
# plot real data
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()
plt.show(block=False)
# Define the Session and train.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # you can run "tensorboard --logdir=E:\zhouyi\graph" in Win10
    writer = tf.summary.FileWriter("E:\zhouyi\graph", sess.graph)
    sess.run(init)
    for i in range(1000):
        sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
        if i % 50 == 0:
            # Report the current loss every 50 steps.
            print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
            # visualize the result: drop the previously drawn fit line
            # (if any) before plotting the new prediction.
            try:
                ax.lines.remove(lines[0])
            except Exception:
                pass
            prediction_value = sess.run(prediction, feed_dict={xs: x_data})
            lines = ax.plot(x_data, prediction_value, 'r', lw=5)
plt.pause(0.2) | [
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.zeros",
"matplotlib.pyplot.show",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.placeholder",
"numpy.square",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.compat.v1.matmul",
"matplotlib.pyplot.ion",
"matplotlib.p... | [((1865, 1877), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1875, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1945), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1943, 1945), True, 'import matplotlib.pyplot as plt\n'), ((1946, 1967), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1954, 1967), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2023), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2021, 2023), True, 'import tensorflow.compat.v1 as tf\n'), ((902, 925), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(300)'], {}), '(-1, 1, 300)\n', (913, 925), True, 'import numpy as np\n'), ((1184, 1207), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""inputs"""'], {}), "('inputs')\n", (1197, 1207), True, 'import tensorflow.compat.v1 as tf\n'), ((1218, 1271), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {'name': '"""x_input"""'}), "(tf.float32, [None, 1], name='x_input')\n", (1232, 1271), True, 'import tensorflow.compat.v1 as tf\n'), ((1279, 1332), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {'name': '"""y_input"""'}), "(tf.float32, [None, 1], name='y_input')\n", (1293, 1332), True, 'import tensorflow.compat.v1 as tf\n'), ((1575, 1596), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (1588, 1596), True, 'import tensorflow.compat.v1 as tf\n'), ((1732, 1754), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (1745, 1754), True, 'import tensorflow.compat.v1 as tf\n'), ((2029, 2041), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2039, 2041), True, 'import tensorflow.compat.v1 as tf\n'), ((2130, 2184), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['"""E:\\\\zhouyi\\\\graph"""', 'sess.graph'], {}), 
"('E:\\\\zhouyi\\\\graph', sess.graph)\n", (2151, 2184), True, 'import tensorflow.compat.v1 as tf\n'), ((298, 320), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""layer"""'], {}), "('layer')\n", (311, 320), True, 'import tensorflow.compat.v1 as tf\n'), ((962, 1001), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)', 'x_data.shape'], {}), '(0, 0.05, x_data.shape)\n', (978, 1001), True, 'import numpy as np\n'), ((1028, 1045), 'numpy.square', 'np.square', (['x_data'], {}), '(x_data)\n', (1037, 1045), True, 'import numpy as np\n'), ((335, 359), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (348, 359), True, 'import tensorflow.compat.v1 as tf\n'), ((461, 483), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""biase"""'], {}), "('biase')\n", (474, 483), True, 'import tensorflow.compat.v1 as tf\n'), ((573, 599), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""Wx_plus_b"""'], {}), "('Wx_plus_b')\n", (586, 599), True, 'import tensorflow.compat.v1 as tf\n'), ((1638, 1664), 'tensorflow.compat.v1.square', 'tf.square', (['(ys - prediction)'], {}), '(ys - prediction)\n', (1647, 1664), True, 'import tensorflow.compat.v1 as tf\n'), ((1773, 1825), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (1806, 1825), True, 'import tensorflow.compat.v1 as tf\n'), ((2677, 2691), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.2)'], {}), '(0.2)\n', (2686, 2691), True, 'import matplotlib.pyplot as plt\n'), ((395, 432), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[in_size, out_size]'], {}), '([in_size, out_size])\n', (411, 432), True, 'import tensorflow.compat.v1 as tf\n'), ((632, 658), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['inputs', 'Weights'], {}), '(inputs, Weights)\n', (641, 658), True, 'import tensorflow.compat.v1 as tf\n'), ((518, 541), 'tensorflow.compat.v1.zeros', 'tf.zeros', 
(['[1, out_size]'], {}), '([1, out_size])\n', (526, 541), True, 'import tensorflow.compat.v1 as tf\n')] |
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Reshape
from tensorflow.keras.layers import Conv2D, MaxPooling2D, SpatialDropout2D
from data_loader import load_3D, load_2D
def create_model(map_size):
    """Build and compile the CNN that predicts a path heat-map.

    Args:
        map_size: side length of the (square) input map.

    Returns:
        A compiled tf.keras Sequential model mapping a
        (map_size, map_size, 1) input to an output of the same shape.
    """
    model = Sequential([
        Conv2D(64, (3, 3), padding="same", activation=tf.keras.activations.relu,
               input_shape=(map_size, map_size, 1)),
        SpatialDropout2D(0.3),
        Flatten(),
        # Project back to one unit per map cell.  The original code
        # hard-coded 441 (= 21 * 21), which silently broke the model for any
        # map_size other than 21; derive the width from map_size instead
        # (identical for the existing map_size=21 call site).
        Dense(units=map_size * map_size),
        Reshape((map_size, map_size, 1)),
        Conv2D(1, (map_size, map_size), padding="same")
    ])
    model.compile(loss='mean_squared_error',
                  optimizer=tf.keras.optimizers.Adam(0.0001),
                  metrics=['accuracy'])
    return model
def main():
    """Train the path-prediction CNN and visualise one test sample.

    Loads generated train/test map data, fits the model for 20 epochs
    (checkpointing every 5 epochs), then plots the map, the predicted path
    and the desired path for a fixed test sample side by side.
    """
    map_size = 21
    train_map_count = 1000
    test_map_count = 1000
    # Test data uses a different file prefix than the training data.
    features_train, labels_train = load_3D(train_map_count, map_size, map_size)
    features_test, labels_test = load_3D(test_map_count, map_size, map_size, "test_created_data_")

    checkpoint_path = "checkpoints/cp-{epoch:04d}.ckpt"
    checkpoint_dir = os.path.dirname(checkpoint_path)
    #latest = tf.train.latest_checkpoint(checkpoint_dir)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        checkpoint_path, verbose = 1, period = 5)
    model = create_model(map_size)
    model.summary()
    #model.load_weights(latest)
    model.fit(features_train, labels_train, epochs = 20, validation_split = 0.1, callbacks = [cp_callback])

    # Fixed sample index used for the side-by-side visualisation below.
    size = 68
    test = np.array(features_test[size]).reshape(map_size, map_size)
    prediction = model.predict(features_test)
    # NOTE(review): plt.subplot returns an Axes, so `fig` here is actually an
    # Axes object; only the Axes API (.axis / .set_title) is used on it.
    fig, axs = plt.subplot(1,3,1), plt.imshow(test)
    fig.axis('off')
    fig.set_title('Map')
    # Mask the raw prediction with the input map before clipping —
    # presumably so the path is only drawn on map cells; TODO confirm
    # against data_loader semantics.
    pred = np.array(prediction[size]).reshape(map_size, map_size) * features_test[size].reshape(map_size, map_size)
    array = np.clip(pred, -0.25, 0.25)
    fig, axs = plt.subplot(1,3,2), plt.imshow(array)
    fig.axis('off')
    fig.set_title('Predicted path')
    fig, axs = plt.subplot(1,3,3), plt.imshow(np.array(labels_test[size]).reshape(map_size, map_size))
    fig.axis('off')
    fig.set_title('Desired path')
    plt.show()
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.subplot",
"tensorflow.keras.layers.SpatialDropout2D",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.imshow",
"os.path.dirname",
"tensorflow.keras.callbacks.ModelCheckpoint",
"num... | [((872, 916), 'data_loader.load_3D', 'load_3D', (['train_map_count', 'map_size', 'map_size'], {}), '(train_map_count, map_size, map_size)\n', (879, 916), False, 'from data_loader import load_3D, load_2D\n'), ((947, 1012), 'data_loader.load_3D', 'load_3D', (['test_map_count', 'map_size', 'map_size', '"""test_created_data_"""'], {}), "(test_map_count, map_size, map_size, 'test_created_data_')\n", (954, 1012), False, 'from data_loader import load_3D, load_2D\n'), ((1086, 1118), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1101, 1118), False, 'import os\n'), ((1188, 1260), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['checkpoint_path'], {'verbose': '(1)', 'period': '(5)'}), '(checkpoint_path, verbose=1, period=5)\n', (1222, 1260), True, 'import tensorflow as tf\n'), ((1787, 1813), 'numpy.clip', 'np.clip', (['pred', '(-0.25)', '(0.25)'], {}), '(pred, -0.25, 0.25)\n', (1794, 1813), True, 'import numpy as np\n'), ((2063, 2073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2071, 2073), True, 'import matplotlib.pyplot as plt\n'), ((1589, 1609), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1600, 1609), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1625), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test'], {}), '(test)\n', (1619, 1625), True, 'import matplotlib.pyplot as plt\n'), ((1826, 1846), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1837, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1863), 'matplotlib.pyplot.imshow', 'plt.imshow', (['array'], {}), '(array)\n', (1856, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1946), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1937, 1946), True, 'import matplotlib.pyplot as plt\n'), ((382, 495), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], 
{'padding': '"""same"""', 'activation': 'tf.keras.activations.relu', 'input_shape': '(map_size, map_size, 1)'}), "(64, (3, 3), padding='same', activation=tf.keras.activations.relu,\n input_shape=(map_size, map_size, 1))\n", (388, 495), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, SpatialDropout2D\n'), ((495, 516), 'tensorflow.keras.layers.SpatialDropout2D', 'SpatialDropout2D', (['(0.3)'], {}), '(0.3)\n', (511, 516), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, SpatialDropout2D\n'), ((520, 529), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (527, 529), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Reshape\n'), ((533, 549), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(441)'}), '(units=441)\n', (538, 549), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Reshape\n'), ((553, 585), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(map_size, map_size, 1)'], {}), '((map_size, map_size, 1))\n', (560, 585), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Reshape\n'), ((589, 636), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(1)', '(map_size, map_size)'], {'padding': '"""same"""'}), "(1, (map_size, map_size), padding='same')\n", (595, 636), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, SpatialDropout2D\n'), ((695, 727), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (719, 727), True, 'import tensorflow as tf\n'), ((1474, 1503), 'numpy.array', 'np.array', (['features_test[size]'], {}), '(features_test[size])\n', (1482, 1503), True, 'import numpy as np\n'), ((1673, 1699), 'numpy.array', 'np.array', (['prediction[size]'], {}), '(prediction[size])\n', (1681, 1699), True, 'import numpy as np\n'), ((1957, 1984), 'numpy.array', 'np.array', (['labels_test[size]'], {}), '(labels_test[size])\n', (1965, 1984), True, 'import numpy as np\n')] |
import random
import operator
import math
import numpy
import numpy as np
import csv
import datetime
import os
import time
# deap
# pip install deap
from deap import base, creator, gp, tools, algorithms
# keras
# pip install keras
import tensorflow
import keras
from keras.models import Sequential, clone_model
import keras.utils
from keras.layers import Dense, Input
from keras.layers.merge import concatenate
# pip install tensorflow
# split data with sklearn library
# pip install sklearn
from sklearn.model_selection import train_test_split
import datetime
from datetime import date
def log_title_console(title):
print("\n#####{}#####".format("#" * len(title)))
print("# ", title, " #")
print("#####{}#####\n".format("#" * len(title)))
################################################################################
log_title_console("DATASET")
TAILLE_POP = 10
NOMBRE_LIGNES = 300
NOMBRE_VARIABLE = 14
# GPU Kernel Performance Dataset (kaggle.com)
# Link : https://www.kaggle.com/rupals/gpu-runtime?select=sgemm_product.csv
dataset = []
data_file = open("sgemm_product.csv")
read_data = csv.reader(data_file, delimiter=",")
for row in read_data:
dataset.append(row)
dataset = dataset[1:]
# Input : x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 y1 y2 y3 y4
# Considérons les variables y de la manière suivante :
# 1- On réduit la dimension à 1 tel que y = sum(y1, y2, y3, y4)
mean_y = []
for i in range(NOMBRE_LIGNES):
for j in range(len(dataset[i])):
if j < NOMBRE_VARIABLE:
dataset[i][j] = float(dataset[i][j])
elif j == NOMBRE_VARIABLE:
for elem in dataset[i][-4:]:
dataset[i][dataset[i].index(elem)] = float(elem)
new_y = round(sum(dataset[i][-4:]), 2)
dataset[i][j] = new_y
mean_y.append(new_y)
# Les derniers 'y' doivent étre effacés
dataset[i] = dataset[i][:-3]
dataset = dataset[:NOMBRE_LIGNES]
# 2 - On transforme y tel que :
# y = 0 si y < moyenne des y observés, 1 sinon
# L'objectif est d'obtenir une classification binaire
X = []
y = []
for tuple in dataset:
X.append(tuple[:14])
if tuple[-1] >= np.mean(mean_y):
y.append(1)
else:
y.append(0)
X = np.array(X)
y = np.array(y)
# On split nos données d'entraînement, de test et de validation tel que:
# -> inspiré du cours "COMPUTER VISION"
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5, stratify=y_test)
# Test rapide
print("Dataset: split data...")
print('> Initial size: ', len(dataset))
print('> training samples: ', len(X_train))
print('> testing samples: ', len(X_test))
print('> validation samples:', len(X_val))
if len(X_train) + len(X_test) + len(X_val) == len(dataset):
print(" OK!")
else:
print(" KO!")
################################################################################
log_title_console("GENERATION INDIVIDU")
def generate_weight_matrix(number_hidden_layers, number_neurons):
weight_matrix = []
# On défini maintenant un vecteur de poids pour chaques neurones de chaques couches
for i in range(number_hidden_layers):
vector = []
# Le dernier vecteur de poids
if i == number_hidden_layers-1:
for k in range(number_neurons[i]):
vector.append(random.random() if random.random() < 0.7 else 0)
# Les autres
else:
for j in range(number_neurons[i] * number_neurons[i+1]):
# Pour casser le système 'fully connected' du réseau, on définit certains poids à 0
vector.append(random.random() if random.random() < 0.7 else 0)
# On l'ajoute à notre liste de poids
weight_matrix.append(vector)
return weight_matrix
def generate_individual():
# On retourne le tableau suivant:
# - un tableau : [nombre de couches, [nombres de neurones dans chaques couches]] -> neural_network_config
# - un vecteur de poids pour chaques couches -> weight_vector
res = []
neural_network_config = []
# On choisit un nombre aléatoire de couches cachées
number_hidden_layers = int(random.uniform(1, 10))
# On choisit un nombre aléatoire de neurones dans chaques couches
number_neurons = []
for i in range(number_hidden_layers):
number_neurons.append(int(random.uniform(1, NOMBRE_VARIABLE)))
# On ajoute à la liste
neural_network_config.append(number_hidden_layers)
neural_network_config.append(number_neurons)
# On l'ajoute au résultat final
res.append(neural_network_config)
weight_vector = generate_weight_matrix(number_hidden_layers, number_neurons)
# On l'ajoute au résultat
res.append(weight_vector)
return res
print("Generate model:")
example = generate_individual()
print('> model:\n', example)
################################################################################
log_title_console("GENERATION POPULATION")
# On crée notre population tel que :
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("individual", tools.initIterate, creator.Individual, generate_individual)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Population
pop = toolbox.population(n=TAILLE_POP)
print("Generate pop")
print("> len(pop): ", len(pop))
print("> pop[0]: ", pop[0])
################################################################################
log_title_console("OPERATIONS WITH DEAP")
# Création du fichier de sortie
output = open(str(date.today()) + "_MIAGE_FELIN_AE_res.txt", "w")
name = output.name
output.write("> Auteur: <NAME>\n")
output.write("> TP6: Algorithmes évolutionnaires\n")
output.write("> Date: {}\n".format(datetime.datetime.now()))
def build_model(input):
# On crée notre réseau de neurones ici :
model = Sequential()
# Nos entrées fixes :
model.add(Dense(NOMBRE_VARIABLE, input_dim=NOMBRE_VARIABLE, activation='sigmoid', name="input"))
# Nos couches cachées
for i in range(input[0][0]):
model.add(Dense(input[0][1][i], activation='sigmoid', name="Couche_" + str(i + 1)))
# On ajoute notre sortie
model.add(Dense(1, activation='sigmoid', name="output"))
# model.summary()
return model
def fitness(individual):
# Notre modèle
model = build_model(individual)
# On compile notre modèle
model.compile(optimizer='sgd',
loss='mean_squared_error',
metrics=['mse'])
# On l'entraine avec le jeu de données (avec peu d'epoch et un petit batch)
model.fit(X_train,
y_train,
validation_data=(X_val, y_val),
epochs=10,
batch_size=5,
verbose=0)
# On évalue l'erreur quadratique du modèle
score = model.evaluate(X_test, y_test, verbose=1)
# On applique la formule et on retourne le résultat
return 1 / score[0] + 1,
def crossover(individual1, individual2):
# On teste aussi si les 2 individus ont la même dimension
if individual1[0][0] == individual2[0][0]:
# On applique un crossover de type merge entre 2 modèles
for i in range(len(individual1[0][1])):
# On ajoute les élèments d'un modèle dans l'autre
individual1[0][1][i] = individual1[0][1][i] + individual2[0][1][i]
# On regénère les poids
individual1[1] = generate_weight_matrix(individual1[0][0], individual1[0][1])
# On retourne les 2 individus qui ont été merge (et qui sont donc indentiques)
return individual1, individual1
else:
# On retourne les individus sans rien faire
return individual1, individual2
def mutate(individual):
# On ajoute un noeud dans la dernière couche
# de plus, on divise son poid avec le noeud adjacent
# > On sélectionne la dernière couche et on lui ajoute un neurone
individual[0][1][-1] = individual[0][1][-1] + 1
# On met à jour les 2 poids adjacent en les divisant par 2
# > On divise par 2 le dernier neurone :
individual[1][-1][-1] = individual[1][-1][-1] / 2
# > On ajoute le dernier poid déjà divisé au préalable
individual[1][-1].append(individual[1][-1][-1])
return individual,
toolbox.register("mate", crossover)
toolbox.register("mutate", mutate)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", fitness)
stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
mstats = tools.MultiStatistics(fitness=stats_fit)
mstats.register("avg", numpy.mean)
mstats.register("std", numpy.std)
mstats.register("min", numpy.min)
mstats.register("max", numpy.max)
NGEN = 10
CXPB = 0.4
MUTPB = 0.2
# TESTING
output.write("\n> Starting ...\n")
hof = tools.HallOfFame(3)
pop, log = algorithms.eaSimple(pop, toolbox, CXPB, MUTPB, NGEN, stats=mstats,
halloffame=hof, verbose=True)
output.write(str(log))
output.write("\n\n> End !\n")
output.write("\n\n[RESULT] - 3 best individuals :\n")
for res in hof:
output.write("\n##########################################################")
output.write("\n> individual: " + str(res))
output.write("\n\n> summary")
list = []
build_model(res).summary(print_fn=lambda x: list.append(x))
model_summary = "\n".join(list)
output.write("\n\n" + str(model_summary))
output.write("\n\n> fitness: " + str(res.fitness))
output.write("\n\n##########################################################\n")
output.close()
if open(name, "r") is not None:
print("\n[END] Le fichier", name, "a bien été créé")
| [
"csv.reader",
"deap.base.Toolbox",
"random.uniform",
"sklearn.model_selection.train_test_split",
"deap.tools.MultiStatistics",
"datetime.date.today",
"deap.tools.Statistics",
"random.random",
"deap.creator.create",
"numpy.mean",
"numpy.array",
"keras.layers.Dense",
"keras.models.Sequential",... | [((1116, 1152), 'csv.reader', 'csv.reader', (['data_file'], {'delimiter': '""","""'}), "(data_file, delimiter=',')\n", (1126, 1152), False, 'import csv\n'), ((2227, 2238), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2235, 2238), True, 'import numpy as np\n'), ((2243, 2254), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2251, 2254), True, 'import numpy as np\n'), ((2404, 2453), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'stratify': 'y'}), '(X, y, test_size=0.2, stratify=y)\n', (2420, 2453), False, 'from sklearn.model_selection import train_test_split\n'), ((2485, 2549), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_test', 'y_test'], {'test_size': '(0.5)', 'stratify': 'y_test'}), '(X_test, y_test, test_size=0.5, stratify=y_test)\n', (2501, 2549), False, 'from sklearn.model_selection import train_test_split\n'), ((5106, 5164), 'deap.creator.create', 'creator.create', (['"""FitnessMax"""', 'base.Fitness'], {'weights': '(1.0,)'}), "('FitnessMax', base.Fitness, weights=(1.0,))\n", (5120, 5164), False, 'from deap import base, creator, gp, tools, algorithms\n'), ((5165, 5227), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'list'], {'fitness': 'creator.FitnessMax'}), "('Individual', list, fitness=creator.FitnessMax)\n", (5179, 5227), False, 'from deap import base, creator, gp, tools, algorithms\n'), ((5238, 5252), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (5250, 5252), False, 'from deap import base, creator, gp, tools, algorithms\n'), ((8620, 8668), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (8636, 8668), False, 'from deap import base, creator, gp, tools, algorithms\n'), ((8678, 8718), 'deap.tools.MultiStatistics', 'tools.MultiStatistics', ([], {'fitness': 'stats_fit'}), '(fitness=stats_fit)\n', (8699, 8718), False, 'from deap import base, 
creator, gp, tools, algorithms\n'), ((8943, 8962), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(3)'], {}), '(3)\n', (8959, 8962), False, 'from deap import base, creator, gp, tools, algorithms\n'), ((8974, 9074), 'deap.algorithms.eaSimple', 'algorithms.eaSimple', (['pop', 'toolbox', 'CXPB', 'MUTPB', 'NGEN'], {'stats': 'mstats', 'halloffame': 'hof', 'verbose': '(True)'}), '(pop, toolbox, CXPB, MUTPB, NGEN, stats=mstats,\n halloffame=hof, verbose=True)\n', (8993, 9074), False, 'from deap import base, creator, gp, tools, algorithms\n'), ((6049, 6061), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6059, 6061), False, 'from keras.models import Sequential, clone_model\n'), ((2156, 2171), 'numpy.mean', 'np.mean', (['mean_y'], {}), '(mean_y)\n', (2163, 2171), True, 'import numpy as np\n'), ((4263, 4284), 'random.uniform', 'random.uniform', (['(1)', '(10)'], {}), '(1, 10)\n', (4277, 4284), False, 'import random\n'), ((5940, 5963), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5961, 5963), False, 'import datetime\n'), ((6102, 6191), 'keras.layers.Dense', 'Dense', (['NOMBRE_VARIABLE'], {'input_dim': 'NOMBRE_VARIABLE', 'activation': '"""sigmoid"""', 'name': '"""input"""'}), "(NOMBRE_VARIABLE, input_dim=NOMBRE_VARIABLE, activation='sigmoid',\n name='input')\n", (6107, 6191), False, 'from keras.layers import Dense, Input\n'), ((6383, 6428), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""output"""'}), "(1, activation='sigmoid', name='output')\n", (6388, 6428), False, 'from keras.layers import Dense, Input\n'), ((5735, 5747), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5745, 5747), False, 'from datetime import date\n'), ((4456, 4490), 'random.uniform', 'random.uniform', (['(1)', 'NOMBRE_VARIABLE'], {}), '(1, NOMBRE_VARIABLE)\n', (4470, 4490), False, 'import random\n'), ((3450, 3465), 'random.random', 'random.random', ([], {}), '()\n', (3463, 3465), False, 'import random\n'), ((3733, 3748), 
'random.random', 'random.random', ([], {}), '()\n', (3746, 3748), False, 'import random\n'), ((3469, 3484), 'random.random', 'random.random', ([], {}), '()\n', (3482, 3484), False, 'import random\n'), ((3752, 3767), 'random.random', 'random.random', ([], {}), '()\n', (3765, 3767), False, 'import random\n')] |
#
# devon
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.17103, 0.1004, 0.29978],
[0.17087, 0.10414, 0.30337],
[0.17068, 0.10786, 0.30699],
[0.17046, 0.11159, 0.31061],
[0.17023, 0.11524, 0.31422],
[0.16997, 0.11894, 0.31784],
[0.1697, 0.12254, 0.32146],
[0.1694, 0.12624, 0.32509],
[0.16908, 0.12989, 0.32874],
[0.16874, 0.1335, 0.33238],
[0.16837, 0.13713, 0.33601],
[0.16801, 0.14077, 0.33967],
[0.16765, 0.14438, 0.34332],
[0.16728, 0.14799, 0.34698],
[0.16689, 0.1516, 0.35064],
[0.16648, 0.15524, 0.35429],
[0.16605, 0.15887, 0.35797],
[0.16562, 0.16251, 0.36164],
[0.1652, 0.16612, 0.36531],
[0.1648, 0.16979, 0.369],
[0.16439, 0.17341, 0.37268],
[0.16397, 0.17708, 0.37639],
[0.16353, 0.18072, 0.38008],
[0.16308, 0.1844, 0.3838],
[0.16264, 0.1881, 0.3875],
[0.1622, 0.19175, 0.39124],
[0.16176, 0.19545, 0.39496],
[0.16131, 0.19914, 0.3987],
[0.16084, 0.20287, 0.40244],
[0.16036, 0.20662, 0.40621],
[0.15991, 0.21033, 0.40998],
[0.15947, 0.21408, 0.41376],
[0.15904, 0.21784, 0.41755],
[0.1586, 0.2216, 0.42135],
[0.15814, 0.22538, 0.42518],
[0.15768, 0.22918, 0.42901],
[0.15723, 0.23298, 0.43287],
[0.15681, 0.23683, 0.43674],
[0.15639, 0.24063, 0.44063],
[0.15595, 0.24449, 0.44456],
[0.15552, 0.24839, 0.44851],
[0.1551, 0.25226, 0.45249],
[0.15469, 0.25615, 0.45649],
[0.15431, 0.26008, 0.46054],
[0.15394, 0.26403, 0.46464],
[0.1536, 0.26797, 0.46878],
[0.15329, 0.27194, 0.47296],
[0.153, 0.27597, 0.47721],
[0.15276, 0.27998, 0.48152],
[0.15256, 0.28401, 0.48589],
[0.15241, 0.28806, 0.49034],
[0.15232, 0.29214, 0.49486],
[0.1523, 0.29622, 0.49944],
[0.15234, 0.30031, 0.50414],
[0.15246, 0.30441, 0.5089],
[0.15267, 0.30853, 0.51375],
[0.15298, 0.31262, 0.51869],
[0.15339, 0.31673, 0.52371],
[0.15391, 0.32082, 0.52882],
[0.15456, 0.32486, 0.53399],
[0.15532, 0.32892, 0.53926],
[0.15623, 0.33293, 0.5446],
[0.15721, 0.33688, 0.55],
[0.15838, 0.3408, 0.55545],
[0.15962, 0.34465, 0.56096],
[0.16105, 0.34846, 0.56651],
[0.16259, 0.3522, 0.57209],
[0.16426, 0.35588, 0.57769],
[0.166, 0.35947, 0.58331],
[0.1679, 0.36299, 0.58894],
[0.16993, 0.36646, 0.59458],
[0.17202, 0.36982, 0.60021],
[0.17425, 0.37313, 0.60583],
[0.17653, 0.37638, 0.61145],
[0.17895, 0.37954, 0.61705],
[0.1814, 0.38265, 0.62262],
[0.18398, 0.3857, 0.62819],
[0.18662, 0.38871, 0.63374],
[0.18934, 0.39167, 0.63926],
[0.19213, 0.39459, 0.64477],
[0.19503, 0.39748, 0.65028],
[0.19798, 0.40036, 0.65577],
[0.20103, 0.40321, 0.66124],
[0.20423, 0.40606, 0.66671],
[0.20748, 0.40891, 0.67218],
[0.21086, 0.41176, 0.67765],
[0.21438, 0.41463, 0.68311],
[0.21803, 0.41753, 0.68857],
[0.22183, 0.42044, 0.69402],
[0.22582, 0.42338, 0.69948],
[0.22993, 0.42637, 0.70494],
[0.23429, 0.42941, 0.71039],
[0.23881, 0.43249, 0.71584],
[0.24355, 0.43562, 0.72126],
[0.24855, 0.43878, 0.72667],
[0.25373, 0.44203, 0.73207],
[0.25915, 0.4453, 0.73742],
[0.26478, 0.44865, 0.74275],
[0.27068, 0.45204, 0.74803],
[0.27677, 0.45548, 0.75325],
[0.28309, 0.45897, 0.75842],
[0.28962, 0.4625, 0.76352],
[0.29635, 0.46607, 0.76854],
[0.30326, 0.46969, 0.77347],
[0.31037, 0.47331, 0.77832],
[0.3176, 0.47697, 0.78307],
[0.32497, 0.48065, 0.78772],
[0.33249, 0.48433, 0.79227],
[0.34009, 0.48804, 0.7967],
[0.34779, 0.49174, 0.80103],
[0.35555, 0.49542, 0.80525],
[0.36336, 0.49911, 0.80936],
[0.37122, 0.50278, 0.81336],
[0.37911, 0.50644, 0.81727],
[0.38701, 0.51008, 0.82107],
[0.39494, 0.51371, 0.82478],
[0.40285, 0.51731, 0.8284],
[0.41076, 0.52088, 0.83194],
[0.41863, 0.52444, 0.8354],
[0.4265, 0.52797, 0.83878],
[0.43435, 0.53148, 0.84211],
[0.44217, 0.53496, 0.84536],
[0.44995, 0.53844, 0.84856],
[0.45769, 0.54187, 0.85171],
[0.46541, 0.54531, 0.85481],
[0.47308, 0.54872, 0.85786],
[0.48073, 0.55212, 0.86088],
[0.48834, 0.55549, 0.86386],
[0.49589, 0.55887, 0.8668],
[0.50341, 0.56225, 0.8697],
[0.51089, 0.5656, 0.87258],
[0.51834, 0.56896, 0.87542],
[0.52572, 0.57232, 0.87823],
[0.53305, 0.57568, 0.88101],
[0.54033, 0.57904, 0.88375],
[0.54755, 0.58241, 0.88646],
[0.55471, 0.58579, 0.88913],
[0.5618, 0.58917, 0.89176],
[0.56881, 0.59258, 0.89435],
[0.57576, 0.59599, 0.89689],
[0.5826, 0.59942, 0.89939],
[0.58935, 0.60286, 0.90183],
[0.59601, 0.60632, 0.90422],
[0.60256, 0.6098, 0.90655],
[0.60899, 0.61329, 0.90881],
[0.61529, 0.6168, 0.91102],
[0.62146, 0.62032, 0.91315],
[0.62749, 0.62385, 0.91521],
[0.63338, 0.62739, 0.9172],
[0.63911, 0.63094, 0.9191],
[0.64469, 0.63449, 0.92093],
[0.65011, 0.63804, 0.92269],
[0.65537, 0.64159, 0.92436],
[0.66048, 0.64514, 0.92595],
[0.66542, 0.6487, 0.92746],
[0.67022, 0.65223, 0.9289],
[0.67484, 0.65577, 0.93027],
[0.67933, 0.65928, 0.93157],
[0.68368, 0.66279, 0.93279],
[0.68789, 0.66628, 0.93396],
[0.69197, 0.66977, 0.93506],
[0.69594, 0.67324, 0.93611],
[0.69978, 0.67669, 0.93711],
[0.70354, 0.68012, 0.93806],
[0.7072, 0.68355, 0.93896],
[0.71077, 0.68697, 0.93983],
[0.71427, 0.69038, 0.94067],
[0.71771, 0.69377, 0.94147],
[0.72108, 0.69716, 0.94225],
[0.72441, 0.70054, 0.943],
[0.72769, 0.70392, 0.94373],
[0.73094, 0.70728, 0.94445],
[0.73416, 0.71065, 0.94515],
[0.73734, 0.71401, 0.94583],
[0.74051, 0.71738, 0.94651],
[0.74366, 0.72073, 0.94718],
[0.7468, 0.72408, 0.94784],
[0.74992, 0.72744, 0.9485],
[0.75303, 0.7308, 0.94915],
[0.75615, 0.73416, 0.9498],
[0.75925, 0.73752, 0.95045],
[0.76236, 0.74089, 0.9511],
[0.76546, 0.74426, 0.95174],
[0.76856, 0.74763, 0.95238],
[0.77167, 0.75101, 0.95302],
[0.77477, 0.75438, 0.95367],
[0.77788, 0.75776, 0.95431],
[0.78098, 0.76114, 0.95495],
[0.7841, 0.76453, 0.95559],
[0.78721, 0.76791, 0.95623],
[0.79032, 0.77131, 0.95687],
[0.79344, 0.7747, 0.95752],
[0.79656, 0.77811, 0.95816],
[0.79969, 0.78151, 0.95881],
[0.80281, 0.78492, 0.95945],
[0.80595, 0.78833, 0.9601],
[0.80909, 0.79175, 0.96074],
[0.81222, 0.79517, 0.96139],
[0.81537, 0.79859, 0.96203],
[0.81851, 0.80202, 0.96269],
[0.82167, 0.80545, 0.96334],
[0.82482, 0.80889, 0.96398],
[0.82797, 0.81233, 0.96463],
[0.83114, 0.81577, 0.96529],
[0.8343, 0.81921, 0.96594],
[0.83747, 0.82267, 0.96659],
[0.84064, 0.82612, 0.96725],
[0.84381, 0.82958, 0.9679],
[0.84699, 0.83304, 0.96856],
[0.85017, 0.83651, 0.96921],
[0.85336, 0.83998, 0.96987],
[0.85655, 0.84345, 0.97053],
[0.85974, 0.84693, 0.97118],
[0.86294, 0.85041, 0.97184],
[0.86614, 0.85389, 0.97251],
[0.86934, 0.85739, 0.97316],
[0.87255, 0.86088, 0.97382],
[0.87576, 0.86438, 0.97448],
[0.87897, 0.86788, 0.97515],
[0.88219, 0.87138, 0.97581],
[0.88541, 0.87489, 0.97647],
[0.88864, 0.8784, 0.97714],
[0.89186, 0.88192, 0.9778],
[0.8951, 0.88544, 0.97847],
[0.89833, 0.88896, 0.97914],
[0.90157, 0.89249, 0.97981],
[0.90481, 0.89602, 0.98047],
[0.90806, 0.89956, 0.98114],
[0.9113, 0.9031, 0.98181],
[0.91456, 0.90664, 0.98248],
[0.91781, 0.91019, 0.98315],
[0.92107, 0.91374, 0.98382],
[0.92433, 0.91729, 0.9845],
[0.92759, 0.92084, 0.98517],
[0.93087, 0.92441, 0.98584],
[0.93413, 0.92797, 0.98652],
[0.93741, 0.93154, 0.98719],
[0.94069, 0.93511, 0.98786],
[0.94397, 0.93869, 0.98854],
[0.94725, 0.94227, 0.98921],
[0.95053, 0.94585, 0.98989],
[0.95382, 0.94944, 0.99056],
[0.95711, 0.95303, 0.99124],
[0.9604, 0.95662, 0.99191],
[0.96369, 0.96022, 0.99259],
[0.96698, 0.96382, 0.99326],
[0.97027, 0.96742, 0.99393],
[0.97357, 0.97102, 0.9946],
[0.97686, 0.97463, 0.99527],
[0.98016, 0.97824, 0.99594],
[0.98345, 0.98186, 0.99662],
[0.98675, 0.98548, 0.99728],
[0.99004, 0.9891, 0.99795],
[0.99334, 0.99272, 0.99862],
[0.99662, 0.99634, 0.99929],
[0.99992, 0.99997, 0.99995]]
devon_map = LinearSegmentedColormap.from_list('devon', cm_data)
# For use of "viscm view"
test_cm = devon_map
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(devon_map)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=devon_map)
plt.show()
| [
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.linspace",
"matplotlib.pyplot.show",
"viscm.viscm"
] | [((11858, 11909), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""devon"""', 'cm_data'], {}), "('devon', cm_data)\n", (11891, 11909), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((12397, 12407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12405, 12407), True, 'import matplotlib.pyplot as plt\n'), ((12153, 12169), 'viscm.viscm', 'viscm', (['devon_map'], {}), '(devon_map)\n', (12158, 12169), False, 'from viscm import viscm\n'), ((12296, 12320), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(256)'], {}), '(0, 100, 256)\n', (12307, 12320), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import time
import re
import numpy
from pyscf import lib
from pyscf import gto, scf, dft, mcscf, mp, cc, lo
def sort_mo(casscf, idx, mo_coeff):
mol = casscf.mol
corth = lo.orth.orth_ao(mol)
casorb = corth[:,idx]
nmo = mo_coeff.shape[1]
ncore = casscf.ncore
ncas = casscf.ncas
nelecas = casscf.nelecas
assert(ncas == casorb.shape[1])
mo1 = reduce(numpy.dot, (casorb.T, casscf._scf.get_ovlp(), mo_coeff))
sdiag = numpy.einsum('pi,pi->i', mo1, mo1)
nocc = ncore + nelecas[0]
casidx = numpy.hstack((numpy.argsort(sdiag[:nocc])[ncore:],
nocc+numpy.argsort(-sdiag[nocc:])[:ncas-nelecas[0]]))
notcas = [i for i in range(nmo) if i not in casidx]
mo = numpy.hstack((mo_coeff[:,notcas[:ncore]],
mo_coeff[:,casidx],
mo_coeff[:,notcas[ncore:]]))
return mo
mol = gto.Mole()
mol.verbose = 0
log = lib.logger.Logger(mol.stdout, 5)
with open('/proc/cpuinfo') as f:
for line in f:
if 'model name' in line:
log.note(line[:-1])
break
with open('/proc/meminfo') as f:
log.note(f.readline()[:-1])
log.note('OMP_NUM_THREADS=%s\n', os.environ['OMP_NUM_THREADS'])
for bas in ('3-21g', '6-31g*', 'cc-pVTZ', 'ANO-Roos-TZ'):
mol.atom = 'N 0 0 0; N 0 0 1.1'
mol.basis = bas
mol.build(0, 0)
cpu0 = time.clock(), time.time()
mf = scf.RHF(mol)
mf.kernel()
cpu0 = log.timer('N2 %s RHF'%bas, *cpu0)
mymp2 = mp.MP2(mf)
mymp2.kernel()
cpu0 = log.timer('N2 %s MP2'%bas, *cpu0)
mymc = mcscf.CASSCF(mf, 4, 4)
idx = [i for i,s in enumerate(mol.spheric_labels(1)) if
re.search('2p[xy]', s)]
mo = sort_mo(mymc, idx, mf.mo_coeff)
mymc.kernel(mo)
cpu0 = log.timer('N2 %s CASSCF'%bas, *cpu0)
mycc = cc.CCSD(mf)
mycc.kernel()
cpu0 = log.timer('N2 %s CCSD'%bas, *cpu0)
mf = dft.RKS(mol)
mf.xc = 'b3lyp'
mf.kernel()
cpu0 = log.timer('N2 %s B3LYP'%bas, *cpu0)
mf = scf.density_fit(mf)
mf.kernel()
cpu0 = log.timer('N2 %s density-fit RHF'%bas, *cpu0)
| [
"pyscf.lib.logger.Logger",
"pyscf.lo.orth.orth_ao",
"pyscf.gto.Mole",
"pyscf.dft.RKS",
"pyscf.scf.density_fit",
"numpy.einsum",
"pyscf.cc.CCSD",
"pyscf.mcscf.CASSCF",
"numpy.hstack",
"time.clock",
"time.time",
"numpy.argsort",
"pyscf.scf.RHF",
"re.search",
"pyscf.mp.MP2"
] | [((920, 930), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (928, 930), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((953, 985), 'pyscf.lib.logger.Logger', 'lib.logger.Logger', (['mol.stdout', '(5)'], {}), '(mol.stdout, 5)\n', (970, 985), False, 'from pyscf import lib\n'), ((210, 230), 'pyscf.lo.orth.orth_ao', 'lo.orth.orth_ao', (['mol'], {}), '(mol)\n', (225, 230), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((486, 520), 'numpy.einsum', 'numpy.einsum', (['"""pi,pi->i"""', 'mo1', 'mo1'], {}), "('pi,pi->i', mo1, mo1)\n", (498, 520), False, 'import numpy\n'), ((762, 859), 'numpy.hstack', 'numpy.hstack', (['(mo_coeff[:, notcas[:ncore]], mo_coeff[:, casidx], mo_coeff[:, notcas[ncore:]])'], {}), '((mo_coeff[:, notcas[:ncore]], mo_coeff[:, casidx], mo_coeff[:,\n notcas[ncore:]]))\n', (774, 859), False, 'import numpy\n'), ((1432, 1444), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (1439, 1444), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((1519, 1529), 'pyscf.mp.MP2', 'mp.MP2', (['mf'], {}), '(mf)\n', (1525, 1529), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((1606, 1628), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (1618, 1628), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((1845, 1856), 'pyscf.cc.CCSD', 'cc.CCSD', (['mf'], {}), '(mf)\n', (1852, 1856), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((1931, 1943), 'pyscf.dft.RKS', 'dft.RKS', (['mol'], {}), '(mol)\n', (1938, 1943), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((2037, 2056), 'pyscf.scf.density_fit', 'scf.density_fit', (['mf'], {}), '(mf)\n', (2052, 2056), False, 'from pyscf import gto, scf, dft, mcscf, mp, cc, lo\n'), ((1396, 1408), 'time.clock', 'time.clock', ([], {}), '()\n', (1406, 1408), False, 'import time\n'), ((1410, 1421), 'time.time', 'time.time', ([], {}), '()\n', (1419, 1421), False, 'import time\n'), ((1700, 
1722), 're.search', 're.search', (['"""2p[xy]"""', 's'], {}), "('2p[xy]', s)\n", (1709, 1722), False, 'import re\n'), ((579, 606), 'numpy.argsort', 'numpy.argsort', (['sdiag[:nocc]'], {}), '(sdiag[:nocc])\n', (592, 606), False, 'import numpy\n'), ((648, 676), 'numpy.argsort', 'numpy.argsort', (['(-sdiag[nocc:])'], {}), '(-sdiag[nocc:])\n', (661, 676), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 00:56:03 2018
@author: Shreyans
"""
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
# Fit a decision tree on precomputed image features and write test-set
# predictions to predictions.csv in submission format.
train_features = np.load("../data/train_features.npy")
test_features = np.load("../data/test_features.npy")
train_data = pd.read_csv("../data/meta-data/train.csv")
test_data = pd.read_csv("../data/meta-data/test.csv")
# Every column except the image identifier is a label column.
train_labels = train_data.drop("Image_name", axis=1)
clf = DecisionTreeClassifier(random_state=123456)  # fixed seed for reproducibility
clf.fit(train_features, train_labels.attrib_01)
pred_labels = clf.predict(test_features)
# NOTE(review): pred_labels is 1-D (a single attribute) while columns lists
# every label column — confirm train.csv has exactly one attribute column.
pred_df = pd.DataFrame(pred_labels, columns=list(train_labels.columns))
# Bug fix: predictions are for the TEST set, so the identifier column must
# come from test.csv (the original inserted train image names).
pred_df.insert(0, "Image_name", test_data.Image_name)
pred_df.to_csv("predictions.csv", index=False)
"pandas.read_csv",
"numpy.load",
"sklearn.tree.DecisionTreeClassifier"
] | [((193, 230), 'numpy.load', 'np.load', (['"""../data/train_features.npy"""'], {}), "('../data/train_features.npy')\n", (200, 230), True, 'import numpy as np\n'), ((247, 283), 'numpy.load', 'np.load', (['"""../data/test_features.npy"""'], {}), "('../data/test_features.npy')\n", (254, 283), True, 'import numpy as np\n'), ((298, 340), 'pandas.read_csv', 'pd.read_csv', (['"""../data/meta-data/train.csv"""'], {}), "('../data/meta-data/train.csv')\n", (309, 340), True, 'import pandas as pd\n'), ((353, 394), 'pandas.read_csv', 'pd.read_csv', (['"""../data/meta-data/test.csv"""'], {}), "('../data/meta-data/test.csv')\n", (364, 394), True, 'import pandas as pd\n'), ((458, 501), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(123456)'}), '(random_state=123456)\n', (480, 501), False, 'from sklearn.tree import DecisionTreeClassifier\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import io
from logging import getLogger
import numpy as np
import torch
from scipy.stats import spearmanr
# Root directories of the word-similarity evaluation datasets:
# monolingual benchmarks and the cross-lingual SemEval-2017 files.
MONOLINGUAL_EVAL_PATH = 'data/monolingual'
SEMEVAL17_EVAL_PATH = 'data/crosslingual/wordsim'

# Module-level logger (root logger; configuration is left to the caller).
logger = getLogger()
def get_word_pairs(path, lower=True):
    """
    Parse a word-similarity file into (word1, word2, score) tuples.

    Each usable line holds exactly two words and a gold similarity score.
    Lines with more than three fields (multi-word phrases) are skipped and
    are only tolerated in the SEMEVAL17 / EN-IT_MWS353 evaluation files.
    """
    assert os.path.isfile(path) and type(lower) is bool
    pairs = []
    with io.open(path, 'r', encoding='utf-8') as handle:
        for raw in handle:
            text = raw.rstrip()
            if lower:
                text = text.lower()
            fields = text.split()
            if len(fields) == 3:
                pairs.append((fields[0], fields[1], float(fields[2])))
            else:
                # phrases only appear in these two datasets; anything else
                # is a malformed file
                assert len(fields) > 3
                assert 'SEMEVAL17' in os.path.basename(path) or 'EN-IT_MWS353' in path
    return pairs
def get_word_id(word, word2id, lower):
    """
    Look up a word's ID in the vocabulary.

    When the embeddings are case-sensitive (lower=False) but the evaluation
    file is lowercased, fall back to the Capitalized and then Title-cased
    variants of the word. Returns None when no variant is found.
    """
    assert type(lower) is bool
    wid = word2id.get(word)
    if lower:
        return wid
    if wid is None:
        wid = word2id.get(word.capitalize())
    if wid is None:
        wid = word2id.get(word.title())
    return wid
def get_spearman_rho(word2id1, embeddings1, path, lower,
                     word2id2=None, embeddings2=None):
    """
    Compute monolingual or cross-lingual word similarity score.

    Predicted similarity is the cosine between the two word vectors; the
    returned value is the Spearman correlation of predictions against the
    gold scores in `path`, plus the number of pairs scored and the number
    skipped because a word was out of vocabulary.

    When word2id2/embeddings2 are omitted the comparison is monolingual
    (both words looked up in the first vocabulary).
    """
    # Both or neither of the second-language arguments must be supplied.
    assert not ((word2id2 is None) ^ (embeddings2 is None))
    word2id2 = word2id1 if word2id2 is None else word2id2
    embeddings2 = embeddings1 if embeddings2 is None else embeddings2
    assert len(word2id1) == embeddings1.shape[0]
    assert len(word2id2) == embeddings2.shape[0]
    assert type(lower) is bool
    word_pairs = get_word_pairs(path)
    not_found = 0
    pred = []
    gold = []
    for word1, word2, similarity in word_pairs:
        id1 = get_word_id(word1, word2id1, lower)
        id2 = get_word_id(word2, word2id2, lower)
        # skip pairs with an out-of-vocabulary word
        if id1 is None or id2 is None:
            not_found += 1
            continue
        u = embeddings1[id1]
        v = embeddings2[id2]
        # cosine similarity between the two embeddings
        score = u.dot(v) / (np.linalg.norm(u) * np.linalg.norm(v))
        gold.append(similarity)
        pred.append(score)
    return spearmanr(gold, pred).correlation, len(gold), not_found
def get_wordsim_scores(language, word2id, embeddings, lower=True):
    """
    Return monolingual word similarity scores.

    Runs every dataset file under MONOLINGUAL_EVAL_PATH/<language> whose
    name starts with the upper-cased language code, logging a table of
    found / not-found counts and Spearman rho per dataset.  Returns a
    {dataset_name: rho} dict, or None when no directory exists for the
    language.
    """
    dirpath = os.path.join(MONOLINGUAL_EVAL_PATH, language)
    if not os.path.isdir(dirpath):
        return None
    scores = {}
    # fixed-width log table: 30/10/13/12-character columns
    separator = "=" * (30 + 1 + 10 + 1 + 13 + 1 + 12)
    pattern = "%30s %10s %13s %12s"
    logger.info(separator)
    logger.info(pattern % ("Dataset", "Found", "Not found", "Rho"))
    logger.info(separator)
    for filename in list(os.listdir(dirpath)):
        # dataset files are named like EN_WS353.txt → prefix match on 'EN_'
        if filename.startswith('%s_' % (language.upper())):
            filepath = os.path.join(dirpath, filename)
            coeff, found, not_found = get_spearman_rho(word2id, embeddings, filepath, lower)
            logger.info(pattern % (filename[:-4], str(found), str(not_found), "%.4f" % coeff))
            # key is the filename without its 4-character extension
            scores[filename[:-4]] = coeff
    logger.info(separator)

    return scores
def get_wordanalogy_scores(language, word2id, embeddings, lower=True):
    """
    Return (english) word analogy score

    Evaluates a : b :: c : d analogies from questions-words.txt by nearest
    neighbor search on normalized embeddings. Only implemented for English.
    Returns a {category: accuracy} dict, or None for other languages.
    """
    dirpath = os.path.join(MONOLINGUAL_EVAL_PATH, language)
    if not os.path.isdir(dirpath) or language not in ["en"]:
        return None

    # normalize word embeddings
    embeddings = embeddings / np.sqrt((embeddings ** 2).sum(1))[:, None]

    # scores by category
    scores = {}

    word_ids = {}
    queries = {}

    with io.open(os.path.join(dirpath, 'questions-words.txt'), 'r', encoding='utf-8') as f:
        for line in f:
            # new line
            line = line.rstrip()
            if lower:
                line = line.lower()

            # new category: lines like ": capital-common-countries"
            if ":" in line:
                assert line[1] == ' '
                category = line[2:]
                assert category not in scores
                scores[category] = {'n_found': 0, 'n_not_found': 0, 'n_correct': 0}
                word_ids[category] = []
                queries[category] = []
                continue

            # get word IDs
            assert len(line.split()) == 4, line
            word1, word2, word3, word4 = line.split()
            word_id1 = get_word_id(word1, word2id, lower)
            word_id2 = get_word_id(word2, word2id, lower)
            word_id3 = get_word_id(word3, word2id, lower)
            word_id4 = get_word_id(word4, word2id, lower)

            # if at least one word is not found, the question is skipped
            if any(x is None for x in [word_id1, word_id2, word_id3, word_id4]):
                scores[category]['n_not_found'] += 1
                continue
            else:
                scores[category]['n_found'] += 1
                word_ids[category].append([word_id1, word_id2, word_id3, word_id4])
                # generate query vector and get nearest neighbors
                # (the expected answer is word3: w1 - w2 + w4 ≈ w3)
                query = embeddings[word_id1] - embeddings[word_id2] + embeddings[word_id4]
                query = query / np.linalg.norm(query)

                queries[category].append(query)

    # Compute score for each category
    for cat in queries:
        qs = torch.from_numpy(np.vstack(queries[cat]))
        keys = torch.from_numpy(embeddings.T)
        values = qs.mm(keys).cpu().numpy()

        # be sure we do not select input words as the answer
        for i, ws in enumerate(word_ids[cat]):
            for wid in [ws[0], ws[1], ws[3]]:
                values[i, wid] = -1e9
        # correct when the top neighbor is the held-out third word
        scores[cat]['n_correct'] = np.sum(values.argmax(axis=1) == [ws[2] for ws in word_ids[cat]])

    # pretty print
    separator = "=" * (30 + 1 + 10 + 1 + 13 + 1 + 12)
    pattern = "%30s %10s %13s %12s"
    logger.info(separator)
    logger.info(pattern % ("Category", "Found", "Not found", "Accuracy"))
    logger.info(separator)

    # compute and log accuracies
    accuracies = {}
    for k in sorted(scores.keys()):
        v = scores[k]
        accuracies[k] = float(v['n_correct']) / max(v['n_found'], 1)
        logger.info(pattern % (k, str(v['n_found']), str(v['n_not_found']), "%.4f" % accuracies[k]))
    logger.info(separator)

    return accuracies
def get_crosslingual_wordsim_scores(lang1, word2id1, embeddings1,
                                    lang2, word2id2, embeddings2, lower=True):
    """
    Return cross-lingual word similarity scores.

    Looks for a SemEval-2017 file named for either language ordering and
    scores the pair with get_spearman_rho. Returns {task_name: rho}, or
    None when no file exists for the language pair.
    """
    f1 = os.path.join(SEMEVAL17_EVAL_PATH, '%s-%s-SEMEVAL17.txt' % (lang1, lang2))
    f2 = os.path.join(SEMEVAL17_EVAL_PATH, '%s-%s-SEMEVAL17.txt' % (lang2, lang1))
    if not (os.path.exists(f1) or os.path.exists(f2)):
        return None

    if os.path.exists(f1):
        coeff, found, not_found = get_spearman_rho(
            word2id1, embeddings1, f1,
            lower, word2id2, embeddings2
        )
    elif os.path.exists(f2):
        # reversed file: swap the language roles to match its column order
        coeff, found, not_found = get_spearman_rho(
            word2id2, embeddings2, f2,
            lower, word2id1, embeddings1
        )

    scores = {}
    separator = "=" * (30 + 1 + 10 + 1 + 13 + 1 + 12)
    pattern = "%30s %10s %13s %12s"
    logger.info(separator)
    logger.info(pattern % ("Dataset", "Found", "Not found", "Rho"))
    logger.info(separator)
    task_name = '%s_%s_SEMEVAL17' % (lang1.upper(), lang2.upper())
    logger.info(pattern % (task_name, str(found), str(not_found), "%.4f" % coeff))
    scores[task_name] = coeff
    # NOTE(review): scores always holds one entry at this point, so this
    # branch is unreachable dead code.
    if not scores:
        return None
    logger.info(separator)

    return scores
| [
"os.path.basename",
"os.path.isdir",
"scipy.stats.spearmanr",
"os.path.exists",
"os.path.isfile",
"numpy.linalg.norm",
"io.open",
"numpy.vstack",
"os.path.join",
"os.listdir",
"logging.getLogger",
"torch.from_numpy"
] | [((415, 426), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (424, 426), False, 'from logging import getLogger\n'), ((2829, 2874), 'os.path.join', 'os.path.join', (['MONOLINGUAL_EVAL_PATH', 'language'], {}), '(MONOLINGUAL_EVAL_PATH, language)\n', (2841, 2874), False, 'import os\n'), ((3741, 3786), 'os.path.join', 'os.path.join', (['MONOLINGUAL_EVAL_PATH', 'language'], {}), '(MONOLINGUAL_EVAL_PATH, language)\n', (3753, 3786), False, 'import os\n'), ((6900, 6973), 'os.path.join', 'os.path.join', (['SEMEVAL17_EVAL_PATH', "('%s-%s-SEMEVAL17.txt' % (lang1, lang2))"], {}), "(SEMEVAL17_EVAL_PATH, '%s-%s-SEMEVAL17.txt' % (lang1, lang2))\n", (6912, 6973), False, 'import os\n'), ((6983, 7056), 'os.path.join', 'os.path.join', (['SEMEVAL17_EVAL_PATH', "('%s-%s-SEMEVAL17.txt' % (lang2, lang1))"], {}), "(SEMEVAL17_EVAL_PATH, '%s-%s-SEMEVAL17.txt' % (lang2, lang1))\n", (6995, 7056), False, 'import os\n'), ((7140, 7158), 'os.path.exists', 'os.path.exists', (['f1'], {}), '(f1)\n', (7154, 7158), False, 'import os\n'), ((573, 593), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (587, 593), False, 'import os\n'), ((647, 683), 'io.open', 'io.open', (['path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(path, 'r', encoding='utf-8')\n", (654, 683), False, 'import io\n'), ((2886, 2908), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (2899, 2908), False, 'import os\n'), ((3185, 3204), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (3195, 3204), False, 'import os\n'), ((5760, 5790), 'torch.from_numpy', 'torch.from_numpy', (['embeddings.T'], {}), '(embeddings.T)\n', (5776, 5790), False, 'import torch\n'), ((7311, 7329), 'os.path.exists', 'os.path.exists', (['f2'], {}), '(f2)\n', (7325, 7329), False, 'import os\n'), ((2627, 2648), 'scipy.stats.spearmanr', 'spearmanr', (['gold', 'pred'], {}), '(gold, pred)\n', (2636, 2648), False, 'from scipy.stats import spearmanr\n'), ((3290, 3321), 'os.path.join', 'os.path.join', (['dirpath', 
'filename'], {}), '(dirpath, filename)\n', (3302, 3321), False, 'import os\n'), ((3798, 3820), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (3811, 3820), False, 'import os\n'), ((4070, 4114), 'os.path.join', 'os.path.join', (['dirpath', '"""questions-words.txt"""'], {}), "(dirpath, 'questions-words.txt')\n", (4082, 4114), False, 'import os\n'), ((5720, 5743), 'numpy.vstack', 'np.vstack', (['queries[cat]'], {}), '(queries[cat])\n', (5729, 5743), True, 'import numpy as np\n'), ((7069, 7087), 'os.path.exists', 'os.path.exists', (['f1'], {}), '(f1)\n', (7083, 7087), False, 'import os\n'), ((7091, 7109), 'os.path.exists', 'os.path.exists', (['f2'], {}), '(f2)\n', (7105, 7109), False, 'import os\n'), ((2518, 2535), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (2532, 2535), True, 'import numpy as np\n'), ((2538, 2555), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (2552, 2555), True, 'import numpy as np\n'), ((5556, 5577), 'numpy.linalg.norm', 'np.linalg.norm', (['query'], {}), '(query)\n', (5570, 5577), True, 'import numpy as np\n'), ((985, 1007), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1001, 1007), False, 'import os\n')] |
## test_defense.py -- test defense
##
## Copyright (C) 2017, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
# Load external module: MagNet
import sys, os
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_dir)
from externals.MagNet.setup_cifar import CIFAR
from externals.MagNet.utils import prepare_data
from externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator
import numpy as np
import os
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Lambda
from keras.activations import softmax
class ClassifierWrapper:
    def __init__(self, model):
        """
        Keras classifier wrapper.

        Truncates `model` at its penultimate layer so the wrapped sub-model
        spits logits as output; a separate one-layer softmax model converts
        logits to probabilities on demand.
        """
        layer_id = len(model.layers)-2
        self.model = Model(inputs=model.layers[0].input, outputs=model.layers[layer_id].output)
        self.softmax = Sequential()
        # input_shape=(10,) assumes a 10-class problem (CIFAR-10 here)
        self.softmax.add(Lambda(lambda X: softmax(X, axis=1), input_shape=(10,)))

    def classify(self, X, option="logit", T=1):
        """Return raw logits (option="logit") or temperature-T softmax
        probabilities (option="prob"); any other option returns None."""
        if option == "logit":
            return self.model.predict(X)
        if option == "prob":
            logits = self.model.predict(X)/T
            return self.softmax.predict(logits)

    def print(self):
        # Bug fix: the original returned "Classifier:"+self.path, but this
        # wrapper never sets self.path (that attribute belongs to MagNet's
        # file-backed Classifier), so calling print() raised AttributeError.
        return "Classifier:" + getattr(self.model, "name", "keras-model")
class MagNetDetector:
    """MagNet defense wrapper: combines an autoencoder-based error
    detector, two divergence-based detectors, and a reformer into a single
    train/test interface over CIFAR-10."""

    def __init__(self, model, detector_name):
        # NOTE(review): detector_name is accepted but never used.
        classifier = ClassifierWrapper(model)
        autoencoder_model_fpath = os.path.join(project_dir, "downloads/MagNet/defensive_models/CIFAR")
        reformer = SimpleReformer(autoencoder_model_fpath)
        id_reformer = IdReformer()
        # Note: we may swap the two.
        reconstructor = id_reformer
        prober = reformer
        # reconstructor = reformer
        # prober = id_reformer
        # eb: reconstruction-error detector; db-I/II: divergence detectors
        # at two softmax temperatures.
        eb_detector = AEDetector(autoencoder_model_fpath, p=1)
        db_detector_I = DBDetector(reconstructor, prober, classifier, T=10)
        db_detector_II = DBDetector(reconstructor, prober, classifier, T=40)
        detector_dict = dict()
        detector_dict["db-I"] = db_detector_I
        detector_dict["db-II"] = db_detector_II
        detector_dict['eb'] = eb_detector
        self.operator = Operator(CIFAR(), classifier, detector_dict, reformer)

    def train(self, X=None, Y=None, fpr=None):
        """Calibrate per-detector thresholds from fixed drop rates.
        X, Y and fpr are currently unused (see the commented alternative)."""
        # CIFAR-10
        drop_rate={"db-I": 0.01, "db-II": 0.01, "eb": 0.005}
        # drop_rate={"db-I": fpr, "db-II": fpr, "eb": fpr}
        print("\n==========================================================")
        print("Drop Rate:", drop_rate)
        self.thrs = self.operator.get_thrs(drop_rate)
        print("Thresholds:", self.thrs)

    def test(self, X):
        """Return a boolean detection mask over X (True = flagged as
        adversarial), duplicated in both return slots."""
        all_pass, detector_breakdown = self.operator.filter(X, self.thrs)
        print ("detector_breakdown", detector_breakdown)
        ret_detection = np.array([ False if i in all_pass else True for i in range(len(X)) ])
        return ret_detection, ret_detection
if __name__ == '__main__':
    # NOTE(review): MagNetDetector.__init__ requires (model, detector_name)
    # but is called with no arguments here, so this entry point raises a
    # TypeError as written — confirm which trained model should be passed.
    magnet_detector = MagNetDetector()
    magnet_detector.train()
    # Measure how often clean CIFAR test data is (wrongly) flagged.
    X = magnet_detector.operator.data.test_data
    Y_detected, _ = magnet_detector.test(X)
    print ("False positive rate: %f" % (np.sum(Y_detected)/float(len(X))))
| [
"sys.path.append",
"externals.MagNet.worker.DBDetector",
"os.path.abspath",
"externals.MagNet.worker.AEDetector",
"numpy.sum",
"keras.activations.softmax",
"keras.models.Model",
"externals.MagNet.setup_cifar.CIFAR",
"externals.MagNet.worker.SimpleReformer",
"keras.models.Sequential",
"externals.... | [((320, 348), 'sys.path.append', 'sys.path.append', (['project_dir'], {}), '(project_dir)\n', (335, 348), False, 'import sys, os\n'), ((292, 317), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'import os\n'), ((1004, 1078), 'keras.models.Model', 'Model', ([], {'inputs': 'model.layers[0].input', 'outputs': 'model.layers[layer_id].output'}), '(inputs=model.layers[0].input, outputs=model.layers[layer_id].output)\n', (1009, 1078), False, 'from keras.models import Model, Sequential\n'), ((1102, 1114), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1112, 1114), False, 'from keras.models import Model, Sequential\n'), ((1665, 1733), 'os.path.join', 'os.path.join', (['project_dir', '"""downloads/MagNet/defensive_models/CIFAR"""'], {}), "(project_dir, 'downloads/MagNet/defensive_models/CIFAR')\n", (1677, 1733), False, 'import os\n'), ((1754, 1793), 'externals.MagNet.worker.SimpleReformer', 'SimpleReformer', (['autoencoder_model_fpath'], {}), '(autoencoder_model_fpath)\n', (1768, 1793), False, 'from externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator\n'), ((1816, 1828), 'externals.MagNet.worker.IdReformer', 'IdReformer', ([], {}), '()\n', (1826, 1828), False, 'from externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator\n'), ((2018, 2058), 'externals.MagNet.worker.AEDetector', 'AEDetector', (['autoencoder_model_fpath'], {'p': '(1)'}), '(autoencoder_model_fpath, p=1)\n', (2028, 2058), False, 'from externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator\n'), ((2083, 2134), 'externals.MagNet.worker.DBDetector', 'DBDetector', (['reconstructor', 'prober', 'classifier'], {'T': '(10)'}), '(reconstructor, prober, classifier, T=10)\n', (2093, 2134), False, 'from externals.MagNet.worker import 
AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator\n'), ((2160, 2211), 'externals.MagNet.worker.DBDetector', 'DBDetector', (['reconstructor', 'prober', 'classifier'], {'T': '(40)'}), '(reconstructor, prober, classifier, T=40)\n', (2170, 2211), False, 'from externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator\n'), ((2414, 2421), 'externals.MagNet.setup_cifar.CIFAR', 'CIFAR', ([], {}), '()\n', (2419, 2421), False, 'from externals.MagNet.setup_cifar import CIFAR\n'), ((3381, 3399), 'numpy.sum', 'np.sum', (['Y_detected'], {}), '(Y_detected)\n', (3387, 3399), True, 'import numpy as np\n'), ((1157, 1175), 'keras.activations.softmax', 'softmax', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1164, 1175), False, 'from keras.activations import softmax\n')] |
"""
test_physics_cross_sections.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon Apr 22 10:54:18 2013
Description:
"""
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.CrossSections import *
def test():
    """Plot H and He I photoionization cross sections, comparing the
    Verner & Ferland (1996) fits against the approximate form, and save
    the figure next to this file."""
    E = np.logspace(np.log10(13.6), 4)

    sigma = PhotoIonizationCrossSection
    sigma_approx = ApproximatePhotoIonizationCrossSection

    # Bug fix: on Python 3, map() returns a lazy iterator that matplotlib
    # cannot coerce to an array; materialize the values instead.
    pl.loglog(E, [sigma(EE, 0) for EE in E],
        color='k', ls='-', label=r'H')
    pl.loglog(E, [sigma(EE, 1) for EE in E],
        color='k', ls='--', label=r'HeI')
    pl.loglog(E, [sigma_approx(EE, 0) for EE in E],
        color='b', ls='-')
    pl.loglog(E, [sigma_approx(EE, 1) for EE in E],
        color='b', ls='--')
    pl.legend(frameon=False)
    pl.xlabel(r'$h\nu \ (\mathrm{eV})$')
    pl.ylabel(r'$\sigma_{\nu} \ (\mathrm{cm}^2)$')

    pl.annotate(r'Verner & Ferland (1996)', (20, 1e-24), ha='left')
    pl.annotate(r'Approximate', (20, 1e-25), color='b', ha='left')

    # Bug fix: rstrip('.py') strips any trailing '.', 'p' or 'y' characters,
    # not the literal suffix; slice the 3-character extension off instead.
    pl.savefig('%s.png' % __file__[:-3])
    pl.close()

    assert True
# Generate the comparison plot when executed as a script.
if __name__ == '__main__':
    test()
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel"
] | [((768, 792), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (777, 792), True, 'import matplotlib.pyplot as pl\n'), ((802, 840), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""$h\\\\nu \\\\ (\\\\mathrm{eV})$"""'], {}), "('$h\\\\nu \\\\ (\\\\mathrm{eV})$')\n", (811, 840), True, 'import matplotlib.pyplot as pl\n'), ((843, 892), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""$\\\\sigma_{\\\\nu} \\\\ (\\\\mathrm{cm}^2)$"""'], {}), "('$\\\\sigma_{\\\\nu} \\\\ (\\\\mathrm{cm}^2)$')\n", (852, 892), True, 'import matplotlib.pyplot as pl\n'), ((899, 961), 'matplotlib.pyplot.annotate', 'pl.annotate', (['"""Verner & Ferland (1996)"""', '(20, 1e-24)'], {'ha': '"""left"""'}), "('Verner & Ferland (1996)', (20, 1e-24), ha='left')\n", (910, 961), True, 'import matplotlib.pyplot as pl\n'), ((967, 1028), 'matplotlib.pyplot.annotate', 'pl.annotate', (['"""Approximate"""', '(20, 1e-25)'], {'color': '"""b"""', 'ha': '"""left"""'}), "('Approximate', (20, 1e-25), color='b', ha='left')\n", (978, 1028), True, 'import matplotlib.pyplot as pl\n'), ((1091, 1101), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (1099, 1101), True, 'import matplotlib.pyplot as pl\n'), ((282, 296), 'numpy.log10', 'np.log10', (['(13.6)'], {}), '(13.6)\n', (290, 296), True, 'import numpy as np\n')] |
import numpy as np
import os
import ntpath
def fill_mesh(mesh2fill, file: str, opt):
    """Populate `mesh2fill` from a cached .npz of this mesh/augmentation if
    one exists, otherwise parse and process the .obj from scratch and write
    the cache. All mesh attributes are copied onto `mesh2fill`."""
    load_path = get_mesh_path(file, opt.num_aug)
    if os.path.exists(load_path):
        # allow_pickle is required for the object-typed fields (ve, edge_areas)
        mesh_data = np.load(load_path, encoding='latin1', allow_pickle=True)
    else:
        mesh_data = from_scratch(file, opt)
        np.savez_compressed(load_path, gemm_edges=mesh_data.gemm_edges, vs=mesh_data.vs, edges=mesh_data.edges,
                            edges_count=mesh_data.edges_count, ve=mesh_data.ve, v_mask=mesh_data.v_mask,
                            filename=mesh_data.filename, sides=mesh_data.sides,
                            edge_lengths=mesh_data.edge_lengths, edge_areas=mesh_data.edge_areas,
                            features=mesh_data.features)
    # copy every field onto the target mesh (works for both the npz mapping
    # and the freshly built MeshPrep, which supports [] access)
    mesh2fill.vs = mesh_data['vs']
    mesh2fill.edges = mesh_data['edges']
    mesh2fill.gemm_edges = mesh_data['gemm_edges']
    mesh2fill.edges_count = int(mesh_data['edges_count'])
    mesh2fill.ve = mesh_data['ve']
    mesh2fill.v_mask = mesh_data['v_mask']
    mesh2fill.filename = str(mesh_data['filename'])
    mesh2fill.edge_lengths = mesh_data['edge_lengths']
    mesh2fill.edge_areas = mesh_data['edge_areas']
    mesh2fill.features = mesh_data['features']
    mesh2fill.sides = mesh_data['sides']
def get_mesh_path(file: str, num_aug: int):
    """Return the cache-file path for one randomly chosen augmentation of
    `file`, creating the sibling 'cache' directory if needed.

    The augmentation index is drawn uniformly from [0, num_aug), so
    repeated calls sample different cached augmentations of the same mesh.
    """
    filename, _ = os.path.splitext(file)
    dir_name = os.path.dirname(filename)
    prefix = os.path.basename(filename)
    load_dir = os.path.join(dir_name, 'cache')
    load_file = os.path.join(load_dir, '%s_%03d.npz' % (prefix, np.random.randint(0, num_aug)))
    # exist_ok makes the original isdir() pre-check unnecessary (and
    # removes its check-then-create race).
    os.makedirs(load_dir, exist_ok=True)
    return load_file
def from_scratch(file, opt):
    """Build a fully processed mesh record from a .obj file: parse, drop
    non-manifold faces, augment, build GeMM connectivity and extract
    per-edge features."""

    class MeshPrep:
        # NOTE(review): eval-based attribute access mirrors npz-style []
        # lookup so fill_mesh can index either object uniformly.
        def __getitem__(self, item):
            return eval('self.' + item)

    mesh_data = MeshPrep()
    mesh_data.vs = mesh_data.edges = None
    mesh_data.gemm_edges = mesh_data.sides = None
    mesh_data.edges_count = None
    mesh_data.ve = None
    mesh_data.v_mask = None
    mesh_data.filename = 'unknown'
    mesh_data.edge_lengths = None
    mesh_data.edge_areas = []
    mesh_data.vs, faces = fill_from_file(mesh_data, file)
    mesh_data.v_mask = np.ones(len(mesh_data.vs), dtype=bool)
    faces, face_areas = remove_non_manifolds(mesh_data, faces)
    # pre-GeMM augmentations (vertex scaling, edge flips)
    if opt.num_aug > 1:
        faces = augmentation(mesh_data, opt, faces)
    build_gemm(mesh_data, faces, face_areas)
    # post-GeMM augmentations (vertex sliding)
    if opt.num_aug > 1:
        post_augmentation(mesh_data, opt)
    mesh_data.features = extract_features(mesh_data)
    return mesh_data
def fill_from_file(mesh, file):
    """Parse a Wavefront .obj file.

    Stores the base filename and full path on `mesh` and returns
    (vertices, faces) as numpy arrays, with face indices converted to
    0-based (negative .obj indices are resolved relative to the vertices
    read so far).
    """
    mesh.filename = ntpath.split(file)[1]
    mesh.fullfilename = file
    vs, faces = [], []
    # Bug fix: `with` guarantees the handle is closed even when a malformed
    # line raises; the original leaked the file object on exceptions.
    with open(file) as f:
        for line in f:
            splitted_line = line.strip().split()
            if not splitted_line:
                continue
            if splitted_line[0] == 'v':
                vs.append([float(v) for v in splitted_line[1:4]])
            elif splitted_line[0] == 'f':
                face_vertex_ids = [int(c.split('/')[0]) for c in splitted_line[1:]]
                assert len(face_vertex_ids) == 3
                # .obj is 1-based; negative indices count back from the end
                face_vertex_ids = [(ind - 1) if (ind >= 0) else (len(vs) + ind)
                                   for ind in face_vertex_ids]
                faces.append(face_vertex_ids)
    vs = np.asarray(vs)
    faces = np.asarray(faces, dtype=int)
    assert np.logical_and(faces >= 0, faces < len(vs)).all()
    return vs, faces
def remove_non_manifolds(mesh, faces):
    """Filter out degenerate and non-manifold faces.

    A face is dropped when its area is zero or when one of its directed
    edges was already emitted by an earlier face (two faces traversing an
    edge in the same direction implies inconsistent winding / more than
    two faces per edge). Also (re)initializes mesh.ve.

    Returns the filtered (faces, face_areas) pair.
    """
    mesh.ve = [[] for _ in mesh.vs]
    edges_set = set()
    mask = np.ones(len(faces), dtype=bool)
    _, face_areas = compute_face_normals_and_areas(mesh, faces)
    for face_id, face in enumerate(faces):
        if face_areas[face_id] == 0:
            mask[face_id] = False
            continue
        faces_edges = []
        is_manifold = False
        for i in range(3):
            cur_edge = (face[i], face[(i + 1) % 3])
            if cur_edge in edges_set:
                is_manifold = True
                break
            else:
                faces_edges.append(cur_edge)
        if is_manifold:
            mask[face_id] = False
        else:
            # register this face's directed edges (idiom: the original
            # enumerated the list but never used the index)
            edges_set.update(faces_edges)
    return faces[mask], face_areas[mask]
def build_gemm(mesh, faces, face_areas):
    """Build the edge-based (GeMM) connectivity used by MeshCNN.

    Populates on `mesh`:
      - edges       (E, 2) sorted vertex pairs
      - gemm_edges  (E, 4) the four neighboring edge ids of each edge
                    (two per incident face), -1 where a neighbor is missing
      - sides       (E, 4) for each neighbor, the slot in which this edge
                    appears in that neighbor's own gemm row
      - ve          per-vertex list of incident edge ids
      - edge_areas  per-edge area share (1/3 of each incident face), then
                    normalized by the total face area
    """
    mesh.ve = [[] for _ in mesh.vs]
    edge_nb = []
    sides = []
    edge2key = dict()
    edges = []
    edges_count = 0
    nb_count = []
    for face_id, face in enumerate(faces):
        # the three undirected edges of this triangle, as sorted tuples
        faces_edges = []
        for i in range(3):
            cur_edge = (face[i], face[(i + 1) % 3])
            faces_edges.append(cur_edge)
        for idx, edge in enumerate(faces_edges):
            edge = tuple(sorted(list(edge)))
            faces_edges[idx] = edge
            if edge not in edge2key:
                # first time this edge is seen: allocate its records
                edge2key[edge] = edges_count
                edges.append(list(edge))
                edge_nb.append([-1, -1, -1, -1])
                sides.append([-1, -1, -1, -1])
                mesh.ve[edge[0]].append(edges_count)
                mesh.ve[edge[1]].append(edges_count)
                mesh.edge_areas.append(0)
                nb_count.append(0)
                edges_count += 1
            mesh.edge_areas[edge2key[edge]] += face_areas[face_id] / 3
        # fill this face's two neighbor slots for each of its edges
        for idx, edge in enumerate(faces_edges):
            edge_key = edge2key[edge]
            edge_nb[edge_key][nb_count[edge_key]] = edge2key[faces_edges[(idx + 1) % 3]]
            edge_nb[edge_key][nb_count[edge_key] + 1] = edge2key[faces_edges[(idx + 2) % 3]]
            nb_count[edge_key] += 2
        # record the reciprocal slot indices (where this edge sits in each
        # neighbor's gemm row)
        for idx, edge in enumerate(faces_edges):
            edge_key = edge2key[edge]
            sides[edge_key][nb_count[edge_key] - 2] = nb_count[edge2key[faces_edges[(idx + 1) % 3]]] - 1
            sides[edge_key][nb_count[edge_key] - 1] = nb_count[edge2key[faces_edges[(idx + 2) % 3]]] - 2
    mesh.edges = np.array(edges, dtype=np.int32)
    mesh.gemm_edges = np.array(edge_nb, dtype=np.int64)
    mesh.sides = np.array(sides, dtype=np.int64)
    mesh.edges_count = edges_count
    mesh.edge_areas = np.array(mesh.edge_areas, dtype=np.float32) / np.sum(face_areas) #todo whats the difference between edge_areas and edge_lenghts?
def compute_face_normals_and_areas(mesh, faces):
    """Return (unit normals, areas) for every triangle in `faces`."""
    v0 = mesh.vs[faces[:, 0]]
    v1 = mesh.vs[faces[:, 1]]
    v2 = mesh.vs[faces[:, 2]]
    # cross product of two triangle edges: length = parallelogram area
    face_normals = np.cross(v1 - v0, v2 - v1)
    norms = np.sqrt((face_normals ** 2).sum(axis=1))
    face_normals /= norms[:, np.newaxis]
    assert (not np.any(norms[:, np.newaxis] == 0)), 'has zero area face: %s' % mesh.filename
    # triangle area is half the parallelogram area
    return face_normals, norms * 0.5
# Data augmentation methods
def augmentation(mesh, opt, faces=None):
    """Apply the pre-GeMM augmentations enabled on `opt` (vertex scaling
    and/or edge flipping) and return the possibly-modified face array."""
    if getattr(opt, 'scale_verts', False):
        scale_verts(mesh)
    if getattr(opt, 'flip_edges', False):
        faces = flip_edges(mesh, opt.flip_edges, faces)
    return faces
def post_augmentation(mesh, opt):
    """Apply the post-GeMM augmentations enabled on `opt` (vertex sliding)."""
    prct = getattr(opt, 'slide_verts', 0)
    if prct:
        slide_verts(mesh, prct)
def slide_verts(mesh, prct):
    """Randomly slide up to `prct` of the vertices along one of their
    incident edges, but only vertices whose surrounding surface is nearly
    flat (all incident dihedral angles > 2.65 rad), so sharp features are
    preserved. Records the achieved fraction on mesh.shifted."""
    edge_points = get_edge_points(mesh)
    dihedral = dihedral_angle(mesh, edge_points).squeeze() #todo make fixed_division epsilon=0
    # NOTE(review): thr is computed but never used below.
    thr = np.mean(dihedral) + np.std(dihedral)
    vids = np.random.permutation(len(mesh.ve))
    target = int(prct * len(vids))
    shifted = 0
    for vi in vids:
        if shifted < target:
            edges = mesh.ve[vi]
            if min(dihedral[edges]) > 2.65:
                # pick a random incident edge and move 20-50% of the way
                # toward its other endpoint
                edge = mesh.edges[np.random.choice(edges)]
                vi_t = edge[1] if vi == edge[0] else edge[0]
                nv = mesh.vs[vi] + np.random.uniform(0.2, 0.5) * (mesh.vs[vi_t] - mesh.vs[vi])
                mesh.vs[vi] = nv
                shifted += 1
        else:
            break
    mesh.shifted = shifted / len(mesh.ve)
def scale_verts(mesh, mean=1, var=0.1):
    """Anisotropically rescale the mesh: each coordinate axis is multiplied
    by an independent factor drawn from N(mean, var)."""
    n_dims = mesh.vs.shape[1]
    for axis in range(n_dims):
        mesh.vs[:, axis] = mesh.vs[:, axis] * np.random.normal(mean, var)
def angles_from_faces(mesh, edge_faces, faces):
    """Intended to return the dihedral angle between the two faces incident
    to each edge (edge_faces holds the two face ids per edge).

    NOTE(review): the loop body does not depend on `i` — both iterations
    index edge_faces[:, 0] and edge_faces[:, 1] the same way, so
    normals[0] == normals[1] and the result is constant pi for every edge.
    This looks like a copy-paste defect, but it is preserved as-is because
    flip_edges' threshold behavior depends on the current output.
    """
    normals = [None, None]
    for i in range(2):
        edge_a = mesh.vs[faces[edge_faces[:, 0], 2]] - mesh.vs[faces[edge_faces[:, 0], 1]]
        edge_b = mesh.vs[faces[edge_faces[:, 1], 1]] - mesh.vs[faces[edge_faces[:, 1], 0]]
        normals[i] = np.cross(edge_a, edge_b)
        # epsilon=0: degenerate faces would divide by zero here
        div = fixed_division(np.linalg.norm(normals[i], ord=2, axis=1), epsilon=0)
        normals[i] /= div[:, np.newaxis]
    dot = np.sum(normals[0] * normals[1], axis=1).clip(-1, 1)
    angles = np.pi - np.arccos(dot)
    return angles
def flip_edges(mesh, prct, faces):
    """Randomly flip up to `prct` of the interior edges whose dihedral angle
    exceeds 2.7 rad (near-flat surface), rewriting `faces` in place.

    Flipping edge (a, b) shared by two triangles replaces it with the
    opposite diagonal (c, d) of the quad they form, provided that diagonal
    does not already exist and both new triangles have positive area.
    """
    edge_count, edge_faces, edges_dict = get_edge_faces(faces)
    dihedral = angles_from_faces(mesh, edge_faces[:, 2:], faces)
    edges2flip = np.random.permutation(edge_count)
    # print(dihedral.min())
    # print(dihedral.max())
    target = int(prct * edge_count)
    flipped = 0
    for edge_key in edges2flip:
        if flipped == target:
            break
        if dihedral[edge_key] > 2.7:
            edge_info = edge_faces[edge_key]
            # boundary edge (only one incident face): cannot flip
            if edge_info[3] == -1:
                continue
            # the new diagonal = symmetric difference of the two triangles'
            # vertex sets (the two vertices NOT on the shared edge)
            new_edge = tuple(sorted(list(set(faces[edge_info[2]]) ^ set(faces[edge_info[3]]))))
            if new_edge in edges_dict:
                continue
            new_faces = np.array(
                [[edge_info[1], new_edge[0], new_edge[1]], [edge_info[0], new_edge[0], new_edge[1]]])
            if check_area(mesh, new_faces):
                # commit the flip: swap the edge record and rewrite both faces
                del edges_dict[(edge_info[0], edge_info[1])]
                edge_info[:2] = [new_edge[0], new_edge[1]]
                edges_dict[new_edge] = edge_key
                rebuild_face(faces[edge_info[2]], new_faces[0])
                rebuild_face(faces[edge_info[3]], new_faces[1])
                # repair the face ids stored on the surrounding edges
                for i, face_id in enumerate([edge_info[2], edge_info[3]]):
                    cur_face = faces[face_id]
                    for j in range(3):
                        cur_edge = tuple(sorted((cur_face[j], cur_face[(j + 1) % 3])))
                        if cur_edge != new_edge:
                            cur_edge_key = edges_dict[cur_edge]
                            for idx, face_nb in enumerate(
                                    [edge_faces[cur_edge_key, 2], edge_faces[cur_edge_key, 3]]):
                                if face_nb == edge_info[2 + (i + 1) % 2]:
                                    edge_faces[cur_edge_key, 2 + idx] = face_id
                flipped += 1
    # print(flipped)
    return faces
def rebuild_face(face, new_face):
    """In place, replace the single vertex of `face` that is absent from
    `new_face` with the single vertex of `new_face` absent from `face`;
    returns `face` for convenience."""
    replacement = list(set(new_face) - set(face))[0]
    for slot, vertex in enumerate(face):
        if vertex not in new_face:
            face[slot] = replacement
            break
    return face
def check_area(mesh, faces):
    """Return True iff both candidate triangles have strictly positive area
    (used to reject degenerate results of an edge flip)."""
    cross = np.cross(mesh.vs[faces[:, 1]] - mesh.vs[faces[:, 0]],
                     mesh.vs[faces[:, 2]] - mesh.vs[faces[:, 1]])
    areas = 0.5 * np.sqrt((cross ** 2).sum(axis=1))
    return areas[0] > 0 and areas[1] > 0
def get_edge_faces(faces):
    """Index every undirected edge of the triangle soup.

    Returns (edge_count, edge_faces, edge2keys) where edge_faces is an
    array of rows [v0, v1, face_a, face_b] (-1 marks a missing second
    face, i.e. a boundary edge) and edge2keys maps the sorted vertex pair
    to its row index.
    """
    edge2keys = dict()
    edge_faces = []
    for face_id, face in enumerate(faces):
        for i in range(3):
            edge = tuple(sorted((face[i], face[(i + 1) % 3])))
            if edge not in edge2keys:
                edge2keys[edge] = len(edge_faces)
                edge_faces.append(np.array([edge[0], edge[1], -1, -1]))
            record = edge_faces[edge2keys[edge]]
            # first incident face goes in slot 2, the second in slot 3
            if record[2] == -1:
                record[2] = face_id
            else:
                record[3] = face_id
    return len(edge_faces), np.array(edge_faces), edge2keys
def set_edge_lengths(mesh, edge_points=None):
    """Store the Euclidean length of every edge on mesh.edge_lengths.

    Bug fix: the original test was inverted (`is not None`), so it
    recomputed edge_points when the caller had already supplied them and
    crashed (indexing None) when they were omitted. With the corrected
    check, a supplied array is reused and a missing one is computed.
    """
    if edge_points is None:
        edge_points = get_edge_points(mesh)
    edge_lengths = np.linalg.norm(mesh.vs[edge_points[:, 0]] - mesh.vs[edge_points[:, 1]], ord=2, axis=1)
    mesh.edge_lengths = edge_lengths
def extract_features(mesh):
    """Compute the 5-channel per-edge input features: dihedral angle (1),
    the two opposite angles (2) and the two height/base ratios (2),
    concatenated along axis 0. Also fills mesh.edge_lengths."""
    features = []
    edge_points = get_edge_points(mesh)
    set_edge_lengths(mesh, edge_points)
    # promote divide-by-zero warnings to errors so degenerate geometry is
    # caught here rather than producing silent NaN/inf features
    with np.errstate(divide='raise'):
        try:
            for extractor in [dihedral_angle, symmetric_opposite_angles, symmetric_ratios]:
                feature = extractor(mesh, edge_points)
                features.append(feature)
            return np.concatenate(features, axis=0)
        except Exception as e:
            print(e)
            raise ValueError(mesh.filename, 'bad features')
def dihedral_angle(mesh, edge_points):
    """Angle between the two faces incident to each edge, shape (1, E)."""
    normal_a = get_normals(mesh, edge_points, 0)
    normal_b = get_normals(mesh, edge_points, 3)
    # clip guards arccos against tiny numerical overshoot past +/-1
    cos_theta = np.clip(np.sum(normal_a * normal_b, axis=1), -1, 1)
    return np.expand_dims(np.pi - np.arccos(cos_theta), axis=0)
def symmetric_opposite_angles(mesh, edge_points):
    """ computes two angles: one for each face shared between the edge
        the angle is in each face opposite the edge
        sort handles order ambiguity
    """
    per_side = [get_opposite_angles(mesh, edge_points, side) for side in (0, 3)]
    stacked = np.stack(per_side, axis=0)
    return np.sort(stacked, axis=0)
def symmetric_ratios(mesh, edge_points):
    """ computes two ratios: one for each face shared between the edge
        the ratio is between the height / base (edge) of each triangle
        sort handles order ambiguity
    """
    per_side = [get_ratios(mesh, edge_points, side) for side in (0, 3)]
    return np.sort(np.stack(per_side, axis=0), axis=0)
def get_edge_points(mesh):
    """Return an (edges_count, 4) int32 array listing, for every edge, the
    four vertex indices spanning its two incident triangles (ordering per
    get_side_points)."""
    edge_points = np.zeros([mesh.edges_count, 4], dtype=np.int32)
    for edge_id, _ in enumerate(mesh.edges):
        edge_points[edge_id] = get_side_points(mesh, edge_id)
        # edge_points[edge_id, 3:] = mesh.get_side_points(edge_id, 2)
    return edge_points
def get_side_points(mesh, edge_id):
    """Return four vertex ids around edge ``edge_id``.

    The result is ``[endpoint_0, endpoint_1, vertex_from_face_A_edge,
    vertex_from_face_B_edge]``, oriented via the GeMM neighborhood
    (``mesh.gemm_edges``). When one side of the edge is a boundary (its
    first neighbor is -1), the other face's neighbors are used instead.
    """
    gemm = mesh.gemm_edges
    edge_a = mesh.edges[edge_id]

    # Companion edges on the first face, falling back to the second face
    # when the first side is a boundary.
    if gemm[edge_id, 0] == -1:
        edge_b = mesh.edges[gemm[edge_id, 2]]
        edge_c = mesh.edges[gemm[edge_id, 3]]
    else:
        edge_b = mesh.edges[gemm[edge_id, 0]]
        edge_c = mesh.edges[gemm[edge_id, 1]]

    # Companion edges on the second face, with the symmetric fallback.
    if gemm[edge_id, 2] == -1:
        edge_d = mesh.edges[gemm[edge_id, 0]]
        edge_e = mesh.edges[gemm[edge_id, 1]]
    else:
        edge_d = mesh.edges[gemm[edge_id, 2]]
        edge_e = mesh.edges[gemm[edge_id, 3]]

    # For each pair, choose the endpoint shared with the companion edge.
    first_vertex = 1 if edge_a[1] in edge_b else 0
    second_vertex = 1 if edge_b[1] in edge_c else 0
    third_vertex = 1 if edge_d[1] in edge_e else 0
    return [edge_a[first_vertex], edge_a[1 - first_vertex],
            edge_b[second_vertex], edge_d[third_vertex]]
def get_normals(mesh, edge_points, side):
    """Unit normal of the face on the given side (0 or 3) of every edge.

    The face is spanned by the edge endpoints and the extra vertex stored in
    column ``side // 2 + 2`` of ``edge_points``. Division by the norm is
    epsilon-stabilized via ``fixed_division``.
    """
    face = side // 2
    base = mesh.vs[edge_points[:, face]]
    vec_a = mesh.vs[edge_points[:, face + 2]] - base
    vec_b = mesh.vs[edge_points[:, 1 - face]] - base
    normals = np.cross(vec_a, vec_b)
    norms = fixed_division(np.linalg.norm(normals, ord=2, axis=1), epsilon=0.1)
    return normals / norms[:, np.newaxis]
def get_opposite_angles(mesh, edge_points, side):
    """Angle at the face vertex opposite the edge, for the face on ``side``.

    Both rays from the apex (column ``side // 2 + 2``) to the edge endpoints
    are normalized with an epsilon guard; the cosine is clipped to [-1, 1].
    """
    face = side // 2
    apex = mesh.vs[edge_points[:, face + 2]]
    ray_a = mesh.vs[edge_points[:, face]] - apex
    ray_b = mesh.vs[edge_points[:, 1 - face]] - apex
    ray_a = ray_a / fixed_division(np.linalg.norm(ray_a, ord=2, axis=1), epsilon=0.1)[:, np.newaxis]
    ray_b = ray_b / fixed_division(np.linalg.norm(ray_b, ord=2, axis=1), epsilon=0.1)[:, np.newaxis]
    cos_theta = np.clip((ray_a * ray_b).sum(axis=1), -1, 1)
    return np.arccos(cos_theta)
def get_ratios(mesh, edge_points, side):
    """Triangle height over base (edge) length for the face on ``side``.

    The height is the distance from the face's third vertex to its
    orthogonal projection onto the line through the edge.
    """
    face = side // 2
    point_a = mesh.vs[edge_points[:, face]]
    point_b = mesh.vs[edge_points[:, 1 - face]]
    point_o = mesh.vs[edge_points[:, face + 2]]
    base_lengths = np.linalg.norm(point_a - point_b, ord=2, axis=1)
    line_ab = point_b - point_a
    # Scalar projection of (o - a) onto the edge direction, epsilon-guarded.
    proj = np.sum(line_ab * (point_o - point_a), axis=1) / fixed_division(
        np.linalg.norm(line_ab, ord=2, axis=1), epsilon=0.1)
    foot = point_a + (proj / base_lengths)[:, np.newaxis] * line_ab
    heights = np.linalg.norm(point_o - foot, ord=2, axis=1)
    return heights / base_lengths
def fixed_division(to_div, epsilon):
    """Make ``to_div`` safe to divide by, mutating it in place.

    A nonzero ``epsilon`` is added to every entry; with ``epsilon == 0``
    only exact zeros are replaced by 0.1. Returns the (mutated) input.
    """
    if epsilon:
        to_div += epsilon
    else:
        to_div[to_div == 0] = 0.1
    return to_div
| [
"numpy.load",
"numpy.sum",
"numpy.savez_compressed",
"numpy.mean",
"numpy.linalg.norm",
"numpy.random.randint",
"numpy.random.normal",
"os.path.join",
"numpy.std",
"os.path.dirname",
"os.path.exists",
"ntpath.split",
"numpy.random.choice",
"numpy.arccos",
"os.path.basename",
"numpy.asa... | [((143, 168), 'os.path.exists', 'os.path.exists', (['load_path'], {}), '(load_path)\n', (157, 168), False, 'import os\n'), ((1325, 1347), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1341, 1347), False, 'import os\n'), ((1363, 1388), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1378, 1388), False, 'import os\n'), ((1402, 1428), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1418, 1428), False, 'import os\n'), ((1444, 1475), 'os.path.join', 'os.path.join', (['dir_name', '"""cache"""'], {}), "(dir_name, 'cache')\n", (1456, 1475), False, 'import os\n'), ((3294, 3308), 'numpy.asarray', 'np.asarray', (['vs'], {}), '(vs)\n', (3304, 3308), True, 'import numpy as np\n'), ((3321, 3349), 'numpy.asarray', 'np.asarray', (['faces'], {'dtype': 'int'}), '(faces, dtype=int)\n', (3331, 3349), True, 'import numpy as np\n'), ((5894, 5925), 'numpy.array', 'np.array', (['edges'], {'dtype': 'np.int32'}), '(edges, dtype=np.int32)\n', (5902, 5925), True, 'import numpy as np\n'), ((5948, 5981), 'numpy.array', 'np.array', (['edge_nb'], {'dtype': 'np.int64'}), '(edge_nb, dtype=np.int64)\n', (5956, 5981), True, 'import numpy as np\n'), ((5999, 6030), 'numpy.array', 'np.array', (['sides'], {'dtype': 'np.int64'}), '(sides, dtype=np.int64)\n', (6007, 6030), True, 'import numpy as np\n'), ((6287, 6389), 'numpy.cross', 'np.cross', (['(mesh.vs[faces[:, 1]] - mesh.vs[faces[:, 0]])', '(mesh.vs[faces[:, 2]] - mesh.vs[faces[:, 1]])'], {}), '(mesh.vs[faces[:, 1]] - mesh.vs[faces[:, 0]], mesh.vs[faces[:, 2]] -\n mesh.vs[faces[:, 1]])\n', (6295, 6389), True, 'import numpy as np\n'), ((8774, 8807), 'numpy.random.permutation', 'np.random.permutation', (['edge_count'], {}), '(edge_count)\n', (8795, 8807), True, 'import numpy as np\n'), ((10772, 10874), 'numpy.cross', 'np.cross', (['(mesh.vs[faces[:, 1]] - mesh.vs[faces[:, 0]])', '(mesh.vs[faces[:, 2]] - mesh.vs[faces[:, 1]])'], {}), '(mesh.vs[faces[:, 1]] - 
mesh.vs[faces[:, 0]], mesh.vs[faces[:, 2]] -\n mesh.vs[faces[:, 1]])\n', (10780, 10874), True, 'import numpy as np\n'), ((11866, 11957), 'numpy.linalg.norm', 'np.linalg.norm', (['(mesh.vs[edge_points[:, 0]] - mesh.vs[edge_points[:, 1]])'], {'ord': '(2)', 'axis': '(1)'}), '(mesh.vs[edge_points[:, 0]] - mesh.vs[edge_points[:, 1]], ord\n =2, axis=1)\n', (11880, 11957), True, 'import numpy as np\n'), ((13243, 13266), 'numpy.sort', 'np.sort', (['angles'], {'axis': '(0)'}), '(angles, axis=0)\n', (13250, 13266), True, 'import numpy as np\n'), ((13718, 13741), 'numpy.sort', 'np.sort', (['ratios'], {'axis': '(0)'}), '(ratios, axis=0)\n', (13725, 13741), True, 'import numpy as np\n'), ((13789, 13836), 'numpy.zeros', 'np.zeros', (['[mesh.edges_count, 4]'], {'dtype': 'np.int32'}), '([mesh.edges_count, 4], dtype=np.int32)\n', (13797, 13836), True, 'import numpy as np\n'), ((15361, 15385), 'numpy.cross', 'np.cross', (['edge_a', 'edge_b'], {}), '(edge_a, edge_b)\n', (15369, 15385), True, 'import numpy as np\n'), ((16016, 16030), 'numpy.arccos', 'np.arccos', (['dot'], {}), '(dot)\n', (16025, 16030), True, 'import numpy as np\n'), ((16094, 16205), 'numpy.linalg.norm', 'np.linalg.norm', (['(mesh.vs[edge_points[:, side // 2]] - mesh.vs[edge_points[:, 1 - side // 2]])'], {'ord': '(2)', 'axis': '(1)'}), '(mesh.vs[edge_points[:, side // 2]] - mesh.vs[edge_points[:, \n 1 - side // 2]], ord=2, axis=1)\n', (16108, 16205), True, 'import numpy as np\n'), ((16671, 16725), 'numpy.linalg.norm', 'np.linalg.norm', (['(point_o - closest_point)'], {'ord': '(2)', 'axis': '(1)'}), '(point_o - closest_point, ord=2, axis=1)\n', (16685, 16725), True, 'import numpy as np\n'), ((190, 246), 'numpy.load', 'np.load', (['load_path'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(load_path, encoding='latin1', allow_pickle=True)\n", (197, 246), True, 'import numpy as np\n'), ((309, 658), 'numpy.savez_compressed', 'np.savez_compressed', (['load_path'], {'gemm_edges': 'mesh_data.gemm_edges', 'vs': 
'mesh_data.vs', 'edges': 'mesh_data.edges', 'edges_count': 'mesh_data.edges_count', 've': 'mesh_data.ve', 'v_mask': 'mesh_data.v_mask', 'filename': 'mesh_data.filename', 'sides': 'mesh_data.sides', 'edge_lengths': 'mesh_data.edge_lengths', 'edge_areas': 'mesh_data.edge_areas', 'features': 'mesh_data.features'}), '(load_path, gemm_edges=mesh_data.gemm_edges, vs=\n mesh_data.vs, edges=mesh_data.edges, edges_count=mesh_data.edges_count,\n ve=mesh_data.ve, v_mask=mesh_data.v_mask, filename=mesh_data.filename,\n sides=mesh_data.sides, edge_lengths=mesh_data.edge_lengths, edge_areas=\n mesh_data.edge_areas, features=mesh_data.features)\n', (328, 658), True, 'import numpy as np\n'), ((1583, 1606), 'os.path.isdir', 'os.path.isdir', (['load_dir'], {}), '(load_dir)\n', (1596, 1606), False, 'import os\n'), ((1616, 1652), 'os.makedirs', 'os.makedirs', (['load_dir'], {'exist_ok': '(True)'}), '(load_dir, exist_ok=True)\n', (1627, 1652), False, 'import os\n'), ((2603, 2621), 'ntpath.split', 'ntpath.split', (['file'], {}), '(file)\n', (2615, 2621), False, 'import ntpath\n'), ((6088, 6131), 'numpy.array', 'np.array', (['mesh.edge_areas'], {'dtype': 'np.float32'}), '(mesh.edge_areas, dtype=np.float32)\n', (6096, 6131), True, 'import numpy as np\n'), ((6134, 6152), 'numpy.sum', 'np.sum', (['face_areas'], {}), '(face_areas)\n', (6140, 6152), True, 'import numpy as np\n'), ((6534, 6572), 'numpy.any', 'np.any', (['(face_areas[:, np.newaxis] == 0)'], {}), '(face_areas[:, np.newaxis] == 0)\n', (6540, 6572), True, 'import numpy as np\n'), ((7265, 7282), 'numpy.mean', 'np.mean', (['dihedral'], {}), '(dihedral)\n', (7272, 7282), True, 'import numpy as np\n'), ((7285, 7301), 'numpy.std', 'np.std', (['dihedral'], {}), '(dihedral)\n', (7291, 7301), True, 'import numpy as np\n'), ((8327, 8351), 'numpy.cross', 'np.cross', (['edge_a', 'edge_b'], {}), '(edge_a, edge_b)\n', (8335, 8351), True, 'import numpy as np\n'), ((8559, 8573), 'numpy.arccos', 'np.arccos', (['dot'], {}), '(dot)\n', (8568, 
8573), True, 'import numpy as np\n'), ((11691, 11711), 'numpy.array', 'np.array', (['edge_faces'], {}), '(edge_faces)\n', (11699, 11711), True, 'import numpy as np\n'), ((12127, 12154), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""raise"""'}), "(divide='raise')\n", (12138, 12154), True, 'import numpy as np\n'), ((15411, 15449), 'numpy.linalg.norm', 'np.linalg.norm', (['normals'], {'ord': '(2)', 'axis': '(1)'}), '(normals, ord=2, axis=1)\n', (15425, 15449), True, 'import numpy as np\n'), ((16447, 16492), 'numpy.sum', 'np.sum', (['(line_ab * (point_o - point_a))'], {'axis': '(1)'}), '(line_ab * (point_o - point_a), axis=1)\n', (16453, 16492), True, 'import numpy as np\n'), ((7996, 8023), 'numpy.random.normal', 'np.random.normal', (['mean', 'var'], {}), '(mean, var)\n', (8012, 8023), True, 'import numpy as np\n'), ((8381, 8422), 'numpy.linalg.norm', 'np.linalg.norm', (['normals[i]'], {'ord': '(2)', 'axis': '(1)'}), '(normals[i], ord=2, axis=1)\n', (8395, 8422), True, 'import numpy as np\n'), ((8486, 8525), 'numpy.sum', 'np.sum', (['(normals[0] * normals[1])'], {'axis': '(1)'}), '(normals[0] * normals[1], axis=1)\n', (8492, 8525), True, 'import numpy as np\n'), ((9322, 9421), 'numpy.array', 'np.array', (['[[edge_info[1], new_edge[0], new_edge[1]], [edge_info[0], new_edge[0],\n new_edge[1]]]'], {}), '([[edge_info[1], new_edge[0], new_edge[1]], [edge_info[0], new_edge\n [0], new_edge[1]]])\n', (9330, 9421), True, 'import numpy as np\n'), ((12376, 12408), 'numpy.concatenate', 'np.concatenate', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (12390, 12408), True, 'import numpy as np\n'), ((12672, 12709), 'numpy.sum', 'np.sum', (['(normals_a * normals_b)'], {'axis': '(1)'}), '(normals_a * normals_b, axis=1)\n', (12678, 12709), True, 'import numpy as np\n'), ((12758, 12772), 'numpy.arccos', 'np.arccos', (['dot'], {}), '(dot)\n', (12767, 12772), True, 'import numpy as np\n'), ((13163, 13190), 'numpy.expand_dims', 'np.expand_dims', (['angles_a', '(0)'], {}), 
'(angles_a, 0)\n', (13177, 13190), True, 'import numpy as np\n'), ((13192, 13219), 'numpy.expand_dims', 'np.expand_dims', (['angles_b', '(0)'], {}), '(angles_b, 0)\n', (13206, 13219), True, 'import numpy as np\n'), ((13640, 13667), 'numpy.expand_dims', 'np.expand_dims', (['ratios_a', '(0)'], {}), '(ratios_a, 0)\n', (13654, 13667), True, 'import numpy as np\n'), ((13669, 13696), 'numpy.expand_dims', 'np.expand_dims', (['ratios_b', '(0)'], {}), '(ratios_b, 0)\n', (13683, 13696), True, 'import numpy as np\n'), ((15783, 15821), 'numpy.linalg.norm', 'np.linalg.norm', (['edges_a'], {'ord': '(2)', 'axis': '(1)'}), '(edges_a, ord=2, axis=1)\n', (15797, 15821), True, 'import numpy as np\n'), ((15881, 15919), 'numpy.linalg.norm', 'np.linalg.norm', (['edges_b'], {'ord': '(2)', 'axis': '(1)'}), '(edges_b, ord=2, axis=1)\n', (15895, 15919), True, 'import numpy as np\n'), ((15959, 15992), 'numpy.sum', 'np.sum', (['(edges_a * edges_b)'], {'axis': '(1)'}), '(edges_a * edges_b, axis=1)\n', (15965, 15992), True, 'import numpy as np\n'), ((16519, 16557), 'numpy.linalg.norm', 'np.linalg.norm', (['line_ab'], {'ord': '(2)', 'axis': '(1)'}), '(line_ab, ord=2, axis=1)\n', (16533, 16557), True, 'import numpy as np\n'), ((1540, 1569), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_aug'], {}), '(0, num_aug)\n', (1557, 1569), True, 'import numpy as np\n'), ((7559, 7582), 'numpy.random.choice', 'np.random.choice', (['edges'], {}), '(edges)\n', (7575, 7582), True, 'import numpy as np\n'), ((11415, 11459), 'numpy.array', 'np.array', (['[cur_edge[0], cur_edge[1], -1, -1]'], {}), '([cur_edge[0], cur_edge[1], -1, -1])\n', (11423, 11459), True, 'import numpy as np\n'), ((7680, 7707), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.5)'], {}), '(0.2, 0.5)\n', (7697, 7707), True, 'import numpy as np\n')] |
import streamlit as st
import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
# Silence Streamlit's deprecation warning for no-arg st.pyplot() calls.
st.set_option('deprecation.showPyplotGlobalUse', False)
# Load the pre-trained MLP classifier bundled with the app at
# <cwd>/model/mlp_model.pkl; runs once per script execution.
model = pickle.load(open(os.path.join(os.getcwd(), 'model', 'mlp_model.pkl'), 'rb'))
# output = [0, 0, 2, 3, 4, 3, 1, 5, 3, 5, 5, 4, 3, 4, 4, 5]
# test = np.array([output])
# model.predict(test)
def predict_MLP(evaluation):
    """Run the loaded MLP classifier on a batch of survey answers.

    Args:
        evaluation: sequence of feature rows (e.g. a list containing one
            16-value list), convertible to the 2-D array that
            ``model.predict`` expects.

    Returns:
        The model's prediction array, one label per input row.
    """
    # Bug fix: the original annotations (`evaluation: int -> float`) were
    # wrong — callers pass a nested list and receive an array — and the
    # local was named ``input``, shadowing the builtin.
    features = np.array(evaluation)
    # ponto = (prediction - model.intercept_)/model.coef_[0]
    return model.predict(features)
def main():
    """Render the passenger-satisfaction survey and classify the answers.

    Collects customer type, travel class and sixteen 1-5 service ratings,
    encodes them into the 16-feature row the MLP expects, and shows the
    prediction when the "Classifier" button is pressed.
    """
    html_temp = """
    <div style="background-color:#025246 ;padding:10px">
    <h2 style="color:white;text-align:center;">
    Multi-Layer Perceptron App </h2>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)

    customer_type = st.radio('1. SELECT THE CUSTOMER TYPE',
                             ('1. Loyal Customer', '2. Disloyal Customer'))
    customer_class = st.radio('2. SELECT THE CUSTOMER CLASS',
                              ('1. Business', '2. Eco', '3. Eco Plus'))

    # One 1-5 rating radio per service aspect, in the original on-screen order.
    rating_questions = (
        '3. EVALUATE INFLIGHT WIFI SERVICE',
        '4. EVALUATE TIME CONVENIENT (DEPARTURE/ARRIVAL)',
        '5. EVALUATE EASE OF ONLINE BOOKING',
        '6. EVALUATE THE GATE LOCATION',
        '7. EVALUATE THE FOOD AND DRINK',
        '8. EVALUATE THE ONLINE BOARDING',
        '9. EVALUATE THE SEAT COMFORT',
        '10. EVALUATE THE INFLIGHT ENTERTAIMENT',
        '11. EVALUATE THE ONBOARD SERVICE',
        '12. EVALUATE THE LEG ROOM SERVICE',
        '13. EVALUATE THE BAGGAGE HANDLING',
        '14. EVALUATE THE CHECK-IN SERVICE',
        '15. EVALUATE THE INFLIGHT SERVICE',
        '16. EVALUATE THE CLEANLINESS',
    )
    ratings = [st.radio(question, (1, 2, 3, 4, 5)) for question in rating_questions]

    # Bug fix: the radio options carry their "N. " prefix, so the original
    # comparisons against bare 'Loyal Customer' / 'Business' never matched
    # and every customer was encoded as disloyal / Eco Plus. Compare against
    # the full option strings instead.
    customer_type_code = 0 if customer_type == '1. Loyal Customer' else 1
    class_codes = {'1. Business': 0, '2. Eco': 1, '3. Eco Plus': 2}
    customer_class_code = class_codes[customer_class]

    clf_data = [[customer_type_code, customer_class_code] + ratings]

    if st.button('Classifier'):
        prediction = predict_MLP(clf_data)
        if prediction == 1:
            st.success(f'The customer is "satisfied"')
        else:
            st.success(f'The customer is "dissatisfied"')
# Run the Streamlit app only when this file is executed as a script.
if __name__ == '__main__':
    main()
| [
"streamlit.markdown",
"streamlit.set_option",
"os.getcwd",
"streamlit.radio",
"streamlit.button",
"numpy.array",
"streamlit.success"
] | [((120, 175), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (133, 175), True, 'import streamlit as st\n'), ((431, 451), 'numpy.array', 'np.array', (['evaluation'], {}), '(evaluation)\n', (439, 451), True, 'import numpy as np\n'), ((796, 842), 'streamlit.markdown', 'st.markdown', (['html_temp'], {'unsafe_allow_html': '(True)'}), '(html_temp, unsafe_allow_html=True)\n', (807, 842), True, 'import streamlit as st\n'), ((964, 1054), 'streamlit.radio', 'st.radio', (['"""1. SELECT THE CUSTOMER TYPE"""', "('1. Loyal Customer', '2. Disloyal Customer')"], {}), "('1. SELECT THE CUSTOMER TYPE', ('1. Loyal Customer',\n '2. Disloyal Customer'))\n", (972, 1054), True, 'import streamlit as st\n'), ((1111, 1197), 'streamlit.radio', 'st.radio', (['"""2. SELECT THE CUSTOMER CLASS"""', "('1. Business', '2. Eco', '3. Eco Plus')"], {}), "('2. SELECT THE CUSTOMER CLASS', ('1. Business', '2. Eco',\n '3. Eco Plus'))\n", (1119, 1197), True, 'import streamlit as st\n'), ((1265, 1327), 'streamlit.radio', 'st.radio', (['"""3. EVALUATE INFLIGHT WIFI SERVICE"""', '(1, 2, 3, 4, 5)'], {}), "('3. EVALUATE INFLIGHT WIFI SERVICE', (1, 2, 3, 4, 5))\n", (1273, 1327), True, 'import streamlit as st\n'), ((1456, 1532), 'streamlit.radio', 'st.radio', (['"""4. EVALUATE TIME CONVENIENT (DEPARTURE/ARRIVAL)"""', '(1, 2, 3, 4, 5)'], {}), "('4. EVALUATE TIME CONVENIENT (DEPARTURE/ARRIVAL)', (1, 2, 3, 4, 5))\n", (1464, 1532), True, 'import streamlit as st\n'), ((1654, 1717), 'streamlit.radio', 'st.radio', (['"""5. EVALUATE EASE OF ONLINE BOOKING"""', '(1, 2, 3, 4, 5)'], {}), "('5. EVALUATE EASE OF ONLINE BOOKING', (1, 2, 3, 4, 5))\n", (1662, 1717), True, 'import streamlit as st\n'), ((1830, 1888), 'streamlit.radio', 'st.radio', (['"""6. EVALUATE THE GATE LOCATION"""', '(1, 2, 3, 4, 5)'], {}), "('6. 
EVALUATE THE GATE LOCATION', (1, 2, 3, 4, 5))\n", (1838, 1888), True, 'import streamlit as st\n'), ((1998, 2057), 'streamlit.radio', 'st.radio', (['"""7. EVALUATE THE FOOD AND DRINK"""', '(1, 2, 3, 4, 5)'], {}), "('7. EVALUATE THE FOOD AND DRINK', (1, 2, 3, 4, 5))\n", (2006, 2057), True, 'import streamlit as st\n'), ((2168, 2228), 'streamlit.radio', 'st.radio', (['"""8. EVALUATE THE ONLINE BOARDING"""', '(1, 2, 3, 4, 5)'], {}), "('8. EVALUATE THE ONLINE BOARDING', (1, 2, 3, 4, 5))\n", (2176, 2228), True, 'import streamlit as st\n'), ((2336, 2393), 'streamlit.radio', 'st.radio', (['"""9. EVALUATE THE SEAT COMFORT"""', '(1, 2, 3, 4, 5)'], {}), "('9. EVALUATE THE SEAT COMFORT', (1, 2, 3, 4, 5))\n", (2344, 2393), True, 'import streamlit as st\n'), ((2515, 2582), 'streamlit.radio', 'st.radio', (['"""10. EVALUATE THE INFLIGHT ENTERTAIMENT"""', '(1, 2, 3, 4, 5)'], {}), "('10. EVALUATE THE INFLIGHT ENTERTAIMENT', (1, 2, 3, 4, 5))\n", (2523, 2582), True, 'import streamlit as st\n'), ((2698, 2759), 'streamlit.radio', 'st.radio', (['"""11. EVALUATE THE ONBOARD SERVICE"""', '(1, 2, 3, 4, 5)'], {}), "('11. EVALUATE THE ONBOARD SERVICE', (1, 2, 3, 4, 5))\n", (2706, 2759), True, 'import streamlit as st\n'), ((2875, 2937), 'streamlit.radio', 'st.radio', (['"""12. EVALUATE THE LEG ROOM SERVICE"""', '(1, 2, 3, 4, 5)'], {}), "('12. EVALUATE THE LEG ROOM SERVICE', (1, 2, 3, 4, 5))\n", (2883, 2937), True, 'import streamlit as st\n'), ((3049, 3111), 'streamlit.radio', 'st.radio', (['"""13. EVALUATE THE BAGGAGE HANDLING"""', '(1, 2, 3, 4, 5)'], {}), "('13. EVALUATE THE BAGGAGE HANDLING', (1, 2, 3, 4, 5))\n", (3057, 3111), True, 'import streamlit as st\n'), ((3226, 3288), 'streamlit.radio', 'st.radio', (['"""14. EVALUATE THE CHECK-IN SERVICE"""', '(1, 2, 3, 4, 5)'], {}), "('14. EVALUATE THE CHECK-IN SERVICE', (1, 2, 3, 4, 5))\n", (3234, 3288), True, 'import streamlit as st\n'), ((3404, 3466), 'streamlit.radio', 'st.radio', (['"""15. 
EVALUATE THE INFLIGHT SERVICE"""', '(1, 2, 3, 4, 5)'], {}), "('15. EVALUATE THE INFLIGHT SERVICE', (1, 2, 3, 4, 5))\n", (3412, 3466), True, 'import streamlit as st\n'), ((3577, 3634), 'streamlit.radio', 'st.radio', (['"""16. EVALUATE THE CLEANLINESS"""', '(1, 2, 3, 4, 5)'], {}), "('16. EVALUATE THE CLEANLINESS', (1, 2, 3, 4, 5))\n", (3585, 3634), True, 'import streamlit as st\n'), ((4335, 4358), 'streamlit.button', 'st.button', (['"""Classifier"""'], {}), "('Classifier')\n", (4344, 4358), True, 'import streamlit as st\n'), ((214, 225), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (223, 225), False, 'import os\n'), ((4443, 4485), 'streamlit.success', 'st.success', (['f"""The customer is "satisfied\\""""'], {}), '(f\'The customer is "satisfied"\')\n', (4453, 4485), True, 'import streamlit as st\n'), ((4512, 4557), 'streamlit.success', 'st.success', (['f"""The customer is "dissatisfied\\""""'], {}), '(f\'The customer is "dissatisfied"\')\n', (4522, 4557), True, 'import streamlit as st\n')] |
from __future__ import print_function
import sys
sys.path.insert(0, 'src')
import transform, numpy as np, vgg, pdb, os
import scipy.misc
import tensorflow as tf
from utils import save_img, get_img, exists, list_files, check_version
from argparse import ArgumentParser
from collections import defaultdict
import time
import json
import subprocess
import numpy
BATCH_SIZE = 4  # default number of frames/images fed through the net per run
DEVICE = '/gpu:0'  # default TensorFlow device string
def from_pipe(opts):
    """Stylize a video end-to-end through ffmpeg pipes.

    Probes ``opts.in_path`` with ffprobe for width/height/fps, decodes the
    video to raw RGB24 frames on a pipe, pushes batches of frames through
    the transform network, and re-encodes the stylized frames to
    ``opts.out`` with libx264.
    """
    command = ["ffprobe",
               '-v', "quiet",
               '-print_format', 'json',
               '-show_streams', opts.in_path]

    info = json.loads(str(subprocess.check_output(command), encoding="utf8"))
    width = int(info["streams"][0]["width"])
    height = int(info["streams"][0]["height"])
    # NOTE(security): r_frame_rate comes from the probed file; eval() of a
    # fraction string like "30000/1001" works but trusts the input blindly.
    fps = round(eval(info["streams"][0]["r_frame_rate"]))

    # Decoder process: input video -> raw RGB24 frames on stdout.
    command = ["ffmpeg",
               '-loglevel', "quiet",
               '-i', opts.in_path,
               '-f', 'image2pipe',
               '-pix_fmt', 'rgb24',
               '-vcodec', 'rawvideo', '-']

    pipe_in = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=10 ** 9, stdin=None, stderr=None)

    # Encoder process: raw RGB24 frames on stdin -> H.264 output file.
    command = ["ffmpeg",
               '-loglevel', "info",
               '-y',  # (optional) overwrite output file if it exists
               '-f', 'rawvideo',
               '-vcodec', 'rawvideo',
               '-s', str(width) + 'x' + str(height),  # size of one frame
               '-pix_fmt', 'rgb24',
               '-r', str(fps),  # frames per second
               '-i', '-',  # the input comes from a pipe
               '-an',  # tells FFMPEG not to expect any audio
               '-c:v', 'libx264',
               '-preset', 'slow',
               '-crf', '18',
               opts.out]

    pipe_out = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=None, stderr=None)

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    with g.as_default(), g.device(opts.device), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (opts.batch_size, height, width, 3)
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        if os.path.isdir(opts.checkpoint):
            ckpt = tf.train.get_checkpoint_state(opts.checkpoint)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, opts.checkpoint)

        X = np.zeros(batch_shape, dtype=np.float32)
        nbytes = 3 * width * height
        read_input = True
        last = False

        while read_input:
            count = 0

            # Fill one batch frame-by-frame from the decoder pipe.
            while count < opts.batch_size:
                raw_image = pipe_in.stdout.read(width * height * 3)

                if len(raw_image) != nbytes:
                    # Short read: end of stream. A non-empty partial batch is
                    # processed once with a shrunken graph, then we stop.
                    if count == 0:
                        read_input = False
                    else:
                        last = True
                        X = X[:count]
                        batch_shape = (count, height, width, 3)
                        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                                         name='img_placeholder')
                        # NOTE(review): transform.net() builds fresh variables
                        # here that the earlier saver.restore() did not touch;
                        # confirm the restored weights are actually reused for
                        # this final partial batch.
                        preds = transform.net(img_placeholder)
                    break

                # Bug fix: numpy.fromstring is deprecated for binary data;
                # frombuffer is the zero-copy replacement.
                image = np.frombuffer(raw_image, dtype='uint8')
                image = image.reshape((height, width, 3))
                X[count] = image
                count += 1

            if read_input:
                if last:
                    read_input = False
                _preds = sess.run(preds, feed_dict={img_placeholder: X})

                for i in range(0, batch_shape[0]):
                    img = np.clip(_preds[i], 0, 255).astype(np.uint8)
                    try:
                        pipe_out.stdin.write(img)
                    except IOError as err:
                        # Bug fix: pipe_out was created with stderr=None, so
                        # pipe_out.stderr is None — guard the read instead of
                        # raising AttributeError inside the error handler.
                        ffmpeg_error = pipe_out.stderr.read() if pipe_out.stderr else b''
                        error = (str(err) + ("\n\nFFMPEG encountered"
                                             "the following error while writing file:"
                                             "\n\n %s" % ffmpeg_error))
                        read_input = False
                        print(error)

        pipe_out.terminate()
        pipe_in.terminate()
        pipe_out.stdin.close()
        pipe_in.stdout.close()

        del pipe_in
        del pipe_out
# get img_shape
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    """Feed images through the transform net and save the stylized results.

    Args:
        data_in: list of input image paths, or a sequence of already-loaded
            image arrays (all sharing one shape).
        paths_out: output path per input image.
        checkpoint_dir: checkpoint directory or explicit .ckpt file.
        device_t: TensorFlow device string.
        batch_size: images per session run; a smaller trailing remainder is
            handled by a recursive call with batch_size=1.
    """
    assert len(paths_out) > 0
    is_paths = type(data_in[0]) == str
    if is_paths:
        assert len(data_in) == len(paths_out)
        img_shape = get_img(data_in[0]).shape
    else:
        # Bug fix: the original referenced the undefined name ``X`` and
        # subscripted ``data_in.size`` (an int for arrays); use the
        # in-memory images directly.
        assert len(data_in) == len(paths_out)
        img_shape = data_in[0].shape

    g = tf.Graph()
    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = int(len(paths_out)/batch_size)
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos+batch_size]
            if is_paths:
                curr_batch_in = data_in[pos:pos+batch_size]
                X = np.zeros(batch_shape, dtype=np.float32)
                for j, path_in in enumerate(curr_batch_in):
                    img = get_img(path_in)
                    assert img.shape == img_shape, \
                        'Images have different dimensions. ' + \
                        'Resize images or use --allow-different-dimensions.'
                    X[j] = img
            else:
                X = data_in[pos:pos+batch_size]

            _preds = sess.run(preds, feed_dict={img_placeholder:X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])

    # Stylize whatever did not fill a complete batch, one image at a time.
    remaining_in = data_in[num_iters*batch_size:]
    remaining_out = paths_out[num_iters*batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in, remaining_out, checkpoint_dir, 
            device_t=device_t, batch_size=1)
def ffwd_to_img(in_path, out_path, checkpoint_dir, device='/cpu:0'):
    """Stylize a single image file and write the result to ``out_path``."""
    ffwd([in_path], [out_path], checkpoint_dir, batch_size=1, device_t=device)
def ffwd_different_dimensions(in_path, out_path, checkpoint_dir, 
                              device_t=DEVICE, batch_size=4):
    """Group the inputs by image shape and stylize each group with its own
    ``ffwd`` call, since one graph only accepts a single input shape."""
    in_path_of_shape = defaultdict(list)
    out_path_of_shape = defaultdict(list)
    for idx in range(len(in_path)):
        src = in_path[idx]
        dst = out_path[idx]
        shape_key = "%dx%dx%d" % get_img(src).shape
        in_path_of_shape[shape_key].append(src)
        out_path_of_shape[shape_key].append(dst)
    for shape_key in in_path_of_shape:
        print('Processing images of shape %s' % shape_key)
        ffwd(in_path_of_shape[shape_key], out_path_of_shape[shape_key],
             checkpoint_dir, device_t, batch_size)
def build_parser():
    """Build the command-line argument parser for the evaluation script."""
    parser = ArgumentParser()

    parser.add_argument('--checkpoint', type=str, dest='checkpoint_dir',
                        metavar='CHECKPOINT', required=True,
                        help='dir or .ckpt file to load checkpoint from')

    parser.add_argument('--in-path', type=str, dest='in_path',
                        metavar='IN_PATH', required=True,
                        help='dir or file to transform')

    parser.add_argument('--out-path', type=str, dest='out_path',
                        metavar='OUT_PATH', required=True,
                        help='destination (dir or file) of transformed file or files')

    parser.add_argument('--device', type=str, dest='device',
                        metavar='DEVICE', default=DEVICE,
                        help='device to perform compute on')

    parser.add_argument('--batch-size', type=int, dest='batch_size',
                        metavar='BATCH_SIZE', default=BATCH_SIZE,
                        help='batch size for feedforwarding')

    parser.add_argument('--allow-different-dimensions', action='store_true',
                        dest='allow_different_dimensions',
                        help='allow different image dimensions')
    return parser
def check_opts(opts):
    """Validate the parsed CLI options before running.

    Checks that the input path exists and, when the output path is a
    directory, that it exists too.
    """
    # exists(opts.checkpoint_dir, 'Checkpoint not found!')
    exists(opts.in_path, 'In path not found!')
    if os.path.isdir(opts.out_path):
        exists(opts.out_path, 'out dir not found!')
        # NOTE(review): this assert only runs when out_path is a directory —
        # it looks like it was meant to be unconditional; confirm intent.
        assert opts.batch_size > 0
def main():
    """CLI entry point: stylize a single file or every file in a directory."""
    check_version()
    parser = build_parser()
    opts = parser.parse_args()
    check_opts(opts)

    if not os.path.isdir(opts.in_path):
        # Single input file; place the output inside out_path when it is an
        # existing directory, otherwise treat out_path as the file name.
        if os.path.exists(opts.out_path) and os.path.isdir(opts.out_path):
            out_path = \
                os.path.join(opts.out_path, os.path.basename(opts.in_path))
        else:
            out_path = opts.out_path

        ffwd_to_img(opts.in_path, out_path, opts.checkpoint_dir,
                    device=opts.device)
    else:
        files = list_files(opts.in_path)
        # Bug fix: map() is a lazy iterator on Python 3, but the ffwd
        # helpers call len() on and index/slice these sequences — build
        # concrete lists instead.
        full_in = [os.path.join(opts.in_path, x) for x in files]
        full_out = [os.path.join(opts.out_path, x) for x in files]
        if opts.allow_different_dimensions:
            ffwd_different_dimensions(full_in, full_out, opts.checkpoint_dir, 
                    device_t=opts.device, batch_size=opts.batch_size)
        else :
            ffwd(full_in, full_out, opts.checkpoint_dir, device_t=opts.device,
                    batch_size=opts.batch_size)
# Run the evaluation CLI only when executed as a script.
if __name__ == '__main__':
    main()
| [
"utils.list_files",
"argparse.ArgumentParser",
"transform.net",
"numpy.clip",
"collections.defaultdict",
"tensorflow.ConfigProto",
"os.path.join",
"utils.exists",
"utils.check_version",
"os.path.exists",
"tensorflow.placeholder",
"numpy.fromstring",
"tensorflow.train.get_checkpoint_state",
... | [((51, 76), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""src"""'], {}), "(0, 'src')\n", (66, 76), False, 'import sys\n'), ((1050, 1146), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'bufsize': '(10 ** 9)', 'stdin': 'None', 'stderr': 'None'}), '(command, stdout=subprocess.PIPE, bufsize=10 ** 9, stdin=\n None, stderr=None)\n', (1066, 1146), False, 'import subprocess\n'), ((1781, 1855), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'subprocess.PIPE', 'stdout': 'None', 'stderr': 'None'}), '(command, stdin=subprocess.PIPE, stdout=None, stderr=None)\n', (1797, 1855), False, 'import subprocess\n'), ((1865, 1875), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1873, 1875), True, 'import tensorflow as tf\n'), ((1895, 1936), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1909, 1936), True, 'import tensorflow as tf\n'), ((5137, 5147), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5145, 5147), True, 'import tensorflow as tf\n'), ((5235, 5276), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (5249, 5276), True, 'import tensorflow as tf\n'), ((7586, 7603), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7597, 7603), False, 'from collections import defaultdict\n'), ((7629, 7646), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7640, 7646), False, 'from collections import defaultdict\n'), ((8148, 8164), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (8162, 8164), False, 'from argparse import ArgumentParser\n'), ((9527, 9569), 'utils.exists', 'exists', (['opts.in_path', '"""In path not found!"""'], {}), "(opts.in_path, 'In path not found!')\n", (9533, 9569), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((9578, 9606), 'os.path.isdir', 'os.path.isdir', 
(['opts.out_path'], {}), '(opts.out_path)\n', (9591, 9606), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((9717, 9732), 'utils.check_version', 'check_version', ([], {}), '()\n', (9730, 9732), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((2049, 2079), 'tensorflow.Session', 'tf.Session', ([], {'config': 'soft_config'}), '(config=soft_config)\n', (2059, 2079), True, 'import tensorflow as tf\n'), ((2175, 2244), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'batch_shape', 'name': '"""img_placeholder"""'}), "(tf.float32, shape=batch_shape, name='img_placeholder')\n", (2189, 2244), True, 'import tensorflow as tf\n'), ((2304, 2334), 'transform.net', 'transform.net', (['img_placeholder'], {}), '(img_placeholder)\n', (2317, 2334), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((2352, 2368), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2366, 2368), True, 'import tensorflow as tf\n'), ((2381, 2411), 'os.path.isdir', 'os.path.isdir', (['opts.checkpoint'], {}), '(opts.checkpoint)\n', (2394, 2411), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((2756, 2795), 'numpy.zeros', 'np.zeros', (['batch_shape'], {'dtype': 'np.float32'}), '(batch_shape, dtype=np.float32)\n', (2764, 2795), True, 'import transform, numpy as np, vgg, pdb, os\n'), ((5387, 5417), 'tensorflow.Session', 'tf.Session', ([], {'config': 'soft_config'}), '(config=soft_config)\n', (5397, 5417), True, 'import tensorflow as tf\n'), ((5503, 5572), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'batch_shape', 'name': '"""img_placeholder"""'}), "(tf.float32, shape=batch_shape, name='img_placeholder')\n", (5517, 5572), True, 'import tensorflow as tf\n'), ((5634, 5664), 'transform.net', 'transform.net', (['img_placeholder'], {}), '(img_placeholder)\n', (5647, 5664), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((5682, 5698), 'tensorflow.train.Saver', 'tf.train.Saver', ([], 
{}), '()\n', (5696, 5698), True, 'import tensorflow as tf\n'), ((5711, 5740), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (5724, 5740), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((9617, 9660), 'utils.exists', 'exists', (['opts.out_path', '"""out dir not found!"""'], {}), "(opts.out_path, 'out dir not found!')\n", (9623, 9660), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((9830, 9857), 'os.path.isdir', 'os.path.isdir', (['opts.in_path'], {}), '(opts.in_path)\n', (9843, 9857), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((10231, 10255), 'utils.list_files', 'list_files', (['opts.in_path'], {}), '(opts.in_path)\n', (10241, 10255), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((609, 641), 'subprocess.check_output', 'subprocess.check_output', (['command'], {}), '(command)\n', (632, 641), False, 'import subprocess\n'), ((2433, 2479), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['opts.checkpoint'], {}), '(opts.checkpoint)\n', (2462, 2479), True, 'import tensorflow as tf\n'), ((5007, 5026), 'utils.get_img', 'get_img', (['data_in[0]'], {}), '(data_in[0])\n', (5014, 5026), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((5762, 5807), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (5791, 5807), True, 'import tensorflow as tf\n'), ((9871, 9900), 'os.path.exists', 'os.path.exists', (['opts.out_path'], {}), '(opts.out_path)\n', (9885, 9900), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((9905, 9933), 'os.path.isdir', 'os.path.isdir', (['opts.out_path'], {}), '(opts.out_path)\n', (9918, 9933), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((3628, 3670), 'numpy.fromstring', 'numpy.fromstring', (['raw_image'], {'dtype': '"""uint8"""'}), "(raw_image, dtype='uint8')\n", (3644, 
3670), False, 'import numpy\n'), ((6360, 6399), 'numpy.zeros', 'np.zeros', (['batch_shape'], {'dtype': 'np.float32'}), '(batch_shape, dtype=np.float32)\n', (6368, 6399), True, 'import transform, numpy as np, vgg, pdb, os\n'), ((6951, 6980), 'utils.save_img', 'save_img', (['path_out', '_preds[j]'], {}), '(path_out, _preds[j])\n', (6959, 6980), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((7776, 7793), 'utils.get_img', 'get_img', (['in_image'], {}), '(in_image)\n', (7783, 7793), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((10009, 10039), 'os.path.basename', 'os.path.basename', (['opts.in_path'], {}), '(opts.in_path)\n', (10025, 10039), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((10289, 10318), 'os.path.join', 'os.path.join', (['opts.in_path', 'x'], {}), '(opts.in_path, x)\n', (10301, 10318), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((10360, 10390), 'os.path.join', 'os.path.join', (['opts.out_path', 'x'], {}), '(opts.out_path, x)\n', (10372, 10390), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((6488, 6504), 'utils.get_img', 'get_img', (['path_in'], {}), '(path_in)\n', (6495, 6504), False, 'from utils import save_img, get_img, exists, list_files, check_version\n'), ((3386, 3455), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'batch_shape', 'name': '"""img_placeholder"""'}), "(tf.float32, shape=batch_shape, name='img_placeholder')\n", (3400, 3455), True, 'import tensorflow as tf\n'), ((3543, 3573), 'transform.net', 'transform.net', (['img_placeholder'], {}), '(img_placeholder)\n', (3556, 3573), False, 'import transform, numpy as np, vgg, pdb, os\n'), ((4043, 4069), 'numpy.clip', 'np.clip', (['_preds[i]', '(0)', '(255)'], {}), '(_preds[i], 0, 255)\n', (4050, 4069), True, 'import transform, numpy as np, vgg, pdb, os\n')] |
import numpy as np
def rotation_x(phi: float) -> np.ndarray:
    """
    Return the Rotation matrix around the X-axis for phi radians.

    The X row/column is the identity; the rotation acts in the Y-Z plane.
    """
    c, s = np.cos(phi), np.sin(phi)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])
def rotation_y(theta: float) -> np.ndarray:
    """
    Return the Rotation matrix around the Y-axis for theta radians.

    NOTE(review): the sign placement here (-sin at row 0, col 2) is the
    transpose of the more common right-handed convention — presumably
    intentional; confirm against callers before changing.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0, -s],
                     [0, 1, 0],
                     [s, 0, c]])
def rotation_z(psi: float) -> np.ndarray:
    """
    Return the Rotation matrix around the Z-axis for psi radians.

    The Z row/column is the identity; the rotation acts in the X-Y plane.
    """
    c, s = np.cos(psi), np.sin(psi)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def euler_rotation(roll: float, pitch: float, yaw: float, is_radians: bool = False) -> np.ndarray:
    """
    Returns the Euler rotation matrix for 3 rotation angles around X-Y-Z.

    Angles are interpreted as degrees unless ``is_radians`` is True.
    Composition order is Rz(yaw) @ Ry(pitch) @ Rx(roll).
    """
    if not is_radians:
        roll = np.deg2rad(roll)
        pitch = np.deg2rad(pitch)
        yaw = np.deg2rad(yaw)
    return rotation_z(yaw) @ rotation_y(pitch) @ rotation_x(roll)
| [
"numpy.sin",
"numpy.cos",
"numpy.deg2rad"
] | [((1149, 1165), 'numpy.deg2rad', 'np.deg2rad', (['roll'], {}), '(roll)\n', (1159, 1165), True, 'import numpy as np\n'), ((1167, 1184), 'numpy.deg2rad', 'np.deg2rad', (['pitch'], {}), '(pitch)\n', (1177, 1184), True, 'import numpy as np\n'), ((1186, 1201), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1196, 1201), True, 'import numpy as np\n'), ((204, 215), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (210, 215), True, 'import numpy as np\n'), ((258, 269), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (264, 269), True, 'import numpy as np\n'), ((271, 282), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (277, 282), True, 'import numpy as np\n'), ((456, 469), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (462, 469), True, 'import numpy as np\n'), ((547, 560), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (553, 560), True, 'import numpy as np\n'), ((565, 578), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (571, 578), True, 'import numpy as np\n'), ((748, 759), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (754, 759), True, 'import numpy as np\n'), ((802, 813), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (808, 813), True, 'import numpy as np\n'), ((815, 826), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (821, 826), True, 'import numpy as np\n'), ((218, 229), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (224, 229), True, 'import numpy as np\n'), ((475, 488), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (481, 488), True, 'import numpy as np\n'), ((762, 773), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (768, 773), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Tests for solution 2
"""
import io
import numpy as np
import pytest
import solution1
import solution2
@pytest.mark.parametrize(
    'radians, expected',
    [
        (0, 180),
        (np.pi / 2, 90),
        (np.pi, 0),
        (3 * np.pi / 2, 270),
    ]
)
def test_bearings_to_polar_degrees(radians, expected):
    """Sanity-check the radians -> polar-degrees conversion."""
    result = solution2.bearings_to_polar_degrees(radians)
    assert result == expected
@pytest.fixture(scope='module')
def big_asteroids():
    """Module-scoped fixture: parse the large example asteroid map once."""
    text = '.#..##.###...#######\n##.############..##.\n.#.######.########.#\n.###.#######.####.#.\n#####.##.#.##.###.##\n..#####..#.#########\n####################\n#.####....###.#.#.##\n##.#################\n#####.##.###..####..\n..######..##.#######\n####.##.####...##..#\n.#####..#.######.###\n##...#.##########...\n#.##########.#######\n.####.#.###.###.#.##\n....##.##.###..#####\n.#.#.###########.###\n#.#.#.#####.####.###\n###.##.####.##.#..##'
    return solution1.read_array(io.StringIO(text))
# Expected (coordinates, 1-based vaporization position) pairs for the big map.
ORDERING_CASES = [
    (np.array([11, 12]), 1),
    (np.array([12, 1]), 2),
    (np.array([12, 2]), 3),
    (np.array([12, 8]), 10),
    (np.array([16, 0]), 20),
    (np.array([16, 9]), 50),
    (np.array([10, 16]), 100),
    (np.array([9, 6]), 199),
    (np.array([8, 2]), 200),
    (np.array([10, 9]), 201),
    (np.array([11, 1]), 299),
]


@pytest.mark.parametrize('coords, position', ORDERING_CASES)
def test_orderering(coords, position, big_asteroids):
    """
    Test that for the big asteroids the ordering is correct.

    ``position`` is 1-based: the Nth point returned by the ordering.
    """
    ordered = solution2.order_points_by_angle_distance(big_asteroids)
    assert (ordered[position - 1] == coords).all()
def test_best_position(big_asteroids):
    """Guarantee that the best monitoring site for the big map is (11, 13)."""
    best_coords, _best_count = solution1.calculate_best_site(big_asteroids)
    assert (best_coords == np.array([11, 13])).all()
| [
"io.StringIO",
"solution2.order_points_by_angle_distance",
"pytest.fixture",
"solution2.bearings_to_polar_degrees",
"numpy.array",
"pytest.mark.parametrize",
"solution1.calculate_best_site"
] | [((133, 245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radians, expected"""', '[(0, 180), (np.pi / 2, 90), (np.pi, 0), (3 * np.pi / 2, 270)]'], {}), "('radians, expected', [(0, 180), (np.pi / 2, 90), (\n np.pi, 0), (3 * np.pi / 2, 270)])\n", (156, 245), False, 'import pytest\n'), ((452, 482), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (466, 482), False, 'import pytest\n'), ((1618, 1673), 'solution2.order_points_by_angle_distance', 'solution2.order_points_by_angle_distance', (['big_asteroids'], {}), '(big_asteroids)\n', (1658, 1673), False, 'import solution2\n'), ((1911, 1955), 'solution1.calculate_best_site', 'solution1.calculate_best_site', (['big_asteroids'], {}), '(big_asteroids)\n', (1940, 1955), False, 'import solution1\n'), ((392, 436), 'solution2.bearings_to_polar_degrees', 'solution2.bearings_to_polar_degrees', (['radians'], {}), '(radians)\n', (427, 436), False, 'import solution2\n'), ((993, 1010), 'io.StringIO', 'io.StringIO', (['text'], {}), '(text)\n', (1004, 1010), False, 'import io\n'), ((1100, 1118), 'numpy.array', 'np.array', (['[11, 12]'], {}), '([11, 12])\n', (1108, 1118), True, 'import numpy as np\n'), ((1133, 1150), 'numpy.array', 'np.array', (['[12, 1]'], {}), '([12, 1])\n', (1141, 1150), True, 'import numpy as np\n'), ((1165, 1182), 'numpy.array', 'np.array', (['[12, 2]'], {}), '([12, 2])\n', (1173, 1182), True, 'import numpy as np\n'), ((1197, 1214), 'numpy.array', 'np.array', (['[12, 8]'], {}), '([12, 8])\n', (1205, 1214), True, 'import numpy as np\n'), ((1230, 1247), 'numpy.array', 'np.array', (['[16, 0]'], {}), '([16, 0])\n', (1238, 1247), True, 'import numpy as np\n'), ((1263, 1280), 'numpy.array', 'np.array', (['[16, 9]'], {}), '([16, 9])\n', (1271, 1280), True, 'import numpy as np\n'), ((1296, 1314), 'numpy.array', 'np.array', (['[10, 16]'], {}), '([10, 16])\n', (1304, 1314), True, 'import numpy as np\n'), ((1331, 1347), 'numpy.array', 'np.array', (['[9, 6]'], {}), 
'([9, 6])\n', (1339, 1347), True, 'import numpy as np\n'), ((1364, 1380), 'numpy.array', 'np.array', (['[8, 2]'], {}), '([8, 2])\n', (1372, 1380), True, 'import numpy as np\n'), ((1397, 1414), 'numpy.array', 'np.array', (['[10, 9]'], {}), '([10, 9])\n', (1405, 1414), True, 'import numpy as np\n'), ((1431, 1448), 'numpy.array', 'np.array', (['[11, 1]'], {}), '([11, 1])\n', (1439, 1448), True, 'import numpy as np\n'), ((1988, 2006), 'numpy.array', 'np.array', (['[11, 13]'], {}), '([11, 13])\n', (1996, 2006), True, 'import numpy as np\n')] |
import pickle
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import numpy as np
from sklearn.model_selection import train_test_split
from model import AlphaGoZero
from loss import alpha_go_zero_loss
from uniform_replay_buffer import UniformReplayBuffer
import random
# Load the pre-generated (augmented) training samples from disk.
# BUG FIX: use a context manager so the file handle is closed even if
# unpickling fails (the original left `f` open on error).
with open('./new_samples_aug_half.npy', 'rb') as f:
    total_samples = pickle.load(f)

TEST_SPLIT = 500   # NOTE(review): unused below — the test size comes from the 4% split
FREQ_TEST = 10     # evaluate on the held-out set every FREQ_TEST epochs
LR = 0.01

device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = AlphaGoZero(residual=9).float().to(device)

random.shuffle(total_samples)
Xtrain, Xtest = train_test_split(total_samples, test_size=int(0.04 * len(total_samples)), random_state=42)

device_name = "cpu" if device == "cpu" else torch.cuda.get_device_name(0)
# BUG FIX: the original concatenation printed e.g. "Training oncpu" (missing space).
print('Training on ' + device_name)
print(len(Xtrain))
print(len(Xtest))
# model.load_state_dict(torch.load("win_as_black.pt"))

optimizer = optim.Adam(model.parameters(), lr=LR, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=300)
writer = SummaryWriter()

buffer = UniformReplayBuffer(len(Xtrain))
buffer.store(np.expand_dims(Xtrain, axis=0))
test_buffer = UniformReplayBuffer(len(Xtest))
test_buffer.store(np.expand_dims(Xtest, axis=0))

for epoch in tqdm(range(1000)):
    # --- one optimization step on a random training minibatch ---
    inputs, pi, z = buffer.sample(1024)
    inputs = inputs.float().to(device)
    pi = pi.float().to(device)
    z = z.float().to(device)
    optimizer.zero_grad()
    p, v = model(inputs)
    loss = alpha_go_zero_loss(p, v, pi, z)
    loss.backward()
    optimizer.step()
    scheduler.step()
    running_loss = loss.item()
    writer.add_scalar('train_loss', running_loss, epoch)
    # --- periodic evaluation on held-out samples ---
    if epoch % FREQ_TEST == 0:
        inputs, pi, z = test_buffer.sample(256)
        inputs = inputs.float().to(device)
        pi = pi.float().to(device)
        z = z.float().to(device)
        # BUG FIX: the original re-logged 'train_loss' here with the stale
        # training value; that redundant/misleading write has been removed.
        # Evaluation needs no gradients — no_grad saves memory and compute.
        with torch.no_grad():
            p, v = model(inputs)
            loss = alpha_go_zero_loss(p, v, pi, z)
        running_loss = loss.item()
        writer.add_scalar('test_loss', running_loss, epoch)

torch.save(model.state_dict(), 'model.pt')
| [
"torch.optim.lr_scheduler.StepLR",
"random.shuffle",
"loss.alpha_go_zero_loss",
"numpy.expand_dims",
"model.AlphaGoZero",
"pickle.load",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.get_device_name"
] | [((398, 412), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (409, 412), False, 'import pickle\n'), ((578, 607), 'random.shuffle', 'random.shuffle', (['total_samples'], {}), '(total_samples)\n', (592, 607), False, 'import random\n'), ((1004, 1055), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(300)'}), '(optimizer, step_size=300)\n', (1029, 1055), True, 'import torch.optim as optim\n'), ((1065, 1080), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1078, 1080), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((489, 514), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (512, 514), False, 'import torch\n'), ((762, 791), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (788, 791), False, 'import torch\n'), ((1137, 1167), 'numpy.expand_dims', 'np.expand_dims', (['Xtrain'], {'axis': '(0)'}), '(Xtrain, axis=0)\n', (1151, 1167), True, 'import numpy as np\n'), ((1234, 1263), 'numpy.expand_dims', 'np.expand_dims', (['Xtest'], {'axis': '(0)'}), '(Xtest, axis=0)\n', (1248, 1263), True, 'import numpy as np\n'), ((1505, 1536), 'loss.alpha_go_zero_loss', 'alpha_go_zero_loss', (['p', 'v', 'pi', 'z'], {}), '(p, v, pi, z)\n', (1523, 1536), False, 'from loss import alpha_go_zero_loss\n'), ((1988, 2019), 'loss.alpha_go_zero_loss', 'alpha_go_zero_loss', (['p', 'v', 'pi', 'z'], {}), '(p, v, pi, z)\n', (2006, 2019), False, 'from loss import alpha_go_zero_loss\n'), ((534, 557), 'model.AlphaGoZero', 'AlphaGoZero', ([], {'residual': '(9)'}), '(residual=9)\n', (545, 557), False, 'from model import AlphaGoZero\n')] |
import gym
import os
import errno
import itertools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import baselines.common.tf_util as U
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.common.schedules import LinearSchedule
import oscar.env
# ENV_NAME = "pysc2-simple64-meta-per-v0"
ENV_NAME = "general-learning-v0"
SAVE_PATH = "learning_tools/learning_nn/" + ENV_NAME + "/dqn"
NUMBER_OF_TRAINING_GAME = 5
NUMBER_OF_CPU = 4
def model(inpt, num_actions, scope, reuse=False):
    """This model takes as input an observation and returns values of all actions."""
    with tf.variable_scope(scope, reuse=reuse):
        # One tanh hidden layer of 64 units, then a linear Q-value head.
        hidden = layers.fully_connected(inpt, num_outputs=64, activation_fn=tf.nn.tanh)
        q_values = layers.fully_connected(hidden, num_outputs=num_actions, activation_fn=None)
        return q_values
if __name__ == '__main__':
    with U.make_session(NUMBER_OF_CPU) as sess:
        # Create the environment
        env = gym.make(ENV_NAME)
        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: U.BatchInput(env.observation_space.shape, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )
        # Try to restore previously saved weights; fall back to the fresh network.
        saver = tf.train.Saver()
        try:
            saver.restore(sess, SAVE_PATH + '/')
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed here.
            print("old save not found, use new NN")
        # Create the replay buffer
        replay_buffer = ReplayBuffer(50000)
        # Create the schedule for exploration starting from 1 (every action is random) down to
        # 0.02 (98% of actions are selected according to values predicted by the model).
        exploration = LinearSchedule(schedule_timesteps=100000, initial_p=1.0, final_p=0.02)
        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()
        episode_rewards = [0.0]
        obs = env.reset()
        t = 0
        try:
            for g in range(NUMBER_OF_TRAINING_GAME):
                done = False
                while not done:
                    # Take action and update exploration to the newest value
                    action = act(obs[None], update_eps=exploration.value(t))[0]
                    new_obs, rew, done, _ = env.step(action)
                    # Store transition in the replay buffer.
                    replay_buffer.add(obs, action, rew, new_obs, float(done))
                    obs = new_obs
                    episode_rewards[-1] += rew
                    if done:
                        obs = env.reset()
                        episode_rewards.append(0)
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if t > 1000:
                        obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
                        train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
                    # Update target network periodically.
                    if t % 1000 == 0:
                        update_target()
                    if done:
                        logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                        logger.dump_tabular()
                    t += 1
        except KeyboardInterrupt:
            print("Training aborted")
        else:
            print("Training finished")
        env.close()
        del env
        # BUG FIX: the original ran sess.run(tf.global_variables_initializer())
        # at this point, which re-initialized every variable and wiped the
        # trained weights immediately before saving them. Removed.
        # exist_ok=True replaces the manual errno.EEXIST handling.
        os.makedirs(SAVE_PATH, exist_ok=True)
        saver.save(sess, SAVE_PATH + '/')
| [
"baselines.common.tf_util.BatchInput",
"numpy.ones_like",
"gym.make",
"tensorflow.train.Saver",
"tensorflow.contrib.layers.fully_connected",
"baselines.common.tf_util.make_session",
"baselines.deepq.replay_buffer.ReplayBuffer",
"tensorflow.global_variables_initializer",
"os.makedirs",
"baselines.c... | [((692, 729), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (709, 729), True, 'import tensorflow as tf\n'), ((764, 833), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': '(64)', 'activation_fn': 'tf.nn.tanh'}), '(out, num_outputs=64, activation_fn=tf.nn.tanh)\n', (786, 833), True, 'import tensorflow.contrib.layers as layers\n'), ((848, 920), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_actions', 'activation_fn': 'None'}), '(out, num_outputs=num_actions, activation_fn=None)\n', (870, 920), True, 'import tensorflow.contrib.layers as layers\n'), ((978, 1007), 'baselines.common.tf_util.make_session', 'U.make_session', (['NUMBER_OF_CPU'], {}), '(NUMBER_OF_CPU)\n', (992, 1007), True, 'import baselines.common.tf_util as U\n'), ((1064, 1082), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (1072, 1082), False, 'import gym\n'), ((1498, 1514), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1512, 1514), True, 'import tensorflow as tf\n'), ((1705, 1724), 'baselines.deepq.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['(50000)'], {}), '(50000)\n', (1717, 1724), False, 'from baselines.deepq.replay_buffer import ReplayBuffer\n'), ((1931, 2001), 'baselines.common.schedules.LinearSchedule', 'LinearSchedule', ([], {'schedule_timesteps': '(100000)', 'initial_p': '(1.0)', 'final_p': '(0.02)'}), '(schedule_timesteps=100000, initial_p=1.0, final_p=0.02)\n', (1945, 2001), False, 'from baselines.common.schedules import LinearSchedule\n'), ((2084, 2098), 'baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (2096, 2098), True, 'import baselines.common.tf_util as U\n'), ((4318, 4351), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4349, 4351), True, 'import tensorflow as tf\n'), ((4378, 4400), 'os.makedirs', 
'os.makedirs', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (4389, 4400), False, 'import os\n'), ((1392, 1436), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0005)'}), '(learning_rate=0.0005)\n', (1414, 1436), True, 'import tensorflow as tf\n'), ((1246, 1298), 'baselines.common.tf_util.BatchInput', 'U.BatchInput', (['env.observation_space.shape'], {'name': 'name'}), '(env.observation_space.shape, name=name)\n', (1258, 1298), True, 'import baselines.common.tf_util as U\n'), ((4477, 4501), 'os.path.isdir', 'os.path.isdir', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (4490, 4501), False, 'import os\n'), ((3989, 4010), 'baselines.logger.dump_tabular', 'logger.dump_tabular', ([], {}), '()\n', (4008, 4010), False, 'from baselines import logger\n'), ((3207, 3228), 'numpy.ones_like', 'np.ones_like', (['rewards'], {}), '(rewards)\n', (3219, 3228), True, 'import numpy as np\n')] |
"""
Student Name: <NAME>
Student ID: U32332746
Class: CS480
Assignment Number: 4
Due Date: Tuesday 12/07, 11:59 PM
no collaborations
"""
import math
import numpy as np
import ColorType
from Animation import Animation
from Component import Component
from Light import Light
from Material import Material
from Point import Point
import GLUtility
from DisplayableCube import DisplayableCube
from DisplayableTorus import DisplayableTorus
from DisplayableSphere import DisplayableSphere
from DisplayableCubeEBO import DisplayableCubeEBO
from DisplayableEllipsoid import DisplayableEllipsoid
from DisplayableCylinder import DisplayableCylinder
from DisplayableDisk import DisplayableDisk
##### TODO 1: Generate Triangle Meshes
# Requirements:
# 1. Use Element Buffer Object (EBO) to draw the cube. The cube provided in the start code is drawn with Vertex Buffer
# Object (VBO). In the DisplayableCube class draw method, you should switch from VBO draw to EBO draw. To achieve
# this, please first read through VBO and EBO classes in GLBuffer. Then you rewrite the self.vertices and self.indices
# in the DisplayableCube class. Once you have all these down, then switch the line vbo.draw() to ebo.draw().
# 2. Generate Displayable classes for an ellipsoid, torus, and cylinder with end caps.
# These classes should be like the DisplayableCube class and they should all use EBO in the draw method.
# PS: You must use the ellipsoid formula to generate it, scaling the Displayable sphere doesn't count
#
# Displayable object's self.vertices numpy matrix should be defined as this table:
# Column | 0:3 | 3:6 | 6:9 | 9:11
# Stores | Vertex coordinates | Vertex normal | Vertex Color | Vertex texture Coordinates
#
# Their __init__ method should accept following input
# arguments:
# DisplayableEllipsoid(radiusInX, radiusInY, radiusInZ, slices, stacks)
# DisplayableTorus(innerRadius, outerRadius, nsides, rings)
# DisplayableCylinder(endRadius, height, slices, stacks)
#
##### TODO 5: Create your scenes
# Requirements:
# 1. We provide a fixed scene (SceneOne) for you with preset lights, material, and model parameters.
# This scene will be used to examine your illumination implementation, and you should not modify it.
# 2. Create 3 new scenes (can be static scenes). Each of your scenes must have
# * at least 3 differently shaped solid objects
# * each object should have a different material
# * at least 2 lights
# * All types of lights should be used
# 3. Provide a keyboard interface that allows the user to toggle on/off each of the lights in your scene model:
# Hit 1, 2, 3, 4, etc. to identify which light to toggle.
class SceneTwo(Component):
    """
    A scene containing three differently shaped solid objects (sphere, cube,
    torus), each with its own material, lit by up to two orbiting lights.
    ``lightMode`` selects which of the two lights (and their marker cubes)
    are active: 0 = both, 1 = first only, 2 = second only.
    """
    lights = None            # list of active Light objects
    lightCubes = None        # small cube markers drawn at each light position
    shaderProg = None        # shader program shared by all displayables
    glutility = None         # matrix helper (translate/rotate builders)
    lRadius = None           # orbit radius used by lightPos
    lAngles = None           # current orbit angle (degrees) per light
    lTransformations = None  # per-light transform applied after the orbit
    def __init__(self, shaderProg,curMode=0,lightMode=0):
        """
        Build the scene graph: three shaded objects plus the light markers
        selected by ``lightMode``.

        :param shaderProg: compiled shader program passed to every displayable
        :param curMode: accepted but ignored — self.curMode is forced to 0
            below (NOTE(review): looks like an oversight; confirm intent)
        :param lightMode: which lights to enable (0 = both, 1 or 2 = one only)
        """
        super().__init__(Point((0, 0, 0)))
        self.shaderProg = shaderProg
        self.glutility = GLUtility.GLUtility()
        self.curMode = 0  # NOTE(review): the curMode argument is discarded here
        self.lightMode = lightMode
        # One transform per light: a translation and two rotations about Z,
        # applied on top of the circular orbit computed in lightPos.
        self.lTransformations = [self.glutility.translate(0, 2, 0, False),
                                 self.glutility.rotate(60, [0, 0, 1], False),
                                 self.glutility.rotate(120, [0, 0, 1], False)]
        self.lRadius = 3
        self.lAngles = [0, 0, 0]
        # routing collects the solid objects so their rendering mode can be
        # set uniformly below.
        routing = []
        sphere = Component(Point((0,0.5,0)), DisplayableSphere(shaderProg, 0.3, 36, 36)) # radius, stacks, slices
        m0 = Material(np.array((0.1, 0.1, 0.1, 0.1)), np.array((0.2, 0.2, 0.2, 1)),
                      np.array((1, 1, 1, 0.1)), 64)
        sphere.setMaterial(m0)
        routing.append(sphere)
        self.addChild(sphere)
        cube = Component(Point((-1,-0.5,0)), DisplayableCubeEBO(shaderProg, 1, 1, 1, ColorType.ORANGE))
        m1 = Material(np.array((0.1, 0.1, 0.1, 0.1)), np.array((0.2, 0.2, 0.2, 1)),
                      np.array((0.4, 0.4, 0.4, 0.1)), 64)
        cube.setMaterial(m1)
        routing.append(cube)
        self.addChild(cube)
        torus = Component(Point((1,-0.5,0)), DisplayableTorus(shaderProg, 0.25, 0.5, 36, 36, ColorType.PURPLE))
        m2 = Material(np.array((0.1, 0.1, 0.1, 0.1)), np.array((0.2, 0.2, 0.2, 1)),
                      np.array((0.8, 0.8, 0.8, 0.1)), 64)
        torus.setMaterial(m2)
        routing.append(torus)
        torus.rotate(90, torus.uAxis)
        self.addChild(torus)
        # Solid objects are shaded via the "normal" routing; the light marker
        # cubes below use plain "vertex" coloring instead.
        for i in routing:
            i.renderingRouting = "normal"# "vertex"
        # Light 1 (soft blue) with its marker cube.
        l1 = Light(self.lightPos(self.lRadius, self.lAngles[1], self.lTransformations[1]),
                  np.array((*ColorType.SOFTBLUE, 1.0)))
        lightCube1 = Component(Point((0, 1.5, 0)), DisplayableCube(shaderProg, 0.1, 0.1, 0.1, ColorType.SOFTBLUE))
        lightCube1.renderingRouting = "vertex"
        # Light 2 (soft green) with its marker cube.
        l2 = Light(self.lightPos(self.lRadius, self.lAngles[2], self.lTransformations[2]),
                  np.array((*ColorType.SOFTGREEN, 1.0)))
        lightCube2 = Component(Point((0, -1.5, 0)), DisplayableCube(shaderProg, 0.1, 0.1, 0.1, ColorType.SOFTGREEN))
        lightCube2.renderingRouting = "vertex"
        # Activate the lights selected by lightMode; self.lights and
        # self.lightCubes stay index-aligned for animationUpdate below.
        if self.lightMode == 0:
            self.addChild(lightCube1)
            self.addChild(lightCube2)
            self.lights = [l1, l2]
            self.lightCubes = [lightCube1, lightCube2]
        if self.lightMode == 1:
            self.addChild(lightCube1)
            self.lights = [l1]
            self.lightCubes = [lightCube1]
        if self.lightMode == 2:
            self.addChild(lightCube2)
            self.lights = [l2]
            self.lightCubes = [lightCube2]
    def lightPos(self, radius: float, thetaAng: float, transformationMatrix: np.ndarray) -> np.ndarray:
        """
        Compute a light's world position: a point on a circle of ``radius``
        at angle ``thetaAng`` (degrees, in the X-Z plane) transformed by the
        given 4x4 homogeneous matrix. Returns the (x, y, z) coordinates.
        """
        r = np.zeros(4)
        r[0] = radius * math.cos(thetaAng / 180 * math.pi)
        r[2] = radius * math.sin(thetaAng / 180 * math.pi)
        r[3] = 1
        r = transformationMatrix @ r
        return r[0:3]
    def animationUpdate(self):
        """
        Advance each light along its orbit (different angular speeds), push
        the new positions to the shader, and recurse into animated children.
        """
        self.lAngles[0] = (self.lAngles[0] + 0.5) % 360
        self.lAngles[1] = (self.lAngles[1] + 0.7) % 360
        self.lAngles[2] = (self.lAngles[2] + 1.0) % 360
        for i, v in enumerate(self.lights):
            # Move both the Light and its visible marker cube together.
            lPos = self.lightPos(self.lRadius, self.lAngles[i], self.lTransformations[i])
            self.lightCubes[i].setCurrentPosition(Point(lPos))
            self.lights[i].setPosition(lPos)
            self.shaderProg.setLight(i, v)
        for c in self.children:
            if isinstance(c, Animation):
                c.animationUpdate()
    def initialize(self):
        """Clear any stale shader lights, upload this scene's, then init children."""
        self.shaderProg.clearAllLights()
        for i, v in enumerate(self.lights):
            self.shaderProg.setLight(i, v)
        super().initialize()
| [
"DisplayableTorus.DisplayableTorus",
"DisplayableSphere.DisplayableSphere",
"numpy.zeros",
"DisplayableCube.DisplayableCube",
"math.sin",
"DisplayableCubeEBO.DisplayableCubeEBO",
"numpy.array",
"math.cos",
"Point.Point",
"GLUtility.GLUtility"
] | [((3137, 3158), 'GLUtility.GLUtility', 'GLUtility.GLUtility', ([], {}), '()\n', (3156, 3158), False, 'import GLUtility\n'), ((5964, 5975), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5972, 5975), True, 'import numpy as np\n'), ((3055, 3071), 'Point.Point', 'Point', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (3060, 3071), False, 'from Point import Point\n'), ((3586, 3604), 'Point.Point', 'Point', (['(0, 0.5, 0)'], {}), '((0, 0.5, 0))\n', (3591, 3604), False, 'from Point import Point\n'), ((3604, 3646), 'DisplayableSphere.DisplayableSphere', 'DisplayableSphere', (['shaderProg', '(0.3)', '(36)', '(36)'], {}), '(shaderProg, 0.3, 36, 36)\n', (3621, 3646), False, 'from DisplayableSphere import DisplayableSphere\n'), ((3695, 3725), 'numpy.array', 'np.array', (['(0.1, 0.1, 0.1, 0.1)'], {}), '((0.1, 0.1, 0.1, 0.1))\n', (3703, 3725), True, 'import numpy as np\n'), ((3727, 3755), 'numpy.array', 'np.array', (['(0.2, 0.2, 0.2, 1)'], {}), '((0.2, 0.2, 0.2, 1))\n', (3735, 3755), True, 'import numpy as np\n'), ((3780, 3804), 'numpy.array', 'np.array', (['(1, 1, 1, 0.1)'], {}), '((1, 1, 1, 0.1))\n', (3788, 3804), True, 'import numpy as np\n'), ((3941, 3961), 'Point.Point', 'Point', (['(-1, -0.5, 0)'], {}), '((-1, -0.5, 0))\n', (3946, 3961), False, 'from Point import Point\n'), ((3961, 4018), 'DisplayableCubeEBO.DisplayableCubeEBO', 'DisplayableCubeEBO', (['shaderProg', '(1)', '(1)', '(1)', 'ColorType.ORANGE'], {}), '(shaderProg, 1, 1, 1, ColorType.ORANGE)\n', (3979, 4018), False, 'from DisplayableCubeEBO import DisplayableCubeEBO\n'), ((4043, 4073), 'numpy.array', 'np.array', (['(0.1, 0.1, 0.1, 0.1)'], {}), '((0.1, 0.1, 0.1, 0.1))\n', (4051, 4073), True, 'import numpy as np\n'), ((4075, 4103), 'numpy.array', 'np.array', (['(0.2, 0.2, 0.2, 1)'], {}), '((0.2, 0.2, 0.2, 1))\n', (4083, 4103), True, 'import numpy as np\n'), ((4128, 4158), 'numpy.array', 'np.array', (['(0.4, 0.4, 0.4, 0.1)'], {}), '((0.4, 0.4, 0.4, 0.1))\n', (4136, 4158), True, 'import numpy as np\n'), ((4290, 
4309), 'Point.Point', 'Point', (['(1, -0.5, 0)'], {}), '((1, -0.5, 0))\n', (4295, 4309), False, 'from Point import Point\n'), ((4309, 4374), 'DisplayableTorus.DisplayableTorus', 'DisplayableTorus', (['shaderProg', '(0.25)', '(0.5)', '(36)', '(36)', 'ColorType.PURPLE'], {}), '(shaderProg, 0.25, 0.5, 36, 36, ColorType.PURPLE)\n', (4325, 4374), False, 'from DisplayableTorus import DisplayableTorus\n'), ((4399, 4429), 'numpy.array', 'np.array', (['(0.1, 0.1, 0.1, 0.1)'], {}), '((0.1, 0.1, 0.1, 0.1))\n', (4407, 4429), True, 'import numpy as np\n'), ((4431, 4459), 'numpy.array', 'np.array', (['(0.2, 0.2, 0.2, 1)'], {}), '((0.2, 0.2, 0.2, 1))\n', (4439, 4459), True, 'import numpy as np\n'), ((4484, 4514), 'numpy.array', 'np.array', (['(0.8, 0.8, 0.8, 0.1)'], {}), '((0.8, 0.8, 0.8, 0.1))\n', (4492, 4514), True, 'import numpy as np\n'), ((4857, 4893), 'numpy.array', 'np.array', (['(*ColorType.SOFTBLUE, 1.0)'], {}), '((*ColorType.SOFTBLUE, 1.0))\n', (4865, 4893), True, 'import numpy as np\n'), ((4927, 4945), 'Point.Point', 'Point', (['(0, 1.5, 0)'], {}), '((0, 1.5, 0))\n', (4932, 4945), False, 'from Point import Point\n'), ((4947, 5009), 'DisplayableCube.DisplayableCube', 'DisplayableCube', (['shaderProg', '(0.1)', '(0.1)', '(0.1)', 'ColorType.SOFTBLUE'], {}), '(shaderProg, 0.1, 0.1, 0.1, ColorType.SOFTBLUE)\n', (4962, 5009), False, 'from DisplayableCube import DisplayableCube\n'), ((5171, 5208), 'numpy.array', 'np.array', (['(*ColorType.SOFTGREEN, 1.0)'], {}), '((*ColorType.SOFTGREEN, 1.0))\n', (5179, 5208), True, 'import numpy as np\n'), ((5242, 5261), 'Point.Point', 'Point', (['(0, -1.5, 0)'], {}), '((0, -1.5, 0))\n', (5247, 5261), False, 'from Point import Point\n'), ((5263, 5326), 'DisplayableCube.DisplayableCube', 'DisplayableCube', (['shaderProg', '(0.1)', '(0.1)', '(0.1)', 'ColorType.SOFTGREEN'], {}), '(shaderProg, 0.1, 0.1, 0.1, ColorType.SOFTGREEN)\n', (5278, 5326), False, 'from DisplayableCube import DisplayableCube\n'), ((6001, 6035), 'math.cos', 'math.cos', 
(['(thetaAng / 180 * math.pi)'], {}), '(thetaAng / 180 * math.pi)\n', (6009, 6035), False, 'import math\n'), ((6061, 6095), 'math.sin', 'math.sin', (['(thetaAng / 180 * math.pi)'], {}), '(thetaAng / 180 * math.pi)\n', (6069, 6095), False, 'import math\n'), ((6567, 6578), 'Point.Point', 'Point', (['lPos'], {}), '(lPos)\n', (6572, 6578), False, 'from Point import Point\n')] |
# Copyright 2015-2021 - RoboDK Inc. - https://robodk.com/
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------
# --------------- DESCRIPTION ----------------
#
# This is a robotics toolbox for RoboDK robot post processors and RoboDK API for Python
# This toolbox includes a simple matrix class for pose transofmrations (Mat)
# This toolbox has been inspired from Peter Corke's Robotics Toolbox:
# http://petercorke.com/wordpress/toolboxes/robotics-toolbox
#
# In this document: pose = transformation matrix = homogeneous matrix = 4x4 matrix
#
# More information about the RoboDK API for Python here:
# https://robodk.com/doc/en/RoboDK-API.html
# https://robodk.com/doc/en/PythonAPI/index.html
#
# More information about RoboDK post processors here:
# https://robodk.com/help#PostProcessor
#
# Visit the Matrix and Quaternions FAQ for more information about pose/homogeneous transformations
# http://www.j3d.org/matrix_faq/matrfaq_latest.html
# --------------------------------------------
import math
import operator
import sys
import unittest
import time
#----------------------------------------------------
#-------- Generic file usage ---------------
import os.path
import time
def searchfiles(pattern='C:\\RoboDK\\Library\\*.rdk'):
    """Return the list of file paths matching a glob pattern
    (for example, all *.rdk station files in a folder)."""
    from glob import glob
    return glob(pattern)
#def CurrentFile(file = __file__):
# """Returns the current Python file being executed"""
# return os.path.realpath(file)
def getFileDir(filepath):
    """Return the directory portion of a file path."""
    directory, _basename = os.path.split(filepath)
    return directory
def getBaseName(filepath):
    """Return the file name (including its extension) of a file path."""
    _directory, basename = os.path.split(filepath)
    return basename
def getFileName(filepath):
    """Return the file name of a file path, without its extension."""
    basename = os.path.basename(filepath)
    name, _ext = os.path.splitext(basename)
    return name
def DateModified(filepath, stringformat=False):
    """Return the last-modification time of a file: seconds since the
    epoch by default, or a human-readable string when *stringformat* is True."""
    mtime = os.path.getmtime(filepath)
    if stringformat:
        return time.ctime(mtime)
    return mtime
def DateCreated(filepath, stringformat=False):
    """Return the creation/metadata-change time of a file (platform
    dependent, see os.path.getctime): seconds since the epoch by default,
    or a human-readable string when *stringformat* is True."""
    ctime = os.path.getctime(filepath)
    if stringformat:
        return time.ctime(ctime)
    return ctime
def DirExists(folder):
    """Return True when *folder* is an existing directory."""
    return os.path.isdir(folder)
def FileExists(file):
    """Return True when the given path exists on disk."""
    return os.path.exists(file)
def FilterName(namefilter, safechar='P', reserved_names=None):
    """Return a safe program or variable name that can be used for robot programming.

    Strips characters most robot controllers reject, drops non-ASCII
    characters, guarantees a non-empty name that does not start with a
    digit, and optionally avoids case-insensitive collisions with
    *reserved_names* (the chosen name is appended to that list).

    :param str namefilter: candidate name to sanitize
    :param str safechar: prefix used when the name is empty, starts with a digit, or collides
    :param reserved_names: list of already-used names, updated in place
    :type reserved_names: list of str or None
    """
    # Remove characters not accepted by robot controllers
    for c in r' -[]/\;,><&*:%=+@!#^|?^':
        namefilter = namefilter.replace(c, '')

    # Keep printable ASCII only (controllers reject accented/unicode chars)
    namefilter = ''.join(c for c in namefilter if 0 < ord(c) < 127)

    # Make sure we have a non empty string
    if len(namefilter) <= 0:
        namefilter = safechar

    # Make sure we don't start with a number (removed stray debug print)
    if namefilter[0].isdigit():
        namefilter = safechar + namefilter

    # Make sure we are not using a reserved name
    if reserved_names is not None:
        # BUG FIX: compare case-insensitively on BOTH sides. Previously the
        # candidate was lowercased but the stored names were not, so a name
        # appended as e.g. "Pabc" never matched "pabc" on the next call and
        # duplicate "unique" names could be returned.
        while namefilter.lower() in (name.lower() for name in reserved_names):
            namefilter = safechar + namefilter
        # Add the name to reserved names
        reserved_names.append(namefilter)

    return namefilter
#----------------------------------------------------
#-------- Generic math usage ---------------
pi = math.pi  # module-level shorthand for math.pi, used by the pose helpers below
def pause(seconds):
    """Block the current thread for the given number of seconds.

    :param float seconds: time to wait, in seconds"""
    time.sleep(seconds)
def atan2(y, x):
    """Return the angle (rad) of the 2D point (x, y) in the XY plane,
    in the range (-pi, pi]. Thin wrapper over :func:`math.atan2`."""
    return math.atan2(y, x)
def sqrt(value):
    """Return the square root of *value* (wrapper over :func:`math.sqrt`)."""
    return math.sqrt(value)
def sin(value):
    """Return the sine of an angle given in radians."""
    return math.sin(value)
def cos(value):
    """Return the cosine of an angle given in radians."""
    return math.cos(value)
def asin(value):
    """Return the arc sine, in radians."""
    return math.asin(value)
def acos(value):
    """Return the arc cosine, in radians."""
    return math.acos(value)
def name_2_id(str_name_id):
    """Return the trailing number of a numbered object name as a float.
    For example "Frame 3", "Frame3" and "Fram3 3" all give 3.0.
    Returns -1 when the name contains no digits."""
    import re
    matches = re.findall('[0-9]+', str_name_id)
    if not matches:
        return -1
    return float(matches[-1])
#----------------------------------------------------
#-------- Generic matrix usage ---------------
def rotx(rx):
    """Return the 4x4 homogeneous matrix for a rotation of *rx* radians
    around the X axis.

    :param float rx: rotation around X axis in radians

    .. seealso:: :func:`~robodk.transl`, :func:`~robodk.roty`, :func:`~robodk.rotz`
    """
    c = math.cos(rx)
    s = math.sin(rx)
    return Mat([[1, 0, 0, 0],
                [0, c, -s, 0],
                [0, s, c, 0],
                [0, 0, 0, 1]])
def roty(ry):
    """Return the 4x4 homogeneous matrix for a rotation of *ry* radians
    around the Y axis.

    :param float ry: rotation around Y axis in radians

    .. seealso:: :func:`~robodk.transl`, :func:`~robodk.rotx`, :func:`~robodk.rotz`
    """
    c = math.cos(ry)
    s = math.sin(ry)
    return Mat([[c, 0, s, 0],
                [0, 1, 0, 0],
                [-s, 0, c, 0],
                [0, 0, 0, 1]])
def rotz(rz):
    """Return the 4x4 homogeneous matrix for a rotation of *rz* radians
    around the Z axis.

    :param float rz: rotation around Z axis in radians

    .. seealso:: :func:`~robodk.transl`, :func:`~robodk.rotx`, :func:`~robodk.roty`
    """
    c = math.cos(rz)
    s = math.sin(rz)
    return Mat([[c, -s, 0, 0],
                [s, c, 0, 0],
                [0, 0, 1, 0],
                [0, 0, 0, 1]])
def transl(tx, ty=None, tz=None):
    """Return a 4x4 homogeneous translation matrix (mm).

    Accepts either three scalars ``transl(x, y, z)`` or a single sequence
    ``transl([x, y, z])`` (when *ty* is None).

    :param tx: translation along X, or a sequence [x, y, z]
    :param ty: translation along Y (optional)
    :param tz: translation along Z (optional)

    .. seealso:: :func:`~robodk.rotx`, :func:`~robodk.roty`, :func:`~robodk.rotz`
    """
    if ty is None:
        # A single [x, y, z] sequence was provided
        x, y, z = tx[0], tx[1], tx[2]
    else:
        x, y, z = tx, ty, tz
    return Mat([[1, 0, 0, x],
                [0, 1, 0, y],
                [0, 0, 1, z],
                [0, 0, 0, 1]])
def RelTool(target_pose, x, y, z, rx=0, ry=0, rz=0):
    """Calculates a relative target with respect to the tool coordinates. This procedure has exactly the same behavior as ABB's RelTool instruction.
    X,Y,Z are in mm, RX,RY,RZ are in degrees.

    :param target_pose: reference pose (a :class:`.Mat`, or an API item exposing ``Pose()``)
    :param float x: translation along the Tool X axis (mm)
    :param float y: translation along the Tool Y axis (mm)
    :param float z: translation along the Tool Z axis (mm)
    :param float rx: rotation around the Tool X axis (deg) (optional)
    :param float ry: rotation around the Tool Y axis (deg) (optional)
    :param float rz: rotation around the Tool Z axis (deg) (optional)

    .. seealso:: :func:`~robodk.Offset`, :func:`~robodk.transl`, :func:`~robodk.rotx`, :func:`~robodk.roty`, :func:`~robodk.rotz`
    """
    if type(target_pose) != Mat:
        # An API item was provided instead of a pose: retrieve its pose
        target_pose = target_pose.Pose()
    # Post-multiplication: the offset is expressed in the TOOL frame
    new_target = target_pose * transl(x, y, z) * rotx(rx * pi / 180) * roty(ry * pi / 180) * rotz(rz * pi / 180)
    return new_target
def Offset(target_pose, x, y, z, rx=0, ry=0, rz=0):
    """Calculates a relative target with respect to the reference frame coordinates.
    X,Y,Z are in mm, RX,RY,RZ are in degrees.

    :param target_pose: reference pose (a :class:`.Mat`, or an API item exposing ``Pose()``)
    :param float x: translation along the Reference X axis (mm)
    :param float y: translation along the Reference Y axis (mm)
    :param float z: translation along the Reference Z axis (mm)
    :param float rx: rotation around the Reference X axis (deg) (optional)
    :param float ry: rotation around the Reference Y axis (deg) (optional)
    :param float rz: rotation around the Reference Z axis (deg) (optional)
    :raises Exception: if the pose matrix is not homogeneous

    .. seealso:: :func:`~robodk.RelTool`, :func:`~robodk.transl`, :func:`~robodk.rotx`, :func:`~robodk.roty`, :func:`~robodk.rotz`
    """
    if type(target_pose) != Mat:
        # item object assumed:
        target_pose = target_pose.Pose()
    if not target_pose.isHomogeneous():
        # NOTE(review): this raises Exception with MatrixError as an argument;
        # consider raising MatrixError directly
        raise Exception(MatrixError, "Pose matrix is not homogeneous!")
    # Pre-multiplication: the offset is expressed in the REFERENCE frame
    new_target = transl(x, y, z) * rotx(rx * pi / 180.0) * roty(ry * pi / 180.0) * rotz(rz * pi / 180.0) * target_pose
    return new_target
def point_Zaxis_2_pose(point, zaxis, yaxis_hint1=[0, 0, 1], yaxis_hint2=[0, 1, 1]):
    """Returns a pose given the origin as a point, a Z axis and a preferred orientation for the Y axis.

    :param point: pose origin [x, y, z]
    :param zaxis: direction of the pose Z axis
    :param yaxis_hint1: preferred direction used to build the Y axis
    :param yaxis_hint2: fallback direction, used when *zaxis* is nearly parallel to *yaxis_hint1*
    """
    # NOTE(review): the list default arguments are never mutated here, so
    # the shared-mutable-default pitfall does not apply
    pose = eye(4)
    pose.setPos(point)
    pose.setVZ(zaxis)
    yaprox = yaxis_hint1
    # If the hint is within 2 degrees of the Z axis the cross product would
    # be ill-conditioned: switch to the fallback hint
    if angle3(zaxis, yaprox) < 2 * pi / 180:
        yaprox = yaxis_hint2
    # Build an orthonormal frame: X = normalize(hint x Z), Y = Z x X
    xaxis = normalize3(cross(yaprox, zaxis))
    yaxis = cross(zaxis, xaxis)
    pose.setVX(xaxis)
    pose.setVY(yaxis)
    return pose
def eye(size=4):
    """Return the identity matrix of the given size (4x4 by default).
    For any other size, a size x size matrix initialized to zero is
    built and its diagonal is set to 1.

    :param int size: square matrix size

    .. seealso:: :func:`~robodk.transl`, :func:`~robodk.rotx`, :func:`~robodk.roty`, :func:`~robodk.rotz`
    """
    if size == 4:
        return Mat([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    identity = Mat(size, size)
    for k in range(size):
        identity[k, k] = 1
    return identity
def size(matrix, dim=None):
    """Return the matrix size (m, n), or a single dimension when *dim* is
    given (0 for the number of rows, 1 for the number of columns).

    :param matrix: matrix to measure
    :type matrix: :class:`.Mat`
    :param dim: optional dimension selector
    :type dim: int
    """
    return matrix.size(dim)
def tr(matrix):
    """Return the transpose of *matrix*.

    :param matrix: matrix to transpose
    :type matrix: :class:`.Mat`"""
    return matrix.tr()
def invH(matrix):
    """Return the inverse of a homogeneous (pose) matrix.

    :param matrix: pose to invert
    :type matrix: :class:`.Mat`

    .. seealso:: :func:`~robodk.transl`, :func:`~robodk.rotx`, :func:`~robodk.roty`, :func:`~robodk.rotz`
    """
    return matrix.invH()
def catV(mat1, mat2):
    """Concatenate two matrices vertically (stack *mat2* under *mat1*)."""
    return mat1.catV(mat2)
def catH(mat1, mat2):
    """Concatenate two matrices horizontally (place *mat2* right of *mat1*)."""
    return mat1.catH(mat2)
def tic():
    """Start the stopwatch timer (pair with :func:`toc` to read it)."""
    import time
    global TICTOC_START_TIME
    TICTOC_START_TIME = time.time()
def toc():
    """Return the seconds elapsed since the last call to :func:`tic`,
    or -1 (with a warning printed) if the timer was never started."""
    import time
    if 'TICTOC_START_TIME' not in globals():
        print("Toc: start time not set")
        return -1
    return time.time() - TICTOC_START_TIME
def LoadList(strfile, separator=',', codec='utf-8'):
    """Load a CSV or TXT file into a Python list of rows. Each cell is
    converted to float when possible; cells that are not valid numbers
    are kept as strings.

    :param str strfile: file to read
    :param str separator: cell delimiter
    :param str codec: text encoding of the file

    .. seealso:: :func:`~robodk.SaveList`, :func:`~robodk.LoadMat`

    Example:

    .. code-block:: python

        csvdata = LoadList(strfile, ',')
        for row in csvdata:
            print(row)
    """
    import csv
    import codecs

    def _to_number(cell):
        # Keep the raw string when the cell is not a valid number
        try:
            return float(cell)
        except:
            return cell

    rows = []
    with codecs.open(strfile, "r", codec) as fh:
        for record in csv.reader(fh, delimiter=separator, quotechar='|'):
            rows.append([_to_number(cell) for cell in record])
    return rows
def SaveList(list_variable, strfile, separator=','):
    """Save a list (or list of lists) to a CSV or TXT file.

    .. seealso:: :func:`~robodk.LoadList`, :func:`~robodk.LoadMat`"""
    mat = Mat(list_variable)
    mat.tr().SaveMat(strfile, separator)
def LoadMat(strfile, separator=','):
    """Load a CSV or TXT file as a :class:`.Mat` matrix.

    .. seealso:: :func:`~robodk.LoadList`
    """
    data = LoadList(strfile, separator)
    return Mat(data)
#----------------------------------------------------
#------ Pose to xyzrpw and xyzrpw to pose------------
def PosePP(x,y,z,r,p,w):
    """Create a pose (4x4 matrix) from XYZABC coordinates (mm and deg), the
    format used by KUKA controllers. Produces the same matrix as
    :func:`~robodk.KUKA_2_Pose` but takes scalars instead of a list; it is
    referenced as "p" by the intermediate file when generating a robot program.

    :param float x: translation along X (mm)
    :param float y: translation along Y (mm)
    :param float z: translation along Z (mm)
    :param float r: first Euler angle (deg)
    :param float p: second Euler angle (deg)
    :param float w: third Euler angle (deg)

    .. seealso:: :func:`~robodk.KUKA_2_Pose`, :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`
    """
    # Convert the three angles to radians
    a = r*math.pi/180.0
    b = p*math.pi/180.0
    c = w*math.pi/180.0
    # Precompute sines and cosines used by the rotation matrix below
    ca = math.cos(a)
    sa = math.sin(a)
    cb = math.cos(b)
    sb = math.sin(b)
    cc = math.cos(c)
    sc = math.sin(c)
    return Mat([[cb*ca,ca*sc*sb-cc*sa,sc*sa+cc*ca*sb,x],[cb*sa,cc*ca+sc*sb*sa,cc*sb*sa-ca*sc,y],[-sb,cb*sc,cc*cb,z],[0.0,0.0,0.0,1.0]])
def pose_2_xyzrpw(H):
    """Calculates the equivalent position (mm) and Euler angles (deg) as an [x,y,z,r,p,w] array, given a pose.
    It returns the values that correspond to the following operation:
    transl(x,y,z)*rotz(w*pi/180)*roty(p*pi/180)*rotx(r*pi/180)

    :param H: pose
    :type H: :class:`.Mat`
    :return: [x,y,z,r,p,w] in mm and deg

    .. seealso:: :func:`~robodk.xyzrpw_2_pose`, :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose_2_KUKA`
    """
    x = H[0, 3]
    y = H[1, 3]
    z = H[2, 3]
    # H[2,0] = -sin(p): the two branches below handle the gimbal-lock
    # singularities where p = -/+ 90 deg and r/w are no longer independent
    if (H[2, 0] > (1.0 - 1e-10)):
        # Singularity: p = -90 deg; conventionally fix r = 0
        p = -pi / 2
        r = 0
        w = math.atan2(-H[1, 2], H[1, 1])
    elif H[2, 0] < -1.0 + 1e-10:
        # Singularity: p = +90 deg; conventionally fix r = 0
        p = pi / 2
        r = 0
        w = math.atan2(H[1, 2], H[1, 1])
    else:
        # Regular case: recover each angle from sin/cos pairs
        p = math.atan2(-H[2, 0], sqrt(H[0, 0] * H[0, 0] + H[1, 0] * H[1, 0]))
        w = math.atan2(H[1, 0], H[0, 0])
        r = math.atan2(H[2, 1], H[2, 2])
    return [x, y, z, r * 180 / pi, p * 180 / pi, w * 180 / pi]
def xyzrpw_2_pose(xyzrpw):
    """Calculates the pose from the position (mm) and Euler angles (deg), given a [x,y,z,r,p,w] array.
    The result is the same as calling: H = transl(x,y,z)*rotz(w*pi/180)*roty(p*pi/180)*rotx(r*pi/180)

    :param xyzrpw: [x,y,z,r,p,w] in mm and deg
    :type xyzrpw: list of float

    .. seealso:: :func:`~robodk.pose_2_xyzrpw`, :func:`~robodk.TxyzRxyz_2_Pose`
    """
    [x, y, z, r, p, w] = xyzrpw
    # Convert the three angles to radians
    a = r * pi / 180
    b = p * pi / 180
    c = w * pi / 180
    # Precompute sines and cosines of the expanded rotz*roty*rotx product
    ca = math.cos(a)
    sa = math.sin(a)
    cb = math.cos(b)
    sb = math.sin(b)
    cc = math.cos(c)
    sc = math.sin(c)
    H = Mat([[cb * cc, cc * sa * sb - ca * sc, sa * sc + ca * cc * sb, x], [cb * sc, ca * cc + sa * sb * sc, ca * sb * sc - cc * sa, y], [-sb, cb * sa, ca * cb, z], [0, 0, 0, 1]])
    return H
def Pose(x, y, z, rxd, ryd, rzd):
    """Returns the pose (:class:`.Mat`) given the position (mm) and Euler angles (deg) as an array [x,y,z,rx,ry,rz].
    The result is the same as calling: H = transl(x,y,z)*rotx(rx*pi/180)*roty(ry*pi/180)*rotz(rz*pi/180)
    This pose format is printed for homogeneous poses automatically. This Pose is the same representation used by Mecademic or Staubli robot controllers.

    :param float x: position (X coordinate, mm)
    :param float y: position (Y coordinate, mm)
    :param float z: position (Z coordinate, mm)
    :param float rxd: rotation around X in degrees
    :param float ryd: rotation around Y in degrees
    :param float rzd: rotation around Z in degrees

    .. seealso:: :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`
    """
    # Convert degrees to radians
    rx = rxd * pi / 180
    ry = ryd * pi / 180
    rz = rzd * pi / 180
    # Precompute sines and cosines of the expanded rotx*roty*rotz product
    srx = math.sin(rx)
    crx = math.cos(rx)
    sry = math.sin(ry)
    cry = math.cos(ry)
    srz = math.sin(rz)
    crz = math.cos(rz)
    return Mat([[cry * crz, -cry * srz, sry, x], [crx * srz + crz * srx * sry, crx * crz - srx * sry * srz, -cry * srx, y], [srx * srz - crx * crz * sry, crz * srx + crx * sry * srz, crx * cry, z], [0, 0, 0, 1]])
def TxyzRxyz_2_Pose(xyzrpw):
    """Returns the pose given the position (mm) and Euler angles (rad) as an array [x,y,z,rx,ry,rz].
    The result is the same as calling: H = transl(x,y,z)*rotx(rx)*roty(ry)*rotz(rz)

    :param xyzrpw: [x,y,z,rx,ry,rz] in mm and radians
    :type xyzrpw: list of float

    .. seealso:: :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose`
    """
    [x, y, z, rx, ry, rz] = xyzrpw
    # Precompute sines and cosines of the expanded rotx*roty*rotz product
    srx = math.sin(rx)
    crx = math.cos(rx)
    sry = math.sin(ry)
    cry = math.cos(ry)
    srz = math.sin(rz)
    crz = math.cos(rz)
    H = Mat([[cry * crz, -cry * srz, sry, x], [crx * srz + crz * srx * sry, crx * crz - srx * sry * srz, -cry * srx, y], [srx * srz - crx * crz * sry, crz * srx + crx * sry * srz, crx * cry, z], [0, 0, 0, 1]])
    return H
def Pose_2_TxyzRxyz(H):
    """Retrieve the position (mm) and Euler angles (rad) as an array [x,y,z,rx,ry,rz] given a pose.
    It returns the values that correspond to the following operation:
    H = transl(x,y,z)*rotx(rx)*roty(ry)*rotz(rz).

    :param H: pose
    :type H: :class:`.Mat`

    .. seealso:: :func:`~robodk.TxyzRxyz_2_Pose`
    """
    x = H[0, 3]
    y = H[1, 3]
    z = H[2, 3]
    # Selected entries of rotx(rx)*roty(ry)*rotz(rz):
    # a = cos(ry)cos(rz), b = -cos(ry)sin(rz), c = sin(ry),
    # d = -cos(ry)sin(rx), e = cos(rx)cos(ry)
    a = H[0, 0]
    b = H[0, 1]
    c = H[0, 2]
    d = H[1, 2]
    e = H[2, 2]
    if c > (1.0 - 1e-10):
        # Singularity: ry = +90 deg (gimbal lock); conventionally fix rx = 0
        ry1 = pi / 2
        rx1 = 0
        rz1 = atan2(H[1, 0], H[1, 1])
    elif c < (-1.0 + 1e-10):
        # Singularity: ry = -90 deg (gimbal lock); conventionally fix rx = 0
        ry1 = -pi / 2
        rx1 = 0
        rz1 = atan2(H[1, 0], H[1, 1])
    else:
        # Regular case: recover each angle from its sin/cos pair
        sy = c
        cy1 = +sqrt(1 - sy * sy)
        sx1 = -d / cy1
        cx1 = e / cy1
        sz1 = -b / cy1
        cz1 = a / cy1
        rx1 = atan2(sx1, cx1)
        ry1 = atan2(sy, cy1)
        rz1 = atan2(sz1, cz1)
    return [x, y, z, rx1, ry1, rz1]
def Pose_2_Staubli(H):
    """Convert a pose (4x4 matrix) to a Staubli XYZWPR target: position in
    mm plus the Txyz-Rxyz Euler angles converted to degrees.

    :param H: pose
    :type H: :class:`.Mat`
    """
    xyzwpr = Pose_2_TxyzRxyz(H)
    # Angles come back in radians: convert the last three values to degrees
    for k in (3, 4, 5):
        xyzwpr[k] = xyzwpr[k] * 180.0 / pi
    return xyzwpr
def Pose_2_Motoman(H):
    """Convert a pose (4x4 matrix) to a Motoman XYZWPR target (mm and deg).

    :param H: pose
    :type H: :class:`.Mat`
    """
    return pose_2_xyzrpw(H)
def Pose_2_Fanuc(H):
    """Convert a pose (4x4 matrix) to a Fanuc XYZWPR target (mm and deg).

    :param H: pose
    :type H: :class:`.Mat`
    """
    return pose_2_xyzrpw(H)
def Pose_2_Techman(H):
    """Convert a pose (4x4 matrix) to a Techman XYZWPR target (mm and deg).

    :param H: pose
    :type H: :class:`.Mat`
    """
    return pose_2_xyzrpw(H)
def Motoman_2_Pose(xyzwpr):
    """Convert a Motoman XYZWPR target (mm and deg) to a pose (4x4 matrix).

    .. seealso:: :func:`~robodk.Pose_2_Motoman`
    """
    pose = xyzrpw_2_pose(xyzwpr)
    return pose
def Fanuc_2_Pose(xyzwpr):
    """Convert a Fanuc XYZWPR target (mm and deg) to a pose (4x4 matrix).

    .. seealso:: :func:`~robodk.Pose_2_Fanuc`
    """
    pose = xyzrpw_2_pose(xyzwpr)
    return pose
def Techman_2_Pose(xyzwpr):
    """Convert a Techman XYZWPR target (mm and deg) to a pose (4x4 matrix).

    .. seealso:: :func:`~robodk.Pose_2_Techman`
    """
    pose = xyzrpw_2_pose(xyzwpr)
    return pose
def Pose_2_KUKA(H):
    """Converts a pose (4x4 matrix) to an XYZABC KUKA target (Euler angles), required by KUKA KRC controllers.

    :param H: pose
    :type H: :class:`.Mat`
    :return: [x,y,z,w,p,r] in mm and deg (note the w/p/r output order)

    .. seealso:: :func:`~robodk.KUKA_2_Pose`, :func:`~robodk.pose_2_xyzrpw`
    """
    x = H[0, 3]
    y = H[1, 3]
    z = H[2, 3]
    # H[2,0] = -sin(p): the two branches below handle the gimbal-lock
    # singularities where p = -/+ 90 deg and r/w are no longer independent
    if (H[2, 0]) > (1.0 - 1e-10):
        # Singularity: p = -90 deg; conventionally fix r = 0
        p = -pi / 2
        r = 0
        w = atan2(-H[1, 2], H[1, 1])
    elif (H[2, 0]) < (-1.0 + 1e-10):
        # Singularity: p = +90 deg; conventionally fix r = 0
        p = pi / 2
        r = 0
        w = atan2(H[1, 2], H[1, 1])
    else:
        # Regular case: recover each angle from sin/cos pairs
        p = atan2(-H[2, 0], sqrt(H[0, 0] * H[0, 0] + H[1, 0] * H[1, 0]))
        w = atan2(H[1, 0], H[0, 0])
        r = atan2(H[2, 1], H[2, 2])
    return [x, y, z, w * 180 / pi, p * 180 / pi, r * 180 / pi]
def KUKA_2_Pose(xyzrpw):
    """Converts a KUKA XYZABC target to a pose (4x4 matrix), required by KUKA KRC controllers.

    :param xyzrpw: [x,y,z,r,p,w] in mm and deg

    .. seealso:: :func:`~robodk.Pose_2_KUKA`, :func:`~robodk.PosePP`
    """
    [x, y, z, r, p, w] = xyzrpw
    # Convert the three angles to radians
    a = r * math.pi / 180.0
    b = p * math.pi / 180.0
    c = w * math.pi / 180.0
    # Precompute sines and cosines used by the rotation matrix below
    ca = math.cos(a)
    sa = math.sin(a)
    cb = math.cos(b)
    sb = math.sin(b)
    cc = math.cos(c)
    sc = math.sin(c)
    return Mat([[cb * ca, ca * sc * sb - cc * sa, sc * sa + cc * ca * sb, x], [cb * sa, cc * ca + sc * sb * sa, cc * sb * sa - ca * sc, y], [-sb, cb * sc, cc * cb, z], [0.0, 0.0, 0.0, 1.0]])
def Adept_2_Pose(xyzrpw):
    """Converts an Adept XYZRPW target to a pose (4x4 matrix).

    :param xyzrpw: [x,y,z,r,p,w] in mm and deg

    .. seealso:: :func:`~robodk.Pose_2_Adept`, :func:`~robodk.Comau_2_Pose`
    """
    [x, y, z, r, p, w] = xyzrpw
    # Convert the three angles to radians
    a = r * math.pi / 180.0
    b = p * math.pi / 180.0
    c = w * math.pi / 180.0
    # Precompute sines and cosines used by the rotation matrix below
    ca = math.cos(a)
    sa = math.sin(a)
    cb = math.cos(b)
    sb = math.sin(b)
    cc = math.cos(c)
    sc = math.sin(c)
    return Mat([[ca * cb * cc - sa * sc, -cc * sa - ca * cb * sc, ca * sb, x], [ca * sc + cb * cc * sa, ca * cc - cb * sa * sc, sa * sb, y], [-cc * sb, sb * sc, cb, z], [0.0, 0.0, 0.0, 1.0]])
def Pose_2_Adept(H):
    """Converts a pose to an Adept XYZRPW target (mm and deg).

    :param H: pose
    :type H: :class:`.Mat`

    .. seealso:: :func:`~robodk.Adept_2_Pose`, :func:`~robodk.Pose_2_Comau`
    """
    x = H[0, 3]
    y = H[1, 3]
    z = H[2, 3]
    # H[2,2] = cos(p): the two branches below handle the singularities
    # where p = 0 or 180 deg and r/w are no longer independent
    if H[2, 2] > (1.0 - 1e-10):
        # Singularity: p = 0; conventionally fix r = 0
        r = 0
        p = 0
        w = atan2(H[1, 0], H[0, 0])
    elif H[2, 2] < (-1.0 + 1e-10):
        # Singularity: p = 180 deg; conventionally fix r = 0
        r = 0
        p = pi
        w = atan2(H[1, 0], H[1, 1])
    else:
        # Regular case: recover each angle from its sin/cos pair
        cb = H[2, 2]
        sb = +sqrt(1 - cb * cb)
        sc = H[2, 1] / sb
        cc = -H[2, 0] / sb
        sa = H[1, 2] / sb
        ca = H[0, 2] / sb
        r = atan2(sa, ca)
        p = atan2(sb, cb)
        w = atan2(sc, cc)
    return [x, y, z, r * 180 / pi, p * 180 / pi, w * 180 / pi]
def Comau_2_Pose(xyzrpw):
    """Convert a Comau XYZRPW target (mm and deg) to a pose (4x4 matrix);
    Comau PDL programs use the same angle convention as Adept.

    .. seealso:: :func:`~robodk.Pose_2_Comau`
    """
    pose = Adept_2_Pose(xyzrpw)
    return pose
def Pose_2_Comau(H):
    """Convert a pose to a Comau XYZRPW target (mm and deg), the
    representation required by PDL Comau programs (same as Adept).

    :param H: pose
    :type H: :class:`.Mat`

    .. seealso:: :func:`~robodk.Comau_2_Pose`
    """
    return Pose_2_Adept(H)
def Pose_2_Nachi(pose):
    """Converts a pose to a Nachi XYZRPW target

    :param pose: pose
    :type pose: :class:`.Mat`
    .. seealso:: :class:`.Mat`, :func:`~robodk.Nachi_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`
    """
    xyzrpw = pose_2_xyzrpw(pose)
    # Nachi expects the rotation components in reversed order: [x, y, z, w, p, r]
    return xyzrpw[0:3] + [xyzrpw[5], xyzrpw[4], xyzrpw[3]]
def Nachi_2_Pose(xyzwpr):
    """Converts a Nachi XYZRPW target to a pose (4x4 matrix)

    :param xyzwpr: target as a list of 6 values
    .. seealso:: :class:`.Mat`, :func:`~robodk.Pose_2_Nachi`, :func:`~robodk.TxyzRxyz_2_Pose`
    """
    # Delegates to the generic XYZRPW conversion
    pose = xyzrpw_2_pose(xyzwpr)
    return pose
def pose_2_quaternion(Ti):
    """Returns the quaternion orientation vector of a pose (4x4 matrix)

    :param Ti: pose
    :type Ti: :class:`.Mat`
    :return: [q1, q2, q3, q4] quaternion components
    .. seealso:: :class:`.Mat`, :func:`~robodk.quaternion_2_pose`
    """
    t_xx = (Ti[0, 0])
    t_yy = (Ti[1, 1])
    t_zz = (Ti[2, 2])
    # The off-diagonal differences determine the sign of each vector component
    s2 = -1 if (Ti[2, 1] - Ti[1, 2]) < 0 else 1
    s3 = -1 if (Ti[0, 2] - Ti[2, 0]) < 0 else 1
    s4 = -1 if (Ti[1, 0] - Ti[0, 1]) < 0 else 1
    # max(..., 0) guards against tiny negative values caused by rounding noise
    q1 = sqrt(max(t_xx + t_yy + t_zz + 1, 0)) / 2
    q2 = s2 * sqrt(max(t_xx - t_yy - t_zz + 1, 0)) / 2
    q3 = s3 * sqrt(max(-t_xx + t_yy - t_zz + 1, 0)) / 2
    q4 = s4 * sqrt(max(-t_xx - t_yy + t_zz + 1, 0)) / 2
    return [q1, q2, q3, q4]
def Pose_Split(pose1, pose2, delta_mm=1.0):
    """Create a sequence of poses that transitions from pose1 to pose2 by steps of delta_mm in mm (the first and last pose are not included in the list)"""
    # Express pose2 relative to pose1 and measure the translation distance
    rel_pose = invH(pose1) * pose2
    dist = norm(rel_pose.Pos())
    if dist <= delta_mm:
        # Already within one step: nothing intermediate to generate
        return [pose2]
    steps = max(1, int(dist / delta_mm))
    # Per-step increment of the relative motion in UR (position + rotation vector) form
    increment = [component / steps for component in Pose_2_UR(rel_pose)]
    return [pose1 * UR_2_Pose([component * (i + 1) for component in increment]) for i in range(steps - 1)]
def quaternion_2_pose(qin):
    """Returns the pose orientation matrix (4x4 matrix) given a quaternion orientation vector

    :param list qin: quaternions as 4 float values
    .. seealso:: :class:`.Mat`, :func:`~robodk.pose_2_quaternion`
    """
    qnorm = sqrt(qin[0] * qin[0] + qin[1] * qin[1] + qin[2] * qin[2] + qin[3] * qin[3])
    q = qin
    # Normalize in place (matches the historical behavior of this function: qin is modified)
    for i in range(4):
        q[i] = q[i] / qnorm
    # Standard unit quaternion to rotation matrix expansion; the position is left at zero
    pose = Mat([[1 - 2 * q[2] * q[2] - 2 * q[3] * q[3], 2 * q[1] * q[2] - 2 * q[3] * q[0], 2 * q[1] * q[3] + 2 * q[2] * q[0], 0],
                [2 * q[1] * q[2] + 2 * q[3] * q[0], 1 - 2 * q[1] * q[1] - 2 * q[3] * q[3], 2 * q[2] * q[3] - 2 * q[1] * q[0], 0],
                [2 * q[1] * q[3] - 2 * q[2] * q[0], 2 * q[2] * q[3] + 2 * q[1] * q[0], 1 - 2 * q[1] * q[1] - 2 * q[2] * q[2], 0],
                [0, 0, 0, 1]])
    return pose
def Pose_2_ABB(H):
    """Converts a pose to an ABB target (using quaternion representation).

    :param H: pose
    :type H: :class:`.Mat`
    :return: [x, y, z, q1, q2, q3, q4]
    .. seealso:: :class:`.Mat`, :func:`~robodk.pose_2_quaternion`
    """
    # Position from the last column, orientation as a quaternion
    return [H[0, 3], H[1, 3], H[2, 3]] + pose_2_quaternion(H)
def print_pose_ABB(pose):
    """Displays an ABB RAPID target (the same way it is displayed in IRC5 controllers).

    :param pose: pose
    :type pose: :class:`.Mat`
    """
    q = pose_2_quaternion(pose)
    # RAPID robtarget layout: [[x,y,z],[q1,q2,q3,q4]]
    values = (pose[0, 3], pose[1, 3], pose[2, 3], q[0], q[1], q[2], q[3])
    print('[[%.3f,%.3f,%.3f],[%.6f,%.6f,%.6f,%.6f]]' % values)
def Pose_2_UR(pose):
    """Calculate the p[x,y,z,u,v,w] position with rotation vector for a pose target. This is the same format required by Universal Robot controllers.

    :param pose: pose
    :type pose: :class:`.Mat`
    :return: [x, y, z, u, v, w] where (u, v, w) is the axis-angle rotation vector in radians
    .. seealso:: :class:`.Mat`, :func:`~robodk.UR_2_Pose`
    """
    TOLERANCE = 1e-8

    def clamp_unit(value):
        # Keep the acos argument inside its valid [-1, 1] domain (guards rounding noise)
        return min(max(value, -1.0), 1.0)

    angle = acos(clamp_unit((pose[0, 0] + pose[1, 1] + pose[2, 2] - 1) * 0.5))
    axis = [pose[2, 1] - pose[1, 2], pose[0, 2] - pose[2, 0], pose[1, 0] - pose[0, 1]]
    if angle < TOLERANCE:
        # No rotation: null rotation vector
        axis = [0, 0, 0]
    elif abs(sin(angle)) < TOLERANCE or norm(axis) < TOLERANCE:
        # Singular case (angle near pi): recover the axis from the dominant diagonal entry
        diag = [pose[0, 0], pose[1, 1], pose[2, 2]]
        mx = max(diag)
        dominant = diag.index(mx)
        if dominant == 0:
            axis = [pose[0, 0] + 1, pose[1, 0], pose[2, 0]]
        elif dominant == 1:
            axis = [pose[0, 1], pose[1, 1] + 1, pose[2, 1]]
        else:
            axis = [pose[0, 2], pose[1, 2], pose[2, 2] + 1]
        axis = mult3(axis, angle / (sqrt(max(0, 2 * (1 + mx)))))
    else:
        # Regular case: unit axis scaled by the rotation angle
        axis = mult3(normalize3(axis), angle)
    return [pose[0, 3], pose[1, 3], pose[2, 3], axis[0], axis[1], axis[2]]
def UR_2_Pose(xyzwpr):
    """Calculate the pose target given a p[x,y,z,u,v,w] cartesian target with rotation vector. This is the same format required by Universal Robot controllers.

    :param xyzwpr: position (mm) and rotation vector (radians) as [x, y, z, u, v, w]
    .. seealso:: :class:`.Mat`, :func:`~robodk.Pose_2_UR`
    """
    x, y, z, w, p, r = xyzwpr
    rot_vec = [w, p, r]
    angle = norm(rot_vec)
    half_cos = cos(0.5 * angle)
    if angle == 0.0:
        # Null rotation vector: identity orientation
        vec_part = [0.0, 0.0, 0.0]
    else:
        # Quaternion vector part: axis scaled by sin(angle/2)
        vec_part = mult3(rot_vec, sin(0.5 * angle) / angle)
    pose = quaternion_2_pose([half_cos] + vec_part)
    pose.setPos([x, y, z])
    return pose
#----------------------------------------------------
#-------- ROBOT MODEL (D-H and D-H M) ---------------
def dh(rz, tx=None, tz=None, rx=None):
    """Returns the Denavit-Hartenberg 4x4 matrix for a robot link.
    calling dh(rz,tx,tz,rx) is the same as using rotz(rz)*transl(tx,0,tz)*rotx(rx)
    calling dh(rz,tx,tz,rx) is the same as calling dh([rz,tx,tz,rx])
    """
    if tx is None:
        # A single list argument carries all four D-H parameters
        rz, tx, tz, rx = rz
    crx, srx = math.cos(rx), math.sin(rx)
    crz, srz = math.cos(rz), math.sin(rz)
    return Mat([[crz, -srz * crx, srz * srx, tx * crz],
                [srz, crz * crx, -crz * srx, tx * srz],
                [0, srx, crx, tz],
                [0, 0, 0, 1]])
def dhm(rx, tx=None, tz=None, rz=None):
    """Returns the Denavit-Hartenberg Modified 4x4 matrix for a robot link (Craig 1986).
    calling dhm(rx,tx,tz,rz) is the same as using rotx(rx)*transl(tx,0,tz)*rotz(rz)
    calling dhm(rx,tx,tz,rz) is the same as calling dhm([rx,tx,tz,rz])
    """
    if tx is None:
        # A single list argument carries all four modified D-H parameters
        rx, tx, tz, rz = rx
    crx, srx = math.cos(rx), math.sin(rx)
    crz, srz = math.cos(rz), math.sin(rz)
    return Mat([[crz, -srz, 0, tx],
                [crx * srz, crx * crz, -srx, -tz * srx],
                [srx * srz, crz * srx, crx, tz * crx],
                [0, 0, 0, 1]])
def joints_2_angles(jin, type):
    """Converts the robot encoders into angles between links depending on the type of the robot.

    :param jin: robot joint values (list of at least 6 values)
    :param type: robot kinematics type id (2, 3, 4 or 11; any other value returns the joints unchanged)
    :return: a new list with the converted link angles (the input list is left unmodified)
    """
    # Work on a copy so the caller's joint list is not mutated as a hidden side effect
    jout = list(jin)
    if type == 2:
        jout[2] = -jin[1] - jin[2]
        jout[3] = -jin[3]
        jout[4] = -jin[4]
        jout[5] = -jin[5]
    elif type == 3:
        jout[2] = -jin[2]
        jout[3] = -jin[3]
        jout[4] = -jin[4]
        jout[5] = -jin[5]
    elif type == 4:
        jout[2] = +jin[1] + jin[2]
    elif type == 11:
        jout[2] = -jin[1] - jin[2]
        jout[0] = -jin[0]
        jout[3] = -jin[3]
        jout[5] = -jin[5]
    return jout
def angles_2_joints(jin, type):
    """Converts the angles between links into the robot motor space depending on the type of the robot.

    :param jin: link angle values (list of at least 6 values)
    :param type: robot kinematics type id (2, 3 or 11; any other value returns the angles unchanged)
    :return: a new list with the converted joint values (the input list is left unmodified)
    """
    # Work on a copy so the caller's angle list is not mutated as a hidden side effect
    jout = list(jin)
    if type == 2:
        jout[2] = -jin[1] - jin[2]
        jout[3] = -jin[3]
        jout[4] = -jin[4]
        jout[5] = -jin[5]
    elif type == 3:
        jout[2] = -jin[2]
        jout[3] = -jin[3]
        jout[4] = -jin[4]
        jout[5] = -jin[5]
    elif type == 11:
        jout[2] = -jin[1] - jin[2]
        jout[0] = -jin[0]
        jout[3] = -jin[3]
        jout[5] = -jin[5]
    return jout
#----------------------------------------------------
#-------- Useful geometric tools ---------------
def norm(p):
    """Returns the Euclidean norm of a 3D vector (only the first 3 components are used)."""
    a, b, c = p[0], p[1], p[2]
    return sqrt(a * a + b * b + c * c)
def normalize3(a):
    """Returns the unitary vector (first 3 components scaled to unit length)."""
    # Inlined norm: 1 / sqrt(x^2 + y^2 + z^2)
    norminv = 1.0 / sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2])
    return [a[0] * norminv, a[1] * norminv, a[2] * norminv]
def cross(a, b):
    """Returns the cross product of two 3D vectors."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return [ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx]
def dot(a, b):
    """Returns the dot product of two 3D vectors (only the first 3 components are used)."""
    return sum(ai * bi for ai, bi in zip(a[:3], b[:3]))
def angle3(a, b):
    """Returns the angle in radians between two 3D vectors."""
    cosine = dot(normalize3(a), normalize3(b))
    # Clamp to [-1, 1] so rounding noise cannot push acos out of its domain
    return acos(min(1.0, max(-1.0, cosine)))
def pose_angle(pose):
    """Returns the rotation angle in radians of a 4x4 matrix pose.

    :param pose: pose
    :type pose: :class:`.Mat`"""
    # angle = acos((trace(R) - 1) / 2), clamped against rounding noise
    trace3 = pose[0, 0] + pose[1, 1] + pose[2, 2]
    cos_ang = min(max((trace3 - 1) / 2, -1), 1)
    return acos(cos_ang)
def pose_angle_between(pose1, pose2):
    """Returns the angle in radians between two poses (4x4 matrix pose)."""
    # Angle of the relative rotation from pose1 to pose2
    relative = invH(pose1) * pose2
    return pose_angle(relative)
def mult3(v, d):
    """Multiplies a 3D vector by a scalar."""
    return [v[i] * d for i in range(3)]
def subs3(a, b):
    """Subtracts two 3D vectors: c = a - b."""
    return [a[i] - b[i] for i in range(3)]
def add3(a, b):
    """Adds two 3D vectors: c = a + b."""
    return [a[i] + b[i] for i in range(3)]
def distance(a, b):
    """Calculates the Euclidean distance between two 3D points."""
    # Inlined norm(subs3(a, b))
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    return sqrt(dx * dx + dy * dy + dz * dz)
def pose_is_similar(a, b, tolerance=0.1):
    """Check if two poses are similar. Returns True if the combined position (mm) and angular (deg) difference is below the tolerance (0.1 mm + deg by default)."""
    # Combined metric: translation distance in mm plus rotation difference in degrees
    delta = distance(a.Pos(), b.Pos()) + pose_angle_between(a, b) * 180 / pi
    return delta < tolerance
def intersect_line_2_plane(pline, vline, pplane, vplane):
    """Calculates the intersection between a line and a plane.

    :param pline: a point on the line
    :param vline: the line direction vector
    :param pplane: a point on the plane
    :param vplane: the plane normal vector
    """
    # Plane equation offset: dot(n, x) + plane_d = 0
    plane_d = -dot(vplane, pplane)
    # Parametric distance along the line to the intersection
    t = -(plane_d + dot(vplane, pline)) / dot(vplane, vline)
    return add3(pline, mult3(vline, t))
def proj_pt_2_plane(point, planepoint, planeABC):
    """Projects a point onto a plane (along the plane normal)."""
    # The projection is the intersection of the normal line through the point with the plane
    normal = planeABC
    return intersect_line_2_plane(point, normal, planepoint, normal)
def proj_pt_2_line(point, paxe, vaxe):
    """Projects a point onto a line.

    :param point: point to project
    :param paxe: a point on the line
    :param vaxe: the line direction vector
    """
    to_point = subs3(point, paxe)
    # Parametric position of the projection along the line direction
    t = dot(vaxe, to_point) / dot(vaxe, vaxe)
    return add3(paxe, mult3(vaxe, t))
def fitPlane(points):
    """Best fits a plane to a cloud of points (least squares).

    :param points: list of 3D points (one point per row)
    :return: (pplane, vplane) — a point on the plane (on the Z axis) and the plane normal
    """
    import numpy as np
    xyz = np.array(points)
    n_points = xyz.shape[0]
    # Homogeneous system A*B = 0 where B = [a, b, c, d] are the plane
    # coefficients of a*X + b*Y + c*Z + d = 0
    A = np.hstack([xyz, np.ones((n_points, 1))])
    # The solution is the right-singular vector of the smallest singular value
    vh = np.linalg.svd(A, 0)[2]
    B = vh[3, :]
    B = B / np.linalg.norm(B[0:3])
    # Point on the plane taken on the Z axis (x = y = 0)
    pplane = [0, 0, -(B[3] / B[2])]
    vplane = B[0:3].tolist()
    return pplane, vplane
#----------------------------------------------------
#-------- Mat matrix class ---------------
class MatrixError(Exception):
    """Exception raised by :class:`.Mat` operations (e.g. size mismatches or invalid products)."""
    pass
class Mat(object):
    """Mat is a matrix object. The main purpose of this object is to represent a pose in the 3D space (position and orientation).
    A pose is a 4x4 matrix that represents the position and orientation of one reference frame with respect to another one, in the 3D space.
    Poses are commonly used in robotics to place objects, reference frames and targets with respect to each other.
    .. seealso:: :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose_2_ABB`, :func:`~robodk.Pose_2_Adept`, :func:`~robodk.Adept_2_Pose`, :func:`~robodk.Pose_2_Comau`, :func:`~robodk.Pose_2_Fanuc`, :func:`~robodk.Pose_2_KUKA`, :func:`~robodk.KUKA_2_Pose`, :func:`~robodk.Pose_2_Motoman`, :func:`~robodk.Pose_2_Nachi`, :func:`~robodk.Pose_2_Staubli`, :func:`~robodk.Pose_2_UR`, :func:`~robodk.quaternion_2_pose`
    Example:
    .. code-block:: python
        from robolink import * # import the robolink library
        from robodk import * # import the robodk library
        RDK = Robolink() # connect to the RoboDK API
        robot = RDK.Item('', ITEM_TYPE_ROBOT) # Retrieve a robot available in RoboDK
        #target = RDK.Item('Target 1') # Retrieve a target (example)
        pose = robot.Pose() # retrieve the current robot position as a pose (position of the active tool with respect to the active reference frame)
        # target = target.Pose() # the same can be applied to targets (taught position)
        # Read the 4x4 pose matrix as [X,Y,Z , A,B,C] Euler representation (mm and deg): same representation as KUKA robots
        XYZABC = Pose_2_KUKA(pose)
        print(XYZABC)
        # Read the 4x4 pose matrix as [X,Y,Z, q1,q2,q3,q4] quaternion representation (position in mm and orientation in quaternion): same representation as ABB robots (RAPID programming)
        xyzq1234 = Pose_2_ABB(pose)
        print(xyzq1234)
        # Read the 4x4 pose matrix as [X,Y,Z, u,v,w] representation (position in mm and orientation vector in radians): same representation as Universal Robots
        xyzuvw = Pose_2_UR(pose)
        print(xyzuvw)
        x,y,z,a,b,c = XYZABC # Use the KUKA representation (for example) and calculate a new pose based on the previous pose
        XYZABC2 = [x,y,z+50,a,b,c+45]
        pose2 = KUKA_2_Pose(XYZABC2) # Convert the XYZABC array to a pose (4x4 matrix)
        robot.MoveJ(pose2) # Make a joint move to the new position
        # target.setPose(pose2) # We can also update the pose to targets, tools, reference frames, objects, ...
    """

    def __init__(self, rows=None, ncols=None):
        """Create a matrix.

        With no arguments: a 4x4 zero matrix. With a list of lists: one inner list per row.
        With a flat list: a column vector. With another :class:`.Mat`: a copy.
        With (rows, ncols) integers: a zero matrix of that size."""
        if ncols is None:
            if rows is None:
                m = 4
                n = 4
                self.rows = [[0] * n for x in range(m)]
            else:
                if isinstance(rows, Mat):
                    rows = rows.copy().rows
                m = len(rows)
                transpose = 0
                if isinstance(rows, list) and len(rows) == 0:
                    # Check empty matrix
                    self.rows = [[]]
                    n = 0
                    return
                if not isinstance(rows[0], list):
                    # A flat list is interpreted as a column vector
                    rows = [rows]
                    transpose = 1
                n = len(rows[0])
                if any([len(row) != n for row in rows[1:]]):  # Validity check
                    # make the size uniform (fill blanks with zeros)
                    n = max([len(i) for i in rows])
                    for row in rows:
                        row += [0] * (n - len(row))
                self.rows = rows
                if transpose:
                    self.rows = [list(item) for item in zip(*self.rows)]
        else:
            m = max(rows, 0)
            n = max(ncols, 0)
            if m == 0:
                m = 1
                n = 0
            self.rows = [[0] * n for x in range(m)]

    def __iter__(self):
        """Iterate over the matrix columns."""
        if self.size(0) == 0 or self.size(1) == 0:
            return iter([])
        return iter(self.tr().rows)

    def copy(self):
        """Return a deep copy of this matrix."""
        sz = self.size()
        newmat = Mat(sz[0], sz[1])
        for i in range(sz[0]):
            for j in range(sz[1]):
                newmat[i, j] = self[i, j]
        return newmat

    def __len__(self):
        """Return the number of columns"""
        return len(self.rows[0])

    def ColsCount(self):
        """Return the number of coumns. Same as len().
        .. seealso:: :func:`~Mat.Cols`, :func:`~Mat.Rows`, :func:`~Mat.RowsCount`
        """
        return len(self.rows[0])

    def RowsCount(self):
        """Return the number of rows
        .. seealso:: :func:`~Mat.Cols`, :func:`~Mat.Rows`, :func:`~Mat.ColsCount`
        """
        # BUGFIX: previously returned len(self.rows[0]), which is the COLUMN count
        return len(self.rows)

    def Cols(self):
        """Retrieve the matrix as a list of columns (list of list of float).
        .. seealso:: :func:`~Mat.Rows`, :func:`~Mat.ColsCount`, :func:`~Mat.RowsCount`
        Example:
        .. code-block:: python
            >>> transl(10,20,30).Rows()
            [[1, 0, 0, 10], [0, 1, 0, 20], [0, 0, 1, 30], [0, 0, 0, 1]]
            >>> transl(10,20,30).Cols()
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [10, 20, 30, 1]]
        """
        return self.tr().rows

    def Rows(self):
        """Get the matrix as a list of lists
        .. seealso:: :func:`~Mat.Cols`, :func:`~Mat.ColsCount`, :func:`~Mat.RowsCount`
        """
        return self.rows

    def __getitem__(self, idx):
        """Index the matrix: A[i] (row as column vector), A[i:j] (row slice), A[i,j] (element) or A[i1:i2, j1:j2] (submatrix)."""
        if isinstance(idx, int):  # integer A[1]
            return tr(Mat(self.rows[idx]))
        elif isinstance(idx, slice):  # one slice: A[1:3]
            return Mat(self.rows[idx])
        else:  # two slices: A[1:3,1:3]
            idx1 = idx[0]
            idx2 = idx[1]
            if isinstance(idx1, int) and isinstance(idx2, int):
                return self.rows[idx1][idx2]
            matsize = self.size()
            if isinstance(idx1, slice):
                indices1 = idx1.indices(matsize[0])
                rg1 = range(*indices1)
            else:  # is int
                rg1 = range(idx1, idx1 + 1)
            if isinstance(idx2, slice):
                indices2 = idx2.indices(matsize[1])
                rg2 = range(*indices2)
            else:  # is int
                rg2 = range(idx2, idx2 + 1)
            newm = rg1
            newn = rg2
            newmat = Mat(len(newm), len(newn))
            cm = 0
            for i in rg1:
                cn = 0
                for j in rg2:
                    newmat.rows[cm][cn] = self.rows[i][j]
                    cn = cn + 1
                cm = cm + 1
            return newmat

    def __setitem__(self, idx, item):
        """Assign a scalar, list or :class:`.Mat` to an index/slice pair; the sizes must match."""
        if isinstance(item, float) or isinstance(item, int):
            item = Mat([[item]])
        elif isinstance(item, list):
            item = Mat(item)
        matsize = self.size()
        if isinstance(idx, int):  # integer A[1]
            idx1 = idx
            idx2 = 0
        elif isinstance(idx, slice):  # one slice: A[1:3]
            idx1 = idx
            idx2 = 0
        else:
            idx1 = idx[0]
            idx2 = idx[1]
        # at this point we have two slices: example A[1:3,1:3]
        if isinstance(idx1, slice):
            indices1 = idx1.indices(matsize[0])
            rg1 = range(*indices1)
        else:  # is int
            rg1 = range(idx1, idx1 + 1)
        if isinstance(idx2, slice):
            indices2 = idx2.indices(matsize[1])
            rg2 = range(*indices2)
        else:  # is int
            rg2 = range(idx2, idx2 + 1)
        newm = rg1
        newn = rg2
        itmsz = item.size()
        if len(newm) != itmsz[0] or len(newn) != itmsz[1]:
            raise Exception(MatrixError, "Submatrix indices does not match the new matrix sizes", itmsz[0], "x", itmsz[1], "<-", newm, "x", newn)
        cm = 0
        for i in rg1:
            cn = 0
            for j in rg2:
                self.rows[i][j] = item.rows[cm][cn]
                cn = cn + 1
            cm = cm + 1

    def __str__(self):
        """Readable display; homogeneous matrices are prefixed with the pose as XYZ (mm) and Euler angles (deg)."""
        str_add = ''
        if self.isHomogeneous():
            x, y, z, rx, ry, rz = Pose_2_TxyzRxyz(self)
            str_add = 'Pose(%.3f, %.3f, %.3f, %.3f, %.3f, %.3f):\n' % (x, y, z, rx * 180 / pi, ry * 180 / pi, rz * 180 / pi)
        s = '\n [ '.join([(', '.join([('%.3f' % item if type(item) == float else str(item)) for item in row]) + ' ],') for row in self.rows])
        return str_add + '[[ ' + s[:-1] + ']\n'

    def __repr__(self):
        s = str(self)
        rank = str(self.size())
        rep = "Matrix: %s\n%s" % (rank, s)
        return rep

    def tr(self):
        """Returns the transpose of the matrix"""
        if self.size(0) == 0 or self.size(1) == 0:
            return Mat(0, 0)
        mat = Mat([list(item) for item in zip(*self.rows)])
        return mat

    def size(self, dim=None):
        """Returns the size of a matrix (m,n).
        Dim can be set to 0 to return m (rows) or 1 to return n (columns)"""
        m = len(self.rows)
        if m > 0:
            n = len(self.rows[0])
        else:
            n = 0
        if dim is None:
            return (m, n)
        elif dim == 0:
            return m
        elif dim == 1:
            return n
        else:
            raise Exception(MatrixError, "Invalid dimension!")

    def catV(self, mat2):
        """Concatenate with another matrix (vertical concatenation)"""
        if not isinstance(mat2, Mat):
            # Lists are converted and concatenated as a column vector
            return self.catH(Mat(mat2).tr())
        sz1 = self.size()
        sz2 = mat2.size()
        if sz1[1] != sz2[1]:
            raise Exception(MatrixError, "Horizontal size of matrices does not match")
        newmat = Mat(sz1[0] + sz2[0], sz1[1])
        newmat[0:sz1[0], :] = self
        newmat[sz1[0]:, :] = mat2
        return newmat

    def catH(self, mat2):
        """Concatenate with another matrix (horizontal concatenation)"""
        if not isinstance(mat2, Mat):
            # Lists are converted before concatenation
            return self.catH(Mat(mat2))
        sz1 = self.size()
        sz2 = mat2.size()
        if sz1[0] != sz2[0]:
            raise Exception(MatrixError, "Horizontal size of matrices does not match")
        newmat = Mat(sz1[0], sz1[1] + sz2[1])
        newmat[:, :sz1[1]] = self
        newmat[:, sz1[1]:] = mat2
        return newmat

    def __eq__(self, other):
        """Test equality (pose similarity within the default tolerance, not exact element equality)."""
        if other is None:
            return False
        return pose_is_similar(other, self)

    def __ne__(self, other):
        return not (self == other)

    def __add__(self, mat):
        """Add a matrix or a scalar to this matrix and
        return the new matrix. It doesn't modify
        the current matrix"""
        if isinstance(mat, int) or isinstance(mat, float):
            m, n = self.size()
            result = Mat(m, n)
            for x in range(m):
                for y in range(n):
                    result.rows[x][y] = self.rows[x][y] + mat
            return result
        sz = self.size()
        m = sz[0]
        n = sz[1]
        ret = Mat(m, n)
        if sz != mat.size():
            raise Exception(MatrixError, "Can not add matrices of different sizes!")
        for x in range(m):
            row = [sum(item) for item in zip(self.rows[x], mat.rows[x])]
            ret.rows[x] = row
        return ret

    def __sub__(self, mat):
        """Subtract a matrix or a scalar from this matrix and
        return the new matrix. It doesn't modify
        the current matrix"""
        if isinstance(mat, int) or isinstance(mat, float):
            m, n = self.size()
            result = Mat(m, n)
            for x in range(m):
                for y in range(n):
                    result.rows[x][y] = self.rows[x][y] - mat
            return result
        sz = self.size()
        m = sz[0]
        n = sz[1]
        ret = Mat(m, n)
        if sz != mat.size():
            raise Exception(MatrixError, "Can not subtract matrices of different sizes!")
        for x in range(m):
            row = [item[0] - item[1] for item in zip(self.rows[x], mat.rows[x])]
            ret.rows[x] = row
        return ret

    def __mul__(self, mat):
        """Multiply this matrix by a scalar, a vector (list) or another matrix and
        return the new result. It doesn't modify
        the current matrix"""
        if isinstance(mat, int) or isinstance(mat, float):
            m, n = self.size()
            mulmat = Mat(m, n)
            for x in range(m):
                for y in range(n):
                    mulmat.rows[x][y] = self.rows[x][y] * mat
            return mulmat
        if isinstance(mat, list):  # case of a matrix times a vector
            szvect = len(mat)
            m = self.size(0)
            matvect = Mat(mat)
            if szvect + 1 == m:
                # BUGFIX: was a call to a nonexistent global catV() -> NameError
                vectok = matvect.catV(Mat([[1]]))
                result = self * vectok
                return (result[:-1, :]).tr().rows[0]
            elif szvect == m:
                result = self * Mat(matvect)
                return result.tr().rows[0]
            else:
                raise Exception(MatrixError, "Invalid product")
        else:
            matm, matn = mat.size()
            m, n = self.size()
            if (n != matm):
                raise Exception(MatrixError, "Matrices cannot be multipled (unexpected size)!")
            mat_t = mat.tr()
            mulmat = Mat(m, matn)
            for x in range(m):
                for y in range(mat_t.size(0)):
                    mulmat.rows[x][y] = sum([item[0] * item[1] for item in zip(self.rows[x], mat_t.rows[y])])
            return mulmat

    def eye(self, m=4):
        """Make identity matrix of size (mxm)"""
        rows = [[0] * m for x in range(m)]
        idx = 0
        for row in rows:
            row[idx] = 1
            idx += 1
        return Mat(rows)

    def isHomogeneous(self):
        """returns 1 if it is a Homogeneous matrix"""
        m, n = self.size()
        if m != 4 or n != 4:
            return False
        # Check that the rotation part is orthonormal: R * R^T must be the identity
        test = self[0:3, 0:3]
        test = test * test.tr()
        test[0, 0] = test[0, 0] - 1.0
        test[1, 1] = test[1, 1] - 1.0
        test[2, 2] = test[2, 2] - 1.0
        zero = 0.0
        for x in range(3):
            for y in range(3):
                zero = zero + abs(test[x, y])
        if zero > 1e-4:
            return False
        return True

    def RelTool(self, x, y, z, rx=0, ry=0, rz=0):
        """Calculates a target relative with respect to the tool coordinates.
        X,Y,Z are in mm, W,P,R are in degrees. The behavior of this function is the same as ABB's RAPID RelTool command."""
        return RelTool(self, x, y, z, rx, ry, rz)

    def Offset(self, x, y, z, rx=0, ry=0, rz=0):
        """Calculates a relative target with respect to the reference frame coordinates.
        X,Y,Z are in mm, W,P,R are in degrees."""
        return Offset(self, x, y, z, rx, ry, rz)

    def invH(self):
        """Returns the inverse of this pose (homogeneous matrix assumed)"""
        if not self.isHomogeneous():
            raise Exception(MatrixError, "Pose matrix is not homogeneous. invH() can only compute the inverse of a homogeneous matrix")
        # For a homogeneous pose: inv([R t; 0 1]) = [R^T, -R^T*t; 0 1]
        Hout = self.tr()
        Hout[3, 0:3] = Mat([[0, 0, 0]])
        Hout[0:3, 3] = (Hout[0:3, 0:3] * self[0:3, 3]) * (-1)
        return Hout

    def inv(self):
        """Returns the inverse of this pose (homogeneous matrix assumed)"""
        return self.invH()

    def tolist(self):
        """Returns the first column of the matrix as a list"""
        return tr(self).rows[0]

    def list(self):
        """Returns the first column of the matrix as a list"""
        return tr(self).rows[0]

    def list2(self):
        """Returns the matrix as list of lists (one list per column)"""
        return tr(self).rows

    def Pos(self):
        """Returns the position of a pose (assumes that a 4x4 homogeneous matrix is being used)"""
        return self[0:3, 3].tolist()

    def VX(self):
        """Returns the X vector of a pose (assumes that a 4x4 homogeneous matrix is being used)"""
        return self[0:3, 0].tolist()

    def VY(self):
        """Returns the Y vector of a pose (assumes that a 4x4 homogeneous matrix is being used)"""
        return self[0:3, 1].tolist()

    def VZ(self):
        """Returns the Z vector of a pose (assumes that a 4x4 homogeneous matrix is being used)"""
        return self[0:3, 2].tolist()

    def Rot33(self):
        """Returns the sub 3x3 rotation matrix"""
        return self[0:3, 0:3]

    def setPos(self, newpos):
        """Sets the XYZ position of a pose (assumes that a 4x4 homogeneous matrix is being used)"""
        self[0, 3] = newpos[0]
        self[1, 3] = newpos[1]
        self[2, 3] = newpos[2]
        return self

    def setVX(self, v_xyz):
        """Sets the VX vector of a pose, which is the first column of a homogeneous matrix (assumes that a 4x4 homogeneous matrix is being used)"""
        v_xyz = normalize3(v_xyz)
        self[0, 0] = v_xyz[0]
        self[1, 0] = v_xyz[1]
        self[2, 0] = v_xyz[2]
        return self

    def setVY(self, v_xyz):
        """Sets the VY vector of a pose, which is the second column of a homogeneous matrix (assumes that a 4x4 homogeneous matrix is being used)"""
        v_xyz = normalize3(v_xyz)
        self[0, 1] = v_xyz[0]
        self[1, 1] = v_xyz[1]
        self[2, 1] = v_xyz[2]
        return self

    def setVZ(self, v_xyz):
        """Sets the VZ vector of a pose, which is the third column of a homogeneous matrix (assumes that a 4x4 homogeneous matrix is being used)"""
        v_xyz = normalize3(v_xyz)
        self[0, 2] = v_xyz[0]
        self[1, 2] = v_xyz[1]
        self[2, 2] = v_xyz[2]
        return self

    def translationPose(self):
        """Return the translation pose of this matrix. The rotation returned is set to identity (assumes that a 4x4 homogeneous matrix is being used)"""
        return transl(self.Pos())

    def rotationPose(self):
        """Return the rotation pose of this matrix. The position returned is set to [0,0,0] (assumes that a 4x4 homogeneous matrix is being used)"""
        mat_rotation = Mat(self)
        mat_rotation.setPos([0, 0, 0])
        return mat_rotation

    def SaveCSV(self, strfile):
        """Save the :class:`.Mat` Matrix to a CSV (Comma Separated Values) file. The file can be easily opened as a spreadsheet such as Excel.
        .. seealso:: :func:`~Mat.SaveMat`, :func:`~robodk.SaveList`, :func:`~robodk.LoadList`, :func:`~robodk.LoadMat`
        """
        self.tr().SaveMat(strfile)

    def SaveMat(self, strfile, separator=','):
        """Save the :class:`.Mat` Matrix to a CSV or TXT file
        .. seealso:: :func:`~Mat.SaveCSV`, :func:`~robodk.SaveList`, :func:`~robodk.LoadList`, :func:`~robodk.LoadMat`
        """
        sz = self.size()
        m = sz[0]
        n = sz[1]
        file = open(strfile, 'w')
        for j in range(n):
            for i in range(m):
                file.write(('%.6f' + separator) % self.rows[i][j])
            file.write('\n')
        file.close()
#-------------------------------------------------------
# FTP TRANSFER Tools
def RemoveFileFTP(ftp, filepath):
    """Delete a file on a remote server.

    :param ftp: an active :class:`ftplib.FTP` connection
    :param filepath: remote path of the file to delete
    Failures are reported to stdout as a POPUP message instead of raising."""
    import ftplib
    try:
        ftp.delete(filepath)
    except ftplib.all_errors as e:
        import sys
        # Best-effort removal: report the error and continue rather than aborting the transfer
        print('POPUP: Could not remove file {0}: {1}'.format(filepath, e))
        sys.stdout.flush()
def RemoveDirFTP(ftp, path):
    """Recursively delete a directory tree on a remote server.

    :param ftp: an active :class:`ftplib.FTP` connection
    :param path: remote directory to delete
    Errors are reported to stdout instead of raising (best-effort cleanup)."""
    import ftplib
    # Remember the current working directory to restore it while recursing
    wd = ftp.pwd()
    try:
        names = ftp.nlst(path)
    except ftplib.all_errors as e:
        # some FTP servers complain when you try and list non-existent paths
        print('RemoveDirFTP: Could not remove folder {0}: {1}'.format(path, e))
        return
    for name in names:
        # Skip the self/parent entries some servers include in listings
        if os.path.split(name)[1] in ('.', '..'): continue
        print('RemoveDirFTP: Checking {0}'.format(name))
        try:
            ftp.cwd(path + '/' + name)  # if we can cwd to it, it's a folder
            ftp.cwd(wd)  # don't try a nuke a folder we're in
            RemoveDirFTP(ftp, path + '/' + name)
        except ftplib.all_errors:
            # cwd failed, so treat the entry as a regular file and delete it
            ftp.delete(path + '/' + name)
    # Remove the (now empty) directory itself
    try:
        ftp.rmd(path)
    except ftplib.all_errors as e:
        print('RemoveDirFTP: Could not remove {0}: {1}'.format(path, e))
def UploadDirFTP(localpath, server_ip, remote_path, username, password):
    """Upload a folder to a robot through FTP recursively.

    :param localpath: local folder to upload
    :param server_ip: robot IP address or host name
    :param remote_path: remote folder that will contain the uploaded folder
    :param username: FTP user name
    :param password: FTP password
    :return: True if the transfer completed, False otherwise
    """
    import ftplib
    import os
    import sys
    main_folder = os.path.basename(os.path.normpath(localpath))
    print("POPUP: <p>Connecting to <strong>%s</strong> using user name <strong>%s</strong> and password ****</p><p>Please wait...</p>" % (server_ip, username))
    sys.stdout.flush()
    try:
        myFTP = ftplib.FTP(server_ip, username, password)
        print('Connection established')
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        error_str = sys.exc_info()[1]
        print("POPUP: <font color=\"red\">Connection to %s failed: <p>%s</p></font>" % (server_ip, error_str))
        sys.stdout.flush()
        pause(4)
        return False
    remote_path_prog = remote_path + '/' + main_folder
    myPath = r'%s' % localpath
    print("POPUP: Connected. Deleting existing files on %s..." % remote_path_prog)
    sys.stdout.flush()
    # Clear any previous copy of the folder before uploading
    RemoveDirFTP(myFTP, remote_path_prog)
    print("POPUP: Connected. Uploading program to %s..." % server_ip)
    sys.stdout.flush()
    try:
        myFTP.cwd(remote_path)
        myFTP.mkd(main_folder)
        myFTP.cwd(remote_path_prog)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        error_str = sys.exc_info()[1]
        print("POPUP: <font color=\"red\">Remote path not found or can't be created: %s</font>" % (remote_path))
        sys.stdout.flush()
        pause(4)
        return False

    def uploadThis(path):
        # Recursively upload every file and sub-folder of path; the FTP cwd
        # and the local cwd are advanced/restored in lockstep
        files = os.listdir(path)
        os.chdir(path)
        for f in files:
            # NOTE(review): the backslash join assumes a Windows-style local path — confirm on other platforms
            if os.path.isfile(path + r'\{}'.format(f)):
                print('  Sending file: %s' % f)
                print("POPUP: Sending file: %s" % f)
                sys.stdout.flush()
                fh = open(f, 'rb')
                myFTP.storbinary('STOR %s' % f, fh)
                fh.close()
            elif os.path.isdir(path + r'\{}'.format(f)):
                print('  Sending folder: %s' % f)
                myFTP.mkd(f)
                myFTP.cwd(f)
                uploadThis(path + r'\{}'.format(f))
        myFTP.cwd('..')
        os.chdir('..')
    uploadThis(myPath)  # now call the recursive function
    myFTP.close()
    print("POPUP: Folder trasfer completed: <font color=\"blue\">%s</font>" % remote_path)
    sys.stdout.flush()
    return True
def UploadFileFTP(file_path_name, server_ip, remote_path, username, password):
    """Upload a file to a robot through FTP.

    :param file_path_name: local path of the file to upload
    :param server_ip: robot IP address or host name
    :param remote_path: remote folder that will contain the file
    :param username: FTP user name
    :param password: FTP password
    :return: True if the transfer completed, False otherwise
    """
    filepath = getFileDir(file_path_name)
    filename = getBaseName(file_path_name)
    import ftplib
    import os
    import sys
    print("POPUP: <p>Connecting to <strong>%s</strong> using user name <strong>%s</strong> and password ****</p><p>Please wait...</p>" % (server_ip, username))
    sys.stdout.flush()
    try:
        myFTP = ftplib.FTP(server_ip, username, password)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        error_str = sys.exc_info()[1]
        print("POPUP: <font color=\"red\">Connection to %s failed: <p>%s</p></font>" % (server_ip, error_str))
        sys.stdout.flush()
        pause(4)
        return False
    remote_path_prog = remote_path + '/' + filename
    print("POPUP: Connected. Deleting remote file %s..." % remote_path_prog)
    sys.stdout.flush()
    # Remove any previous copy before uploading
    RemoveFileFTP(myFTP, remote_path_prog)
    print("POPUP: Connected. Uploading program to %s..." % server_ip)
    sys.stdout.flush()
    try:
        myFTP.cwd(remote_path)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        error_str = sys.exc_info()[1]
        print("POPUP: <font color=\"red\">Remote path not found or can't be created: %s</font>" % (remote_path))
        sys.stdout.flush()
        pause(4)
        return False

    def uploadThis(localfile, filename):
        # Transfer a single file in binary mode
        print('  Sending file: %s' % localfile)
        print("POPUP: Sending file: %s" % filename)
        sys.stdout.flush()
        fh = open(localfile, 'rb')
        myFTP.storbinary('STOR %s' % filename, fh)
        fh.close()
    uploadThis(file_path_name, filename)
    myFTP.close()
    print("POPUP: File trasfer completed: <font color=\"blue\">%s</font>" % remote_path_prog)
    sys.stdout.flush()
    return True
def UploadFTP(program, robot_ip, remote_path, ftp_user, ftp_pass, pause_sec=2):
    """Upload a program or a list of programs to the robot through FTP provided the connection parameters"""
    # A list of programs is transferred item by item (recursively), with no
    # pause between the individual transfers (pause_sec=0 for each item)
    if isinstance(program, list):
        if not program:
            print('POPUP: Nothing to transfer')
            sys.stdout.flush()
            pause(pause_sec)
            return
        for item in program:
            UploadFTP(item, robot_ip, remote_path, ftp_user, ftp_pass, 0)
        print("POPUP: <font color=\"blue\">Done: %i files and folders successfully transferred</font>" % len(program))
        sys.stdout.flush()
        pause(pause_sec)
        print("POPUP: Done")
        sys.stdout.flush()
        return
    import os
    # Single item: dispatch between a file and a folder transfer
    if os.path.isfile(program):
        print('Sending program file %s...' % program)
        UploadFileFTP(program, robot_ip, remote_path, ftp_user, ftp_pass)
    else:
        print('Sending program folder %s...' % program)
        UploadDirFTP(program, robot_ip, remote_path, ftp_user, ftp_pass)
    pause(pause_sec)
    print("POPUP: Done")
    sys.stdout.flush()
#------------------------------------------------------
#-------- TKinter dependencies ---------------
# Detect whether the tkinter GUI toolkit is usable. Catch ImportError
# (ModuleNotFoundError is a subclass of it) so a broken Tcl/Tk
# installation is detected as well, not only a fully missing module.
_tkinter_available = True
if sys.version_info[0] < 3:
    # Python 2.X only:
    try:
        import Tkinter as tkinter
        import tkFileDialog as filedialog
        import tkMessageBox as messagebox
    except ImportError:
        _tkinter_available = False
else:
    # Python 3.x only
    try:
        import tkinter
        from tkinter import filedialog
        from tkinter import messagebox
    except ImportError:
        _tkinter_available = False
#------------------
#------------------
if _tkinter_available:
def getOpenFile(path_preference="C:/RoboDK/Library/", strfile='', strtitle='Open file ...', defaultextension='.txt', filetypes=[('All files', '.*'), ('Text files', '.txt')]):
"""Pop up a file dialog window to select a file to open. Returns a file object opened in read-only mode. Use returned value.name to retrieve the file path."""
options = {}
options['initialdir'] = path_preference
options['title'] = strtitle
options['defaultextension'] = defaultextension #'.txt'
options['filetypes'] = filetypes # [('all files', '.*'), ('text files', '.txt')]
options['initialfile'] = strfile
root = tkinter.Tk()
root.withdraw()
root.attributes("-topmost", True)
file_path = filedialog.askopenfilename(**options)
# same as: file_path = tkinter.filedialog.askopenfilename()
return file_path
def getSaveFile(path_preference="C:/RoboDK/Library/", strfile='file.txt', strtitle='Save file as ...', defaultextension='.txt', filetypes=[('All files', '.*'), ('Text files', '.txt')]):
"""Pop up a file dialog window to select a file to save. Returns a file object opened in write-only mode. Use returned value.name to retrieve the file path."""
options = {}
options['initialdir'] = path_preference
options['title'] = strtitle
options['defaultextension'] = defaultextension #'.txt'
options['filetypes'] = filetypes # [('all files', '.*'), ('text files', '.txt')]
options['initialfile'] = strfile
#options['parent'] = root
root = tkinter.Tk()
root.withdraw()
root.attributes("-topmost", True)
file_path = filedialog.asksaveasfile(**options)
#same as: file_path = tkinter.filedialog.asksaveasfile(**options)
return file_path
def getOpenFileName(path_preference="C:/RoboDK/Library/", strfile='', strtitle='Open file ...', defaultextension='.txt', filetypes=[('All files', '.*'), ('Text files', '.txt')]):
"""Pop up a file dialog window to select a file to open. Returns the file path as a string."""
options = {}
options['initialdir'] = path_preference
options['title'] = strtitle
options['defaultextension'] = defaultextension #'.txt'
options['filetypes'] = filetypes # [('all files', '.*'), ('text files', '.txt')]
options['initialfile'] = strfile
root = tkinter.Tk()
root.withdraw()
root.attributes("-topmost", True)
file_path = filedialog.askopenfilename(**options)
# same as: file_path = tkinter.filedialog.askopenfilename()
return file_path
def getSaveFileName(path_preference="C:/RoboDK/Library/", strfile='file.txt', strtitle='Save file as ...', defaultextension='.txt', filetypes=[('All files', '.*'), ('Text files', '.txt')]):
"""Pop up a file dialog window to select a file to save. Returns the file path as a string."""
options = {}
options['initialdir'] = path_preference
options['title'] = strtitle
options['defaultextension'] = defaultextension #'.txt'
options['filetypes'] = filetypes # [('all files', '.*'), ('text files', '.txt')]
options['initialfile'] = strfile
#options['parent'] = root
root = tkinter.Tk()
root.withdraw()
root.attributes("-topmost", True)
file_path = filedialog.asksaveasfilename(**options)
#same as: file_path = tkinter.filedialog.asksaveasfile(**options)
return file_path
def getSaveFolder(path_programs='/', popup_msg='Select a directory to save your program'):
"""Ask the user to select a folder to save a program or other file. Returns the path of the folder as a string."""
root = tkinter.Tk()
root.withdraw()
root.attributes("-topmost", True)
dirname = filedialog.askdirectory(initialdir=path_programs, title=popup_msg)
if len(dirname) < 1:
dirname = None
return dirname
def getOpenFolder(path_preference="C:/RoboDK/Library/", strtitle='Open folder ...'):
"""Pop up a folder dialog window to select a folder to open. Returns the path of the folder as a string."""
options = {}
options['title'] = strtitle
options['initialdir'] = path_preference
root = tkinter.Tk()
root.withdraw()
root.attributes("-topmost", True)
file_path = filedialog.askdirectory(**options)
return file_path
def ShowMessage(msg, title=None):
"""Show a blocking message"""
print(msg)
if title is None:
title = msg
root = tkinter.Tk()
root.overrideredirect(1)
root.withdraw()
root.attributes("-topmost", True)
result = messagebox.showinfo(title, msg) #, icon='warning')#, parent=texto)
root.destroy()
return result
def ShowMessageYesNo(msg, title=None):
"""Show a blocking message and let the user answer Yes or No"""
print(msg)
if title is None:
title = msg
root = tkinter.Tk()
root.overrideredirect(1)
root.withdraw()
root.attributes("-topmost", True)
result = messagebox.askyesno(title, msg) #, icon='warning')#, parent=texto)
root.destroy()
return result
def ShowMessageYesNoCancel(msg, title=None):
"""Show a blocking message and let the user answer Yes, No or Cancel"""
print(msg)
if title is None:
title = msg
root = tkinter.Tk()
root.overrideredirect(1)
root.withdraw()
root.attributes("-topmost", True)
result = messagebox.askyesnocancel(title, msg) #, icon='warning')#, parent=texto)
root.destroy()
return result
    class MessageBox(object):
        """Small modal input dialog with a message, two buttons and an
        optional entry field. The value chosen by the user is stored in
        self.returning once the Tk main loop quits (see mbox below)."""

        def __init__(self, msg, b1, b2, frame, t, entry):
            """Build and lay out the dialog.

            :param msg: message to display (converted to str)
            :param b1: left button text, or a tuple (text, value to return)
            :param b2: right button text, or a tuple (text, value to return)
            :param frame: when False, remove the standard window decorations
            :param t: timeout in seconds (int/float) to auto-close, or a falsy value for no timeout
            :param entry: None for no entry box, True for an empty one, or a string used as initial content
            """
            root = self.root = tkinter.Tk()
            root.title('Input')
            self.msg = str(msg)
            # ctrl+c to copy self.msg
            root.bind('<Control-c>', func=self.to_clip)
            # remove the outer frame if frame=False
            if not frame: root.overrideredirect(True)
            # default values for the buttons to return
            self.b1_return = True
            self.b2_return = False
            # if b1 or b2 is a tuple unpack into the button text & return value
            if isinstance(b1, tuple): b1, self.b1_return = b1
            if isinstance(b2, tuple): b2, self.b2_return = b2
            # main frame
            frm_1 = tkinter.Frame(root)
            frm_1.pack(ipadx=2, ipady=2)
            # the message
            message = tkinter.Label(frm_1, text=self.msg)
            message.pack(padx=8, pady=8)
            # if entry=True create and set focus
            if entry is not None:
                if entry == True:
                    entry = ''
                self.entry = tkinter.Entry(frm_1)
                self.entry.pack()
                self.entry.insert(0, entry)
                self.entry.focus_set()
            # button frame
            frm_2 = tkinter.Frame(frm_1)
            frm_2.pack(padx=4, pady=4)
            # buttons
            btn_1 = tkinter.Button(frm_2, width=8, text=b1)
            btn_1['command'] = self.b1_action
            btn_1.pack(side='left')
            if not entry: btn_1.focus_set()
            btn_2 = tkinter.Button(frm_2, width=8, text=b2)
            btn_2['command'] = self.b2_action
            btn_2.pack(side='left')
            # the enter button will trigger the focused button's action
            btn_1.bind('<KeyPress-Return>', func=self.b1_action)
            btn_2.bind('<KeyPress-Return>', func=self.b2_action)
            # roughly center the box on screen
            # for accuracy see: http://stackoverflow.com/a/10018670/1217270
            root.update_idletasks()
            xp = (root.winfo_screenwidth() // 2) - (root.winfo_width() // 2)
            yp = (root.winfo_screenheight() // 2) - (root.winfo_height() // 2)
            geom = (root.winfo_width(), root.winfo_height(), xp, yp)
            root.geometry('{0}x{1}+{2}+{3}'.format(*geom))
            # call self.close_mod when the close button is pressed
            root.protocol("WM_DELETE_WINDOW", self.close_mod)
            # a trick to activate the window (on windows 7)
            #root.deiconify()
            # if t is specified: call time_out after t seconds
            if t: root.after(int(t * 1000), func=self.time_out)

        def b1_action(self, event=None):
            """Left-button handler: with no entry widget, return b1_return;
            with a non-empty entry, return its content; an empty entry keeps
            the dialog open."""
            try:
                x = self.entry.get()
            except AttributeError:
                # no entry widget was created (entry=None)
                self.returning = self.b1_return
                self.root.quit()
            else:
                if x:
                    self.returning = x
                    self.root.quit()

        def b2_action(self, event=None):
            """Right-button handler: always return b2_return."""
            self.returning = self.b2_return
            self.root.quit()

        # remove this function and the call to protocol
        # then the close button will act normally
        def close_mod(self):
            """Ignore the window-manager close button (wired via root.protocol above)."""
            pass

        def time_out(self):
            """Timeout handler: return the entry content if an entry widget exists, otherwise None, then quit the main loop."""
            try:
                x = self.entry.get()
            except AttributeError:
                self.returning = None
            else:
                self.returning = x
            finally:
                self.root.quit()

        def to_clip(self, event=None):
            """Copy the displayed message to the clipboard (bound to Ctrl+C)."""
            self.root.clipboard_clear()
            self.root.clipboard_append(self.msg)
def mbox(msg, b1='OK', b2='Cancel', frame=True, t=False, entry=None):
"""Create an instance of MessageBox, and get data back from the user.
:param msg: string to be displayed
:type msg: str
:param b1: left button text, or a tuple (<text for button>, <to return on press>)
:type b1: str, tuple
:param b2: right button text, or a tuple (<text for button>, <to return on press>)
:type b2: str, tuple
:param frame: include a standard outerframe: True or False
:type frame: bool
:param t: time in seconds (int or float) until the msgbox automatically closes
:type t: int, float
:param entry: include an entry widget that will provide its contents returned. Provide text to fill the box
:type entry: None, bool, str
Example:
.. code-block:: python
name = mbox('Enter your name', entry=True)
name = mbox('Enter your name', entry='default')
if name:
print("Value: " + name)
value = mbox('Male or female?', ('male', 'm'), ('female', 'f'))
mbox('Process done')
"""
msgbox = MessageBox(msg, b1, b2, frame, t, entry)
try:
from robolink import getPathIcon
iconpath = getPathIcon()
msgbox.root.iconbitmap(iconpath)
except:
print("RoboDK's Robolink module not found")
msgbox.root.attributes("-topmost", True)
msgbox.root.mainloop()
# the function pauses here until the mainloop is quit
msgbox.root.destroy()
return msgbox.returning
| [
"tkinter.filedialog.asksaveasfilename",
"csv.reader",
"math.asin",
"math.atan2",
"time.ctime",
"numpy.ones",
"os.path.isfile",
"numpy.linalg.svd",
"numpy.linalg.norm",
"sys.stdout.flush",
"sys.exc_info",
"glob.glob",
"tkinter.Frame",
"tkinter.Label",
"os.chdir",
"codecs.open",
"tkint... | [((1935, 1953), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (1944, 1953), False, 'import glob\n'), ((2180, 2205), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (2195, 2205), False, 'import os\n'), ((2312, 2338), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (2328, 2338), False, 'import os\n'), ((2622, 2648), 'os.path.getmtime', 'os.path.getmtime', (['filepath'], {}), '(filepath)\n', (2638, 2648), False, 'import os\n'), ((2868, 2894), 'os.path.getctime', 'os.path.getctime', (['filepath'], {}), '(filepath)\n', (2884, 2894), False, 'import os\n'), ((3077, 3098), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (3090, 3098), False, 'import os\n'), ((3181, 3201), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (3195, 3201), False, 'import os\n'), ((4430, 4449), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (4440, 4449), False, 'import time\n'), ((4544, 4560), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (4554, 4560), False, 'import math\n'), ((4641, 4657), 'math.sqrt', 'math.sqrt', (['value'], {}), '(value)\n', (4650, 4657), False, 'import math\n'), ((4743, 4758), 'math.sin', 'math.sin', (['value'], {}), '(value)\n', (4751, 4758), False, 'import math\n'), ((4846, 4861), 'math.cos', 'math.cos', (['value'], {}), '(value)\n', (4854, 4861), False, 'import math\n'), ((4940, 4956), 'math.asin', 'math.asin', (['value'], {}), '(value)\n', (4949, 4956), False, 'import math\n'), ((5037, 5053), 'math.acos', 'math.acos', (['value'], {}), '(value)\n', (5046, 5053), False, 'import math\n'), ((5224, 5257), 're.findall', 're.findall', (['"""[0-9]+"""', 'str_name_id'], {}), "('[0-9]+', str_name_id)\n", (5234, 5257), False, 'import re\n'), ((5897, 5909), 'math.cos', 'math.cos', (['rx'], {}), '(rx)\n', (5905, 5909), False, 'import math\n'), ((5920, 5932), 'math.sin', 'math.sin', (['rx'], {}), '(rx)\n', (5928, 5932), False, 'import math\n'), 
((6460, 6472), 'math.cos', 'math.cos', (['ry'], {}), '(ry)\n', (6468, 6472), False, 'import math\n'), ((6483, 6495), 'math.sin', 'math.sin', (['ry'], {}), '(ry)\n', (6491, 6495), False, 'import math\n'), ((7023, 7035), 'math.cos', 'math.cos', (['rz'], {}), '(rz)\n', (7031, 7035), False, 'import math\n'), ((7046, 7058), 'math.sin', 'math.sin', (['rz'], {}), '(rz)\n', (7054, 7058), False, 'import math\n'), ((12271, 12282), 'time.time', 'time.time', ([], {}), '()\n', (12280, 12282), False, 'import time\n'), ((15132, 15143), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (15140, 15143), False, 'import math\n'), ((15154, 15165), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (15162, 15165), False, 'import math\n'), ((15176, 15187), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (15184, 15187), False, 'import math\n'), ((15198, 15209), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (15206, 15209), False, 'import math\n'), ((15220, 15231), 'math.cos', 'math.cos', (['c'], {}), '(c)\n', (15228, 15231), False, 'import math\n'), ((15242, 15253), 'math.sin', 'math.sin', (['c'], {}), '(c)\n', (15250, 15253), False, 'import math\n'), ((17445, 17456), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (17453, 17456), False, 'import math\n'), ((17467, 17478), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (17475, 17478), False, 'import math\n'), ((17489, 17500), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (17497, 17500), False, 'import math\n'), ((17511, 17522), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (17519, 17522), False, 'import math\n'), ((17533, 17544), 'math.cos', 'math.cos', (['c'], {}), '(c)\n', (17541, 17544), False, 'import math\n'), ((17555, 17566), 'math.sin', 'math.sin', (['c'], {}), '(c)\n', (17563, 17566), False, 'import math\n'), ((18695, 18707), 'math.sin', 'math.sin', (['rx'], {}), '(rx)\n', (18703, 18707), False, 'import math\n'), ((18719, 18731), 'math.cos', 'math.cos', (['rx'], {}), '(rx)\n', (18727, 18731), False, 'import math\n'), ((18743, 18755), 'math.sin', 
'math.sin', (['ry'], {}), '(ry)\n', (18751, 18755), False, 'import math\n'), ((18767, 18779), 'math.cos', 'math.cos', (['ry'], {}), '(ry)\n', (18775, 18779), False, 'import math\n'), ((18791, 18803), 'math.sin', 'math.sin', (['rz'], {}), '(rz)\n', (18799, 18803), False, 'import math\n'), ((18815, 18827), 'math.cos', 'math.cos', (['rz'], {}), '(rz)\n', (18823, 18827), False, 'import math\n'), ((19810, 19822), 'math.sin', 'math.sin', (['rx'], {}), '(rx)\n', (19818, 19822), False, 'import math\n'), ((19834, 19846), 'math.cos', 'math.cos', (['rx'], {}), '(rx)\n', (19842, 19846), False, 'import math\n'), ((19858, 19870), 'math.sin', 'math.sin', (['ry'], {}), '(ry)\n', (19866, 19870), False, 'import math\n'), ((19882, 19894), 'math.cos', 'math.cos', (['ry'], {}), '(ry)\n', (19890, 19894), False, 'import math\n'), ((19906, 19918), 'math.sin', 'math.sin', (['rz'], {}), '(rz)\n', (19914, 19918), False, 'import math\n'), ((19930, 19942), 'math.cos', 'math.cos', (['rz'], {}), '(rz)\n', (19938, 19942), False, 'import math\n'), ((27455, 27466), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (27463, 27466), False, 'import math\n'), ((27477, 27488), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (27485, 27488), False, 'import math\n'), ((27499, 27510), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (27507, 27510), False, 'import math\n'), ((27521, 27532), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (27529, 27532), False, 'import math\n'), ((27543, 27554), 'math.cos', 'math.cos', (['c'], {}), '(c)\n', (27551, 27554), False, 'import math\n'), ((27565, 27576), 'math.sin', 'math.sin', (['c'], {}), '(c)\n', (27573, 27576), False, 'import math\n'), ((28405, 28416), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (28413, 28416), False, 'import math\n'), ((28427, 28438), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (28435, 28438), False, 'import math\n'), ((28449, 28460), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (28457, 28460), False, 'import math\n'), ((28471, 28482), 'math.sin', 
'math.sin', (['b'], {}), '(b)\n', (28479, 28482), False, 'import math\n'), ((28493, 28504), 'math.cos', 'math.cos', (['c'], {}), '(c)\n', (28501, 28504), False, 'import math\n'), ((28515, 28526), 'math.sin', 'math.sin', (['c'], {}), '(c)\n', (28523, 28526), False, 'import math\n'), ((39470, 39482), 'math.cos', 'math.cos', (['rx'], {}), '(rx)\n', (39478, 39482), False, 'import math\n'), ((39494, 39506), 'math.sin', 'math.sin', (['rx'], {}), '(rx)\n', (39502, 39506), False, 'import math\n'), ((39518, 39530), 'math.cos', 'math.cos', (['rz'], {}), '(rz)\n', (39526, 39530), False, 'import math\n'), ((39542, 39554), 'math.sin', 'math.sin', (['rz'], {}), '(rz)\n', (39550, 39554), False, 'import math\n'), ((40126, 40138), 'math.cos', 'math.cos', (['rx'], {}), '(rx)\n', (40134, 40138), False, 'import math\n'), ((40150, 40162), 'math.sin', 'math.sin', (['rx'], {}), '(rx)\n', (40158, 40162), False, 'import math\n'), ((40174, 40186), 'math.cos', 'math.cos', (['rz'], {}), '(rz)\n', (40182, 40186), False, 'import math\n'), ((40198, 40210), 'math.sin', 'math.sin', (['rz'], {}), '(rz)\n', (40206, 40210), False, 'import math\n'), ((44502, 44518), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (44510, 44518), True, 'import numpy as np\n'), ((44730, 44748), 'numpy.ones', 'np.ones', (['(rows, 1)'], {}), '((rows, 1))\n', (44737, 44748), True, 'import numpy as np\n'), ((44760, 44779), 'numpy.hstack', 'np.hstack', (['[XYZ, p]'], {}), '([XYZ, p])\n', (44769, 44779), True, 'import numpy as np\n'), ((44797, 44817), 'numpy.linalg.svd', 'np.linalg.svd', (['AB', '(0)'], {}), '(AB, 0)\n', (44810, 44817), True, 'import numpy as np\n'), ((44878, 44900), 'numpy.linalg.norm', 'np.linalg.norm', (['B[0:3]'], {}), '(B[0:3])\n', (44892, 44900), True, 'import numpy as np\n'), ((67759, 67777), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (67775, 67777), False, 'import sys\n'), ((68299, 68317), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (68315, 68317), False, 
'import sys\n'), ((68437, 68455), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (68453, 68455), False, 'import sys\n'), ((69808, 69826), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (69824, 69826), False, 'import sys\n'), ((70279, 70297), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (70295, 70297), False, 'import sys\n'), ((70737, 70755), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (70753, 70755), False, 'import sys\n'), ((70876, 70894), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (70892, 70894), False, 'import sys\n'), ((71750, 71768), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (71766, 71768), False, 'import sys\n'), ((72621, 72644), 'os.path.isfile', 'os.path.isfile', (['program'], {}), '(program)\n', (72635, 72644), False, 'import os\n'), ((72973, 72991), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (72989, 72991), False, 'import sys\n'), ((2687, 2708), 'time.ctime', 'time.ctime', (['time_in_s'], {}), '(time_in_s)\n', (2697, 2708), False, 'import time\n'), ((2933, 2954), 'time.ctime', 'time.ctime', (['time_in_s'], {}), '(time_in_s)\n', (2943, 2954), False, 'import time\n'), ((13434, 13466), 'codecs.open', 'codecs.open', (['strfile', '"""r"""', 'codec'], {}), "(strfile, 'r', codec)\n", (13445, 13466), False, 'import codecs\n'), ((13498, 13553), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': 'separator', 'quotechar': '"""|"""'}), "(csvfile, delimiter=separator, quotechar='|')\n", (13508, 13553), False, 'import csv\n'), ((16306, 16335), 'math.atan2', 'math.atan2', (['(-H[1, 2])', 'H[1, 1]'], {}), '(-H[1, 2], H[1, 1])\n', (16316, 16335), False, 'import math\n'), ((67564, 67591), 'os.path.normpath', 'os.path.normpath', (['localpath'], {}), '(localpath)\n', (67580, 67591), False, 'import os\n'), ((67805, 67846), 'ftplib.FTP', 'ftplib.FTP', (['server_ip', 'username', 'password'], {}), '(server_ip, username, password)\n', (67815, 67846), False, 'import ftplib\n'), 
((68981, 68997), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (68991, 68997), False, 'import os\n'), ((69007, 69021), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (69015, 69021), False, 'import os\n'), ((69616, 69630), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (69624, 69630), False, 'import os\n'), ((70325, 70366), 'ftplib.FTP', 'ftplib.FTP', (['server_ip', 'username', 'password'], {}), '(server_ip, username, password)\n', (70335, 70366), False, 'import ftplib\n'), ((71460, 71478), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (71476, 71478), False, 'import sys\n'), ((72477, 72495), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (72493, 72495), False, 'import sys\n'), ((72561, 72579), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (72577, 72579), False, 'import sys\n'), ((74325, 74337), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (74335, 74337), False, 'import tkinter\n'), ((74427, 74464), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '(**options)\n', (74453, 74464), False, 'from tkinter import filedialog\n'), ((75279, 75291), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (75289, 75291), False, 'import tkinter\n'), ((75381, 75416), 'tkinter.filedialog.asksaveasfile', 'filedialog.asksaveasfile', ([], {}), '(**options)\n', (75405, 75416), False, 'from tkinter import filedialog\n'), ((76130, 76142), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (76140, 76142), False, 'import tkinter\n'), ((76232, 76269), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '(**options)\n', (76258, 76269), False, 'from tkinter import filedialog\n'), ((77023, 77035), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (77033, 77035), False, 'import tkinter\n'), ((77125, 77164), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {}), '(**options)\n', (77153, 77164), False, 'from tkinter import filedialog\n'), ((77504, 77516), 'tkinter.Tk', 
'tkinter.Tk', ([], {}), '()\n', (77514, 77516), False, 'import tkinter\n'), ((77604, 77670), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'initialdir': 'path_programs', 'title': 'popup_msg'}), '(initialdir=path_programs, title=popup_msg)\n', (77627, 77670), False, 'from tkinter import filedialog\n'), ((78088, 78100), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (78098, 78100), False, 'import tkinter\n'), ((78190, 78224), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '(**options)\n', (78213, 78224), False, 'from tkinter import filedialog\n'), ((78421, 78433), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (78431, 78433), False, 'import tkinter\n'), ((78554, 78585), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['title', 'msg'], {}), '(title, msg)\n', (78573, 78585), False, 'from tkinter import messagebox\n'), ((78878, 78890), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (78888, 78890), False, 'import tkinter\n'), ((79011, 79042), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['title', 'msg'], {}), '(title, msg)\n', (79030, 79042), False, 'from tkinter import messagebox\n'), ((79349, 79361), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (79359, 79361), False, 'import tkinter\n'), ((79482, 79519), 'tkinter.messagebox.askyesnocancel', 'messagebox.askyesnocancel', (['title', 'msg'], {}), '(title, msg)\n', (79507, 79519), False, 'from tkinter import messagebox\n'), ((2468, 2494), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (2484, 2494), False, 'import os\n'), ((12413, 12424), 'time.time', 'time.time', ([], {}), '()\n', (12422, 12424), False, 'import time\n'), ((16418, 16446), 'math.atan2', 'math.atan2', (['H[1, 2]', 'H[1, 1]'], {}), '(H[1, 2], H[1, 1])\n', (16428, 16446), False, 'import math\n'), ((16550, 16578), 'math.atan2', 'math.atan2', (['H[1, 0]', 'H[0, 0]'], {}), '(H[1, 0], H[0, 0])\n', (16560, 16578), False, 'import math\n'), ((16592, 16620), 'math.atan2', 
'math.atan2', (['H[2, 1]', 'H[2, 2]'], {}), '(H[2, 1], H[2, 2])\n', (16602, 16620), False, 'import math\n'), ((66315, 66333), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (66331, 66333), False, 'import sys\n'), ((68061, 68079), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (68077, 68079), False, 'import sys\n'), ((68742, 68760), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (68758, 68760), False, 'import sys\n'), ((70540, 70558), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (70556, 70558), False, 'import sys\n'), ((71112, 71130), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (71128, 71130), False, 'import sys\n'), ((72170, 72188), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (72186, 72188), False, 'import sys\n'), ((79729, 79741), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (79739, 79741), False, 'import tkinter\n'), ((80393, 80412), 'tkinter.Frame', 'tkinter.Frame', (['root'], {}), '(root)\n', (80406, 80412), False, 'import tkinter\n'), ((80505, 80540), 'tkinter.Label', 'tkinter.Label', (['frm_1'], {'text': 'self.msg'}), '(frm_1, text=self.msg)\n', (80518, 80540), False, 'import tkinter\n'), ((80955, 80975), 'tkinter.Frame', 'tkinter.Frame', (['frm_1'], {}), '(frm_1)\n', (80968, 80975), False, 'import tkinter\n'), ((81060, 81099), 'tkinter.Button', 'tkinter.Button', (['frm_2'], {'width': '(8)', 'text': 'b1'}), '(frm_2, width=8, text=b1)\n', (81074, 81099), False, 'import tkinter\n'), ((81250, 81289), 'tkinter.Button', 'tkinter.Button', (['frm_2'], {'width': '(8)', 'text': 'b2'}), '(frm_2, width=8, text=b2)\n', (81264, 81289), False, 'import tkinter\n'), ((84776, 84789), 'robolink.getPathIcon', 'getPathIcon', ([], {}), '()\n', (84787, 84789), False, 'from robolink import getPathIcon\n'), ((66765, 66784), 'os.path.split', 'os.path.split', (['name'], {}), '(name)\n', (66778, 66784), False, 'import os\n'), ((67922, 67936), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (67934, 67936), 
False, 'import sys\n'), ((68601, 68615), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (68613, 68615), False, 'import sys\n'), ((69224, 69242), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (69240, 69242), False, 'import sys\n'), ((70401, 70415), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (70413, 70415), False, 'import sys\n'), ((70971, 70985), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (70983, 70985), False, 'import sys\n'), ((80765, 80785), 'tkinter.Entry', 'tkinter.Entry', (['frm_1'], {}), '(frm_1)\n', (80778, 80785), False, 'import tkinter\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
def fsp_matrix(a, b):
    """Compute the FSP matrix between two feature maps.

    :param a: numpy array of shape (batch, a_channel, h, w)
    :param b: numpy array of shape (batch, b_channel, h, w) sharing batch, h and w with ``a``
    :return: numpy array of shape (batch, a_channel, b_channel) where entry
        (n, i, j) is the mean over all h*w spatial positions of a[n, i] * b[n, j]
    """
    batch = a.shape[0]
    a_channel = a.shape[1]
    b_channel = b.shape[1]
    h = a.shape[2]
    w = a.shape[3]
    # Flatten the spatial dimensions: (batch, channel, h*w)
    a_flat = a.reshape([batch, a_channel, h * w])
    b_flat = b.reshape([batch, b_channel, h * w])
    # A batched matrix product averages the per-position outer products
    # directly, without materializing the (batch, h*w, b_channel, a_channel)
    # intermediates the repeat-based formulation builds.
    return np.matmul(a_flat, b_flat.transpose([0, 2, 1])) / (h * w)
class TestFSPOp(OpTest):
    """Operator test for the fsp op: compares the op output and gradients
    against the numpy reference implementation fsp_matrix."""

    def setUp(self):
        self.op_type = "fsp"
        self.initTestCase()
        # Random float32 feature maps are the op inputs; the expected
        # output comes from the numpy reference implementation
        x = np.random.uniform(0, 10, self.a_shape).astype('float32')
        y = np.random.uniform(0, 10, self.b_shape).astype('float32')
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': fsp_matrix(x, y)}

    def initTestCase(self):
        # Both feature maps share the batch and spatial sizes; channel
        # counts differ
        self.a_shape = (2, 16, 32, 31)
        self.b_shape = (2, 28, 32, 31)

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)
# Run the operator tests when executed as a script
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.random.uniform",
"numpy.mean"
] | [((1208, 1234), 'numpy.mean', 'np.mean', (['(a_r * b_r)'], {'axis': '(1)'}), '(a_r * b_r, axis=1)\n', (1215, 1234), True, 'import numpy as np\n'), ((1949, 1964), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1962, 1964), False, 'import unittest\n'), ((1365, 1403), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', 'self.a_shape'], {}), '(0, 10, self.a_shape)\n', (1382, 1403), True, 'import numpy as np\n'), ((1446, 1484), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', 'self.b_shape'], {}), '(0, 10, self.b_shape)\n', (1463, 1484), True, 'import numpy as np\n')] |
from pathlib import Path
from marius.tools.preprocess.dataset import NodeClassificationDataset
from marius.tools.preprocess.utils import download_url, extract_file
import numpy as np
from marius.tools.preprocess.converters.torch_converter import TorchEdgeListConverter
from marius.tools.preprocess.converters.spark_converter import SparkEdgeListConverter
from marius.tools.configuration.constants import PathConstants
from marius.tools.preprocess.datasets.dataset_helpers import remap_nodes
import torch
import os
from omegaconf import OmegaConf
class OGBMag240M(NodeClassificationDataset):
    def __init__(self, output_directory: Path, spark=False):
        """Initialize the OGB MAG240M node-classification dataset handler.

        :param output_directory: directory used for the raw downloads and the preprocessed output
        :param spark: when True, preprocess() uses the Spark-based edge-list converter instead of the torch one
        """
        super().__init__(output_directory, spark)

        self.dataset_name = "ogb_mag240m"
        self.dataset_url = "https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/mag240m_kddcup2021.zip"
def download(self, overwrite=False):
self.input_cites_edge_list_file = self.output_directory / Path("cites_edge_index.npy")
self.input_splits_file = self.output_directory / Path("split_dict.pt")
self.input_node_feature_file = self.output_directory / Path("node_feat.npy")
self.input_node_label_file = self.output_directory / Path("node_label.npy")
download = False
if not self.input_cites_edge_list_file.exists():
download = True
if not self.input_splits_file.exists():
download = True
if not self.input_node_feature_file.exists():
download = True
if not self.input_node_label_file.exists():
download = True
if download:
archive_path = download_url(self.dataset_url, self.output_directory, overwrite)
extract_file(archive_path, remove_input=False)
(self.output_directory / Path("mag240m_kddcup2021/processed/paper___cites___paper/edge_index.npy")).rename(self.input_cites_edge_list_file)
(self.output_directory / Path("mag240m_kddcup2021/split_dict.pt")).rename(self.input_splits_file)
(self.output_directory / Path("mag240m_kddcup2021/processed/paper/node_feat.npy")).rename(self.input_node_feature_file)
(self.output_directory / Path("mag240m_kddcup2021/processed/paper/node_label.npy")).rename(self.input_node_label_file)
def preprocess(self, num_partitions=1, remap_ids=True, splits=None, sequential_train_nodes=False, partitioned_eval=False):
citation_edges = np.load(self.input_cites_edge_list_file).astype(np.int32).transpose()
split_dict = torch.load(self.input_splits_file)
train_nodes = split_dict['train'].astype(np.int32)
valid_nodes = split_dict['valid'].astype(np.int32)
# test_nodes = split_dict['test'].astype(np.int32)
test_nodes = valid_nodes
converter = SparkEdgeListConverter if self.spark else TorchEdgeListConverter
converter = converter(
output_dir=self.output_directory,
train_edges=citation_edges,
num_partitions=num_partitions,
remap_ids=remap_ids,
sequential_train_nodes=sequential_train_nodes,
format="numpy",
known_node_ids=[train_nodes, valid_nodes, test_nodes, np.arange(121751666, dtype=np.int32)], # not all nodes appear in the edges
num_nodes=121751666,
num_rels=1,
partitioned_evaluation=partitioned_eval
)
dataset_stats = converter.convert()
features = np.load(self.input_node_feature_file)
labels = np.load(self.input_node_label_file)
labels[np.isnan(labels)] = -1
labels = labels.astype(np.int32)
if remap_ids:
node_mapping = np.genfromtxt(self.output_directory / Path(PathConstants.node_mapping_path), delimiter=",")
train_nodes, valid_nodes, test_nodes, features, labels = remap_nodes(node_mapping, train_nodes, valid_nodes, test_nodes, features, labels)
# convert to float32 in chunks, tested on ~500 GB RAM, need at least ~375GB minimum for float32 features
num_nodes = features.shape[0]
feat_dim = features.shape[1]
np.save(self.output_directory / Path("temp.npy"), features)
features = None
features = np.zeros((num_nodes, feat_dim), np.float32)
chunk_size = int(2E7)
start = 0
while start < num_nodes:
float16_features = np.load(self.output_directory / Path("temp.npy"), mmap_mode='r')[start:start+chunk_size]
features[start:start+chunk_size] = float16_features.astype(np.float32)
start += chunk_size
os.remove(self.output_directory / Path("temp.npy"))
with open(self.train_nodes_file, "wb") as f:
f.write(bytes(train_nodes))
with open(self.valid_nodes_file, "wb") as f:
f.write(bytes(valid_nodes))
with open(self.test_nodes_file, "wb") as f:
f.write(bytes(test_nodes))
with open(self.node_features_file, "wb") as f:
chunk_size = int(1E7)
start = 0
while start < num_nodes:
f.write(bytes(features[start:start+chunk_size]))
start += chunk_size
with open(self.node_labels_file, "wb") as f:
f.write(bytes(labels))
# update dataset yaml
dataset_stats.num_train = train_nodes.shape[0]
dataset_stats.num_valid = valid_nodes.shape[0]
dataset_stats.num_test = test_nodes.shape[0]
dataset_stats.feature_dim = features.shape[1]
dataset_stats.num_classes = 153
dataset_stats.num_nodes = labels.shape[0]
with open(self.output_directory / Path("dataset.yaml"), "w") as f:
yaml_file = OmegaConf.to_yaml(dataset_stats)
f.writelines(yaml_file)
return dataset_stats
| [
"marius.tools.preprocess.utils.extract_file",
"numpy.load",
"omegaconf.OmegaConf.to_yaml",
"marius.tools.preprocess.datasets.dataset_helpers.remap_nodes",
"torch.load",
"numpy.zeros",
"numpy.isnan",
"pathlib.Path",
"numpy.arange",
"marius.tools.preprocess.utils.download_url"
] | [((2543, 2577), 'torch.load', 'torch.load', (['self.input_splits_file'], {}), '(self.input_splits_file)\n', (2553, 2577), False, 'import torch\n'), ((3488, 3525), 'numpy.load', 'np.load', (['self.input_node_feature_file'], {}), '(self.input_node_feature_file)\n', (3495, 3525), True, 'import numpy as np\n'), ((3543, 3578), 'numpy.load', 'np.load', (['self.input_node_label_file'], {}), '(self.input_node_label_file)\n', (3550, 3578), True, 'import numpy as np\n'), ((4251, 4294), 'numpy.zeros', 'np.zeros', (['(num_nodes, feat_dim)', 'np.float32'], {}), '((num_nodes, feat_dim), np.float32)\n', (4259, 4294), True, 'import numpy as np\n'), ((972, 1000), 'pathlib.Path', 'Path', (['"""cites_edge_index.npy"""'], {}), "('cites_edge_index.npy')\n", (976, 1000), False, 'from pathlib import Path\n'), ((1058, 1079), 'pathlib.Path', 'Path', (['"""split_dict.pt"""'], {}), "('split_dict.pt')\n", (1062, 1079), False, 'from pathlib import Path\n'), ((1143, 1164), 'pathlib.Path', 'Path', (['"""node_feat.npy"""'], {}), "('node_feat.npy')\n", (1147, 1164), False, 'from pathlib import Path\n'), ((1226, 1248), 'pathlib.Path', 'Path', (['"""node_label.npy"""'], {}), "('node_label.npy')\n", (1230, 1248), False, 'from pathlib import Path\n'), ((1647, 1711), 'marius.tools.preprocess.utils.download_url', 'download_url', (['self.dataset_url', 'self.output_directory', 'overwrite'], {}), '(self.dataset_url, self.output_directory, overwrite)\n', (1659, 1711), False, 'from marius.tools.preprocess.utils import download_url, extract_file\n'), ((1724, 1770), 'marius.tools.preprocess.utils.extract_file', 'extract_file', (['archive_path'], {'remove_input': '(False)'}), '(archive_path, remove_input=False)\n', (1736, 1770), False, 'from marius.tools.preprocess.utils import download_url, extract_file\n'), ((3594, 3610), 'numpy.isnan', 'np.isnan', (['labels'], {}), '(labels)\n', (3602, 3610), True, 'import numpy as np\n'), ((3869, 3954), 'marius.tools.preprocess.datasets.dataset_helpers.remap_nodes', 
'remap_nodes', (['node_mapping', 'train_nodes', 'valid_nodes', 'test_nodes', 'features', 'labels'], {}), '(node_mapping, train_nodes, valid_nodes, test_nodes, features,\n labels)\n', (3880, 3954), False, 'from marius.tools.preprocess.datasets.dataset_helpers import remap_nodes\n'), ((5725, 5757), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['dataset_stats'], {}), '(dataset_stats)\n', (5742, 5757), False, 'from omegaconf import OmegaConf\n'), ((4180, 4196), 'pathlib.Path', 'Path', (['"""temp.npy"""'], {}), "('temp.npy')\n", (4184, 4196), False, 'from pathlib import Path\n'), ((4653, 4669), 'pathlib.Path', 'Path', (['"""temp.npy"""'], {}), "('temp.npy')\n", (4657, 4669), False, 'from pathlib import Path\n'), ((3229, 3265), 'numpy.arange', 'np.arange', (['(121751666)'], {'dtype': 'np.int32'}), '(121751666, dtype=np.int32)\n', (3238, 3265), True, 'import numpy as np\n'), ((3746, 3783), 'pathlib.Path', 'Path', (['PathConstants.node_mapping_path'], {}), '(PathConstants.node_mapping_path)\n', (3750, 3783), False, 'from pathlib import Path\n'), ((5668, 5688), 'pathlib.Path', 'Path', (['"""dataset.yaml"""'], {}), "('dataset.yaml')\n", (5672, 5688), False, 'from pathlib import Path\n'), ((1809, 1882), 'pathlib.Path', 'Path', (['"""mag240m_kddcup2021/processed/paper___cites___paper/edge_index.npy"""'], {}), "('mag240m_kddcup2021/processed/paper___cites___paper/edge_index.npy')\n", (1813, 1882), False, 'from pathlib import Path\n'), ((1961, 2001), 'pathlib.Path', 'Path', (['"""mag240m_kddcup2021/split_dict.pt"""'], {}), "('mag240m_kddcup2021/split_dict.pt')\n", (1965, 2001), False, 'from pathlib import Path\n'), ((2071, 2127), 'pathlib.Path', 'Path', (['"""mag240m_kddcup2021/processed/paper/node_feat.npy"""'], {}), "('mag240m_kddcup2021/processed/paper/node_feat.npy')\n", (2075, 2127), False, 'from pathlib import Path\n'), ((2203, 2260), 'pathlib.Path', 'Path', (['"""mag240m_kddcup2021/processed/paper/node_label.npy"""'], {}), 
"('mag240m_kddcup2021/processed/paper/node_label.npy')\n", (2207, 2260), False, 'from pathlib import Path\n'), ((2451, 2491), 'numpy.load', 'np.load', (['self.input_cites_edge_list_file'], {}), '(self.input_cites_edge_list_file)\n', (2458, 2491), True, 'import numpy as np\n'), ((4439, 4455), 'pathlib.Path', 'Path', (['"""temp.npy"""'], {}), "('temp.npy')\n", (4443, 4455), False, 'from pathlib import Path\n')] |
# coding: utf-8
# # Mask R-CNN - Train modified model on Shapes Dataset
# ### the modified model (include model_lib) does not include any mask related heads or losses
import os
import sys
# import random
import math
# import re
import gc
import time
import numpy as np
# import cv2
import argparse
import platform
# import matplotlib
# import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import keras.backend as KB
sys.path.append('../')
import mrcnn.model_mod as modellib
import mrcnn.visualize as visualize
import mrcnn.shapes as shapes
from mrcnn.config import Config
from mrcnn.dataset import Dataset
from mrcnn.utils import log, stack_tensors, stack_tensors_3d
from mrcnn.datagen import data_generator, load_image_gt
from mrcnn.callbacks import get_layer_output_1,get_layer_output_2
# from mrcnn.visualize import plot_gaussian
from mrcnn.prep_notebook import prep_oldshapes_train, load_model
import pprint
# Pretty-printer for runtime diagnostics and compact numpy output.
pp = pprint.PrettyPrinter(indent=2, width=100)
np.set_printoptions(linewidth=100, precision=4, threshold=1000, suppress=True)
print(sys.argv)
# Default logs folder name; --logs_dir is required and overrides it in practice.
DEFAULT_LOGS_DIR = 'mrcnn_logs'
##------------------------------------------------------------------------------------
## process input arguments
## example:
##    train-shapes_gpu --epochs 12 --steps-in-epoch 7 --last_epoch 1234 --logs_dir mrcnn_logs
##------------------------------------------------------------------------------------
# Parse command line arguments
parser = argparse.ArgumentParser(description='Train Mask R-CNN on MS COCO.')
parser.add_argument('--model', required=False,
                    default='last',
                    metavar="/path/to/weights.h5",
                    help="'coco' , 'init' , or Path to weights .h5 file ")
parser.add_argument('--logs_dir', required=True,
                    default=DEFAULT_LOGS_DIR,
                    metavar="/path/to/logs/",
                    help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--last_epoch', required=False,
                    default=0,
                    metavar="<last epoch ran>",
                    help='Identify last completed epoch for tensorboard continuation')
parser.add_argument('--lr', required=False,
                    default=0.001,
                    metavar="<learning rate>",
                    help='Learning Rate (default=0.001)')
parser.add_argument('--epochs', required=False,
                    default=3,
                    metavar="<epochs to run>",
                    help='Number of epochs to run (default=3)')
# BUG FIX: help text previously claimed (default=5) while the actual default is 1.
parser.add_argument('--steps_in_epoch', required=False,
                    default=1,
                    metavar="<steps in each epoch>",
                    help='Number of batches to run in each epochs (default=1)')
parser.add_argument('--batch_size', required=False,
                    default=5,
                    metavar="<batch size>",
                    help='Number of data samples in each batch (default=5)')
args = parser.parse_args()
pp.pprint(args)
print("Model : ", args.model)
print("Epochs to run : ", args.epochs)
print("Steps in each epoch: ", args.steps_in_epoch)
##------------------------------------------------------------------------------------
## setup project directories
#---------------------------------------------------------------------------------
# # Root directory of the project
# MODEL_DIR : Directory to save logs and trained model
# COCO_MODEL_PATH : Path to COCO trained weights
#---------------------------------------------------------------------------------
import platform
# Select model/dataset/log directories based on the host operating system.
syst = platform.system()
if syst == 'Windows':
    # Root directory of the project
    print(' windows ' , syst)
    # WINDOWS MACHINE ------------------------------------------------------------------
    ROOT_DIR = "E:\\"
    MODEL_PATH = os.path.join(ROOT_DIR, "models")
    DATASET_PATH = os.path.join(ROOT_DIR, 'MLDatasets')
    MODEL_DIR = os.path.join(MODEL_PATH, args.logs_dir)
    COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
    DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
    COCO_DATASET_PATH = os.path.join(DATASET_PATH, "coco2014")
    RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
elif syst == 'Linux':
    print(' Linx ' , syst)
    # LINUX MACHINE ------------------------------------------------------------------
    ROOT_DIR = os.getcwd()
    MODEL_PATH = os.path.expanduser('~/models')
    DATASET_PATH = os.path.expanduser('~/MLDatasets')
    MODEL_DIR = os.path.join(MODEL_PATH, args.logs_dir)
    COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
    COCO_DATASET_PATH = os.path.join(DATASET_PATH, "coco2014")
    DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
    RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
else:
    # BUG FIX: the original `raise Error(...)` referenced an undefined name,
    # so the error path itself would crash with a NameError instead of
    # reporting the problem. Raise a standard exception with the detail.
    raise ValueError('unrecognized system: {}'.format(syst))
print("Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__))
# NOTE(review): duplicate import -- pprint is already imported above.
import pprint
##------------------------------------------------------------------------------------
## Build configuration object
##------------------------------------------------------------------------------------
# All CLI arguments arrive as strings; cast them onto the config object.
config = shapes.ShapesConfig()
config.BATCH_SIZE = int(args.batch_size) # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = int(args.batch_size) # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = int(args.steps_in_epoch)
config.LEARNING_RATE = float(args.lr)
config.EPOCHS_TO_RUN = int(args.epochs)
# FCN input matches the (height, width) of the image shape.
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
config.LAST_EPOCH_RAN = int(args.last_epoch)
config.display()
##------------------------------------------------------------------------------------
## Build shape dataset
##------------------------------------------------------------------------------------
# Training dataset
# generate 500 shapes
# NOTE(review): comment above says 500 but 7000 synthetic images are generated.
dataset_train = shapes.ShapesDataset()
dataset_train.load_shapes(7000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = shapes.ShapesDataset()
dataset_val.load_shapes(1000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
##------------------------------------------------------------------------------------
## Build Model
##------------------------------------------------------------------------------------
# Dispose of any model left over from a previous run (e.g. in an interactive
# session) before building a fresh training graph.
try:
    del model
    gc.collect()
except NameError:
    # BUG FIX: was a bare `except:` that swallowed every exception. Only a
    # missing `model` binding is expected here, so catch exactly that.
    pass
KB.clear_session()
# Build the modified (mask-free) Mask R-CNN training graph with FCN layers.
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = True)
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', MODEL_PATH)
# print(model.find_last())
##----------------------------------------------------------------------------------------------
## Load Model Weight file
##----------------------------------------------------------------------------------------------
# init_with is 'coco', 'init', 'last', or an explicit .h5 path (see --model).
load_model(model, init_with = args.model)
config.display()
model.layer_info()
print(config.BATCH_SIZE)
print(model.config.BATCH_SIZE)
##----------------------------------------------------------------------------------------------
## Training
##
## Train the FCN only
## Passing layers="heads" freezes all layers except the head
## layers. You can also pass a regular expression to select
## which layers to train by name pattern.
##----------------------------------------------------------------------------------------------
# Only the FCN layers are trained; only the FCN normalized loss is optimized.
train_layers = ['fcn']
loss_names = ["fcn_norm_loss"]
# Resume epoch numbering so tensorboard curves continue from a previous run.
model.epoch = config.LAST_EPOCH_RAN
model.config.LEARNING_RATE = config.LEARNING_RATE
model.config.STEPS_PER_EPOCH = config.STEPS_PER_EPOCH
model.train(dataset_train, dataset_val,
            learning_rate = model.config.LEARNING_RATE,
            epochs_to_run = config.EPOCHS_TO_RUN,
            # epochs = 25, # total number of epochs to run (accross multiple trainings)
            layers = train_layers,
            losses = loss_names,
            min_LR = 1.0e-9,  # floor for any learning-rate schedule
            )
##------------------------------------------------------------------------------------
## setup tf session and debugging
##------------------------------------------------------------------------------------
# keras_backend.set_session(tf_debug.LocalCLIDebugWrapperSession(tf.Session()))
# if 'tensorflow' == KB.backend():
# from tensorflow.python import debug as tf_debug
#
# config = tf.ConfigProto(device_count = {'GPU': 0} )
# tf_sess = tf.Session(config=config)
# tf_sess = tf_debug.LocalCLIDebugWrapperSession(tf_sess)
# KB.set_session(tf_sess)
#
#
# tfconfig = tf.ConfigProto(
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
# device_count = {'GPU': 1}
# )
# tfconfig = tf.ConfigProto()
# tfconfig.gpu_options.allow_growth=True
# tfconfig.gpu_options.visible_device_list = "0"
# tfconfig.gpu_options.per_process_gpu_memory_fraction=0.5
# tf_sess = tf.Session(config=tfconfig)
# set_session(tf_sess)
##------------------------------------------------------------------------------------
| [
"sys.path.append",
"mrcnn.shapes.ShapesDataset",
"os.path.expanduser",
"numpy.set_printoptions",
"argparse.ArgumentParser",
"os.getcwd",
"mrcnn.model_mod.MaskRCNN",
"pprint.PrettyPrinter",
"gc.collect",
"mrcnn.shapes.ShapesConfig",
"platform.system",
"os.path.join",
"mrcnn.prep_notebook.load... | [((435, 457), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (450, 457), False, 'import sys\n'), ((974, 1015), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)', 'width': '(100)'}), '(indent=2, width=100)\n', (994, 1015), False, 'import pprint\n'), ((1016, 1094), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(100)', 'precision': '(4)', 'threshold': '(1000)', 'suppress': '(True)'}), '(linewidth=100, precision=4, threshold=1000, suppress=True)\n', (1035, 1094), True, 'import numpy as np\n'), ((1500, 1567), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN on MS COCO."""'}), "(description='Train Mask R-CNN on MS COCO.')\n", (1523, 1567), False, 'import argparse\n'), ((4497, 4514), 'platform.system', 'platform.system', ([], {}), '()\n', (4512, 4514), False, 'import platform\n'), ((6245, 6266), 'mrcnn.shapes.ShapesConfig', 'shapes.ShapesConfig', ([], {}), '()\n', (6264, 6266), True, 'import mrcnn.shapes as shapes\n'), ((6968, 6990), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (6988, 6990), True, 'import mrcnn.shapes as shapes\n'), ((7129, 7151), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (7149, 7151), True, 'import mrcnn.shapes as shapes\n'), ((7496, 7514), 'keras.backend.clear_session', 'KB.clear_session', ([], {}), '()\n', (7512, 7514), True, 'import keras.backend as KB\n'), ((7523, 7614), 'mrcnn.model_mod.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'MODEL_DIR', 'FCN_layers': '(True)'}), "(mode='training', config=config, model_dir=MODEL_DIR,\n FCN_layers=True)\n", (7540, 7614), True, 'import mrcnn.model_mod as modellib\n'), ((8007, 8046), 'mrcnn.prep_notebook.load_model', 'load_model', (['model'], {'init_with': 'args.model'}), '(model, init_with=args.model)\n', (8017, 8046), False, 'from mrcnn.prep_notebook import 
prep_oldshapes_train, load_model\n'), ((4747, 4779), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""models"""'], {}), "(ROOT_DIR, 'models')\n", (4759, 4779), False, 'import os\n'), ((4804, 4840), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""MLDatasets"""'], {}), "(ROOT_DIR, 'MLDatasets')\n", (4816, 4840), False, 'import os\n'), ((4865, 4904), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'args.logs_dir'], {}), '(MODEL_PATH, args.logs_dir)\n', (4877, 4904), False, 'import os\n'), ((4929, 4974), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(MODEL_PATH, 'mask_rcnn_coco.h5')\n", (4941, 4974), False, 'import os\n'), ((4999, 5042), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mrcnn_coco_logs"""'], {}), "(MODEL_PATH, 'mrcnn_coco_logs')\n", (5011, 5042), False, 'import os\n'), ((5067, 5105), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""coco2014"""'], {}), "(DATASET_PATH, 'coco2014')\n", (5079, 5105), False, 'import os\n'), ((5129, 5214), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(MODEL_PATH, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n )\n", (5141, 5214), False, 'import os\n'), ((7465, 7477), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7475, 7477), False, 'import gc\n'), ((5370, 5381), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5379, 5381), False, 'import os\n'), ((5406, 5436), 'os.path.expanduser', 'os.path.expanduser', (['"""~/models"""'], {}), "('~/models')\n", (5424, 5436), False, 'import os\n'), ((5461, 5495), 'os.path.expanduser', 'os.path.expanduser', (['"""~/MLDatasets"""'], {}), "('~/MLDatasets')\n", (5479, 5495), False, 'import os\n'), ((5520, 5559), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'args.logs_dir'], {}), '(MODEL_PATH, args.logs_dir)\n', (5532, 5559), False, 'import os\n'), ((5584, 5629), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(MODEL_PATH, 
'mask_rcnn_coco.h5')\n", (5596, 5629), False, 'import os\n'), ((5654, 5692), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""coco2014"""'], {}), "(DATASET_PATH, 'coco2014')\n", (5666, 5692), False, 'import os\n'), ((5716, 5759), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mrcnn_coco_logs"""'], {}), "(MODEL_PATH, 'mrcnn_coco_logs')\n", (5728, 5759), False, 'import os\n'), ((5784, 5869), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(MODEL_PATH, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n )\n", (5796, 5869), False, 'import os\n')] |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.spatial.distance import pdist, squareform
def kriging(x, y, z, semi_mod, semi_popt, xnew=None, ynew=None, plot=False,
masked=False, silent=True, eop=None, block=False):
"""
Kriging a surface from a set of 2D points with a given semivariogram
model and associated optimized parameters.
Plot the surface and the corresponding kriging variance, if wanted.
The coordinates and values of the surface can be masked outside the
convex hull of the given input points.
Optional extraction of kriged values at distinct points within the
surface is possible.
Block kriging for the average of the convex hull is possible.
Howvere kringing on a rectangular surface with subsequent averaging
is almost always faster.
Definition
----------
def kriging(x, y, z, semi_mod, semi_popt, xnew=None, ynew=None, plot=False,
masked=False, silent=True, eop=None, block=False):
Input
-----
x array, x coordinates
y array, y coordinates
z array, values
semi_mod function, semivariogram model (e.g. output from the JAMS
semivariogram routine)
semi_popt array, parameters of the semivariogram model (e.g. output
from the JAMS semivariogram routine)
xnew array (n), x coordinates of the desired surface, they will be
used to generate a 2D mesh for the surface. If left None,
values will be kriged only for the points given in eop.
ynew array (m), y coordinates of the desired surface, they will be
used to generate a 2D mesh for the surface. If left None,
values will be kriged only for the points given in eop.
eop array (k,2), x and y coordinates of distinct points where
a kriged value is desired.
Optional Input
--------------
plot bool, plots will be generated if True, otherwise not.
masked bool, if True, the output arrays will be np.ma.masked_arrays
where coordinates and values outside of the convex hull of
the input data are masked. In the generated plots these
values will be hidden. If False, the output arrays will be
np.arrays and all values within the kriging rectangle are
visible in the plots.
silent bool, if True, no runtime diagnostics are printed to the
console.
block bool, if True, calculate block kriging
Note that kringing on a rectangular surface xnew,ynew with possible masking
and calculating the mean afterwards is almost always much faster,
except for very fine xnew,ynew grids.
Output
------
if eop is None:
xnew 2D array (n,m), x coordinates of the surface grid
ynew 2D array (n,m), y coordinates of the surface grid
znew 2D array (n,m), values of the surface grid
varnew 2D array (n,m), kriging variance of the surface grid
if xnew is None and not block:
eopz array (k), kriged values at the desired distinct points of eop
eopvar array (k), kriging variance at the desired distinct points of
eop
if block:
bave average over convex_hull of x,y or xnew,ynew.
bvar kriging variance of average bave
otherwise:
xnew 2D array (n,m), x coordinates of the surface grid
ynew 2D array (n,m), y coordinates of the surface grid
znew 2D array (n,m), values of the surface grid
varnew 2D array (n,m), kriging variance of the surface grid
eopz array (k), kriged values at the desired distinct points of eop
eopvar array (k), kriging variance at the desired distinct points of
graphs:
kriging surface, shows the kriged surface
kriging variance, shows the kriging variance
References
----------
This routine is recoded and extended from a matlab script by <NAME>.
Examples
--------
# provide you some sample data:
>>> # seed for reproducible results in doctest
>>> np.random.seed(1)
>>> x = np.array([652225.,652175.,652205.,652235.,652265.,652165.,
... 652195.,652225.,652255.,652285.,652175.,652205.,
... 652235.,652265.,652175.,652205.,652235.,652265.,
... 652195.,652225.,652255.,652285.,652235.,652265.,
... 652225.,652255.,652285.,652195.,652200.,652200.,
... 652240.,652230.,652260.,652260.,652265.])
>>> y = np.array([5772960.,5772970.,5772970.,5772970.,5772970.,
... 5772980.,5772980.,5772980.,5772980.,5772980.,
... 5772990.,5772990.,5772990.,5772990.,5773000.,
... 5773000.,5773000.,5773000.,5773010.,5773010.,
... 5773010.,5773010.,5773020.,5773020.,5773030.,
... 5773030.,5773030.,5772985.,5772990.,5772995.,
... 5773015.,5773025.,5772985.,5772990.,5772995.])
>>> z = np.array([2.16512767,4.97776467,4.2279204 ,0. ,
... 8.25658422,0.01238773,5.05858306,8.33503939,
... 7.53470443,7.15304826,9.45150218,8.79359049,
... 0.0536634 ,0.42101194,0.22721601,1.1458486 ,
... 6.79183025,2.50622739,3.76725118,3.97934707,
... 0. ,0.24743279,1.4627512 ,0.38430722,
... 5.30171261,0. ,3.17667353,3.80908144,
... 7.12445478,4.83891708,6.10898131,2.93801857,
... 2.56170107,2.54503559,1.72767934])
# make semivariogram
>>> from semivariogram import semivariogram
>>> nL = 40
>>> di = [0]
>>> td = 180
>>> nugget,sill,orange,vark,h,g,c,semi_mod,semi_popt = semivariogram(
... x,y,z,nL,di,td,stype='omnidirectional',negscat=0.5,
... model='exponential',graph=False,lunit='m',
... p0=(0.,20.,1./8.),runtimediag=False)
# x and y coordinates for the surface
>>> xnew = np.arange(np.amin(x),np.amax(x),5.)
>>> ynew = np.arange(np.amin(y),np.amax(y),5.)
# krig the surface
>>> xnew, ynew, znew, varnew = kriging(x,y,z,semi_mod,semi_popt,
... xnew=xnew,ynew=ynew,silent=True,
... plot=False,masked=False,eop=None)
>>> from autostring import astr
>>> print(astr(znew[0][0:8],1,pp=True))
['2.8' '3.4' '3.9' '4.2' '4.3' '4.2' '4.0' '3.8']
>>> print(astr(np.mean(znew),1))
3.7
# block krig the surface
>>> bave, bvar = kriging(x, y, z, semi_mod, semi_popt,xnew=xnew, ynew=ynew,
... silent=True,plot=False,masked=False,eop=None,block=True)
>>> print(astr(bave,1,pp=True))
3.5
>>> print(astr(np.sqrt(bvar),3,pp=True))
3.096
# krig only at points of interest
>>> poi = np.array([[652209.16,5772986.26],
... [652281.10,5773014.27],
... [652202.39,5772997.96],
... [652264.51,5772992.49],
... [652274.81,5772961.62],
... [652204.93,5772992.82],
... [652232.38,5773021.34],
... [652278.25,5773019.58],
... [652199.17,5773004.12],
... [652276.71,5773006.25]])
>>> eopz, eopvar = kriging(x,y,z,semi_mod,semi_popt,xnew=None,
... ynew=None,plot=False,masked=False,
... silent=True,eop=poi)
>>> print(astr(eopz[0:8],1,pp=True))
['7.8' '0.7' '3.1' '1.2' '7.4' '6.7' '2.1' '1.1']
# krig both, whole surface and on points of interest
>>> xnew = np.arange(np.min(x),np.max(x),5.)
>>> ynew = np.arange(np.min(y),np.max(y),5.)
>>> xnew, ynew, znew, varnew, eopz, eopvar = kriging(x,y,z,semi_mod,
... semi_popt,xnew=xnew,
... ynew=ynew,plot=False,
... masked=False,silent=True,
... eop=poi)
>>> print(astr(znew[0][0:8],1,pp=True))
['2.8' '3.4' '3.9' '4.2' '4.3' '4.2' '4.0' '3.8']
>>> print(astr(eopz[0:8],1,pp=True))
['7.8' '0.7' '3.1' '1.2' '7.4' '6.7' '2.1' '1.1']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2014 <NAME>, <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, AP & JM, Nov 2012
Modified, AP, Dec 2012 - documentation change
MC & JM, Feb 2013 - block
MC, Feb 2013 - block uses volume_poly
- include Langriangian multiplier in kriging variance
- quadruple integral of block variance
calculated by Monte-Carlo
MC, Feb 2013 - ported to Python 3
MC, Apr 2014 - assert
"""
if not silent:
import time
print('KRIG: prepare data...')
# ironing :-)
x, y, z = x.flatten(), y.flatten(), z.flatten()
semi_popt = semi_popt.flatten()
if xnew is not None:
xnew, ynew = xnew.flatten(), ynew.flatten()
if eop is not None:
eopx, eopy = eop[:,0].flatten(), eop[:,1].flatten()
###########################################################################
# orignal x + y data
# reshape and calculate lags and gammas
assert np.size(x) == np.size(y), 'kriging: x and y must have same dimensions'
assert np.size(x) == np.size(z), 'kriging: x and z must have same dimensions'
xy = np.vstack((x,y)).transpose()
lag = squareform(pdist(xy, 'euclidean'))
gamma = semi_mod(lag,semi_popt)
# make A and append row and column of one's
A = np.vstack((gamma,np.ones(np.shape(gamma)[1])))
A = np.hstack((A,np.ones(np.shape(A)[0]).reshape(-1,1)))
A[-1,-1] = 0.
invA = np.linalg.inv(A)
#######################################################################
# calculate convex hull to hide outer areas
if masked:
from convex_hull import convex_hull
from in_poly import in_poly
if not silent:
print('KRIG: calculate hull...')
start = time.time()
hull_points = convex_hull(np.vstack((x,y)), graphic=False,
smidgen=0.0075)
if not silent:
stop = time.time()
print('KRIG: calculating hull took %0.3f sec' %(stop-start))
###########################################################################
# krig on grid
if (xnew is not None) and (not block):
if not silent:
print('KRIG: prepare mesh...')
# make 2D mesh grid
xnew, ynew = np.meshgrid(xnew, ynew)
xnew_v = xnew.flatten()
ynew_v = ynew.flatten()
length = np.size(xnew_v)
#######################################################################
# calculate every znew of xnew and ynew
if not silent:
print('KRIG: kriging...')
start = time.time()
znew = np.empty_like(xnew_v)
varnew = np.empty_like(xnew_v)
#lamnew = np.empty((np.size(xnew_v), np.size(x)))
if masked:
mask = np.empty_like(xnew_v, dtype=int)
for igrid in range(length):
# make B
b = np.sqrt((x-xnew_v[igrid])**2 + (y-ynew_v[igrid])**2)
B = semi_mod(b,semi_popt)
B = np.append(B, 1.)
# calculate lambda
lmd = np.dot(invA, B)
# shorten it
mu = lmd[-1]
lmd = lmd[:-1]
B = B[:-1]
znew[igrid] = np.dot(z,lmd)
varnew[igrid] = np.dot(lmd.transpose(), B) + mu
#lamnew[igrid,:] = lmd
###################################################################
# calculate convex hull to hide outer areas
if masked:
mask[igrid] = in_poly([xnew_v[igrid], ynew_v[igrid]],
hull_points[:,0],
hull_points[:,1])
znew = znew.reshape(np.shape(xnew))
varnew = varnew.reshape(np.shape(xnew))
#lamnew = lamnew.reshape(np.shape(xnew))
if masked:
mask = mask.reshape(np.shape(xnew))
mask = np.where(mask>0, 0, 1)
xnew = np.ma.masked_array(xnew, mask)
ynew = np.ma.masked_array(ynew, mask)
znew = np.ma.masked_array(znew, mask)
varnew = np.ma.masked_array(varnew, mask)
if not silent:
stop = time.time()
print('KRIG: kriging took %0.3f sec' %(stop-start))
#######################################################################
# krig on extraction points
if eop is not None:
length = np.size(eopx)
#######################################################################
# calculate every znew of xnew and ynew
if not silent:
print('KRIG: kriging...')
start = time.time()
eopz = np.empty_like(eopx)
eopvar = np.empty_like(eopx)
#eoplam = np.empty((np.size(eopx), np.size(x)))
if masked:
mask = np.empty_like(eopx, dtype=int)
for igrid in range(length):
# make B
b = np.sqrt((x-eopx[igrid])**2 + (y-eopy[igrid])**2)
B = semi_mod(b,semi_popt)
B = np.append(B, 1.)
# calculate lambda
lmd = np.dot(invA, B)
# shorten it
mu = lmd[-1]
lmd = lmd[:-1]
B = B[:-1]
eopz[igrid] = np.dot(z,lmd)
eopvar[igrid] = np.dot(lmd.transpose(), B) + mu
#eoplam[igrid,:] = lmd
###################################################################
# calculate convex hull to hide outer areas
if masked:
mask[igrid] = in_poly([eopx[igrid], eopy[igrid]],
hull_points[:,0], hull_points[:,1])
if masked:
mask = np.where(mask>0, 0, 1)
eopx = np.ma.masked_array(eopx, mask)
eopy = np.ma.masked_array(eopy, mask)
eopz = np.ma.masked_array(eopz, mask)
eopvar = np.ma.masked_array(eopvar, mask)
if not silent:
stop = time.time()
print('KRIG: kriging took %0.3f sec' %(stop-start))
###########################################################################
# block kriging
if block:
from scipy.spatial import Delaunay # for triangulation
from scipy.integrate import dblquad # area integral
from convex_hull import convex_hull # convex hull of data points
from volume_poly import volume_poly # the volume above a polygon
if not silent:
print('KRIG: block kriging...')
start = time.time()
def semiyx(yy,xx,obsyx,f,p):
dis = np.sqrt((xx-obsyx[1])**2 + (yy-obsyx[0])**2)
return f(dis,p) # semivariogram(distance, parameter)
# Construct B-vector
B = np.empty(x.size+1, dtype=np.float)
B[-1] = 1.
if (xnew is not None) and (not masked):
# Assume rectangle
xmin = np.amin(xnew)
ymin = np.amin(ynew)
xmax = np.amax(xnew)
ymax = np.amax(ynew)
# Do not calc double integral because 4 triangles are double as fast.
# # Calc mean semivariogramm over whole region for each point
# area = (xmax-xmin)*(ymax-ymin)
# for i in range(x.size):
# tvol, tvol_err = dblquad(semiyx, xmin, xmax, lambda xx: ymin, lambda xx: ymax,
# args=([y[i],x[i]], semi_mod, semi_popt))
# B[i] = tvol / area
# Construct 4 triangles
xs = 0.5*(xmax+xmin) # centre of gravity
ys = 0.5*(ymax+ymin)
ntriangles = 4
tri = np.empty((ntriangles,3,2), dtype=np.float)
tri[0,0,:] = [xmin,ymin]
tri[0,1,:] = [xmax,ymin]
tri[0,2,:] = [xs,ys]
tri[1,0,:] = [xmin,ymin]
tri[1,1,:] = [xmin,ymax]
tri[1,2,:] = [xs,ys]
tri[2,0,:] = [xmin,ymax]
tri[2,1,:] = [xmax,ymax]
tri[2,2,:] = [xs,ys]
tri[3,0,:] = [xmax,ymax]
tri[3,1,:] = [xmax,ymin]
tri[3,2,:] = [xs,ys]
# Construct convex hull
cxy = np.empty((ntriangles,2), dtype=np.float)
cxy[0,:] = [xmin,ymin]
cxy[1,:] = [xmax,ymin]
cxy[2,:] = [xmax,ymax]
cxy[3,:] = [xmin,ymax]
# Calc mean semivariogramm over whole region for each point
for i in range(x.size):
tvol, tvol_err, area = volume_poly(semiyx, tri=tri, area=True,
obsyx=[y[i],x[i]], f=semi_mod, p=semi_popt)
B[i] = tvol / area
else:
# Get convex hull and vertices
xy = np.array(list(zip(x,y)))
d = Delaunay(xy[:,:])
cxy = convex_hull(xy.transpose())
xs = np.mean(cxy[:,0])
ys = np.mean(cxy[:,1])
# # All triangles
# tri = xy[d.vertices,:]
# ntriangles = tri.shape[0]
# Construct triangles from convex hull and centre of gravity
ntriangles = d.convex_hull.shape[0]
tri = np.empty((ntriangles,3,2), dtype=np.float)
for i in range(ntriangles):
tri[i,0,:] = xy[d.convex_hull[i,0],:]
tri[i,1,:] = xy[d.convex_hull[i,1],:]
tri[i,2,:] = [xs,ys]
# Calc mean semivariogramm over whole region for each point
for i in range(x.size):
tvol, tvol_err, area = volume_poly(semiyx, tri=tri, area=True,
obsyx=[y[i],x[i]], f=semi_mod, p=semi_popt)
B[i] = tvol / area
# calculate lambda
lmd = np.dot(invA, B)
# shorten it
mu = lmd[-1]
lmd = lmd[:-1]
B = B[:-1]
# average
baverage = np.dot(z,lmd)
# Kriging error
# Integration of quadruple integral by Monte-Carlo
n = 0.0
total = 0.0
total2 = 0.0
while True:
n += 1.0
xx1 = xmin + (xmax-xmin) * np.random.random()
xx2 = xmin + (xmax-xmin) * np.random.random()
yy1 = ymin + (ymax-ymin) * np.random.random()
yy2 = ymin + (ymax-ymin) * np.random.random()
f = semiyx(yy1, xx1, [yy2,xx2], semi_mod, semi_popt)
total += f
total2 += (f**2)
if n>100.:
mm = total/n # E(f)
vv = (total2/n - mm**2)/(n-1.0) # 1/n*Var(f) = 1/n * (n/n-1)*(E(f^2)-E(f)^2)
ee = np.sqrt(vv)
if ee/mm*100. < 0.1: break
# Integral would be V*mm with err V*err
# but we need mean, i.e. mm
bvariance = np.abs(np.dot(lmd,B) + mu - mm) # on std example, bvar was negative ?
if not silent:
stop = time.time()
print('KRIG: block kriging took %0.3f sec' %(stop-start))
###########################################################################
# plotting
if plot:
import matplotlib as mpl
import matplotlib.pyplot as plt
if not silent:
print('KRIG: plotting...')
mpl.rc('font', size=20)
mpl.rc('lines', linewidth=2)
mpl.rc('axes', linewidth=1.5)
# mpl.rc('xtick.major', width=1.5)
# mpl.rc('ytick.major', width=1.5)
mpl.rcParams['lines.markersize']=6
mpl.rcParams['lines.markeredgewidth']=1
mpl.rcParams['grid.linewidth']=1.5
# mpl.rcParams['legend.frameon']=False
mpl.rcParams['legend.numpoints']=1
mpl.rcParams['legend.handlelength']=1
mpl.rcParams['mathtext.default']='regular'
# plotting contours of kriging
# fig1 = plt.figure('kriging: surface', figsize=(15,10))
fig1 = plt.figure(1, figsize=(15,10))
sub1 = fig1.add_subplot(111, aspect='equal')#, aspect=1)
if xnew is not None:
lines = sub1.contour(xnew,ynew,znew,10,linewidths=1.5,colors='k')
fillings = sub1.contourf(xnew,ynew,znew,10,cmap=plt.cm.jet)
if masked:
hull = sub1.plot(np.hstack((hull_points[:,0],hull_points[0,0])),
np.hstack((hull_points[:,1],hull_points[0,1])),
color='k')
if eop is not None:
scat = sub1.scatter(eopx,eopy,marker='o',c='k',s=40)
sub1.xaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
sub1.yaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
sub1.xaxis.set_major_formatter(mpl.ticker.
ScalarFormatter(useOffset=False))
sub1.yaxis.set_major_formatter(mpl.ticker.
ScalarFormatter(useOffset=False))
sub1.grid('on')
sub1.set_title('kriging')
plt.xlabel('easting')
plt.ylabel('northing')
fig1.autofmt_xdate(rotation=45)
# plt.tight_layout(pad=1, h_pad=0, w_pad=0)
# cbar need to be below autofm_xdate !!!???
if xnew is not None:
cbar = fig1.colorbar(fillings, orientation='vertical', pad=0.05,
shrink = 0.7)
cbar.set_label('value')
# plotting contours of variance
# fig2 = plt.figure('kriging: variance', figsize=(15,10))
fig2 = plt.figure(2, figsize=(15,10))
sub2 = fig2.add_subplot(111, aspect='equal')#, aspect=1)
if xnew is not None:
lines = sub2.contour(xnew,ynew,varnew,10,linewidths=1.5,
colors='k')
fillings = sub2.contourf(xnew,ynew,varnew,10,cmap=plt.cm.jet)
if masked:
hull = sub2.plot(np.hstack((hull_points[:,0],hull_points[0,0])),
np.hstack((hull_points[:,1],hull_points[0,1])),
color='k')
if eop is not None:
scat = sub2.scatter(eopx,eopy,marker='o',c='k',s=40)
sub2.xaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
sub2.yaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
sub2.xaxis.set_major_formatter(mpl.ticker.
ScalarFormatter(useOffset=False))
sub2.yaxis.set_major_formatter(mpl.ticker.
ScalarFormatter(useOffset=False))
sub2.grid('on')
sub2.set_title('variance')
plt.xlabel('easting')
plt.ylabel('northing')
fig2.autofmt_xdate(rotation=45)
# plt.tight_layout(pad=1, h_pad=0, w_pad=0)
# cbar need to be below autofm_xdate !!!???
if xnew is not None:
cbar = fig2.colorbar(fillings, orientation='vertical', pad=0.05,
shrink = 0.7)
cbar.set_label('value')
plt.show()
if eop is None:
if block:
return baverage, bvariance
else:
return xnew, ynew, znew, varnew
elif xnew is None:
return eopz, eopvar
else:
return xnew, ynew, znew, varnew, eopz, eopvar
if __name__ == '__main__':
    # Run the module's embedded doctests.  NORMALIZE_WHITESPACE lets the
    # multi-line array prints in the examples compare despite line wrapping.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
# x = np.array([652225.,652175.,652205.,652235.,652265.,652165.,
# 652195.,652225.,652255.,652285.,652175.,652205.,
# 652235.,652265.,652175.,652205.,652235.,652265.,
# 652195.,652225.,652255.,652285.,652235.,652265.,
# 652225.,652255.,652285.,652195.,652200.,652200.,
# 652240.,652230.,652260.,652260.,652265.])
# y = np.array([5772960.,5772970.,5772970.,5772970.,5772970.,
# 5772980.,5772980.,5772980.,5772980.,5772980.,
# 5772990.,5772990.,5772990.,5772990.,5773000.,
# 5773000.,5773000.,5773000.,5773010.,5773010.,
# 5773010.,5773010.,5773020.,5773020.,5773030.,
# 5773030.,5773030.,5772985.,5772990.,5772995.,
# 5773015.,5773025.,5772985.,5772990.,5772995.])
# z = np.array([2.16512767,4.97776467,4.2279204 ,0. ,
# 8.25658422,0.01238773,5.05858306,8.33503939,
# 7.53470443,7.15304826,9.45150218,8.79359049,
# 0.0536634 ,0.42101194,0.22721601,1.1458486 ,
# 6.79183025,2.50622739,3.76725118,3.97934707,
# 0. ,0.24743279,1.4627512 ,0.38430722,
# 5.30171261,0. ,3.17667353,3.80908144,
# 7.12445478,4.83891708,6.10898131,2.93801857,
# 2.56170107,2.54503559,1.72767934])
# # make semivariogram
# from semivariogram import semivariogram
# nL = 40
# di = [0]
# td = 180
# nugget,sill,orange,vark,h,g,c,semi_mod,semi_popt = semivariogram(
# x,y,z,nL,di,td,stype='omnidirectional',negscat=0.5,
# model='exponential',graph=False,lunit='m',
# p0=(0.,20.,1./8.),runtimediag=False)
# # x and y coordinates for the surface
# xnew = np.arange(np.amin(x),np.amax(x),5.)
# ynew = np.arange(np.amin(y),np.amax(y),5.)
# xnew, ynew, znew, varnew = kriging(x,y,z,semi_mod,semi_popt,
# xnew=xnew,ynew=ynew,silent=True,
# plot=True,masked=False,eop=None)
# print(np.round(znew[0],3))
# # [ 3.576 3.758 3.912 3.937 3.884 3.83 3.792 3.759 3.71 3.613
# # 3.407 2.981 2.165 2.366 2.458 2.797 3.304 3.817 4.298 4.717
# # 4.918 4.77 4.478 4.238]
# print(np.round(np.mean(znew),2))
# # 3.69
# # block krig the surface
# bave, bvar = kriging(x, y, z, semi_mod, semi_popt,
# xnew=xnew, ynew=ynew,
# silent=True,plot=False,masked=False,eop=None,block=True)
# print(np.round(bave,3), np.round(np.sqrt(bvar),3))
# # 3.659 2.842
# # krig only at points of interest
# poi = np.array([[652209.16,5772986.26],
# [652281.10,5773014.27],
# [652202.39,5772997.96],
# [652264.51,5772992.49],
# [652274.81,5772961.62],
# [652204.93,5772992.82],
# [652232.38,5773021.34],
# [652278.25,5773019.58],
# [652199.17,5773004.12],
# [652276.71,5773006.25]])
# eopz, eopvar = kriging(x,y,z,semi_mod,semi_popt,xnew=None,
# ynew=None,plot=False,masked=False,
# silent=True,eop=poi)
# print(np.round(eopz,3))
# # [ 6.409 1.677 3.168 1.262 4.636 6.534 2.244 2.255 2.996 2.111]
# # krig both, whole surface and on points of interest
# xnew = np.arange(np.min(x),np.max(x),5.)
# ynew = np.arange(np.min(y),np.max(y),5.)
# xnew, ynew, znew, varnew, eopz, eopvar = kriging(x,y,z,semi_mod,
# semi_popt,xnew=xnew,
# ynew=ynew,plot=False,
# masked=False,silent=True,
# eop=poi)
# print(np.round(znew[0],3))
# # [ 3.576 3.758 3.912 3.937 3.884 3.83 3.792 3.759 3.71 3.613
# # 3.407 2.981 2.165 2.366 2.458 2.797 3.304 3.817 4.298 4.717
# # 4.918 4.77 4.478 4.238]
# print(np.round(eopz,3))
# # [ 6.409 1.677 3.168 1.262 4.636 6.534 2.244 2.255 2.996 2.111]
| [
"matplotlib.rc",
"numpy.amin",
"numpy.empty",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.mean",
"scipy.spatial.distance.pdist",
"numpy.ma.masked_array",
"doctest.testmod",
"scipy.spatial.Delaunay",
"matplotlib.ticker.ScalarFormatter",
"numpy.meshgrid",
"numpy.empty_like",
"numpy.app... | [((12222, 12238), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (12235, 12238), True, 'import numpy as np\n'), ((25959, 26016), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.NORMALIZE_WHITESPACE'}), '(optionflags=doctest.NORMALIZE_WHITESPACE)\n', (25974, 26016), False, 'import doctest\n'), ((11754, 11764), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (11761, 11764), True, 'import numpy as np\n'), ((11768, 11778), 'numpy.size', 'np.size', (['y'], {}), '(y)\n', (11775, 11778), True, 'import numpy as np\n'), ((11836, 11846), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (11843, 11846), True, 'import numpy as np\n'), ((11850, 11860), 'numpy.size', 'np.size', (['z'], {}), '(z)\n', (11857, 11860), True, 'import numpy as np\n'), ((11968, 11990), 'scipy.spatial.distance.pdist', 'pdist', (['xy', '"""euclidean"""'], {}), "(xy, 'euclidean')\n", (11973, 11990), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((13061, 13084), 'numpy.meshgrid', 'np.meshgrid', (['xnew', 'ynew'], {}), '(xnew, ynew)\n', (13072, 13084), True, 'import numpy as np\n'), ((13166, 13181), 'numpy.size', 'np.size', (['xnew_v'], {}), '(xnew_v)\n', (13173, 13181), True, 'import numpy as np\n'), ((13420, 13441), 'numpy.empty_like', 'np.empty_like', (['xnew_v'], {}), '(xnew_v)\n', (13433, 13441), True, 'import numpy as np\n'), ((13459, 13480), 'numpy.empty_like', 'np.empty_like', (['xnew_v'], {}), '(xnew_v)\n', (13472, 13480), True, 'import numpy as np\n'), ((15181, 15194), 'numpy.size', 'np.size', (['eopx'], {}), '(eopx)\n', (15188, 15194), True, 'import numpy as np\n'), ((15433, 15452), 'numpy.empty_like', 'np.empty_like', (['eopx'], {}), '(eopx)\n', (15446, 15452), True, 'import numpy as np\n'), ((15470, 15489), 'numpy.empty_like', 'np.empty_like', (['eopx'], {}), '(eopx)\n', (15483, 15489), True, 'import numpy as np\n'), ((17515, 17551), 'numpy.empty', 'np.empty', (['(x.size + 1)'], {'dtype': 'np.float'}), '(x.size + 1, 
dtype=np.float)\n', (17523, 17551), True, 'import numpy as np\n'), ((20520, 20535), 'numpy.dot', 'np.dot', (['invA', 'B'], {}), '(invA, B)\n', (20526, 20535), True, 'import numpy as np\n'), ((20661, 20675), 'numpy.dot', 'np.dot', (['z', 'lmd'], {}), '(z, lmd)\n', (20667, 20675), True, 'import numpy as np\n'), ((22001, 22024), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'size': '(20)'}), "('font', size=20)\n", (22007, 22024), True, 'import matplotlib as mpl\n'), ((22033, 22061), 'matplotlib.rc', 'mpl.rc', (['"""lines"""'], {'linewidth': '(2)'}), "('lines', linewidth=2)\n", (22039, 22061), True, 'import matplotlib as mpl\n'), ((22070, 22099), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {'linewidth': '(1.5)'}), "('axes', linewidth=1.5)\n", (22076, 22099), True, 'import matplotlib as mpl\n'), ((22627, 22658), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(15, 10)'}), '(1, figsize=(15, 10))\n', (22637, 22658), True, 'import matplotlib.pyplot as plt\n'), ((23663, 23684), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""easting"""'], {}), "('easting')\n", (23673, 23684), True, 'import matplotlib.pyplot as plt\n'), ((23693, 23715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""northing"""'], {}), "('northing')\n", (23703, 23715), True, 'import matplotlib.pyplot as plt\n'), ((24172, 24203), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(15, 10)'}), '(2, figsize=(15, 10))\n', (24182, 24203), True, 'import matplotlib.pyplot as plt\n'), ((25250, 25271), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""easting"""'], {}), "('easting')\n", (25260, 25271), True, 'import matplotlib.pyplot as plt\n'), ((25280, 25302), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""northing"""'], {}), "('northing')\n", (25290, 25302), True, 'import matplotlib.pyplot as plt\n'), ((25645, 25655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25653, 25655), True, 'import matplotlib.pyplot as plt\n'), ((11918, 11935), 'numpy.vstack', 'np.vstack', (['(x, 
y)'], {}), '((x, y))\n', (11927, 11935), True, 'import numpy as np\n'), ((12547, 12558), 'time.time', 'time.time', ([], {}), '()\n', (12556, 12558), False, 'import time\n'), ((12593, 12610), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (12602, 12610), True, 'import numpy as np\n'), ((12718, 12729), 'time.time', 'time.time', ([], {}), '()\n', (12727, 12729), False, 'import time\n'), ((13392, 13403), 'time.time', 'time.time', ([], {}), '()\n', (13401, 13403), False, 'import time\n'), ((13577, 13609), 'numpy.empty_like', 'np.empty_like', (['xnew_v'], {'dtype': 'int'}), '(xnew_v, dtype=int)\n', (13590, 13609), True, 'import numpy as np\n'), ((13684, 13744), 'numpy.sqrt', 'np.sqrt', (['((x - xnew_v[igrid]) ** 2 + (y - ynew_v[igrid]) ** 2)'], {}), '((x - xnew_v[igrid]) ** 2 + (y - ynew_v[igrid]) ** 2)\n', (13691, 13744), True, 'import numpy as np\n'), ((13791, 13808), 'numpy.append', 'np.append', (['B', '(1.0)'], {}), '(B, 1.0)\n', (13800, 13808), True, 'import numpy as np\n'), ((13858, 13873), 'numpy.dot', 'np.dot', (['invA', 'B'], {}), '(invA, B)\n', (13864, 13873), True, 'import numpy as np\n'), ((14007, 14021), 'numpy.dot', 'np.dot', (['z', 'lmd'], {}), '(z, lmd)\n', (14013, 14021), True, 'import numpy as np\n'), ((14485, 14499), 'numpy.shape', 'np.shape', (['xnew'], {}), '(xnew)\n', (14493, 14499), True, 'import numpy as np\n'), ((14533, 14547), 'numpy.shape', 'np.shape', (['xnew'], {}), '(xnew)\n', (14541, 14547), True, 'import numpy as np\n'), ((14684, 14708), 'numpy.where', 'np.where', (['(mask > 0)', '(0)', '(1)'], {}), '(mask > 0, 0, 1)\n', (14692, 14708), True, 'import numpy as np\n'), ((14727, 14757), 'numpy.ma.masked_array', 'np.ma.masked_array', (['xnew', 'mask'], {}), '(xnew, mask)\n', (14745, 14757), True, 'import numpy as np\n'), ((14777, 14807), 'numpy.ma.masked_array', 'np.ma.masked_array', (['ynew', 'mask'], {}), '(ynew, mask)\n', (14795, 14807), True, 'import numpy as np\n'), ((14827, 14857), 'numpy.ma.masked_array', 
'np.ma.masked_array', (['znew', 'mask'], {}), '(znew, mask)\n', (14845, 14857), True, 'import numpy as np\n'), ((14879, 14911), 'numpy.ma.masked_array', 'np.ma.masked_array', (['varnew', 'mask'], {}), '(varnew, mask)\n', (14897, 14911), True, 'import numpy as np\n'), ((14955, 14966), 'time.time', 'time.time', ([], {}), '()\n', (14964, 14966), False, 'import time\n'), ((15405, 15416), 'time.time', 'time.time', ([], {}), '()\n', (15414, 15416), False, 'import time\n'), ((15584, 15614), 'numpy.empty_like', 'np.empty_like', (['eopx'], {'dtype': 'int'}), '(eopx, dtype=int)\n', (15597, 15614), True, 'import numpy as np\n'), ((15689, 15745), 'numpy.sqrt', 'np.sqrt', (['((x - eopx[igrid]) ** 2 + (y - eopy[igrid]) ** 2)'], {}), '((x - eopx[igrid]) ** 2 + (y - eopy[igrid]) ** 2)\n', (15696, 15745), True, 'import numpy as np\n'), ((15792, 15809), 'numpy.append', 'np.append', (['B', '(1.0)'], {}), '(B, 1.0)\n', (15801, 15809), True, 'import numpy as np\n'), ((15859, 15874), 'numpy.dot', 'np.dot', (['invA', 'B'], {}), '(invA, B)\n', (15865, 15874), True, 'import numpy as np\n'), ((16008, 16022), 'numpy.dot', 'np.dot', (['z', 'lmd'], {}), '(z, lmd)\n', (16014, 16022), True, 'import numpy as np\n'), ((16454, 16478), 'numpy.where', 'np.where', (['(mask > 0)', '(0)', '(1)'], {}), '(mask > 0, 0, 1)\n', (16462, 16478), True, 'import numpy as np\n'), ((16497, 16527), 'numpy.ma.masked_array', 'np.ma.masked_array', (['eopx', 'mask'], {}), '(eopx, mask)\n', (16515, 16527), True, 'import numpy as np\n'), ((16547, 16577), 'numpy.ma.masked_array', 'np.ma.masked_array', (['eopy', 'mask'], {}), '(eopy, mask)\n', (16565, 16577), True, 'import numpy as np\n'), ((16597, 16627), 'numpy.ma.masked_array', 'np.ma.masked_array', (['eopz', 'mask'], {}), '(eopz, mask)\n', (16615, 16627), True, 'import numpy as np\n'), ((16649, 16681), 'numpy.ma.masked_array', 'np.ma.masked_array', (['eopvar', 'mask'], {}), '(eopvar, mask)\n', (16667, 16681), True, 'import numpy as np\n'), ((16725, 16736), 'time.time', 
'time.time', ([], {}), '()\n', (16734, 16736), False, 'import time\n'), ((17291, 17302), 'time.time', 'time.time', ([], {}), '()\n', (17300, 17302), False, 'import time\n'), ((17359, 17411), 'numpy.sqrt', 'np.sqrt', (['((xx - obsyx[1]) ** 2 + (yy - obsyx[0]) ** 2)'], {}), '((xx - obsyx[1]) ** 2 + (yy - obsyx[0]) ** 2)\n', (17366, 17411), True, 'import numpy as np\n'), ((17667, 17680), 'numpy.amin', 'np.amin', (['xnew'], {}), '(xnew)\n', (17674, 17680), True, 'import numpy as np\n'), ((17700, 17713), 'numpy.amin', 'np.amin', (['ynew'], {}), '(ynew)\n', (17707, 17713), True, 'import numpy as np\n'), ((17733, 17746), 'numpy.amax', 'np.amax', (['xnew'], {}), '(xnew)\n', (17740, 17746), True, 'import numpy as np\n'), ((17766, 17779), 'numpy.amax', 'np.amax', (['ynew'], {}), '(ynew)\n', (17773, 17779), True, 'import numpy as np\n'), ((18409, 18453), 'numpy.empty', 'np.empty', (['(ntriangles, 3, 2)'], {'dtype': 'np.float'}), '((ntriangles, 3, 2), dtype=np.float)\n', (18417, 18453), True, 'import numpy as np\n'), ((18934, 18975), 'numpy.empty', 'np.empty', (['(ntriangles, 2)'], {'dtype': 'np.float'}), '((ntriangles, 2), dtype=np.float)\n', (18942, 18975), True, 'import numpy as np\n'), ((19551, 19569), 'scipy.spatial.Delaunay', 'Delaunay', (['xy[:, :]'], {}), '(xy[:, :])\n', (19559, 19569), False, 'from scipy.spatial import Delaunay\n'), ((19633, 19651), 'numpy.mean', 'np.mean', (['cxy[:, 0]'], {}), '(cxy[:, 0])\n', (19640, 19651), True, 'import numpy as np\n'), ((19669, 19687), 'numpy.mean', 'np.mean', (['cxy[:, 1]'], {}), '(cxy[:, 1])\n', (19676, 19687), True, 'import numpy as np\n'), ((19933, 19977), 'numpy.empty', 'np.empty', (['(ntriangles, 3, 2)'], {'dtype': 'np.float'}), '((ntriangles, 3, 2), dtype=np.float)\n', (19941, 19977), True, 'import numpy as np\n'), ((21665, 21676), 'time.time', 'time.time', ([], {}), '()\n', (21674, 21676), False, 'import time\n'), ((23248, 23278), 'matplotlib.ticker.MultipleLocator', 'mpl.ticker.MultipleLocator', (['(10)'], {}), '(10)\n', 
(23274, 23278), True, 'import matplotlib as mpl\n'), ((23317, 23347), 'matplotlib.ticker.MultipleLocator', 'mpl.ticker.MultipleLocator', (['(10)'], {}), '(10)\n', (23343, 23347), True, 'import matplotlib as mpl\n'), ((23388, 23431), 'matplotlib.ticker.ScalarFormatter', 'mpl.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (23414, 23431), True, 'import matplotlib as mpl\n'), ((23512, 23555), 'matplotlib.ticker.ScalarFormatter', 'mpl.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (23538, 23555), True, 'import matplotlib as mpl\n'), ((24834, 24864), 'matplotlib.ticker.MultipleLocator', 'mpl.ticker.MultipleLocator', (['(10)'], {}), '(10)\n', (24860, 24864), True, 'import matplotlib as mpl\n'), ((24903, 24933), 'matplotlib.ticker.MultipleLocator', 'mpl.ticker.MultipleLocator', (['(10)'], {}), '(10)\n', (24929, 24933), True, 'import matplotlib as mpl\n'), ((24974, 25017), 'matplotlib.ticker.ScalarFormatter', 'mpl.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (25000, 25017), True, 'import matplotlib as mpl\n'), ((25098, 25141), 'matplotlib.ticker.ScalarFormatter', 'mpl.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (25124, 25141), True, 'import matplotlib as mpl\n'), ((14306, 14383), 'in_poly.in_poly', 'in_poly', (['[xnew_v[igrid], ynew_v[igrid]]', 'hull_points[:, 0]', 'hull_points[:, 1]'], {}), '([xnew_v[igrid], ynew_v[igrid]], hull_points[:, 0], hull_points[:, 1])\n', (14313, 14383), False, 'from in_poly import in_poly\n'), ((14649, 14663), 'numpy.shape', 'np.shape', (['xnew'], {}), '(xnew)\n', (14657, 14663), True, 'import numpy as np\n'), ((16307, 16380), 'in_poly.in_poly', 'in_poly', (['[eopx[igrid], eopy[igrid]]', 'hull_points[:, 0]', 'hull_points[:, 1]'], {}), '([eopx[igrid], eopy[igrid]], hull_points[:, 0], hull_points[:, 1])\n', (16314, 16380), False, 'from in_poly import in_poly\n'), ((19262, 19351), 'volume_poly.volume_poly', 
'volume_poly', (['semiyx'], {'tri': 'tri', 'area': '(True)', 'obsyx': '[y[i], x[i]]', 'f': 'semi_mod', 'p': 'semi_popt'}), '(semiyx, tri=tri, area=True, obsyx=[y[i], x[i]], f=semi_mod, p=\n semi_popt)\n', (19273, 19351), False, 'from volume_poly import volume_poly\n'), ((20308, 20397), 'volume_poly.volume_poly', 'volume_poly', (['semiyx'], {'tri': 'tri', 'area': '(True)', 'obsyx': '[y[i], x[i]]', 'f': 'semi_mod', 'p': 'semi_popt'}), '(semiyx, tri=tri, area=True, obsyx=[y[i], x[i]], f=semi_mod, p=\n semi_popt)\n', (20319, 20397), False, 'from volume_poly import volume_poly\n'), ((21393, 21404), 'numpy.sqrt', 'np.sqrt', (['vv'], {}), '(vv)\n', (21400, 21404), True, 'import numpy as np\n'), ((22953, 23002), 'numpy.hstack', 'np.hstack', (['(hull_points[:, 0], hull_points[0, 0])'], {}), '((hull_points[:, 0], hull_points[0, 0]))\n', (22962, 23002), True, 'import numpy as np\n'), ((23030, 23079), 'numpy.hstack', 'np.hstack', (['(hull_points[:, 1], hull_points[0, 1])'], {}), '((hull_points[:, 1], hull_points[0, 1]))\n', (23039, 23079), True, 'import numpy as np\n'), ((24539, 24588), 'numpy.hstack', 'np.hstack', (['(hull_points[:, 0], hull_points[0, 0])'], {}), '((hull_points[:, 0], hull_points[0, 0]))\n', (24548, 24588), True, 'import numpy as np\n'), ((24616, 24665), 'numpy.hstack', 'np.hstack', (['(hull_points[:, 1], hull_points[0, 1])'], {}), '((hull_points[:, 1], hull_points[0, 1]))\n', (24625, 24665), True, 'import numpy as np\n'), ((12110, 12125), 'numpy.shape', 'np.shape', (['gamma'], {}), '(gamma)\n', (12118, 12125), True, 'import numpy as np\n'), ((20902, 20920), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (20918, 20920), True, 'import numpy as np\n'), ((20960, 20978), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (20976, 20978), True, 'import numpy as np\n'), ((21018, 21036), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (21034, 21036), True, 'import numpy as np\n'), ((21076, 21094), 'numpy.random.random', 
'np.random.random', ([], {}), '()\n', (21092, 21094), True, 'import numpy as np\n'), ((21559, 21573), 'numpy.dot', 'np.dot', (['lmd', 'B'], {}), '(lmd, B)\n', (21565, 21573), True, 'import numpy as np\n'), ((12161, 12172), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (12169, 12172), True, 'import numpy as np\n')] |
import os
import numpy as np
import pybullet as p
from igibson.object_states import *
from igibson.objects.articulated_object import URDFObject
from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
from igibson.robots.behavior_robot import BehaviorRobot
from igibson.robots.fetch import Fetch
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import get_ig_model_path
# Target world position for "bottom_cabinet_0"; placed far from the origin so
# a successful restore is unambiguous.
CABINET_POS = np.array([100, 100, 100])
# Joint name -> 2-tuple applied via set_joint_states() and compared after
# restore.  Presumably (position, velocity) — verify against the iGibson API.
CABINET_JOINT = {
    "bottom_cabinet_0_joint_1": (0.1, 0.5),
    "bottom_cabinet_0_joint_2": (0.2, 0.6),
    "bottom_cabinet_0_joint_3": (0.3, 0.7),
    "bottom_cabinet_0_joint_4": (0.4, 0.8),
}
# Pose of the Fetch agent ("agent_0") and the state unpacked into
# joints["head_tilt_joint"].reset_state(*FETCH_JOINT).
FETCH_POS = np.array([0.5, -2.5, 0])
FETCH_JOINT = np.array([1.0, 0.5])
# Pose of the BehaviorRobot ("agent_1"); BROBOT_JOINT is unpacked into
# reset_state() for each right-hand shoulder joint.  The second entry is None
# on purpose (only BROBOT_JOINT[0] is asserted after restore).
BROBOT_POS = np.array([0.0, 0.5, 0.5])
BROBOT_JOINT = np.array([0.5, None])
# World position the bed's pillow body is teleported to before saving.
PILLOW_POS = np.array([1, 33, 7])
def test_saving():
    """Mutate a Rs_int scene and save two snapshots for the loading tests.

    Produces ``changed_state.urdf``/``.bullet`` (kinematic + non-kinematic
    state changes only) and ``changed_structure.urdf``/``.bullet`` (scene
    with an added sliceable apple and two agents).  The companion
    ``test_loading_*`` functions restore and verify these files.
    """
    s = Simulator(mode="headless", use_pb_gui=False)
    scene = InteractiveIndoorScene(
        "Rs_int",
        # load_object_categories=["bottom_cabinet", "pot_plant", "floor_lamp"],
    )
    s.import_scene(scene)
    # Change kinematic of existing objects
    scene.objects_by_name["bottom_cabinet_0"].set_position(CABINET_POS)
    scene.objects_by_name["bottom_cabinet_0"].set_joint_states(CABINET_JOINT)
    # Change non-kinematic states of existing object
    scene.objects_by_name["pot_plant_1"].states[Soaked].set_value(True)
    scene.objects_by_name["floor_lamp_3"].states[ToggledOn].set_value(True)
    # Find a bed, move one of its pillows.
    # The pillow is any body id that is not the bed's main body; moving it
    # checks that non-main bodies are captured by scene.save().
    bed = scene.objects_by_name["bed_28"]
    pillow_bid = next(bid for bid in bed.get_body_ids() if bid != bed.get_body_ids()[bed.main_body])
    p.resetBasePositionAndOrientation(pillow_bid, PILLOW_POS, [0, 0, 0, 1])
    # Save
    scene.save(urdf_path="changed_state.urdf", pybullet_filename="changed_state.bullet")
    # Add a new object
    model_path = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf")
    simulator_obj = URDFObject(model_path, name="00_0", category="apple", scale=np.array([1.0, 1.0, 1.0]))
    whole_object = simulator_obj
    object_parts = []
    # Build one URDFObject per part listed in the model's metadata; these
    # become the post-slicing halves handled by the ObjectGrouper below.
    for i, part in enumerate(simulator_obj.metadata["object_parts"]):
        category = part["category"]
        model = part["model"]
        # Scale the offset accordingly
        part_pos = part["pos"] * whole_object.scale
        part_orn = part["orn"]
        model_path = get_ig_model_path(category, model)
        filename = os.path.join(model_path, model + ".urdf")
        obj_name = whole_object.name + "_part_{}".format(i)
        simulator_obj_part = URDFObject(
            filename,
            name=obj_name,
            category=category,
            model_path=model_path,
            scale=whole_object.scale,
        )
        object_parts.append((simulator_obj_part, (part_pos, part_orn)))
    # Multiplexer switches between the whole apple and its grouped parts;
    # index 0 selects the whole object initially.
    grouped_obj_parts = ObjectGrouper(object_parts)
    apple = ObjectMultiplexer(whole_object.name + "_multiplexer", [whole_object, grouped_obj_parts], 0)
    s.import_object(apple)
    apple.set_position([0, 0, 1])
    # Change its default non-kinematic state
    apple.states[Sliced].set_value(True)
    # Import agents
    fetch = Fetch(name="agent_0")
    s.import_object(fetch)
    fetch.set_position(FETCH_POS)
    fetch.joints["head_tilt_joint"].reset_state(*FETCH_JOINT)
    brobot = BehaviorRobot(name="agent_1")
    s.import_object(brobot)
    brobot.set_position(BROBOT_POS)
    for direction in ["x", "y", "z", "rx", "ry", "rz"]:
        brobot.joints["right_hand_shoulder__right_hand_{}".format(direction)].reset_state(*BROBOT_JOINT)
    # Save
    scene.save(urdf_path="changed_structure.urdf", pybullet_filename="changed_structure.bullet")
    s.disconnect()
def test_loading_state_with_bullet_file():
    """Restore the saved state (URDF + pybullet snapshot) and verify it."""
    sim = Simulator(mode="headless", use_pb_gui=False)
    indoor_scene = InteractiveIndoorScene("Rs_int")
    sim.import_scene(indoor_scene)
    indoor_scene.restore(urdf_path="changed_state.urdf", pybullet_filename="changed_state.bullet")
    objects = indoor_scene.objects_by_name
    # Kinematic state of the cabinet must match what test_saving wrote.
    cabinet = objects["bottom_cabinet_0"]
    assert np.array_equal(cabinet.get_position(), CABINET_POS)
    for joint_name, joint_state in cabinet.get_joint_states().items():
        assert np.array_equal(np.array(joint_state), np.array(CABINET_JOINT[joint_name]))
    # Non-kinematic object states must survive the round trip as well.
    assert objects["pot_plant_1"].states[Soaked].get_value()
    assert objects["floor_lamp_3"].states[ToggledOn].get_value()
    # Check if non-main bodies (the bed's pillow) are also correctly moved.
    bed = objects["bed_28"]
    body_ids = bed.get_body_ids()
    pillow_bid = next(bid for bid in body_ids if bid != body_ids[bed.main_body])
    assert np.array_equal(p.getBasePositionAndOrientation(pillow_bid)[0], PILLOW_POS)
    sim.disconnect()
def test_loading_state_without_bullet_file():
    """Restore the saved state from the URDF alone (no pybullet snapshot)."""
    sim = Simulator(mode="headless", use_pb_gui=False)
    indoor_scene = InteractiveIndoorScene("Rs_int")
    sim.import_scene(indoor_scene)
    indoor_scene.restore(urdf_path="changed_state.urdf")
    objects = indoor_scene.objects_by_name
    # Kinematic state of the cabinet must match what test_saving wrote.
    cabinet = objects["bottom_cabinet_0"]
    assert np.array_equal(cabinet.get_position(), CABINET_POS)
    for joint_name, joint_state in cabinet.get_joint_states().items():
        assert np.array_equal(np.array(joint_state), np.array(CABINET_JOINT[joint_name]))
    # Non-kinematic object states must survive the round trip as well.
    assert objects["pot_plant_1"].states[Soaked].get_value()
    assert objects["floor_lamp_3"].states[ToggledOn].get_value()
    # Check if non-main bodies (the bed's pillow) are also correctly moved.
    bed = objects["bed_28"]
    body_ids = bed.get_body_ids()
    pillow_bid = next(bid for bid in body_ids if bid != body_ids[bed.main_body])
    assert np.array_equal(p.getBasePositionAndOrientation(pillow_bid)[0], PILLOW_POS)
    sim.disconnect()
def test_loading_state_with_sliceable():
    """Restoring a URDF that contains a sliced (multiplexed) object must
    recover object states, robot poses, and robot joint positions."""
    s = Simulator(mode="headless", use_pb_gui=False)
    scene = InteractiveIndoorScene(
        "Rs_int",
        urdf_path="changed_structure.urdf",
    )
    s.import_scene(scene)
    scene.restore(urdf_path="changed_structure.urdf")
    assert np.array_equal(scene.objects_by_name["bottom_cabinet_0"].get_position(), CABINET_POS)
    joint_states = scene.objects_by_name["bottom_cabinet_0"].get_joint_states()
    for key in joint_states:
        assert np.array_equal(np.array(joint_states[key]), np.array(CABINET_JOINT[key]))
    assert scene.objects_by_name["pot_plant_1"].states[Soaked].get_value()
    assert scene.objects_by_name["floor_lamp_3"].states[ToggledOn].get_value()
    assert scene.objects_by_name["00_0_multiplexer"].states[Sliced].get_value()
    assert np.allclose(scene.objects_by_name["agent_0"].get_position(), FETCH_POS)
    assert np.allclose(scene.objects_by_name["agent_0"].joints["head_tilt_joint"].get_state()[:2], FETCH_JOINT)
    assert np.allclose(scene.objects_by_name["agent_1"].get_position(), BROBOT_POS)
    for direction in ["x", "y", "z", "rx", "ry", "rz"]:
        # BUG FIX: the np.allclose result used to be discarded, so these
        # per-joint checks could never fail; assert it.
        assert np.allclose(
            scene.objects_by_name["agent_1"]
            .joints["right_hand_shoulder__right_hand_{}".format(direction)]
            .get_state()[0],
            BROBOT_JOINT[0],
        )
    s.disconnect()
def test_loading_structure_with_bullet_file():
    """Loading a scene directly from a saved URDF plus .bullet file must
    restore object states, robot poses, and robot joint positions."""
    s = Simulator(mode="headless", use_pb_gui=False)
    scene = InteractiveIndoorScene(
        "Rs_int",
        urdf_path="changed_structure.urdf",
        pybullet_filename="changed_structure.bullet",
    )
    s.import_scene(scene)
    assert np.array_equal(scene.objects_by_name["bottom_cabinet_0"].get_position(), CABINET_POS)
    joint_states = scene.objects_by_name["bottom_cabinet_0"].get_joint_states()
    for key in joint_states:
        assert np.array_equal(np.array(joint_states[key]), np.array(CABINET_JOINT[key]))
    assert scene.objects_by_name["pot_plant_1"].states[Soaked].get_value()
    assert scene.objects_by_name["floor_lamp_3"].states[ToggledOn].get_value()
    assert scene.objects_by_name["00_0_multiplexer"].states[Sliced].get_value()
    assert np.allclose(scene.objects_by_name["agent_0"].get_position(), FETCH_POS)
    assert np.allclose(scene.objects_by_name["agent_0"].joints["head_tilt_joint"].get_state()[:2], FETCH_JOINT)
    assert np.allclose(scene.objects_by_name["agent_1"].get_position(), BROBOT_POS)
    for direction in ["x", "y", "z", "rx", "ry", "rz"]:
        # BUG FIX: the np.allclose result used to be discarded, so these
        # per-joint checks could never fail; assert it.
        assert np.allclose(
            scene.objects_by_name["agent_1"]
            .joints["right_hand_shoulder__right_hand_{}".format(direction)]
            .get_state()[0],
            BROBOT_JOINT[0],
        )
    s.disconnect()
def test_loading_structure_without_bullet_file():
    """Loading a scene from the saved URDF alone must restore object states,
    robot poses, and robot joint positions."""
    s = Simulator(mode="headless", use_pb_gui=False)
    scene = InteractiveIndoorScene(
        "Rs_int",
        urdf_path="changed_structure.urdf",
    )
    s.import_scene(scene)
    assert np.array_equal(scene.objects_by_name["bottom_cabinet_0"].get_position(), CABINET_POS)
    joint_states = scene.objects_by_name["bottom_cabinet_0"].get_joint_states()
    for key in joint_states:
        assert np.array_equal(np.array(joint_states[key]), np.array(CABINET_JOINT[key]))
    assert scene.objects_by_name["pot_plant_1"].states[Soaked].get_value()
    assert scene.objects_by_name["floor_lamp_3"].states[ToggledOn].get_value()
    assert scene.objects_by_name["00_0_multiplexer"].states[Sliced].get_value()
    assert np.allclose(scene.objects_by_name["agent_0"].get_position(), FETCH_POS)
    assert np.allclose(scene.objects_by_name["agent_0"].joints["head_tilt_joint"].get_state()[:2], FETCH_JOINT)
    assert np.allclose(scene.objects_by_name["agent_1"].get_position(), BROBOT_POS)
    for direction in ["x", "y", "z", "rx", "ry", "rz"]:
        # BUG FIX: the np.allclose result used to be discarded, so these
        # per-joint checks could never fail; assert it.
        assert np.allclose(
            scene.objects_by_name["agent_1"]
            .joints["right_hand_shoulder__right_hand_{}".format(direction)]
            .get_state()[0],
            BROBOT_JOINT[0],
        )
    s.disconnect()
| [
"pybullet.getBasePositionAndOrientation",
"igibson.objects.multi_object_wrappers.ObjectGrouper",
"igibson.objects.multi_object_wrappers.ObjectMultiplexer",
"igibson.simulator.Simulator",
"pybullet.resetBasePositionAndOrientation",
"numpy.array",
"igibson.robots.behavior_robot.BehaviorRobot",
"igibson.... | [((507, 532), 'numpy.array', 'np.array', (['[100, 100, 100]'], {}), '([100, 100, 100])\n', (515, 532), True, 'import numpy as np\n'), ((741, 765), 'numpy.array', 'np.array', (['[0.5, -2.5, 0]'], {}), '([0.5, -2.5, 0])\n', (749, 765), True, 'import numpy as np\n'), ((780, 800), 'numpy.array', 'np.array', (['[1.0, 0.5]'], {}), '([1.0, 0.5])\n', (788, 800), True, 'import numpy as np\n'), ((814, 839), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (822, 839), True, 'import numpy as np\n'), ((855, 876), 'numpy.array', 'np.array', (['[0.5, None]'], {}), '([0.5, None])\n', (863, 876), True, 'import numpy as np\n'), ((890, 910), 'numpy.array', 'np.array', (['[1, 33, 7]'], {}), '([1, 33, 7])\n', (898, 910), True, 'import numpy as np\n'), ((940, 984), 'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'use_pb_gui': '(False)'}), "(mode='headless', use_pb_gui=False)\n", (949, 984), False, 'from igibson.simulator import Simulator\n'), ((998, 1030), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['"""Rs_int"""'], {}), "('Rs_int')\n", (1020, 1030), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((1739, 1810), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['pillow_bid', 'PILLOW_POS', '[0, 0, 0, 1]'], {}), '(pillow_bid, PILLOW_POS, [0, 0, 0, 1])\n', (1772, 1810), True, 'import pybullet as p\n'), ((2912, 2939), 'igibson.objects.multi_object_wrappers.ObjectGrouper', 'ObjectGrouper', (['object_parts'], {}), '(object_parts)\n', (2925, 2939), False, 'from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer\n'), ((2952, 3047), 'igibson.objects.multi_object_wrappers.ObjectMultiplexer', 'ObjectMultiplexer', (["(whole_object.name + '_multiplexer')", '[whole_object, grouped_obj_parts]', '(0)'], {}), "(whole_object.name + '_multiplexer', [whole_object,\n grouped_obj_parts], 0)\n", 
(2969, 3047), False, 'from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer\n'), ((3225, 3246), 'igibson.robots.fetch.Fetch', 'Fetch', ([], {'name': '"""agent_0"""'}), "(name='agent_0')\n", (3230, 3246), False, 'from igibson.robots.fetch import Fetch\n'), ((3384, 3413), 'igibson.robots.behavior_robot.BehaviorRobot', 'BehaviorRobot', ([], {'name': '"""agent_1"""'}), "(name='agent_1')\n", (3397, 3413), False, 'from igibson.robots.behavior_robot import BehaviorRobot\n'), ((3822, 3866), 'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'use_pb_gui': '(False)'}), "(mode='headless', use_pb_gui=False)\n", (3831, 3866), False, 'from igibson.simulator import Simulator\n'), ((3879, 3911), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['"""Rs_int"""'], {}), "('Rs_int')\n", (3901, 3911), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((4882, 4926), 'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'use_pb_gui': '(False)'}), "(mode='headless', use_pb_gui=False)\n", (4891, 4926), False, 'from igibson.simulator import Simulator\n'), ((4939, 4971), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['"""Rs_int"""'], {}), "('Rs_int')\n", (4961, 4971), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((5895, 5939), 'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'use_pb_gui': '(False)'}), "(mode='headless', use_pb_gui=False)\n", (5904, 5939), False, 'from igibson.simulator import Simulator\n'), ((5952, 6020), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['"""Rs_int"""'], {'urdf_path': '"""changed_structure.urdf"""'}), "('Rs_int', urdf_path='changed_structure.urdf')\n", (5974, 6020), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((7314, 7358), 
'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'use_pb_gui': '(False)'}), "(mode='headless', use_pb_gui=False)\n", (7323, 7358), False, 'from igibson.simulator import Simulator\n'), ((7371, 7489), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['"""Rs_int"""'], {'urdf_path': '"""changed_structure.urdf"""', 'pybullet_filename': '"""changed_structure.bullet"""'}), "('Rs_int', urdf_path='changed_structure.urdf',\n pybullet_filename='changed_structure.bullet')\n", (7393, 7489), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((8740, 8784), 'igibson.simulator.Simulator', 'Simulator', ([], {'mode': '"""headless"""', 'use_pb_gui': '(False)'}), "(mode='headless', use_pb_gui=False)\n", (8749, 8784), False, 'from igibson.simulator import Simulator\n'), ((8797, 8865), 'igibson.scenes.igibson_indoor_scene.InteractiveIndoorScene', 'InteractiveIndoorScene', (['"""Rs_int"""'], {'urdf_path': '"""changed_structure.urdf"""'}), "('Rs_int', urdf_path='changed_structure.urdf')\n", (8819, 8865), False, 'from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\n'), ((1966, 2000), 'igibson.utils.assets_utils.get_ig_model_path', 'get_ig_model_path', (['"""apple"""', '"""00_0"""'], {}), "('apple', '00_0')\n", (1983, 2000), False, 'from igibson.utils.assets_utils import get_ig_model_path\n'), ((2456, 2490), 'igibson.utils.assets_utils.get_ig_model_path', 'get_ig_model_path', (['category', 'model'], {}), '(category, model)\n', (2473, 2490), False, 'from igibson.utils.assets_utils import get_ig_model_path\n'), ((2510, 2551), 'os.path.join', 'os.path.join', (['model_path', "(model + '.urdf')"], {}), "(model_path, model + '.urdf')\n", (2522, 2551), False, 'import os\n'), ((2641, 2749), 'igibson.objects.articulated_object.URDFObject', 'URDFObject', (['filename'], {'name': 'obj_name', 'category': 'category', 'model_path': 'model_path', 'scale': 'whole_object.scale'}), 
'(filename, name=obj_name, category=category, model_path=\n model_path, scale=whole_object.scale)\n', (2651, 2749), False, 'from igibson.objects.articulated_object import URDFObject\n'), ((2095, 2120), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2103, 2120), True, 'import numpy as np\n'), ((4306, 4333), 'numpy.array', 'np.array', (['joint_states[key]'], {}), '(joint_states[key])\n', (4314, 4333), True, 'import numpy as np\n'), ((4335, 4363), 'numpy.array', 'np.array', (['CABINET_JOINT[key]'], {}), '(CABINET_JOINT[key])\n', (4343, 4363), True, 'import numpy as np\n'), ((4746, 4789), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['pillow_bid'], {}), '(pillow_bid)\n', (4777, 4789), True, 'import pybullet as p\n'), ((5324, 5351), 'numpy.array', 'np.array', (['joint_states[key]'], {}), '(joint_states[key])\n', (5332, 5351), True, 'import numpy as np\n'), ((5353, 5381), 'numpy.array', 'np.array', (['CABINET_JOINT[key]'], {}), '(CABINET_JOINT[key])\n', (5361, 5381), True, 'import numpy as np\n'), ((5764, 5807), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['pillow_bid'], {}), '(pillow_bid)\n', (5795, 5807), True, 'import pybullet as p\n'), ((6400, 6427), 'numpy.array', 'np.array', (['joint_states[key]'], {}), '(joint_states[key])\n', (6408, 6427), True, 'import numpy as np\n'), ((6429, 6457), 'numpy.array', 'np.array', (['CABINET_JOINT[key]'], {}), '(CABINET_JOINT[key])\n', (6437, 6457), True, 'import numpy as np\n'), ((7823, 7850), 'numpy.array', 'np.array', (['joint_states[key]'], {}), '(joint_states[key])\n', (7831, 7850), True, 'import numpy as np\n'), ((7852, 7880), 'numpy.array', 'np.array', (['CABINET_JOINT[key]'], {}), '(CABINET_JOINT[key])\n', (7860, 7880), True, 'import numpy as np\n'), ((9195, 9222), 'numpy.array', 'np.array', (['joint_states[key]'], {}), '(joint_states[key])\n', (9203, 9222), True, 'import numpy as np\n'), ((9224, 9252), 'numpy.array', 
'np.array', (['CABINET_JOINT[key]'], {}), '(CABINET_JOINT[key])\n', (9232, 9252), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cycle-consistency evaluator."""
import itertools
from typing import List
from .base import Evaluator
from .base import EvaluatorOutput
import numpy as np
from scipy.spatial.distance import cdist
from xirl.models import SelfSupervisedOutput
class _CycleConsistency(Evaluator):
  """Base class for cycle consistency evaluation.

  A frame of one video is mapped to its nearest neighbour in another video
  (and, for the 3-way variant, chained through a third), then mapped back;
  the fraction of frames that return to within one frame of their starting
  index is the score.
  """
  def __init__(self, n_way, stride, distance):
    """Constructor.

    Args:
      n_way: The number of cycle-consistency ways (2 or 3).
      stride: Controls how many frames are skipped in each video sequence.
        For example, if the embedding vector of the first video is
        (100, 128), a stride of 5 reduces it to (20, 128).
      distance: The distance metric to use when calculating
        nearest-neighbours; one of "sqeuclidean" or "cosine".

    Raises:
      ValueError: If the distance metric is invalid.
    """
    super().__init__(inter_class=False)
    assert n_way in [2, 3], "n_way must be 2 or 3."
    assert isinstance(stride, int), "stride must be an integer."
    if distance not in ["sqeuclidean", "cosine"]:
      raise ValueError(
          "{} is not a supported distance metric.".format(distance))
    self.n_way = n_way
    self.stride = stride
    self.distance = distance
  def _evaluate_two_way(self, embs):
    """Two-way cycle consistency over every ordered pair of videos."""
    num_embs = len(embs)
    total_combinations = num_embs * (num_embs - 1)
    ccs = np.zeros((total_combinations))
    idx = 0
    for i in range(num_embs):
      query_emb = embs[i][::self.stride]
      # NOTE(review): ground_truth holds original frame indices (0, s, 2s,
      # ...) while nns below indexes the strided arrays (0, 1, 2, ...);
      # for stride > 1 these ranges differ -- confirm intended.
      ground_truth = np.arange(len(embs[i]))[::self.stride]
      for j in range(num_embs):
        if i == j:
          continue
        candidate_emb = embs[j][::self.stride]
        dists = cdist(query_emb, candidate_emb, self.distance)
        # Map each query frame to its NN in the candidate video (inner
        # argmin over axis 1), then map that candidate column back to the
        # closest query frame (outer argmin over axis 0) -- the cycle.
        nns = np.argmin(dists[:, np.argmin(dists, axis=1)], axis=0)
        # Fraction of frames that cycle back to within one frame.
        ccs[idx] = np.mean(np.abs(nns - ground_truth) <= 1)
        idx += 1
    ccs = ccs[~np.isnan(ccs)]
    return EvaluatorOutput(scalar=np.mean(ccs))
  def _evaluate_three_way(self, embs):
    """Three-way cycle consistency over every ordered video triplet."""
    num_embs = len(embs)
    # All ordered triplets of video indices; np.stack makes each cycle a
    # mutable row so the backward check can reverse it in place below.
    cycles = np.stack(list(itertools.permutations(np.arange(num_embs), 3)))
    total_combinations = len(cycles)
    ccs = np.zeros((total_combinations))
    for c_idx, cycle in enumerate(cycles):
      # Forward consistency check. Each cycle will be a length 3
      # permutation, e.g. U - V - W. We compute nearest neighbours across
      # consecutive pairs in the cycle and loop back to the first cycle
      # index to obtain: U - V - W - U.
      query_emb = None
      for i in range(len(cycle)):
        if query_emb is None:
          query_emb = embs[cycle[i]][::self.stride]
        candidate_emb = embs[cycle[(i + 1) % len(cycle)]][::self.stride]
        dists = cdist(query_emb, candidate_emb, self.distance)
        nns_forward = np.argmin(dists, axis=1)
        # Chain the mapping: matched candidate frames become next queries.
        query_emb = candidate_emb[nns_forward]
      # NOTE(review): same stride > 1 index-range caveat as in the 2-way
      # check above -- confirm intended.
      ground_truth_forward = np.arange(len(embs[cycle[0]]))[::self.stride]
      cc_forward = np.abs(nns_forward - ground_truth_forward) <= 1
      # Backward consistency check. A backward check is equivalent to
      # reversing the middle pair V - W and performing a forward check,
      # e.g. U - W - V - U.
      cycle[1:] = cycle[1:][::-1]
      query_emb = None
      for i in range(len(cycle)):
        if query_emb is None:
          query_emb = embs[cycle[i]][::self.stride]
        candidate_emb = embs[cycle[(i + 1) % len(cycle)]][::self.stride]
        dists = cdist(query_emb, candidate_emb, self.distance)
        nns_backward = np.argmin(dists, axis=1)
        query_emb = candidate_emb[nns_backward]
      ground_truth_backward = np.arange(len(embs[cycle[0]]))[::self.stride]
      cc_backward = np.abs(nns_backward - ground_truth_backward) <= 1
      # Require consistency both ways.
      cc = np.logical_and(cc_forward, cc_backward)
      ccs[c_idx] = np.mean(cc)
    ccs = ccs[~np.isnan(ccs)]
    return EvaluatorOutput(scalar=np.mean(ccs))
  def evaluate(self, outs):
    """Compute the mean cycle-consistency score over all pairs/triplets.

    Args:
      outs: List of SelfSupervisedOutput objects; only their ``embs``
        attribute is used.

    Returns:
      EvaluatorOutput with the scalar mean score.
    """
    embs = [o.embs for o in outs]
    if self.n_way == 2:
      return self._evaluate_two_way(embs)
    return self._evaluate_three_way(embs)
class TwoWayCycleConsistency(_CycleConsistency):
  """Two-way cycle consistency evaluator [1].

  References:
    [1]: https://arxiv.org/abs/1805.11592
  """
  def __init__(self, stride, distance):
    super().__init__(2, stride=stride, distance=distance)
class ThreeWayCycleConsistency(_CycleConsistency):
  """3-way cycle consistency evaluator [1].

  Note: the docstring previously said "2-way"; this evaluator performs the
  3-way check.

  References:
    [1]: https://arxiv.org/abs/1805.11592
  """
  def __init__(self, stride, distance):
    super().__init__(3, stride, distance)
| [
"scipy.spatial.distance.cdist",
"numpy.abs",
"numpy.logical_and",
"numpy.zeros",
"numpy.isnan",
"numpy.argmin",
"numpy.mean",
"numpy.arange"
] | [((2000, 2028), 'numpy.zeros', 'np.zeros', (['total_combinations'], {}), '(total_combinations)\n', (2008, 2028), True, 'import numpy as np\n'), ((2804, 2832), 'numpy.zeros', 'np.zeros', (['total_combinations'], {}), '(total_combinations)\n', (2812, 2832), True, 'import numpy as np\n'), ((4411, 4450), 'numpy.logical_and', 'np.logical_and', (['cc_forward', 'cc_backward'], {}), '(cc_forward, cc_backward)\n', (4425, 4450), True, 'import numpy as np\n'), ((4470, 4481), 'numpy.mean', 'np.mean', (['cc'], {}), '(cc)\n', (4477, 4481), True, 'import numpy as np\n'), ((2307, 2353), 'scipy.spatial.distance.cdist', 'cdist', (['query_emb', 'candidate_emb', 'self.distance'], {}), '(query_emb, candidate_emb, self.distance)\n', (2312, 2353), False, 'from scipy.spatial.distance import cdist\n'), ((2514, 2527), 'numpy.isnan', 'np.isnan', (['ccs'], {}), '(ccs)\n', (2522, 2527), True, 'import numpy as np\n'), ((2563, 2575), 'numpy.mean', 'np.mean', (['ccs'], {}), '(ccs)\n', (2570, 2575), True, 'import numpy as np\n'), ((3357, 3403), 'scipy.spatial.distance.cdist', 'cdist', (['query_emb', 'candidate_emb', 'self.distance'], {}), '(query_emb, candidate_emb, self.distance)\n', (3362, 3403), False, 'from scipy.spatial.distance import cdist\n'), ((3426, 3450), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(1)'}), '(dists, axis=1)\n', (3435, 3450), True, 'import numpy as np\n'), ((3592, 3634), 'numpy.abs', 'np.abs', (['(nns_forward - ground_truth_forward)'], {}), '(nns_forward - ground_truth_forward)\n', (3598, 3634), True, 'import numpy as np\n'), ((4072, 4118), 'scipy.spatial.distance.cdist', 'cdist', (['query_emb', 'candidate_emb', 'self.distance'], {}), '(query_emb, candidate_emb, self.distance)\n', (4077, 4118), False, 'from scipy.spatial.distance import cdist\n'), ((4142, 4166), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(1)'}), '(dists, axis=1)\n', (4151, 4166), True, 'import numpy as np\n'), ((4311, 4355), 'numpy.abs', 'np.abs', (['(nns_backward - 
ground_truth_backward)'], {}), '(nns_backward - ground_truth_backward)\n', (4317, 4355), True, 'import numpy as np\n'), ((4497, 4510), 'numpy.isnan', 'np.isnan', (['ccs'], {}), '(ccs)\n', (4505, 4510), True, 'import numpy as np\n'), ((4546, 4558), 'numpy.mean', 'np.mean', (['ccs'], {}), '(ccs)\n', (4553, 4558), True, 'import numpy as np\n'), ((2731, 2750), 'numpy.arange', 'np.arange', (['num_embs'], {}), '(num_embs)\n', (2740, 2750), True, 'import numpy as np\n'), ((2449, 2475), 'numpy.abs', 'np.abs', (['(nns - ground_truth)'], {}), '(nns - ground_truth)\n', (2455, 2475), True, 'import numpy as np\n'), ((2387, 2411), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(1)'}), '(dists, axis=1)\n', (2396, 2411), True, 'import numpy as np\n')] |
"""
The inference algorithm
introduce a new format of fit
trial isolation
unequal trial ready
"""
import copy
import logging
import warnings
import numpy as np
from numpy import identity, einsum
from scipy.linalg import solve, norm, svd, LinAlgError
from . import gp
from .base import Model
from .callback import Saver, show
from .preprocess import get_config, get_params, fill_trials, fill_params, initialize
from .util import cut_trials, clip
from .gp import make_cholesky
from .evaluation import timer
from .math import trunc_exp
logger = logging.getLogger(__name__)
def estep(trials, params, config):
    """Update variational distribution q (E step).

    For each trial, performs one-latent-at-a-time updates of the posterior
    mean ``mu`` using the low-rank factor G of the GP prior
    (``params["cholesky"]``), then refreshes the weight array ``w`` and,
    when ``config["method"] == "VB"``, the posterior variance ``v``.
    All trial dicts are updated in place.

    Args:
        trials: list of trial dicts with keys "y", "x", "mu", "w", "v",
            "dmu".
        params: model parameters ("a", "b", "noise", "likelihood",
            "cholesky", "zdim", "rank").
        config: algorithm options ("Eniter", "dmu_bound", "tol", "method").
    """
    niter = config["Eniter"]  # maximum number of E-step iterations
    if niter < 1:
        return
    # See the explanation in mstep.
    # constrain_loading(trials, params, config)
    # dimensionalities
    zdim = params["zdim"]
    rank = params["rank"]  # rank of prior covariance
    likelihood = params["likelihood"]
    # misc
    dmu_bound = config["dmu_bound"]  # clip bound for the mean update
    tol = config["tol"]  # kept for the (disabled) convergence check below
    method = config["method"]
    # per-channel observation-model masks
    poiss_mask = likelihood == "poisson"
    gauss_mask = likelihood == "gaussian"
    # parameters
    a = params["a"]  # loading; row a[l, :] belongs to latent dimension l
    b = params["b"]  # regression coefficients
    noise = params["noise"]
    gauss_noise = noise[gauss_mask]
    Ir = identity(rank)
    # boolean indexing creates copies
    # pull indexing out of the loop for performance
    for i in range(niter):
        # TODO: parallel trials ?
        for trial in trials:
            y = trial["y"]
            x = trial["x"]
            mu = trial["mu"]
            w = trial["w"]
            v = trial["v"]
            dmu = trial["dmu"]
            prior = params["cholesky"][
                y.shape[0]
            ]  # TODO: adapt unequal lengths, move into trials
            residual = np.empty_like(y, dtype=float)
            U = np.empty_like(y, dtype=float)
            y_poiss = y[:, poiss_mask]
            y_gauss = y[:, gauss_mask]
            # linear predictor: latent part plus regression part
            xb = einsum("ijk, jk -> ik", x, b)
            eta = mu @ a + xb
            # expected Poisson rate under q (second-order correction via v)
            r = trunc_exp(eta + 0.5 * v @ (a ** 2))
            # mean of y
            mean_gauss = eta[:, gauss_mask]
            mean_poiss = r[:, poiss_mask]
            for l in range(zdim):
                G = prior[l]  # low-rank prior factor for latent l
                # working residuals
                # extensible to many other distributions
                # see GLM's working residuals
                residual[:, poiss_mask] = y_poiss - mean_poiss
                residual[:, gauss_mask] = (y_gauss - mean_gauss) / gauss_noise
                wadj = w[:, [l]]  # keep dimension
                GtWG = G.T @ (wadj * G)
                u = G @ (G.T @ (residual @ a[l, :])) - mu[:, l]
                try:
                    # NOTE(review): sym_pos is deprecated/removed in newer
                    # SciPy; equivalent is assume_a="pos" -- confirm the
                    # pinned SciPy version.
                    M = solve(Ir + GtWG, (wadj * G).T @ u, sym_pos=True)
                    delta_mu = u - G @ ((wadj * G).T @ u) + G @ (GtWG @ M)
                    clip(delta_mu, dmu_bound)
                except Exception as e:
                    # on numerical failure, skip this latent's update
                    logger.exception(repr(e), exc_info=True)
                    delta_mu = 0
                dmu[:, l] = delta_mu
                mu[:, l] += delta_mu
                # refresh the predictor after each latent's update
                # TODO: remove duplicated computation
                eta = mu @ a + xb
                r = trunc_exp(eta + 0.5 * v @ (a ** 2))
            # working weights: rate for Poisson, 1/noise for Gaussian
            U[:, poiss_mask] = r[:, poiss_mask]
            U[:, gauss_mask] = 1 / gauss_noise
            w = U @ (a.T ** 2)
            if method == "VB":
                # posterior variance update (VB only)
                for l in range(zdim):
                    G = prior[l]
                    GtWG = G.T @ (w[:, l, np.newaxis] * G)
                    try:
                        M = solve(Ir + GtWG, GtWG, sym_pos=True)
                        v[:, l] = np.sum(G * (G - G @ GtWG + G @ (GtWG @ M)), axis=1)
                    except Exception as e:
                        logger.exception(repr(e), exc_info=True)
            # make sure save all changes
            # TODO: make inline modification
            trial["mu"] = mu
            trial["w"] = w
            trial["v"] = v
            trial["dmu"] = dmu
    # center over all trials if not only infer posterior
    # constrain_mu(model)
    # if norm(dmu) < tol * norm(mu):
    #     break
def mstep(trials, params, config):
    """Optimize loading and regression (M step).

    Updates the loading matrix ``a``, the regression coefficients ``b`` and
    the Gaussian observation noise in ``params`` (in place), given the
    current posterior stored in ``trials``. Poisson channels use a
    gradient/Newton step; Gaussian channels use closed-form least squares.

    Args:
        trials: list of trial dicts with keys "y", "x", "mu", "v".
        params: model parameters; "a", "b", "noise", "da", "db" are
            written.
        config: algorithm options ("Mniter", "use_hessian", "da_bound",
            "db_bound", "learning_rate", ...).
    """
    niter = config["Mniter"]  # maximum number of M-step iterations
    if niter < 1:
        return
    # It's more proper to constrain the latent before mstep.
    # If the parameters are fixed, it's no need to optimize the posterior.
    # Besides, the constraint modifies the loading and bias.
    # constrain_latent(trials, params, config)
    # dimensionalities
    ydim = params["ydim"]
    xdim = params["xdim"]
    zdim = params["zdim"]
    rank = params["rank"]  # rank of prior covariance
    ntrial = len(trials)  # number of trials
    # parameters
    a = params["a"]
    b = params["b"]
    likelihood = params["likelihood"]
    noise = params["noise"]
    poiss_mask = likelihood == "poisson"
    gauss_mask = likelihood == "gaussian"
    gauss_noise = noise[gauss_mask]
    da = params["da"]
    db = params["db"]
    # misc
    use_hessian = config["use_hessian"]
    da_bound = config["da_bound"]
    db_bound = config["db_bound"]
    tol = config["tol"]  # kept for the (disabled) convergence check below
    method = config["method"]
    learning_rate = config["learning_rate"]
    # stack all trials along the time axis
    y = np.concatenate([trial["y"] for trial in trials], axis=0)
    x = np.concatenate(
        [trial["x"] for trial in trials], axis=0
    )  # TODO: check dimensionality of x
    mu = np.concatenate([trial["mu"] for trial in trials], axis=0)
    v = np.concatenate([trial["v"] for trial in trials], axis=0)
    for i in range(niter):
        eta = mu @ a + einsum("ijk, jk -> ik", x, b)
        # (time, regression, neuron) x (regression, neuron) -> (time, neuron) # TODO: use matmul broadcast
        r = trunc_exp(eta + 0.5 * v @ (a ** 2))
        noise = np.var(y - eta, axis=0, ddof=0)  # MLE
        for n in range(ydim):  # channel-by-channel update
            if likelihood[n] == "poisson":
                # loading: gradient of the expected Poisson log-likelihood
                mu_plus_v_times_a = mu + v * a[:, n]
                grad_a = mu.T @ y[:, n] - mu_plus_v_times_a.T @ r[:, n]
                if use_hessian:
                    # Newton step with the negative Hessian
                    nhess_a = mu_plus_v_times_a.T @ (
                        r[:, n, np.newaxis] * mu_plus_v_times_a
                    )
                    nhess_a[np.diag_indices_from(nhess_a)] += r[:, n] @ v
                    try:
                        # NOTE(review): sym_pos deprecated in newer SciPy
                        delta_a = solve(nhess_a, grad_a, sym_pos=True)
                    except Exception as e:
                        # fall back to a gradient step on failure
                        logger.exception(repr(e), exc_info=True)
                        delta_a = learning_rate * grad_a
                else:
                    delta_a = learning_rate * grad_a
                clip(delta_a, da_bound)
                da[:, n] = delta_a
                a[:, n] += delta_a
                # regression
                grad_b = x[..., n].T @ (y[:, n] - r[:, n])
                if use_hessian:
                    nhess_b = x[..., n].T @ (r[:, np.newaxis, n] * x[..., n])
                    try:
                        delta_b = solve(nhess_b, grad_b, sym_pos=True)
                    except Exception as e:
                        logger.exception(repr(e), exc_info=True)
                        delta_b = learning_rate * grad_b
                else:
                    delta_b = learning_rate * grad_b
                clip(delta_b, db_bound)
                db[:, n] = delta_b
                b[:, n] += delta_b
            elif likelihood[n] == "gaussian":
                # a's least squares solution for Gaussian channel
                # (m'm + diag(j'v))^-1 m'(y - Hb)
                M = mu.T @ mu
                M[np.diag_indices_from(M)] += np.sum(v, axis=0)
                a[:, n] = solve(M, mu.T @ (y[:, n] - x[..., n] @ b[:, n]), sym_pos=True)
                # b's least squares solution for Gaussian channel
                # (H'H)^-1 H'(y - ma)
                b[:, n] = solve(
                    x[..., n].T @ x[..., n],
                    x[..., n].T @ (y[:, n] - mu @ a[:, n]),
                    sym_pos=True,
                )
                b[1:, n] = 0
                # TODO: only make history filter components zeros
            else:
                # unknown likelihood: leave channel untouched
                pass
    # update parameters in fit
    # TODO: make inline modification
    params["a"] = a
    params["b"] = b
    params["noise"] = noise
    # normalize loading by latent and rescale latent
    # constrain_a(model)
    # if norm(da) < tol * norm(a) and norm(db) < tol * norm(b):
    #     break
def hstep(trials, params, config):
    """Tune GP hyperparameters (H step) when enabled via ``config["Hstep"]``."""
    if config["Hstep"]:
        gp.optimize(trials, params, config)
def infer(trials, params, config):
    """Infer the posterior only (a single E step with parameters held fixed)."""
    estep(trials, params, config)
def vem(trials, params, config):
    """Variational EM.

    Alternates E (posterior), M (loading/regression) and H (GP
    hyperparameter) steps for ``config["EMniter"]`` iterations, recording
    wall-clock timings in ``config["runtime"]`` and invoking user callbacks
    after every iteration.

    This function should not know whether ``trials`` are the original or
    segmented ones; the caller decides which to pass (segments speed up
    estimation and hyperparameter tuning).

    Args:
        trials: list of trial dicts, updated in place.
        params: model parameters, updated in place.
        config: algorithm options; gains a "runtime" entry.
    """
    callbacks = config["callbacks"]
    niter = config["EMniter"]
    # profile and debug purpose; invalidated on every new run
    runtime = {
        "it": 0,
        "e_elapsed": [],
        "m_elapsed": [],
        "h_elapsed": [],
        "em_elapsed": [],
    }
    #######################
    # iterative algorithm #
    #######################
    for it in range(niter):
        runtime["it"] += 1
        with timer() as em_elapsed:
            # E step: update the posterior under a normalized loading.
            with timer() as estep_elapsed:
                constrain_loading(trials, params, config)
                estep(trials, params, config)
            # M step: update loading/regression under a constrained latent.
            with timer() as mstep_elapsed:
                constrain_latent(trials, params, config)
                mstep(trials, params, config)
            # H step: tune GP hyperparameters (optional).
            with timer() as hstep_elapsed:
                hstep(trials, params, config)
        runtime["e_elapsed"].append(estep_elapsed())
        runtime["m_elapsed"].append(mstep_elapsed())
        runtime["h_elapsed"].append(hstep_elapsed())
        runtime["em_elapsed"].append(em_elapsed())
        config["runtime"] = runtime
        for callback in callbacks:
            # Callbacks are best-effort: a failing callback must not abort
            # the fit.
            # BUG FIX: was a bare ``except: pass`` which also swallowed
            # KeyboardInterrupt/SystemExit and hid callback errors; now only
            # Exception subclasses are caught and the failure is logged.
            try:
                callback(trials, params, config)
            except Exception:
                logger.exception("callback %r failed", callback)
        #####################
        # convergence check #
        #####################
        # The check is currently disabled (should_stop is always False).
        # The dead per-iteration concatenations of mu/dmu that used to feed
        # it were removed; the intended criterion was:
        # norm(dmu) < tol * norm(mu) and norm(da) < tol * norm(a) and
        # norm(db) < tol * norm(b)
        should_stop = False
        if should_stop:
            break
    ##############################
    # end of iterative procedure #
    ##############################
def constrain_latent(trials, params, config):
    """Center and/or rescale the posterior latent means across all trials.

    ``config["constrain_latent"]`` selects the constraint: "location"
    subtracts the grand mean, "scale" divides by the grand standard
    deviation, "both" does both, and a falsy value or "none" is a no-op.
    The bias and loading in ``params`` are adjusted so the model's linear
    predictor is unchanged.
    """
    mode = config["constrain_latent"]
    if not mode or mode == "none":
        return
    stacked = np.concatenate([t["mu"] for t in trials], axis=0)
    center = stacked.mean(axis=0, keepdims=True)
    spread = stacked.std(axis=0, keepdims=True)
    if mode == "location" or mode == "both":
        for t in trials:
            t["mu"] -= center
        # shift the bias so mu @ a + b stays the same
        params["b"][0, :] += np.squeeze(center @ params["a"])
    if mode == "scale" or mode == "both":
        for t in trials:
            t["mu"] /= spread
        # rescale the loading to compensate
        params["a"] *= spread.T
def constrain_loading(trials, params, config):
    """Normalize the loading matrix, rescaling latent means to compensate.

    ``config["constrain_loading"]`` selects the scheme: "svd" rotates the
    latents onto the right singular vectors of the loading, "fro" divides
    by the Frobenius norm, any other truthy value is passed to
    ``scipy.linalg.norm`` as the row-wise order, and a falsy value or
    "none" disables the constraint.
    """
    scheme = config["constrain_loading"]
    if not scheme or scheme == "none":
        return
    eps = config["eps"]
    loading = params["a"]
    if scheme == "svd":
        # A = U S Vt; loading @ Vt.T equals U S
        _, _, vt = svd(loading, full_matrices=False)
        rotation = loading @ vt.T
        for trial in trials:
            trial["mu"] = trial["mu"] @ rotation
        params["a"] = vt
    else:
        if scheme == "fro":
            scale = norm(loading, ord="fro") + eps
        else:
            scale = norm(loading, ord=scheme, axis=1, keepdims=True) + eps
        params["a"] /= scale
        for trial in trials:
            trial["mu"] *= scale.T
def update_w(trials, params, config):
    """Refresh the working-weight array ``trial["w"]`` of every trial.

    w = U @ (a.T ** 2), where U holds the expected Poisson rate for
    Poisson channels and the inverse observation noise for Gaussian
    channels.
    """
    likelihood = params["likelihood"]
    is_poisson = likelihood == "poisson"
    is_gaussian = likelihood == "gaussian"
    a, b = params["a"], params["b"]
    gauss_noise = params["noise"][is_gaussian]
    for trial in trials:
        y = trial["y"]
        x = trial["x"]
        mu = trial["mu"]
        w = trial.setdefault("w", np.zeros_like(mu))
        v = trial.setdefault("v", np.zeros_like(mu))
        # (neuron, time, regression) x (regression, neuron) -> (time, neuron)
        linear = mu @ a + einsum("ijk, jk -> ik", x, b)
        rate = trunc_exp(linear + 0.5 * v @ (a ** 2))
        U = np.empty_like(rate)
        U[:, is_poisson] = rate[:, is_poisson]
        U[:, is_gaussian] = 1 / gauss_noise
        trial["w"] = U @ (a.T ** 2)
def update_v(trials, params, config):
    """Refresh the posterior variance ``trial["v"]`` of every trial.

    Only runs under the "VB" method. Uses the low-rank prior factor G per
    latent dimension together with the current weights ``w``; trial dicts
    are updated in place ("w"/"v" are created as zeros if missing).
    """
    if config["method"] != "VB":
        return
    for trial in trials:
        zdim = params["zdim"]
        mu = trial["mu"]
        w = trial.setdefault("w", np.zeros_like(mu))
        v = trial.setdefault("v", np.zeros_like(mu))
        # prior factors are keyed by trial length (number of time bins)
        prior = params["cholesky"][mu.shape[0]]
        Ir = identity(prior[0].shape[-1])
        for l in range(zdim):
            G = prior[l]
            GtWG = G.T @ (w[:, [l]] * G)
            try:
                # diagonal of the posterior covariance for latent l
                # NOTE(review): sym_pos is deprecated/removed in newer
                # SciPy; equivalent is assume_a="pos".
                v[:, l] = np.sum(
                    G
                    * (
                        G - G @ GtWG + G @ (GtWG @ solve(Ir + GtWG, GtWG, sym_pos=True))
                    ),
                    axis=1,
                )
            except LinAlgError:
                # keep the previous variance if the solve fails
                warnings.warn("singular I + G'WG")
class VLGP(Model):
    """Variational Latent Gaussian Process model.

    Wraps the module-level vEM routines into a scikit-learn-like object
    with a ``fit`` entry point.
    """
    def __init__(self, n_factors, random_state=0, **kwargs):
        # number of latent factors to infer
        self.n_factors = n_factors
        self.random_state = random_state
        # loading ("a") and regression ("b") parameters, set by fit()
        self._weight = None
        self._bias = None
        self.setup(**kwargs)
    def fit(self, trials, **kwargs):
        """Fit the vLGP model to data using vEM.

        :param trials: list of trials
        :return: the trials containing the latent factors
        """
        config = get_config(**kwargs)
        # add built-in callbacks
        callbacks = config["callbacks"]
        if "path" in config:
            saver = Saver()
            callbacks.extend([show, saver.save])
        config["callbacks"] = callbacks
        params = get_params(trials, self.n_factors, **kwargs)
        print("Initializing...")
        initialize(trials, params, config)
        # fill arrays
        fill_params(params)
        fill_trials(trials)
        make_cholesky(trials, params, config)
        update_w(trials, params, config)
        update_v(trials, params, config)
        # estimate on shorter segments for speed
        subtrials = cut_trials(trials, params, config)
        make_cholesky(subtrials, params, config)
        fill_trials(subtrials)
        # snapshot of the initial parameters (taken before "initial" exists)
        params["initial"] = copy.deepcopy(params)
        # VEM
        print("Fitting...")
        vem(subtrials, params, config)
        # E step only for inference given above estimated parameters and
        # hyperparameters, on the full-length trials
        make_cholesky(trials, params, config)
        update_w(trials, params, config)
        update_v(trials, params, config)
        print("Inferring...")
        infer(trials, params, config)
        print("Done")
        self._weight = params["a"]
        self._bias = params["b"]
        return trials
    def infer(self, trials):
        """Infer latents for new trials; requires a fitted model.

        :raises ValueError: if the model has not been fitted.
        :raises NotImplementedError: always, once fitted (not implemented yet).
        """
        # BUG FIX: was ``self.isfiited`` (typo), which raised AttributeError
        # instead of the intended ValueError.
        if not self.isfitted:
            raise ValueError(
                "This model is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this method."
            )
        raise NotImplementedError()
    def __eq__(self, other):
        # Two models are equal iff they have the same latent dimensionality
        # and identical fitted parameters.
        if (
            isinstance(other, VLGP)
            and self.n_factors == other.n_factors
            and np.array_equal(self.weight, other.weight)
            and np.array_equal(self.bias, other.bias)
        ):
            return True
        return False
    def setup(self, **kwargs):
        """Hook for subclasses to consume extra constructor options."""
        pass
    @property
    def isfitted(self):
        # fitted iff fit() has stored a loading matrix
        return self.weight is not None
    @property
    def weight(self):
        return self._weight
    @property
    def bias(self):
        return self._bias
| [
"scipy.linalg.solve",
"copy.deepcopy",
"numpy.zeros_like",
"numpy.array_equal",
"numpy.sum",
"numpy.diag_indices_from",
"numpy.empty_like",
"numpy.identity",
"logging.getLogger",
"numpy.einsum",
"scipy.linalg.svd",
"scipy.linalg.norm",
"numpy.squeeze",
"warnings.warn",
"numpy.var",
"nu... | [((545, 572), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (562, 572), False, 'import logging\n'), ((1364, 1378), 'numpy.identity', 'identity', (['rank'], {}), '(rank)\n', (1372, 1378), False, 'from numpy import identity, einsum\n'), ((5960, 6016), 'numpy.concatenate', 'np.concatenate', (["[trial['y'] for trial in trials]"], {'axis': '(0)'}), "([trial['y'] for trial in trials], axis=0)\n", (5974, 6016), True, 'import numpy as np\n'), ((6025, 6081), 'numpy.concatenate', 'np.concatenate', (["[trial['x'] for trial in trials]"], {'axis': '(0)'}), "([trial['x'] for trial in trials], axis=0)\n", (6039, 6081), True, 'import numpy as np\n'), ((6140, 6197), 'numpy.concatenate', 'np.concatenate', (["[trial['mu'] for trial in trials]"], {'axis': '(0)'}), "([trial['mu'] for trial in trials], axis=0)\n", (6154, 6197), True, 'import numpy as np\n'), ((6206, 6262), 'numpy.concatenate', 'np.concatenate', (["[trial['v'] for trial in trials]"], {'axis': '(0)'}), "([trial['v'] for trial in trials], axis=0)\n", (6220, 6262), True, 'import numpy as np\n'), ((12349, 12406), 'numpy.concatenate', 'np.concatenate', (["[trial['mu'] for trial in trials]"], {'axis': '(0)'}), "([trial['mu'] for trial in trials], axis=0)\n", (12363, 12406), True, 'import numpy as np\n'), ((6516, 6547), 'numpy.var', 'np.var', (['(y - eta)'], {'axis': '(0)', 'ddof': '(0)'}), '(y - eta, axis=0, ddof=0)\n', (6522, 6547), True, 'import numpy as np\n'), ((11536, 11593), 'numpy.concatenate', 'np.concatenate', (["[trial['mu'] for trial in trials]"], {'axis': '(0)'}), "([trial['mu'] for trial in trials], axis=0)\n", (11550, 11593), True, 'import numpy as np\n'), ((11656, 11714), 'numpy.concatenate', 'np.concatenate', (["[trial['dmu'] for trial in trials]"], {'axis': '(0)'}), "([trial['dmu'] for trial in trials], axis=0)\n", (11670, 11714), True, 'import numpy as np\n'), ((12750, 12792), 'numpy.squeeze', 'np.squeeze', (["(mean_over_trials @ params['a'])"], {}), "(mean_over_trials 
@ params['a'])\n", (12760, 12792), True, 'import numpy as np\n'), ((13325, 13352), 'scipy.linalg.svd', 'svd', (['a'], {'full_matrices': '(False)'}), '(a, full_matrices=False)\n', (13328, 13352), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((14400, 14416), 'numpy.empty_like', 'np.empty_like', (['r'], {}), '(r)\n', (14413, 14416), True, 'import numpy as np\n'), ((14877, 14905), 'numpy.identity', 'identity', (['prior[0].shape[-1]'], {}), '(prior[0].shape[-1])\n', (14885, 14905), False, 'from numpy import identity, einsum\n'), ((16548, 16569), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (16561, 16569), False, 'import copy\n'), ((1883, 1912), 'numpy.empty_like', 'np.empty_like', (['y'], {'dtype': 'float'}), '(y, dtype=float)\n', (1896, 1912), True, 'import numpy as np\n'), ((1929, 1958), 'numpy.empty_like', 'np.empty_like', (['y'], {'dtype': 'float'}), '(y, dtype=float)\n', (1942, 1958), True, 'import numpy as np\n'), ((2159, 2188), 'numpy.einsum', 'einsum', (['"""ijk, jk -> ik"""', 'x', 'b'], {}), "('ijk, jk -> ik', x, b)\n", (2165, 2188), False, 'from numpy import identity, einsum\n'), ((6314, 6343), 'numpy.einsum', 'einsum', (['"""ijk, jk -> ik"""', 'x', 'b'], {}), "('ijk, jk -> ik', x, b)\n", (6320, 6343), False, 'from numpy import identity, einsum\n'), ((14136, 14153), 'numpy.zeros_like', 'np.zeros_like', (['mu'], {}), '(mu)\n', (14149, 14153), True, 'import numpy as np\n'), ((14189, 14206), 'numpy.zeros_like', 'np.zeros_like', (['mu'], {}), '(mu)\n', (14202, 14206), True, 'import numpy as np\n'), ((14310, 14339), 'numpy.einsum', 'einsum', (['"""ijk, jk -> ik"""', 'x', 'b'], {}), "('ijk, jk -> ik', x, b)\n", (14316, 14339), False, 'from numpy import identity, einsum\n'), ((14743, 14760), 'numpy.zeros_like', 'np.zeros_like', (['mu'], {}), '(mu)\n', (14756, 14760), True, 'import numpy as np\n'), ((14796, 14813), 'numpy.zeros_like', 'np.zeros_like', (['mu'], {}), '(mu)\n', (14809, 14813), True, 'import numpy as np\n'), 
((17466, 17507), 'numpy.array_equal', 'np.array_equal', (['self.weight', 'other.weight'], {}), '(self.weight, other.weight)\n', (17480, 17507), True, 'import numpy as np\n'), ((17524, 17561), 'numpy.array_equal', 'np.array_equal', (['self.bias', 'other.bias'], {}), '(self.bias, other.bias)\n', (17538, 17561), True, 'import numpy as np\n'), ((13546, 13564), 'scipy.linalg.norm', 'norm', (['a'], {'ord': '"""fro"""'}), "(a, ord='fro')\n", (13550, 13564), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((13601, 13647), 'scipy.linalg.norm', 'norm', (['a'], {'ord': 'constraint', 'axis': '(1)', 'keepdims': '(True)'}), '(a, ord=constraint, axis=1, keepdims=True)\n', (13605, 13647), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((3382, 3430), 'scipy.linalg.solve', 'solve', (['(Ir + GtWG)', '((wadj * G).T @ u)'], {'sym_pos': '(True)'}), '(Ir + GtWG, (wadj * G).T @ u, sym_pos=True)\n', (3387, 3430), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((8360, 8377), 'numpy.sum', 'np.sum', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (8366, 8377), True, 'import numpy as np\n'), ((8404, 8466), 'scipy.linalg.solve', 'solve', (['M', '(mu.T @ (y[:, n] - x[..., n] @ b[:, n]))'], {'sym_pos': '(True)'}), '(M, mu.T @ (y[:, n] - x[..., n] @ b[:, n]), sym_pos=True)\n', (8409, 8466), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((8598, 8686), 'scipy.linalg.solve', 'solve', (['(x[..., n].T @ x[..., n])', '(x[..., n].T @ (y[:, n] - mu @ a[:, n]))'], {'sym_pos': '(True)'}), '(x[..., n].T @ x[..., n], x[..., n].T @ (y[:, n] - mu @ a[:, n]),\n sym_pos=True)\n', (8603, 8686), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((15306, 15340), 'warnings.warn', 'warnings.warn', (['"""singular I + G\'WG"""'], {}), '("singular I + G\'WG")\n', (15319, 15340), False, 'import warnings\n'), ((4233, 4269), 'scipy.linalg.solve', 'solve', (['(Ir + GtWG)', 'GtWG'], {'sym_pos': '(True)'}), '(Ir + GtWG, GtWG, 
sym_pos=True)\n', (4238, 4269), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((4304, 4355), 'numpy.sum', 'np.sum', (['(G * (G - G @ GtWG + G @ (GtWG @ M)))'], {'axis': '(1)'}), '(G * (G - G @ GtWG + G @ (GtWG @ M)), axis=1)\n', (4310, 4355), True, 'import numpy as np\n'), ((6981, 7010), 'numpy.diag_indices_from', 'np.diag_indices_from', (['nhess_a'], {}), '(nhess_a)\n', (7001, 7010), True, 'import numpy as np\n'), ((7087, 7123), 'scipy.linalg.solve', 'solve', (['nhess_a', 'grad_a'], {'sym_pos': '(True)'}), '(nhess_a, grad_a, sym_pos=True)\n', (7092, 7123), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((7734, 7770), 'scipy.linalg.solve', 'solve', (['nhess_b', 'grad_b'], {'sym_pos': '(True)'}), '(nhess_b, grad_b, sym_pos=True)\n', (7739, 7770), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n'), ((8332, 8355), 'numpy.diag_indices_from', 'np.diag_indices_from', (['M'], {}), '(M)\n', (8352, 8355), True, 'import numpy as np\n'), ((15151, 15187), 'scipy.linalg.solve', 'solve', (['(Ir + GtWG)', 'GtWG'], {'sym_pos': '(True)'}), '(Ir + GtWG, GtWG, sym_pos=True)\n', (15156, 15187), False, 'from scipy.linalg import solve, norm, svd, LinAlgError\n')] |
import os.path
import numpy as np
import torch
import tqdm
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
from fibber import resources
from fibber.metrics.bert_classifier import get_optimizer
from fibber.paraphrase_strategies.strategy_base import StrategyBase
def make_input_output_pair(tokenizer, x):
    """Tokenize the text, then construct input and output for GPT2.

    The label sequence is an independent copy of the input ids, so callers
    can mask parts of it without mutating the input.

    :param tokenizer: object exposing ``encode(text, add_special_tokens=...)``.
    :param x: text to tokenize.
    :return: tuple ``(input_ids, output_ids)``.
    """
    toks = tokenizer.encode(x, add_special_tokens=True)
    # A previous revision masked the first half of the labels with -100;
    # the leftover no-op expression ``(len(toks)) // 2`` has been removed.
    output = toks[:]
    return toks, output
def make_batch(toks_list):
    """Pad a list of token-id sequences into a dense int batch.

    :param toks_list: list of (possibly ragged) token-id lists.
    :return: tuple ``(ids, mask)`` of shape (batch, max_len); ``mask`` is 1
        on real tokens and 0 on padding.
    """
    batch_size = len(toks_list)
    width = max(len(seq) for seq in toks_list)
    ids = np.zeros((batch_size, width), dtype='int')
    mask = np.zeros((batch_size, width), dtype='int')
    for row, seq in enumerate(toks_list):
        length = len(seq)
        ids[row, :length] = np.asarray(seq)
        mask[row, :length] = 1
    return ids, mask
class FudgeStrategy(StrategyBase):
    """FUDGE-style paraphrase strategy.

    ``fit`` fine-tunes GPT-2 on "<|endoftext|> x <|endoftext|> x" pairs, then
    ``paraphrase_example`` resamples the second half token by token, reweighting
    the LM's top-k candidates by a classifier/similarity score.
    """

    __abbr__ = "fu"

    def fit(self, trainset):
        # Metrics used later to score candidate continuations.
        self._sim_metric = self._metric_bundle.get_metric("USESimilarityMetric")
        self._clf_metric = self._metric_bundle.get_target_classifier()
        gpt2_pretrained_model = "gpt2-medium"
        self._tokenizer = GPT2TokenizerFast.from_pretrained(
            resources.get_transformers(gpt2_pretrained_model))
        # Cache directory for the fine-tuned model; reuse if it already exists.
        path = "gpt2-%s-1" % self._dataset_name
        if os.path.exists(path):
            self._model = GPT2LMHeadModel.from_pretrained(path).to(self._device)
            return
        self._model = GPT2LMHeadModel.from_pretrained(
            resources.get_transformers(gpt2_pretrained_model)).to(
            self._device)
        opt, sche = get_optimizer("adam", lr=0.0001, decay=0.001,
                                 params=self._model.parameters(), train_step=5000)
        self._model.train()
        # Fine-tune for 5000 steps on random mini-batches of size 8.
        for i in tqdm.tqdm(range(5000)):
            batch = np.random.choice(trainset["data"], size=8)
            text = [item["text0"] for item in batch]
            # Each training example is the sentence repeated twice, so the LM
            # learns to reproduce a sentence given itself as context.
            input_output = [
                make_input_output_pair(
                    self._tokenizer,
                    "<|endoftext|> " +
                    x +
                    " <|endoftext|> " +
                    x) for x in text]
            input, output = zip(*input_output)
            toks_input, mask = make_batch(input)
            toks_output, _ = make_batch(output)
            # -100 on padding positions so the LM loss ignores them.
            toks_output = toks_output * mask - 100 * (1 - mask)
            toks_input = torch.tensor(toks_input)
            toks_output = torch.tensor(toks_output)
            mask = torch.tensor(mask)
            # output[0] is the language-modeling loss.
            output = self._model(toks_input.to(self._device), attention_mask=mask.to(self._device),
                                 labels=toks_output.to(self._device))
            opt.zero_grad()
            output[0].backward()
            opt.step()
            sche.step()
            if i % 100 == 0:
                print(output[0])
        self._model.eval()
        self._model.save_pretrained(path)

    def score(self, data_record, tmp, text, ll):
        """Score candidate continuations: penalize classifier agreement with
        the original label and low USE similarity (higher is better)."""
        # Decode the already-generated prefix as the reference text.
        t0_for_use = self._tokenizer.decode(tmp[1:ll + 1])
        sim = self._sim_metric.measure_batch(t0_for_use, text)
        dist = self._clf_metric.predict_dist_batch(t0_for_use, text)
        label = data_record["label"]
        # print(t0_for_use, text)
        # Copy before clobbering the true-label column with -1e8 so the max
        # below picks the best *incorrect* class probability.
        correct_prob = (dist[:, label]).copy()
        dist[:, label] = -1e8
        incorrect_prob = np.max(dist, axis=1)
        # Margin penalty (only when still classified correctly) plus a
        # quadratic penalty for similarity below 0.95.
        return -np.maximum(correct_prob - incorrect_prob, 0) - 10 * \
            np.maximum(0.95 - np.asarray(sim), 0) ** 2

    def paraphrase_example(self, data_record, field_name, n):
        """Generate ``n`` paraphrases of ``data_record["text0"]`` by resampling
        the repeated second half token by token."""
        tmp, _ = make_input_output_pair(
            self._tokenizer, "<|endoftext|> " + data_record["text0"] + " <|endoftext|> " + data_record["text0"])
        batch = torch.tensor([tmp]).to(self._device)
        topk = 20
        ret = []
        for _ in range(n):
            # Positions len(tmp)//2 + 2 .. len(tmp) - 2 cover the second copy
            # of the sentence; each step resamples the token at i + 1.
            for i in range(len(tmp) // 2 + 2, len(tmp) - 1):
                lm_logits = self._model(batch[:, :i + 1])[0][0, -1, :]
                # Forbid early termination via the EOS token.
                lm_logits[self._tokenizer.eos_token_id] = -1e8
                lm_prob = torch.log_softmax(lm_logits, dim=0)
                kth_vals, kth_idx = lm_prob.topk(topk, dim=-1)
                kth_idx = kth_idx.detach().cpu().numpy()
                batch_np = batch.detach().cpu().numpy()
                # Candidate texts: generated prefix so far plus each top-k token.
                text = [list(batch_np[0][len(tmp) // 2 + 2:i + 1]) + [t] for t in kth_idx]
                text = [self._tokenizer.decode(t) for t in text]
                score = self.score(data_record, tmp, text, i - len(tmp) // 2 - 1)
                # FUDGE reweighting: add attribute scores to LM log-probs,
                # renormalize, and sample the next token.
                kth_vals += torch.tensor(score).to(self._device)
                kth_vals = torch.softmax(kth_vals, dim=0).detach().cpu().numpy()
                pick = np.random.choice(kth_idx, p=kth_vals)
                batch[0][i + 1] = pick
            ret.append(
                self._tokenizer.decode(
                    batch[0].detach().cpu().numpy()[
                        len(tmp) //
                        2 +
                        2:len(tmp) -
                        1]))
        return ret
| [
"torch.log_softmax",
"numpy.maximum",
"transformers.GPT2LMHeadModel.from_pretrained",
"numpy.asarray",
"numpy.zeros",
"fibber.resources.get_transformers",
"torch.softmax",
"numpy.max",
"numpy.random.choice",
"torch.tensor"
] | [((701, 736), 'numpy.zeros', 'np.zeros', (['(n, max_len)'], {'dtype': '"""int"""'}), "((n, max_len), dtype='int')\n", (709, 736), True, 'import numpy as np\n'), ((748, 783), 'numpy.zeros', 'np.zeros', (['(n, max_len)'], {'dtype': '"""int"""'}), "((n, max_len), dtype='int')\n", (756, 783), True, 'import numpy as np\n'), ((855, 871), 'numpy.asarray', 'np.asarray', (['item'], {}), '(item)\n', (865, 871), True, 'import numpy as np\n'), ((3497, 3517), 'numpy.max', 'np.max', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (3503, 3517), True, 'import numpy as np\n'), ((1355, 1404), 'fibber.resources.get_transformers', 'resources.get_transformers', (['gpt2_pretrained_model'], {}), '(gpt2_pretrained_model)\n', (1381, 1404), False, 'from fibber import resources\n'), ((1979, 2021), 'numpy.random.choice', 'np.random.choice', (["trainset['data']"], {'size': '(8)'}), "(trainset['data'], size=8)\n", (1995, 2021), True, 'import numpy as np\n'), ((2556, 2580), 'torch.tensor', 'torch.tensor', (['toks_input'], {}), '(toks_input)\n', (2568, 2580), False, 'import torch\n'), ((2607, 2632), 'torch.tensor', 'torch.tensor', (['toks_output'], {}), '(toks_output)\n', (2619, 2632), False, 'import torch\n'), ((2652, 2670), 'torch.tensor', 'torch.tensor', (['mask'], {}), '(mask)\n', (2664, 2670), False, 'import torch\n'), ((3535, 3579), 'numpy.maximum', 'np.maximum', (['(correct_prob - incorrect_prob)', '(0)'], {}), '(correct_prob - incorrect_prob, 0)\n', (3545, 3579), True, 'import numpy as np\n'), ((3877, 3896), 'torch.tensor', 'torch.tensor', (['[tmp]'], {}), '([tmp])\n', (3889, 3896), False, 'import torch\n'), ((4198, 4233), 'torch.log_softmax', 'torch.log_softmax', (['lm_logits'], {'dim': '(0)'}), '(lm_logits, dim=0)\n', (4215, 4233), False, 'import torch\n'), ((4821, 4858), 'numpy.random.choice', 'np.random.choice', (['kth_idx'], {'p': 'kth_vals'}), '(kth_idx, p=kth_vals)\n', (4837, 4858), True, 'import numpy as np\n'), ((1515, 1552), 'transformers.GPT2LMHeadModel.from_pretrained', 
'GPT2LMHeadModel.from_pretrained', (['path'], {}), '(path)\n', (1546, 1552), False, 'from transformers import GPT2LMHeadModel, GPT2TokenizerFast\n'), ((1657, 1706), 'fibber.resources.get_transformers', 'resources.get_transformers', (['gpt2_pretrained_model'], {}), '(gpt2_pretrained_model)\n', (1683, 1706), False, 'from fibber import resources\n'), ((4679, 4698), 'torch.tensor', 'torch.tensor', (['score'], {}), '(score)\n', (4691, 4698), False, 'import torch\n'), ((3619, 3634), 'numpy.asarray', 'np.asarray', (['sim'], {}), '(sim)\n', (3629, 3634), True, 'import numpy as np\n'), ((4743, 4773), 'torch.softmax', 'torch.softmax', (['kth_vals'], {'dim': '(0)'}), '(kth_vals, dim=0)\n', (4756, 4773), False, 'import torch\n')] |
# File: linear.py
# Last Change: 29.10.2018
# Content: Linear Regression layout class
# Authors: <NAME>,
import numpy as np
# => Simple Linear Regression Model
class SLRModel(object):
    """Ordinary least squares fit of a single predictor: f(x) = m*x + b."""

    # Initializer / Constructor:
    def __init__(self):
        # coef[0] is the slope m, coef[1] the intercept b (set by ``fit``).
        self.coef = []

    # Hook methods for build in functions:
    def __str__(self):
        return "f(x) = {}x + {}".format(self.coef[0], self.coef[1])

    def __getitem__(self, key):
        return self.coef[key]

    # Public Methods
    def fit(self, x, y):
        """Estimate slope and intercept from paired samples ``x`` and ``y``.

        Uses the closed-form least-squares solution
            m = SS_xy / SS_xx,  b = mean(y) - m * mean(x).
        """
        self.coef.clear()
        n = np.size(x)
        x_mean, y_mean = np.mean(x), np.mean(y)
        # BUG FIX: the sums were previously np.sum(y*x - n*y_mean*x_mean) and
        # np.sum(x*x - n*x_mean*x_mean), which place the mean-correction term
        # inside the sum and therefore subtract it n times instead of once,
        # giving wrong coefficients on non-perfectly-linear data.
        ss_xy = np.sum(y * x) - n * y_mean * x_mean
        ss_xx = np.sum(x * x) - n * x_mean * x_mean
        m = ss_xy / ss_xx
        b = y_mean - m * x_mean
        self.coef = [m, b]

    def predict(self, x):
        """Return ``m*xi + b`` for each element of the iterable ``x``."""
        result = []
        for row in x:
            result.append(self.coef[0] * row + self.coef[1])
        return result
| [
"numpy.size",
"numpy.mean",
"numpy.sum"
] | [((555, 565), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (562, 565), True, 'import numpy as np\n'), ((591, 601), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (598, 601), True, 'import numpy as np\n'), ((603, 613), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (610, 613), True, 'import numpy as np\n'), ((638, 673), 'numpy.sum', 'np.sum', (['(y * x - n * y_mean * x_mean)'], {}), '(y * x - n * y_mean * x_mean)\n', (644, 673), True, 'import numpy as np\n'), ((669, 704), 'numpy.sum', 'np.sum', (['(x * x - n * x_mean * x_mean)'], {}), '(x * x - n * x_mean * x_mean)\n', (675, 704), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import time
import argparse
import os
import json
import mnist_dataset
import alexnet
def val_data():
    """Load MNIST and return only the test split (images, one-hot labels, label ids)."""
    _, _, _, x_test, y_test, label_test = mnist_dataset.read_data()
    return x_test, y_test, label_test
def calc_accuracy(predictions, labels):
    """Fraction of rows whose argmax class matches the label (0 if empty).

    :param predictions: 2-D array of per-class scores, one row per sample.
    :param labels: sequence of true class indices, aligned with the rows.
    """
    predicted_classes = np.argmax(predictions, axis=1)
    if len(predicted_classes) == 0:
        return 0
    matches = sum(1 for guess, truth in zip(predicted_classes, labels) if guess == truth)
    return matches / len(predicted_classes)
def get_concrete_function(graph_def, inputs, outputs, print_graph=False):
    """Wrap a frozen GraphDef into a callable TF2 ConcreteFunction.

    :param graph_def: frozen ``tf.compat.v1.GraphDef`` to import.
    :param inputs: nest of tensor names (e.g. ["x:0"]) to expose as inputs.
    :param outputs: nest of tensor names to expose as outputs.
    :param print_graph: accepted for API compatibility; unused in this body.
    :return: a pruned ConcreteFunction mapping ``inputs`` to ``outputs``.
    """
    def imports_graph_def():
        # Empty name scope so tensor names match `inputs`/`outputs` exactly.
        tf.compat.v1.import_graph_def(graph_def, name="")
    wrap_function = tf.compat.v1.wrap_function(imports_graph_def, [])
    graph = wrap_function.graph
    # Resolve tensor names to graph elements, preserving the nest structure.
    return wrap_function.prune(
        tf.nest.map_structure(graph.as_graph_element, inputs),
        tf.nest.map_structure(graph.as_graph_element, outputs))
def infer_perf_pb(pb_model_file, val_data, inputs=None, outputs=None):
    """Benchmark a frozen .pb model: accuracy, max throughput and BS=1 latency.

    :param pb_model_file: path to the frozen GraphDef to load.
    :param val_data: tuple ``(x_test, y_test, label_test)``.
    :param inputs: input tensor names; defaults to ["x:0"].
    :param outputs: output tensor names; defaults to ["Identity:0"].
    :return: tuple ``(accuracy, throughput, latency)``.
    """
    # Avoid mutable default arguments (previously inputs=["x:0"] etc., a list
    # object shared across calls).
    if inputs is None:
        inputs = ["x:0"]
    if outputs is None:
        outputs = ["Identity:0"]
    x_test, y_test, label_test = val_data
    q_model = alexnet.load_pb(pb_model_file)
    concrete_function = get_concrete_function(graph_def=q_model.as_graph_def(),
                                              inputs=inputs,
                                              outputs=outputs,
                                              print_graph=True)
    # Throughput: one full-batch forward pass over the whole test set.
    bt = time.time()
    _frozen_graph_predictions = concrete_function(x=tf.constant(x_test))
    et = time.time()
    accuracy = calc_accuracy(_frozen_graph_predictions[0], label_test)
    print('accuracy:', accuracy)
    throughput = x_test.shape[0] / (et - bt)
    print('max throughput(fps):', throughput)
    # latency when BS=1: time single-sample runs, skipping warmup iterations.
    times = 1000
    single_test = x_test[:1]
    bt = 0
    warmup = 20
    for i in range(times):
        if i == warmup:
            # Start the clock only once the warmup iterations are done.
            bt = time.time()
        _frozen_graph_predictions = concrete_function(x=tf.constant(single_test))
    et = time.time()
    latency = (et - bt) * 1000 / (times - warmup)
    print('latency(ms):', latency)
    return accuracy, throughput, latency
def save_res(result):
    """Dump an (accuracy, throughput, latency) triple to '<args.index>.json'."""
    accuracy, throughput, latency = result
    res = {
        'accuracy': accuracy,
        'throughput': throughput,
        'latency': latency,
    }
    outfile = args.index + ".json"
    with open(outfile, 'w') as f:
        json.dump(res, f)
    print("Save result to {}".format(outfile))
# Command-line interface: output file stem, which frozen graph to benchmark,
# and the threading knobs (thread counts are kept as strings because they are
# written straight into environment variables below).
parser = argparse.ArgumentParser()
parser.add_argument('--index', type=str, help='file name of output', required=True)
parser.add_argument('--input-graph', type=str, help='file name for graph', required=True)
parser.add_argument('--num-intra-threads', type=str, help='number of threads for an operator', required=False,
                    default="24" )
parser.add_argument('--num-inter-threads', type=str, help='number of threads across operators', required=False,
                    default="1")
parser.add_argument('--omp-num-threads', type=str, help='number of threads to use', required=False,
                    default="24")
args = parser.parse_args()
# Configure OpenMP/TF threading via environment variables before inference
# runs (TF reads these when it initializes its thread pools).
os.environ["KMP_BLOCKTIME"] = "1"
os.environ["KMP_SETTINGS"] = "0"
os.environ["OMP_NUM_THREADS"] = args.omp_num_threads
os.environ["TF_NUM_INTEROP_THREADS"] = args.num_inter_threads
os.environ["TF_NUM_INTRAOP_THREADS"] = args.num_intra_threads
save_res(infer_perf_pb(args.input_graph, val_data())) | [
"json.dump",
"alexnet.load_pb",
"argparse.ArgumentParser",
"numpy.argmax",
"tensorflow.compat.v1.wrap_function",
"tensorflow.constant",
"time.time",
"tensorflow.nest.map_structure",
"mnist_dataset.read_data",
"tensorflow.compat.v1.import_graph_def"
] | [((2488, 2513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2511, 2513), False, 'import argparse\n'), ((214, 239), 'mnist_dataset.read_data', 'mnist_dataset.read_data', ([], {}), '()\n', (237, 239), False, 'import mnist_dataset\n'), ((338, 368), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (347, 368), True, 'import numpy as np\n'), ((751, 800), 'tensorflow.compat.v1.wrap_function', 'tf.compat.v1.wrap_function', (['imports_graph_def', '[]'], {}), '(imports_graph_def, [])\n', (777, 800), True, 'import tensorflow as tf\n'), ((1135, 1165), 'alexnet.load_pb', 'alexnet.load_pb', (['pb_model_file'], {}), '(pb_model_file)\n', (1150, 1165), False, 'import alexnet\n'), ((1444, 1455), 'time.time', 'time.time', ([], {}), '()\n', (1453, 1455), False, 'import time\n'), ((1538, 1549), 'time.time', 'time.time', ([], {}), '()\n', (1547, 1549), False, 'import time\n'), ((2016, 2027), 'time.time', 'time.time', ([], {}), '()\n', (2025, 2027), False, 'import time\n'), ((680, 729), 'tensorflow.compat.v1.import_graph_def', 'tf.compat.v1.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (709, 729), True, 'import tensorflow as tf\n'), ((874, 927), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['graph.as_graph_element', 'inputs'], {}), '(graph.as_graph_element, inputs)\n', (895, 927), True, 'import tensorflow as tf\n'), ((937, 991), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['graph.as_graph_element', 'outputs'], {}), '(graph.as_graph_element, outputs)\n', (958, 991), True, 'import tensorflow as tf\n'), ((2409, 2426), 'json.dump', 'json.dump', (['res', 'f'], {}), '(res, f)\n', (2418, 2426), False, 'import json\n'), ((1508, 1527), 'tensorflow.constant', 'tf.constant', (['x_test'], {}), '(x_test)\n', (1519, 1527), True, 'import tensorflow as tf\n'), ((1913, 1924), 'time.time', 'time.time', ([], {}), '()\n', (1922, 1924), False, 'import time\n'), ((1981, 
2005), 'tensorflow.constant', 'tf.constant', (['single_test'], {}), '(single_test)\n', (1992, 2005), True, 'import tensorflow as tf\n')] |
# flake8: noqa E731
from alibi.api.defaults import DEFAULT_META_CEM, DEFAULT_DATA_CEM
from alibi.explainers import CEM
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import pytest
@pytest.mark.tf1
def test_cem(disable_tf2):
    """Smoke-test the CEM explainer (pertinent negative) on iris + logistic regression."""
    # load iris dataset
    dataset = load_iris()
    # standardize features (zero mean, unit variance)
    dataset.data = (dataset.data - dataset.data.mean(axis=0)) / dataset.data.std(axis=0)
    # define train and test set
    X, Y = dataset.data, dataset.target
    # fit logistic regression to training data
    np.random.seed(0)
    clf = LogisticRegression(solver='liblinear')
    clf.fit(X, Y)
    # black-box prediction function (probabilities)
    predict_fn = lambda x: clf.predict_proba(x)
    # instance to be explained
    idx = 0
    X_expl = X[idx].reshape((1,) + X[idx].shape)
    # test explainer initialization
    shape = (1, 4)
    # allowed perturbation range, slightly beyond the observed feature range
    feature_range = (X.min(axis=0).reshape(shape) - .1, X.max(axis=0).reshape(shape) + .1)
    cem = CEM(predict_fn, 'PN', shape, feature_range=feature_range, max_iterations=10, no_info_val=-1.)
    explanation = cem.explain(X_expl, verbose=False)
    # black-box mode: no internal model handle should be stored
    assert not cem.model
    assert set(explanation.data.keys()) >= {'X', 'X_pred', 'PN', 'PN_pred', 'grads_graph', 'grads_num'}
    # a pertinent negative must differ from the input and flip the prediction
    assert (explanation.X != explanation.PN).astype(int).sum() > 0
    assert explanation.X_pred != explanation.PN_pred
    assert explanation.grads_graph.shape == explanation.grads_num.shape
    # explanation payload must follow the library-wide schema
    assert explanation.meta.keys() == DEFAULT_META_CEM.keys()
    assert explanation.data.keys() == DEFAULT_DATA_CEM.keys()
| [
"sklearn.datasets.load_iris",
"numpy.random.seed",
"alibi.api.defaults.DEFAULT_META_CEM.keys",
"sklearn.linear_model.LogisticRegression",
"alibi.explainers.CEM",
"alibi.api.defaults.DEFAULT_DATA_CEM.keys"
] | [((328, 339), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (337, 339), False, 'from sklearn.datasets import load_iris\n'), ((569, 586), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (583, 586), True, 'import numpy as np\n'), ((597, 635), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""'}), "(solver='liblinear')\n", (615, 635), False, 'from sklearn.linear_model import LogisticRegression\n'), ((986, 1084), 'alibi.explainers.CEM', 'CEM', (['predict_fn', '"""PN"""', 'shape'], {'feature_range': 'feature_range', 'max_iterations': '(10)', 'no_info_val': '(-1.0)'}), "(predict_fn, 'PN', shape, feature_range=feature_range, max_iterations=10,\n no_info_val=-1.0)\n", (989, 1084), False, 'from alibi.explainers import CEM\n'), ((1493, 1516), 'alibi.api.defaults.DEFAULT_META_CEM.keys', 'DEFAULT_META_CEM.keys', ([], {}), '()\n', (1514, 1516), False, 'from alibi.api.defaults import DEFAULT_META_CEM, DEFAULT_DATA_CEM\n'), ((1555, 1578), 'alibi.api.defaults.DEFAULT_DATA_CEM.keys', 'DEFAULT_DATA_CEM.keys', ([], {}), '()\n', (1576, 1578), False, 'from alibi.api.defaults import DEFAULT_META_CEM, DEFAULT_DATA_CEM\n')] |
import logging
import numpy as np
import pandas as pd
import pickle
import re
from scipy import stats
from sklearn import linear_model
import statsmodels.api as sm
import string
import time
from pyspan.config import *
log_dir = paths["log_dir"]
# NOTE(review): ``ganesha`` appears to flag an environment where matplotlib is
# unavailable; the plotting imports and helpers are only defined when it is
# False — confirm against pyspan.config.
if not ganesha:
    import matplotlib as mpl
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt

    # Plot the line fitted through (x, y), extended across the axes' full
    # x-range, without changing the existing axis limits.
    # https://stackoverflow.com/questions/19310735#19319972
    def extended(ax, x, y, **args):
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        x_ext = np.linspace(xlim[0], xlim[1], 100)
        p = np.polyfit(x, y , deg=1)
        y_ext = np.poly1d(p)(x_ext)
        ax.plot(x_ext, y_ext, **args)
        # Restore limits so the extended line does not rescale the plot.
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        return ax

    # Colormap normalization that pins a chosen midpoint (e.g. 0) to the
    # center of the colormap, for diverging color scales.
    #https://chris35wills.github.io/matplotlib_diverging_colorbar/
    class MidpointNormalize(colors.Normalize):
        def __init__(self, vmin = None, vmax = None, midpoint = None,
                     clip = False):
            self.midpoint = midpoint
            colors.Normalize.__init__(self, vmin, vmax, clip)

        def __call__(self, value, clip = None):
            # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1;
            # NaNs in the input are masked out.
            x, y = [self.vmin, self.midpoint, self.vmax], [0, .5, 1]
            return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
def startLog(fn):
    """Configure root logging to write INFO-level records to <log_dir>/<fn>.log."""
    logging.basicConfig(
        format='%(asctime)s : %(levelname)s : %(message)s',
        level=logging.INFO,
        filename="{}{}.log".format(log_dir, fn),
    )
# A custom timestamp to integer function to cope with timestamps outside of
# Unix epochs
def timestamp_to_integer(timestamp, fmt):
    """Encode a timestamp string as 365 * year + day-of-year (safe pre-1970)."""
    parsed = time.strptime(timestamp, fmt)
    return 365 * parsed.tm_year + parsed.tm_yday
def approx_eq(a, b, tol = 1e-8):
    """True when a and b differ by at most tol in absolute value."""
    difference = abs(a - b)
    return difference <= tol
def tokenize(text, ignore_ascii = False):
    """Lowercased word tokens of *text*, with punctuation stripped.

    :param ignore_ascii: when True, tokens are additionally ASCII-encoded
        with non-ASCII characters dropped.
    """
    cleaned = re.sub('[%s]' % re.escape(string.punctuation), ' ', text)
    words = cleaned.replace("\n", " ").split()
    if ignore_ascii:
        return [w.lower().encode("ascii", "ignore") for w in words]
    return [w.lower() for w in words]
def get_metric(metrics, token, party):
    """Look up a token's partisanship metric for the given party.

    :param metrics: DataFrame indexed by token with 'dmetric'/'rmetric' columns.
    :param party: either "democrat" or "republican".
    :return: the metric value, or NaN when the token is not in the index.
    """
    assert party in ("democrat", "republican")
    column = "dmetric" if party == "democrat" else "rmetric"
    if token in metrics.index:
        return metrics.loc[token][column]
    return np.nan
def get_partisan_score(excerpt, metrics, f = np.prod):
    """Aggregate per-token partisanship metrics over an excerpt.

    :param excerpt: raw text; tokenized with ``tokenize``.
    :param metrics: token-indexed DataFrame understood by ``get_metric``.
    :param f: aggregation function applied to the per-token metric arrays.
    :return: tuple ``(democrat_score, republican_score)``.
    """
    tokens = tokenize(excerpt)
    # BUG FIX: under Python 3, np.array(map(...)) wraps the map *iterator*
    # in a 0-d object array instead of evaluating it; build explicit lists,
    # which behaves identically on Python 2.
    dmetrics = np.array([get_metric(metrics, token, "democrat")
                         for token in tokens])
    dmetrics = dmetrics[~np.isnan(dmetrics)]
    rmetrics = np.array([get_metric(metrics, token, "republican")
                         for token in tokens])
    rmetrics = rmetrics[~np.isnan(rmetrics)]
    # Calculate metric for the entire excerpt
    dscore = f(dmetrics)
    rscore = f(rmetrics)
    return dscore, rscore
# Replace missing data with its mean
def replace_nans(arr):
    """Impute NaN entries of *arr* in place with the mean of its non-NaN entries."""
    missing = np.isnan(arr)
    arr[missing] = np.mean(arr[~missing])
    return arr
# Return logistic CDF(x)
def sigmoid(x):
    """Standard logistic function exp(x) / (1 + exp(x))."""
    ex = np.exp(x)
    return ex / (1 + ex)
# Return a statsmodels.OLS object fitted to the data passed as argument
def linear_regression(Y, X, add_constant = True):
    """Fit OLS of Y on X (optionally adding an intercept column) and return the results."""
    Y = np.array(Y)
    X = np.array(X)
    if add_constant:
        X = sm.add_constant(X)
    return sm.OLS(Y, X).fit()
# Return an sklearn.linear_model.LogisticRegression object fitted to the data
# passed as argument. Optionally, set `lasso` to True to introduce an L1 penalty
# (enforce sparsity in the vector of coefficients).
def logistic_regression(Y, X, lasso = False, **kwargs):
    """Fit a (cross-validated) logistic regression of Y on X and return it."""
    if lasso:
        # L1 requires the liblinear solver; caller-supplied values win but
        # must agree with the lasso request.
        kwargs.setdefault("penalty", "l1")
        kwargs.setdefault("solver", "liblinear")
        assert kwargs["penalty"] == "l1"
        assert kwargs["solver"] == "liblinear"
    logit = linear_model.LogisticRegressionCV(**kwargs)
    logit.fit(X, Y)
    return logit
# Takes a matrix X as input. If shape_ == "long", each row of X is assumed to
# be an observation, and each column is assumed to correspond to a variable. If
# shape_ == "wide", the reverse should be true (shape_ defaults to "long").
#
# Plots the correlation matrix of X. If a filename is specified, saves to that
# filename.
def plot_corrs(X, shape_ = "long", labels = None, filename = None,
               cmap = "seismic"):
    assert shape_ in ("long", "wide")
    if shape_ == "wide":
        X = X.T
    n = X.shape[1]
    if not labels:
        labels = [""] * n
    assert len(labels) == n
    # Pairwise Pearson correlations between all column pairs (n x n, row-major).
    corrs = [ stats.pearsonr(X[:,i], X[:,j])[0] for i in range(n) for j in
              range(n) ]
    corrs = np.array(corrs).reshape((n, n))
    # Blank the diagonal (self-correlation is trivially 1).
    for i in range(n):
        corrs[i,i] = np.nan
    fig = plt.figure()
    # Diverging colormap centered at 0 so the sign of r is visually meaningful.
    plt.imshow(corrs, cmap = cmap, clim = (-1, 1),
               norm = MidpointNormalize(midpoint = 0, vmin = -1, vmax = 1))
    plt.xticks(range(n), labels, rotation = 90, fontsize = 20)
    plt.yticks(range(n), labels, fontsize = 20)
    plt.colorbar()
    plt.title("Pairwise correlations", fontsize = 20)
    if filename:
        # Fixed bounding box so rotated tick labels are not clipped.
        plt.savefig(filename,
                    bbox_inches = mpl.transforms.Bbox([[-1, -1.25], [6, 4]]))
    return fig
# Plot the histogram of the X argument, along with lines indicating the mean
# and standard deviation. If a filename is specified, saves to that filename.
def histogram(X, bins = 50, title = "Histogram", filename = None, **kwargs):
    """Plot a histogram of X with the mean and +/- one standard error marked.

    Returns (figure, mean, standard_error). NOTE: despite the comment above,
    the dashed lines mark the standard *error* of the mean, not the SD.
    """
    mu, se = np.mean(X), stats.sem(X)
    fig = plt.figure(**kwargs)
    plt.hist(X, bins = bins)
    ylim = plt.ylim()
    plt.ylim(*ylim)
    # Indicate the mean
    plt.plot([ mu, mu ], [ 0, ylim[1] ])
    # Indicate one SE above and below the mean
    plt.plot([ mu-se, mu-se ], [ 0, ylim[1] ], color = "orange",
             linestyle = "--")
    plt.plot([ mu+se, mu+se ], [ 0, ylim[1] ], color = "orange",
             linestyle = "--")
    plt.title(title)
    if filename:
        plt.savefig(filename)
    return fig, mu, se
# Turn a list of lists into a csv
def to_csv(X, filename, **kwargs):
    """Write rows X to *filename* as CSV, appending '.csv' when missing."""
    if filename[-4:] != ".csv":
        filename = filename + ".csv"
    frame = pd.DataFrame(X, **kwargs)
    frame.to_csv(filename)
# Generate a random sequence of four letters to use as an identifier when
# making bootstrapped samples
def make_id():
    """Return four random lowercase ASCII letters drawn via numpy's RNG."""
    # BUG FIX: ``string.lowercase`` is Python-2-only (and locale-dependent);
    # ``string.ascii_lowercase`` is the fixed a-z constant and exists on both
    # Python 2 and 3.
    return "".join(np.random.choice(list(string.ascii_lowercase), 4))
def is_nan(x):
    """True iff x is a float NaN; non-float values are never considered NaN."""
    # BUG FIX: this used ``isinstance(x, np.float)``; ``np.float`` was merely
    # an alias of the builtin ``float`` and was removed in NumPy 1.24, so the
    # call raises AttributeError on modern NumPy. ``np.float64`` subclasses
    # ``float``, so array scalars are still handled.
    if not isinstance(x, float):
        return False
    return np.isnan(x)
# Elementwise version usable on arrays / sequences of mixed types.
is_nan = np.vectorize(is_nan)
def plot_prf(gam, term, coef = None):
    """Plot the partial dependence of one term of a fitted GAM (pygam-style
    API) with its 95% confidence band. ``coef`` is accepted but unused."""
    XX = gam.generate_X_grid(term = term)
    pdeps, conf_intervals = gam.partial_dependence(term = term, X = XX,
                                                   width = .95)
    # Sort by the term's feature value so the curve plots left-to-right.
    order_ixs = np.argsort(XX[:,term])
    x = XX[order_ixs,term]
    pdeps = pdeps[order_ixs]
    conf_intervals = conf_intervals[order_ixs,:]
    plt.plot(x, pdeps)
    # Shaded band between the lower and upper confidence bounds.
    plt.fill_between(x, conf_intervals[:,0], conf_intervals[:,1],
                     alpha = .5)
# Return a correlation between vectors X and Y where elements with missing
# values in X or Y are removed
def pearsonr_(x, y):
    """Pearson correlation of x and y over the pairs where neither is missing."""
    valid = (~is_nan(x)) & (~is_nan(y))
    return stats.pearsonr(x[valid], y[valid])
# https://en.wikipedia.org/wiki/Effect_size#Cohen's_d
def cohensd(a1, a2):
    """Cohen's d effect size between samples a1 and a2 (pooled sample SD)."""
    n1, n2 = len(a1), len(a2)
    s1 = np.std(a1, ddof = 1)
    s2 = np.std(a2, ddof = 1)
    pooled_var = ((n1 - 1) * s1 ** 2 + (n2 - 1) * s2 ** 2) / (n1 + n2 - 2)
    return (np.mean(a1) - np.mean(a2)) / np.sqrt(pooled_var)
| [
"matplotlib.pyplot.title",
"statsmodels.api.OLS",
"numpy.polyfit",
"numpy.isnan",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.exp",
"numpy.interp",
"matplotlib.pyplot.fill_between",
"pandas.DataFrame",
"numpy.std",
"re.escape",
"matplotlib.pyplot.colorbar",
"numpy.l... | [((6482, 6502), 'numpy.vectorize', 'np.vectorize', (['is_nan'], {}), '(is_nan)\n', (6494, 6502), True, 'import numpy as np\n'), ((1339, 1453), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO', 'filename': 'filename'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO, filename=filename)\n", (1358, 1453), False, 'import logging\n'), ((1620, 1649), 'time.strptime', 'time.strptime', (['timestamp', 'fmt'], {}), '(timestamp, fmt)\n', (1633, 1649), False, 'import time\n'), ((3350, 3362), 'statsmodels.api.OLS', 'sm.OLS', (['Y', 'X'], {}), '(Y, X)\n', (3356, 3362), True, 'import statsmodels.api as sm\n'), ((3884, 3927), 'sklearn.linear_model.LogisticRegressionCV', 'linear_model.LogisticRegressionCV', ([], {}), '(**kwargs)\n', (3917, 3927), False, 'from sklearn import linear_model\n'), ((4772, 4784), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4782, 4784), True, 'import matplotlib.pyplot as plt\n'), ((5027, 5041), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5039, 5041), True, 'import matplotlib.pyplot as plt\n'), ((5046, 5093), 'matplotlib.pyplot.title', 'plt.title', (['"""Pairwise correlations"""'], {'fontsize': '(20)'}), "('Pairwise correlations', fontsize=20)\n", (5055, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5538), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '(**kwargs)\n', (5528, 5538), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5565), 'matplotlib.pyplot.hist', 'plt.hist', (['X'], {'bins': 'bins'}), '(X, bins=bins)\n', (5551, 5565), True, 'import matplotlib.pyplot as plt\n'), ((5579, 5589), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (5587, 5589), True, 'import matplotlib.pyplot as plt\n'), ((5594, 5609), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (5602, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5638, 5670), 
'matplotlib.pyplot.plot', 'plt.plot', (['[mu, mu]', '[0, ylim[1]]'], {}), '([mu, mu], [0, ylim[1]])\n', (5646, 5670), True, 'import matplotlib.pyplot as plt\n'), ((5726, 5800), 'matplotlib.pyplot.plot', 'plt.plot', (['[mu - se, mu - se]', '[0, ylim[1]]'], {'color': '"""orange"""', 'linestyle': '"""--"""'}), "([mu - se, mu - se], [0, ylim[1]], color='orange', linestyle='--')\n", (5734, 5800), True, 'import matplotlib.pyplot as plt\n'), ((5822, 5896), 'matplotlib.pyplot.plot', 'plt.plot', (['[mu + se, mu + se]', '[0, ylim[1]]'], {'color': '"""orange"""', 'linestyle': '"""--"""'}), "([mu + se, mu + se], [0, ylim[1]], color='orange', linestyle='--')\n", (5830, 5896), True, 'import matplotlib.pyplot as plt\n'), ((5918, 5934), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5927, 5934), True, 'import matplotlib.pyplot as plt\n'), ((6143, 6168), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X, **kwargs)\n', (6155, 6168), True, 'import pandas as pd\n'), ((6461, 6472), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6469, 6472), True, 'import numpy as np\n'), ((6736, 6759), 'numpy.argsort', 'np.argsort', (['XX[:, term]'], {}), '(XX[:, term])\n', (6746, 6759), True, 'import numpy as np\n'), ((6868, 6886), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pdeps'], {}), '(x, pdeps)\n', (6876, 6886), True, 'import matplotlib.pyplot as plt\n'), ((6891, 6965), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'conf_intervals[:, 0]', 'conf_intervals[:, 1]'], {'alpha': '(0.5)'}), '(x, conf_intervals[:, 0], conf_intervals[:, 1], alpha=0.5)\n', (6907, 6965), True, 'import matplotlib.pyplot as plt\n'), ((7352, 7370), 'numpy.std', 'np.std', (['a1'], {'ddof': '(1)'}), '(a1, ddof=1)\n', (7358, 7370), True, 'import numpy as np\n'), ((7382, 7400), 'numpy.std', 'np.std', (['a2'], {'ddof': '(1)'}), '(a2, ddof=1)\n', (7388, 7400), True, 'import numpy as np\n'), ((7451, 7517), 'numpy.sqrt', 'np.sqrt', (['(((n1 - 1) * s1 ** 2 + (n2 - 1) * s2 ** 2) / (n1 + n2 
- 2))'], {}), '(((n1 - 1) * s1 ** 2 + (n2 - 1) * s2 ** 2) / (n1 + n2 - 2))\n', (7458, 7517), True, 'import numpy as np\n'), ((538, 572), 'numpy.linspace', 'np.linspace', (['xlim[0]', 'xlim[1]', '(100)'], {}), '(xlim[0], xlim[1], 100)\n', (549, 572), True, 'import numpy as np\n'), ((585, 608), 'numpy.polyfit', 'np.polyfit', (['x', 'y'], {'deg': '(1)'}), '(x, y, deg=1)\n', (595, 608), True, 'import numpy as np\n'), ((2989, 3002), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (2997, 3002), True, 'import numpy as np\n'), ((3103, 3112), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (3109, 3112), True, 'import numpy as np\n'), ((3263, 3274), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3271, 3274), True, 'import numpy as np\n'), ((3276, 3287), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3284, 3287), True, 'import numpy as np\n'), ((3321, 3339), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (3336, 3339), True, 'import statsmodels.api as sm\n'), ((5482, 5492), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (5489, 5492), True, 'import numpy as np\n'), ((5494, 5506), 'scipy.stats.sem', 'stats.sem', (['X'], {}), '(X)\n', (5503, 5506), False, 'from scipy import stats\n'), ((5960, 5981), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (5971, 5981), True, 'import matplotlib.pyplot as plt\n'), ((7317, 7328), 'numpy.mean', 'np.mean', (['a1'], {}), '(a1)\n', (7324, 7328), True, 'import numpy as np\n'), ((7331, 7342), 'numpy.mean', 'np.mean', (['a2'], {}), '(a2)\n', (7338, 7342), True, 'import numpy as np\n'), ((626, 638), 'numpy.poly1d', 'np.poly1d', (['p'], {}), '(p)\n', (635, 638), True, 'import numpy as np\n'), ((1023, 1072), 'matplotlib.colors.Normalize.__init__', 'colors.Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (1048, 1072), True, 'import matplotlib.colors as colors\n'), ((1856, 1885), 're.escape', 're.escape', (['string.punctuation'], {}), 
'(string.punctuation)\n', (1865, 1885), False, 'import re\n'), ((2610, 2628), 'numpy.isnan', 'np.isnan', (['dmetrics'], {}), '(dmetrics)\n', (2618, 2628), True, 'import numpy as np\n'), ((2776, 2794), 'numpy.isnan', 'np.isnan', (['rmetrics'], {}), '(rmetrics)\n', (2784, 2794), True, 'import numpy as np\n'), ((3118, 3127), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (3124, 3127), True, 'import numpy as np\n'), ((4580, 4612), 'scipy.stats.pearsonr', 'stats.pearsonr', (['X[:, i]', 'X[:, j]'], {}), '(X[:, i], X[:, j])\n', (4594, 4612), False, 'from scipy import stats\n'), ((4678, 4693), 'numpy.array', 'np.array', (['corrs'], {}), '(corrs)\n', (4686, 4693), True, 'import numpy as np\n'), ((1229, 1251), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (1238, 1251), True, 'import numpy as np\n'), ((1253, 1268), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (1261, 1268), True, 'import numpy as np\n'), ((3019, 3032), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (3027, 3032), True, 'import numpy as np\n'), ((5177, 5219), 'matplotlib.transforms.Bbox', 'mpl.transforms.Bbox', (['[[-1, -1.25], [6, 4]]'], {}), '([[-1, -1.25], [6, 4]])\n', (5196, 5219), True, 'import matplotlib as mpl\n')] |
from neorl import TS
import numpy as np
#this example is not finalized, it uses TS to solve two combinatorial problems:
#Travel salesman problem
#Job scheduling problem
#############################################
#### -- Scheduling Problem related input
#############################################
def input_data(njobs=30):
    '''
    Job Scheduling Problem data.

    Returns a dict keyed by job number (1..njobs); each value is a dict with
    the job's ``weight``, ``processing_time`` (hours) and ``due_date`` (hours).

    :param njobs: problem size; one of 10, 20 or 30 jobs (default 30).
    :raises ValueError: if ``njobs`` is not one of the supported sizes.
    '''
    if njobs==10:
        JobData={1: {'weight': 0.5, 'processing_time': 1.5, 'due_date': 4},
          2: {'weight': 0.8, 'processing_time': 2.2, 'due_date': 4},
          3: {'weight': 0.6, 'processing_time': 0.5, 'due_date': 4},
          4: {'weight': 0.4, 'processing_time': 1.6, 'due_date': 6},
          5: {'weight': 0.4, 'processing_time': 3.5, 'due_date': 6},
          6: {'weight': 0.1, 'processing_time': 2.1, 'due_date': 6},
          7: {'weight': 0.2, 'processing_time': 2.5, 'due_date': 8},
          8: {'weight': 0.5, 'processing_time': 0.6, 'due_date': 8},
          9: {'weight': 0.3, 'processing_time': 3.2, 'due_date': 8},
          10: {'weight': 0.6, 'processing_time': 5.2, 'due_date': 8}}
    elif njobs==20:
        JobData={1: {'weight': 0.5, 'processing_time': 0.5, 'due_date': 8},
          2: {'weight': 0.8, 'processing_time': 3.6, 'due_date': 6},
          3: {'weight': 0.6, 'processing_time': 2.8000000000000003, 'due_date': 5},
          4: {'weight': 0.4, 'processing_time': 1.9000000000000001, 'due_date': 12},
          5: {'weight': 0.4, 'processing_time': 1.0, 'due_date': 10},
          6: {'weight': 0.1, 'processing_time': 1.8, 'due_date': 11},
          7: {'weight': 0.2, 'processing_time': 2.6, 'due_date': 6},
          8: {'weight': 0.5, 'processing_time': 4.699999999999999, 'due_date': 18},
          9: {'weight': 0.3, 'processing_time': 1.8, 'due_date': 8},
          10: {'weight': 0.6, 'processing_time': 1.6, 'due_date': 14},
          11: {'weight': 0.5, 'processing_time': 0.2, 'due_date': 6},
          12: {'weight': 0.8, 'processing_time': 0.4, 'due_date': 30},
          13: {'weight': 0.6, 'processing_time': 2.2, 'due_date': 22},
          14: {'weight': 0.4, 'processing_time': 1.2000000000000002, 'due_date': 6},
          15: {'weight': 0.4, 'processing_time': 0.1, 'due_date': 38},
          16: {'weight': 0.1, 'processing_time': 2.0, 'due_date': 16},
          17: {'weight': 0.2, 'processing_time': 2.9, 'due_date': 10},
          18: {'weight': 0.5, 'processing_time': 0.1, 'due_date': 12},
          19: {'weight': 0.3, 'processing_time': 2.5, 'due_date': 20},
          20: {'weight': 0.6, 'processing_time': 0.9, 'due_date': 15}}
    elif njobs==30:
        JobData={1: {'weight': 0.5, 'processing_time': 6, 'due_date': 29},
          2: {'weight': 0.9, 'processing_time': 1, 'due_date': 38},
          3: {'weight': 0.0, 'processing_time': 2, 'due_date': 28},
          4: {'weight': 0.3, 'processing_time': 6, 'due_date': 35},
          5: {'weight': 0.6, 'processing_time': 5, 'due_date': 43},
          6: {'weight': 0.3, 'processing_time': 4, 'due_date': 44},
          7: {'weight': 0.9, 'processing_time': 6, 'due_date': 26},
          8: {'weight': 0.4, 'processing_time': 1, 'due_date': 18},
          9: {'weight': 0.2, 'processing_time': 2, 'due_date': 48},
          10: {'weight': 0.4, 'processing_time': 4, 'due_date': 37},
          11: {'weight': 1.0, 'processing_time': 1, 'due_date': 26},
          12: {'weight': 0.1, 'processing_time': 4, 'due_date': 36},
          13: {'weight': 0.4, 'processing_time': 6, 'due_date': 24},
          14: {'weight': 0.8, 'processing_time': 3, 'due_date': 45},
          15: {'weight': 0.1, 'processing_time': 4, 'due_date': 41},
          16: {'weight': 1.0, 'processing_time': 1, 'due_date': 18},
          17: {'weight': 0.1, 'processing_time': 3, 'due_date': 46},
          18: {'weight': 0.4, 'processing_time': 4, 'due_date': 36},
          19: {'weight': 0.7, 'processing_time': 8, 'due_date': 20},
          20: {'weight': 1.0, 'processing_time': 8, 'due_date': 18},
          21: {'weight': 0.2, 'processing_time': 2, 'due_date': 25},
          22: {'weight': 0.1, 'processing_time': 2, 'due_date': 34},
          23: {'weight': 0.0, 'processing_time': 2, 'due_date': 36},
          24: {'weight': 0.6, 'processing_time': 4, 'due_date': 42},
          25: {'weight': 0.1, 'processing_time': 5, 'due_date': 29},
          26: {'weight': 0.9, 'processing_time': 6, 'due_date': 26},
          27: {'weight': 0.7, 'processing_time': 3, 'due_date': 33},
          28: {'weight': 0.6, 'processing_time': 1, 'due_date': 24},
          29: {'weight': 0.4, 'processing_time': 1, 'due_date': 25},
          30: {'weight': 0.9, 'processing_time': 4, 'due_date': 26}}
    else:
        # a specific exception type is friendlier to callers than bare Exception
        raise ValueError('--error: choose njobs as 10, 20, or 30')
    return JobData
def Objfun(solution):
    '''Return the total weighted tardiness of a job sequence.

    ``solution`` is an ordered iterable of job ids; each job's weight,
    processing time and due date are read from the module-level ``dictt``
    (as produced by ``input_data``).
    '''
    clock = 0           # running machine time
    total = 0           # accumulated weighted tardiness
    for job in solution:
        job_info = dictt[job]
        finish = clock + job_info["processing_time"]      # completion time
        lateness = max(0, finish - job_info["due_date"])  # tardiness (>= 0)
        total += job_info["weight"] * lateness
        clock = finish
    return total
#############################################
#### -- TSP related input
#############################################
def Gen_TSP_Data():
    '''Build the 51-city TSP instance.

    Side effect: (re)creates the module-level ``dictt`` mapping city id
    (a string, "1".."51") to its [x, y] coordinates. Returns the known
    optimal tour (a list of city ids) and its cost as computed by ``Tour``.
    '''
    # --- coordinates of the 51 cities
    city_loc_list = [
        [37, 52], [49, 49], [52, 64], [20, 26], [40, 30], [21, 47], [17, 63],
        [31, 62], [52, 33], [51, 21], [42, 41], [31, 32], [5, 25], [12, 42],
        [36, 16], [52, 41], [27, 23], [17, 33], [13, 13], [57, 58], [62, 42],
        [42, 57], [16, 57], [8, 52], [7, 38], [27, 68], [30, 48], [43, 67],
        [58, 48], [58, 27], [37, 69], [38, 46], [46, 10], [61, 33], [62, 63],
        [63, 69], [32, 22], [45, 35], [59, 15], [5, 6], [10, 17], [21, 10],
        [5, 64], [30, 15], [39, 10], [32, 39], [25, 32], [25, 55], [48, 28],
        [56, 37], [30, 40]]
    global dictt
    dictt = {str(city_id + 1): [x, y]
             for city_id, (x, y) in enumerate(city_loc_list)}
    # known optimal tour, used as a reference point
    optimum_tour_city = [1, 22, 8, 26, 31, 28, 3, 36, 35, 20, 2, 29, 21, 16,
                         50, 34, 30, 9, 49, 10, 39, 33, 45, 15, 44, 42, 40,
                         19, 41, 13, 25, 14, 24, 43, 7, 23, 48, 6, 27, 51,
                         46, 12, 47, 18, 4, 17, 37, 5, 38, 11, 32]
    optimum_tour_cost = Tour(optimum_tour_city)
    return optimum_tour_city, optimum_tour_cost
def Tour(tour):
    '''Negated total (per-leg rounded) Euclidean length of a closed tour.

    City coordinates are read from the module-level ``dictt``; the path is
    closed by returning from the last city back to the first. Each leg is
    rounded to the nearest integer before summing, and the negated total
    is returned (so larger values mean shorter tours).
    '''
    total = 0
    # consecutive legs along the tour
    for a, b in zip(tour[:-1], tour[1:]):
        p = np.array(dictt[str(a)])
        q = np.array(dictt[str(b)])
        total += int(round(np.sqrt(np.sum(np.power(p - q, 2)))))
    # closing leg: last city back to the first
    p = np.array(dictt[str(tour[-1])])
    q = np.array(dictt[str(tour[0])])
    total += int(round(np.sqrt(np.sum(np.power(p - q, 2)))))
    return -total
if __name__=='__main__':
    # Problem 1: Travelling Salesman (51 cities). Disabled here; flip
    # `if 0` to `if 1` to run it.
    if 0:
        #Setup the parameter space
        optimum_tour_city,optimum_tour_cost = Gen_TSP_Data()
        print("------- Running TSP -------")
        print("Optimum tour cost:",optimum_tour_cost)
        nx=51
        BOUNDS={}
        # one integer decision variable per tour position, each in [1, 51]
        for i in range(1,nx+1):
            BOUNDS['x'+str(i)]=['int', 1, 51]
        ##use Instance_10.xlsx, Instance_20.xlsx, Instance_30.xlsx
        ts=TS(mode = "min", bounds = BOUNDS, fit = Tour, tabu_tenure=6,
           penalization_weight = 0.8, swap_mode = "swap", ncores=1, seed=1)
        ts.evolute(ngen = 500)
    # Problem 2: single-machine weighted-tardiness job scheduling (enabled).
    if 1:
        #Setup the parameter space
        print("------- Running JOB Scheduling -------")
        njobs=10
        # NOTE: this (re)binds the module-level `dictt` that Objfun reads,
        # because this assignment happens at module scope.
        dictt = input_data(njobs=njobs)
        BOUNDS={}
        # one integer decision variable per schedule slot, each in [1, njobs]
        for i in range(1,njobs+1):
            BOUNDS['x'+str(i)]=['int', 1, njobs]
        ts=TS(mode = "min", bounds = BOUNDS, fit = Objfun,
           tabu_tenure=6, penalization_weight = 0.8, swap_mode = "swap", ncores=1, seed=1)
        ts.evolute(ngen = 30)
| [
"numpy.power",
"neorl.TS"
] | [((8252, 8372), 'neorl.TS', 'TS', ([], {'mode': '"""min"""', 'bounds': 'BOUNDS', 'fit': 'Tour', 'tabu_tenure': '(6)', 'penalization_weight': '(0.8)', 'swap_mode': '"""swap"""', 'ncores': '(1)', 'seed': '(1)'}), "(mode='min', bounds=BOUNDS, fit=Tour, tabu_tenure=6, penalization_weight=\n 0.8, swap_mode='swap', ncores=1, seed=1)\n", (8254, 8372), False, 'from neorl import TS\n'), ((8708, 8829), 'neorl.TS', 'TS', ([], {'mode': '"""min"""', 'bounds': 'BOUNDS', 'fit': 'Objfun', 'tabu_tenure': '(6)', 'penalization_weight': '(0.8)', 'swap_mode': '"""swap"""', 'ncores': '(1)', 'seed': '(1)'}), "(mode='min', bounds=BOUNDS, fit=Objfun, tabu_tenure=6,\n penalization_weight=0.8, swap_mode='swap', ncores=1, seed=1)\n", (8710, 8829), False, 'from neorl import TS\n'), ((7740, 7764), 'numpy.power', 'np.power', (['(loc1 - loc2)', '(2)'], {}), '(loc1 - loc2, 2)\n', (7748, 7764), True, 'import numpy as np\n'), ((7511, 7535), 'numpy.power', 'np.power', (['(loc1 - loc2)', '(2)'], {}), '(loc1 - loc2, 2)\n', (7519, 7535), True, 'import numpy as np\n')] |
"""
===============
Radon transform
===============
In computed tomography, the tomography reconstruction problem is to obtain
a tomographic slice image from a set of projections [1]_. A projection is
formed by drawing a set of parallel rays through the 2D object of interest,
assigning the integral of the object's contrast along each ray to a single
pixel in the projection. A single projection of a 2D object is one dimensional.
To enable computed tomography reconstruction of the object, several projections
must be acquired, each of them corresponding to a different angle between the
rays with respect to the object. A collection of projections at several angles
is called a sinogram, which is a linear transform of the original image.
The inverse Radon transform is used in computed tomography to reconstruct
a 2D image from the measured projections (the sinogram). A practical, exact
implementation of the inverse Radon transform does not exist, but there are
several good approximate algorithms available.
As the inverse Radon transform reconstructs the object from a set of
projections, the (forward) Radon transform can be used to simulate a
tomography experiment.
This script performs the Radon transform to simulate a tomography experiment
and reconstructs the input image based on the resulting sinogram formed by
the simulation. Two methods for performing the inverse Radon transform
and reconstructing the original image are compared: The Filtered Back
Projection (FBP) and the Simultaneous Algebraic Reconstruction
Technique (SART).
For further information on tomographic reconstruction, see
.. [1] <NAME>, <NAME>, "Principles of Computerized Tomographic Imaging",
IEEE Press 1988. http://www.slaney.org/pct/pct-toc.html
.. [2] Wikipedia, Radon transform,
http://en.wikipedia.org/wiki/Radon_transform#Relationship_with_the_Fourier_transform
.. [3] <NAME>, "Angenaeherte Aufloesung von Systemen linearer
Gleichungen", Bulletin International de l'Academie Polonaise
des Sciences et des Lettres, 35 pp 355--357 (1937)
.. [4] <NAME>, <NAME>, "Simultaneous algebraic reconstruction
technique (SART): a superior implementation of the ART algorithm",
Ultrasonic Imaging 6 pp 81--94 (1984)
The forward transform
=====================
As our original image, we will use the Shepp-Logan phantom. When calculating
the Radon transform, we need to decide how many projection angles we wish
to use. As a rule of thumb, the number of projections should be about the
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its *sinogram*:
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale
# Load the Shepp-Logan phantom shipped with skimage and shrink it so the
# simulated tomography experiment runs quickly.
image = imread(data_dir + "/phantom.png", as_grey=True)
image = rescale(image, scale=0.4, mode='reflect', multichannel=False)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
# One projection angle per pixel across the object (rule of thumb for a
# well-determined reconstruction); endpoint=False avoids duplicating 0/180 deg.
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
           extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.tight_layout()
plt.show()
######################################################################
#
# Reconstruction with the Filtered Back Projection (FBP)
# ======================================================
#
# The mathematical foundation of the filtered back projection is the Fourier
# slice theorem [2]_. It uses Fourier transform of the projection and
# interpolation in Fourier space to obtain the 2D Fourier transform of the
# image, which is then inverted to form the reconstructed image. The filtered
# back projection is among the fastest methods of performing the inverse
# Radon transform. The only tunable parameter for the FBP is the filter,
# which is applied to the Fourier transformed projections. It may be used to
# suppress high frequency noise in the reconstruction. ``skimage`` provides a
# few different options for the filter.
from skimage.transform import iradon
# Invert the sinogram with filtered back projection (default ramp filter).
reconstruction_fbp = iradon(sinogram, theta=theta, circle=True)
error = reconstruction_fbp - image
# Root-mean-square error against the known ground-truth phantom.
print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
imkwargs = dict(vmin=-0.2, vmax=0.2)  # shared color scale for error images
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5),
                               sharex=True, sharey=True)
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
######################################################################
#
# Reconstruction with the Simultaneous Algebraic Reconstruction Technique
# =======================================================================
#
# Algebraic reconstruction techniques for tomography are based on a
# straightforward idea: for a pixelated image the value of a single ray in a
# particular projection is simply a sum of all the pixels the ray passes
# through on its way through the object. This is a way of expressing the
# forward Radon transform. The inverse Radon transform can then be formulated
# as a (large) set of linear equations. As each ray passes through a small
# fraction of the pixels in the image, this set of equations is sparse,
# allowing iterative solvers for sparse linear systems to tackle the system
# of equations. One iterative method has been particularly popular, namely
# Kaczmarz' method [3]_, which has the property that the solution will
# approach a least-squares solution of the equation set.
#
# The combination of the formulation of the reconstruction problem as a set
# of linear equations and an iterative solver makes algebraic techniques
# relatively flexible, hence some forms of prior knowledge can be
# incorporated with relative ease.
#
# ``skimage`` provides one of the more popular variations of the algebraic
# reconstruction techniques: the Simultaneous Algebraic Reconstruction
# Technique (SART) [1]_ [4]_. It uses Kaczmarz' method [3]_ as the iterative
# solver. A good reconstruction is normally obtained in a single iteration,
# making the method computationally effective. Running one or more extra
# iterations will normally improve the reconstruction of sharp, high
# frequency features and reduce the mean squared error at the expense of
# increased high frequency noise (the user will need to decide on what number
# of iterations is best suited to the problem at hand. The implementation in
# ``skimage`` allows prior information of the form of a lower and upper
# threshold on the reconstructed values to be supplied to the reconstruction.
from skimage.transform import iradon_sart
# First SART iteration: iterative algebraic reconstruction from the sinogram.
reconstruction_sart = iradon_sart(sinogram, theta=theta)
error = reconstruction_sart - image
print('SART (1 iteration) rms reconstruction error: %.3g'
      % np.sqrt(np.mean(error**2)))
fig, axes = plt.subplots(2, 2, figsize=(8, 8.5), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].set_title("Reconstruction\nSART")
ax[0].imshow(reconstruction_sart, cmap=plt.cm.Greys_r)
ax[1].set_title("Reconstruction error\nSART")
ax[1].imshow(reconstruction_sart - image, cmap=plt.cm.Greys_r, **imkwargs)
# Run a second iteration of SART by supplying the reconstruction
# from the first iteration as an initial estimate
reconstruction_sart2 = iradon_sart(sinogram, theta=theta,
                                   image=reconstruction_sart)
error = reconstruction_sart2 - image
print('SART (2 iterations) rms reconstruction error: %.3g'
      % np.sqrt(np.mean(error**2)))
ax[2].set_title("Reconstruction\nSART, 2 iterations")
ax[2].imshow(reconstruction_sart2, cmap=plt.cm.Greys_r)
ax[3].set_title("Reconstruction error\nSART, 2 iterations")
ax[3].imshow(reconstruction_sart2 - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
| [
"skimage.transform.iradon",
"skimage.transform.iradon_sart",
"matplotlib.pyplot.show",
"skimage.transform.rescale",
"numpy.mean",
"skimage.transform.radon",
"matplotlib.pyplot.subplots",
"skimage.io.imread"
] | [((3100, 3147), 'skimage.io.imread', 'imread', (["(data_dir + '/phantom.png')"], {'as_grey': '(True)'}), "(data_dir + '/phantom.png', as_grey=True)\n", (3106, 3147), False, 'from skimage.io import imread\n'), ((3156, 3217), 'skimage.transform.rescale', 'rescale', (['image'], {'scale': '(0.4)', 'mode': '"""reflect"""', 'multichannel': '(False)'}), "(image, scale=0.4, mode='reflect', multichannel=False)\n", (3163, 3217), False, 'from skimage.transform import radon, rescale\n'), ((3237, 3273), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4.5)'}), '(1, 2, figsize=(8, 4.5))\n', (3249, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3416, 3454), 'skimage.transform.radon', 'radon', (['image'], {'theta': 'theta', 'circle': '(True)'}), '(image, theta=theta, circle=True)\n', (3421, 3454), False, 'from skimage.transform import radon, rescale\n'), ((3715, 3725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3723, 3725), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4659), 'skimage.transform.iradon', 'iradon', (['sinogram'], {'theta': 'theta', 'circle': '(True)'}), '(sinogram, theta=theta, circle=True)\n', (4623, 4659), False, 'from skimage.transform import iradon\n'), ((4824, 4886), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4.5)', 'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, figsize=(8, 4.5), sharex=True, sharey=True)\n', (4836, 4886), True, 'import matplotlib.pyplot as plt\n'), ((5164, 5174), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5172, 5174), True, 'import matplotlib.pyplot as plt\n'), ((7333, 7367), 'skimage.transform.iradon_sart', 'iradon_sart', (['sinogram'], {'theta': 'theta'}), '(sinogram, theta=theta)\n', (7344, 7367), False, 'from skimage.transform import iradon_sart\n'), ((7511, 7573), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(8, 8.5)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(8, 8.5), 
sharex=True, sharey=True)\n', (7523, 7573), True, 'import matplotlib.pyplot as plt\n'), ((7949, 8010), 'skimage.transform.iradon_sart', 'iradon_sart', (['sinogram'], {'theta': 'theta', 'image': 'reconstruction_sart'}), '(sinogram, theta=theta, image=reconstruction_sart)\n', (7960, 8010), False, 'from skimage.transform import iradon_sart\n'), ((8426, 8436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8434, 8436), True, 'import matplotlib.pyplot as plt\n'), ((4748, 4767), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (4755, 4767), True, 'import numpy as np\n'), ((7478, 7497), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (7485, 7497), True, 'import numpy as np\n'), ((8158, 8177), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (8165, 8177), True, 'import numpy as np\n')] |
"""
(mag, T, x, y, s, d, k, bgd) vs time
"""
import numpy as np, matplotlib.pyplot as plt, pandas as pd
from glob import glob
import os
from astropy.io import fits
from bspline_fit_xyTmag_BIC_approach import homog_get_data
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_EPD_parameters_vs_time(lcpaths, lcdatalist, magtype='IRM1',
                                frac_of_lc=1.0, savdir=None):
    """
    Plot the EPD detrending parameters (plus the magnitude itself) against
    time for each light curve, writing one PNG per light curve to `savdir`.

    Args:
        lcpaths (list of str): paths to the light-curve files; used only to
            name the output PNGs.
        lcdatalist (list): records (e.g. FITS table data) indexable by the
            column names in `xcols` and by `magtype`.
        magtype (str): magnitude column to plot, e.g. 'IRM1'.
        frac_of_lc (float): fraction of the (time-sorted) light curve to
            plot, counted from the start; 1.0 plots everything.
        savdir (str): output directory for the PNGs.

    Existing output files are skipped, as are light curves whose magnitudes
    are entirely NaN.
    """
    # Columns read from each light curve, and the short panel labels.
    # NOTE(review): the label list was a corrupted '<KEY>' placeholder; it is
    # reconstructed from the usages below (df['tmidbjd'], the 'x'/'y' checks)
    # and the module docstring "(mag, T, x, y, s, d, k, bgd) vs time" —
    # confirm against the original repository.
    xcols = ['TMID_BJD','XIC','YIC','CCDTEMP','FSV','FDV','FKV','BGV']
    xkeys = ['tmidbjd','x','y','T','s','d','k','bgd']

    for lcpath, lc in zip(lcpaths, lcdatalist):

        savname = 'EPDparams_vs_time_frac{:.1f}_{}.png'.format(
            frac_of_lc, os.path.splitext(os.path.basename(lcpath))[0])
        savpath = os.path.join(savdir, savname)

        # skip work already done
        if os.path.exists(savpath):
            print('found {}, continue'.format(savpath))
            continue

        time = lc['TMID_BJD']
        flux = lc[magtype]

        # sort once by time; the same ordering is applied to every column
        timeind = np.argsort(time)

        d = {}
        for xcol, k in zip(xcols, xkeys):
            # pandas/seaborn wants little-endian
            le_array = lc[xcol].byteswap().newbyteorder()
            # sort everything by time
            time_sorted_arr = le_array[timeind]
            # take the cut so that you deal with orbit-specific data, if
            # desired
            d[k] = time_sorted_arr[:int(frac_of_lc*len(time_sorted_arr))]
        # same fractional cut for the magnitudes themselves
        d[magtype] = flux[timeind][:int(frac_of_lc*len(time_sorted_arr))]

        df = pd.DataFrame(d)

        if np.all(pd.isnull(df[magtype])):
            print('mags are all NaN for {}, continue'.format(savname))
            continue

        # PLOT each parameter (and the magnitude) vs time, one row per panel
        plt.close('all')
        fig,axs = plt.subplots(nrows=len(xcols), ncols=1, figsize=(6,6),
                            sharex=True)
        axs = axs.flatten()

        xoffset = int(np.median(df['tmidbjd']))
        xval = df['tmidbjd'] - xoffset

        for ax, xkey in zip(axs, xkeys):
            if xkey=='tmidbjd':
                # the first panel shows the magnitude, not time-vs-time
                xkey = magtype
            yval = df[xkey]
            if xkey in ['x','y']:
                # center pixel coordinates for readable tick labels
                yoffset = int(np.mean(yval))
                yval -= yoffset
                xkey += '- {:d}'.format(yoffset)
            elif xkey in [magtype]:
                # center magnitudes and convert to millimag
                yoffset = np.round(np.median(yval), decimals=1)
                yval -= yoffset
                yval *= 1e3
                xkey += '- {:.1f} [mmag]'.format(yoffset)
            ax.scatter(xval, yval, rasterized=True, alpha=0.8, zorder=3, c='k',
                       lw=0, s=3)
            ax.set_ylabel(xkey, fontsize='xx-small')
            ax.get_yaxis().set_tick_params(which='both', direction='in')
            ax.get_xaxis().set_tick_params(which='both', direction='in')
            ax.xaxis.set_tick_params(labelsize='xx-small')
            ax.yaxis.set_tick_params(labelsize='xx-small')
            if magtype in xkey:
                # astronomical convention: brighter (smaller mag) is up
                ylim = ax.get_ylim()
                ax.set_ylim((max(ylim), min(ylim)))

        # raw string keeps the literal "\m" of \mathrm without an escape warning
        ax.set_xlabel(r'BJD$_{\mathrm{TDB}}$ - '+'{}'.format(xoffset))

        fig.tight_layout(h_pad=-1)
        fig.savefig(savpath, dpi=400, bbox_inches='tight')
        print('made {}'.format(savpath))
if __name__=="__main__":
for lctype in ['center','corner']:
savdir = '../results/projid1088_cam2_ccd2_lcs/{}_lcs/'.format(lctype)
lcdir = savdir
lcpaths, lcdatalist = homog_get_data(lctype=lctype, lcdir=lcdir)
plot_EPD_parameters_vs_time(lcpaths, lcdatalist, magtype='IRM1',
frac_of_lc=1.0, savdir=savdir)
#plot_EPD_parameters_vs_time(lcpaths, lcdatalist, magtype='IRM1',
# frac_of_lc=0.4, savdir=savdir)
| [
"pandas.DataFrame",
"os.path.basename",
"numpy.median",
"matplotlib.pyplot.close",
"os.path.exists",
"pandas.isnull",
"numpy.argsort",
"bspline_fit_xyTmag_BIC_approach.homog_get_data",
"numpy.mean",
"os.path.join"
] | [((712, 741), 'os.path.join', 'os.path.join', (['savdir', 'savname'], {}), '(savdir, savname)\n', (724, 741), False, 'import os\n'), ((754, 777), 'os.path.exists', 'os.path.exists', (['savpath'], {}), '(savpath)\n', (768, 777), False, 'import os\n'), ((933, 949), 'numpy.argsort', 'np.argsort', (['time'], {}), '(time)\n', (943, 949), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1458, 1473), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1470, 1473), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1652, 1668), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1661, 1668), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((3429, 3471), 'bspline_fit_xyTmag_BIC_approach.homog_get_data', 'homog_get_data', ([], {'lctype': 'lctype', 'lcdir': 'lcdir'}), '(lctype=lctype, lcdir=lcdir)\n', (3443, 3471), False, 'from bspline_fit_xyTmag_BIC_approach import homog_get_data\n'), ((1493, 1515), 'pandas.isnull', 'pd.isnull', (['df[magtype]'], {}), '(df[magtype])\n', (1502, 1515), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1838, 1862), 'numpy.median', 'np.median', (["df['tmidbjd']"], {}), "(df['tmidbjd'])\n", (1847, 1862), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((663, 687), 'os.path.basename', 'os.path.basename', (['lcpath'], {}), '(lcpath)\n', (679, 687), False, 'import os\n'), ((2101, 2114), 'numpy.mean', 'np.mean', (['yval'], {}), '(yval)\n', (2108, 2114), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((2268, 2283), 'numpy.median', 'np.median', (['yval'], {}), '(yval)\n', (2277, 2283), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n')] |
from __future__ import print_function
import time
import os
import glob
import numpy
import math
from scipy.fftpack import fft
from scipy.fftpack.realtransforms import dct
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import utilities
from scipy.signal import lfilter
eps = 0.00000001
""" Time-domain audio features """
def stZCR(frame):
    """Fraction of consecutive-sample pairs in the frame whose sign changes."""
    signs = numpy.sign(frame)
    sign_flips = numpy.abs(numpy.diff(signs))
    n_crossings = numpy.sum(sign_flips) / 2
    return numpy.float64(n_crossings) / numpy.float64(len(frame) - 1.0)
def stEnergy(frame):
    """Mean of the squared sample amplitudes (short-term energy) of the frame."""
    squared = frame ** 2
    return numpy.sum(squared) / numpy.float64(len(frame))
def stEnergyEntropy(frame, n_short_blocks=10):
    """Entropy of the normalized sub-frame energies (detects abrupt energy changes)."""
    _eps = 1e-08  # mirrors the module-level eps
    total_energy = numpy.sum(frame ** 2)
    n_samples = len(frame)
    block_len = int(numpy.floor(n_samples / n_short_blocks))
    # drop trailing samples that do not fill a whole sub-frame
    if n_samples != block_len * n_short_blocks:
        frame = frame[0:block_len * n_short_blocks]
    # one sub-frame per column (Fortran order keeps samples contiguous per block)
    blocks = frame.reshape(block_len, n_short_blocks, order='F').copy()
    # normalized sub-frame energies behave like a probability distribution
    probs = numpy.sum(blocks ** 2, axis=0) / (total_energy + _eps)
    return -numpy.sum(probs * numpy.log2(probs + _eps))
""" Frequency-domain audio features """
def stSpectralCentroidAndSpread(X, fs):
    """Spectral centroid and spread of a magnitude spectrum, both normalized by fs/2."""
    _eps = 1e-08  # mirrors the module-level eps
    # bin center frequencies in Hz (bins 1..len(X) mapped onto [0, fs/2])
    freqs = numpy.arange(1, len(X) + 1) * (fs / (2.0 * len(X)))
    # peak-normalize without mutating the caller's array
    mags = X / X.max()
    denom = numpy.sum(mags) + _eps
    # centroid: magnitude-weighted mean frequency
    centroid = numpy.sum(freqs * mags) / denom
    # spread: magnitude-weighted standard deviation around the centroid
    spread = numpy.sqrt(numpy.sum(((freqs - centroid) ** 2) * mags) / denom)
    half_fs = fs / 2.0
    return (centroid / half_fs, spread / half_fs)
def stSpectralEntropy(X, n_short_blocks=10):
    """Entropy of the normalized spectral sub-band energies."""
    _eps = 1e-08  # mirrors the module-level eps
    n_bins = len(X)
    total_energy = numpy.sum(X ** 2)
    band_len = int(numpy.floor(n_bins / n_short_blocks))
    # drop trailing bins that do not fill a whole sub-band
    if n_bins != band_len * n_short_blocks:
        X = X[0:band_len * n_short_blocks]
    # one sub-band per column (Fortran order keeps bins contiguous per band)
    bands = X.reshape(band_len, n_short_blocks, order='F').copy()
    band_probs = numpy.sum(bands ** 2, axis=0) / (total_energy + _eps)
    return -numpy.sum(band_probs * numpy.log2(band_probs + _eps))
def stSpectralFlux(X, X_prev):
    """
    Return the spectral flux between two consecutive frames.

    ARGUMENTS:
        X:      the abs(fft) of the current frame
        X_prev: the abs(fft) of the previous frame
    RETURNS:
        sum of squared differences of the (sum-normalized) spectra
    """
    norm_cur = numpy.sum(X + eps)
    norm_prev = numpy.sum(X_prev + eps)
    return numpy.sum((X / norm_cur - X_prev / norm_prev) ** 2)
def stSpectralRollOff(X, c, fs):
    """Return the normalized spectral roll-off of magnitude spectrum ``X``.

    The roll-off is the (bin-normalized) frequency position below which a
    fraction ``c`` of the total spectral energy is concentrated.
    """
    total_energy = numpy.sum(X ** 2)
    fft_length = len(X)
    threshold = c * total_energy
    # Find the first bin where the cumulative energy exceeds the threshold.
    cum_energy = numpy.cumsum(X ** 2) + eps
    [a, ] = numpy.nonzero(cum_energy > threshold)
    if len(a) > 0:
        return numpy.float64(a[0]) / float(fft_length)
    return 0.0
def stHarmonic(frame, fs):
    """
    Compute the harmonic ratio and pitch of a frame.

    ARGUMENTS:
        frame: signal samples (numpy array)
        fs:    sampling frequency in Hz
    RETURNS:
        (HR, f0): harmonic ratio and pitch estimate in Hz
                  (both 0.0 when the frame is judged unvoiced)
    """
    # Maximum autocorrelation lag: 16 ms of samples. Cast to int because
    # numpy.round returns a float, which modern NumPy rejects as an array
    # size (numpy.zeros((M))) and as a slice index (CSum[M:m0:-1]).
    M = int(numpy.round(0.016 * fs)) - 1
    R = numpy.correlate(frame, frame, mode='full')
    g = R[len(frame)-1]
    R = R[len(frame):-1]
    # estimate m0 (as the first zero crossing of R)
    [a, ] = numpy.nonzero(numpy.diff(numpy.sign(R)))
    if len(a) == 0:
        m0 = len(R)-1
    else:
        m0 = a[0]
    if M > len(R):
        M = len(R) - 1
    # Normalized autocorrelation over the lag range [m0, M).
    Gamma = numpy.zeros((M), dtype=numpy.float64)
    CSum = numpy.cumsum(frame ** 2)
    Gamma[m0:M] = R[m0:M] / (numpy.sqrt((g * CSum[M:m0:-1])) + eps)
    ZCR = stZCR(Gamma)
    if ZCR > 0.15:
        # Too many zero crossings in the normalized autocorrelation:
        # treat the frame as unvoiced.
        HR = 0.0
        f0 = 0.0
    else:
        if len(Gamma) == 0:
            HR = 1.0
            blag = 0.0
            Gamma = numpy.zeros((M), dtype=numpy.float64)
        else:
            HR = numpy.max(Gamma)
            blag = numpy.argmax(Gamma)
        # Get fundamental frequency:
        f0 = fs / (blag + eps)
        if f0 > 5000:
            f0 = 0.0
        if HR < 0.1:
            f0 = 0.0
    return (HR, f0)
def mfccInitFilterBanks(fs, nfft):
    """
    Compute the triangular filterbank for MFCC computation
    (used in the stFeatureExtraction function before the stMFCC function call).
    This function is taken from the scikits.talkbox library (MIT Licence):
    https://pypi.python.org/pypi/scikits.talkbox

    ARGUMENTS:
        fs:   sampling frequency in Hz
        nfft: number of FFT bins
    RETURNS:
        fbank: (n_filters x nfft) filterbank matrix
        freqs: corner frequencies of the triangular filters
    """
    # filter bank params:
    lowfreq = 133.33
    linsc = 200/3.
    logsc = 1.0711703
    numLinFiltTotal = 13
    numLogFilt = 27
    if fs < 8000:
        # NOTE(review): this assignment has no effect here (numLogFilt, not
        # nlogfil, is used below); kept for parity with the upstream library.
        nlogfil = 5
    # Total number of filters
    nFiltTotal = numLinFiltTotal + numLogFilt
    # Corner frequencies of the triangles: linearly spaced low band followed
    # by a logarithmically spaced high band.
    freqs = numpy.zeros(nFiltTotal+2)
    freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc
    freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3)
    heights = 2./(freqs[2:] - freqs[0:-2])
    # Compute filterbank coeff (in fft domain, in bins)
    fbank = numpy.zeros((nFiltTotal, nfft))
    nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
    for i in range(nFiltTotal):
        lowTrFreq = freqs[i]
        cenTrFreq = freqs[i+1]
        highTrFreq = freqs[i+2]
        # dtype=int: numpy.int was removed in NumPy 1.24 -- use the builtin.
        lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1,
                           numpy.floor(cenTrFreq * nfft / fs) + 1,
                           dtype=int)
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1,
                           numpy.floor(highTrFreq * nfft / fs) + 1,
                           dtype=int)
        rslope = heights[i] / (highTrFreq - cenTrFreq)
        # Rising and falling edges of the i-th triangular filter.
        fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
        fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
    return fbank, freqs
def stMFCC(X, fbank, n_mfcc_feats):
    """
    Compute the MFCCs of a frame, given its fft magnitude.

    ARGUMENTS:
        X:            fft magnitude abs(FFT)
        fbank:        filter bank (see mfccInitFilterBanks)
        n_mfcc_feats: number of cepstral coefficients to keep
    RETURN:
        ceps: MFCCs (n_mfcc_feats element vector)

    Note: MFCC calculation is, in general, taken from the scikits.talkbox
    library (MIT Licence), with a small number of modifications to make it
    more compact and suitable for the pyAudioAnalysis Lib.
    """
    # Log filterbank energies, then a DCT-II to decorrelate them.
    mspec = numpy.log10(numpy.dot(X, fbank.T) + eps)
    return dct(mspec, type=2, norm='ortho', axis=-1)[:n_mfcc_feats]
def stChromaFeaturesInit(nfft, fs):
    """
    Initialize the chroma matrices used in the chroma feature calculation.

    RETURNS:
        nChroma:         chroma bin index of every FFT bin
        nFreqsPerChroma: number of FFT bins mapped to each bin's chroma index
    """
    # Center frequency of every FFT bin (1-based) in Hz.
    freqs = (numpy.arange(nfft) + 1) * fs / (2.0 * nfft)
    Cp = 27.50  # reference frequency (A0) in Hz
    nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int)
    nFreqsPerChroma = numpy.zeros((nChroma.shape[0], ))
    for chroma_idx in numpy.unique(nChroma):
        matching = numpy.nonzero(nChroma == chroma_idx)
        nFreqsPerChroma[matching] = matching[0].shape
    return nChroma, nFreqsPerChroma
def stChromaFeatures(X, fs, nChroma, nFreqsPerChroma):
    """
    Compute the 12-bin chroma vector of a magnitude spectrum.

    ARGUMENTS:
        X:               fft magnitude abs(FFT)
        fs:              sampling frequency (unused here; kept for API parity)
        nChroma:         chroma index of every FFT bin (stChromaFeaturesInit)
        nFreqsPerChroma: bins-per-chroma counts (stChromaFeaturesInit)
    RETURNS:
        (chromaNames, finalC): pitch-class labels and a (12 x 1) chroma matrix
    """
    # TODO: 1 complexity
    # TODO: 2 bug with large windows
    chromaNames = ['A', 'A#', 'B', 'C', 'C#', 'D',
                   'D#', 'E', 'F', 'F#', 'G', 'G#']
    spec = X**2
    if nChroma.max() < nChroma.shape[0]:
        # All chroma indices fit: scatter the spectral energy directly.
        C = numpy.zeros((nChroma.shape[0],))
        C[nChroma] = spec
        C /= nFreqsPerChroma[nChroma]
    else:
        # Some indices exceed the array size: only use the bins before the
        # first out-of-range index.
        I = numpy.nonzero(nChroma > nChroma.shape[0])[0][0]
        C = numpy.zeros((nChroma.shape[0],))
        C[nChroma[0:I - 1]] = spec
        C /= nFreqsPerChroma
    # Fold the per-bin energies into 12 pitch classes: zero-pad to a multiple
    # of 12, reshape to rows of 12 and sum the rows.
    padded_len = int(numpy.ceil(C.shape[0] / 12.0) * 12)
    padded = numpy.zeros((padded_len, ))
    padded[0:C.shape[0]] = C
    padded = padded.reshape(int(padded.shape[0] / 12), 12)
    finalC = numpy.matrix(numpy.sum(padded, axis=0)).T
    finalC /= spec.sum()
    return chromaNames, finalC
def stChromagram(signal, fs, win, step, PLOT=False):
    """
    Short-term chromagram computation (one chroma vector per frame).
    ARGUMENTS:
        signal: the input signal samples
        fs:     the sampling freq (in Hz)
        win:    the short-term window size (in samples)
        step:   the short-term window step (in samples)
        PLOT:   if True, plot the resulting chromagram
    RETURNS:
        (chromaGram, TimeAxis, FreqAxis)
    """
    win = int(win)
    step = int(step)
    # Normalize the signal to [-1, 1] and remove the DC offset.
    signal = numpy.double(signal)
    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / (MAX - DC)
    N = len(signal)                # total number of samples
    cur_p = 0
    count_fr = 0
    nfft = int(win / 2)
    nChroma, nFreqsPerChroma = stChromaFeaturesInit(nfft, fs)
    chromaGram = numpy.array([], dtype=numpy.float64)
    # Slide the window over the signal and stack one chroma vector per frame.
    while (cur_p + win - 1 < N):
        count_fr += 1
        x = signal[cur_p:cur_p + win]
        cur_p = cur_p + step
        X = abs(fft(x))            # fft magnitude of the current frame
        X = X[0:nfft]
        X = X / len(X)
        chromaNames, C = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma)
        C = C[:, 0]
        if count_fr == 1:
            chromaGram = C.T
        else:
            chromaGram = numpy.vstack((chromaGram, C.T))
    FreqAxis = chromaNames
    TimeAxis = [(t * step) / fs for t in range(chromaGram.shape[0])]
    if (PLOT):
        fig, ax = plt.subplots()
        chromaGramToPlot = chromaGram.transpose()[::-1, :]
        # Repeat rows so the image aspect ratio stays readable.
        Ratio = int(chromaGramToPlot.shape[1] / (3*chromaGramToPlot.shape[0]))
        if Ratio < 1:
            Ratio = 1
        chromaGramToPlot = numpy.repeat(chromaGramToPlot, Ratio, axis=0)
        imgplot = plt.imshow(chromaGramToPlot)
        fstep = int(nfft / 5.0)
        # FreqTicks = range(0, int(nfft) + fstep, fstep)
        # FreqTicksLabels = [str(fs/2-int((f*fs) / (2*nfft))) for f in FreqTicks]
        ax.set_yticks(range(int(Ratio / 2), len(FreqAxis) * Ratio, Ratio))
        ax.set_yticklabels(FreqAxis[::-1])
        TStep = int(count_fr / 3)
        TimeTicks = range(0, count_fr, TStep)
        TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks]
        ax.set_xticks(TimeTicks)
        ax.set_xticklabels(TimeTicksLabels)
        ax.set_xlabel('time (secs)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()
    return (chromaGram, TimeAxis, FreqAxis)
def phormants(x, fs):
    """
    Estimate formant frequencies of signal ``x`` via LPC root analysis.

    ARGUMENTS:
        x:  signal samples
        fs: sampling frequency in Hz
    RETURNS:
        frqs: sorted formant frequency estimates in Hz
    """
    n_samples = len(x)
    # Apply a Hamming window and a high pass filter.
    windowed = x * numpy.hamming(n_samples)
    windowed = lfilter([1], [1., 0.63], windowed)
    # LPC analysis; rule of thumb for the model order: 2 + fs / 1000.
    ncoeff = 2 + fs / 1000
    A, e, k = lpc(windowed, ncoeff)
    # Roots of the prediction polynomial; keep one of each conjugate pair.
    rts = [r for r in numpy.roots(A) if numpy.imag(r) >= 0]
    # Convert root angles (rad/sample) to frequencies in Hz.
    angz = numpy.arctan2(numpy.imag(rts), numpy.real(rts))
    return sorted(angz * (fs / (2 * math.pi)))
def beatExtraction(st_features, win_len, PLOT=False):
    """
    This function extracts an estimate of the beat rate for a musical signal.
    ARGUMENTS:
     - st_features:     a numpy array (n_feats x numOfShortTermWindows)
     - win_len:         window size in seconds
    RETURNS:
     - BPM:             estimate of beats per minute
     - Ratio:           a confidence measure
    """
    # Features that are related to the beat tracking task:
    toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
    # Longest inter-beat interval considered: 2 seconds, in frames.
    max_beat_time = int(round(2.0 / win_len))
    hist_all = numpy.zeros((max_beat_time,))
    for ii, i in enumerate(toWatch):  # for each feature
        # Peak-detection threshold: 2 x mean of successive differences.
        DifThres = 2.0 * (numpy.abs(st_features[i, 0:-1] - st_features[i, 1::])).mean()
        if DifThres<=0:
            DifThres = 0.0000000000000001
        [pos1, _] = utilities.peakdet(st_features[i, :], DifThres)  # detect local maxima
        posDifs = []  # compute histograms of local maxima changes
        for j in range(len(pos1)-1):
            posDifs.append(pos1[j+1]-pos1[j])
        # Histogram of inter-peak distances, normalized by the number of frames.
        [hist_times, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, max_beat_time + 1.5))
        hist_centers = (HistEdges[0:-1] + HistEdges[1::]) / 2.0
        hist_times = hist_times.astype(float) / st_features.shape[1]
        hist_all += hist_times
        if PLOT:
            plt.subplot(9, 2, ii + 1)
            plt.plot(st_features[i, :], 'k')
            for k in pos1:
                plt.plot(k, st_features[i, k], 'k*')
            f1 = plt.gca()
            f1.axes.get_xaxis().set_ticks([])
            f1.axes.get_yaxis().set_ticks([])
    if PLOT:
        plt.show(block=False)
        plt.figure()
    # Get beat as the argmax of the aggregated histogram:
    I = numpy.argmax(hist_all)
    bpms = 60 / (hist_centers * win_len)
    BPM = bpms[I]
    # ... and the beat ratio (confidence: mass of the winning bin):
    Ratio = hist_all[I] / hist_all.sum()
    if PLOT:
        # filter out >500 beats from plotting:
        hist_all = hist_all[bpms < 500]
        bpms = bpms[bpms < 500]
        plt.plot(bpms, hist_all, 'k')
        plt.xlabel('Beats per minute')
        plt.ylabel('Freq Count')
        plt.show(block=True)
    return BPM, Ratio
def stSpectogram(signal, fs, win, step, PLOT=False):
    """
    Short-term FFT magnitude computation for spectrogram estimation.
    Returns:
        a numpy array (numOfShortTermWindows x nFFT)
    ARGUMENTS:
        signal: the input signal samples
        fs:     the sampling freq (in Hz)
        win:    the short-term window size (in samples)
        step:   the short-term window step (in samples)
        PLOT:   if True, plot the resulting spectrogram
    RETURNS:
        (specgram, TimeAxis, FreqAxis)
    """
    win = int(win)
    step = int(step)
    # Normalize the signal to [-1, 1] and remove the DC offset.
    signal = numpy.double(signal)
    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / (MAX - DC)
    N = len(signal)        # total number of samples
    cur_p = 0
    count_fr = 0
    nfft = int(win / 2)
    specgram = numpy.array([], dtype=numpy.float64)
    while (cur_p + win - 1 < N):
        count_fr += 1
        x = signal[cur_p:cur_p+win]
        cur_p = cur_p + step
        X = abs(fft(x))    # fft magnitude of the current frame
        X = X[0:nfft]
        X = X / len(X)
        if count_fr == 1:
            # NOTE(review): the first frame is stored squared (X ** 2) while
            # the later frames are stacked unsquared -- looks inconsistent;
            # kept as-is to preserve the existing behavior.
            specgram = X ** 2
        else:
            specgram = numpy.vstack((specgram, X))
    FreqAxis = [float((f + 1) * fs) / (2 * nfft) for f in range(specgram.shape[1])]
    TimeAxis = [float(t * step) / fs for t in range(specgram.shape[0])]
    if (PLOT):
        fig, ax = plt.subplots()
        imgplot = plt.imshow(specgram.transpose()[::-1, :])
        fstep = int(nfft / 5.0)
        FreqTicks = range(0, int(nfft) + fstep, fstep)
        FreqTicksLabels = [str(fs / 2 - int((f * fs) / (2 * nfft))) for f in FreqTicks]
        ax.set_yticks(FreqTicks)
        ax.set_yticklabels(FreqTicksLabels)
        TStep = int(count_fr/3)
        TimeTicks = range(0, count_fr, TStep)
        TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks]
        ax.set_xticks(TimeTicks)
        ax.set_xticklabels(TimeTicksLabels)
        ax.set_xlabel('time (secs)')
        ax.set_ylabel('freq (Hz)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()
    return (specgram, TimeAxis, FreqAxis)
""" Windowing and feature extraction """
def stFeatureExtraction(signal, fs, win, step):
    """
    This function implements the short-term windowing process. For each
    short-term window a set of features is extracted. This results in a
    sequence of feature vectors, stored in a numpy matrix.
    ARGUMENTS
        signal:      the input signal samples
        fs:          the sampling freq (in Hz)
        win:         the short-term window size (in samples)
        step:        the short-term window step (in samples)
    RETURNS
        st_features:   a numpy array (n_feats x numOfShortTermWindows)
        feature_names: the list of n_feats feature names
    """
    win = int(win)
    step = int(step)
    # Signal normalization
    signal = numpy.double(signal)
    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / (MAX + 0.0000000001)
    N = len(signal)                                # total number of samples
    cur_p = 0
    count_fr = 0
    nFFT = int(win / 2)
    [fbank, freqs] = mfccInitFilterBanks(fs, nFFT)    # compute the triangular filter banks used in the mfcc calculation
    nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, fs)
    # Feature vector layout: 8 time/spectral + 13 MFCC + 13 chroma features.
    n_time_spectral_feats = 8
    n_harmonic_feats = 0
    n_mfcc_feats = 13
    n_chroma_feats = 13
    n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + n_chroma_feats
    # n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
    feature_names = []
    feature_names.append("zcr")
    feature_names.append("energy")
    feature_names.append("energy_entropy")
    feature_names += ["spectral_centroid", "spectral_spread"]
    feature_names.append("spectral_entropy")
    feature_names.append("spectral_flux")
    feature_names.append("spectral_rolloff")
    feature_names += ["mfcc_{0:d}".format(mfcc_i)
                      for mfcc_i in range(1, n_mfcc_feats+1)]
    feature_names += ["chroma_{0:d}".format(chroma_i)
                      for chroma_i in range(1, n_chroma_feats)]
    feature_names.append("chroma_std")
    st_features = []
    while (cur_p + win - 1 < N):                        # for each short-term window until the end of signal
        count_fr += 1
        x = signal[cur_p:cur_p+win]                    # get current window
        cur_p = cur_p + step                           # update window position
        X = abs(fft(x))                                # get fft magnitude
        X = X[0:nFFT]                                  # normalize fft
        X = X / len(X)
        if count_fr == 1:
            X_prev = X.copy()                          # keep previous fft mag (used in spectral flux)
        curFV = numpy.zeros((n_total_feats, 1))
        curFV[0] = stZCR(x)                              # zero crossing rate
        curFV[1] = stEnergy(x)                           # short-term energy
        curFV[2] = stEnergyEntropy(x)                    # short-term entropy of energy
        [curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, fs)    # spectral centroid and spread
        curFV[5] = stSpectralEntropy(X)                  # spectral entropy
        curFV[6] = stSpectralFlux(X, X_prev)             # spectral flux
        curFV[7] = stSpectralRollOff(X, 0.90, fs)        # spectral rolloff
        curFV[n_time_spectral_feats:n_time_spectral_feats+n_mfcc_feats, 0] = \
            stMFCC(X, fbank, n_mfcc_feats).copy()    # MFCCs
        chromaNames, chromaF = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma)
        # 12 chroma values, followed by their standard deviation.
        curFV[n_time_spectral_feats + n_mfcc_feats:
              n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
            chromaF
        curFV[n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
            chromaF.std()
        st_features.append(curFV)
        # delta features
        '''
        if count_fr>1:
            delta = curFV - prevFV
            curFVFinal = numpy.concatenate((curFV, delta))
        else:
            curFVFinal = numpy.concatenate((curFV, curFV))
        prevFV = curFV
        st_features.append(curFVFinal)
        '''
        # end of delta
        X_prev = X.copy()
    st_features = numpy.concatenate(st_features, 1)
    return st_features, feature_names
def mtFeatureExtraction(signal, fs, mt_win, mt_step, st_win, st_step):
    """
    Mid-term feature extraction: the short-term features are extracted first
    and then summarized (mean and standard deviation) over mid-term windows.

    RETURNS:
        (mt_features, st_features, mid_feature_names)
    """
    mt_win_ratio = int(round(mt_win / st_step))    # mid-term window, in st frames
    mt_step_ratio = int(round(mt_step / st_step))  # mid-term step, in st frames
    st_features, f_names = stFeatureExtraction(signal, fs, st_win, st_step)
    n_feats = len(st_features)
    n_stats = 2  # mean and std
    mt_features = [[] for _ in range(n_stats * n_feats)]
    mid_feature_names = ["" for _ in range(n_stats * n_feats)]
    for i in range(n_feats):  # for each of the short-term features:
        mid_feature_names[i] = f_names[i] + "_" + "mean"
        mid_feature_names[i + n_feats] = f_names[i] + "_" + "std"
        n_frames = len(st_features[i])
        cur_p = 0
        while cur_p < n_frames:
            # Mid-term window [cur_p, end), clipped to the sequence length.
            end = min(cur_p + mt_win_ratio, n_frames)
            cur_st_feats = st_features[i][cur_p:end]
            mt_features[i].append(numpy.mean(cur_st_feats))
            mt_features[i + n_feats].append(numpy.std(cur_st_feats))
            cur_p += mt_step_ratio
    return numpy.array(mt_features), st_features, mid_feature_names
# TODO
def stFeatureSpeed(signal, fs, win, step):
    """
    Experimental short-term feature extraction (currently only the harmonic
    ratio / pitch is computed per frame).

    ARGUMENTS:
        signal: the input signal samples
        fs:     the sampling freq (in Hz)
        win:    the short-term window size (in samples)
        step:   the short-term window step (in samples)
    RETURNS:
        a numpy array with one (HR, f0) entry per frame
    """
    # Normalize to [-1, 1] and remove the DC offset.
    signal = numpy.double(signal)
    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / MAX
    N = len(signal)  # total number of samples
    cur_p = 0
    count_fr = 0
    # mfcc filterbank params (kept for reference; see mfccInitFilterBanks):
    lowfreq = 133.33
    linsc = 200/3.
    logsc = 1.0711703
    nlinfil = 13
    nlogfil = 27
    n_mfcc_feats = 13
    nfil = nlinfil + nlogfil
    # nfft must be an int: it is used as a slice bound below (win / 2 is a
    # float in Python 3 and would raise a TypeError).
    nfft = int(win / 2)
    if fs < 8000:
        nlogfil = 5
        nfil = nlinfil + nlogfil
    # compute filter banks for mfcc. mfccInitFilterBanks takes only (fs, nfft);
    # the previous call passed the filterbank constants as five extra
    # positional arguments, which raised a TypeError at runtime.
    [fbank, freqs] = mfccInitFilterBanks(fs, nfft)
    n_time_spectral_feats = 8
    n_harmonic_feats = 1
    n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
    st_features = []
    while (cur_p + win - 1 < N):
        count_fr += 1
        x = signal[cur_p:cur_p + win]
        cur_p = cur_p + step
        X = abs(fft(x))
        X = X[0:nfft]
        X = X / len(X)
        X[0:4] = 0  # suppress the DC and near-DC bins
        st_features.append(stHarmonic(x, fs))
    return numpy.array(st_features)
""" Feature Extraction Wrappers
- The first two feature extraction wrappers are used to extract long-term averaged
audio features for a list of WAV files stored in a given category.
It is important to note that, one single feature is extracted per WAV file (not the whole sequence of feature vectors)
"""
def dirWavFeatureExtraction(dirName, mt_win, mt_step, st_win, st_step,
                            compute_beat=False):
    """
    This function extracts the mid-term features of the WAVE files of a
    particular folder. The resulting feature vector is extracted by long-term
    averaging the mid-term features. Therefore ONE FEATURE VECTOR is extracted
    for each WAV file.
    ARGUMENTS:
        - dirName:          the path of the WAVE directory
        - mt_win, mt_step:  mid-term window and step (in seconds)
        - st_win, st_step:  short-term window and step (in seconds)
        - compute_beat:     if True, also append beat rate / confidence
    RETURNS:
        (all_mt_feats, wav_file_list2, mt_feature_names)
    """
    all_mt_feats = numpy.array([])
    process_times = []
    types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg')
    wav_file_list = []
    for files in types:
        wav_file_list.extend(glob.glob(os.path.join(dirName, files)))
    wav_file_list = sorted(wav_file_list)
    wav_file_list2, mt_feature_names = [], []
    for i, wavFile in enumerate(wav_file_list):
        print("Analyzing file {0:d} of "
              "{1:d}: {2:s}".format(i+1,
                                    len(wav_file_list),
                                    wavFile))
        if os.stat(wavFile).st_size == 0:
            print(" (EMPTY FILE -- SKIPPING)")
            continue
        [fs, x] = audioBasicIO.readAudioFile(wavFile)
        if isinstance(x, int):
            continue
        # time.clock() was removed in Python 3.8; time.time() is used here to
        # measure the per-file processing duration instead.
        t1 = time.time()
        x = audioBasicIO.stereo2mono(x)
        if x.shape[0]<float(fs)/5:
            print(" (AUDIO FILE TOO SMALL - SKIPPING)")
            continue
        wav_file_list2.append(wavFile)
        if compute_beat:
            [mt_term_feats, st_features, mt_feature_names] = \
                mtFeatureExtraction(x, fs, round(mt_win * fs),
                                    round(mt_step * fs),
                                    round(fs * st_win), round(fs * st_step))
            [beat, beat_conf] = beatExtraction(st_features, st_step)
        else:
            [mt_term_feats, _, mt_feature_names] = \
                mtFeatureExtraction(x, fs, round(mt_win * fs),
                                    round(mt_step * fs),
                                    round(fs * st_win), round(fs * st_step))
        mt_term_feats = numpy.transpose(mt_term_feats)
        mt_term_feats = mt_term_feats.mean(axis=0)
        # long term averaging of mid-term statistics
        if (not numpy.isnan(mt_term_feats).any()) and \
                (not numpy.isinf(mt_term_feats).any()):
            if compute_beat:
                mt_term_feats = numpy.append(mt_term_feats, beat)
                mt_term_feats = numpy.append(mt_term_feats, beat_conf)
            if len(all_mt_feats) == 0:
                # append feature vector
                all_mt_feats = mt_term_feats
            else:
                all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
            t2 = time.time()
            duration = float(len(x)) / fs
            process_times.append((t2 - t1) / duration)
    if len(process_times) > 0:
        print("Feature extraction complexity ratio: "
              "{0:.1f} x realtime".format((1.0 / numpy.mean(numpy.array(process_times)))))
    return (all_mt_feats, wav_file_list2, mt_feature_names)
def dirsWavFeatureExtraction(dirNames, mt_win, mt_step, st_win, st_step, compute_beat=False):
    '''
    Same as dirWavFeatureExtraction, but instead of a single dir it
    takes a list of paths as input and returns a list of feature matrices.
    EXAMPLE:
    [features, classNames] =
           a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise','audioData/classSegmentsRec/speech',
                                       'audioData/classSegmentsRec/brush-teeth','audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02);
    It can be used during the training process of a classification model,
    in order to get feature matrices from various audio classes (each stored
    in a separate path).
    '''
    # feature extraction for each class (one directory per class):
    features = []
    classNames = []
    fileNames = []
    for i, d in enumerate(dirNames):
        [f, fn, feature_names] = dirWavFeatureExtraction(d, mt_win, mt_step,
                                                         st_win, st_step,
                                                         compute_beat=compute_beat)
        if f.shape[0] > 0:
            # at least one audio file was found in the provided folder:
            features.append(f)
            fileNames.append(fn)
            # Class name = last path component (ignore a trailing separator).
            path = d[:-1] if d[-1] == os.sep else d
            classNames.append(path.split(os.sep)[-1])
    return features, classNames, fileNames
def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step):
    """
    Extract the mid-term feature sequences of every WAVE file in a folder,
    WITHOUT long-term averaging (the per-file matrices are stacked).
    ARGUMENTS:
        - dirName:          the path of the WAVE directory
        - mt_win, mt_step:  mid-term window and step (in seconds)
        - st_win, st_step:  short-term window and step (in seconds)
    RETURNS:
        - X:                the stacked feature matrix
        - Y:                a per-row vector of file indices
        - filenames:        the list of matched files
    """
    all_mt_feats = numpy.array([])
    signal_idx = numpy.array([])
    types = ('*.wav', '*.aif', '*.aiff', '*.ogg')
    wav_file_list = []
    for pattern in types:
        wav_file_list.extend(glob.glob(os.path.join(dirName, pattern)))
    wav_file_list = sorted(wav_file_list)
    for file_idx, wavFile in enumerate(wav_file_list):
        [fs, x] = audioBasicIO.readAudioFile(wavFile)
        if isinstance(x, int):
            continue  # unreadable file
        x = audioBasicIO.stereo2mono(x)
        [mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs),
                                                    round(mt_step * fs),
                                                    round(fs * st_win),
                                                    round(fs * st_step))
        mt_term_feats = numpy.transpose(mt_term_feats)
        if len(all_mt_feats) == 0:
            # first file: start the feature matrix and the label vector
            all_mt_feats = mt_term_feats
            signal_idx = numpy.zeros((mt_term_feats.shape[0], ))
        else:
            all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
            signal_idx = numpy.append(signal_idx,
                                      file_idx * numpy.ones((mt_term_feats.shape[0], )))
    return (all_mt_feats, signal_idx, wav_file_list)
# The following two feature extraction wrappers extract features for given audio files; however,
# NO LONG-TERM AVERAGING is performed. Therefore, the output for each audio file is NOT A SINGLE FEATURE VECTOR
# but a whole feature matrix.
#
# In other words, the WAV files in these functions are not treated as uniform samples whose features need to be
# averaged, but as sequences of feature vectors.
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile,
                              storeStFeatures=False, storeToCSV=False, PLOT=False):
    """
    Wrapper that (a) reads the content of a WAV file, (b) performs mid-term
    feature extraction on the signal and (c) writes the feature sequences to
    numpy (and optionally CSV) files.
    """
    [fs, x] = audioBasicIO.readAudioFile(fileName)
    x = audioBasicIO.stereo2mono(x)
    # Window/step sizes converted from seconds to samples.
    mt_args = (round(fs * midTermSize), round(fs * midTermStep),
               round(fs * shortTermSize), round(fs * shortTermStep))
    if storeStFeatures:
        [mtF, stF, _] = mtFeatureExtraction(x, fs, *mt_args)
    else:
        [mtF, _, _] = mtFeatureExtraction(x, fs, *mt_args)
    # save mt features to numpy file
    numpy.save(outPutFile, mtF)
    if PLOT:
        print("Mid-term numpy file: " + outPutFile + ".npy saved")
    if storeToCSV:
        numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",")
        if PLOT:
            print("Mid-term CSV file: " + outPutFile + ".csv saved")
    if storeStFeatures:
        # save st features to numpy file
        numpy.save(outPutFile+"_st", stF)
        if PLOT:
            print("Short-term numpy file: " + outPutFile + "_st.npy saved")
        if storeToCSV:
            # store st features to CSV file
            numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",")
            if PLOT:
                print("Short-term CSV file: " + outPutFile + "_st.csv saved")
def mtFeatureExtractionToFileDir(dirName, midTermSize, midTermStep,
                                 shortTermSize, shortTermStep,
                                 storeStFeatures=False, storeToCSV=False,
                                 PLOT=False):
    """Run mtFeatureExtractionToFile on every WAV file in a directory,
    storing each file's features alongside it (same path as output prefix)."""
    patterns = (dirName + os.sep + '*.wav', )
    files_to_process = []
    for pattern in patterns:
        files_to_process.extend(glob.glob(pattern))
    for wav_file in files_to_process:
        mtFeatureExtractionToFile(wav_file, midTermSize, midTermStep,
                                  shortTermSize, shortTermStep, wav_file,
                                  storeStFeatures, storeToCSV, PLOT)
| [
"numpy.roots",
"numpy.sum",
"numpy.abs",
"numpy.double",
"numpy.argmax",
"numpy.floor",
"numpy.ones",
"numpy.isnan",
"numpy.imag",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.mean",
"numpy.correlate",
"matplotlib.pyplot.gca",
"glob.glob",
"numpy.float64",
"scipy.fftpack.realtr... | [((197, 213), 'matplotlib.use', 'mpl.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (204, 213), True, 'import matplotlib as mpl\n'), ((856, 877), 'numpy.sum', 'numpy.sum', (['(frame ** 2)'], {}), '(frame ** 2)\n', (865, 877), False, 'import numpy\n'), ((1691, 1710), 'numpy.sum', 'numpy.sum', (['(ind * Xt)'], {}), '(ind * Xt)\n', (1700, 1710), False, 'import numpy\n'), ((2097, 2114), 'numpy.sum', 'numpy.sum', (['(X ** 2)'], {}), '(X ** 2)\n', (2106, 2114), False, 'import numpy\n'), ((2976, 2994), 'numpy.sum', 'numpy.sum', (['(X + eps)'], {}), '(X + eps)\n', (2985, 2994), False, 'import numpy\n'), ((3010, 3033), 'numpy.sum', 'numpy.sum', (['(X_prev + eps)'], {}), '(X_prev + eps)\n', (3019, 3033), False, 'import numpy\n'), ((3042, 3088), 'numpy.sum', 'numpy.sum', (['((X / sumX - X_prev / sumPrevX) ** 2)'], {}), '((X / sumX - X_prev / sumPrevX) ** 2)\n', (3051, 3088), False, 'import numpy\n'), ((3191, 3208), 'numpy.sum', 'numpy.sum', (['(X ** 2)'], {}), '(X ** 2)\n', (3200, 3208), False, 'import numpy\n'), ((3439, 3468), 'numpy.nonzero', 'numpy.nonzero', (['(CumSum > Thres)'], {}), '(CumSum > Thres)\n', (3452, 3468), False, 'import numpy\n'), ((3712, 3754), 'numpy.correlate', 'numpy.correlate', (['frame', 'frame'], {'mode': '"""full"""'}), "(frame, frame, mode='full')\n", (3727, 3754), False, 'import numpy\n'), ((4037, 4072), 'numpy.zeros', 'numpy.zeros', (['M'], {'dtype': 'numpy.float64'}), '(M, dtype=numpy.float64)\n', (4048, 4072), False, 'import numpy\n'), ((4086, 4110), 'numpy.cumsum', 'numpy.cumsum', (['(frame ** 2)'], {}), '(frame ** 2)\n', (4098, 4110), False, 'import numpy\n'), ((5287, 5314), 'numpy.zeros', 'numpy.zeros', (['(nFiltTotal + 2)'], {}), '(nFiltTotal + 2)\n', (5298, 5314), False, 'import numpy\n'), ((5601, 5632), 'numpy.zeros', 'numpy.zeros', (['(nFiltTotal, nfft)'], {}), '((nFiltTotal, nfft))\n', (5612, 5632), False, 'import numpy\n'), ((7450, 7482), 'numpy.zeros', 'numpy.zeros', (['(nChroma.shape[0],)'], {}), 
'((nChroma.shape[0],))\n', (7461, 7482), False, 'import numpy\n'), ((7499, 7520), 'numpy.unique', 'numpy.unique', (['nChroma'], {}), '(nChroma)\n', (7511, 7520), False, 'import numpy\n'), ((8284, 8304), 'numpy.zeros', 'numpy.zeros', (['(12, 1)'], {}), '((12, 1))\n', (8295, 8304), False, 'import numpy\n'), ((8365, 8385), 'numpy.zeros', 'numpy.zeros', (['(newD,)'], {}), '((newD,))\n', (8376, 8385), False, 'import numpy\n'), ((9480, 9500), 'numpy.double', 'numpy.double', (['signal'], {}), '(signal)\n', (9492, 9500), False, 'import numpy\n'), ((9822, 9858), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'numpy.float64'}), '([], dtype=numpy.float64)\n', (9833, 9858), False, 'import numpy\n'), ((11445, 11461), 'numpy.hamming', 'numpy.hamming', (['N'], {}), '(N)\n', (11458, 11461), False, 'import numpy\n'), ((11531, 11560), 'scipy.signal.lfilter', 'lfilter', (['[1]', '[1.0, 0.63]', 'x1'], {}), '([1], [1.0, 0.63], x1)\n', (11538, 11560), False, 'from scipy.signal import lfilter\n'), ((11699, 11713), 'numpy.roots', 'numpy.roots', (['A'], {}), '(A)\n', (11710, 11713), False, 'import numpy\n'), ((12524, 12553), 'numpy.zeros', 'numpy.zeros', (['(max_beat_time,)'], {}), '((max_beat_time,))\n', (12535, 12553), False, 'import numpy\n'), ((13845, 13867), 'numpy.argmax', 'numpy.argmax', (['hist_all'], {}), '(hist_all)\n', (13857, 13867), False, 'import numpy\n'), ((14834, 14854), 'numpy.double', 'numpy.double', (['signal'], {}), '(signal)\n', (14846, 14854), False, 'import numpy\n'), ((15112, 15148), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'numpy.float64'}), '([], dtype=numpy.float64)\n', (15123, 15148), False, 'import numpy\n'), ((17108, 17128), 'numpy.double', 'numpy.double', (['signal'], {}), '(signal)\n', (17120, 17128), False, 'import numpy\n'), ((20598, 20631), 'numpy.concatenate', 'numpy.concatenate', (['st_features', '(1)'], {}), '(st_features, 1)\n', (20615, 20631), False, 'import numpy\n'), ((22074, 22094), 'numpy.double', 'numpy.double', (['signal'], {}), 
'(signal)\n', (22086, 22094), False, 'import numpy\n'), ((23743, 23767), 'numpy.array', 'numpy.array', (['st_features'], {}), '(st_features)\n', (23754, 23767), False, 'import numpy\n'), ((24703, 24718), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (24714, 24718), False, 'import numpy\n'), ((29409, 29424), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (29420, 29424), False, 'import numpy\n'), ((29442, 29457), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (29453, 29457), False, 'import numpy\n'), ((31589, 31625), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['fileName'], {}), '(fileName)\n', (31615, 31625), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((31634, 31661), 'pyAudioAnalysis.audioBasicIO.stereo2mono', 'audioBasicIO.stereo2mono', (['x'], {}), '(x)\n', (31658, 31661), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((32324, 32351), 'numpy.save', 'numpy.save', (['outPutFile', 'mtF'], {}), '(outPutFile, mtF)\n', (32334, 32351), False, 'import numpy\n'), ((584, 605), 'numpy.float64', 'numpy.float64', (['countZ'], {}), '(countZ)\n', (597, 605), False, 'import numpy\n'), ((608, 634), 'numpy.float64', 'numpy.float64', (['(count - 1.0)'], {}), '(count - 1.0)\n', (621, 634), False, 'import numpy\n'), ((710, 731), 'numpy.sum', 'numpy.sum', (['(frame ** 2)'], {}), '(frame ** 2)\n', (719, 731), False, 'import numpy\n'), ((943, 974), 'numpy.floor', 'numpy.floor', (['(L / n_short_blocks)'], {}), '(L / n_short_blocks)\n', (954, 974), False, 'import numpy\n'), ((1253, 1285), 'numpy.sum', 'numpy.sum', (['(sub_wins ** 2)'], {'axis': '(0)'}), '(sub_wins ** 2, axis=0)\n', (1262, 1285), False, 'import numpy\n'), ((1721, 1734), 'numpy.sum', 'numpy.sum', (['Xt'], {}), '(Xt)\n', (1730, 1734), False, 'import numpy\n'), ((2173, 2204), 'numpy.floor', 'numpy.floor', (['(L / n_short_blocks)'], {}), '(L / n_short_blocks)\n', (2184, 2204), False, 'import numpy\n'), ((2443, 2475), 'numpy.sum', 'numpy.sum', 
(['(sub_wins ** 2)'], {'axis': '(0)'}), '(sub_wins ** 2, axis=0)\n', (2452, 2475), False, 'import numpy\n'), ((3400, 3420), 'numpy.cumsum', 'numpy.cumsum', (['(X ** 2)'], {}), '(X ** 2)\n', (3412, 3420), False, 'import numpy\n'), ((3676, 3699), 'numpy.round', 'numpy.round', (['(0.016 * fs)'], {}), '(0.016 * fs)\n', (3687, 3699), False, 'import numpy\n'), ((7035, 7076), 'scipy.fftpack.realtransforms.dct', 'dct', (['mspec'], {'type': '(2)', 'norm': '"""ortho"""', 'axis': '(-1)'}), "(mspec, type=2, norm='ortho', axis=-1)\n", (7038, 7076), False, 'from scipy.fftpack.realtransforms import dct\n'), ((7557, 7584), 'numpy.nonzero', 'numpy.nonzero', (['(nChroma == u)'], {}), '(nChroma == u)\n', (7570, 7584), False, 'import numpy\n'), ((7971, 8003), 'numpy.zeros', 'numpy.zeros', (['(nChroma.shape[0],)'], {}), '((nChroma.shape[0],))\n', (7982, 8003), False, 'import numpy\n'), ((8164, 8196), 'numpy.zeros', 'numpy.zeros', (['(nChroma.shape[0],)'], {}), '((nChroma.shape[0],))\n', (8175, 8196), False, 'import numpy\n'), ((10402, 10416), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10414, 10416), True, 'import matplotlib.pyplot as plt\n'), ((10626, 10671), 'numpy.repeat', 'numpy.repeat', (['chromaGramToPlot', 'Ratio'], {'axis': '(0)'}), '(chromaGramToPlot, Ratio, axis=0)\n', (10638, 10671), False, 'import numpy\n'), ((10690, 10718), 'matplotlib.pyplot.imshow', 'plt.imshow', (['chromaGramToPlot'], {}), '(chromaGramToPlot)\n', (10700, 10718), True, 'import matplotlib.pyplot as plt\n'), ((11319, 11333), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11331, 11333), True, 'import matplotlib.pyplot as plt\n'), ((11342, 11352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11350, 11352), True, 'import matplotlib.pyplot as plt\n'), ((11807, 11822), 'numpy.imag', 'numpy.imag', (['rts'], {}), '(rts)\n', (11817, 11822), False, 'import numpy\n'), ((11824, 11839), 'numpy.real', 'numpy.real', (['rts'], {}), '(rts)\n', (11834, 11839), False, 
'import numpy\n'), ((12869, 12915), 'pyAudioAnalysis.utilities.peakdet', 'utilities.peakdet', (['st_features[i, :]', 'DifThres'], {}), '(st_features[i, :], DifThres)\n', (12886, 12915), False, 'from pyAudioAnalysis import utilities\n'), ((13736, 13757), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (13744, 13757), True, 'import matplotlib.pyplot as plt\n'), ((13766, 13778), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13776, 13778), True, 'import matplotlib.pyplot as plt\n'), ((14140, 14169), 'matplotlib.pyplot.plot', 'plt.plot', (['bpms', 'hist_all', '"""k"""'], {}), "(bpms, hist_all, 'k')\n", (14148, 14169), True, 'import matplotlib.pyplot as plt\n'), ((14178, 14208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Beats per minute"""'], {}), "('Beats per minute')\n", (14188, 14208), True, 'import matplotlib.pyplot as plt\n'), ((14217, 14241), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Freq Count"""'], {}), "('Freq Count')\n", (14227, 14241), True, 'import matplotlib.pyplot as plt\n'), ((14250, 14270), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (14258, 14270), True, 'import matplotlib.pyplot as plt\n'), ((15652, 15666), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15664, 15666), True, 'import matplotlib.pyplot as plt\n'), ((16325, 16339), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (16337, 16339), True, 'import matplotlib.pyplot as plt\n'), ((16348, 16358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16356, 16358), True, 'import matplotlib.pyplot as plt\n'), ((19101, 19132), 'numpy.zeros', 'numpy.zeros', (['(n_total_feats, 1)'], {}), '((n_total_feats, 1))\n', (19112, 19132), False, 'import numpy\n'), ((21951, 21975), 'numpy.array', 'numpy.array', (['mt_features'], {}), '(mt_features)\n', (21962, 21975), False, 'import numpy\n'), ((25399, 25434), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 
'audioBasicIO.readAudioFile', (['wavFile'], {}), '(wavFile)\n', (25425, 25434), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((25509, 25521), 'time.clock', 'time.clock', ([], {}), '()\n', (25519, 25521), False, 'import time\n'), ((25542, 25569), 'pyAudioAnalysis.audioBasicIO.stereo2mono', 'audioBasicIO.stereo2mono', (['x'], {}), '(x)\n', (25566, 25569), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((26365, 26395), 'numpy.transpose', 'numpy.transpose', (['mt_term_feats'], {}), '(mt_term_feats)\n', (26380, 26395), False, 'import numpy\n'), ((29760, 29795), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['wavFile'], {}), '(wavFile)\n', (29786, 29795), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((29877, 29904), 'pyAudioAnalysis.audioBasicIO.stereo2mono', 'audioBasicIO.stereo2mono', (['x'], {}), '(x)\n', (29901, 29904), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((30227, 30257), 'numpy.transpose', 'numpy.transpose', (['mt_term_feats'], {}), '(mt_term_feats)\n', (30242, 30257), False, 'import numpy\n'), ((32459, 32515), 'numpy.savetxt', 'numpy.savetxt', (["(outPutFile + '.csv')", 'mtF.T'], {'delimiter': '""","""'}), "(outPutFile + '.csv', mtF.T, delimiter=',')\n", (32472, 32515), False, 'import numpy\n'), ((32674, 32709), 'numpy.save', 'numpy.save', (["(outPutFile + '_st')", 'stF'], {}), "(outPutFile + '_st', stF)\n", (32684, 32709), False, 'import numpy\n'), ((1812, 1842), 'numpy.sum', 'numpy.sum', (['((ind - C) ** 2 * Xt)'], {}), '((ind - C) ** 2 * Xt)\n', (1821, 1842), False, 'import numpy\n'), ((3501, 3520), 'numpy.float64', 'numpy.float64', (['a[0]'], {}), '(a[0])\n', (3514, 3520), False, 'import numpy\n'), ((3895, 3908), 'numpy.sign', 'numpy.sign', (['R'], {}), '(R)\n', (3905, 3908), False, 'import numpy\n'), ((4140, 4169), 'numpy.sqrt', 'numpy.sqrt', (['(g * CSum[M:m0:-1])'], {}), '(g * CSum[M:m0:-1])\n', (4150, 4169), False, 'import numpy\n'), ((4359, 4394), 'numpy.zeros', 'numpy.zeros', 
(['M'], {'dtype': 'numpy.float64'}), '(M, dtype=numpy.float64)\n', (4370, 4394), False, 'import numpy\n'), ((4428, 4444), 'numpy.max', 'numpy.max', (['Gamma'], {}), '(Gamma)\n', (4437, 4444), False, 'import numpy\n'), ((4464, 4483), 'numpy.argmax', 'numpy.argmax', (['Gamma'], {}), '(Gamma)\n', (4476, 4483), False, 'import numpy\n'), ((5353, 5382), 'numpy.arange', 'numpy.arange', (['numLinFiltTotal'], {}), '(numLinFiltTotal)\n', (5365, 5382), False, 'import numpy\n'), ((5457, 5488), 'numpy.arange', 'numpy.arange', (['(1)', '(numLogFilt + 3)'], {}), '(1, numLogFilt + 3)\n', (5469, 5488), False, 'import numpy\n'), ((5646, 5664), 'numpy.arange', 'numpy.arange', (['nfft'], {}), '(nfft)\n', (5658, 5664), False, 'import numpy\n'), ((6997, 7018), 'numpy.dot', 'numpy.dot', (['X', 'fbank.T'], {}), '(X, fbank.T)\n', (7006, 7018), False, 'import numpy\n'), ((8320, 8349), 'numpy.ceil', 'numpy.ceil', (['(C.shape[0] / 12.0)'], {}), '(C.shape[0] / 12.0)\n', (8330, 8349), False, 'import numpy\n'), ((8559, 8580), 'numpy.sum', 'numpy.sum', (['C2'], {'axis': '(0)'}), '(C2, axis=0)\n', (8568, 8580), False, 'import numpy\n'), ((9569, 9586), 'numpy.abs', 'numpy.abs', (['signal'], {}), '(signal)\n', (9578, 9586), False, 'import numpy\n'), ((9998, 10004), 'scipy.fftpack.fft', 'fft', (['x'], {}), '(x)\n', (10001, 10004), False, 'from scipy.fftpack import fft\n'), ((10240, 10271), 'numpy.vstack', 'numpy.vstack', (['(chromaGram, C.T)'], {}), '((chromaGram, C.T))\n', (10252, 10271), False, 'import numpy\n'), ((13211, 13249), 'numpy.arange', 'numpy.arange', (['(0.5)', '(max_beat_time + 1.5)'], {}), '(0.5, max_beat_time + 1.5)\n', (13223, 13249), False, 'import numpy\n'), ((13444, 13469), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(9)', '(2)', '(ii + 1)'], {}), '(9, 2, ii + 1)\n', (13455, 13469), True, 'import matplotlib.pyplot as plt\n'), ((13482, 13514), 'matplotlib.pyplot.plot', 'plt.plot', (['st_features[i, :]', '"""k"""'], {}), "(st_features[i, :], 'k')\n", (13490, 13514), True, 'import 
matplotlib.pyplot as plt\n'), ((13612, 13621), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13619, 13621), True, 'import matplotlib.pyplot as plt\n'), ((14923, 14940), 'numpy.abs', 'numpy.abs', (['signal'], {}), '(signal)\n', (14932, 14940), False, 'import numpy\n'), ((15286, 15292), 'scipy.fftpack.fft', 'fft', (['x'], {}), '(x)\n', (15289, 15292), False, 'from scipy.fftpack import fft\n'), ((15433, 15460), 'numpy.vstack', 'numpy.vstack', (['(specgram, X)'], {}), '((specgram, X))\n', (15445, 15460), False, 'import numpy\n'), ((17198, 17215), 'numpy.abs', 'numpy.abs', (['signal'], {}), '(signal)\n', (17207, 17215), False, 'import numpy\n'), ((18796, 18802), 'scipy.fftpack.fft', 'fft', (['x'], {}), '(x)\n', (18799, 18802), False, 'from scipy.fftpack import fft\n'), ((22163, 22180), 'numpy.abs', 'numpy.abs', (['signal'], {}), '(signal)\n', (22172, 22180), False, 'import numpy\n'), ((23083, 23089), 'scipy.fftpack.fft', 'fft', (['x'], {}), '(x)\n', (23086, 23089), False, 'from scipy.fftpack import fft\n'), ((27012, 27024), 'time.clock', 'time.clock', ([], {}), '()\n', (27022, 27024), False, 'import time\n'), ((30398, 30436), 'numpy.zeros', 'numpy.zeros', (['(mt_term_feats.shape[0],)'], {}), '((mt_term_feats.shape[0],))\n', (30409, 30436), False, 'import numpy\n'), ((30479, 30522), 'numpy.vstack', 'numpy.vstack', (['(all_mt_feats, mt_term_feats)'], {}), '((all_mt_feats, mt_term_feats))\n', (30491, 30522), False, 'import numpy\n'), ((32880, 32939), 'numpy.savetxt', 'numpy.savetxt', (["(outPutFile + '_st.csv')", 'stF.T'], {'delimiter': '""","""'}), "(outPutFile + '_st.csv', stF.T, delimiter=',')\n", (32893, 32939), False, 'import numpy\n'), ((33411, 33427), 'glob.glob', 'glob.glob', (['files'], {}), '(files)\n', (33420, 33427), False, 'import glob\n'), ((1390, 1409), 'numpy.log2', 'numpy.log2', (['(s + eps)'], {}), '(s + eps)\n', (1400, 1409), False, 'import numpy\n'), ((2565, 2584), 'numpy.log2', 'numpy.log2', (['(s + eps)'], {}), '(s + eps)\n', (2575, 2584), 
False, 'import numpy\n'), ((5837, 5871), 'numpy.floor', 'numpy.floor', (['(lowTrFreq * nfft / fs)'], {}), '(lowTrFreq * nfft / fs)\n', (5848, 5871), False, 'import numpy\n'), ((5905, 5939), 'numpy.floor', 'numpy.floor', (['(cenTrFreq * nfft / fs)'], {}), '(cenTrFreq * nfft / fs)\n', (5916, 5939), False, 'import numpy\n'), ((6084, 6118), 'numpy.floor', 'numpy.floor', (['(cenTrFreq * nfft / fs)'], {}), '(cenTrFreq * nfft / fs)\n', (6095, 6118), False, 'import numpy\n'), ((6164, 6199), 'numpy.floor', 'numpy.floor', (['(highTrFreq * nfft / fs)'], {}), '(highTrFreq * nfft / fs)\n', (6175, 6199), False, 'import numpy\n'), ((8098, 8139), 'numpy.nonzero', 'numpy.nonzero', (['(nChroma > nChroma.shape[0])'], {}), '(nChroma > nChroma.shape[0])\n', (8111, 8139), False, 'import numpy\n'), ((11743, 11756), 'numpy.imag', 'numpy.imag', (['r'], {}), '(r)\n', (11753, 11756), False, 'import numpy\n'), ((13558, 13594), 'matplotlib.pyplot.plot', 'plt.plot', (['k', 'st_features[i, k]', '"""k*"""'], {}), "(k, st_features[i, k], 'k*')\n", (13566, 13594), True, 'import matplotlib.pyplot as plt\n'), ((21700, 21724), 'numpy.mean', 'numpy.mean', (['cur_st_feats'], {}), '(cur_st_feats)\n', (21710, 21724), False, 'import numpy\n'), ((21770, 21793), 'numpy.std', 'numpy.std', (['cur_st_feats'], {}), '(cur_st_feats)\n', (21779, 21793), False, 'import numpy\n'), ((24897, 24925), 'os.path.join', 'os.path.join', (['dirName', 'files'], {}), '(dirName, files)\n', (24909, 24925), False, 'import os\n'), ((25272, 25288), 'os.stat', 'os.stat', (['wavFile'], {}), '(wavFile)\n', (25279, 25288), False, 'import os\n'), ((26673, 26706), 'numpy.append', 'numpy.append', (['mt_term_feats', 'beat'], {}), '(mt_term_feats, beat)\n', (26685, 26706), False, 'import numpy\n'), ((26739, 26777), 'numpy.append', 'numpy.append', (['mt_term_feats', 'beat_conf'], {}), '(mt_term_feats, beat_conf)\n', (26751, 26777), False, 'import numpy\n'), ((26951, 26994), 'numpy.vstack', 'numpy.vstack', (['(all_mt_feats, mt_term_feats)'], 
{}), '((all_mt_feats, mt_term_feats))\n', (26963, 26994), False, 'import numpy\n'), ((29619, 29647), 'os.path.join', 'os.path.join', (['dirName', 'files'], {}), '(dirName, files)\n', (29631, 29647), False, 'import os\n'), ((547, 564), 'numpy.sign', 'numpy.sign', (['frame'], {}), '(frame)\n', (557, 564), False, 'import numpy\n'), ((7391, 7413), 'numpy.log2', 'numpy.log2', (['(freqs / Cp)'], {}), '(freqs / Cp)\n', (7401, 7413), False, 'import numpy\n'), ((12675, 12727), 'numpy.abs', 'numpy.abs', (['(st_features[i, 0:-1] - st_features[i, 1:])'], {}), '(st_features[i, 0:-1] - st_features[i, 1:])\n', (12684, 12727), False, 'import numpy\n'), ((30577, 30614), 'numpy.ones', 'numpy.ones', (['(mt_term_feats.shape[0],)'], {}), '((mt_term_feats.shape[0],))\n', (30587, 30614), False, 'import numpy\n'), ((26516, 26542), 'numpy.isnan', 'numpy.isnan', (['mt_term_feats'], {}), '(mt_term_feats)\n', (26527, 26542), False, 'import numpy\n'), ((26577, 26603), 'numpy.isinf', 'numpy.isinf', (['mt_term_feats'], {}), '(mt_term_feats)\n', (26588, 26603), False, 'import numpy\n'), ((27267, 27293), 'numpy.array', 'numpy.array', (['process_times'], {}), '(process_times)\n', (27278, 27293), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
import os
import math
import re
import numpy as np
from tqdm import tqdm
import datetime
import subprocess
from mmd.utils.MLogger import MLogger
from mmd.mmd.VmdData import VmdMorphFrame, VmdMotion
from mmd.mmd.PmxData import PmxModel
from mmd.mmd.VmdWriter import VmdWriter
from mmd.utils.MServiceUtils import get_file_encoding
from mmd.monaural_adapter import FFMPEGMonauralProcessAudioAdapter
logger = MLogger(__name__, level=1)
SEPARATOR = "----------"
def execute(args):
    """Generate lip-sync data from a separated vocal track plus a lyrics file.

    Expects ``args.audio_dir`` to contain ``vocals.wav`` and ``args.lyrics_file``
    to alternate time-range header lines (``M:SS.mmm-M:SS.mmm``) with lyric text
    blocks separated by blank lines.  For each block the vocal audio is sliced,
    phoneme-segmented via the external ``segment_julius.pl`` script, and vowel
    mouth morphs (あ/い/う/え/お/ん) are keyframed from the audio amplitude.
    Writes an AviUtl ``.exo`` subtitle file and an MMD ``.vmd`` lip motion into
    ``args.audio_dir``.  Returns True on success, False on a fatal input error.
    """
    try:
        logger.info('リップ生成処理開始: {0}', args.audio_dir, decoration=MLogger.DECORATION_BOX)
        # Validate the two required input paths before doing any work.
        if not os.path.exists(args.audio_dir):
            logger.error("指定された音声ディレクトリパスが存在しません。\n{0}", args.audio_dir, decoration=MLogger.DECORATION_BOX)
            return False
        if not os.path.exists(args.lyrics_file):
            logger.error("指定された歌詞ファイルパスが存在しません。\n{0}", args.lyrics_file, decoration=MLogger.DECORATION_BOX)
            return False
        vocal_audio_file = os.path.join(args.audio_dir, 'vocals.wav')
        logger.info("リップファイル生成開始", decoration=MLogger.DECORATION_LINE)
        # Load the exo header template (shift-jis, as AviUtl expects).
        exo_header_txt = None
        with open(os.path.join("config", "exo.head.txt"), "r", encoding='shift-jis') as f:
            exo_header_txt = f.read()
        # Load the exo per-character (subtitle object) template.
        exo_chara_txt = None
        with open(os.path.join("config", "exo.chara.txt"), "r", encoding='shift-jis') as f:
            exo_chara_txt = f.read()
        # Read and parse the lyrics file.
        separates = []
        full_lyrics_txts = []
        with open(args.lyrics_file, "r", encoding=get_file_encoding(args.lyrics_file)) as f:
            # Blank lines are allowed: a blank line closes the current lyric block.
            lyric = ""
            for v in f.readlines():
                if re.fullmatch(r'^(\d?\d)\:(\d\d).(\d\d\d)-(\d?\d)\:(\d\d).(\d\d\d)$', v.strip()):
                    m = re.match(r'^(\d?\d)\:(\d\d).(\d\d\d)-(\d?\d)\:(\d\d).(\d\d\d)$', v.strip())
                    # Start time in seconds
                    separate_start_sec = float(m.group(1)) * 60 + float(m.group(2)) + float(m.group(3)) * 0.001
                    # End time in seconds
                    separate_end_sec = float(m.group(4)) * 60 + float(m.group(5)) + float(m.group(6)) * 0.001
                    # Record the (start, end, raw text) triple
                    separates.append((separate_start_sec, separate_end_sec, v.strip()))
                else:
                    if len(v.strip()) == 0:
                        # A blank line: commit the accumulated lyric block.
                        full_lyrics_txts.append(lyric)
                        lyric = ""
                    elif re.match(r'\d', v.strip()) is not None and lyric == "":
                        logger.warning("秒数区切りの書式が間違っています。\n入力文字列: {0}\n{1}", v.strip(), args.lyrics_file, decoration=MLogger.DECORATION_BOX)
                    else:
                        # Ordinary text: strip punctuation/spaces and append to the block.
                        lyric += re.sub(r'\n', '', re.sub(r'(!|\!|?|\?| | |、|。)+', "", v))
            full_lyrics_txts.append(lyric)
        if len(separates) != len(full_lyrics_txts):
            logger.error("歌詞と秒数区切りのペアが正しく設定されていません。\nひとつ上に具体的なエラー箇所が記されてますので、確認してください。\n{0}", args.lyrics_file, decoration=MLogger.DECORATION_BOX)
            return False
        # Sanity-check the time ranges: each must be forward, and blocks monotonic.
        prev_separate_start_sec = 0
        for sidx, (separate_start_sec, separate_end_sec, separate_txt) in enumerate(separates):
            if separate_start_sec > separate_end_sec:
                logger.error("{0}番目のブロックが、終了秒に開始秒より前の値が設定されています。\n終了秒数: {1}, 開始秒数: {2}({3}), \n{4}", \
                             sidx, separate_end_sec, separate_start_sec, separate_txt, args.lyrics_file, decoration=MLogger.DECORATION_BOX)
                return False
            if sidx > 0 and separate_start_sec < prev_separate_start_sec:
                logger.error("{0}番目のブロックが、ひとつ前のブロックの開始秒より前の値が設定されています。\n前回の開始秒数: {1}, 今回の開始秒数: {2}({3}), \n{4}", \
                             sidx, prev_separate_start_sec, separate_start_sec, separate_txt, args.lyrics_file, decoration=MLogger.DECORATION_BOX)
                return False
            prev_separate_start_sec = separate_start_sec
        # Normalize full-width katakana to hiragana for validation.
        full_lyric = "".join(full_lyrics_txts)
        full_lyric = katakana2hiragana(full_lyric)
        # Anything other than hiragana is rejected.
        not_hira_list = re.findall(r'[^っぁ-んー\-{10}( sp )]', full_lyric)
        if len(not_hira_list) > 0:
            # Non-hiragana characters present: abort with an error.
            logger.error("指定された歌詞ファイルに全角カナ・ひらがな以外が含まれています。\n{0}\nエラー文字:{1}", args.lyrics_file, ",".join(not_hira_list), decoration=MLogger.DECORATION_BOX)
            return False
        # Load the vocal wav (mono, resampled to 16 kHz).
        audio_adapter = FFMPEGMonauralProcessAudioAdapter()
        data, org_rate = audio_adapter.load(vocal_audio_file, sample_rate=16000)
        org_rate = int(org_rate)
        # Build the time-axis array (one entry per sample).
        time = np.arange(0, data.shape[0]/org_rate, 1/org_rate)
        # Total frame count at 30 fps.
        end_fno = int(math.ceil(time[-1] * 30))
        # Motion data accumulator
        motion = VmdMotion()
        # exo output file (timestamped to avoid clobbering previous runs)
        process_datetime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        exo_file_path = os.path.join(args.audio_dir, f"{process_datetime}_lyric.exo")
        # NOTE(review): opened without try/finally or `with`; an exception below
        # leaks this handle because close() only runs on the success path.
        lyric_exo_f = open(exo_file_path, "w", encoding='shift-jis')
        lyric_exo_f.write(exo_header_txt.replace("<<length>>", str(end_fno)))
        start_fno = 0
        fno = 0
        now_exo_chara_txt = ""
        fidx = 0
        end_s = 0
        # WAV format: 16 kHz, 16 bit, uncompressed PCM
        rate = 16000
        is_failure = False
        for tidx, ((separate_start_sec, separate_end_sec, separate_txt), lyrics) in enumerate(zip(separates, full_lyrics_txts)):
            tidx_dir_name = f"{tidx:03}"
            hira_lyric = katakana2hiragana(lyrics)
            logger.info("【No.{0}】入力歌詞:\n{1}", tidx, lyrics)
            # Per-block working directory
            os.makedirs(os.path.join(args.audio_dir, tidx_dir_name), exist_ok=True)
            start_fno = int(separate_start_sec * 30)
            fno = start_fno
            if len(hira_lyric) > 300:
                # Skip blocks over 300 chars (margin above the stated 250 to allow for "sp" markers).
                logger.warning("【No.{0}】250文字以上の区間は一度に出力できないため、処理をスキップします。分割してください。\n{1}", f'{tidx:03}', hira_lyric, decoration=MLogger.DECORATION_BOX)
                is_failure = True
                continue
            block_audio_file = os.path.join(args.audio_dir, tidx_dir_name, 'block.wav')
            # Save this block's slice of the wav.
            audio_adapter.save(block_audio_file, data[round(separate_start_sec*org_rate):(round(separate_end_sec*org_rate)-1)], rate, "wav")
            # Reload the slice so indices below are block-relative.
            sep_data, rate = audio_adapter.load(block_audio_file, sample_rate=16000)
            rate = int(rate)
            # Block-local time-axis array
            time = np.arange(0, sep_data.shape[0]/rate, 1/rate)
            # Write this block's lyric text for the segmenter.
            with open(os.path.join(args.audio_dir, tidx_dir_name, 'block.txt'), "w", encoding='utf-8') as f:
                f.write(hira_lyric)
            logger.info("【No.{0}】音素分解開始", f'{tidx:03}', decoration=MLogger.DECORATION_LINE)
            # Phoneme segmentation via the external Julius perl script.
            popen = subprocess.Popen(["perl", "segment_julius.pl" , os.path.join(args.audio_dir, tidx_dir_name)], stdout=subprocess.PIPE)
            try:
                # Wait for completion (30 s timeout).
                popen.wait(timeout=30)
            except subprocess.TimeoutExpired as e:
                try:
                    popen.kill()
                except Exception:
                    pass
                is_failure = True
                logger.warning("【No.{0}】音素分解に失敗しました。", f'{tidx:03}', ", ".join(not_hira_list), decoration=MLogger.DECORATION_BOX)
                continue
            logger.info("【No.{0}】リップモーフ生成開始", f'{tidx:03}', decoration=MLogger.DECORATION_LINE)
            lab_file = os.path.join(args.audio_dir, tidx_dir_name, f'block.lab')
            if not os.path.exists(lab_file) or os.path.getsize(lab_file) == 0:
                logger.warning("【No.{0}】音節取得に失敗しました。\n{1}", f'{tidx:03}', lab_file, ", ".join(not_hira_list), decoration=MLogger.DECORATION_BOX)
                is_failure = True
                continue
            lab_txts = []
            with open(lab_file, "r") as f:
                # Read the raw segmenter output: (start, end, phoneme) per line.
                lab_txts = [v.split() for v in f.readlines()]
            prev_start_s = 0
            prev_syllable = ""
            prev_morph_name = ""
            now_morph_name = ""
            VOWELS = ["a", "i", "u", "e", "o", "a:", "i:", "u:", "e:", "o:"]
            ENDS = ["N"]
            for lidx, (start_s_txt, end_s_txt, syllable) in enumerate(lab_txts):
                # Pad each phoneme interval by 50 ms on both sides.
                # NOTE(review): clamping to len(lab_txts) (an entry count, not a
                # time) looks suspicious — confirm the intended upper bound.
                start_s = min(len(lab_txts), max(0, float(start_s_txt) - 0.05))
                end_s = min(len(lab_txts), max(0, float(end_s_txt) + 0.05))
                # Keyframes span from the previous phoneme start to this one's end.
                now_start_fno = start_fno + round(prev_start_s * 30)
                now_end_fno = start_fno + round(end_s * 30)
                now_ratios = []
                for vowel, morph_name in [("a", "あ"), ("i", "い"), ("u", "う"), ("e", "え"), ("o", "お")]:
                    if syllable.startswith(vowel):
                        now_morph_name = morph_name
                if syllable in ENDS:
                    # "N" (ん): close the mouth — zero out the current vowel morph.
                    vsmf = VmdMorphFrame(max(0, now_start_fno))
                    vsmf.set_name(now_morph_name)
                    vsmf.ratio = 0
                    motion.regist_mf(vsmf, vsmf.name, vsmf.fno)
                    vemf = VmdMorphFrame(max(0, now_end_fno))
                    vemf.set_name(now_morph_name)
                    vemf.ratio = 0
                    motion.regist_mf(vemf, vemf.name, vemf.fno)
                    now_morph_name = "ん"
                elif syllable in VOWELS:
                    # If the previous phoneme was a vowel/terminal, start here;
                    # after a consonant, extend back to the previous start.
                    now_start_s = start_s if prev_syllable in VOWELS or prev_syllable in ENDS else prev_start_s
                    now_vs = []
                    if args.threshold < 1:
                        # Amplitude-tracking mode: one key per video frame.
                        s = now_start_s
                        while s < end_s:
                            # Sample-index range for this frame's volume window
                            rs = round(s * rate)
                            rf = min(len(sep_data), round((s + 1 / 30) * rate) - 1)
                            vs = sep_data[rs:rf]
                            f = round(s * 30) + start_fno
                            if len(vs) > 0:
                                # Vowel ratio follows the peak amplitude;
                                # tapered near the interval edges.
                                vsmf = VmdMorphFrame(max(0, f))
                                vsmf.set_name(now_morph_name)
                                vsmf.ratio = min(1, float(np.max(vs)) * min(((min(2, ((s - now_start_s) * 30)) / 2), ((min(2, (end_s - s) * 30)) / 2))))
                                motion.regist_mf(vsmf, vsmf.name, vsmf.fno)
                                now_ratios.append(str(round(vsmf.ratio, 3)))
                            s += 1 / 30
                        if prev_morph_name != now_morph_name:
                            # Zero key at vowel onset (overwrite).
                            # Skipped when the vowel repeats, since a ratio is already set.
                            mf = motion.calc_mf(now_morph_name, now_start_fno)
                            mf.ratio = 0
                            motion.regist_mf(mf, mf.name, mf.fno)
                        # Close any vowel morph still open at the interval end.
                        for m in ["あ", "い", "う", "え", "お"]:
                            mf = motion.calc_mf(m, now_end_fno)
                            if mf.ratio != 0:
                                mf.ratio = 0
                                motion.regist_mf(mf, mf.name, mf.fno)
                    elif args.threshold == 1:
                        # threshold == 1: register only the interval peak.
                        # Remove any per-frame keys for this morph inside the interval.
                        s = now_start_s
                        while s < end_s:
                            f = round(s * 30) + start_fno
                            mf = motion.calc_mf(now_morph_name, f)
                            if mf.key:
                                del motion.morphs[now_morph_name][f]
                            s += 1 / 30
                        rs = round(now_start_s * rate)
                        rf = round(end_s * rate)
                        vs = sep_data[rs:rf]
                        fs = min(now_start_fno + 2, now_end_fno - 2)
                        fe = max(now_end_fno - 2, now_start_fno + 2)
                        if len(vs) > 0:
                            # Same value at interval start and end → trapezoid shape.
                            vsmf = VmdMorphFrame(max(0, fs))
                            vsmf.set_name(now_morph_name)
                            vsmf.ratio = min(1, float(np.max(vs)))
                            motion.regist_mf(vsmf, vsmf.name, vsmf.fno)
                            now_ratios.append(f'{fs}:{round(vsmf.ratio, 3)}')
                            vemf = VmdMorphFrame(max(0, fe))
                            vemf.set_name(now_morph_name)
                            vemf.ratio = min(1, float(np.max(vs)))
                            motion.regist_mf(vemf, vemf.name, vemf.fno)
                            now_ratios.append(f'{fe}:{round(vemf.ratio, 3)}')
                        vsmf = VmdMorphFrame(now_start_fno)
                        vsmf.set_name(now_morph_name)
                        vsmf.ratio = 0
                        motion.regist_mf(vsmf, vsmf.name, vsmf.fno)
                        vsmf = VmdMorphFrame(now_end_fno)
                        vsmf.set_name(now_morph_name)
                        vsmf.ratio = 0
                        motion.regist_mf(vsmf, vsmf.name, vsmf.fno)
                if syllable in VOWELS or syllable in ENDS:
                    # Emit one exo subtitle object for the completed syllable.
                    now_exo_chara_txt = str(exo_chara_txt)
                    now_chara = prev_syllable + syllable if prev_syllable not in VOWELS else syllable
                    # Romaji -> hiragana
                    now_kana = romaji2hiragana(now_chara)
                    # Long-vowel marker
                    now_kana = now_kana.replace(":", "ー")
                    # exo text field wants the little-endian UTF-16 hex encoding
                    now_uni_chara =to_unicode_escape(now_kana)
                    layer = int(fidx % 3) + 1
                    logger.test(f"fno: {fno}, index: {fidx}, start_fno: {now_start_fno}, layer: {layer}, text: {romaji2hiragana(now_chara)}, uni: {now_uni_chara}")
                    for format_txt, value in [("<<index>>", fidx), ("<<start_fno>>", now_start_fno), ("<<end_fno>>", now_end_fno), ("<<layer>>", layer), \
                                              ("<<encoded_txt>>", now_uni_chara.ljust(4096, '0'))]:
                        now_exo_chara_txt = now_exo_chara_txt.replace(format_txt, str(value))
                    lyric_exo_f.write(now_exo_chara_txt)
                    fidx += 1
                    logger.info(f"[{tidx}-{lidx}][{now_kana}:{now_morph_name}] start: {now_start_fno}({round(start_s,4)}), range: {','.join(now_ratios)} end: {now_end_fno}({round(end_s,4)})")
                prev_start_s = start_s
                prev_syllable = syllable
                prev_morph_name = now_morph_name
            logger.info("【No.{0}】リップモーフ生成終了", f'{tidx:03}', decoration=MLogger.DECORATION_LINE)
        if 0 < args.threshold < 1:
            logger.info("不要モーフキー削除処理", decoration=MLogger.DECORATION_LINE)
            for morph_name in tqdm(['あ', 'い', 'う', 'え', 'お']):
                motion.remove_unnecessary_mf(-1, morph_name, threshold=args.threshold)
        logger.info("モーション生成開始", decoration=MLogger.DECORATION_LINE)
        motion_path = os.path.join(args.audio_dir, f"{process_datetime}_lip.vmd")
        model = PmxModel()
        model.name = "リップモデル"
        writer = VmdWriter(model, motion, motion_path)
        writer.write()
        logger.info("モーション生成終了: {0}", motion_path, decoration=MLogger.DECORATION_BOX)
        lyric_exo_f.close()
        logger.info("exoファイル生成終了: {0}", exo_file_path, decoration=MLogger.DECORATION_BOX)
        if is_failure:
            logger.warning("モーフ生成に失敗してる区間があります。ログを確認してください。", decoration=MLogger.DECORATION_BOX)
        return True
    except Exception as e:
        logger.critical("リップ生成で予期せぬエラーが発生しました。", e, decoration=MLogger.DECORATION_BOX)
        return False
def to_unicode_escape(txt):
    """Encode *txt* as byte-swapped UTF-16 hex digits for the exo text field.

    For each character, ``unicode_escape`` yields ``\\uXXXX``; the two hex
    digit pairs are emitted low byte first (little-endian), so 'あ' (U+3042)
    becomes ``'4230'``.  ASCII characters (whose escape is not ``\\uXXXX``)
    contribute nothing, matching the original behavior.
    """
    pieces = []
    for ch in txt:
        hex_form = ch.encode('unicode_escape').decode('utf-8')
        # low-byte digits first, then high-byte digits
        pieces.append(hex_form[4:6] + hex_form[2:4])
    return "".join(pieces)
def _make_kana_convertor():
"""ひらがな⇔カタカナ変換器を作る"""
kata = {
'ア':'あ', 'イ':'い', 'ウ':'う', 'エ':'え', 'オ':'お',
'カ':'か', 'キ':'き', 'ク':'く', 'ケ':'け', 'コ':'こ',
'サ':'さ', 'シ':'し', 'ス':'す', 'セ':'せ', 'ソ':'そ',
'タ':'た', 'チ':'ち', 'ツ':'つ', 'テ':'て', 'ト':'と',
'ナ':'な', 'ニ':'に', 'ヌ':'ぬ', 'ネ':'ね', 'ノ':'の',
'ハ':'は', 'ヒ':'ひ', 'フ':'ふ', 'ヘ':'へ', 'ホ':'ほ',
'マ':'ま', 'ミ':'み', 'ム':'む', 'メ':'め', 'モ':'も',
'ヤ':'や', 'ユ':'ゆ', 'ヨ':'よ', 'ラ':'ら', 'リ':'り',
'ル':'る', 'レ':'れ', 'ロ':'ろ', 'ワ':'わ', 'ヲ':'を',
'ン':'ん',
'ガ':'が', 'ギ':'ぎ', 'グ':'ぐ', 'ゲ':'げ', 'ゴ':'ご',
'ザ':'ざ', 'ジ':'じ', 'ズ':'ず', 'ゼ':'ぜ', 'ゾ':'ぞ',
'ダ':'だ', 'ヂ':'ぢ', 'ヅ':'づ', 'デ':'で', 'ド':'ど',
'バ':'ば', 'ビ':'び', 'ブ':'ぶ', 'ベ':'べ', 'ボ':'ぼ',
'パ':'ぱ', 'ピ':'ぴ', 'プ':'ぷ', 'ペ':'ぺ', 'ポ':'ぽ',
'ァ':'ぁ', 'ィ':'ぃ', 'ゥ':'ぅ', 'ェ':'ぇ', 'ォ':'ぉ',
'ャ':'ゃ', 'ュ':'ゅ', 'ョ':'ょ',
'ヴ':'う', 'ッ':'っ', 'ー': 'ー'
}
# ひらがな → カタカナ のディクショナリをつくる
hira = dict([(v, k) for k, v in kata.items() ])
re_hira2kata = re.compile("|".join(map(re.escape, hira)))
re_kata2hira = re.compile("|".join(map(re.escape, kata)))
def _hiragana2katakana(text):
return re_hira2kata.sub(lambda x: hira[x.group(0)], text)
def _katakana2hiragana(text):
return re_kata2hira.sub(lambda x: kata[x.group(0)], text)
return (_hiragana2katakana, _katakana2hiragana)
hiragana2katakana, katakana2hiragana = _make_kana_convertor()
def _make_romaji_convertor():
    """Build romaji <-> kana converter functions.

    Returns ``(romaji2katakana, romaji2hiragana, kana2romaji)``.  Conversion
    is table-driven; many rows below are deliberately commented out (left
    as-is from upstream) and are not part of the active mapping.
    """
    master = {
        'la':'ァ', 'li':'ィ', 'lu':'ゥ', 'le':'ェ', 'lo':'ォ',
        'a' :'ア', 'i' :'イ', 'u' :'ウ', 'e' :'エ', 'o' :'オ',
        'ka' :'カ', 'ki' :'キ', 'ku' :'ク', 'ke' :'ケ', 'ko' :'コ',
        'sa' :'サ', 'shi':'シ', 'su' :'ス', 'se' :'セ', 'so' :'ソ',
        'ta' :'タ', 'chi':'チ', 'tsu' :'ツ', 'te' :'テ', 'to' :'ト',
        'na' :'ナ', 'ni' :'ニ', 'nu' :'ヌ', 'ne' :'ネ', 'no' :'ノ',
        'ha' :'ハ', 'hi' :'ヒ', 'fu' :'フ', 'he' :'ヘ', 'ho' :'ホ',
        'ma' :'マ', 'mi' :'ミ', 'mu' :'ム', 'me' :'メ', 'mo' :'モ',
        'ya' :'ヤ', 'yu' :'ユ', 'yo' :'ヨ',
        'ra' :'ラ', 'ri' :'リ', 'ru' :'ル', 're' :'レ', 'ro' :'ロ',
        'wa' :'ワ', 'wo' :'ヲ', 'n' :'ン', 'vu' :'ヴ',
        'ga' :'ガ', 'gi' :'ギ', 'gu' :'グ', 'ge' :'ゲ', 'go' :'ゴ',
        'za' :'ザ', 'ji' :'ジ', 'zu' :'ズ', 'ze' :'ゼ', 'zo' :'ゾ',
        'da' :'ダ', 'di' :'ヂ', 'du' :'ヅ', 'de' :'デ', 'do' :'ド',
        'ba' :'バ', 'bi' :'ビ', 'bu' :'ブ', 'be' :'ベ', 'bo' :'ボ',
        'pa' :'パ', 'pi' :'ピ', 'pu' :'プ', 'pe' :'ペ', 'po' :'ポ',
        'kya':'キャ', 'kyi':'キィ', 'kyu':'キュ', 'kye':'キェ', 'kyo':'キョ',
        'gya':'ギャ', 'gyi':'ギィ', 'gyu':'ギュ', 'gye':'ギェ', 'gyo':'ギョ',
        'sha':'シャ', 'shu':'シュ', 'she':'シェ', 'sho':'ショ',
        'ja' :'ジャ', 'ju' :'ジュ', 'je' :'ジェ', 'jo' :'ジョ',
        'cha':'チャ', 'chu':'チュ', 'che':'チェ', 'cho':'チョ',
        'dya':'ヂャ', 'dyi':'ヂィ', 'dyu':'ヂュ', 'dhe':'デェ', 'dyo':'ヂョ',
        'nya':'ニャ', 'nyi':'ニィ', 'nyu':'ニュ', 'nye':'ニェ', 'nyo':'ニョ',
        'hya':'ヒャ', 'hyi':'ヒィ', 'hyu':'ヒュ', 'hye':'ヒェ', 'hyo':'ヒョ',
        'bya':'ビャ', 'byi':'ビィ', 'byu':'ビュ', 'bye':'ビェ', 'byo':'ビョ',
        'pya':'ピャ', 'pyi':'ピィ', 'pyu':'ピュ', 'pye':'ピェ', 'pyo':'ピョ',
        'mya':'ミャ', 'myi':'ミィ', 'myu':'ミュ', 'mye':'ミェ', 'myo':'ミョ',
        'rya':'リャ', 'ryi':'リィ', 'ryu':'リュ', 'rye':'リェ', 'ryo':'リョ',
        # 'fa' :'ファ', 'fi' :'フィ', 'fe' :'フェ', 'fo' :'フォ',
        # 'wi' :'ウィ', 'we' :'ウェ',
        # 'va' :'ヴァ', 'vi' :'ヴィ', 've' :'ヴェ', 'vo' :'ヴォ',
        # 'kwa':'クァ', 'kwi':'クィ', 'kwu':'クゥ', 'kwe':'クェ', 'kwo':'クォ',
        # 'kha':'クァ', 'khi':'クィ', 'khu':'クゥ', 'khe':'クェ', 'kho':'クォ',
        # 'gwa':'グァ', 'gwi':'グィ', 'gwu':'グゥ', 'gwe':'グェ', 'gwo':'グォ',
        # 'gha':'グァ', 'ghi':'グィ', 'ghu':'グゥ', 'ghe':'グェ', 'gho':'グォ',
        # 'swa':'スァ', 'swi':'スィ', 'swu':'スゥ', 'swe':'スェ', 'swo':'スォ',
        # 'swa':'スァ', 'swi':'スィ', 'swu':'スゥ', 'swe':'スェ', 'swo':'スォ',
        # 'zwa':'ズヮ', 'zwi':'ズィ', 'zwu':'ズゥ', 'zwe':'ズェ', 'zwo':'ズォ',
        # 'twa':'トァ', 'twi':'トィ', 'twu':'トゥ', 'twe':'トェ', 'two':'トォ',
        # 'dwa':'ドァ', 'dwi':'ドィ', 'dwu':'ドゥ', 'dwe':'ドェ', 'dwo':'ドォ',
        # 'mwa':'ムヮ', 'mwi':'ムィ', 'mwu':'ムゥ', 'mwe':'ムェ', 'mwo':'ムォ',
        # 'bwa':'ビヮ', 'bwi':'ビィ', 'bwu':'ビゥ', 'bwe':'ビェ', 'bwo':'ビォ',
        # 'pwa':'プヮ', 'pwi':'プィ', 'pwu':'プゥ', 'pwe':'プェ', 'pwo':'プォ',
        # 'phi':'プィ', 'phu':'プゥ', 'phe':'プェ', 'pho':'フォ',
    }
    romaji_asist = {
        # 'si' :'シ' , 'ti' :'チ' , 'hu' :'フ' , 'zi':'ジ',
        # 'sya':'シャ', 'syu':'シュ', 'syo':'ショ',
        # 'tya':'チャ', 'tyu':'チュ', 'tyo':'チョ',
        # 'cya':'チャ', 'cyu':'チュ', 'cyo':'チョ',
        # 'jya':'ジャ', 'jyu':'ジュ', 'jyo':'ジョ', 'pha':'ファ',
        # 'qa' :'クァ', 'qi' :'クィ', 'qu' :'クゥ', 'qe' :'クェ', 'qo':'クォ',
        # 'ca' :'カ', 'ci':'シ', 'cu':'ク', 'ce':'セ', 'co':'コ',
        # 'la' :'ラ', 'li':'ィ', 'lu':'ル', 'le':'レ', 'lo':'ロ',
        # 'mb' :'ム', 'py':'パイ', 'tho': 'ソ', 'thy':'ティ', 'oh':'オウ',
        # 'by':'ビィ', 'cy':'シィ', 'dy':'ディ', 'fy':'フィ', 'gy':'ジィ',
        # 'hy':'シー', 'ly':'リィ', 'ny':'ニィ', 'my':'ミィ', 'ry':'リィ',
        # 'ty':'ティ', 'vy':'ヴィ', 'zy':'ジィ',
        # 'li':'ィ',
        # 'b':'ブ', 'c':'ク', 'd':'ド', 'f':'フ' , 'g':'グ', 'h':'フ', 'j':'ジ',
        # 'k':'ク', 'l':'ル', 'm':'ム', 'p':'プ' , 'q':'ク', 'r':'ル', 's':'ス',
        # 't':'ト', 'v':'ヴ', 'w':'ゥ', 'x':'クス', 'y':'ィ', 'z':'ズ',
    }
    kana_asist = { 'la':'ァ', 'li':'ィ', 'lu':'ゥ', 'le':'ェ', 'lo':'ォ', }
    def __romaji2kana():
        # Merge the active tables; longest keys first so e.g. 'sha' wins over 'sa'.
        romaji_dict = {}
        for tbl in master, romaji_asist:
            for k, v in tbl.items(): romaji_dict[k] = v
        romaji_keys = list(romaji_dict.keys())
        romaji_keys.sort(key=lambda x:len(x), reverse=True)
        re_roma2kana = re.compile("|".join(map(re.escape, romaji_keys)))
        # 'm' before a b/p row becomes "ン"
        rx_mba = re.compile("m(b|p)([aiueo])")
        # A doubled consonant becomes "ッ"
        rx_xtu = re.compile(r"([bcdfghjklmpqrstvwxyz])\1")
        # A doubled vowel becomes "ー"
        rx_a__ = re.compile(r"([aiueo])\1")
        def _romaji2katakana(text):
            result = text.lower()
            result = rx_mba.sub(r"ン\1\2", result)
            result = rx_xtu.sub(r"ッ\1" , result)
            result = rx_a__.sub(r"\1ー" , result)
            return re_roma2kana.sub(lambda x: romaji_dict[x.group(0)], result)
        def _romaji2hiragana(text):
            result = _romaji2katakana(text)
            return katakana2hiragana(result)
        return _romaji2katakana, _romaji2hiragana
    def __kana2romaji():
        kana_dict = {}
        for tbl in master, kana_asist:
            for k, v in tbl.items(): kana_dict[v] = k
        kana_keys = list(kana_dict.keys())
        kana_keys.sort(key=lambda x:len(x), reverse=True)
        re_kana2roma = re.compile("|".join(map(re.escape, kana_keys)))
        rx_xtu = re.compile("ッ(.)") # small "ッ" doubles the following character
        # rx_ltu = re.compile("ッ$" ) # 最後の小さい "ッ" は消去(?)
        # rx_er = re.compile("(.)ー") # "ー"は直前の文字を2回に変換
        # rx_n = re.compile(r"n(b|p)([aiueo])") # n の後ろが バ行、パ行 なら m に修正
        # rx_oo = re.compile(r"([aiueo])\1") # oosaka → osaka
        def _kana2romaji(text):
            result = hiragana2katakana(text)
            result = re_kana2roma.sub(lambda x: kana_dict[x.group(0)], result)
            result = rx_xtu.sub(r"\1\1" , result)
            # result = rx_ltu.sub(r"" , result)
            # result = rx_er.sub (r"\1\1" , result)
            # result = rx_n.sub (r"m\1\2", result)
            # result = rx_oo.sub (r"\1" , result)
            return result
        return _kana2romaji
    a, b = __romaji2kana()
    c = __kana2romaji()
    return a, b, c
romaji2katakana, romaji2hiragana, kana2romaji = _make_romaji_convertor()
| [
"mmd.utils.MServiceUtils.get_file_encoding",
"tqdm.tqdm",
"mmd.mmd.VmdData.VmdMorphFrame",
"mmd.mmd.PmxData.PmxModel",
"re.compile",
"math.ceil",
"os.path.getsize",
"os.path.exists",
"mmd.monaural_adapter.FFMPEGMonauralProcessAudioAdapter",
"datetime.datetime.now",
"numpy.max",
"re.findall",
... | [((432, 458), 'mmd.utils.MLogger.MLogger', 'MLogger', (['__name__'], {'level': '(1)'}), '(__name__, level=1)\n', (439, 458), False, 'from mmd.utils.MLogger import MLogger\n'), ((1003, 1045), 'os.path.join', 'os.path.join', (['args.audio_dir', '"""vocals.wav"""'], {}), "(args.audio_dir, 'vocals.wav')\n", (1015, 1045), False, 'import os\n'), ((4275, 4322), 're.findall', 're.findall', (['"""[^っぁ-んー\\\\-{10}( sp )]"""', 'full_lyric'], {}), "('[^っぁ-んー\\\\-{10}( sp )]', full_lyric)\n", (4285, 4322), False, 'import re\n'), ((4607, 4642), 'mmd.monaural_adapter.FFMPEGMonauralProcessAudioAdapter', 'FFMPEGMonauralProcessAudioAdapter', ([], {}), '()\n', (4640, 4642), False, 'from mmd.monaural_adapter import FFMPEGMonauralProcessAudioAdapter\n'), ((4794, 4846), 'numpy.arange', 'np.arange', (['(0)', '(data.shape[0] / org_rate)', '(1 / org_rate)'], {}), '(0, data.shape[0] / org_rate, 1 / org_rate)\n', (4803, 4846), True, 'import numpy as np\n'), ((4929, 4940), 'mmd.mmd.VmdData.VmdMotion', 'VmdMotion', ([], {}), '()\n', (4938, 4940), False, 'from mmd.mmd.VmdData import VmdMorphFrame, VmdMotion\n'), ((5060, 5121), 'os.path.join', 'os.path.join', (['args.audio_dir', 'f"""{process_datetime}_lyric.exo"""'], {}), "(args.audio_dir, f'{process_datetime}_lyric.exo')\n", (5072, 5121), False, 'import os\n'), ((15645, 15704), 'os.path.join', 'os.path.join', (['args.audio_dir', 'f"""{process_datetime}_lip.vmd"""'], {}), "(args.audio_dir, f'{process_datetime}_lip.vmd')\n", (15657, 15704), False, 'import os\n'), ((15722, 15732), 'mmd.mmd.PmxData.PmxModel', 'PmxModel', ([], {}), '()\n', (15730, 15732), False, 'from mmd.mmd.PmxData import PmxModel\n'), ((15781, 15818), 'mmd.mmd.VmdWriter.VmdWriter', 'VmdWriter', (['model', 'motion', 'motion_path'], {}), '(model, motion, motion_path)\n', (15790, 15818), False, 'from mmd.mmd.VmdWriter import VmdWriter\n'), ((22502, 22531), 're.compile', 're.compile', (['"""m(b|p)([aiueo])"""'], {}), "('m(b|p)([aiueo])')\n", (22512, 22531), False, 'import 
re\n'), ((22575, 22616), 're.compile', 're.compile', (['"""([bcdfghjklmpqrstvwxyz])\\\\1"""'], {}), "('([bcdfghjklmpqrstvwxyz])\\\\1')\n", (22585, 22616), False, 'import re\n'), ((22660, 22686), 're.compile', 're.compile', (['"""([aiueo])\\\\1"""'], {}), "('([aiueo])\\\\1')\n", (22670, 22686), False, 'import re\n'), ((23538, 23556), 're.compile', 're.compile', (['"""ッ(.)"""'], {}), "('ッ(.)')\n", (23548, 23556), False, 'import re\n'), ((619, 649), 'os.path.exists', 'os.path.exists', (['args.audio_dir'], {}), '(args.audio_dir)\n', (633, 649), False, 'import os\n'), ((800, 832), 'os.path.exists', 'os.path.exists', (['args.lyrics_file'], {}), '(args.lyrics_file)\n', (814, 832), False, 'import os\n'), ((4866, 4890), 'math.ceil', 'math.ceil', (['(time[-1] * 30)'], {}), '(time[-1] * 30)\n', (4875, 4890), False, 'import math\n'), ((6283, 6339), 'os.path.join', 'os.path.join', (['args.audio_dir', 'tidx_dir_name', '"""block.wav"""'], {}), "(args.audio_dir, tidx_dir_name, 'block.wav')\n", (6295, 6339), False, 'import os\n'), ((6698, 6746), 'numpy.arange', 'np.arange', (['(0)', '(sep_data.shape[0] / rate)', '(1 / rate)'], {}), '(0, sep_data.shape[0] / rate, 1 / rate)\n', (6707, 6746), True, 'import numpy as np\n'), ((7742, 7799), 'os.path.join', 'os.path.join', (['args.audio_dir', 'tidx_dir_name', 'f"""block.lab"""'], {}), "(args.audio_dir, tidx_dir_name, f'block.lab')\n", (7754, 7799), False, 'import os\n'), ((15432, 15463), 'tqdm.tqdm', 'tqdm', (["['あ', 'い', 'う', 'え', 'お']"], {}), "(['あ', 'い', 'う', 'え', 'お'])\n", (15436, 15463), False, 'from tqdm import tqdm\n'), ((1195, 1233), 'os.path.join', 'os.path.join', (['"""config"""', '"""exo.head.txt"""'], {}), "('config', 'exo.head.txt')\n", (1207, 1233), False, 'import os\n'), ((1381, 1420), 'os.path.join', 'os.path.join', (['"""config"""', '"""exo.chara.txt"""'], {}), "('config', 'exo.chara.txt')\n", (1393, 1420), False, 'import os\n'), ((4986, 5009), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5007, 
5009), False, 'import datetime\n'), ((5798, 5841), 'os.path.join', 'os.path.join', (['args.audio_dir', 'tidx_dir_name'], {}), '(args.audio_dir, tidx_dir_name)\n', (5810, 5841), False, 'import os\n'), ((1617, 1652), 'mmd.utils.MServiceUtils.get_file_encoding', 'get_file_encoding', (['args.lyrics_file'], {}), '(args.lyrics_file)\n', (1634, 1652), False, 'from mmd.utils.MServiceUtils import get_file_encoding\n'), ((6790, 6846), 'os.path.join', 'os.path.join', (['args.audio_dir', 'tidx_dir_name', '"""block.txt"""'], {}), "(args.audio_dir, tidx_dir_name, 'block.txt')\n", (6802, 6846), False, 'import os\n'), ((7105, 7148), 'os.path.join', 'os.path.join', (['args.audio_dir', 'tidx_dir_name'], {}), '(args.audio_dir, tidx_dir_name)\n', (7117, 7148), False, 'import os\n'), ((7820, 7844), 'os.path.exists', 'os.path.exists', (['lab_file'], {}), '(lab_file)\n', (7834, 7844), False, 'import os\n'), ((7848, 7873), 'os.path.getsize', 'os.path.getsize', (['lab_file'], {}), '(lab_file)\n', (7863, 7873), False, 'import os\n'), ((2895, 2939), 're.sub', 're.sub', (['"""(!|\\\\!|?|\\\\?| |\u3000|、|。)+"""', '""""""', 'v'], {}), "('(!|\\\\!|?|\\\\?| |\\u3000|、|。)+', '', v)\n", (2901, 2939), False, 'import re\n'), ((13292, 13320), 'mmd.mmd.VmdData.VmdMorphFrame', 'VmdMorphFrame', (['now_start_fno'], {}), '(now_start_fno)\n', (13305, 13320), False, 'from mmd.mmd.VmdData import VmdMorphFrame, VmdMotion\n'), ((13514, 13540), 'mmd.mmd.VmdData.VmdMorphFrame', 'VmdMorphFrame', (['now_end_fno'], {}), '(now_end_fno)\n', (13527, 13540), False, 'from mmd.mmd.VmdData import VmdMorphFrame, VmdMotion\n'), ((12736, 12746), 'numpy.max', 'np.max', (['vs'], {}), '(vs)\n', (12742, 12746), True, 'import numpy as np\n'), ((13073, 13083), 'numpy.max', 'np.max', (['vs'], {}), '(vs)\n', (13079, 13083), True, 'import numpy as np\n'), ((10673, 10683), 'numpy.max', 'np.max', (['vs'], {}), '(vs)\n', (10679, 10683), True, 'import numpy as np\n')] |
import numpy as np
import warnings
from ..core import Bullet
from ..scene_maker import BulletSceneMaker
from ..collision_checker import BulletCollisionChecker
from ..robots import PandaDualArm
class PandaDualArmEnvBase:
    """Base PyBullet environment holding two Panda arms above a shared table.

    :param render: if True, open the PyBullet GUI.
    :param arm_distance: distance [m] between the two arm bases along y.
    """

    def __init__(self, render=False, arm_distance=0.4):
        self.is_render = render
        self.bullet = Bullet(render=render)
        self.scene_maker = BulletSceneMaker(self.bullet)
        # The two arms are placed symmetrically about the origin along y.
        self.robot = PandaDualArm(
            self.bullet,
            panda1_position=[0, arm_distance/2, 0],
            panda2_position=[0, -arm_distance/2, 0]
        )
        self._make_env()
        self.checker = BulletCollisionChecker(self.bullet)
        # Axis-aligned task-space bounds (lower/upper xyz corners) used by
        # get_random_free_configuration_in_taskspace().
        self.task_ll = [0, -0.5, 0]
        self.task_ul = [0.5, 0.5, 0.5]

    def _make_env(self):
        """Build the static scene (floor plane + table) and aim the visualizer."""
        self.scene_maker.create_plane(z_offset=-0.4)
        self.scene_maker.create_table(length=1.0, width=1.0, height=0.4, x_offset=0)
        self.bullet.place_visualizer(
            target_position=np.zeros(3),
            distance=1.6,
            yaw=45,
            pitch=-30
        )

    def render(
        self,
        mode: str,
        width: int = 720,
        height: int = 480,
        target_position: np.ndarray = None,
        distance: float = 1.4,
        yaw: float = 45,
        pitch: float = -30,
        roll: float = 0,
    ):
        """Render the scene with the given camera parameters.

        :param mode: render mode forwarded to the PyBullet wrapper.
        :param target_position: camera look-at point; defaults to the origin.
        :return: whatever the underlying ``bullet.render`` returns.
        """
        # BUGFIX: the default used to be the mutable expression
        # ``np.zeros(3)`` evaluated once at definition time and shared by
        # all calls; use a None sentinel instead.  Caller-visible behavior
        # is unchanged.
        if target_position is None:
            target_position = np.zeros(3)
        return self.bullet.render(
            mode,
            width=width,
            height=height,
            target_position=target_position,
            distance=distance,
            yaw=yaw,
            pitch=pitch,
            roll=roll,
        )

    def get_random_configuration(self, collision_free=False):
        """Sample a random joint configuration.

        :param collision_free: if True, rejection-sample (up to 100 tries)
            until a collision-free configuration is found.
        :return: joint angles, or None if no collision-free sample was found.
        """
        if not collision_free:
            return self.robot.get_random_joint_angles(set=False)
        random_joint_angles = None
        with self.robot.no_set_joint():
            for _ in range(100):
                self.robot.get_random_joint_angles(set=True)
                if not self.checker.is_collision():
                    random_joint_angles = self.robot.get_joint_angles()
                    # BUGFIX: stop at the first feasible sample instead of
                    # continuing to sample (and re-check collisions) for
                    # all 100 iterations.
                    break
        return random_joint_angles

    def get_random_free_configuration_in_taskspace(self, panda1_first=True):
        """Sample a collision-free configuration with both end effectors
        inside the task-space box [task_ll, task_ul].

        Arms are sampled sequentially; ``panda1_first`` selects the order.
        NOTE: each arm loops until success, so this may not terminate if
        the task space is unreachable.
        """
        if panda1_first:
            first_robot = self.robot.panda1
            second_robot = self.robot.panda2
        else:
            first_robot = self.robot.panda2
            second_robot = self.robot.panda1
        for robot in [first_robot, second_robot]:
            while True:
                robot.get_random_joint_angles(set=True)
                ee_position = robot.get_ee_position()
                is_collision_free = not self.checker.is_collision()
                is_in_taskspace = np.all(self.task_ll < ee_position) \
                    & np.all(ee_position < self.task_ul)
                if is_collision_free & is_in_taskspace:
                    break
        return self.robot.get_joint_angles()

    def reset(self):
        """Reset the robot to a random collision-free configuration.

        :return: the joint angles used, or None if none could be found
            (a warning is emitted in that case).
        """
        joints_init = self.get_random_configuration(collision_free=True)
        if joints_init is None:
            warnings.warn('env.reset() can`t find feasible reset configuration')
            return None
        self.robot.set_joint_angles(joints_init)
        return joints_init

    def is_collision(self, joint_angles=None):
        """Check whether a configuration is in collision.

        :param joint_angles: configuration to test; defaults to the current
            one.  The robot state is restored afterwards via no_set_joint().
        """
        if joint_angles is None:
            joint_angles = self.robot.get_joint_angles()
        result = False
        with self.robot.no_set_joint():
            self.robot.set_joint_angles(joint_angles)
            if self.checker.is_collision():
                result = True
        return result
| [
"warnings.warn",
"numpy.zeros",
"numpy.all"
] | [((1208, 1219), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1216, 1219), True, 'import numpy as np\n'), ((3196, 3264), 'warnings.warn', 'warnings.warn', (['"""env.reset() can`t find feasible reset configuration"""'], {}), "('env.reset() can`t find feasible reset configuration')\n", (3209, 3264), False, 'import warnings\n'), ((969, 980), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (977, 980), True, 'import numpy as np\n'), ((2720, 2754), 'numpy.all', 'np.all', (['(self.task_ll < ee_position)'], {}), '(self.task_ll < ee_position)\n', (2726, 2754), True, 'import numpy as np\n'), ((2793, 2827), 'numpy.all', 'np.all', (['(ee_position < self.task_ul)'], {}), '(ee_position < self.task_ul)\n', (2799, 2827), True, 'import numpy as np\n')] |
# real time prediction
import cv2
import time
import mediapipe as mp
import numpy as np
import os
from utils import mediapipe_detection, draw_landmarks, draw_landmarks_custom, draw_limit_rh, draw_limit_lh, check_detection, points_detection
#from keras.models import model_from_json
import pickle
from sklearn import svm
from argparse import ArgumentParser
# - INPUT PARAMETERS ------------------------------- #
# Command-line interface: model path plus prediction / mediapipe thresholds.
parser = ArgumentParser()
parser.add_argument("-m", "--model", dest="ML_model", default='models/model_svm_all.sav',
                    help="PATH of model FILE.", metavar="FILE")
parser.add_argument("-t", "--threshold", dest="threshold_prediction", default=0.5, type=float,
                    help="Threshold for prediction. A number between 0 and 1. default is 0.5")
parser.add_argument("-dc", "--det_conf", dest="min_detection_confidence", default=0.5, type=float,
                    help="Threshold for prediction. A number between 0 and 1. default is 0.5")
parser.add_argument("-tc", "--trk_conf", dest="min_tracking_confidence", default=0.5, type=float,
                    help="Threshold for prediction. A number between 0 and 1. default is 0.5")
args = parser.parse_args()
# -------------------------------------------------- #
# load svm model (pickled scikit-learn classifier)
model = pickle.load(open(args.ML_model, 'rb'))
labels = np.array(model.classes_) # put the entire alphabet in the future
mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils
words = []
# Capture from the default webcam (device 0).
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=args.min_detection_confidence,
                          min_tracking_confidence=args.min_tracking_confidence) as holistic:
    while cap.isOpened():
        ret, frame = cap.read()
        #frame = cv2.flip(frame, 1)
        h, w, c = frame.shape
        # make detection
        # NOTE(review): `image` is never used afterwards; only `results`
        # (the mediapipe landmarks) is consumed.
        image, results = mediapipe_detection(frame, holistic)
        color = (0,0,0)
        #cv2.rectangle(frame, (0+int(0.03*h),int(h-0.14*h)), (0+int(0.75*h), int(h-0.015*h)), color,-1)
        # White side panel on the left that hosts the per-label bars.
        cv2.rectangle(frame, (0, 0),
                    (int(w*0.18), int(h-h*0.12)), (255,255,255),-1)
        # Draw one label row per class with an (initially empty) bar.
        for i in range(len(labels)):
            # cv2.rectangle(frame, (90, 10+ i*int(50)), (90, 60+ i*int(50)), color,-1)
            cv2.putText(frame, labels[i], (50, (i+1)*int(h/(len(labels)+4))), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,0), 2, cv2.LINE_AA)
            cv2.rectangle(frame, (90, (i)*int(h/(len(labels)+4))+30),
                        (90, (i+1)*int(h/(len(labels)+4)) ), color,-1)
        # perform prediction with relative probability
        if results.right_hand_landmarks:
            # draw_limit_rh(frame, results)
            # uncomment for NN
            # prediction = labels[np.argmax(model.predict(np.array([points_detection(results)])))]
            # NOTE(review): points_detection(results) is recomputed several
            # times per frame (here and in the bar loop below); hoisting it
            # into one call would avoid redundant work.
            prediction = model.predict(np.array([points_detection(results)]))[0]
            pred_prob = np.max(model.predict_proba(np.array([points_detection(results)])))
            # Redraw each label with a bar proportional to its probability.
            for i in range(len(labels)):
                # cv2.rectangle(frame, (70, 10+ i*int(50)), (70+int(model.predict_proba(np.array([points_detection(results)]))[0][i]*100)*3, 60+ i*int(50)), color,-1)
                cv2.putText(frame, labels[i], (50, (i+1)*int(h/(len(labels)+4))), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,0), 2, cv2.LINE_AA)
                cv2.rectangle(frame, (90, (i)*int(h/(len(labels)+4))+30),
                            (90+int(model.predict_proba(np.array([points_detection(results)]))[0][i]*100)*2, (i+1)*int(h/(len(labels)+4)) ), color,-1)
            # uncomment for NN
            # for i in range(len(labels)):
            #     cv2.rectangle(frame, (70, 10+ i*int(50)), (70+int(model.predict(np.array([points_detection(results)]))[0][i]*100)*3, 60+ i*int(50)), color,-1)
            #     cv2.putText(frame, labels[i], (10, 50+ i*int(50)), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,255,0), 4, cv2.LINE_AA)
            # add text with prediction
            # Confidence banner: green above the CLI threshold, red below
            # 0.3, yellow "Maybe" in between.
            if pred_prob > args.threshold_prediction:
                cv2.putText(frame, f'{prediction.capitalize()} ({int(pred_prob*100)}%)',
                            (0+int(0.05*h),h-int(0.05*h)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            2 ,
                            (0,255,0),
                            2,
                            cv2.LINE_AA)
            elif pred_prob < 0.3:
                cv2.putText(frame, 'I am not sure...',
                            (0+int(0.05*h),h-int(0.05*h)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            2 ,
                            (0, 0, 255),
                            2,
                            cv2.LINE_AA)
            else:
                cv2.putText(frame, f'Maybe {prediction.capitalize()} ({int(pred_prob*100)}%)',
                            (0+int(0.05*h),h-int(0.05*h)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            2 ,
                            (45, 255, 255),
                            2,
                            cv2.LINE_AA)
        else:
            # No right hand detected in this frame.
            cv2.putText(frame, 'Detecting Hand...',
                        (w-int(0.5*h),int(0.05*h)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        2,
                        (0,0,0),
                        2,
                        cv2.LINE_AA)
        #draw_landmarks_custom(frame, results)
        cv2.imshow('LIS: real time alphabet prediction', frame)
        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
| [
"argparse.ArgumentParser",
"utils.points_detection",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.array",
"utils.mediapipe_detection"
] | [((423, 439), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (437, 439), False, 'from argparse import ArgumentParser\n'), ((1328, 1352), 'numpy.array', 'np.array', (['model.classes_'], {}), '(model.classes_)\n', (1336, 1352), True, 'import numpy as np\n'), ((1487, 1506), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1503, 1506), False, 'import cv2\n'), ((5611, 5634), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5632, 5634), False, 'import cv2\n'), ((1858, 1894), 'utils.mediapipe_detection', 'mediapipe_detection', (['frame', 'holistic'], {}), '(frame, holistic)\n', (1877, 1894), False, 'from utils import mediapipe_detection, draw_landmarks, draw_landmarks_custom, draw_limit_rh, draw_limit_lh, check_detection, points_detection\n'), ((5467, 5522), 'cv2.imshow', 'cv2.imshow', (['"""LIS: real time alphabet prediction"""', 'frame'], {}), "('LIS: real time alphabet prediction', frame)\n", (5477, 5522), False, 'import cv2\n'), ((5535, 5549), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5546, 5549), False, 'import cv2\n'), ((2867, 2892), 'utils.points_detection', 'points_detection', (['results'], {}), '(results)\n', (2883, 2892), False, 'from utils import mediapipe_detection, draw_landmarks, draw_landmarks_custom, draw_limit_rh, draw_limit_lh, check_detection, points_detection\n'), ((2960, 2985), 'utils.points_detection', 'points_detection', (['results'], {}), '(results)\n', (2976, 2985), False, 'from utils import mediapipe_detection, draw_landmarks, draw_landmarks_custom, draw_limit_rh, draw_limit_lh, check_detection, points_detection\n'), ((3482, 3507), 'utils.points_detection', 'points_detection', (['results'], {}), '(results)\n', (3498, 3507), False, 'from utils import mediapipe_detection, draw_landmarks, draw_landmarks_custom, draw_limit_rh, draw_limit_lh, check_detection, points_detection\n')] |
"""
SQLite database backend
Store traces from tallyable objects in individual SQL tables.
Implementation Notes
--------------------
For each object, a table is created with the following format:
key (INT), trace (INT), v1 (FLOAT), v2 (FLOAT), v3 (FLOAT) ...
The key is autoincremented each time a new row is added to the table.
trace denotes the chain index, and starts at 0.
Additional Dependencies
-----------------------
sqlite3 <http://www.sqlite.org>
Changeset
---------
Created by <NAME> on 2007-02-01.
Updated by DH on 2007-04-04.
DB API changes, October 2008, DH.
"""
# TODO: Add support for integer valued objects.
import numpy as np
from numpy import zeros, shape, squeeze, transpose
import sqlite3
import base, pickle, ram, pymc
import pdb,os
from pymc.database import base
__all__ = ['Trace', 'Database', 'load']
class Trace(base.Trace):
    """SQLite Trace class.

    Each tallied object is stored in its own table with columns
    (recid, trace, v1, v2, ...); ``trace`` holds the 0-based chain index.
    """

    def _initialize(self, chain, length):
        """Create the SQL table for this object (only on the first chain)."""
        # If the table already exists, exit now.
        if chain != 0:
            return
        # Determine the number of scalar components of the tallied value;
        # a scalar (non-sized) value gets a single column.
        try:
            size = len(self._getfunc())
        except TypeError:
            size = 1
        query = "create table %s (recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, trace int(5), %s FLOAT)" % (self.name, ' FLOAT, '.join(['v%s' % (x+1) for x in range(size)]))
        self.db.cur.execute(query)

    def tally(self, chain):
        """Add the object's current value as a new row of its table."""
        size = 1
        try:
            size = len(self._getfunc())
        except TypeError:
            pass
        try:
            # I changed str(x) to '%f'%x to solve a bug appearing due to
            # locale settings. In french for instance, str prints a comma
            # instead of a colon to indicate the decimal, which confuses
            # the database into thinking that there are more values than there
            # is. A better solution would be to use another delimiter than the
            # comma. -DH
            valstring = ', '.join(['%f'%x for x in self._getfunc()])
        except Exception:
            # BUGFIX: narrowed the bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Scalar (non-iterable) values
            # land here and are stored via str().
            valstring = str(self._getfunc())
        # Add value to database
        self.db.cur.execute("INSERT INTO %s (recid, trace, %s) values (NULL, %s, %s)" % \
            (self.name, ' ,'.join(['v%s' % (x+1) for x in range(size)]), chain, valstring))

    def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
        """Return the trace (last by default).

        Input:
          - burn (int): The number of transient steps to skip.
          - thin (int): Keep one in thin.
          - chain (int): The index of the chain to fetch. If None, return all
            chains. By default, the last chain is returned.
          - slicing: A slice, overriding burn and thin assignement.
        """
        if not slicing:
            slicing = slice(burn, None, thin)
        # If chain is None, get the data from all chains.
        if chain is None:
            self.db.cur.execute('SELECT * FROM %s' % self.name)
            trace = self.db.cur.fetchall()
        else:
            # Deal with negative chains (starting from the end)
            if chain < 0:
                chain = range(self.db.chains)[chain]
            self.db.cur.execute('SELECT * FROM %s WHERE trace=%s' % (self.name, chain))
            trace = self.db.cur.fetchall()
        # Drop the recid and trace columns, keep only the values.
        trace = np.array(trace)[:,2:]
        return squeeze(trace[slicing])

    __call__ = gettrace

    def length(self, chain=-1):
        """Return the sample length of given chain. If chain is None,
        return the total length of all chains."""
        return len(self.gettrace(chain=chain))
class Database(base.Database):
    """SQLite database backend for tallied traces."""

    def __init__(self, dbname, dbmode='a'):
        """Open or create an SQL database.

        :Parameters:
          dbname : string
              The name of the database file.
          dbmode : {'a', 'w'}
              File mode. Use `a` to append values, and `w` to overwrite
              an existing file.
        """
        self.__name__ = 'sqlite'
        self.dbname = dbname
        self.__Trace__ = Trace
        # One sequence of tallied-object names per chain.
        self.variables_to_tally = []
        # Mapping from object name to its Trace instance.
        self._traces = {}
        self.chains = 0
        self._default_chain = -1
        # Write mode starts from a fresh file.
        if dbmode == 'w' and os.path.exists(dbname):
            os.remove(dbname)
        self.DB = sqlite3.connect(dbname, check_same_thread=False)
        self.cur = self.DB.cursor()

    def commit(self):
        """Flush pending inserts to disk."""
        self.DB.commit()

    def close(self, *args, **kwds):
        """Commit outstanding changes and close the connection."""
        self.cur.close()
        self.commit()
        self.DB.close()

    # TODO: Code savestate and getstate to enable storing of the model's
    # state.  `state` is a dict with keys 'sampler' (iteration counters
    # such as _current_iter, _iter, _burn) and 'step_methods' (one state
    # dict per step-method id).  Doing this efficiently would require
    # helpers that persist a dictionary to/from a sqlite database.
    def savestate(self, state):
        """Store a dictionnary containing the state of the Sampler and its
        StepMethods."""
        pass

    def getstate(self):
        """Return a dictionary containing the state of the Sampler and its
        StepMethods."""
        return {}
def load(dbname):
    """Load an existing SQLite database.

    Return a Database instance with one Trace attribute per stored table.
    """
    db = Database(dbname)
    # Get the name of the objects
    tables = get_table_list(db.cur)
    # Create a Trace instance for each object
    chains = 0
    for name in tables:
        db._traces[name] = Trace(name=name, db=db)
        setattr(db, name, db._traces[name])
        db.cur.execute('SELECT MAX(trace) FROM %s'%name)
        max_trace = db.cur.fetchall()[0][0]
        # BUGFIX: MAX(trace) is NULL (None) for an empty table; the old
        # code crashed with a TypeError on None + 1.  Skip empty tables.
        if max_trace is not None:
            chains = max(chains, max_trace + 1)
    db.chains = chains
    db.variables_to_tally = chains * [tables,]
    db._state_ = {}
    return db
# Copied from Django.
def get_table_list(cursor):
    """Return the names of all user tables in the current database, sorted.

    The sqlite_sequence system table (AUTOINCREMENT bookkeeping) is
    excluded.
    """
    cursor.execute("""
    SELECT name FROM sqlite_master
    WHERE type='table' AND NOT name='sqlite_sequence'
    ORDER BY name""")
    rows = cursor.fetchall()
    return [name for (name,) in rows]
| [
"os.remove",
"os.path.exists",
"sqlite3.connect",
"numpy.array",
"numpy.squeeze"
] | [((3417, 3440), 'numpy.squeeze', 'squeeze', (['trace[slicing]'], {}), '(trace[slicing])\n', (3424, 3440), False, 'from numpy import zeros, shape, squeeze, transpose\n'), ((4835, 4883), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {'check_same_thread': '(False)'}), '(dbname, check_same_thread=False)\n', (4850, 4883), False, 'import sqlite3\n'), ((3380, 3395), 'numpy.array', 'np.array', (['trace'], {}), '(trace)\n', (3388, 3395), True, 'import numpy as np\n'), ((4746, 4768), 'os.path.exists', 'os.path.exists', (['dbname'], {}), '(dbname)\n', (4760, 4768), False, 'import pdb, os\n'), ((4798, 4815), 'os.remove', 'os.remove', (['dbname'], {}), '(dbname)\n', (4807, 4815), False, 'import pdb, os\n')] |
# Importing the needed python packages
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import time
import sys
from pylab import *
from matplotlib.patches import Rectangle
# Defining the right hand side of the ODEs (rate of changes of predator and prey)
def NegativeFBmodel(A,B,kAB,kBA):
    """Rates of change (dA/dt, dB/dt) of the linear negative-feedback model.

    A is produced at unit rate and inhibited by B (rate constant kBA);
    B is produced in proportion to A (rate constant kAB).
    """
    return np.array((1 - kBA * B, kAB * A))
# Rate constants: A -> B activation (kAB) and B -| A inhibition (kBA).
kAB = 1
kBA = 1
# Sample a 21x21 grid of (A, B) concentrations over [-5, 5] x [-5, 5].
coords = np.linspace(-5,5,21)
X, Y = np.meshgrid (coords, coords)
# Evaluate the vector field (dA/dt, dB/dt) at every grid point.
Vx, Vy = NegativeFBmodel(X,Y,kAB,kBA)
# Draw the phase portrait as an arrow (quiver) plot.
p=plt.quiver(X,Y,Vx,Vy)
plt.xlabel('conc. A')
plt.ylabel('conc. B')
plt.title('Linear negative feedback phase portrait')
"matplotlib.pyplot.title",
"numpy.meshgrid",
"matplotlib.pyplot.quiver",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((447, 469), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(21)'], {}), '(-5, 5, 21)\n', (458, 469), True, 'import numpy as np\n'), ((476, 503), 'numpy.meshgrid', 'np.meshgrid', (['coords', 'coords'], {}), '(coords, coords)\n', (487, 503), True, 'import numpy as np\n'), ((547, 571), 'matplotlib.pyplot.quiver', 'plt.quiver', (['X', 'Y', 'Vx', 'Vy'], {}), '(X, Y, Vx, Vy)\n', (557, 571), True, 'import matplotlib.pyplot as plt\n'), ((570, 591), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""conc. A"""'], {}), "('conc. A')\n", (580, 591), True, 'import matplotlib.pyplot as plt\n'), ((593, 614), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""conc. B"""'], {}), "('conc. B')\n", (603, 614), True, 'import matplotlib.pyplot as plt\n'), ((616, 668), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear negative feedback phase portrait"""'], {}), "('Linear negative feedback phase portrait')\n", (625, 668), True, 'import matplotlib.pyplot as plt\n'), ((395, 413), 'numpy.array', 'np.array', (['(dA, dB)'], {}), '((dA, dB))\n', (403, 413), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.action import ActionServer, CancelResponse
from rclpy.action.client import GoalStatus
# Interfaces
from geometry_msgs.msg import PolygonStamped, Polygon, Point32, PoseStamped
from std_msgs.msg import Header
from nav_msgs.msg import Path
from robot_control_interfaces.action import FollowWaypoints
from robot_command_interfaces.action import SweepSearch
from diagnostic_msgs.msg import KeyValue
# General
from shapely.geometry import Polygon as SPolygon
import numpy as np
from .split_polygon import split_polygon_voronoi
from .polygon_path import sweep_polygon
from ros2_utils import NpVector4, NpPose
import functools
from ros2_utils.ros import convert_axes_from_msg, AxesFrame
from robot_control.cli.drone_client import DroneClient
from ros2_utils.cli import gh_state_machine, CompleteActionState
def convert_shapely_to_msg(polygon: SPolygon, alt=1.0) -> PolygonStamped:
    """Convert a shapely polygon to a PolygonStamped message.

    :param polygon: shapely polygon whose exterior ring is converted.
    :param alt: z value assigned to every boundary point.
    :return: PolygonStamped; the header is left unfilled for the caller.
    """
    # BUGFIX: the return annotation said ``Polygon`` but the function has
    # always returned a ``PolygonStamped``.
    msg = PolygonStamped()
    msg_poly = Polygon()
    # exterior.xy is a pair of coordinate arrays; transpose to (x, y) rows.
    for x, y in np.asfarray(polygon.exterior.xy).T:
        msg_poly.points.append(Point32(x=x, y=y, z=alt))
    msg.polygon = msg_poly
    return msg
def convert_msg_to_shapely(msg: Polygon) -> SPolygon:
    """Build a shapely polygon from a Polygon message (z is dropped)."""
    return SPolygon([[p.x, p.y] for p in msg.points])
class ControlCenter(Node):
    """ROS 2 node that splits a search polygon among several drones.

    Publishes the current search area, and serves a ``sweep_search`` action
    that partitions the area (voronoi split + boustrophedon sweep per
    region) and drives one FollowWaypoints client per vehicle.
    """
    def __init__(self):
        super().__init__("control_center")
        # Setup loop
        timer_period = 1/20 # seconds
        self._timer_update = self.create_timer(timer_period, self.update)
        self._check_rate = self.create_rate(20) # Rate to check conditions in actions
        # Internals
        # Maps vehicle namespace -> DroneClient; clients are created lazily
        # in _handle_sweep_search_goal and reused across goals.
        self._clients = {}
        # Publishers
        self._pub_search_area = self.create_publisher(PolygonStamped, "search/area", 1)
        # Default search area (map-frame xy vertices) until a goal sets one.
        self._search_area = SPolygon([[10, 10], [40, 0], [60,20], [20, 50], [0, 30], [10, 10]])
        # Actions
        # TODO: add action for receiving polygon and list of vehicle names, then distributing paths to those vehicles and searching, closest vehicle to split region should go there
        self._cli_sweep_search = ActionServer(self, SweepSearch, "sweep_search", self._handle_sweep_search_goal, cancel_callback=self._handle_sweep_search_cancel)

    def update(self):
        """Timer callback: publish the current search area at 20 Hz."""
        header = Header()
        header.stamp = self.get_clock().now().to_msg()
        header.frame_id = "map"
        poly_msg = convert_shapely_to_msg(self._search_area,alt=-3.0)
        poly_msg.header = header
        # Convert from the up-right-hand frame used internally to the
        # right-hand frame expected on the wire.
        poly_msg.polygon = convert_axes_from_msg(poly_msg.polygon, AxesFrame.URHAND, AxesFrame.RHAND)
        self._pub_search_area.publish(poly_msg)

    def _handle_sweep_search_goal(self, goal):
        """Execute a SweepSearch goal.

        Splits the requested area into one region per vehicle, sends a
        FollowWaypoints goal to each, then steps every client through the
        goal-handle state machine until all succeed (or the goal is
        canceled / a client fails).
        """
        def get_request(goal) -> SweepSearch:
            return goal.request
        req = get_request(goal)
        names = req.names
        alt = req.alt
        poly = convert_msg_to_shapely(req.area)
        self._search_area = poly
        # Per-vehicle bookkeeping: client, goal future, and state-machine state.
        data = {}
        # Split area into paths
        num_vehicles = len(names)
        regions = split_polygon_voronoi(poly, num_vehicles)
        self.get_logger().info(f"SweepSearch: split area with voronoi")
        paths = []
        for i, r in enumerate(regions.geoms):
            paths.append(sweep_polygon(r))
            self.get_logger().info(f"SweepSearch: generated sweep for region {i}")
        # Distribute to vehicles and instantiate connections to action API
        for n in names:
            path = paths.pop()
            shape = (path.shape[0],1)
            # Columns: x, y, z=alt, yaw=0, frame id 1 for every waypoint.
            xyz_yaw_frame = np.hstack((path, alt*np.ones(shape), np.zeros(shape), 1*np.ones(shape)))
            data[n] = {}
            if n not in self._clients: self._clients[n] = DroneClient(MultiThreadedExecutor(), namespace=n, log_feedback=False)
            data[n]["client"] = self._clients[n]
            data[n]["future"] = self._clients[n].send_follow_waypoints(xyz_yaw_frame, 1.0)
            data[n]["state"] = CompleteActionState.WAIT_GH
        # Wait for results
        self.get_logger().info(f"SweepSearch: waiting on goal handle")
        wait_names = names.copy()
        group_state = CompleteActionState.WAIT_GH
        while True:
            # Relay each vehicle's current waypoint index as feedback.
            feedback = SweepSearch.Feedback()
            for n in names:
                if self._clients[n].feedback: feedback.info.append(KeyValue(key=n, value=str(self._clients[n].feedback.idx)))
            goal.publish_feedback(feedback)
            # Can only cancel when goal handle result has not been retrieved
            if goal.is_cancel_requested and group_state in [CompleteActionState.WAIT_GH, CompleteActionState.WAIT_GH_FUTURE]: # Can't cancel until goal handles are received
                # NOTE(review): assumes gh_state_machine has stored a
                # "goal_handle" entry in data[n] by this point; this method
                # itself never sets that key — confirm, otherwise this is a
                # KeyError on early cancel.
                for n in names:
                    if "cancel_future" not in data[n]: data[n]["cancel_future"] = data[n]["goal_handle"].cancel_goal_async()
                cancel_response = True
                for n in names:
                    if not data[n]["cancel_future"].done(): cancel_response = False
                if not cancel_response:
                    for n in names:
                        self._clients[n].executor.spin_once()
                    continue # skip stepping forward in stages until cancel gets a response
            # Consider it canceled when all action clients have returned a result
            if goal.is_cancel_requested and group_state == CompleteActionState.CHECK_STATUS:
                group_state = CompleteActionState.CANCELED
                self.get_logger().info("SweepSearch: Cancelled!")
                goal.canceled()
                return SweepSearch.Result()
            # Step goal handles through stages until a result is found
            for n in wait_names:
                self._clients[n].executor.spin_once()
                data[n]["state"] = gh_state_machine(data[n])
            if wait_names:
                # NOTE(review): removing from wait_names while iterating it
                # skips the element after each removal; iterate a copy
                # (list(wait_names)) to remove all advanced vehicles in one
                # pass — confirm intended behavior before changing.
                for n in wait_names:
                    if data[n]["state"] != group_state: # Remove once its moved to the next stage
                        wait_names.remove(n)
            else: # Reset when all states have moved on to the next stage
                if list(data.values())[0]["state"] in [CompleteActionState.SUCCEEDED, CompleteActionState.CANCELED, CompleteActionState.FAILURE]:
                    for n in names:
                        if data[n]["state"] == CompleteActionState.FAILURE:
                            self.get_logger().error(f"SweepSearch: {n} failed...")
                            goal.abort()
                            return SweepSearch.Result()
                    break # assume it was a success
                self.get_logger().info(f"SweepSearch: moving to next stage {data[n]['state'].name}")
                if group_state+1 < CompleteActionState.END: wait_names = names.copy()
                group_state = CompleteActionState(group_state+1)
        self.get_logger().info("SweepSearch: searched area!")
        goal.succeed()
        return SweepSearch.Result()

    def _handle_sweep_search_cancel(self, cancel):
        """Always accept cancel requests; the goal loop handles the rest."""
        return CancelResponse.ACCEPT

    def perform_search(self, names, alt):
        """Topic-based variant of the sweep search (no action interface).

        Publishes one Path command per vehicle and waits for completion by
        watching the vehicles' path topics.
        """
        data = {}
        for n in names:
            # Closures capture `data` by reference; `name` is bound per
            # vehicle via functools.partial below.
            def sub_pose(msg: PoseStamped, name: str):
                data[name]["pose"] = NpPose.from_ros(msg.pose)
            def sub_path(msg: Path, name: str):
                data[name]["path"] = msg
            data[n] = {
                "pub": {
                    "path": self.create_publisher(Path, f"/{n}/cmd/path/ned", 1)
                },
                "sub": {
                    "pose": self.create_subscription(PoseStamped, f"/{n}/pose", functools.partial(sub_pose, name=n), 1),
                    "path": self.create_subscription(Path, f"/{n}/path", functools.partial(sub_path, name=n), 1),
                }
            }
        num_vehicles = len(names)
        poly = self._search_area
        regions = split_polygon_voronoi(poly, num_vehicles)
        self.get_logger().info(f"Search: split area with voronoi")
        paths = []
        for i, r in enumerate(regions.geoms):
            paths.append(sweep_polygon(r))
            self.get_logger().info(f"Search: generated sweep for region {i}")
        for name in data.keys():
            path = paths.pop()
            msg = Path()
            header = Header()
            header.stamp = self.get_clock().now().to_msg()
            header.frame_id = "map"
            for (x, y) in path:
                p = PoseStamped()
                p.pose.position.x = x
                p.pose.position.y = y
                p.pose.position.z = alt
                p.header = header
                msg.poses.append(p)
            msg.header = header
            data[name]["pub"]["path"].publish(msg)
        self.get_logger().info("Search: sent all path commands")
        # NOTE(review): this busy-wait uses `n` leaked from the loop above,
        # so only the last vehicle is checked; it also never spins an
        # executor here, and reads a `.paths` attribute on what appears to
        # be a nav_msgs Path (whose field is `.poses`) — confirm all three
        # before relying on this method.
        while True:
            if data[n].get("path") and not data[n]["path"].paths:
                break
        self.get_logger().info("Search: vehicles finished paths")
def main(args=None):
    """Entry point: spin the control-center node until interrupted."""
    rclpy.init(args=args)
    cc = ControlCenter()
    executor = MultiThreadedExecutor()
    try:
        rclpy.spin(cc, executor=executor)
    finally:
        # BUGFIX: release the node and the ROS context even when spin is
        # interrupted, so repeated runs in one process don't leak resources.
        cc.destroy_node()
        rclpy.shutdown()


if __name__=="__main__":
    main()
| [
"geometry_msgs.msg.PolygonStamped",
"std_msgs.msg.Header",
"ros2_utils.cli.gh_state_machine",
"numpy.ones",
"geometry_msgs.msg.Point32",
"ros2_utils.cli.CompleteActionState",
"geometry_msgs.msg.PoseStamped",
"rclpy.spin",
"shapely.geometry.Polygon",
"numpy.asfarray",
"robot_command_interfaces.ac... | [((1003, 1019), 'geometry_msgs.msg.PolygonStamped', 'PolygonStamped', ([], {}), '()\n', (1017, 1019), False, 'from geometry_msgs.msg import PolygonStamped, Polygon, Point32, PoseStamped\n'), ((1035, 1044), 'geometry_msgs.msg.Polygon', 'Polygon', ([], {}), '()\n', (1042, 1044), False, 'from geometry_msgs.msg import PolygonStamped, Polygon, Point32, PoseStamped\n'), ((1353, 1369), 'shapely.geometry.Polygon', 'SPolygon', (['points'], {}), '(points)\n', (1361, 1369), True, 'from shapely.geometry import Polygon as SPolygon\n'), ((9004, 9025), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (9014, 9025), False, 'import rclpy\n'), ((9066, 9089), 'rclpy.executors.MultiThreadedExecutor', 'MultiThreadedExecutor', ([], {}), '()\n', (9087, 9089), False, 'from rclpy.executors import MultiThreadedExecutor\n'), ((9159, 9192), 'rclpy.spin', 'rclpy.spin', (['cc'], {'executor': 'executor'}), '(cc, executor=executor)\n', (9169, 9192), False, 'import rclpy\n'), ((1060, 1092), 'numpy.asfarray', 'np.asfarray', (['polygon.exterior.xy'], {}), '(polygon.exterior.xy)\n', (1071, 1092), True, 'import numpy as np\n'), ((1110, 1134), 'geometry_msgs.msg.Point32', 'Point32', ([], {'x': 'x', 'y': 'y', 'z': 'alt'}), '(x=x, y=y, z=alt)\n', (1117, 1134), False, 'from geometry_msgs.msg import PolygonStamped, Polygon, Point32, PoseStamped\n'), ((1871, 1939), 'shapely.geometry.Polygon', 'SPolygon', (['[[10, 10], [40, 0], [60, 20], [20, 50], [0, 30], [10, 10]]'], {}), '([[10, 10], [40, 0], [60, 20], [20, 50], [0, 30], [10, 10]])\n', (1879, 1939), True, 'from shapely.geometry import Polygon as SPolygon\n'), ((2171, 2310), 'rclpy.action.ActionServer', 'ActionServer', (['self', 'SweepSearch', '"""sweep_search"""', 'self._handle_sweep_search_goal'], {'cancel_callback': 'self._handle_sweep_search_cancel'}), "(self, SweepSearch, 'sweep_search', self.\n _handle_sweep_search_goal, cancel_callback=self._handle_sweep_search_cancel\n )\n", (2183, 2310), False, 
'from rclpy.action import ActionServer, CancelResponse\n'), ((2341, 2349), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (2347, 2349), False, 'from std_msgs.msg import Header\n'), ((2567, 2641), 'ros2_utils.ros.convert_axes_from_msg', 'convert_axes_from_msg', (['poly_msg.polygon', 'AxesFrame.URHAND', 'AxesFrame.RHAND'], {}), '(poly_msg.polygon, AxesFrame.URHAND, AxesFrame.RHAND)\n', (2588, 2641), False, 'from ros2_utils.ros import convert_axes_from_msg, AxesFrame\n'), ((6966, 6986), 'robot_command_interfaces.action.SweepSearch.Result', 'SweepSearch.Result', ([], {}), '()\n', (6984, 6986), False, 'from robot_command_interfaces.action import SweepSearch\n'), ((4230, 4252), 'robot_command_interfaces.action.SweepSearch.Feedback', 'SweepSearch.Feedback', ([], {}), '()\n', (4250, 4252), False, 'from robot_command_interfaces.action import SweepSearch\n'), ((8271, 8277), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (8275, 8277), False, 'from nav_msgs.msg import Path\n'), ((8299, 8307), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (8305, 8307), False, 'from std_msgs.msg import Header\n'), ((5600, 5620), 'robot_command_interfaces.action.SweepSearch.Result', 'SweepSearch.Result', ([], {}), '()\n', (5618, 5620), False, 'from robot_command_interfaces.action import SweepSearch\n'), ((5814, 5839), 'ros2_utils.cli.gh_state_machine', 'gh_state_machine', (['data[n]'], {}), '(data[n])\n', (5830, 5839), False, 'from ros2_utils.cli import gh_state_machine, CompleteActionState\n'), ((6831, 6867), 'ros2_utils.cli.CompleteActionState', 'CompleteActionState', (['(group_state + 1)'], {}), '(group_state + 1)\n', (6850, 6867), False, 'from ros2_utils.cli import gh_state_machine, CompleteActionState\n'), ((7253, 7278), 'ros2_utils.NpPose.from_ros', 'NpPose.from_ros', (['msg.pose'], {}), '(msg.pose)\n', (7268, 7278), False, 'from ros2_utils import NpVector4, NpPose\n'), ((8455, 8468), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (8466, 8468), False, 'from 
geometry_msgs.msg import PolygonStamped, Polygon, Point32, PoseStamped\n'), ((3617, 3632), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (3625, 3632), True, 'import numpy as np\n'), ((3748, 3771), 'rclpy.executors.MultiThreadedExecutor', 'MultiThreadedExecutor', ([], {}), '()\n', (3769, 3771), False, 'from rclpy.executors import MultiThreadedExecutor\n'), ((3601, 3615), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (3608, 3615), True, 'import numpy as np\n'), ((3636, 3650), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (3643, 3650), True, 'import numpy as np\n'), ((7622, 7657), 'functools.partial', 'functools.partial', (['sub_pose'], {'name': 'n'}), '(sub_pose, name=n)\n', (7639, 7657), False, 'import functools\n'), ((7736, 7771), 'functools.partial', 'functools.partial', (['sub_path'], {'name': 'n'}), '(sub_path, name=n)\n', (7753, 7771), False, 'import functools\n'), ((6540, 6560), 'robot_command_interfaces.action.SweepSearch.Result', 'SweepSearch.Result', ([], {}), '()\n', (6558, 6560), False, 'from robot_command_interfaces.action import SweepSearch\n')] |
#
# bamakoS
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.0011753, 0.25004, 0.3],
[0.9999, 0.89988, 0.59995],
[0.37638, 0.49034, 0.080686],
[0.72588, 0.6474, 0.14503],
[0.16705, 0.35489, 0.20248],
[0.88958, 0.78905, 0.38098],
[0.084163, 0.29972, 0.25365],
[0.53088, 0.55835, 0.010839],
[0.25954, 0.41517, 0.14733],
[0.044597, 0.27432, 0.27734],
[0.44924, 0.53207, 0.039697],
[0.61954, 0.58268, 0.046299],
[0.31426, 0.45054, 0.11568],
[0.12416, 0.32653, 0.22873],
[0.82142, 0.72482, 0.26361],
[0.94603, 0.84535, 0.49169],
[0.21001, 0.383, 0.1766],
[0.4112, 0.51164, 0.061172],
[0.10384, 0.31294, 0.24134],
[0.28611, 0.43238, 0.13185],
[0.6718, 0.61057, 0.0922],
[0.14528, 0.34051, 0.21578],
[0.85818, 0.7586, 0.32333],
[0.23423, 0.39875, 0.16224],
[0.97318, 0.87268, 0.54594],
[0.064737, 0.28684, 0.26564],
[0.77702, 0.68709, 0.20308],
[0.022743, 0.26208, 0.28878],
[0.48982, 0.54827, 0.020312],
[0.57285, 0.56684, 0.016927],
[0.91835, 0.81764, 0.43686],
[0.34422, 0.46984, 0.098693],
[0.18959, 0.36966, 0.18887],
[0.64514, 0.59517, 0.068288],
[0.55153, 0.56225, 0.011649],
[0.95966, 0.85904, 0.51886],
[0.11391, 0.3197, 0.23509],
[0.69891, 0.62825, 0.11783],
[0.15607, 0.34765, 0.20917],
[0.054846, 0.28053, 0.27151],
[0.46935, 0.54092, 0.028957],
[0.32901, 0.46003, 0.10731],
[0.36, 0.47994, 0.089849],
[0.59541, 0.57336, 0.028014],
[0.84066, 0.74221, 0.29369],
[0.22199, 0.39081, 0.1695],
[0.51038, 0.55397, 0.014132],
[0.27265, 0.42366, 0.13963],
[0.90417, 0.8035, 0.40909],
[0.87436, 0.77414, 0.35241],
[0.093906, 0.3063, 0.24754],
[0.39341, 0.50095, 0.071224],
[0.29998, 0.44132, 0.12382],
[0.24673, 0.40687, 0.15484],
[0.93228, 0.83157, 0.46438],
[0.074435, 0.29324, 0.25969],
[0.13465, 0.33348, 0.22228],
[0.012175, 0.25604, 0.2944],
[0.98659, 0.88628, 0.57296],
[0.7521, 0.66724, 0.17352],
[0.17824, 0.36222, 0.19572],
[0.42981, 0.52214, 0.050712],
[0.80021, 0.7064, 0.23325],
[0.033437, 0.26816, 0.28309],
[0.20116, 0.37724, 0.18189],
[0.13988, 0.33699, 0.21904],
[0.069647, 0.29003, 0.26267],
[0.079232, 0.29647, 0.25667],
[0.039136, 0.27124, 0.28021],
[0.32156, 0.45525, 0.11153],
[0.3848, 0.49562, 0.075955],
[0.27935, 0.42799, 0.13575],
[0.78886, 0.69684, 0.21813],
[0.58394, 0.56978, 0.021653],
[0.81106, 0.71574, 0.24845],
[0.95286, 0.85221, 0.50528],
[0.9799, 0.87948, 0.55945],
[0.12938, 0.33, 0.22551],
[0.098845, 0.30961, 0.24443],
[0.65837, 0.60254, 0.079969],
[0.059903, 0.28369, 0.2686],
[0.43943, 0.52722, 0.045272],
[0.45922, 0.53666, 0.034001],
[0.88207, 0.78167, 0.36676],
[0.36811, 0.4851, 0.085276],
[0.26606, 0.41939, 0.14352],
[0.63218, 0.58853, 0.057023],
[0.73912, 0.65728, 0.15912],
[0.4022, 0.5063, 0.066264],
[0.22809, 0.39476, 0.16585],
[0.10884, 0.31632, 0.23821],
[0.52063, 0.55628, 0.01218],
[0.18387, 0.36593, 0.1923],
[0.01745, 0.25907, 0.29161],
[0.83127, 0.73364, 0.2787],
[0.19535, 0.37344, 0.18542],
[0.35204, 0.47486, 0.094325],
[0.71245, 0.63769, 0.13125],
[0.2531, 0.411, 0.15109],
[0.84961, 0.75052, 0.30859]]
# Build the Matplotlib colormap object from the raw RGB samples above.
bamakoS_map = LinearSegmentedColormap.from_list('bamakoS', cm_data)
# For use of "viscm view"
test_cm = bamakoS_map


def _preview():
    """Preview the colormap: use viscm when installed, else a plain gradient."""
    import matplotlib.pyplot as plt
    import numpy as np
    try:
        from viscm import viscm
        viscm(bamakoS_map)
    except ImportError:
        print("viscm not found, falling back on simple display")
        gradient = np.linspace(0, 100, 256)[None, :]
        plt.imshow(gradient, aspect='auto', cmap=bamakoS_map)
    plt.show()


if __name__ == "__main__":
    _preview()
| [
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.linspace",
"matplotlib.pyplot.show",
"viscm.viscm"
] | [((4764, 4817), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""bamakoS"""', 'cm_data'], {}), "('bamakoS', cm_data)\n", (4797, 4817), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((5311, 5321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5319, 5321), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5081), 'viscm.viscm', 'viscm', (['bamakoS_map'], {}), '(bamakoS_map)\n', (5068, 5081), False, 'from viscm import viscm\n'), ((5208, 5232), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(256)'], {}), '(0, 100, 256)\n', (5219, 5232), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
import numpy as np
import cv2 as cv
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
import sensor_msgs.point_cloud2 as pc2
# visual sensors
class RPIv2:
    """Wrapper around the Raspberry Pi camera v2 ROS topics.

    Subscribes to the camera-info and image topics and caches the most
    recent colour frame as an OpenCV BGR image.
    """

    def __init__(self):
        self.bridge = CvBridge()
        # camera geometry is latched once, from the first CameraInfo message
        self.cameraInfoUpdate = False
        # ros subscriptions
        self.caminfo_sub = rospy.Subscriber('/rpi/image_info', CameraInfo, self._caminfo_callback)
        self.color_sub = rospy.Subscriber('/rpi/image', Image, self._color_callback)
        # cached frame and default frame geometry
        self.cv_color = []
        self.width = 640
        self.height = 480

    def ready(self):
        """True once camera info has arrived and at least one frame is cached."""
        return self.cameraInfoUpdate and len(self.cv_color) > 0

    def image_size(self):
        """Return the frame size as a (height, width) tuple."""
        return self.height, self.width

    def color_image(self):
        """Return the most recent colour frame ([] before the first frame)."""
        return self.cv_color

    def _caminfo_callback(self, data):
        # Latch width/height from the very first CameraInfo message only.
        if self.cameraInfoUpdate:
            return
        self.width = data.width
        self.height = data.height
        self.cameraInfoUpdate = True

    def _color_callback(self, data):
        # Ignore frames until the camera geometry is known.
        if not self.cameraInfoUpdate:
            return
        try:
            self.cv_color = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

    def draw(self):
        """Display the cached frame in an OpenCV window."""
        cv.imshow('rpiv2', self.cv_color)
        cv.waitKey(1)
# realsense d435
class RSD435:
    """Wrapper around the Intel RealSense D435 ROS topics (colour + depth).

    Caches the latest colour/depth frames and the camera intrinsics, and
    offers pixel-to-3D deprojection helpers.
    """
    # create a image view with a frame size for the ROI
    def __init__(self):
        print("create realsense d435 instance...")
        self.bridge = CvBridge()
        # camera information (latched from the first CameraInfo message)
        self.cameraInfoUpdate = False
        self.intrinsic = None
        # ros-realsense subscriptions
        self.caminfo_sub = rospy.Subscriber('/rs435/color/camera_info', CameraInfo, self._caminfo_callback)
        self.depth_sub = rospy.Subscriber('/rs435/depth/image_raw', Image, self._depth_callback)
        self.color_sub = rospy.Subscriber('/rs435/color/image_raw', Image, self._color_callback)
        # cached frames and default frame geometry
        self.cv_color = []
        self.cv_depth = []
        self.width = 1024
        self.height = 720

    def ready(self):
        """True once camera info and both image streams have arrived."""
        return self.cameraInfoUpdate and len(self.cv_color) > 0 and len(self.cv_depth) > 0

    def image_size(self):
        """Return the frame size as a (height, width) tuple."""
        return self.height, self.width

    #### depth info
    def distance(self, u, v, size=3):
        """Mean depth in a (2*size+1) x (2*size+1) window centred on pixel (u, v).

        Zero depth readings (invalid pixels) are ignored. Returns -1 when the
        whole window contains no valid reading.
        """
        dist_list = []
        # BUGFIX: use an inclusive range so the window is symmetric around
        # (u, v). The previous range(-size, size) silently dropped the
        # right/bottom edge of the neighbourhood, biasing the estimate.
        for i in range(-size, size + 1):
            for j in range(-size, size + 1):
                value = self.cv_depth[v + j, u + i]
                if value > 0.0:
                    dist_list.append(value)
        if not dist_list:
            return -1
        return np.mean(dist_list)

    #### find 3d point with pixel and depth information
    def point3d(self, u, v):
        """Deproject pixel (u, v) into a camera-frame 3D point [x, y, z].

        Returns [-1, -1, -1] when no valid depth is available around (u, v).
        """
        depth = self.distance(u, v)
        if depth < 0:
            return [-1, -1, -1]
        # focal length (from the flattened 3x3 intrinsic matrix K)
        fx = self.intrinsic[0]
        fy = self.intrinsic[4]
        # principle point
        cx = self.intrinsic[2]
        cy = self.intrinsic[5]
        # deproject to normalized image coordinates
        x = (u - cx) / fx
        y = (v - cy) / fy
        # scale = 0.001 # for simulation is 1
        scale = 1
        return [scale * depth * x, scale * depth * y, scale * depth]

    def draw(self):
        """Display the cached colour frame in an OpenCV window."""
        cv.imshow('realsense', self.cv_color)
        cv.waitKey(1)

    #### data
    def depth_image(self):
        return self.cv_depth

    def color_image(self):
        return self.cv_color

    def _caminfo_callback(self, data):
        # Latch intrinsics and geometry from the first message only.
        if not self.cameraInfoUpdate:
            self.intrinsic = data.K
            self.width = data.width
            self.height = data.height
            self.cameraInfoUpdate = True

    def _depth_callback(self, data):
        if self.cameraInfoUpdate:
            try:
                self.cv_depth = self.bridge.imgmsg_to_cv2(data, data.encoding)  # "16UC1"
            except CvBridgeError as e:
                print(e)

    def _color_callback(self, data):
        if self.cameraInfoUpdate:
            try:
                self.cv_color = self.bridge.imgmsg_to_cv2(data, "bgr8")
            except CvBridgeError as e:
                print(e)
| [
"cv_bridge.CvBridge",
"rospy.Subscriber",
"cv2.waitKey",
"numpy.mean",
"cv2.imshow"
] | [((291, 301), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (299, 301), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((420, 491), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/rpi/image_info"""', 'CameraInfo', 'self._caminfo_callback'], {}), "('/rpi/image_info', CameraInfo, self._caminfo_callback)\n", (436, 491), False, 'import rospy\n'), ((517, 576), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/rpi/image"""', 'Image', 'self._color_callback'], {}), "('/rpi/image', Image, self._color_callback)\n", (533, 576), False, 'import rospy\n'), ((1365, 1398), 'cv2.imshow', 'cv.imshow', (['"""rpiv2"""', 'self.cv_color'], {}), "('rpiv2', self.cv_color)\n", (1374, 1398), True, 'import cv2 as cv\n'), ((1406, 1419), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1416, 1419), True, 'import cv2 as cv\n'), ((1603, 1613), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1611, 1613), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1762, 1847), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/rs435/color/camera_info"""', 'CameraInfo', 'self._caminfo_callback'], {}), "('/rs435/color/camera_info', CameraInfo, self._caminfo_callback\n )\n", (1778, 1847), False, 'import rospy\n'), ((1868, 1939), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/rs435/depth/image_raw"""', 'Image', 'self._depth_callback'], {}), "('/rs435/depth/image_raw', Image, self._depth_callback)\n", (1884, 1939), False, 'import rospy\n'), ((1965, 2036), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/rs435/color/image_raw"""', 'Image', 'self._color_callback'], {}), "('/rs435/color/image_raw', Image, self._color_callback)\n", (1981, 2036), False, 'import rospy\n'), ((3432, 3469), 'cv2.imshow', 'cv.imshow', (['"""realsense"""', 'self.cv_color'], {}), "('realsense', self.cv_color)\n", (3441, 3469), True, 'import cv2 as cv\n'), ((3477, 3490), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (3487, 3490), True, 'import cv2 as cv\n'), ((2829, 2847), 'numpy.mean', 'np.mean', 
(['dist_list'], {}), '(dist_list)\n', (2836, 2847), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import os
import cv2
import megengine as mge
import numpy as np
from megengine import jit
import math
from official.vision.keypoints.transforms import get_affine_transform
from official.vision.keypoints.config import Config as cfg
import official.vision.keypoints.models as M
import official.vision.detection.retinanet_res50_coco_1x_800size as Det
from official.vision.detection.tools.test import DetEvaluator
from official.vision.keypoints.test import find_keypoints
logger = mge.get_logger(__name__)  # module-level logger provided by MegEngine
def make_parser():
    """Build the command-line parser for keypoint inference.

    Options: keypoint backbone architecture, person detector name,
    checkpoint path and the input image path.
    """
    parser = argparse.ArgumentParser()
    arch_choices = [
        "simplebaseline_res50",
        "simplebaseline_res101",
        "simplebaseline_res152",
        "mspn_4stage",
    ]
    parser.add_argument(
        "-a", "--arch",
        default="simplebaseline_res50",
        type=str,
        choices=arch_choices,
    )
    parser.add_argument(
        "-det", "--detector",
        default="retinanet_res50_coco_1x_800size",
        type=str,
    )
    parser.add_argument(
        "-m", "--model",
        default="/data/models/simplebaseline_res50_256x192/epoch_199.pkl",
        type=str,
    )
    parser.add_argument(
        "-image", "--image",
        default="/data/test_keypoint.jpeg",
        type=str,
    )
    return parser
class KeypointEvaluator:
    """Two-stage human keypoint estimation pipeline.

    Stage 1: a detection model proposes person bounding boxes.
    Stage 2: a keypoint model predicts joint heatmaps on each person crop,
    averaged with the prediction on the horizontally flipped crop
    (test-time augmentation).
    """
    def __init__(self, detect_model, det_func, keypoint_model, keypoint_func):
        # detect_model / keypoint_model: the traced MegEngine network objects;
        # det_func / keypoint_func: the jit-traced callables that execute them.
        self.detector = detect_model
        self.det_func = det_func
        self.keypoint_model = keypoint_model
        self.keypoint_func = keypoint_func
    def detect_persons(self, image):
        """Run the detector on `image` and return the boxes whose class is person."""
        data, im_info = DetEvaluator.process_inputs(
            image.copy(),
            self.detector.cfg.test_image_short_size,
            self.detector.cfg.test_image_max_size,
        )
        self.detector.inputs["im_info"].set_value(im_info)
        self.detector.inputs["image"].set_value(data.astype(np.float32))
        evaluator = DetEvaluator(self.detector)
        det_res = evaluator.predict(self.det_func)
        persons = []
        for d in det_res:
            # d[:4] is the box, d[5] the zero-based class index; index 0
            # (cls_id 1 after the +1 shift) is treated as "person" here.
            cls_id = int(d[5] + 1)
            if cls_id == 1:
                bbox = d[:4]
                persons.append(bbox)
        return persons
    def predict_single_person(self, image, bbox):
        """Predict keypoints for one person given its bounding box.

        The box is padded by the configured test-time extension ratios,
        grown to the network input aspect ratio, cropped via an affine
        warp and fed through the keypoint model together with its mirror.
        """
        w = bbox[2] - bbox[0]
        h = bbox[3] - bbox[1]
        center_x = (bbox[0] + bbox[2]) / 2
        center_y = (bbox[1] + bbox[3]) / 2
        # pad the box by the configured test-time extension ratios
        extend_w = w * (1 + cfg.test_x_ext)
        extend_h = h * (1 + cfg.test_y_ext)
        # grow the padded box to match the network input aspect ratio
        w_h_ratio = cfg.input_shape[1] / cfg.input_shape[0]
        if extend_w / extend_h > w_h_ratio:
            extend_h = extend_w / w_h_ratio
        else:
            extend_w = extend_h * w_h_ratio
        trans = get_affine_transform(
            np.array([center_x, center_y]),
            np.array([extend_h, extend_w]),
            1,
            0,
            cfg.input_shape,
        )
        croped_img = cv2.warpAffine(
            image,
            trans,
            (int(cfg.input_shape[1]), int(cfg.input_shape[0])),
            flags=cv2.INTER_LINEAR,
            borderValue=0,
        )
        # test-time augmentation: batch the crop with its horizontal mirror
        fliped_img = croped_img[:, ::-1]
        keypoint_input = np.stack([croped_img, fliped_img], 0)
        keypoint_input = keypoint_input.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        keypoint_input = np.ascontiguousarray(keypoint_input).astype(np.float32)
        self.keypoint_model.inputs["image"].set_value(keypoint_input)
        outs = self.keypoint_func()
        outs = outs.numpy()
        pred = outs[0]
        # un-mirror the flipped prediction: swap left/right joints, flip x
        fliped_pred = outs[1][cfg.keypoint_flip_order][:, :, ::-1]
        pred = (pred + fliped_pred) / 2
        keypoints = find_keypoints(pred, bbox)
        return keypoints
    def predict(self, image, bboxes):
        """Normalise the image once, then predict keypoints for every box."""
        normalized_img = (image - np.array(cfg.img_mean).reshape(1, 1, 3)) / np.array(
            cfg.img_std
        ).reshape(1, 1, 3)
        all_keypoints = []
        for bbox in bboxes:
            keypoints = self.predict_single_person(normalized_img, bbox)
            all_keypoints.append(keypoints)
        return all_keypoints
    @staticmethod
    def vis_skeletons(img, all_keypoints):
        """Draw the configured skeleton limbs for every person on a copy of img."""
        canvas = img.copy()
        for keypoints in all_keypoints:
            for ind, skeleton in enumerate(cfg.vis_skeletons):
                jotint1 = skeleton[0]
                jotint2 = skeleton[1]
                X = np.array([keypoints[jotint1, 0], keypoints[jotint2, 0]])
                Y = np.array([keypoints[jotint1, 1], keypoints[jotint2, 1]])
                mX = np.mean(X)
                mY = np.mean(Y)
                # each limb is drawn as a thin filled ellipse between joints
                length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))
                polygon = cv2.ellipse2Poly(
                    (int(mX), int(mY)), (int(length / 2), 4), int(angle), 0, 360, 1
                )
                cur_canvas = canvas.copy()
                cv2.fillConvexPoly(cur_canvas, polygon, cfg.vis_colors[ind])
                # alpha-blend for a translucent overlay
                canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
        return canvas
def main():
    """Run person detection + keypoint estimation on one image and save the result.

    Writes the skeleton visualisation to ``vis_skeleton.jpg`` in the
    current working directory.
    """
    parser = make_parser()
    args = parser.parse_args()
    # person detector, restored from pretrained weights
    detector = getattr(Det, args.detector)(pretrained=True)
    detector.eval()
    logger.info("Load Model : %s completed", args.detector)
    # keypoint model, restored from the checkpoint given on the command line
    keypoint_model = getattr(M, args.arch)()
    keypoint_model.load_state_dict(mge.load(args.model)["state_dict"])
    keypoint_model.eval()
    logger.info("Load Model : %s completed", args.arch)
    @jit.trace(symbolic=True)
    def det_func():
        pred = detector(detector.inputs)
        return pred
    @jit.trace(symbolic=True)
    def keypoint_func():
        pred = keypoint_model.predict()
        return pred
    evaluator = KeypointEvaluator(detector, det_func, keypoint_model, keypoint_func)
    image = cv2.imread(args.image)
    logger.info("Detecting Humans")
    person_boxes = evaluator.detect_persons(image)
    logger.info("Detecting Keypoints")
    all_keypoints = evaluator.predict(image, person_boxes)
    logger.info("Visualizing")
    canvas = evaluator.vis_skeletons(image, all_keypoints)
    cv2.imwrite("vis_skeleton.jpg", canvas)
if __name__ == "__main__":
    main()
| [
"numpy.stack",
"megengine.load",
"argparse.ArgumentParser",
"math.atan2",
"cv2.imwrite",
"numpy.ascontiguousarray",
"cv2.addWeighted",
"cv2.imread",
"numpy.mean",
"numpy.array",
"official.vision.keypoints.test.find_keypoints",
"cv2.fillConvexPoly",
"megengine.jit.trace",
"official.vision.d... | [((886, 910), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (900, 910), True, 'import megengine as mge\n'), ((945, 970), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (968, 970), False, 'import argparse\n'), ((5876, 5900), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (5885, 5900), False, 'from megengine import jit\n'), ((5988, 6012), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (5997, 6012), False, 'from megengine import jit\n'), ((6197, 6219), 'cv2.imread', 'cv2.imread', (['args.image'], {}), '(args.image)\n', (6207, 6219), False, 'import cv2\n'), ((6502, 6541), 'cv2.imwrite', 'cv2.imwrite', (['"""vis_skeleton.jpg"""', 'canvas'], {}), "('vis_skeleton.jpg', canvas)\n", (6513, 6541), False, 'import cv2\n'), ((2305, 2332), 'official.vision.detection.tools.test.DetEvaluator', 'DetEvaluator', (['self.detector'], {}), '(self.detector)\n', (2317, 2332), False, 'from official.vision.detection.tools.test import DetEvaluator\n'), ((3555, 3592), 'numpy.stack', 'np.stack', (['[croped_img, fliped_img]', '(0)'], {}), '([croped_img, fliped_img], 0)\n', (3563, 3592), True, 'import numpy as np\n'), ((4023, 4049), 'official.vision.keypoints.test.find_keypoints', 'find_keypoints', (['pred', 'bbox'], {}), '(pred, bbox)\n', (4037, 4049), False, 'from official.vision.keypoints.test import find_keypoints\n'), ((3130, 3160), 'numpy.array', 'np.array', (['[center_x, center_y]'], {}), '([center_x, center_y])\n', (3138, 3160), True, 'import numpy as np\n'), ((3174, 3204), 'numpy.array', 'np.array', (['[extend_h, extend_w]'], {}), '([extend_h, extend_w])\n', (3182, 3204), True, 'import numpy as np\n'), ((5752, 5772), 'megengine.load', 'mge.load', (['args.model'], {}), '(args.model)\n', (5760, 5772), True, 'import megengine as mge\n'), ((3680, 3716), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['keypoint_input'], {}), 
'(keypoint_input)\n', (3700, 3716), True, 'import numpy as np\n'), ((4744, 4800), 'numpy.array', 'np.array', (['[keypoints[jotint1, 0], keypoints[jotint2, 0]]'], {}), '([keypoints[jotint1, 0], keypoints[jotint2, 0]])\n', (4752, 4800), True, 'import numpy as np\n'), ((4822, 4878), 'numpy.array', 'np.array', (['[keypoints[jotint1, 1], keypoints[jotint2, 1]]'], {}), '([keypoints[jotint1, 1], keypoints[jotint2, 1]])\n', (4830, 4878), True, 'import numpy as np\n'), ((4901, 4911), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (4908, 4911), True, 'import numpy as np\n'), ((4933, 4943), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (4940, 4943), True, 'import numpy as np\n'), ((5300, 5360), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['cur_canvas', 'polygon', 'cfg.vis_colors[ind]'], {}), '(cur_canvas, polygon, cfg.vis_colors[ind])\n', (5318, 5360), False, 'import cv2\n'), ((5386, 5434), 'cv2.addWeighted', 'cv2.addWeighted', (['canvas', '(0.4)', 'cur_canvas', '(0.6)', '(0)'], {}), '(canvas, 0.4, cur_canvas, 0.6, 0)\n', (5401, 5434), False, 'import cv2\n'), ((4192, 4213), 'numpy.array', 'np.array', (['cfg.img_std'], {}), '(cfg.img_std)\n', (4200, 4213), True, 'import numpy as np\n'), ((5056, 5092), 'math.atan2', 'math.atan2', (['(Y[0] - Y[1])', '(X[0] - X[1])'], {}), '(Y[0] - Y[1], X[0] - X[1])\n', (5066, 5092), False, 'import math\n'), ((4149, 4171), 'numpy.array', 'np.array', (['cfg.img_mean'], {}), '(cfg.img_mean)\n', (4157, 4171), True, 'import numpy as np\n')] |
# @formatter:off
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @formatter:on
from __future__ import absolute_import, print_function
import copy
import os
import pickle
import cobra.test
from cobra.util import SolverNotFound
import numpy
import pandas
import pytest
from cobra.util import fix_objective_as_constraint
from cobra import Model, Reaction, Metabolite
from cobra.exceptions import OptimizationError
from cameo import load_model
from cameo.config import solvers
from cameo.core.utils import get_reaction_for, load_medium, medium
from cameo.flux_analysis.structural import create_stoichiometric_array
from cameo.flux_analysis.analysis import find_essential_metabolites
from cobra.flux_analysis import find_essential_genes, find_essential_reactions
TRAVIS = bool(os.getenv('TRAVIS', False))  # True when running on Travis CI
TESTDIR = os.path.dirname(__file__)
# Precomputed FVA flux ranges for the E. coli core model, used as a regression reference.
REFERENCE_FVA_SOLUTION_ECOLI_CORE = pandas.read_csv(os.path.join(TESTDIR, 'data/REFERENCE_flux_ranges_EcoliCore.csv'),
                                                    index_col=0)
# Known-essential components of the E. coli core model (expected test outcomes).
ESSENTIAL_GENES = ['b2779', 'b1779', 'b0720', 'b0451', 'b2416', 'b2926', 'b1136', 'b2415']
ESSENTIAL_METABOLITES = ['13dpg_c', '2pg_c', '3pg_c', 'accoa_c', 'acon_DASH_C_c', 'adp_c', 'akg_c', 'atp_c', 'cit_c',
                         'coa_c', 'e4p_c', 'f6p_c', 'g3p_c', 'g6p_c', 'glc_DASH_D_e', 'gln_DASH_L_c', 'glu_DASH_L_c',
                         'h2o_c', 'h_c', 'h_e', 'icit_c', 'nad_c', 'nadh_c', 'nadp_c', 'nadph_c', 'nh4_c', 'nh4_e',
                         'oaa_c', 'pep_c', 'pi_c', 'pi_e', 'pyr_c', 'r5p_c']
ESSENTIAL_REACTIONS = ['GLNS', 'Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2', 'PIt2r', 'GAPD', 'ACONTb',
                       'EX_nh4_LPAREN_e_RPAREN_', 'ENO', 'EX_h_LPAREN_e_RPAREN_', 'EX_glc_LPAREN_e_RPAREN_', 'ICDHyr',
                       'CS', 'NH4t', 'GLCpts', 'PGM', 'EX_pi_LPAREN_e_RPAREN_', 'PGK', 'RPI', 'ACONTa']
@pytest.fixture(scope="function", params=list(solvers))
def solved_model(request, data_directory):
    """Fixture: the E. coli core model, optimized, parametrized over all solvers.

    Returns a (solution, model) tuple.
    """
    core_model = load_model(os.path.join(data_directory, 'EcoliCore.xml'), sanitize=False)
    core_model.solver = request.param
    solution = core_model.optimize()
    return solution, core_model
@pytest.fixture(scope="module", params=list(solvers))
def tiny_toy_model(request):
    """Fixture: a one-reaction toy model, parametrized over all solvers."""
    model = Model("Toy Model")
    metabolite = Metabolite("M1")
    exchange = Reaction("ex1")
    exchange.add_metabolites({metabolite: -1})
    exchange.bounds = (-1000, 0)
    model.add_reactions([exchange])
    model.solver = request.param
    return model
# class TestLazySolution:
# def test_self_invalidation(self, solved_model):
# solution, model = solved_model
# assert abs(solution.objective_value - 0.873921506968431) < 0.000001
# model.optimize()
# with pytest.raises(UndefinedSolution):
# getattr(solution, 'f')
#
# def test_solution_contains_only_reaction_specific_values(self, solved_model):
# solution, model = solved_model
# reaction_ids = set([reaction.id for reaction in model.reactions])
# assert set(solution.fluxes.keys()).difference(reaction_ids) == set()
# assert set(solution.reduced_costs.keys()).difference(reaction_ids) == set()
# assert set(solution.reduced_costs.keys()).difference(reaction_ids) == set()
# metabolite_ids = set([metabolite.id for metabolite in model.metabolites])
# assert set(solution.shadow_prices.keys()).difference(metabolite_ids) == set()
class TestReaction:
# def test_clone_cobrapy_reaction(self):
# model = cobra.test.create_test_model('textbook')
# for reaction in model.reactions:
# cloned_reaction = Reaction.clone(reaction)
# assert cloned_reaction.gene_reaction_rule == reaction.gene_reaction_rule
# assert set([gene.id for gene in cloned_reaction.genes]) == set([gene.id for gene in reaction.genes])
# assert all(isinstance(gene, cobra.Gene) for gene in list(cloned_reaction.genes))
# assert {metabolite.id for metabolite in cloned_reaction.metabolites} == {metabolite.id for metabolite in
# reaction.metabolites}
# assert all(isinstance(metabolite, cameo.core.Metabolite) for metabolite in cloned_reaction.metabolites)
# assert {metabolite.id for metabolite in cloned_reaction.products} == {metabolite.id for metabolite in
# reaction.products}
# assert {metabolite.id for metabolite in cloned_reaction.reactants} == {metabolite.id for metabolite in
# reaction.reactants}
# assert reaction.id == cloned_reaction.id
# assert reaction.name == cloned_reaction.name
# assert reaction.upper_bound == cloned_reaction.upper_bound
# assert reaction.lower_bound == cloned_reaction.lower_bound
# test moved to cobra
# def test_gene_reaction_rule_setter(self, core_model):
# rxn = Reaction('rxn')
# rxn.add_metabolites({Metabolite('A'): -1, Metabolite('B'): 1})
# rxn.gene_reaction_rule = 'A2B1 or A2B2 and A2B3'
# assert hasattr(list(rxn.genes)[0], 'knock_out')
# core_model.add_reaction(rxn)
# with cameo.util.TimeMachine() as tm:
# core_model.genes.A2B1.knock_out(time_machine=tm)
# assert not core_model.genes.A2B1.functional
# core_model.genes.A2B3.knock_out(time_machine=tm)
# assert not rxn.functional
# assert core_model.genes.A2B3.functional
# assert rxn.functional
# core_model.genes.A2B1.knock_out()
# assert not core_model.genes.A2B1.functional
# assert core_model.reactions.rxn.functional
# core_model.genes.A2B3.knock_out()
# assert not core_model.reactions.rxn.functional
# non_functional = [gene.id for gene in core_model.non_functional_genes]
# assert all(gene in non_functional for gene in ['A2B3', 'A2B1'])
    def test_gene_reaction_rule_setter_reaction_already_added_to_model(self, core_model):
        """Setting the GPR on a reaction already in a model creates knock-out-able genes."""
        rxn = Reaction('rxn')
        rxn.add_metabolites({Metabolite('A'): -1, Metabolite('B'): 1})
        core_model.add_reaction(rxn)
        rxn.gene_reaction_rule = 'A2B'
        assert hasattr(list(rxn.genes)[0], 'knock_out')
def test_str(self, core_model):
assert core_model.reactions[0].__str__().startswith('ACALD')
    def test_add_metabolite(self, solved_model):
        """add_metabolites updates both the reaction and the solver constraint coefficients."""
        solution, model = solved_model
        pgi_reaction = model.reactions.PGI
        test_met = model.metabolites[0]
        pgi_reaction.add_metabolites({test_met: 42}, combine=False)
        constraints = model.solver.constraints
        assert pgi_reaction.metabolites[test_met] == 42
        assert constraints[test_met.id].expression.as_coefficients_dict()[pgi_reaction.forward_variable] == 42
        assert constraints[test_met.id].expression.as_coefficients_dict()[pgi_reaction.reverse_variable] == -42
        # combine=True adds to the existing coefficient instead of replacing it
        pgi_reaction.add_metabolites({test_met: -10}, combine=True)
        assert pgi_reaction.metabolites[test_met] == 32
        assert constraints[test_met.id].expression.as_coefficients_dict()[pgi_reaction.forward_variable] == 32
        assert constraints[test_met.id].expression.as_coefficients_dict()[pgi_reaction.reverse_variable] == -32
        # setting the coefficient to 0 with combine=False removes the metabolite
        pgi_reaction.add_metabolites({test_met: 0}, combine=False)
        with pytest.raises(KeyError):
            assert pgi_reaction.metabolites[test_met]
        assert constraints[test_met.id].expression.as_coefficients_dict()[pgi_reaction.forward_variable] == 0
        assert constraints[test_met.id].expression.as_coefficients_dict()[pgi_reaction.reverse_variable] == 0
    def test_removal_from_model_retains_bounds(self, core_model):
        """Removing a reaction from a model keeps its bounds intact."""
        core_model_cp = core_model.copy()
        reaction = core_model_cp.reactions.ACALD
        assert reaction.model == core_model_cp
        assert reaction.lower_bound == -1000.0
        assert reaction.upper_bound == 1000.0
        assert reaction._lower_bound == -1000.0
        assert reaction._upper_bound == 1000.0
        core_model_cp.remove_reactions([reaction])
        assert reaction.model is None
        assert reaction.lower_bound == -1000.0
        assert reaction.upper_bound == 1000.0
        assert reaction._lower_bound == -1000.0
        assert reaction._upper_bound == 1000.0
    def test_set_bounds_scenario_1(self, core_model):
        """Dropping upper_bound below lower_bound drags lower_bound along and updates solver vars."""
        acald_reaction = core_model.reactions.ACALD
        assert acald_reaction.lower_bound == -1000.
        assert acald_reaction.upper_bound == 1000.
        assert acald_reaction.forward_variable.lb == 0.
        assert acald_reaction.forward_variable.ub == 1000.
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 1000.
        acald_reaction.upper_bound = acald_reaction.lower_bound - 100
        assert acald_reaction.lower_bound == -1100.0
        assert acald_reaction.upper_bound == -1100.0
        assert acald_reaction.forward_variable.lb == 0
        assert acald_reaction.forward_variable.ub == 0
        assert acald_reaction.reverse_variable.lb == 1100.
        assert acald_reaction.reverse_variable.ub == 1100.
        acald_reaction.upper_bound = 100
        assert acald_reaction.lower_bound == -1100.0
        assert acald_reaction.upper_bound == 100
        assert acald_reaction.forward_variable.lb == 0
        assert acald_reaction.forward_variable.ub == 100
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 1100.0
    def test_set_bounds_scenario_3(self, core_model):
        """Setting one bound past the other collapses the interval to that value."""
        reac = core_model.reactions.ACALD
        reac.bounds = -10, -10
        assert reac.lower_bound == -10
        assert reac.upper_bound == -10
        reac.lower_bound = -9
        assert reac.lower_bound == -9
        assert reac.upper_bound == -9
        reac.lower_bound = 2
        assert reac.lower_bound == 2
        assert reac.upper_bound == 2
        reac.upper_bound = -10
        assert reac.lower_bound == -10
        assert reac.upper_bound == -10
        reac.upper_bound = -11
        assert reac.lower_bound == -11
        assert reac.upper_bound == -11
        reac.upper_bound = 2
        assert reac.lower_bound == -11
        assert reac.upper_bound == 2
    def test_set_bounds_scenario_4(self, core_model):
        """From a knocked-out (0, 0) state, moving either bound collapses both onto it."""
        reac = core_model.reactions.ACALD
        reac.lower_bound = reac.upper_bound = 0
        reac.lower_bound = 2
        assert reac.lower_bound == 2
        assert reac.upper_bound == 2
        assert reac.forward_variable.lb == 2
        assert reac.forward_variable.ub == 2
        reac.knock_out()
        reac.upper_bound = -2
        assert reac.lower_bound == -2
        assert reac.upper_bound == -2
        assert reac.reverse_variable.lb == 2
        assert reac.reverse_variable.ub == 2
    def test_set_upper_before_lower_bound_to_0(self, core_model):
        """Re-setting lower_bound to 0 on a (0, 0) reaction keeps everything at zero."""
        core_model.reactions.GAPD.bounds = 0, 0
        core_model.reactions.GAPD.lower_bound = 0
        assert core_model.reactions.GAPD.lower_bound == 0
        assert core_model.reactions.GAPD.upper_bound == 0
        assert core_model.reactions.GAPD.forward_variable.lb == 0
        assert core_model.reactions.GAPD.forward_variable.ub == 0
        assert core_model.reactions.GAPD.reverse_variable.lb == 0
        assert core_model.reactions.GAPD.reverse_variable.ub == 0
    def test_set_bounds_scenario_2(self, core_model):
        """Raising lower_bound above upper_bound drags upper_bound along (mirror of scenario 1)."""
        acald_reaction = core_model.reactions.ACALD
        assert acald_reaction.lower_bound == -1000.
        assert acald_reaction.upper_bound == 1000.
        assert acald_reaction.forward_variable.lb == 0.
        assert acald_reaction.forward_variable.ub == 1000.
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 1000.
        acald_reaction.lower_bound = acald_reaction.upper_bound + 100
        assert acald_reaction.lower_bound == 1100.0
        assert acald_reaction.upper_bound == 1100.0
        assert acald_reaction.forward_variable.lb == 1100.0
        assert acald_reaction.forward_variable.ub == 1100.0
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 0
        acald_reaction.lower_bound = -100
        assert acald_reaction.lower_bound == -100.
        assert acald_reaction.upper_bound == 1100.
        assert acald_reaction.forward_variable.lb == 0
        assert acald_reaction.forward_variable.ub == 1100.
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 100
    def test_change_bounds(self, core_model):
        """Bound changes made inside a model context are reverted on exit."""
        reac = core_model.reactions.ACALD
        reac.bounds = (2, 2)
        assert reac.lower_bound == 2
        assert reac.upper_bound == 2
        with core_model:
            reac.lower_bound = 5
            assert reac.lower_bound == 5
            assert reac.upper_bound == 5
        assert reac.lower_bound == 2
        assert reac.upper_bound == 2
    def test_make_irreversible(self, core_model):
        """Raising lower_bound to 0 closes the reverse direction; lowering it reopens it."""
        acald_reaction = core_model.reactions.ACALD
        assert acald_reaction.lower_bound == -1000.
        assert acald_reaction.upper_bound == 1000.
        assert acald_reaction.forward_variable.lb == 0.
        assert acald_reaction.forward_variable.ub == 1000.
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 1000.
        acald_reaction.lower_bound = 0
        assert acald_reaction.lower_bound == 0
        assert acald_reaction.upper_bound == 1000.
        assert acald_reaction.forward_variable.lb == 0
        assert acald_reaction.forward_variable.ub == 1000.0
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 0
        acald_reaction.lower_bound = -100
        assert acald_reaction.lower_bound == -100.
        assert acald_reaction.upper_bound == 1000.
        assert acald_reaction.forward_variable.lb == 0
        assert acald_reaction.forward_variable.ub == 1000.
        assert acald_reaction.reverse_variable.lb == 0
        assert acald_reaction.reverse_variable.ub == 100
    def test_make_reversible(self, core_model):
        """Dropping lower_bound below 0 opens the reverse direction of an irreversible reaction."""
        pfk_reaction = core_model.reactions.PFK
        assert pfk_reaction.lower_bound == 0.
        assert pfk_reaction.upper_bound == 1000.
        assert pfk_reaction.forward_variable.lb == 0.
        assert pfk_reaction.forward_variable.ub == 1000.
        assert pfk_reaction.reverse_variable.lb == 0
        assert pfk_reaction.reverse_variable.ub == 0
        pfk_reaction.lower_bound = -100.
        assert pfk_reaction.lower_bound == -100.
        assert pfk_reaction.upper_bound == 1000.
        assert pfk_reaction.forward_variable.lb == 0
        assert pfk_reaction.forward_variable.ub == 1000.0
        assert pfk_reaction.reverse_variable.lb == 0
        assert pfk_reaction.reverse_variable.ub == 100.
        pfk_reaction.lower_bound = 0
        assert pfk_reaction.lower_bound == 0
        assert pfk_reaction.upper_bound == 1000.
        assert pfk_reaction.forward_variable.lb == 0
        assert pfk_reaction.forward_variable.ub == 1000.
        assert pfk_reaction.reverse_variable.lb == 0
        assert pfk_reaction.reverse_variable.ub == 0
    def test_make_irreversible_irreversible_to_the_other_side(self, core_model):
        """Driving the upper bound negative flips an irreversible reaction so it
        can only run in the reverse direction."""
        pfk_reaction = core_model.reactions.PFK
        assert pfk_reaction.lower_bound == 0.
        assert pfk_reaction.upper_bound == 1000.
        assert pfk_reaction.forward_variable.lb == 0.
        assert pfk_reaction.forward_variable.ub == 1000.
        assert pfk_reaction.reverse_variable.lb == 0
        assert pfk_reaction.reverse_variable.ub == 0
        pfk_reaction.upper_bound = -100.
        # ub < lb: the setter forces flux through the reverse variable only.
        assert pfk_reaction.forward_variable.lb == 0
        assert pfk_reaction.forward_variable.ub == 0
        assert pfk_reaction.reverse_variable.lb == 100
        assert pfk_reaction.reverse_variable.ub == 100
        pfk_reaction.lower_bound = -1000.
        # Widening the (negative) range widens only the reverse variable.
        assert pfk_reaction.lower_bound == -1000.
        assert pfk_reaction.upper_bound == -100.
        assert pfk_reaction.forward_variable.lb == 0
        assert pfk_reaction.forward_variable.ub == 0
        assert pfk_reaction.reverse_variable.lb == 100
        assert pfk_reaction.reverse_variable.ub == 1000.
    def test_make_lhs_irreversible_reversible(self, core_model):
        """A reaction constrained to run only in reverse (both bounds negative)
        becomes reversible once its upper bound is raised above zero."""
        rxn = Reaction('test')
        rxn.add_metabolites(
            {core_model.metabolites[0]: -1., core_model.metabolites[1]: 1.})
        # Reverse-only: -1000 <= flux <= -100.
        rxn.lower_bound = -1000.
        rxn.upper_bound = -100
        core_model.add_reaction(rxn)
        assert rxn.lower_bound == -1000.
        assert rxn.upper_bound == -100.
        assert rxn.forward_variable.lb == 0.
        assert rxn.forward_variable.ub == 0.
        assert rxn.reverse_variable.lb == 100.
        assert rxn.reverse_variable.ub == 1000.
        rxn.upper_bound = 666.
        # Now spans zero: both directions open.
        assert rxn.lower_bound == -1000.
        assert rxn.upper_bound == 666.
        assert rxn.forward_variable.lb == 0.
        assert rxn.forward_variable.ub == 666
        assert rxn.reverse_variable.lb == 0.
        assert rxn.reverse_variable.ub == 1000.
    def test_model_less_reaction(self, solved_model):
        """``flux`` and ``reduced_cost`` work on model-bound reactions but raise
        ``RuntimeError`` once the reaction is removed from its model."""
        solution, model = solved_model
        for reaction in model.reactions:
            assert isinstance(reaction.flux, float)
            assert isinstance(reaction.reduced_cost, float)
        for reaction in model.reactions:
            model.remove_reactions([reaction])
            with pytest.raises(RuntimeError):
                assert reaction.flux
            with pytest.raises(RuntimeError):
                assert reaction.reduced_cost
    def test_knockout(self, core_model):
        """``knock_out`` zeroes both bounds; bounds can be restored manually, and
        a model context (``with core_model:``) reverts knockouts automatically."""
        original_bounds = dict()
        for reaction in core_model.reactions:
            original_bounds[reaction.id] = (
                reaction.lower_bound, reaction.upper_bound)
            reaction.knock_out()
            assert reaction.lower_bound == 0
            assert reaction.upper_bound == 0
        # Manual restore of every reaction's bounds.
        for k, (lb, ub) in original_bounds.items():
            core_model.reactions.get_by_id(k).lower_bound = lb
            core_model.reactions.get_by_id(k).upper_bound = ub
        for reaction in core_model.reactions:
            assert reaction.lower_bound == original_bounds[reaction.id][0]
            assert reaction.upper_bound == original_bounds[reaction.id][1]
        # Same knockouts inside a context are rolled back on exit.
        with core_model:
            for reaction in core_model.reactions:
                original_bounds[reaction.id] = (
                    reaction.lower_bound, reaction.upper_bound)
                reaction.knock_out()
                assert reaction.lower_bound == 0
                assert reaction.upper_bound == 0
        for reaction in core_model.reactions:
            assert reaction.lower_bound == original_bounds[reaction.id][0]
            assert reaction.upper_bound == original_bounds[reaction.id][1]
    @pytest.mark.xfail(reason="to be implemented in cobra")
    def test_repr_html_(self, core_model):
        """The notebook representation of a reaction should be an HTML table."""
        assert '<table>' in core_model.reactions[0]._repr_html_()
def test_reaction_without_model(self):
r = Reaction('blub')
assert r.flux_expression is None
assert r.forward_variable is None
assert r.reverse_variable is None
    def test_weird_left_to_right_reaction_issue(self, tiny_toy_model):
        """Knocking out a reverse-only exchange reaction inside a model context
        is rolled back (both public and private bounds) when the context exits."""
        d1 = tiny_toy_model.reactions.get_by_id('ex1')
        assert not d1.reversibility
        assert d1.lower_bound == -1000
        assert d1._lower_bound == -1000
        assert d1.upper_bound == 0
        assert d1._upper_bound == 0
        with tiny_toy_model:
            d1.knock_out()
            assert d1.lower_bound == 0
            assert d1._lower_bound == 0
            assert d1.upper_bound == 0
            assert d1._upper_bound == 0
        # Context exit must restore the original bounds.
        assert d1.lower_bound == -1000
        assert d1._lower_bound == -1000
        assert d1.upper_bound == 0
        assert d1._upper_bound == 0
    def test_one_left_to_right_reaction_set_positive_ub(self, tiny_toy_model):
        """Raising a reverse-only reaction's upper bound above 0 opens the
        forward variable without disturbing the reverse variable."""
        d1 = tiny_toy_model.reactions.get_by_id('ex1')
        assert d1.reverse_variable.lb == 0
        assert d1.reverse_variable.ub == 1000
        assert d1._lower_bound == -1000
        assert d1.lower_bound == -1000
        assert d1._upper_bound == 0
        assert d1.upper_bound == 0
        assert d1.forward_variable.lb == 0
        assert d1.forward_variable.ub == 0
        d1.upper_bound = .1
        assert d1.forward_variable.lb == 0
        assert d1.forward_variable.ub == .1
        assert d1.reverse_variable.lb == 0
        assert d1.reverse_variable.ub == 1000
        assert d1._lower_bound == -1000
        assert d1.upper_bound == .1
        # NOTE(review): the two asserts below duplicate the two above verbatim;
        # presumably `d1._upper_bound == .1` was intended — confirm and fix.
        assert d1._lower_bound == -1000
        assert d1.upper_bound == .1
    def test_irrev_reaction_set_negative_lb(self, core_model):
        """Setting a negative lower bound on an irreversible reaction opens its
        reverse variable to match."""
        assert not core_model.reactions.PFK.reversibility
        assert core_model.reactions.PFK.lower_bound == 0
        assert core_model.reactions.PFK.upper_bound == 1000.0
        assert core_model.reactions.PFK.forward_variable.lb == 0
        assert core_model.reactions.PFK.forward_variable.ub == 1000.0
        assert core_model.reactions.PFK.reverse_variable.lb == 0
        assert core_model.reactions.PFK.reverse_variable.ub == 0
        core_model.reactions.PFK.lower_bound = -1000
        assert core_model.reactions.PFK.lower_bound == -1000
        assert core_model.reactions.PFK.upper_bound == 1000.0
        assert core_model.reactions.PFK.forward_variable.lb == 0
        assert core_model.reactions.PFK.forward_variable.ub == 1000.0
        assert core_model.reactions.PFK.reverse_variable.lb == 0
        assert core_model.reactions.PFK.reverse_variable.ub == 1000
    def test_twist_irrev_right_to_left_reaction_to_left_to_right(self, core_model):
        """Turning a forward-only reaction into a reverse-only one (lb=-1000,
        ub=0) closes the forward variable and opens the reverse one."""
        assert not core_model.reactions.PFK.reversibility
        assert core_model.reactions.PFK.lower_bound == 0
        assert core_model.reactions.PFK.upper_bound == 1000.0
        assert core_model.reactions.PFK.forward_variable.lb == 0
        assert core_model.reactions.PFK.forward_variable.ub == 1000.0
        assert core_model.reactions.PFK.reverse_variable.lb == 0
        assert core_model.reactions.PFK.reverse_variable.ub == 0
        core_model.reactions.PFK.lower_bound = -1000
        core_model.reactions.PFK.upper_bound = 0
        assert core_model.reactions.PFK.lower_bound == -1000
        assert core_model.reactions.PFK.upper_bound == 0
        assert core_model.reactions.PFK.forward_variable.lb == 0
        assert core_model.reactions.PFK.forward_variable.ub == 0
        assert core_model.reactions.PFK.reverse_variable.lb == 0
        assert core_model.reactions.PFK.reverse_variable.ub == 1000
@pytest.mark.skipif(TRAVIS, reason='too slow for ci')
def test_imm904_4hglsdm_problem(self, imm904):
# set upper bound before lower bound after knockout
cp = imm904.copy()
rxn = cp.reactions.get_by_id('4HGLSDm')
prev_lb, prev_ub = rxn.lower_bound, rxn.upper_bound
rxn.lower_bound = 0
rxn.upper_bound = 0
rxn.upper_bound = prev_ub
rxn.lower_bound = prev_lb
assert rxn.lower_bound == prev_lb
assert rxn.upper_bound == prev_ub
# set lower bound before upper bound after knockout
cp = imm904.copy()
rxn = cp.reactions.get_by_id('4HGLSDm')
prev_lb, prev_ub = rxn.lower_bound, rxn.upper_bound
rxn.lower_bound = 0
rxn.upper_bound = 0
rxn.lower_bound = prev_lb
rxn.upper_bound = prev_ub
assert rxn.lower_bound == prev_lb
assert rxn.upper_bound == prev_ub
    def test_set_lb_higher_than_ub_sets_ub_to_new_lb(self, core_model):
        """Setting a lower bound above the upper bound drags the upper bound up
        to the new value instead of leaving an infeasible range."""
        for reaction in core_model.reactions:
            assert reaction.lower_bound <= reaction.upper_bound
            reaction.lower_bound = reaction.upper_bound + 100
            assert reaction.lower_bound == reaction.upper_bound
    def test_set_ub_lower_than_lb_sets_lb_to_new_ub(self, core_model):
        """Setting an upper bound below the lower bound drags the lower bound
        down to the new value instead of leaving an infeasible range."""
        for reaction in core_model.reactions:
            assert reaction.lower_bound <= reaction.upper_bound
            reaction.upper_bound = reaction.lower_bound - 100
            assert reaction.lower_bound == reaction.upper_bound
    def test_add_metabolites_combine_true(self, core_model):
        """``add_metabolites(..., combine=True)`` adds new metabolites and sums
        coefficients for metabolites already present, updating the solver
        constraints accordingly."""
        test_metabolite = Metabolite('test')
        for reaction in core_model.reactions:
            reaction.add_metabolites({test_metabolite: -66}, combine=True)
            assert reaction.metabolites[test_metabolite] == -66
            # The stoichiometric constraint must carry the new coefficient on
            # both directional variables (negated for the reverse direction).
            assert core_model.solver.constraints['test'].expression.has(-66. * reaction.forward_variable)
            assert core_model.solver.constraints['test'].expression.has(66. * reaction.reverse_variable)
            already_included_metabolite = list(reaction.metabolites.keys())[0]
            previous_coefficient = reaction.get_coefficient(
                already_included_metabolite.id)
            reaction.add_metabolites({already_included_metabolite: 10},
                                     combine=True)
            # combine=True sums with the existing coefficient.
            new_coefficient = previous_coefficient + 10
            assert reaction.metabolites[already_included_metabolite] == new_coefficient
            assert core_model.solver.constraints[already_included_metabolite.id].expression.has(
                new_coefficient * reaction.forward_variable)
            assert core_model.solver.constraints[already_included_metabolite.id].expression.has(
                -1 * new_coefficient * reaction.reverse_variable)
    @pytest.mark.skipif(TRAVIS, reason='non-deterministic')
    def test_add_metabolites_combine_false(self, core_model):
        """``add_metabolites(..., combine=False)`` overwrites the coefficient of
        an already-present metabolite instead of summing with it."""
        test_metabolite = Metabolite('test')
        for reaction in core_model.reactions:
            reaction.add_metabolites({test_metabolite: -66}, combine=False)
            assert reaction.metabolites[test_metabolite] == -66
            assert core_model.solver.constraints['test'].expression.has(-66. * reaction.forward_variable)
            assert core_model.solver.constraints['test'].expression.has(66. * reaction.reverse_variable)
            already_included_metabolite = list(reaction.metabolites.keys())[0]
            reaction.add_metabolites({already_included_metabolite: 10}, combine=False)
            # combine=False replaces the previous coefficient outright.
            assert reaction.metabolites[already_included_metabolite] == 10
            assert core_model.solver.constraints[already_included_metabolite.id].expression.has(
                10 * reaction.forward_variable)
            assert core_model.solver.constraints[already_included_metabolite.id].expression.has(
                -10 * reaction.reverse_variable)
# def test_pop(self, core_model):
# pgi = core_model.reactions.PGI
# g6p = core_model.metabolites.get_by_id("g6p_c")
# f6p = core_model.metabolites.get_by_id("f6p_c")
# g6p_expr = core_model.solver.constraints["g6p_c"].expression
# g6p_coef = pgi.pop("g6p_c")
# assert g6p not in pgi.metabolites
# actual = core_model.solver.constraints["g6p_c"].expression.as_coefficients_dict()
# expected = (g6p_expr - g6p_coef * pgi.flux_expression).as_coefficients_dict()
# assert actual == expected
# assert pgi.metabolites[f6p] == 1
#
# f6p_expr = core_model.solver.constraints["f6p_c"].expression
# f6p_coef = pgi.pop(f6p)
# assert f6p not in pgi.metabolites
# assert core_model.solver.constraints["f6p_c"].expression.as_coefficients_dict() == (
# f6p_expr - f6p_coef * pgi.flux_expression
# ).as_coefficients_dict()
def test_remove_from_model(self, core_model):
pgi = core_model.reactions.PGI
pgi.remove_from_model()
assert pgi.model is None
assert not ("PGI" in core_model.reactions)
assert not (pgi.id in core_model.solver.variables)
assert not (pgi.reverse_id in core_model.solver.variables)
def test_delete(self, core_model):
pgi = core_model.reactions.PGI
pgi.delete()
assert pgi.model is None
assert not ("PGI" in core_model.reactions)
assert not (pgi.id in core_model.solver.variables)
assert not (pgi.reverse_id in core_model.solver.variables)
    def test_change_id_is_reflected_in_solver(self, core_model):
        """Renaming a reaction renames its forward/reverse solver variables and
        removes the old names from the solver."""
        for i, reaction in enumerate(core_model.reactions):
            old_reaction_id = reaction.id
            assert core_model.solver.variables[old_reaction_id].name == old_reaction_id
            assert old_reaction_id in core_model.solver.variables
            new_reaction_id = reaction.id + '_' + str(i)
            reaction.id = new_reaction_id
            assert reaction.id == new_reaction_id
            # Old variable name gone, new forward/reverse names present.
            assert not (old_reaction_id in core_model.solver.variables)
            assert reaction.id in core_model.solver.variables
            assert reaction.reverse_id in core_model.solver.variables
            name = core_model.solver.variables[reaction.id].name
            assert name == reaction.id
class TestModel:
# def test_model_is_subclassed(self, core_model):
# assert isinstance(core_model, cobra.Model)
# for reac in core_model.reactions:
# assert isinstance(reac, Reaction)
# for met in reac.metabolites:
# assert isinstance(met, Metabolite)
# assert met in core_model.metabolites
# assert met is core_model.metabolites.get_by_id(met.id)
# for gene in reac.genes:
# assert isinstance(gene, Gene)
# assert gene in core_model.genes
# assert gene is core_model.genes.get_by_id(gene.id)
#
# for gene in core_model.genes:
# assert isinstance(gene, Gene)
# for reac in gene.reactions:
# assert isinstance(reac, Reaction)
# assert reac in core_model.reactions
# assert reac is core_model.reactions.get_by_id(reac.id)
#
# for met in core_model.metabolites:
# assert isinstance(met, Metabolite)
# for reac in met.reactions:
# assert isinstance(reac, Reaction)
# assert reac in core_model.reactions
# assert reac is core_model.reactions.get_by_id(reac.id)
    def test_objective_coefficient_reflects_changed_objective(self, core_model):
        """Switching the model objective updates ``objective_coefficient`` on
        both the old and the new objective reaction."""
        biomass_r = core_model.reactions.get_by_id('Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2')
        assert biomass_r.objective_coefficient == 1
        core_model.objective = "PGI"
        assert biomass_r.objective_coefficient == 0
        assert core_model.reactions.PGI.objective_coefficient == 1
    def test_change_objective_through_objective_coefficient(self, core_model):
        """Setting ``objective_coefficient`` on a reaction adds it to the solver
        objective without removing the existing objective terms."""
        biomass_r = core_model.reactions.get_by_id('Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2')
        pgi = core_model.reactions.PGI
        pgi.objective_coefficient = 2
        coef_dict = core_model.solver.objective.expression.as_coefficients_dict()
        # Check that objective has been updated
        assert coef_dict[pgi.forward_variable] == 2
        assert coef_dict[pgi.reverse_variable] == -2
        # Check that original objective is still in there
        assert coef_dict[biomass_r.forward_variable] == 1
        assert coef_dict[biomass_r.reverse_variable] == -1
# def test_model_from_other_model(self, core_model):
# core_model = Model(id_or_model=core_model)
# for reaction in core_model.reactions:
# assert reaction == core_model.reactions.get_by_id(reaction.id)
    def test_add_reactions(self, core_model):
        """``add_reactions`` appends reactions, creates their solver variables,
        and subsequently-set objective coefficients land in the objective."""
        r1 = Reaction('r1')
        r1.add_metabolites({Metabolite('A'): -1, Metabolite('B'): 1})
        r1.lower_bound, r1.upper_bound = -999999., 999999.
        r2 = Reaction('r2')
        r2.add_metabolites(
            {Metabolite('A'): -1, Metabolite('C'): 1, Metabolite('D'): 1})
        r2.lower_bound, r2.upper_bound = 0., 999999.
        core_model.add_reactions([r1, r2])
        r2.objective_coefficient = 3.
        assert r2.objective_coefficient == 3.
        assert core_model.reactions[-2] == r1
        assert core_model.reactions[-1] == r2
        assert isinstance(core_model.reactions[-2].reverse_variable, core_model.solver.interface.Variable)
        coefficients_dict = core_model.solver.objective.expression.as_coefficients_dict()
        biomass_r = core_model.reactions.get_by_id('Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2')
        # Original biomass objective is untouched; r2 is added with weight 3.
        assert coefficients_dict[biomass_r.forward_variable] == 1.
        assert coefficients_dict[biomass_r.reverse_variable] == -1.
        assert coefficients_dict[core_model.reactions.r2.forward_variable] == 3.
        assert coefficients_dict[core_model.reactions.r2.reverse_variable] == -3.
def test_remove_reactions_1(self, core_model):
core_model.remove_reactions([core_model.reactions.PGI, core_model.reactions.PGK])
assert "PGI" not in core_model.reactions
assert "PGK" not in core_model.reactions
assert "PGI" not in core_model.reactions
assert "PGK" not in core_model.reactions
    def test_remove_reactions_2(self, core_model):
        """Removed reactions lose their model reference and solver variables,
        and can be re-added afterwards."""
        reactions_to_remove = core_model.reactions[10:30]
        assert all([reaction.model is core_model for reaction in reactions_to_remove])
        assert all([core_model.reactions.get_by_id(reaction.id) == reaction for reaction in reactions_to_remove])
        core_model.remove_reactions(reactions_to_remove)
        assert all([reaction.model is None for reaction in reactions_to_remove])
        for reaction in reactions_to_remove:
            assert reaction.id not in list(core_model.solver.variables.keys())
        # Re-adding the same objects restores them to the model.
        core_model.add_reactions(reactions_to_remove)
        for reaction in reactions_to_remove:
            assert reaction in core_model.reactions
    def test_remove_and_add_reactions(self, core_model):
        """Removing reactions from a copy must not affect the original model;
        re-adding them restores the copy."""
        model_copy = core_model.copy()
        pgi, pgk = model_copy.reactions.PGI, model_copy.reactions.PGK
        model_copy.remove_reactions([pgi, pgk])
        assert "PGI" not in model_copy.reactions
        assert "PGK" not in model_copy.reactions
        # The original model is untouched by edits to the copy.
        assert "PGI" in core_model.reactions
        assert "PGK" in core_model.reactions
        model_copy.add_reactions([pgi, pgk])
        assert "PGI" in core_model.reactions
        assert "PGK" in core_model.reactions
        assert "PGI" in model_copy.reactions
        assert "PGK" in model_copy.reactions
# def test_add_cobra_reaction(self, core_model):
# r = cobra.Reaction(id="c1")
# core_model.add_reaction(r)
# assert isinstance(core_model.reactions.c1, Reaction)
    def test_all_objects_point_to_all_other_correct_objects(self, core_model):
        """Every reaction, gene, and metabolite must reference the model, and
        cross-references between them must be internally consistent."""
        for reaction in core_model.reactions:
            assert reaction.model == core_model
            for gene in reaction.genes:
                assert gene == core_model.genes.get_by_id(gene.id)
                assert gene.model == core_model
                for reaction2 in gene.reactions:
                    assert reaction2.model == core_model
                    assert reaction2 == core_model.reactions.get_by_id(reaction2.id)
            for metabolite in reaction.metabolites:
                assert metabolite.model == core_model
                assert metabolite == core_model.metabolites.get_by_id(metabolite.id)
                for reaction2 in metabolite.reactions:
                    assert reaction2.model == core_model
                    assert reaction2 == core_model.reactions.get_by_id(reaction2.id)
def test_objects_point_to_correct_other_after_copy(self, core_model):
for reaction in core_model.reactions:
assert reaction.model == core_model
for gene in reaction.genes:
assert gene == core_model.genes.get_by_id(gene.id)
assert gene.model == core_model
for reaction2 in gene.reactions:
assert reaction2.model == core_model
assert reaction2 == core_model.reactions.get_by_id(reaction2.id)
for metabolite in reaction.metabolites:
assert metabolite.model == core_model
assert metabolite == core_model.metabolites.get_by_id(metabolite.id)
for reaction2 in metabolite.reactions:
assert reaction2.model == core_model
assert reaction2 == core_model.reactions.get_by_id(reaction2.id)
    def test_objective(self, core_model):
        """The default objective maximizes biomass: +1 on the forward variable,
        -1 on the reverse variable."""
        obj = core_model.objective
        assert {var.name: coef for var, coef in obj.expression.as_coefficients_dict().items()} == \
            {'Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2_reverse_9ebcd': -1,
             'Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2': 1}
        assert obj.direction == "max"
    def test_change_objective(self, core_model):
        """The objective can be set via an expression or a reaction id, and an
        objective change inside a model context is reverted on exit."""
        expression = 1.0 * core_model.solver.variables['ENO'] + 1.0 * core_model.solver.variables['PFK']
        core_model.solver.objective = core_model.solver.interface.Objective(expression)
        assert core_model.solver.objective.expression == expression
        core_model.objective = "ENO"
        eno_obj = core_model.solver.interface.Objective(
            core_model.reactions.ENO.flux_expression, direction="max")
        pfk_obj = core_model.solver.interface.Objective(
            core_model.reactions.PFK.flux_expression, direction="max")
        assert core_model.solver.objective == eno_obj
        # A change made inside the context is rolled back when it exits.
        with core_model:
            core_model.objective = "PFK"
            assert core_model.solver.objective == pfk_obj
        assert core_model.solver.objective == eno_obj
    def test_set_reaction_objective(self, core_model):
        """Assigning a Reaction object as the objective builds the expression
        forward_variable - reverse_variable."""
        core_model.objective = core_model.reactions.ACALD
        assert str(core_model.solver.objective.expression) == str(
            1.0 * core_model.reactions.ACALD.forward_variable -
            1.0 * core_model.reactions.ACALD.reverse_variable)
    def test_set_reaction_objective_str(self, core_model):
        """Assigning a reaction id string as the objective behaves exactly like
        assigning the Reaction object itself."""
        core_model.objective = core_model.reactions.ACALD.id
        assert str(core_model.solver.objective.expression) == str(
            1.0 * core_model.reactions.ACALD.forward_variable -
            1.0 * core_model.reactions.ACALD.reverse_variable)
    def test_invalid_objective_raises(self, core_model):
        """Invalid objective values raise: unknown id -> ValueError, wrong type
        (a float) -> TypeError."""
        with pytest.raises(ValueError):
            core_model.objective = 'This is not a valid objective!'
        with pytest.raises(TypeError):
            setattr(core_model, 'objective', 3.)
#
# def test_solver_change(self, core_model):
# solver_id = id(core_model.solver)
# problem_id = id(core_model.solver.problem)
# solution = core_model.optimize().fluxes
# core_model.solver = 'glpk'
# assert id(core_model.solver) != solver_id
# assert id(core_model.solver.problem) != problem_id
# new_solution = core_model.optimize()
# for key in list(solution.keys()):
# assert round(abs(new_solution.fluxes[key] - solution[key]), 7) == 0
#
# def test_solver_change_with_optlang_interface(self, core_model):
# solver_id = id(core_model.solver)
# problem_id = id(core_model.solver.problem)
# solution = core_model.optimize().fluxes
# core_model.solver = optlang.glpk_interface
# assert id(core_model.solver) != solver_id
# assert id(core_model.solver.problem) != problem_id
# new_solution = core_model.optimize()
# for key in list(solution.keys()):
# assert round(abs(new_solution.fluxes[key] - solution[key]), 7) == 0
    def test_invalid_solver_change_raises(self, core_model):
        """Assigning anything that is not a known solver (a list, an unknown
        name, an unrelated module) raises ``SolverNotFound``."""
        with pytest.raises(SolverNotFound):
            setattr(core_model, 'solver', [1, 2, 3])
        with pytest.raises(SolverNotFound):
            setattr(core_model, 'solver', 'ThisIsDefinitelyNotAvalidSolver')
        with pytest.raises(SolverNotFound):
            setattr(core_model, 'solver', os)
    @pytest.mark.skipif('cplex' not in solvers, reason='no cplex')
    def test_change_solver_to_cplex_and_check_copy_works(self, core_model):
        """Switching the solver backend to cplex preserves the optimum, both on
        the model itself and on copies made before/after the switch."""
        assert round(abs(core_model.slim_optimize() - 0.8739215069684306), 7) == 0
        core_model_copy = core_model.copy()
        assert round(abs(core_model_copy.slim_optimize() - 0.8739215069684306), 7) == 0
        # Second, change existing glpk based model to cplex
        core_model.solver = 'cplex'
        assert round(abs(core_model.slim_optimize() - 0.8739215069684306), 7) == 0
        core_model_copy = copy.copy(core_model)
        assert round(abs(core_model_copy.slim_optimize() - 0.8739215069684306), 7) == 0
    def test_copy_preserves_existing_solution(self, solved_model):
        """Copying a solved model keeps the solver's primal values (to 1e-6)."""
        solution, model = solved_model
        model_cp = copy.copy(model)
        primals_original = [variable.primal for variable in model.solver.variables]
        primals_copy = [variable.primal for variable in model_cp.solver.variables]
        abs_diff = abs(numpy.array(primals_copy) - numpy.array(primals_original))
        assert not any(abs_diff > 1e-6)
    def test_essential_genes(self, core_model):
        """``find_essential_genes`` matches the reference set and raises
        ``OptimizationError`` when the model is made infeasible."""
        observed_essential_genes = [g.id for g in find_essential_genes(core_model)]
        assert sorted(observed_essential_genes) == sorted(ESSENTIAL_GENES)
        with pytest.raises(OptimizationError):
            # Forcing an impossible biomass flux makes the model infeasible.
            core_model.reactions.Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2.lower_bound = 999999.
            find_essential_genes(core_model)
    def test_essential_reactions(self, core_model):
        """``find_essential_reactions`` matches the reference set and raises
        ``OptimizationError`` when the model is made infeasible."""
        observed_essential_reactions = [r.id for r in find_essential_reactions(core_model)]
        assert sorted(observed_essential_reactions) == sorted(ESSENTIAL_REACTIONS)
        with pytest.raises(OptimizationError):
            # Forcing an impossible biomass flux makes the model infeasible.
            core_model.reactions.Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2.lower_bound = 999999.
            find_essential_reactions(core_model)
    def test_essential_metabolites_steady_state(self, core_model):
        """``find_essential_metabolites`` with ``force_steady_state=True``
        matches the reference set and raises on an infeasible model."""
        essential_metabolites_balanced = [m.id for m in find_essential_metabolites(core_model,
                                                                                   force_steady_state=True)]
        assert sorted(essential_metabolites_balanced) == sorted(ESSENTIAL_METABOLITES)
        with pytest.raises(OptimizationError):
            core_model.reactions.Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2.lower_bound = 999999.
            find_essential_metabolites(core_model, force_steady_state=True)
    @pytest.mark.xfail(reason='needs some refactoring, uses missing bounds, not allowed by cplex')
    def test_essential_metabolites(self, core_model):
        """``find_essential_metabolites`` without steady-state forcing matches
        the reference set and raises on an infeasible model."""
        essential_metabolites_unbalanced = [m.id for m in find_essential_metabolites(core_model,
                                                                                     force_steady_state=False)]
        assert sorted(essential_metabolites_unbalanced) == sorted(ESSENTIAL_METABOLITES)
        with pytest.raises(OptimizationError):
            core_model.reactions.Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2.lower_bound = 999999.
            find_essential_metabolites(core_model, force_steady_state=False)
# def test_effective_bounds(self, core_model):
# core_model.reactions.Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2.lower_bound = 0.873921
# for reaction in core_model.reactions:
# assert abs(reaction.effective_lower_bound - REFERENCE_FVA_SOLUTION_ECOLI_CORE['lower_bound'][
# reaction.id]) < 0.000001
# assert abs(reaction.effective_upper_bound - REFERENCE_FVA_SOLUTION_ECOLI_CORE['upper_bound'][
# reaction.id]) < 0.000001
# def test_add_ratio_constraint(self, solved_model):
# solution, model = solved_model
# assert round(abs(solution.objective_value - 0.873921506968), 7) == 0
# assert 2 * solution.fluxes['PGI'] != solution.fluxes['G6PDH2r']
# cp = model.copy()
# ratio_constr = cp.add_ratio_constraint(cp.reactions.PGI, cp.reactions.G6PDH2r, 0.5)
# assert ratio_constr.name == 'ratio_constraint_PGI_G6PDH2r'
# solution = cp.optimize()
# assert round(abs(solution.objective_value - 0.870407873712), 7) == 0
# assert round(abs(2 * solution.fluxes['PGI'] - solution.fluxes['G6PDH2r']), 7) == 0
# cp = model.copy()
#
# ratio_constr = cp.add_ratio_constraint(cp.reactions.PGI, cp.reactions.G6PDH2r, 0.5)
# assert ratio_constr.name == 'ratio_constraint_PGI_G6PDH2r'
# solution = cp.optimize()
# assert round(abs(solution.objective_value - 0.870407873712), 7) == 0
# assert round(abs(2 * solution.fluxes['PGI'] - solution.fluxes['G6PDH2r']), 7) == 0
#
# cp = model.copy()
# ratio_constr = cp.add_ratio_constraint('PGI', 'G6PDH2r', 0.5)
# assert ratio_constr.name == 'ratio_constraint_PGI_G6PDH2r'
# solution = cp.optimize()
# assert abs(solution.objective_value - 0.870407) < 1e-6
# assert abs(2 * solution.fluxes['PGI'] - solution.fluxes['G6PDH2r']) < 1e-6
#
# cp = model.copy()
# ratio_constr = cp.add_ratio_constraint([cp.reactions.PGI, cp.reactions.ACALD],
# [cp.reactions.G6PDH2r, cp.reactions.ACONTa], 0.5)
# assert ratio_constr.name == 'ratio_constraint_PGI+ACALD_G6PDH2r+ACONTa'
# solution = cp.optimize()
# assert abs(solution.objective_value - 0.872959) < 1e-6
# assert abs((solution.fluxes['PGI'] + solution.fluxes['ACALD']) -
# 0.5 * (solution.fluxes['G6PDH2r'] + solution.fluxes['ACONTa'])) < 1e-5
def test_fix_objective_as_constraint(self, core_model):
# with TimeMachine
with core_model:
fix_objective_as_constraint(core_model)
constraint_name = core_model.solver.constraints[-1]
assert core_model.solver.constraints[-1].expression - core_model.objective.expression == 0
assert constraint_name not in core_model.solver.constraints
# without TimeMachine
fix_objective_as_constraint(core_model)
constraint_name = core_model.solver.constraints[-1]
assert core_model.solver.constraints[-1].expression - core_model.objective.expression == 0
assert constraint_name in core_model.solver.constraints
    def test_get_reaction_for(self, core_model):
        """``get_reaction_for`` resolves reactions and metabolites (by object or
        id) to a Reaction; invalid inputs raise TypeError/KeyError."""
        with core_model:
            for r in core_model.reactions:
                assert isinstance(get_reaction_for(core_model, r.id), cobra.Reaction)
                assert isinstance(get_reaction_for(core_model, r), cobra.Reaction)
            for m in core_model.metabolites:
                assert isinstance(get_reaction_for(core_model, m.id), cobra.Reaction)
                assert isinstance(get_reaction_for(core_model, m), cobra.Reaction)
        with pytest.raises(TypeError):
            get_reaction_for(core_model, None)
        with pytest.raises(KeyError):
            get_reaction_for(core_model, "blablabla")
        with pytest.raises(KeyError):
            # With add=False an unknown metabolite id is not auto-created.
            get_reaction_for(core_model, "accoa_lp_c_lp_", add=False)
    def test_stoichiometric_matrix(self, core_model):
        """The stoichiometric array is (metabolites x reactions) and each entry
        equals the reaction's coefficient for that metabolite (0 if absent)."""
        stoichiometric_matrix = create_stoichiometric_array(core_model)
        assert len(core_model.reactions) == stoichiometric_matrix.shape[1]
        assert len(core_model.metabolites) == stoichiometric_matrix.shape[0]
        for i, reaction in enumerate(core_model.reactions):
            for j, metabolite in enumerate(core_model.metabolites):
                if metabolite in reaction.metabolites:
                    coefficient = reaction.metabolites[metabolite]
                else:
                    coefficient = 0
                assert stoichiometric_matrix[j, i] == coefficient
    def test_set_medium(self, core_model):
        """The extracted medium lists exactly the exchanges with negative lower
        bounds, and re-loading it round-trips without duplicates."""
        this_medium = medium(core_model)
        for reaction in core_model.exchanges:
            # Only open uptake reactions (lb < 0) belong to the medium.
            if reaction.lower_bound == 0:
                assert reaction.id not in this_medium.reaction_id.values
            if reaction.lower_bound < 0:
                assert reaction.id in this_medium.reaction_id.values
        load_medium(core_model, this_medium)
        for rid in medium(core_model).reaction_id:
            assert len(this_medium[this_medium.reaction_id == rid]) == 1
def test_solver_change_preserves_non_metabolic_constraints(self, core_model):
with core_model:
constraint = core_model.problem.Constraint(core_model.reactions.PGK.flux_expression -
0.5 * core_model.reactions.PFK.flux_expression,
lb=0, ub=0)
core_model.add_cons_vars(constraint)
all_constraint_ids = core_model.solver.constraints.keys()
assert all_constraint_ids[-1], 'ratio_constraint_PGK_PFK'
resurrected = pickle.loads(pickle.dumps(core_model))
assert resurrected.solver.constraints.keys() == all_constraint_ids
class TestMetabolite:
    """Tests for Metabolite id handling, removal, and notebook rendering."""

    def test_set_id(self, core_model):
        """Ids must be strings and unique within the model; a valid rename is
        reflected in the model's metabolite index."""
        met = Metabolite("test")
        with pytest.raises(TypeError):
            setattr(met, 'id', 1)
        core_model.add_metabolites([met])
        with pytest.raises(ValueError):
            # 'g6p_c' already exists in the model.
            setattr(met, "id", 'g6p_c')
        met.id = "test2"
        assert "test2" in core_model.metabolites
        assert "test" not in core_model.metabolites
    def test_remove_from_model(self, core_model):
        """Removing a metabolite drops it from the model and from the solver's
        constraints."""
        met = core_model.metabolites.get_by_id("g6p_c")
        met.remove_from_model()
        assert not (met.id in core_model.metabolites)
        assert not (met.id in core_model.solver.constraints)
    @pytest.mark.xfail(reason='to be implemented in cobra')
    def test_notebook_repr(self):
        """The HTML repr should render id, name, and formula (whitespace is
        stripped before comparison, so only structure matters)."""
        met = Metabolite(id="test", name="test metabolites", formula="CH4")
        expected = """
        <table>
            <tr>
                <td><strong>Id</strong></td><td>test</td>
            </tr>
            <tr>
                <td><strong>Name</strong></td><td>test metabolites</td>
            </tr>
            <tr>
                <td><strong>Formula</strong></td><td>CH4</td>
            </tr>
        </table>""".replace(' ', '')
        assert met._repr_html_().replace(' ', '') == expected
| [
"cobra.flux_analysis.find_essential_genes",
"pytest.mark.skipif",
"os.path.join",
"cameo.flux_analysis.analysis.find_essential_metabolites",
"cobra.util.fix_objective_as_constraint",
"os.path.dirname",
"pytest.raises",
"pickle.dumps",
"cameo.core.utils.medium",
"os.getenv",
"pytest.mark.xfail",
... | [((1394, 1419), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1409, 1419), False, 'import os\n'), ((1356, 1382), 'os.getenv', 'os.getenv', (['"""TRAVIS"""', '(False)'], {}), "('TRAVIS', False)\n", (1365, 1382), False, 'import os\n'), ((1472, 1537), 'os.path.join', 'os.path.join', (['TESTDIR', '"""data/REFERENCE_flux_ranges_EcoliCore.csv"""'], {}), "(TESTDIR, 'data/REFERENCE_flux_ranges_EcoliCore.csv')\n", (1484, 1537), False, 'import os\n'), ((2858, 2876), 'cobra.Model', 'Model', (['"""Toy Model"""'], {}), "('Toy Model')\n", (2863, 2876), False, 'from cobra import Model, Reaction, Metabolite\n'), ((2886, 2902), 'cobra.Metabolite', 'Metabolite', (['"""M1"""'], {}), "('M1')\n", (2896, 2902), False, 'from cobra import Model, Reaction, Metabolite\n'), ((2912, 2927), 'cobra.Reaction', 'Reaction', (['"""ex1"""'], {}), "('ex1')\n", (2920, 2927), False, 'from cobra import Model, Reaction, Metabolite\n'), ((19711, 19765), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""to be implemented in cobra"""'}), "(reason='to be implemented in cobra')\n", (19728, 19765), False, 'import pytest\n'), ((23532, 23584), 'pytest.mark.skipif', 'pytest.mark.skipif', (['TRAVIS'], {'reason': '"""too slow for ci"""'}), "(TRAVIS, reason='too slow for ci')\n", (23550, 23584), False, 'import pytest\n'), ((26344, 26398), 'pytest.mark.skipif', 'pytest.mark.skipif', (['TRAVIS'], {'reason': '"""non-deterministic"""'}), "(TRAVIS, reason='non-deterministic')\n", (26362, 26398), False, 'import pytest\n'), ((40865, 40926), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('cplex' not in solvers)"], {'reason': '"""no cplex"""'}), "('cplex' not in solvers, reason='no cplex')\n", (40883, 40926), False, 'import pytest\n'), ((43408, 43506), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""needs some refactoring, uses missing bounds, not allowed by cplex"""'}), "(reason=\n 'needs some refactoring, uses missing bounds, not allowed by cplex')\n", (43425, 
43506), False, 'import pytest\n'), ((50639, 50693), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""to be implemented in cobra"""'}), "(reason='to be implemented in cobra')\n", (50656, 50693), False, 'import pytest\n'), ((2592, 2637), 'os.path.join', 'os.path.join', (['data_directory', '"""EcoliCore.xml"""'], {}), "(data_directory, 'EcoliCore.xml')\n", (2604, 2637), False, 'import os\n'), ((6786, 6801), 'cobra.Reaction', 'Reaction', (['"""rxn"""'], {}), "('rxn')\n", (6794, 6801), False, 'from cobra import Model, Reaction, Metabolite\n'), ((17169, 17185), 'cobra.Reaction', 'Reaction', (['"""test"""'], {}), "('test')\n", (17177, 17185), False, 'from cobra import Model, Reaction, Metabolite\n'), ((19931, 19947), 'cobra.Reaction', 'Reaction', (['"""blub"""'], {}), "('blub')\n", (19939, 19947), False, 'from cobra import Model, Reaction, Metabolite\n'), ((25147, 25165), 'cobra.Metabolite', 'Metabolite', (['"""test"""'], {}), "('test')\n", (25157, 25165), False, 'from cobra import Model, Reaction, Metabolite\n'), ((26487, 26505), 'cobra.Metabolite', 'Metabolite', (['"""test"""'], {}), "('test')\n", (26497, 26505), False, 'from cobra import Model, Reaction, Metabolite\n'), ((32454, 32468), 'cobra.Reaction', 'Reaction', (['"""r1"""'], {}), "('r1')\n", (32462, 32468), False, 'from cobra import Model, Reaction, Metabolite\n'), ((32611, 32625), 'cobra.Reaction', 'Reaction', (['"""r2"""'], {}), "('r2')\n", (32619, 32625), False, 'from cobra import Model, Reaction, Metabolite\n'), ((41423, 41444), 'copy.copy', 'copy.copy', (['core_model'], {}), '(core_model)\n', (41432, 41444), False, 'import copy\n'), ((41659, 41675), 'copy.copy', 'copy.copy', (['model'], {}), '(model)\n', (41668, 41675), False, 'import copy\n'), ((47023, 47062), 'cobra.util.fix_objective_as_constraint', 'fix_objective_as_constraint', (['core_model'], {}), '(core_model)\n', (47050, 47062), False, 'from cobra.util import fix_objective_as_constraint\n'), ((48161, 48200), 
'cameo.flux_analysis.structural.create_stoichiometric_array', 'create_stoichiometric_array', (['core_model'], {}), '(core_model)\n', (48188, 48200), False, 'from cameo.flux_analysis.structural import create_stoichiometric_array\n'), ((48794, 48812), 'cameo.core.utils.medium', 'medium', (['core_model'], {}), '(core_model)\n', (48800, 48812), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((49092, 49128), 'cameo.core.utils.load_medium', 'load_medium', (['core_model', 'this_medium'], {}), '(core_model, this_medium)\n', (49103, 49128), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((50039, 50057), 'cobra.Metabolite', 'Metabolite', (['"""test"""'], {}), "('test')\n", (50049, 50057), False, 'from cobra import Model, Reaction, Metabolite\n'), ((50742, 50803), 'cobra.Metabolite', 'Metabolite', ([], {'id': '"""test"""', 'name': '"""test metabolites"""', 'formula': '"""CH4"""'}), "(id='test', name='test metabolites', formula='CH4')\n", (50752, 50803), False, 'from cobra import Model, Reaction, Metabolite\n'), ((8106, 8129), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (8119, 8129), False, 'import pytest\n'), ((39219, 39244), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (39232, 39244), False, 'import pytest\n'), ((39327, 39351), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (39340, 39351), False, 'import pytest\n'), ((40564, 40593), 'pytest.raises', 'pytest.raises', (['SolverNotFound'], {}), '(SolverNotFound)\n', (40577, 40593), False, 'import pytest\n'), ((40661, 40690), 'pytest.raises', 'pytest.raises', (['SolverNotFound'], {}), '(SolverNotFound)\n', (40674, 40690), False, 'import pytest\n'), ((40782, 40811), 'pytest.raises', 'pytest.raises', (['SolverNotFound'], {}), '(SolverNotFound)\n', (40795, 40811), False, 'import pytest\n'), ((42186, 42218), 'pytest.raises', 'pytest.raises', (['OptimizationError'], {}), 
'(OptimizationError)\n', (42199, 42218), False, 'import pytest\n'), ((42342, 42374), 'cobra.flux_analysis.find_essential_genes', 'find_essential_genes', (['core_model'], {}), '(core_model)\n', (42362, 42374), False, 'from cobra.flux_analysis import find_essential_genes, find_essential_reactions\n'), ((42616, 42648), 'pytest.raises', 'pytest.raises', (['OptimizationError'], {}), '(OptimizationError)\n', (42629, 42648), False, 'import pytest\n'), ((42772, 42808), 'cobra.flux_analysis.find_essential_reactions', 'find_essential_reactions', (['core_model'], {}), '(core_model)\n', (42796, 42808), False, 'from cobra.flux_analysis import find_essential_genes, find_essential_reactions\n'), ((43182, 43214), 'pytest.raises', 'pytest.raises', (['OptimizationError'], {}), '(OptimizationError)\n', (43195, 43214), False, 'import pytest\n'), ((43338, 43401), 'cameo.flux_analysis.analysis.find_essential_metabolites', 'find_essential_metabolites', (['core_model'], {'force_steady_state': '(True)'}), '(core_model, force_steady_state=True)\n', (43364, 43401), False, 'from cameo.flux_analysis.analysis import find_essential_metabolites\n'), ((43868, 43900), 'pytest.raises', 'pytest.raises', (['OptimizationError'], {}), '(OptimizationError)\n', (43881, 43900), False, 'import pytest\n'), ((44024, 44088), 'cameo.flux_analysis.analysis.find_essential_metabolites', 'find_essential_metabolites', (['core_model'], {'force_steady_state': '(False)'}), '(core_model, force_steady_state=False)\n', (44050, 44088), False, 'from cameo.flux_analysis.analysis import find_essential_metabolites\n'), ((46710, 46749), 'cobra.util.fix_objective_as_constraint', 'fix_objective_as_constraint', (['core_model'], {}), '(core_model)\n', (46737, 46749), False, 'from cobra.util import fix_objective_as_constraint\n'), ((47801, 47825), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (47814, 47825), False, 'import pytest\n'), ((47839, 47873), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', 
(['core_model', 'None'], {}), '(core_model, None)\n', (47855, 47873), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((47887, 47910), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (47900, 47910), False, 'import pytest\n'), ((47924, 47965), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', (['core_model', '"""blablabla"""'], {}), "(core_model, 'blablabla')\n", (47940, 47965), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((47979, 48002), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (47992, 48002), False, 'import pytest\n'), ((48016, 48073), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', (['core_model', '"""accoa_lp_c_lp_"""'], {'add': '(False)'}), "(core_model, 'accoa_lp_c_lp_', add=False)\n", (48032, 48073), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((49148, 49166), 'cameo.core.utils.medium', 'medium', (['core_model'], {}), '(core_model)\n', (49154, 49166), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((50071, 50095), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (50084, 50095), False, 'import pytest\n'), ((50186, 50211), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (50199, 50211), False, 'import pytest\n'), ((6831, 6846), 'cobra.Metabolite', 'Metabolite', (['"""A"""'], {}), "('A')\n", (6841, 6846), False, 'from cobra import Model, Reaction, Metabolite\n'), ((6852, 6867), 'cobra.Metabolite', 'Metabolite', (['"""B"""'], {}), "('B')\n", (6862, 6867), False, 'from cobra import Model, Reaction, Metabolite\n'), ((18306, 18333), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (18319, 18333), False, 'import pytest\n'), ((18389, 18416), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (18402, 18416), False, 'import pytest\n'), ((32497, 32512), 
'cobra.Metabolite', 'Metabolite', (['"""A"""'], {}), "('A')\n", (32507, 32512), False, 'from cobra import Model, Reaction, Metabolite\n'), ((32518, 32533), 'cobra.Metabolite', 'Metabolite', (['"""B"""'], {}), "('B')\n", (32528, 32533), False, 'from cobra import Model, Reaction, Metabolite\n'), ((32667, 32682), 'cobra.Metabolite', 'Metabolite', (['"""A"""'], {}), "('A')\n", (32677, 32682), False, 'from cobra import Model, Reaction, Metabolite\n'), ((32688, 32703), 'cobra.Metabolite', 'Metabolite', (['"""C"""'], {}), "('C')\n", (32698, 32703), False, 'from cobra import Model, Reaction, Metabolite\n'), ((32708, 32723), 'cobra.Metabolite', 'Metabolite', (['"""D"""'], {}), "('D')\n", (32718, 32723), False, 'from cobra import Model, Reaction, Metabolite\n'), ((41866, 41891), 'numpy.array', 'numpy.array', (['primals_copy'], {}), '(primals_copy)\n', (41877, 41891), False, 'import numpy\n'), ((41894, 41923), 'numpy.array', 'numpy.array', (['primals_original'], {}), '(primals_original)\n', (41905, 41923), False, 'import numpy\n'), ((42064, 42096), 'cobra.flux_analysis.find_essential_genes', 'find_essential_genes', (['core_model'], {}), '(core_model)\n', (42084, 42096), False, 'from cobra.flux_analysis import find_essential_genes, find_essential_reactions\n'), ((42482, 42518), 'cobra.flux_analysis.find_essential_reactions', 'find_essential_reactions', (['core_model'], {}), '(core_model)\n', (42506, 42518), False, 'from cobra.flux_analysis import find_essential_genes, find_essential_reactions\n'), ((42933, 42996), 'cameo.flux_analysis.analysis.find_essential_metabolites', 'find_essential_metabolites', (['core_model'], {'force_steady_state': '(True)'}), '(core_model, force_steady_state=True)\n', (42959, 42996), False, 'from cameo.flux_analysis.analysis import find_essential_metabolites\n'), ((43614, 43678), 'cameo.flux_analysis.analysis.find_essential_metabolites', 'find_essential_metabolites', (['core_model'], {'force_steady_state': '(False)'}), '(core_model, 
force_steady_state=False)\n', (43640, 43678), False, 'from cameo.flux_analysis.analysis import find_essential_metabolites\n'), ((49857, 49881), 'pickle.dumps', 'pickle.dumps', (['core_model'], {}), '(core_model)\n', (49869, 49881), False, 'import pickle\n'), ((47438, 47472), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', (['core_model', 'r.id'], {}), '(core_model, r.id)\n', (47454, 47472), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((47524, 47555), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', (['core_model', 'r'], {}), '(core_model, r)\n', (47540, 47555), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((47652, 47686), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', (['core_model', 'm.id'], {}), '(core_model, m.id)\n', (47668, 47686), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n'), ((47738, 47769), 'cameo.core.utils.get_reaction_for', 'get_reaction_for', (['core_model', 'm'], {}), '(core_model, m)\n', (47754, 47769), False, 'from cameo.core.utils import get_reaction_for, load_medium, medium\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import bytes_to_native_str
from hypothesis import given
import hypothesis.strategies as st
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util
from caffe2.python.core import CreateOperator, GradientRegistry
from caffe2.python import workspace
import numpy as np
# First, we will set up a few gradient registry entries so that we can manually
# construct some test cases.
def NeedAll(op, g_output):
    """A sanity check to make sure that all the gradient are given."""
    missing = [name for name, g in zip(op.output, g_output) if g is None]
    if missing:
        # Complain about the first output whose gradient is absent, matching
        # the original first-encountered reporting order.
        raise RuntimeError(
            'Need gradient for "%s" but it is not provided.' % missing[0])
    return g_output
def GIS(op):
    """A test util function to generate the gradient name for input."""
    grad_names = []
    for blob in op.input:
        grad_names.append(blob + '_grad')
    return grad_names
def CopyDeviceOption(op, src_op):
    """Copy src_op's device option onto op (if src_op has one); return op."""
    if not src_op.HasField('device_option'):
        # Nothing to propagate; hand the operator back untouched.
        return op
    op.device_option.CopyFrom(src_op.device_option)
    return op
# First gradient: (in -> out) leading to (out_grad -> in_grad)
@GradientRegistry.RegisterGradient('Direct')
def AddDirectGradient(op, g_output):
    """Gradient for 'Direct': one DirectGradient op mapping the output
    gradients straight onto the input gradients."""
    grad_op = CreateOperator('DirectGradient', NeedAll(op, g_output), GIS(op))
    return CopyDeviceOption(grad_op, op), GIS(op)
# Second gradient: (in -> out) leading to (out, out_grad -> in_grad)
@GradientRegistry.RegisterGradient('UseOutput')
def AddUseOutputGradient(op, g_output):
    """Gradient for 'UseOutput': the gradient op consumes the forward
    outputs in addition to the output gradients."""
    grad_inputs = list(op.output) + NeedAll(op, g_output)
    grad_op = CreateOperator('UseOutputGradient', grad_inputs, GIS(op))
    return CopyDeviceOption(grad_op, op), GIS(op)
@GradientRegistry.RegisterGradient('UseInput')
def AddUseInputGradient(op, g_output):
    """Gradient for 'UseInput': the gradient op consumes the forward
    inputs in addition to the output gradients."""
    grad_inputs = list(op.input) + NeedAll(op, g_output)
    grad_op = CreateOperator('UseInputGradient', grad_inputs, GIS(op))
    return CopyDeviceOption(grad_op, op), GIS(op)
@GradientRegistry.RegisterGradient('Nogradient')
def AddNogradient(op, g_output):
    """Gradient for 'Nogradient': produces no gradient operators and a None
    gradient for every input."""
    return [], [None] * len(op.input)
class TestGradientCalculation(test_util.TestCase):
    """Exercises GradientRegistry.GetBackwardPass against the dummy
    'Direct'/'UseOutput'/'UseInput'/'Nogradient' operators registered above,
    checking the exact sequence of gradient operators it emits."""
    @given(device_option=st.sampled_from([
        None,
        core.DeviceOption(caffe2_pb2.CUDA, 1)]))
    def testDirect(self, device_option):
        # Simple two-op chain; the backward pass should mirror it in reverse
        # and keep the forward device option on every gradient op.
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('Direct', 'hidden', 'out'),
        ]
        if device_option:
            for op in operators:
                op.device_option.CopyFrom(device_option)
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
            CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
        ]
        if device_option:
            for op in desired_grad_operators:
                op.device_option.CopyFrom(device_option)
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testDirectImplicitGradientSource(self):
        # Passing a list of blob names (instead of a {blob: grad} dict) asks
        # for an auto-generated ConstantFill(1.0) gradient source.
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('Direct', 'hidden', 'out'),
        ]
        desired_grad_operators = [
            CreateOperator(
                "ConstantFill", 'out', "out_autogen_grad", value=1.0),
            CreateOperator(
                'DirectGradient', 'out_autogen_grad', 'hidden_grad'),
            CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, ['out'])
        self.assertEqual(gradients, desired_grad_operators)
    def testDoesNotGenerateUnnecessaryGradients(self):
        # Only 'hidden' is requested, so no gradient op for the second
        # forward op should be generated.
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('Direct', 'hidden', 'out'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'hidden': 'hidden_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testDirectButNoOutputGradientGiven(self):
        # No seed gradients at all -> empty backward pass.
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('Direct', 'hidden', 'out'),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {})
        self.assertEqual(gradients, [])
    def testDirectInPlace(self):
        # In-place forward op ('in' -> 'in') should yield an in-place
        # gradient op as well.
        operators = [
            CreateOperator('Direct', 'in', 'in'),
            CreateOperator('Direct', 'in', 'out'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'out_grad', 'in_grad'),
            CreateOperator('DirectGradient', 'in_grad', 'in_grad'),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testVersionMismatch(self):
        # 'x' is overwritten after being consumed; the registry must detect
        # the stale-version use and raise.
        operators = [
            CreateOperator('Direct', 'x', 'x'),
            CreateOperator('Direct', 'y', 'x'),
            CreateOperator('Direct', 'x', 'y'),
        ]
        try:
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'y': 'y_grad'})
            self.assertFalse(True, "Should raise exception of incorrect version")
        except RuntimeError as e:
            print(e)
            self.assertTrue("version" in str(e))
            pass
    def testUseOutput(self):
        # 'UseOutput' gradients need the forward outputs as extra inputs.
        operators = [
            CreateOperator('UseOutput', 'in', 'hidden'),
            CreateOperator('UseOutput', 'hidden', 'out'),
            CreateOperator('Direct', 'out', 'sink'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
            CreateOperator(
                'UseOutputGradient',
                ['out', 'out_grad'], 'hidden_grad'
            ),
            CreateOperator(
                'UseOutputGradient',
                ['hidden', 'hidden_grad'], 'in_grad'
            ),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'sink': 'sink_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testUseOutputInPlace(self):
        operators = [
            CreateOperator('UseOutput', 'in', 'in'),
            CreateOperator('UseOutput', 'in', 'out'),
            CreateOperator('Direct', 'out', 'sink'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
            CreateOperator(
                'UseOutputGradient',
                ['out', 'out_grad'], 'in_grad'
            ),
            CreateOperator(
                'UseOutputGradient',
                ['in', 'in_grad'], 'in_grad'
            ),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'sink': 'sink_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testUseOutputButOutputHasBeenChanged(self):
        operators = [
            CreateOperator('UseOutput', 'in', 'hidden'),
            # Note here: we overwrite hidden, but hidden will be needed by the
            # gradient calculation of the first operator, so the gradient
            # registry should return an error.
            CreateOperator('Direct', 'hidden', 'hidden'),
            CreateOperator('UseOutput', 'hidden', 'out'),
            CreateOperator('Direct', 'out', 'sink'),
        ]
        with self.assertRaises(RuntimeError):
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'sink': 'sink_grad'})
    def testUseInput(self):
        # 'UseInput' gradients need the forward inputs as extra inputs.
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('UseInput', 'hidden', 'out'),
            CreateOperator('Direct', 'out', 'sink'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
            CreateOperator(
                'UseInputGradient',
                ['hidden', 'out_grad'], 'hidden_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'hidden_grad', 'in_grad'
            ),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'sink': 'sink_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testUseInputButInputHasBeenChanged(self):
        """Test gradient for the following case:
        in -> out, with UseInput
        in -> in
        Since we overwrite in in op#1, but in will be needed by the gradient
        calculation of op#0, the gradient registry should raise an error.
        """
        operators = [
            CreateOperator('UseInput', 'in', 'out'),
            CreateOperator('Direct', 'in', 'in'),
        ]
        with self.assertRaises(RuntimeError):
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'out': 'out_grad'})
    @given(device_option=st.sampled_from([
        None,
        core.DeviceOption(caffe2_pb2.CUDA, 1)]))
    def testMultiUseInput(self, device_option):
        """Test gradient for the following case:
        in -> hidden1
        in -> hidden2
        hidden1, hidden2 -> out
        """
        operators = [
            CreateOperator('Direct', 'in', 'hidden1'),
            CreateOperator('Direct', 'in', 'hidden2'),
            CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
        ]
        if device_option:
            for op in operators:
                op.device_option.CopyFrom(device_option)
        # 'in' feeds two ops, so its gradient is auto-split and then summed.
        desired_grad_operators = [
            CreateOperator(
                'DirectGradient',
                'out_grad', ['hidden1_grad', 'hidden2_grad']
            ),
            CreateOperator(
                'DirectGradient',
                'hidden2_grad', 'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'hidden1_grad', '_in_grad_autosplit_0'
            ),
            CreateOperator(
                'Sum',
                ['in_grad', '_in_grad_autosplit_0'], 'in_grad'
            ),
        ]
        if device_option:
            for op in desired_grad_operators:
                op.device_option.CopyFrom(device_option)
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {"out": "out_grad"})
        self.assertEqual(gradients, desired_grad_operators)
    def testMultiUseInputButWithNoGradient(self):
        """Test gradient for the following case:
        in -> hidden1
        in -(no gradient)-> hidden2
        hidden1, hidden2 -> out
        """
        operators = [
            CreateOperator('Direct', 'in', 'hidden1'),
            CreateOperator('Nogradient', 'in', 'hidden2'),
            CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
        ]
        # Only one branch carries gradient, so no Sum is needed.
        desired_grad_operators = [
            CreateOperator(
                'DirectGradient',
                'out_grad', ['hidden1_grad', 'hidden2_grad']
            ),
            CreateOperator(
                'DirectGradient',
                'hidden1_grad', 'in_grad'
            ),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testMultiUseInputAndMultipleVersions(self):
        """Test gradient for the following case:
        in -> in
        in -> hidden1, hidden2
        hidden1, hidden2 -> out
        """
        operators = [
            CreateOperator('Direct', 'in', 'in'),
            CreateOperator('Direct', 'in', 'hidden1'),
            CreateOperator('Direct', 'in', 'hidden2'),
            CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
        ]
        desired_grad_operators = [
            CreateOperator(
                'DirectGradient',
                'out_grad', ['hidden1_grad', 'hidden2_grad']
            ),
            CreateOperator(
                'DirectGradient',
                'hidden2_grad', 'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'hidden1_grad', '_in_grad_autosplit_0'
            ),
            CreateOperator(
                'Sum',
                ['in_grad', '_in_grad_autosplit_0'], 'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'in_grad', 'in_grad'
            ),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testMultiUseInputAndMultipleVersionsBig(self):
        """Test gradient for the following case:
        in -> in
        in -> hidden1, hidden2
        hidden1, hidden2 -> in
        in -> hidden3, hidden4, hidden5
        hidden3, hidden4, hidden5 -> out
        """
        operators = [
            CreateOperator('Direct', 'in', 'in'),
            CreateOperator('Direct', 'in', 'hidden1'),
            CreateOperator('Direct', 'in', 'hidden2'),
            CreateOperator('Direct', ['hidden1', 'hidden2'], 'in'),
            CreateOperator('Direct', 'in', 'hidden3'),
            CreateOperator('Direct', 'in', 'hidden4'),
            CreateOperator('Direct', 'in', 'hidden5'),
            CreateOperator('Direct', ['hidden3', 'hidden4', 'hidden5'], 'out'),
        ]
        # Each version of 'in' gets its own accumulation (Sum) group.
        desired_grad_operators = [
            CreateOperator(
                'DirectGradient',
                'out_grad', ['hidden3_grad', 'hidden4_grad', 'hidden5_grad']
            ),
            CreateOperator(
                'DirectGradient',
                'hidden5_grad', 'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'hidden4_grad', '_in_grad_autosplit_0'
            ),
            CreateOperator(
                'DirectGradient',
                'hidden3_grad', '_in_grad_autosplit_1'
            ),
            CreateOperator(
                'Sum',
                ['in_grad', '_in_grad_autosplit_0',
                 '_in_grad_autosplit_1'],
                'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'in_grad', ['hidden1_grad', 'hidden2_grad']
            ),
            CreateOperator(
                'DirectGradient',
                'hidden2_grad', 'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'hidden1_grad', '_in_grad_autosplit_0'
            ),
            CreateOperator(
                'Sum',
                ['in_grad', '_in_grad_autosplit_0'],
                'in_grad'
            ),
            CreateOperator(
                'DirectGradient',
                'in_grad', 'in_grad'
            ),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        for s in gradients:
            print(str(s))
        self.assertEqual(gradients, desired_grad_operators)
    def testGradientMappingUsingSumOp(self):
        """Since Sum is used in accumulating gradients, we will test if
        it is OK to also explicitly use it in the graph."""
        operators = [
            CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
            CreateOperator('Sum', 'fc', 'agg'),
            CreateOperator('AveragedLoss', 'agg', 'loss'),
        ]
        # This should run correctly.
        gradient_ops, _ = GradientRegistry.GetBackwardPass(
            operators, {'loss': 'loss_grad'})
        for s in gradient_ops:
            print(str(s))
    def testGradientCalculationWithPrint(self):
        """Test a common use case where we have Print in the forward pass."""
        operators = [
            CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
            CreateOperator('Print', 'fc', []),
            CreateOperator('AveragedLoss', 'fc', 'loss'),
        ]
        desired_grad_operators = [
            CreateOperator('AveragedLossGradient',
                           ['fc', 'loss_grad'], 'fc_grad'),
            CreateOperator('FCGradient', ['in', 'w', 'fc_grad'],
                           ['w_grad', 'b_grad', 'in_grad']),
        ]
        for g in desired_grad_operators:
            g.is_gradient_op = 1
        # This should run correctly.
        gradient_ops, _ = GradientRegistry.GetBackwardPass(
            operators, {'loss': 'loss_grad'})
        for s in gradient_ops:
            print(str(s))
        self.assertEqual(gradient_ops, desired_grad_operators)
    def testStopGradient(self):
        # StopGradient cuts the backward pass: nothing upstream of 'hidden2'
        # gets a gradient.
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('StopGradient', 'hidden', 'hidden2'),
            CreateOperator('Direct', 'hidden2', 'out'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'out_grad', 'hidden2_grad'),
        ]
        gradients, _ = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
    def testStopGradientOrphan(self):
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('StopGradient', 'hidden', 'auto_blobx'),
            CreateOperator('Direct', 'hidden', 'out'),
        ]
        with self.assertRaises(ValueError):
            # This should complain about incorrect use of StopGradient
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'out': 'out_grad'})
    def testStopGradientInplace(self):
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('StopGradient', 'hidden', 'hidden'),
            CreateOperator('Direct', 'hidden', 'out'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
        ]
        gradients, grad_map = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
        self.assertEqual(grad_map, {'out': 'out_grad'})
    def testStopGradientWithMultiUseOperators(self):
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
            CreateOperator('Direct', 'hidden', 'hidden2'),
            CreateOperator('StopGradient', 'hidden', 'hidden3'),
            CreateOperator('Direct', ['hidden2', 'hidden3'], 'out'),
        ]
        desired_grad_operators = [
            CreateOperator('DirectGradient', 'out_grad',
                           ['hidden2_grad', 'hidden3_grad']),
            CreateOperator('DirectGradient', 'hidden2_grad', 'hidden_grad'),
            CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
        ]
        gradients, grad_map = GradientRegistry.GetBackwardPass(
            operators, {'out': 'out_grad'})
        self.assertEqual(gradients, desired_grad_operators)
        self.assertEqual(
            grad_map, {'out': 'out_grad', 'hidden2': 'hidden2_grad',
                       'hidden3': 'hidden3_grad', 'hidden': 'hidden_grad',
                       'in': 'in_grad'})
    def test_zero_gradient(self):
        net = core.Net("zero_grad_test")
        hidden_prev, cell, gates, seq_lengths, timestep =\
            net.AddExternalInput("h", "c", "g", "s", "t")
        hidden, cell = net.LSTMUnit(
            [hidden_prev, cell, gates, seq_lengths, timestep],
            ["hidden_t", "cell_t"])
        # Without ZeroGradient on 'cell' the backward pass cannot be built.
        with self.assertRaises(Exception):
            net.AddGradientOperators([hidden])
        net.ZeroGradient(cell, [])
        net.AddGradientOperators([hidden])
    def test_two_grads(self):
        net = core.Net("test_two_grads")
        input, two, three = net.AddExternalInput("input", "two", "three")
        m1 = net.Mul([input, two], "mul_1")
        m2 = net.Mul([m1, three], "mul_2")
        grad_map = net.AddGradientOperators([m2, m1])
        workspace.ResetWorkspace()
        workspace.blobs[input] = np.array([1]).astype(np.float32)
        workspace.blobs[two] = np.array([2]).astype(np.float32)
        workspace.blobs[three] = np.array([3]).astype(np.float32)
        workspace.RunNetOnce(net)
        print(net.Proto())
        for blob in workspace.blobs:
            print(blob, workspace.blobs[blob])
        print("Input grad: ", workspace.blobs[grad_map[str(input)]])
        # d(m2 + m1)/d(input) = three * two + two = 3*2 + 2 = 8
        assert workspace.blobs[grad_map[str(input)]] == 8.0
# Skip if sparse operators are not available
@unittest.skipIf(not core.IsOperator('SparseFunHash'),
                'Sparse operators not available')
class TestSparseGradientsAccumulation(test_util.TestCase):
    """Checks that sparse gradients (indices/values pairs) are accumulated by
    concatenation rather than by Sum when one blob feeds several ops."""
    def testSparseAccumulationWithValues(self):
        # The gradient for "Gather" only computes values. indices are directly
        # passed from the input
        #
        # x1-->Gather-->x4-->
        #        |          |
        # x2-----+     DotProduct-->x6
        #        |          |
        # x3-->Gather-->x5-->
        net = core.Net("test_net")
        net.Gather(["x2", "x1"], "x4")
        net.Gather(["x2", "x3"], "x5")
        net.DotProduct(["x4", "x5"], "x6")
        net.AddGradientOperators(["x6"])
        # The last two ops concatenate the indices and values contributions.
        sum_op_i = net.Proto().op[-2]
        sum_op_v = net.Proto().op[-1]
        self.assertEqual(sum_op_i.input[0], "x3")
        self.assertEqual(sum_op_i.input[1], "x1")
        self.assertEqual(sum_op_i.output[0], "x2_grad_indices_concat")
        self.assertEqual(sum_op_v.input[0], "x5_grad")
        self.assertEqual(sum_op_v.input[1], "x4_grad")
        self.assertEqual(sum_op_v.output[0], "x2_grad_values_concat")
    def testSparseGradientToDense(self):
        #
        #                                  x1-->Gather-->x4-->
        #                                   |                |
        # x0, w, b-->FC-->x2-->EnsureDenseGradient-->x2---+  DotProduct-->x6
        #                                   |                |
        #                                  x3-->Gather-->x5-->
        net = core.Net("test_net")
        net.FC(["x0", "w", "b"], "x2")
        net.EnsureDense(["x2"], "x2")
        net.Gather(["x2", "x1"], "x4")
        net.Gather(["x2", "x3"], "x5")
        net.DotProduct(["x4", "x5"], "x6")
        net.AddGradientOperators(["x6"])
        # EnsureDense turns the concatenated sparse gradient back to dense.
        ensure_dense_op = net.Proto().op[-2]
        self.assertEqual(ensure_dense_op.input[0], "x2_grad_indices_concat")
        self.assertEqual(ensure_dense_op.input[1], "x2_grad_values_concat")
        self.assertEqual(ensure_dense_op.output[0], "x2_grad")
    def testSparseAccumulationWithIndicesAndValues(self):
        # The gradient for "SparseFunHash" computes both indices and values
        #
        # x1-------->
        #           |
        # x2---->   |
        #       |   |
        # x3---SparseFunHash-->x8
        #            /
        # x4---+    DotProduct-->x10
        #            \
        # x5---SparseFunHash-->x9
        #       |   |
        # x6---->   |
        #           |
        # x7-------->
        net = core.Net("test_net")
        net.SparseFunHash(["x1", "x2", "x3", "x4"], "x8")
        net.SparseFunHash(["x5", "x6", "x7", "x4"], "x9")
        net.DotProduct(["x8", "x9"], "x10")
        net.AddGradientOperators(["x10"])
        # 'x4' is shared, so its indices and values are each auto-split and
        # concatenated.
        sum_op_i = net.Proto().op[-2]
        sum_op_v = net.Proto().op[-1]
        self.assertEqual(sum_op_i.input[0], "_x4_grad_indices_autosplit_0")
        self.assertEqual(sum_op_i.input[1], "_x4_grad_indices_autosplit_1")
        self.assertEqual(sum_op_i.output[0], "x4_grad_indices_concat")
        self.assertEqual(sum_op_v.input[0], "_x4_grad_values_autosplit_0")
        self.assertEqual(sum_op_v.input[1], "_x4_grad_values_autosplit_1")
        self.assertEqual(sum_op_v.output[0], "x4_grad_values_concat")
class TestGradientsAccumulationWithNoGradientOps(test_util.TestCase):
    """Checks dense gradient accumulation (Sum) when a blob has several
    consumers, including consumers that produce no gradient at all."""
    def testNormalAccumulation(self):
        #  x1-->Relu--x2----------------->DotProduct-->x4
        #               |                 |
        #                -->Softmax-->x3-->
        net = core.Net("test_net")
        net.Relu("x1", "x2")
        net.Softmax("x2", "x3")
        net.DotProduct(["x2", "x3"], "x4")
        net.AddGradientOperators(["x4"])
        # Second-to-last op sums the two contributions to x2_grad.
        sum_op = net.Proto().op[-2]
        self.assertEqual(sum_op.input[0], "x2_grad")
        self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
        self.assertEqual(sum_op.output[0], "x2_grad")
    def testAccumulationWithNoGradientBranch(self):
        #                -->PRINT
        #               |
        #  x1-->Relu--x2----------------->DotProduct-->x4
        #               |                 |
        #                -->Softmax-->x3-->
        net = core.Net("test_net")
        net.Relu("x1", "x2")
        net.Print("x2", [])
        net.Softmax("x2", "x3")
        net.DotProduct(["x2", "x3"], "x4")
        net.AddGradientOperators(["x4"])
        # The Print branch contributes no gradient; accumulation is unchanged.
        sum_op = net.Proto().op[-2]
        self.assertEqual(sum_op.input[0], "x2_grad")
        self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
        self.assertEqual(sum_op.output[0], "x2_grad")
class TestGradientsAccumulationWithPassThroughGradients(test_util.TestCase):
    """Checks accumulation behavior for ops (Add/Sub) whose gradient simply
    passes the output gradient through to the inputs."""
    def testAddOpInMiddle(self):
        # x1-->Relu--x2----------------->Add-->x4
        #              |                  |
        #               -->Softmax-->x3-->
        #
        # Expected gradient graph:
        #
        # x1_g<--ReluG<--x2_g<--Sum<------------<---------x4_g
        #                        |                         |
        #                         <--_x2_g_split_0<--SoftmaxG
        net = core.Net("test_net")
        net.Relu("x1", "x2")
        net.Softmax("x2", "x3")
        net.Add(["x2", "x3"], "x4")
        input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
        sum_op = net.Proto().op[-2]
        self.assertEqual(sum_op.input[0], "x2_grad")
        self.assertEqual(sum_op.input[1], "x4_grad")
        self.assertEqual(sum_op.output[0], "x2_grad")
        self.assertEqual(input_to_grad["x1"], "x1_grad")
    def testAddAndDynamicConstant(self):
        net = core.Net("test_net")
        net.FC(["x1", "x1_w", "x1_b"], ["x2"])
        net.Relu("x2", "x2")
        net.ConstantFill(["x2"], ["x3"])
        net.Add(["x2", "x3"], "x4")
        net.FC(["x4", "x4_w", "x4_b"], ["x5"])
        net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
        input_to_grad = net.AddGradientOperators(["loss"])
        # The pass-through Add gradient should need no explicit Sum here.
        for op in net.Proto().op:
            self.assertFalse(op.type == 'Sum')
        self.assertTrue("x4" in input_to_grad)
        self.assertTrue("x1" in input_to_grad)
        self.assertEqual(input_to_grad["x1"], "x1_grad")
    def testAddAndStaticConstant(self):
        net = core.Net("test_net")
        net.FC(["x1", "x1_w", "x1_b"], ["x2"])
        net.Relu("x2", "x2")
        net.ConstantFill([], ["x3"], shape=[1])
        net.Add(["x2", "x3"], "x4", broadcast=1)
        net.FC(["x4", "x4_w", "x4_b"], ["x5"])
        net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
        input_to_grad = net.AddGradientOperators(["loss"])
        print(input_to_grad)
        self.assertTrue("x1" in input_to_grad)
        self.assertEqual(input_to_grad["x1"], "x1_grad")
    def testSubOpInMiddle(self):
        # x1-->Relu--x2----------------->Sub-->x4
        #              |                  |
        #               -->Softmax-->x3-->
        #
        # Expected gradient graph:
        #
        # x1_g<--ReluG<--x2_g<--Sum<------------<-----------------------x4_g
        #                        |                                       |
        #                         <--_x2_g_split_0<--SoftmaxG<--x3_g<--neg
        net = core.Net("test_net")
        net.Relu("x1", "x2")
        net.Softmax("x2", "x3")
        net.Sub(["x2", "x3"], "x4")
        input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
        print(str(net.Proto()))
        sum_op = net.Proto().op[-2]
        self.assertEqual(sum_op.input[0], "x2_grad")
        self.assertEqual(sum_op.input[1], "x4_grad")
        self.assertEqual(sum_op.output[0], "x2_grad")
        self.assertEqual(input_to_grad["x1"], "x1_grad")
    def testAddOpAtLeaf(self):
        # x1
        #   \
        #    -->Add-->x4
        #   /           \
        # x2             -->DotProduct-->x6
        #   \           /
        #    -->Add-->x5
        #   /
        # x3
        #
        # Expected gradient graph:
        #
        # x2_g<--Sum<--x4_g<--DotProductG<--x6_g
        #         |     |           |
        #          <---x5_g<--------
        net = core.Net("test_net")
        net.Add(["x1", "x2"], "x4")
        net.Add(["x2", "x3"], "x5")
        net.DotProduct(["x4", "x5"], "x6")
        input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
        # x1 and x3 receive the pass-through gradients directly; only the
        # shared x2 needs an explicit Sum.
        sum_op = net.Proto().op[-1]
        self.assertEqual(sum_op.input[0], "x5_grad")
        self.assertEqual(sum_op.input[1], "x4_grad")
        self.assertEqual(sum_op.output[0], "x2_grad")
        self.assertEqual(input_to_grad["x1"], "x4_grad")
        self.assertEqual(input_to_grad["x2"], "x2_grad")
        self.assertEqual(input_to_grad["x3"], "x5_grad")
    def testSubOpAtLeaf(self):
        # x1
        #   \
        #    -->Sub-->x4
        #   /           \
        # x2             -->DotProduct-->x6
        #   \           /
        #    -->Sub-->x5
        #   /
        # x3
        #
        # Expected gradient graph:
        #
        # x2_g<-------Sum<--x2_g_split_0<--neg<--x4_g<--DotProductG<--x6_g
        #              |                                       |
        # x3_g<--neg<--<--x5_g<--------------------------------
        net = core.Net("test_net")
        net.Sub(["x1", "x2"], "x4")
        net.Sub(["x2", "x3"], "x5")
        net.DotProduct(["x4", "x5"], "x6")
        input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
        # Sub negates the second input's gradient, so x2 and x3 get their own
        # gradient blobs instead of raw pass-through names.
        sum_op = net.Proto().op[-1]
        self.assertEqual(sum_op.input[0], "x2_grad")
        self.assertEqual(sum_op.input[1], "x5_grad")
        self.assertEqual(sum_op.output[0], "x2_grad")
        self.assertEqual(input_to_grad["x1"], "x4_grad")
        self.assertEqual(input_to_grad["x2"], "x2_grad")
        self.assertEqual(input_to_grad["x3"], "x3_grad")
    def testMultiLayerAddOps(self):
        # x1
        #   \
        #    -->Add-->x4
        #   /           \
        # x2             -->Add-->x6
        #   \           /
        #    -->Add-->x5
        #   /
        # x3
        #
        # Expected gradient graph:
        #
        # x2_g<--Sum<-----x6_g
        #         |        |
        #          <-------
        net = core.Net("test_net")
        net.Add(["x1", "x2"], "x4")
        net.Add(["x2", "x3"], "x5")
        net.Add(["x4", "x5"], "x6")
        input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
        # All gradients pass straight through; x2 sums two copies of x6_grad.
        sum_op = net.Proto().op[-1]
        self.assertEqual(sum_op.input[0], "x6_grad")
        self.assertEqual(sum_op.input[1], "x6_grad")
        self.assertEqual(sum_op.output[0], "x2_grad")
        self.assertEqual(input_to_grad["x1"], "x6_grad")
        self.assertEqual(input_to_grad["x2"], "x2_grad")
        self.assertEqual(input_to_grad["x3"], "x6_grad")
    def testMultiLayerSubOps(self):
        # x1
        #   \
        #    -->Sub-->x4
        #   /           \
        # x2             -->Sub-->x6
        #   \           /
        #    -->Sub-->x5
        #   /
        # x3
        #
        # Expected gradient graph:
        #
        # x2_g<--Sum<-----x6_g
        #         |        |
        #          <-------
        net = core.Net("test_net")
        net.Sub(["x1", "x2"], "x4")
        net.Sub(["x2", "x3"], "x5")
        net.Sub(["x4", "x5"], "x6")
        input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
        sum_op = net.Proto().op[-1]
        self.assertEqual(sum_op.input[0], "x2_grad")
        self.assertEqual(sum_op.input[1], "x5_grad")
        self.assertEqual(sum_op.output[0], "x2_grad")
        self.assertEqual(input_to_grad["x1"], "x6_grad")
        self.assertEqual(input_to_grad["x2"], "x2_grad")
        self.assertEqual(input_to_grad["x3"], "x3_grad")
    def testAccumulationRuns(self):
        net = core.Net("test_net")
        input, one, two, three = net.AddExternalInput(
            "input", "one", "two", "three")
        m1 = net.Mul([input, two], "mul_1")
        m2 = net.Mul([input, three], "mul_2")
        sub = net.Sub([m1, one])
        grad_map = net.AddGradientOperators([m2, sub])
        workspace.ResetWorkspace()
        workspace.blobs[one] = np.array([1]).astype(np.float32)
        workspace.blobs[input] = np.array([1]).astype(np.float32)
        workspace.blobs[two] = np.array([2]).astype(np.float32)
        workspace.blobs[three] = np.array([3]).astype(np.float32)
        workspace.RunNetOnce(net)
        print("Input grad: ", workspace.blobs[grad_map[str(input)]])
        # d(m2 + sub)/d(input) = three + two = 3 + 2 = 5
        assert workspace.blobs[grad_map[str(input)]] == 5.0
    def testIncorrectOperator(self):
        net = core.Net("test_net")
        a, b, one = net.AddExternalInput("a", "b", "one")
        m1 = net.Mul(a, b) # does not have second output
        sub = net.Sub([m1, one])
        try:
            net.AddGradientOperators([sub])
            self.assertFalse(True, "Did not throw exception")
        except Exception as e:
            # Mul called with a single (non-list) input violates its schema.
            self.assertTrue("schema" in str(e))
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"caffe2.python.core.Net",
"caffe2.python.core.GradientRegistry.GetBackwardPass",
"caffe2.python.core.GradientRegistry.RegisterGradient",
"caffe2.python.workspace.RunNetOnce",
"caffe2.python.core.DeviceOption",
"caffe2.python.workspace.ResetWorkspace",
"caffe2.python.core.CreateOperato... | [((1220, 1263), 'caffe2.python.core.GradientRegistry.RegisterGradient', 'GradientRegistry.RegisterGradient', (['"""Direct"""'], {}), "('Direct')\n", (1253, 1263), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((1529, 1575), 'caffe2.python.core.GradientRegistry.RegisterGradient', 'GradientRegistry.RegisterGradient', (['"""UseOutput"""'], {}), "('UseOutput')\n", (1562, 1575), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((1829, 1874), 'caffe2.python.core.GradientRegistry.RegisterGradient', 'GradientRegistry.RegisterGradient', (['"""UseInput"""'], {}), "('UseInput')\n", (1862, 1874), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((2125, 2172), 'caffe2.python.core.GradientRegistry.RegisterGradient', 'GradientRegistry.RegisterGradient', (['"""Nogradient"""'], {}), "('Nogradient')\n", (2158, 2172), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((33784, 33799), 'unittest.main', 'unittest.main', ([], {}), '()\n', (33797, 33799), False, 'import unittest\n'), ((3069, 3133), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (3101, 3133), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3734, 3786), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "['out']"], {}), "(operators, ['out'])\n", (3766, 3786), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4197, 4267), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'hidden': 'hidden_grad'}"], {}), "(operators, {'hidden': 'hidden_grad'})\n", (4229, 4267), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4556, 4603), 
'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', '{}'], {}), '(operators, {})\n', (4588, 4603), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5029, 5093), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (5061, 5093), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6333, 6399), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'sink': 'sink_grad'}"], {}), "(operators, {'sink': 'sink_grad'})\n", (6365, 6399), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((7093, 7159), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'sink': 'sink_grad'}"], {}), "(operators, {'sink': 'sink_grad'})\n", (7125, 7159), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((8516, 8582), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'sink': 'sink_grad'}"], {}), "(operators, {'sink': 'sink_grad'})\n", (8548, 8582), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((10596, 10660), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (10628, 10660), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11477, 11541), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (11509, 11541), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((12771, 12835), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 
'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (12803, 12835), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((15130, 15194), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (15162, 15194), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((15760, 15826), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'loss': 'loss_grad'}"], {}), "(operators, {'loss': 'loss_grad'})\n", (15792, 15826), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16638, 16704), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'loss': 'loss_grad'}"], {}), "(operators, {'loss': 'loss_grad'})\n", (16670, 16704), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17220, 17284), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (17252, 17284), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18223, 18287), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (18255, 18287), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((19093, 19157), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (19125, 19157), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((19491, 19517), 'caffe2.python.core.Net', 'core.Net', (['"""zero_grad_test"""'], 
{}), "('zero_grad_test')\n", (19499, 19517), False, 'from caffe2.python import core, test_util\n'), ((19985, 20011), 'caffe2.python.core.Net', 'core.Net', (['"""test_two_grads"""'], {}), "('test_two_grads')\n", (19993, 20011), False, 'from caffe2.python import core, test_util\n'), ((20236, 20262), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (20260, 20262), False, 'from caffe2.python import workspace\n'), ((20467, 20492), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (20487, 20492), False, 'from caffe2.python import workspace\n'), ((21287, 21307), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (21295, 21307), False, 'from caffe2.python import core, test_util\n'), ((22316, 22336), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (22324, 22336), False, 'from caffe2.python import core, test_util\n'), ((23353, 23373), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (23361, 23373), False, 'from caffe2.python import core, test_util\n'), ((20801, 20833), 'caffe2.python.core.IsOperator', 'core.IsOperator', (['"""SparseFunHash"""'], {}), "('SparseFunHash')\n", (20816, 20833), False, 'from caffe2.python import core, test_util\n'), ((24367, 24387), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (24375, 24387), False, 'from caffe2.python import core, test_util\n'), ((25019, 25039), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (25027, 25039), False, 'from caffe2.python import core, test_util\n'), ((25933, 25953), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (25941, 25953), False, 'from caffe2.python import core, test_util\n'), ((26428, 26448), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (26436, 26448), False, 'from caffe2.python import core, 
test_util\n'), ((27063, 27083), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (27071, 27083), False, 'from caffe2.python import core, test_util\n'), ((28039, 28059), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (28047, 28059), False, 'from caffe2.python import core, test_util\n'), ((28960, 28980), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (28968, 28980), False, 'from caffe2.python import core, test_util\n'), ((30039, 30059), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (30047, 30059), False, 'from caffe2.python import core, test_util\n'), ((31002, 31022), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (31010, 31022), False, 'from caffe2.python import core, test_util\n'), ((31958, 31978), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (31966, 31978), False, 'from caffe2.python import core, test_util\n'), ((32573, 32593), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (32581, 32593), False, 'from caffe2.python import core, test_util\n'), ((32881, 32907), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (32905, 32907), False, 'from caffe2.python import workspace\n'), ((33176, 33201), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (33196, 33201), False, 'from caffe2.python import workspace\n'), ((33383, 33403), 'caffe2.python.core.Net', 'core.Net', (['"""test_net"""'], {}), "('test_net')\n", (33391, 33403), False, 'from caffe2.python import core, test_util\n'), ((2504, 2544), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (2518, 2544), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((2558, 2599), 
'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""out"""'], {}), "('Direct', 'hidden', 'out')\n", (2572, 2599), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((2774, 2833), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', '"""hidden_grad"""'], {}), "('DirectGradient', 'out_grad', 'hidden_grad')\n", (2788, 2833), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((2847, 2905), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden_grad', 'in_grad')\n", (2861, 2905), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3290, 3330), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (3304, 3330), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3344, 3385), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""out"""'], {}), "('Direct', 'hidden', 'out')\n", (3358, 3385), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3444, 3512), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""ConstantFill"""', '"""out"""', '"""out_autogen_grad"""'], {'value': '(1.0)'}), "('ConstantFill', 'out', 'out_autogen_grad', value=1.0)\n", (3458, 3512), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3543, 3610), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_autogen_grad"""', '"""hidden_grad"""'], {}), "('DirectGradient', 'out_autogen_grad', 'hidden_grad')\n", (3557, 3610), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3641, 3699), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', 
'"""hidden_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden_grad', 'in_grad')\n", (3655, 3699), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((3950, 3990), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (3964, 3990), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4004, 4045), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""out"""'], {}), "('Direct', 'hidden', 'out')\n", (4018, 4045), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4104, 4162), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden_grad', 'in_grad')\n", (4118, 4162), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4426, 4466), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (4440, 4466), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4480, 4521), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""out"""'], {}), "('Direct', 'hidden', 'out')\n", (4494, 4521), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4725, 4761), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""in"""'], {}), "('Direct', 'in', 'in')\n", (4739, 4761), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4775, 4812), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""out"""'], {}), "('Direct', 'in', 'out')\n", (4789, 4812), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4871, 4926), 'caffe2.python.core.CreateOperator', 'CreateOperator', 
(['"""DirectGradient"""', '"""out_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'out_grad', 'in_grad')\n", (4885, 4926), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((4940, 4994), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""in_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'in_grad', 'in_grad')\n", (4954, 4994), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5237, 5271), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""x"""', '"""x"""'], {}), "('Direct', 'x', 'x')\n", (5251, 5271), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5285, 5319), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""y"""', '"""x"""'], {}), "('Direct', 'y', 'x')\n", (5299, 5319), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5333, 5367), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""x"""', '"""y"""'], {}), "('Direct', 'x', 'y')\n", (5347, 5367), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5419, 5479), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'y': 'y_grad'}"], {}), "(operators, {'y': 'y_grad'})\n", (5451, 5479), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5764, 5807), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutput"""', '"""in"""', '"""hidden"""'], {}), "('UseOutput', 'in', 'hidden')\n", (5778, 5807), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5821, 5865), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutput"""', '"""hidden"""', '"""out"""'], {}), "('UseOutput', 'hidden', 'out')\n", (5835, 5865), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5879, 5918), 
'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""out"""', '"""sink"""'], {}), "('Direct', 'out', 'sink')\n", (5893, 5918), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((5977, 6034), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""sink_grad"""', '"""out_grad"""'], {}), "('DirectGradient', 'sink_grad', 'out_grad')\n", (5991, 6034), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6048, 6119), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutputGradient"""', "['out', 'out_grad']", '"""hidden_grad"""'], {}), "('UseOutputGradient', ['out', 'out_grad'], 'hidden_grad')\n", (6062, 6119), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6179, 6252), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutputGradient"""', "['hidden', 'hidden_grad']", '"""in_grad"""'], {}), "('UseOutputGradient', ['hidden', 'hidden_grad'], 'in_grad')\n", (6193, 6252), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6544, 6583), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutput"""', '"""in"""', '"""in"""'], {}), "('UseOutput', 'in', 'in')\n", (6558, 6583), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6597, 6637), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutput"""', '"""in"""', '"""out"""'], {}), "('UseOutput', 'in', 'out')\n", (6611, 6637), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6651, 6690), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""out"""', '"""sink"""'], {}), "('Direct', 'out', 'sink')\n", (6665, 6690), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6749, 6806), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""sink_grad"""', '"""out_grad"""'], {}), 
"('DirectGradient', 'sink_grad', 'out_grad')\n", (6763, 6806), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6820, 6887), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutputGradient"""', "['out', 'out_grad']", '"""in_grad"""'], {}), "('UseOutputGradient', ['out', 'out_grad'], 'in_grad')\n", (6834, 6887), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((6947, 7012), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutputGradient"""', "['in', 'in_grad']", '"""in_grad"""'], {}), "('UseOutputGradient', ['in', 'in_grad'], 'in_grad')\n", (6961, 7012), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((7320, 7363), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutput"""', '"""in"""', '"""hidden"""'], {}), "('UseOutput', 'in', 'hidden')\n", (7334, 7363), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((7577, 7621), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""hidden"""'], {}), "('Direct', 'hidden', 'hidden')\n", (7591, 7621), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((7635, 7679), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseOutput"""', '"""hidden"""', '"""out"""'], {}), "('UseOutput', 'hidden', 'out')\n", (7649, 7679), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((7693, 7732), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""out"""', '"""sink"""'], {}), "('Direct', 'out', 'sink')\n", (7707, 7732), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((7817, 7883), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'sink': 'sink_grad'}"], {}), "(operators, {'sink': 'sink_grad'})\n", (7849, 7883), False, 'from caffe2.python.core import CreateOperator, 
GradientRegistry\n'), ((7964, 8004), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (7978, 8004), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((8018, 8061), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseInput"""', '"""hidden"""', '"""out"""'], {}), "('UseInput', 'hidden', 'out')\n", (8032, 8061), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((8075, 8114), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""out"""', '"""sink"""'], {}), "('Direct', 'out', 'sink')\n", (8089, 8114), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((8173, 8230), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""sink_grad"""', '"""out_grad"""'], {}), "('DirectGradient', 'sink_grad', 'out_grad')\n", (8187, 8230), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((8244, 8317), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseInputGradient"""', "['hidden', 'out_grad']", '"""hidden_grad"""'], {}), "('UseInputGradient', ['hidden', 'out_grad'], 'hidden_grad')\n", (8258, 8317), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((8377, 8435), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden_grad', 'in_grad')\n", (8391, 8435), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9005, 9044), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""UseInput"""', '"""in"""', '"""out"""'], {}), "('UseInput', 'in', 'out')\n", (9019, 9044), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9058, 9094), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""in"""'], {}), 
"('Direct', 'in', 'in')\n", (9072, 9094), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9179, 9243), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (9211, 9243), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9588, 9629), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden1"""'], {}), "('Direct', 'in', 'hidden1')\n", (9602, 9629), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9643, 9684), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden2"""'], {}), "('Direct', 'in', 'hidden2')\n", (9657, 9684), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9698, 9753), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', "['hidden1', 'hidden2']", '"""out"""'], {}), "('Direct', ['hidden1', 'hidden2'], 'out')\n", (9712, 9753), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((9928, 10006), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', "['hidden1_grad', 'hidden2_grad']"], {}), "('DirectGradient', 'out_grad', ['hidden1_grad', 'hidden2_grad'])\n", (9942, 10006), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((10066, 10125), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden2_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden2_grad', 'in_grad')\n", (10080, 10125), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((10185, 10257), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden1_grad"""', '"""_in_grad_autosplit_0"""'], {}), "('DirectGradient', 'hidden1_grad', '_in_grad_autosplit_0')\n", (10199, 
10257), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((10317, 10386), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Sum"""', "['in_grad', '_in_grad_autosplit_0']", '"""in_grad"""'], {}), "('Sum', ['in_grad', '_in_grad_autosplit_0'], 'in_grad')\n", (10331, 10386), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((10971, 11012), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden1"""'], {}), "('Direct', 'in', 'hidden1')\n", (10985, 11012), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11026, 11071), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Nogradient"""', '"""in"""', '"""hidden2"""'], {}), "('Nogradient', 'in', 'hidden2')\n", (11040, 11071), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11085, 11140), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', "['hidden1', 'hidden2']", '"""out"""'], {}), "('Direct', ['hidden1', 'hidden2'], 'out')\n", (11099, 11140), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11199, 11277), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', "['hidden1_grad', 'hidden2_grad']"], {}), "('DirectGradient', 'out_grad', ['hidden1_grad', 'hidden2_grad'])\n", (11213, 11277), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11337, 11396), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden1_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden1_grad', 'in_grad')\n", (11351, 11396), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11844, 11880), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""in"""'], {}), "('Direct', 'in', 'in')\n", (11858, 11880), False, 'from caffe2.python.core import CreateOperator, 
GradientRegistry\n'), ((11894, 11935), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden1"""'], {}), "('Direct', 'in', 'hidden1')\n", (11908, 11935), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((11949, 11990), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden2"""'], {}), "('Direct', 'in', 'hidden2')\n", (11963, 11990), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((12004, 12059), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', "['hidden1', 'hidden2']", '"""out"""'], {}), "('Direct', ['hidden1', 'hidden2'], 'out')\n", (12018, 12059), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((12118, 12196), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', "['hidden1_grad', 'hidden2_grad']"], {}), "('DirectGradient', 'out_grad', ['hidden1_grad', 'hidden2_grad'])\n", (12132, 12196), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((12256, 12315), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden2_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden2_grad', 'in_grad')\n", (12270, 12315), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((12375, 12447), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden1_grad"""', '"""_in_grad_autosplit_0"""'], {}), "('DirectGradient', 'hidden1_grad', '_in_grad_autosplit_0')\n", (12389, 12447), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((12507, 12576), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Sum"""', "['in_grad', '_in_grad_autosplit_0']", '"""in_grad"""'], {}), "('Sum', ['in_grad', '_in_grad_autosplit_0'], 'in_grad')\n", (12521, 12576), False, 'from caffe2.python.core import 
CreateOperator, GradientRegistry\n'), ((12636, 12690), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""in_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'in_grad', 'in_grad')\n", (12650, 12690), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13221, 13257), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""in"""'], {}), "('Direct', 'in', 'in')\n", (13235, 13257), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13271, 13312), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden1"""'], {}), "('Direct', 'in', 'hidden1')\n", (13285, 13312), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13326, 13367), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden2"""'], {}), "('Direct', 'in', 'hidden2')\n", (13340, 13367), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13381, 13435), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', "['hidden1', 'hidden2']", '"""in"""'], {}), "('Direct', ['hidden1', 'hidden2'], 'in')\n", (13395, 13435), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13449, 13490), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden3"""'], {}), "('Direct', 'in', 'hidden3')\n", (13463, 13490), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13504, 13545), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden4"""'], {}), "('Direct', 'in', 'hidden4')\n", (13518, 13545), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13559, 13600), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden5"""'], {}), "('Direct', 'in', 'hidden5')\n", (13573, 
13600), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13614, 13680), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', "['hidden3', 'hidden4', 'hidden5']", '"""out"""'], {}), "('Direct', ['hidden3', 'hidden4', 'hidden5'], 'out')\n", (13628, 13680), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13739, 13837), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', "['hidden3_grad', 'hidden4_grad', 'hidden5_grad']"], {}), "('DirectGradient', 'out_grad', ['hidden3_grad',\n 'hidden4_grad', 'hidden5_grad'])\n", (13753, 13837), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((13893, 13952), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden5_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden5_grad', 'in_grad')\n", (13907, 13952), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14012, 14084), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden4_grad"""', '"""_in_grad_autosplit_0"""'], {}), "('DirectGradient', 'hidden4_grad', '_in_grad_autosplit_0')\n", (14026, 14084), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14144, 14216), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden3_grad"""', '"""_in_grad_autosplit_1"""'], {}), "('DirectGradient', 'hidden3_grad', '_in_grad_autosplit_1')\n", (14158, 14216), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14276, 14373), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Sum"""', "['in_grad', '_in_grad_autosplit_0', '_in_grad_autosplit_1']", '"""in_grad"""'], {}), "('Sum', ['in_grad', '_in_grad_autosplit_0',\n '_in_grad_autosplit_1'], 'in_grad')\n", (14290, 14373), False, 'from caffe2.python.core import CreateOperator, 
GradientRegistry\n'), ((14462, 14539), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""in_grad"""', "['hidden1_grad', 'hidden2_grad']"], {}), "('DirectGradient', 'in_grad', ['hidden1_grad', 'hidden2_grad'])\n", (14476, 14539), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14599, 14658), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden2_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden2_grad', 'in_grad')\n", (14613, 14658), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14718, 14790), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden1_grad"""', '"""_in_grad_autosplit_0"""'], {}), "('DirectGradient', 'hidden1_grad', '_in_grad_autosplit_0')\n", (14732, 14790), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14850, 14919), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Sum"""', "['in_grad', '_in_grad_autosplit_0']", '"""in_grad"""'], {}), "('Sum', ['in_grad', '_in_grad_autosplit_0'], 'in_grad')\n", (14864, 14919), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((14995, 15049), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""in_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'in_grad', 'in_grad')\n", (15009, 15049), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((15534, 15578), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""FC"""', "['in', 'w', 'b']", '"""fc"""'], {}), "('FC', ['in', 'w', 'b'], 'fc')\n", (15548, 15578), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((15592, 15626), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Sum"""', '"""fc"""', '"""agg"""'], {}), "('Sum', 'fc', 'agg')\n", (15606, 15626), False, 'from caffe2.python.core import CreateOperator, 
GradientRegistry\n'), ((15640, 15685), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""AveragedLoss"""', '"""agg"""', '"""loss"""'], {}), "('AveragedLoss', 'agg', 'loss')\n", (15654, 15685), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16058, 16102), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""FC"""', "['in', 'w', 'b']", '"""fc"""'], {}), "('FC', ['in', 'w', 'b'], 'fc')\n", (16072, 16102), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16116, 16149), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Print"""', '"""fc"""', '[]'], {}), "('Print', 'fc', [])\n", (16130, 16149), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16163, 16207), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""AveragedLoss"""', '"""fc"""', '"""loss"""'], {}), "('AveragedLoss', 'fc', 'loss')\n", (16177, 16207), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16266, 16336), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""AveragedLossGradient"""', "['fc', 'loss_grad']", '"""fc_grad"""'], {}), "('AveragedLossGradient', ['fc', 'loss_grad'], 'fc_grad')\n", (16280, 16336), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16377, 16466), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""FCGradient"""', "['in', 'w', 'fc_grad']", "['w_grad', 'b_grad', 'in_grad']"], {}), "('FCGradient', ['in', 'w', 'fc_grad'], ['w_grad', 'b_grad',\n 'in_grad'])\n", (16391, 16466), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16905, 16945), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (16919, 16945), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((16959, 17010), 'caffe2.python.core.CreateOperator', 'CreateOperator', 
(['"""StopGradient"""', '"""hidden"""', '"""hidden2"""'], {}), "('StopGradient', 'hidden', 'hidden2')\n", (16973, 17010), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17024, 17066), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden2"""', '"""out"""'], {}), "('Direct', 'hidden2', 'out')\n", (17038, 17066), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17125, 17185), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', '"""hidden2_grad"""'], {}), "('DirectGradient', 'out_grad', 'hidden2_grad')\n", (17139, 17185), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17431, 17471), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (17445, 17471), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17485, 17539), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""StopGradient"""', '"""hidden"""', '"""auto_blobx"""'], {}), "('StopGradient', 'hidden', 'auto_blobx')\n", (17499, 17539), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17553, 17594), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""out"""'], {}), "('Direct', 'hidden', 'out')\n", (17567, 17594), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17748, 17812), 'caffe2.python.core.GradientRegistry.GetBackwardPass', 'GradientRegistry.GetBackwardPass', (['operators', "{'out': 'out_grad'}"], {}), "(operators, {'out': 'out_grad'})\n", (17780, 17812), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17904, 17944), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (17918, 17944), False, 'from 
caffe2.python.core import CreateOperator, GradientRegistry\n'), ((17958, 18008), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""StopGradient"""', '"""hidden"""', '"""hidden"""'], {}), "('StopGradient', 'hidden', 'hidden')\n", (17972, 18008), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18022, 18063), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""out"""'], {}), "('Direct', 'hidden', 'out')\n", (18036, 18063), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18122, 18181), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""out_grad"""', '"""hidden_grad"""'], {}), "('DirectGradient', 'out_grad', 'hidden_grad')\n", (18136, 18181), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18505, 18545), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""in"""', '"""hidden"""'], {}), "('Direct', 'in', 'hidden')\n", (18519, 18545), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18559, 18604), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', '"""hidden"""', '"""hidden2"""'], {}), "('Direct', 'hidden', 'hidden2')\n", (18573, 18604), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18618, 18669), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""StopGradient"""', '"""hidden"""', '"""hidden3"""'], {}), "('StopGradient', 'hidden', 'hidden3')\n", (18632, 18669), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18683, 18738), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""Direct"""', "['hidden2', 'hidden3']", '"""out"""'], {}), "('Direct', ['hidden2', 'hidden3'], 'out')\n", (18697, 18738), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18797, 18875), 'caffe2.python.core.CreateOperator', 'CreateOperator', 
(['"""DirectGradient"""', '"""out_grad"""', "['hidden2_grad', 'hidden3_grad']"], {}), "('DirectGradient', 'out_grad', ['hidden2_grad', 'hidden3_grad'])\n", (18811, 18875), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18916, 18979), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden2_grad"""', '"""hidden_grad"""'], {}), "('DirectGradient', 'hidden2_grad', 'hidden_grad')\n", (18930, 18979), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((18993, 19051), 'caffe2.python.core.CreateOperator', 'CreateOperator', (['"""DirectGradient"""', '"""hidden_grad"""', '"""in_grad"""'], {}), "('DirectGradient', 'hidden_grad', 'in_grad')\n", (19007, 19051), False, 'from caffe2.python.core import CreateOperator, GradientRegistry\n'), ((20296, 20309), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (20304, 20309), True, 'import numpy as np\n'), ((20360, 20373), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (20368, 20373), True, 'import numpy as np\n'), ((20426, 20439), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (20434, 20439), True, 'import numpy as np\n'), ((32939, 32952), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (32947, 32952), True, 'import numpy as np\n'), ((33005, 33018), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (33013, 33018), True, 'import numpy as np\n'), ((33069, 33082), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (33077, 33082), True, 'import numpy as np\n'), ((33135, 33148), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (33143, 33148), True, 'import numpy as np\n'), ((2388, 2425), 'caffe2.python.core.DeviceOption', 'core.DeviceOption', (['caffe2_pb2.CUDA', '(1)'], {}), '(caffe2_pb2.CUDA, 1)\n', (2405, 2425), False, 'from caffe2.python import core, test_util\n'), ((9327, 9364), 'caffe2.python.core.DeviceOption', 'core.DeviceOption', (['caffe2_pb2.CUDA', '(1)'], {}), '(caffe2_pb2.CUDA, 1)\n', (9344, 9364), False, 
'from caffe2.python import core, test_util\n')] |
import numpy as np
import cv2
import os
import argparse
import glob
import math
import matplotlib.pyplot as plt
from ReadCameraModel import *
from UndistortImage import *
def rotationMatrixToEulerAngles(R) :
    """Decompose a 3x3 rotation matrix R into Euler angles.

    Returns a numpy array [roll, pitch, yaw] in degrees. Near the
    gimbal-lock singularity (sy ~ 0) the yaw component cannot be
    recovered independently and is pinned to zero.
    """
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if sy < 1e-6:
        # Gimbal lock: fall back to the degenerate decomposition.
        roll = math.atan2(-R[1, 2], R[1, 1])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = 0
    else:
        roll = math.atan2(R[2, 1], R[2, 2])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = math.atan2(R[1, 0], R[0, 0])
    return np.array([roll * 180 / math.pi, pitch * 180 / math.pi, yaw * 180 / math.pi])
def find_features_orb(img1, img2):
    """Match ORB features between two grayscale frames.

    Detects up to 2000 ORB keypoints per image, brute-force matches the
    descriptors with Hamming distance + cross-check, draws the 50 best
    matches onto BGR copies of the inputs, and returns the matched pixel
    coordinates of those 50 matches.

    :param img1: first grayscale image
    :param img2: second grayscale image
    :return: (pts1, pts2) integer Nx2 numpy arrays of matched (x, y) points
    """
    orb = cv2.ORB_create(nfeatures=2000)
    kp1 = orb.detect(img1, None)
    kp1, des1 = orb.compute(img1, kp1)
    kp2 = orb.detect(img2, None)
    kp2, des2 = orb.compute(img2, kp2)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    # Best matches first (smallest descriptor distance).
    matches = sorted(matches, key=lambda x: x.distance)
    # BUG FIX: the original appended to module-level img1_points/img2_points
    # that were never defined anywhere, raising NameError on the first call
    # (and would have accumulated stale points across calls otherwise).
    # Use fresh local lists instead.
    img1_points = []
    img2_points = []
    for mat in matches[:50]:
        # Matching keypoint coordinates in each image (x = column, y = row).
        [x1, y1] = kp1[mat.queryIdx].pt
        [x2, y2] = kp2[mat.trainIdx].pt
        cv2.circle(img1, tuple([int(x1), int(y1)]), 10, (0, 255, 0))
        cv2.circle(img2, tuple([int(x2), int(y2)]), 10, (0, 255, 0))
        img1_points.append([int(x1), int(y1)])
        img2_points.append([int(x2), int(y2)])
    return np.asarray(img1_points), np.asarray(img2_points)
def find_features(img1, img2):
    """Match SIFT features between two grayscale frames.

    Detects SIFT keypoints in both images, matches descriptors with a
    FLANN kd-tree matcher (k=2), keeps matches passing Lowe's ratio test
    (ratio 0.5), and returns the matched pixel coordinates.

    :param img1: current frame (grayscale)
    :param img2: next frame (grayscale)
    :return: (pts1, pts2) Nx2 numpy arrays of matched (x, y) coordinates
    """
    sift = cv2.xfeatures2d.SIFT_create()
    # Keypoints and descriptors for both frames.
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # FLANN with a kd-tree index (algorithm 0 == FLANN_INDEX_KDTREE).
    FLANN_INDEX_KDTREE = 0
    flann = cv2.FlannBasedMatcher(
        dict(algorithm = FLANN_INDEX_KDTREE, trees = 5),
        dict(checks=50),
    )
    matches = flann.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep a match only if it is clearly better than
    # the second-best candidate.
    good = [m for m, n in matches if m.distance < 0.5 * n.distance]
    pts1 = [kp1[m.queryIdx].pt for m in good]
    pts2 = [kp2[m.trainIdx].pt for m in good]
    return np.asarray(pts1), np.asarray(pts2)
if __name__ == '__main__':
    # Monocular visual odometry over the Oxford RobotCar centre-camera frames:
    # estimate frame-to-frame pose from matched features and plot the trajectory.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", default = '/cmlscratch/arjgpt27/projects/Oxford_dataset/stereo/centre/', help = "Path of the images")
    parser.add_argument("--model", default = './model', help = "Path of the images")
    parser.add_argument("--output", default = './plots/', help = "Path to store the images")
    Flags = parser.parse_args()
    # Accumulated camera pose as a 3x4 [R|t] matrix, initialised to identity.
    prev_pose_cv = np.array([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0]], dtype = np.float32)
    # Sorted frame list; the first 400 frames are appended again at the end.
    files = np.sort(glob.glob(os.path.join(Flags.input, '*png'), recursive=True))
    cut_files = files[0:400]
    files = np.append(files, cut_files)
    fig = plt.figure()
    # Camera intrinsics and the undistortion lookup table from the camera model.
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel(Flags.model)
    intrinsic = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
    for i in range(0, len(files) - 1):
        print("Reading Frame ",i)
        # Load consecutive raw frames, demosaic the Bayer pattern, undistort,
        # then convert back to grayscale for feature matching.
        img1 = cv2.imread(files[i], 0)
        color_image = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
        undistorted_image1 = UndistortImage(color_image, LUT)
        img1 = cv2.cvtColor(undistorted_image1, cv2.COLOR_BGR2GRAY)
        img2 = cv2.imread(files[i+1], 0)
        color_image = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
        undistorted_image2 = UndistortImage(color_image, LUT)
        img2 = cv2.cvtColor(undistorted_image2, cv2.COLOR_BGR2GRAY)
        # Crop to rows 150:750 before matching — presumably to drop the car
        # bonnet and sky regions; TODO confirm against the dataset geometry.
        img1_feat = img1[150:750, :]
        img2_feat = img2[150:750, :]
        img1_points, img2_points = find_features(img1_feat, img2_feat)
        # The 5-point essential-matrix solver needs more than 5 correspondences.
        if (len(img1_points) <= 5) or (len(img2_points) <= 5):
            continue
        ###############################################################
        # Robustly estimate the essential matrix, then recover relative (R, t).
        E_cv, mask = cv2.findEssentialMat(img1_points, img2_points, focal=fx, pp=(cx, cy), method=cv2.RANSAC, prob=0.999, threshold=0.5)
        _,R,t,_ = cv2.recoverPose(E_cv, img1_points, img2_points, focal=fx, pp=(cx, cy))
        angles = rotationMatrixToEulerAngles(R)
        # Reject implausible inter-frame rotations (|x| or |z| angle >= 50 deg).
        if angles[0] < 50 and angles[0] > -50 and angles[2] < 50 and angles[2] > -50:
            # print(np.linalg.det(R))
            # det(R) < 0 means a reflection, not a proper rotation: flip signs.
            if(np.linalg.det(R) < 0):
                R = -R
                t = -t
                print("Inside")
            current_pose_cv = np.hstack((R, t))
            ###############################################################
            # if current_pose_cv[2, 3] > 0:
            #     current_pose_cv[:, 3] = -current_pose_cv[:, 3]
            # current_pose = homogenous_matrix(current_pose)
            # Chain the relative pose onto the accumulated pose in homogeneous form.
            curr_pose_homo_cv = np.vstack((current_pose_cv, [0.0, 0.0, 0.0, 1.0]))
            prev_pose_homo_cv = np.vstack((prev_pose_cv, [0.0, 0.0, 0.0, 1.0]))
            prev_pose_homo_cv = np.matmul(prev_pose_homo_cv, curr_pose_homo_cv)
            new_x_cv, new_y_cv, new_z_cv = prev_pose_homo_cv[:3, 3]
            prev_pose_cv = prev_pose_homo_cv[:3, :]
            # Top-down trajectory plot: x vs -z.
            plt.scatter(new_x_cv, -new_z_cv, color='r')
        # NOTE(review): new_x_cv/new_y_cv/new_z_cv are only assigned inside the
        # angle-filter branch above — if the very first accepted frame fails the
        # filter, this print raises NameError; confirm intended behaviour.
        plt.savefig(Flags.output + str(i) + ".png")
        print(new_x_cv, new_y_cv, new_z_cv)
| [
"argparse.ArgumentParser",
"math.atan2",
"matplotlib.pyplot.figure",
"os.path.join",
"cv2.recoverPose",
"cv2.cvtColor",
"cv2.BFMatcher",
"numpy.append",
"numpy.linalg.det",
"math.sqrt",
"cv2.findEssentialMat",
"numpy.asarray",
"cv2.FlannBasedMatcher",
"numpy.hstack",
"cv2.ORB_create",
... | [((217, 265), 'math.sqrt', 'math.sqrt', (['(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])'], {}), '(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n', (226, 265), False, 'import math\n'), ((492, 559), 'numpy.array', 'np.array', (['[x * 180 / math.pi, y * 180 / math.pi, z * 180 / math.pi]'], {}), '([x * 180 / math.pi, y * 180 / math.pi, z * 180 / math.pi])\n', (500, 559), True, 'import numpy as np\n'), ((592, 622), 'cv2.ORB_create', 'cv2.ORB_create', ([], {'nfeatures': '(2000)'}), '(nfeatures=2000)\n', (606, 622), False, 'import cv2\n'), ((761, 809), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (774, 809), False, 'import cv2\n'), ((847, 885), 'cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_GRAY2BGR'], {}), '(img1, cv2.COLOR_GRAY2BGR)\n', (859, 885), False, 'import cv2\n'), ((894, 932), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_GRAY2BGR'], {}), '(img2, cv2.COLOR_GRAY2BGR)\n', (906, 932), False, 'import cv2\n'), ((1541, 1570), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (1568, 1570), False, 'import cv2\n'), ((1903, 1953), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (1924, 1953), False, 'import cv2\n'), ((2448, 2473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2471, 2473), False, 'import argparse\n'), ((2826, 2896), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], dtype=np.float32)\n', (2834, 2896), True, 'import numpy as np\n'), ((3027, 3054), 'numpy.append', 'np.append', (['files', 'cut_files'], {}), '(files, cut_files)\n', (3036, 3054), True, 'import numpy as np\n'), ((3062, 3074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3072, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3203), 'numpy.array', 'np.array', 
(['[[fx, 0, cx], [0, fy, cy], [0, 0, 1]]'], {}), '([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n', (3164, 3203), True, 'import numpy as np\n'), ((311, 339), 'math.atan2', 'math.atan2', (['R[2, 1]', 'R[2, 2]'], {}), '(R[2, 1], R[2, 2])\n', (321, 339), False, 'import math\n'), ((345, 369), 'math.atan2', 'math.atan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (355, 369), False, 'import math\n'), ((375, 403), 'math.atan2', 'math.atan2', (['R[1, 0]', 'R[0, 0]'], {}), '(R[1, 0], R[0, 0])\n', (385, 403), False, 'import math\n'), ((416, 445), 'math.atan2', 'math.atan2', (['(-R[1, 2])', 'R[1, 1]'], {}), '(-R[1, 2], R[1, 1])\n', (426, 445), False, 'import math\n'), ((450, 474), 'math.atan2', 'math.atan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (460, 474), False, 'import math\n'), ((1451, 1474), 'numpy.asarray', 'np.asarray', (['img1_points'], {}), '(img1_points)\n', (1461, 1474), True, 'import numpy as np\n'), ((1476, 1499), 'numpy.asarray', 'np.asarray', (['img2_points'], {}), '(img2_points)\n', (1486, 1499), True, 'import numpy as np\n'), ((2363, 2384), 'numpy.asarray', 'np.asarray', (['features1'], {}), '(features1)\n', (2373, 2384), True, 'import numpy as np\n'), ((2386, 2407), 'numpy.asarray', 'np.asarray', (['features2'], {}), '(features2)\n', (2396, 2407), True, 'import numpy as np\n'), ((3279, 3302), 'cv2.imread', 'cv2.imread', (['files[i]', '(0)'], {}), '(files[i], 0)\n', (3289, 3302), False, 'import cv2\n'), ((3319, 3360), 'cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_BayerGR2BGR'], {}), '(img1, cv2.COLOR_BayerGR2BGR)\n', (3331, 3360), False, 'import cv2\n'), ((3426, 3478), 'cv2.cvtColor', 'cv2.cvtColor', (['undistorted_image1', 'cv2.COLOR_BGR2GRAY'], {}), '(undistorted_image1, cv2.COLOR_BGR2GRAY)\n', (3438, 3478), False, 'import cv2\n'), ((3489, 3516), 'cv2.imread', 'cv2.imread', (['files[i + 1]', '(0)'], {}), '(files[i + 1], 0)\n', (3499, 3516), False, 'import cv2\n'), ((3531, 3572), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 
'cv2.COLOR_BayerGR2BGR'], {}), '(img2, cv2.COLOR_BayerGR2BGR)\n', (3543, 3572), False, 'import cv2\n'), ((3638, 3690), 'cv2.cvtColor', 'cv2.cvtColor', (['undistorted_image2', 'cv2.COLOR_BGR2GRAY'], {}), '(undistorted_image2, cv2.COLOR_BGR2GRAY)\n', (3650, 3690), False, 'import cv2\n'), ((3972, 4091), 'cv2.findEssentialMat', 'cv2.findEssentialMat', (['img1_points', 'img2_points'], {'focal': 'fx', 'pp': '(cx, cy)', 'method': 'cv2.RANSAC', 'prob': '(0.999)', 'threshold': '(0.5)'}), '(img1_points, img2_points, focal=fx, pp=(cx, cy),\n method=cv2.RANSAC, prob=0.999, threshold=0.5)\n', (3992, 4091), False, 'import cv2\n'), ((4100, 4170), 'cv2.recoverPose', 'cv2.recoverPose', (['E_cv', 'img1_points', 'img2_points'], {'focal': 'fx', 'pp': '(cx, cy)'}), '(E_cv, img1_points, img2_points, focal=fx, pp=(cx, cy))\n', (4115, 4170), False, 'import cv2\n'), ((2940, 2973), 'os.path.join', 'os.path.join', (['Flags.input', '"""*png"""'], {}), "(Flags.input, '*png')\n", (2952, 2973), False, 'import os\n'), ((4413, 4430), 'numpy.hstack', 'np.hstack', (['(R, t)'], {}), '((R, t))\n', (4422, 4430), True, 'import numpy as np\n'), ((4661, 4711), 'numpy.vstack', 'np.vstack', (['(current_pose_cv, [0.0, 0.0, 0.0, 1.0])'], {}), '((current_pose_cv, [0.0, 0.0, 0.0, 1.0]))\n', (4670, 4711), True, 'import numpy as np\n'), ((4735, 4782), 'numpy.vstack', 'np.vstack', (['(prev_pose_cv, [0.0, 0.0, 0.0, 1.0])'], {}), '((prev_pose_cv, [0.0, 0.0, 0.0, 1.0]))\n', (4744, 4782), True, 'import numpy as np\n'), ((4806, 4853), 'numpy.matmul', 'np.matmul', (['prev_pose_homo_cv', 'curr_pose_homo_cv'], {}), '(prev_pose_homo_cv, curr_pose_homo_cv)\n', (4815, 4853), True, 'import numpy as np\n'), ((4961, 5004), 'matplotlib.pyplot.scatter', 'plt.scatter', (['new_x_cv', '(-new_z_cv)'], {'color': '"""r"""'}), "(new_x_cv, -new_z_cv, color='r')\n", (4972, 5004), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4343), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (4340, 4343), True, 'import numpy as 
np\n')] |
import cv2
import tensorflow as tf
import os
from tqdm import tqdm
import numpy
DATADIR = "valid"  # root folder holding one sub-directory of images per die class
CATEGORIES = ["d4", "d6", "d8","d10","d12", "d20"]  # class labels; list index == model output index
dobre=0   # count of correct predictions ("dobre" is Polish for "good")
calosc=0  # count of evaluated images ("calosc" is Polish for "total")
def prepare(filepath):
    """Load an image and shape it for the CNN.

    Reads the image from *filepath*, resizes it to IMG_SIZE x IMG_SIZE,
    scales pixel values to [0, 1], and reshapes to a batch of one
    (-1, IMG_SIZE, IMG_SIZE, 3) array.

    :param filepath: path to an image file readable by cv2.imread
    :return: numpy array of shape (1, 100, 100, 3)
    """
    IMG_SIZE = 100
    img_array = cv2.imread(filepath)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    # BUG FIX: the original computed `new_array/255.0` and discarded the
    # result, so the pixels were never actually normalized. Keep the
    # scaled array so the network sees [0, 1] inputs as intended.
    new_array = new_array / 255.0
    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
# Load the trained dice-classifier CNN once, then measure its accuracy over
# the validation images (one sub-folder per class under DATADIR).
model = tf.keras.models.load_model("dice6-CNN.model")
for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    class_num = CATEGORIES.index(category)
    for img in tqdm(os.listdir(path)):
        pathimg= os.path.join(path,img)
        prediction = model.predict([prepare(pathimg)])
        calosc=calosc+1
        przewi=prediction.tolist()[0]
        # BUG FIX: numpy.around returns a new array rather than rounding in
        # place; the original discarded the result, so przewi kept the raw
        # probabilities and `.index(1)` almost never matched. Keep the
        # rounded (one-hot-like) list.
        przewi = numpy.around(przewi, 0).tolist()
        try:
            # Correct when the rounded-up class matches the folder's class.
            if przewi.index(1)==CATEGORIES.index(category):
                dobre=dobre+1
        except ValueError:
            # No output rounded to 1 — drop this sample from the total.
            calosc-=1
print("Dobre ", dobre, " na ", calosc)
procent= (float(dobre)/float(calosc))*100.0
print(numpy.round(procent,1), "%")
# Show a sample prediction for the last processed image.
prediction2 = model.predict([prepare(pathimg)])
print(prediction2) # will be a list in a list.
przewi=prediction.tolist()[0]
# Same fix as above: keep the rounded result before looking up the class.
przewi = numpy.around(przewi, 0).tolist()
try:
    print(CATEGORIES[przewi.index(1)])
except ValueError:
    pass
| [
"tensorflow.keras.models.load_model",
"numpy.around",
"cv2.imread",
"numpy.round",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((396, 441), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""dice6-CNN.model"""'], {}), "('dice6-CNN.model')\n", (422, 441), True, 'import tensorflow as tf\n'), ((1337, 1360), 'numpy.around', 'numpy.around', (['przewi', '(0)'], {}), '(przewi, 0)\n', (1349, 1360), False, 'import numpy\n'), ((230, 250), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (240, 250), False, 'import cv2\n'), ((267, 310), 'cv2.resize', 'cv2.resize', (['img_array', '(IMG_SIZE, IMG_SIZE)'], {}), '(img_array, (IMG_SIZE, IMG_SIZE))\n', (277, 310), False, 'import cv2\n'), ((484, 515), 'os.path.join', 'os.path.join', (['DATADIR', 'category'], {}), '(DATADIR, category)\n', (496, 515), False, 'import os\n'), ((1181, 1204), 'numpy.round', 'numpy.round', (['procent', '(1)'], {}), '(procent, 1)\n', (1192, 1204), False, 'import numpy\n'), ((583, 599), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (593, 599), False, 'import os\n'), ((628, 651), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (640, 651), False, 'import os\n'), ((776, 799), 'numpy.around', 'numpy.around', (['przewi', '(0)'], {}), '(przewi, 0)\n', (788, 799), False, 'import numpy\n')] |
import gym
import numpy as np
import matplotlib.pyplot as plt
# Tabular Q-learning on MountainCar: discretise the 2-D continuous
# observation space into 20x20 buckets and learn a Q-table over them.
env = gym.make("MountainCar-v0")
env.reset()
LEARNING_RATE = 0.1
DISCOUNT = 0.95 # weight, how important do we value future rewards over current rewards
EPISODES = 5000
SHOW_EVERY = 2000  # render / report statistics every this many episodes
# 20 buckets per observation dimension; bucket width per dimension below.
DISCRETE_OS_SIZE = [20] *len(env.observation_space.high)
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE
# Q-table initialised with random values in [-2, 0) (rewards are -1 per step).
q_table = np.random.uniform(low = -2, high = 0, size = (DISCRETE_OS_SIZE+[env.action_space.n]))
ep_rewards = []
# dictionary that tracks per-episode reward statistics (avg/min/max per window)
aggr_ep_rewards = {'ep': [], 'avg': [], 'min': [], 'max': []}
def get_discrete_state(state):
    """Map a continuous observation onto integer Q-table bucket indices.

    :param state: continuous observation vector from the environment
    :return: tuple of integer bucket indices, one per observation dimension,
        usable directly as a Q-table index
    """
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement and behaves identically.
    return tuple(discrete_state.astype(int))
# Epsilon-greedy exploration: decay epsilon linearly over the first half
# of training, then act greedily.
epsilon = 0.5
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES // 2
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING-START_EPSILON_DECAYING)
done = False
for episode in range(EPISODES):
    episode_reward = 0
    # Render only every SHOW_EVERY-th episode to keep training fast.
    if episode % SHOW_EVERY == 0:
        print(episode)
        render = True
    else:
        render = False
    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        # Epsilon-greedy action selection.
        if np.random.random() > epsilon:
            action = np.argmax(q_table[discrete_state])
        else:
            action = np.random.randint(0, env.action_space.n)
        # NOTE(review): 4-tuple step() return assumes the classic (pre-0.26)
        # gym API — confirm the installed gym version.
        new_state, reward, done, _ = env.step(action)
        episode_reward += reward
        new_discrete_state = get_discrete_state(new_state)
        if render:
            env.render()
        if not done:
            # Standard Q-learning update toward reward + discounted best
            # future value.
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state+(action, )]
            new_q = (1-LEARNING_RATE)*current_q+LEARNING_RATE*(reward+DISCOUNT*max_future_q)
            q_table[discrete_state+(action, )] = new_q
        elif new_state[0] >= env.goal_position:
            #print(f"We made it on episode {episode}")
            # Goal reached: fix the Q-value for the final transition at 0
            # (the maximum possible, since rewards are negative).
            q_table[discrete_state+(action, )] = 0
        discrete_state = new_discrete_state
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value
    ep_rewards.append(episode_reward)
    # Aggregate reward statistics over the last SHOW_EVERY episodes.
    if not episode % SHOW_EVERY:
        average_reward = sum(ep_rewards[-SHOW_EVERY:])/len(ep_rewards[-SHOW_EVERY:])
        aggr_ep_rewards['ep'].append(episode)
        aggr_ep_rewards['avg'].append(average_reward)
        aggr_ep_rewards['min'].append(min(ep_rewards[-SHOW_EVERY:]))
        aggr_ep_rewards['max'].append(max(ep_rewards[-SHOW_EVERY:]))
        print(f"Episode: {episode} avg: {average_reward} min: {min(ep_rewards[-SHOW_EVERY:])} max: {max(ep_rewards[-SHOW_EVERY:])}")
env.close()
# Plot the learning curves (average / min / max reward per window).
plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['avg'], label = 'avg')
plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['min'], label = 'min')
plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['max'], label = 'max')
plt.legend(loc = 4)
plt.show()
| [
"numpy.random.uniform",
"matplotlib.pyplot.show",
"gym.make",
"matplotlib.pyplot.plot",
"numpy.argmax",
"matplotlib.pyplot.legend",
"numpy.max",
"numpy.random.random",
"numpy.random.randint"
] | [((69, 95), 'gym.make', 'gym.make', (['"""MountainCar-v0"""'], {}), "('MountainCar-v0')\n", (77, 95), False, 'import gym\n'), ((421, 500), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2)', 'high': '(0)', 'size': '(DISCRETE_OS_SIZE + [env.action_space.n])'}), '(low=-2, high=0, size=DISCRETE_OS_SIZE + [env.action_space.n])\n', (438, 500), True, 'import numpy as np\n'), ((2496, 2564), 'matplotlib.pyplot.plot', 'plt.plot', (["aggr_ep_rewards['ep']", "aggr_ep_rewards['avg']"], {'label': '"""avg"""'}), "(aggr_ep_rewards['ep'], aggr_ep_rewards['avg'], label='avg')\n", (2504, 2564), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2635), 'matplotlib.pyplot.plot', 'plt.plot', (["aggr_ep_rewards['ep']", "aggr_ep_rewards['min']"], {'label': '"""min"""'}), "(aggr_ep_rewards['ep'], aggr_ep_rewards['min'], label='min')\n", (2575, 2635), True, 'import matplotlib.pyplot as plt\n'), ((2638, 2706), 'matplotlib.pyplot.plot', 'plt.plot', (["aggr_ep_rewards['ep']", "aggr_ep_rewards['max']"], {'label': '"""max"""'}), "(aggr_ep_rewards['ep'], aggr_ep_rewards['max'], label='max')\n", (2646, 2706), True, 'import matplotlib.pyplot as plt\n'), ((2709, 2726), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (2719, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2739), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2737, 2739), True, 'import matplotlib.pyplot as plt\n'), ((1180, 1198), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1196, 1198), True, 'import numpy as np\n'), ((1222, 1256), 'numpy.argmax', 'np.argmax', (['q_table[discrete_state]'], {}), '(q_table[discrete_state])\n', (1231, 1256), True, 'import numpy as np\n'), ((1277, 1317), 'numpy.random.randint', 'np.random.randint', (['(0)', 'env.action_space.n'], {}), '(0, env.action_space.n)\n', (1294, 1317), True, 'import numpy as np\n'), ((1511, 1546), 'numpy.max', 'np.max', (['q_table[new_discrete_state]'], {}), '(q_table[new_discrete_state])\n', 
(1517, 1546), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 06:18:58 2020
@author: kenneth
"""
from __future__ import absolute_import
import numpy as np
from Utils.utils import EvalC
from Utils.Loss import loss
from Utils.kernels import Kernels
class TwoClassSVDD(EvalC, loss, Kernels):
    """Two-class Support Vector Data Description (SVDD).

    Trains dual variables alpha by iterative gradient updates clipped into
    the box [0, C], using a configurable kernel from Utils.kernels.
    After fit(): alpha (dual weights), indices/support_vectors,
    R_squared (squared sphere radius estimate) and b (offset) are set.
    """
    def __init__(self, kernel = None, C = None):
        '''Two Class SVDD
        parameters:
            kernel: kernel name understood by kernelize() (defaults to 'rbf')
            C = box-constraint hyperparameter bounding each alpha (defaults to .01)
        '''
        super().__init__()
        if not kernel:
            kernel = 'rbf'
            self.kernel = kernel
        else:
            self.kernel = kernel
        if not C:
            C = .01
            self.C = C
        else:
            self.C = C
        return

    def y_i(self, y):
        '''Pairwise label products.

        :param: y: Nx1 label vector
        :return: NxN matrix of outer products y y^T
        '''
        return np.outer(y, y)

    def kernelize(self, x1, x2):
        '''Evaluate the configured kernel on two sample matrices.

        :params: x1: NxD
        :params: x2: NxD
        :return: Gram matrix between rows of x1 and rows of x2
        '''
        if self.kernel == 'linear':
            return Kernels.linear(x1, x2)
        elif self.kernel == 'rbf':
            return Kernels.rbf(x1, x2)
        elif self.kernel == 'sigmoid':
            return Kernels.sigmoid(x1, x2)
        elif self.kernel == 'polynomial':
            return Kernels.polynomial(x1, x2)
        elif self.kernel == 'cosine':
            return Kernels.cosine(x1, x2)
        elif self.kernel == 'correlation':
            return Kernels.correlation(x1, x2)
        elif self.kernel == 'linrbf':
            return Kernels.linrbf(x1, x2)
        elif self.kernel == 'rbfpoly':
            return Kernels.rbfpoly(x1, x2)
        elif self.kernel == 'rbfcosine':
            # NOTE(review): this branch dispatches to Kernels.rbfpoly, same as
            # the 'rbfpoly' branch above — looks like a copy-paste slip; verify
            # against Utils.kernels.
            return Kernels.rbfpoly(x1, x2)
        elif self.kernel == 'etakernel':
            return Kernels.etakernel(x1, x2)
        elif self.kernel == 'alignment':
            return Kernels.alignment(x1, x2)
        elif self.kernel == 'laplace':
            return Kernels.laplacian(x1, x2)
        elif self.kernel == 'locguass':
            return Kernels.locguass(x1, x2)
        elif self.kernel == 'chi':
            # NOTE(review): called with x1 only, ignoring x2 — confirm this
            # matches the Kernels.chi signature.
            return Kernels.chi(x1)

    def cost(self, x, y):
        '''Dual objective value for the current alpha.

        :param x: NxD feature matrix
        :param y: Nx1 label vector
        :Return type: cost (scalar)
        '''
        return self.alpha.dot(np.dot(self.alpha, self.knl * self.y_i(self.y))) - np.sum(self.alpha*y*(np.ones_like(self.alpha)*np.linalg.norm(x)))

    def alpha_y_i_kernel(self, X):
        '''Initialise the dual variables and precompute the Gram matrix.

        :params: X: NxD feature space
        :return: (alpha, outer product of alphas, NxN kernel matrix)
        '''
        alpha = np.ones(X.shape[0])
        self.alph_s = np.outer(alpha, alpha) #alpha_i's alpha_j's
        self.k = self.kernelize(X, X)
        return (alpha, self.alph_s, self.k)

    def fit(self, X, y, lr:float = None, iterations:int = None):
        '''Fit the SVDD by projected gradient updates on alpha.

        :params: X: NxD feature matrix
        :params: y: Dx1 target vector
        :params: lr: scalar learning rate value (defaults to 1e-2)
        :params: iterations: integer iteration count (defaults to 2)
        :return: self (fitted estimator)
        '''
        self.X = X
        self.y = y
        if not lr:
            lr = 1e-2
            self.lr = lr
        else:
            self.lr = lr
        if not iterations:
            iterations = 2
            self.iterations = iterations
        else:
            self.iterations = iterations
        self.alpha, self.alpha_i_s, self.knl = self.alpha_y_i_kernel(self.X)
        self.cost_rec = np.zeros(self.iterations)
        for ii in range(self.iterations):
            self.cost_rec[ii] = self.cost(self.X, self.y)
            print(f'Cost of computation: {self.cost_rec[ii]}')
            # Gradient step on alpha, then project back into the box [0, C].
            self.alpha = self.alpha + self.lr * (self.alpha * np.dot(self.y_i(self.y), self.knl).diagonal() - np.dot(self.knl, self.alpha * self.y))
            self.alpha[self.alpha < 0 ] = 0
            self.alpha[self.alpha > self.C] = self.C
        # NOTE(review): after clipping, (alpha >= 0) & (alpha <= C) holds for
        # every entry, so this selects all samples as support vectors — confirm
        # whether strict inequalities were intended.
        self.indices = np.where((self.alpha >= 0) & (self.alpha <= self.C))[0]
        # Squared sphere radius evaluated at the support vectors.
        self.R_squared = self.kernelize(self.X[self.indices], self.X[self.indices]).diagonal() - 2*np.dot(self.alpha[self.indices], self.kernelize(self.X[self.indices], self.X[self.indices])) + \
                            self.alpha[self.indices].dot(np.dot(self.alpha[self.indices], self.kernelize(self.X[self.indices], self.X[self.indices])))
        self.b = np.mean(self.R_squared - self.alpha[self.indices].dot(np.dot(self.alpha[self.indices], self.kernelize(self.X[self.indices], self.X[self.indices]))))
        self.support_vectors = self.indices
        print(f'Total support vectors required for classification: {len(self.support_vectors)}')
        return self

    def predict(self, X):
        '''Predict class labels for X.

        :param X: MxD feature matrix
        :return: array of labels in {0, 1} (sign of the decision value,
            with -1 remapped to 0)
        '''
        yhat: np.ndarray = np.sign(2*np.dot(self.alpha, self.kernelize(self.X, X)) + self.kernelize(X, self.X)[:, 0] + self.b)
        for enum, ii in enumerate(yhat):
            if yhat[enum] == -1:
                yhat[enum] = 0
        return yhat
#%%
##%% Testing
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_moons, make_circles
from sklearn.model_selection import train_test_split
# Two concentric circles: y == 1 is the inner circle, y == 0 the outer one.
X, y = make_circles(1000, noise = .07, factor = .3)
# BUGFIX: X only has two columns, so the previous X[:, 2] raised an
# IndexError (the np.hstack that would have appended y as a third column
# was commented out). Select the inner-circle points via the labels instead.
df = X[y == 1]
dy = y[y == 1]
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size = 0.3)
plt.scatter(df[:, 0], df[:, 1])
plt.scatter(X[:, 0], X[:, 1], c = y, cmap = 'coolwarm', s = 2)
tcl_dsvdd = TwoClassSVDD(kernel='polynomial').fit(X_train, Y_train)
plt.scatter(X_test[:, 0], X_test[:, 1], c = tcl_dsvdd.predict(X_test), cmap = 'coolwarm', s = 2)
| [
"sklearn.model_selection.train_test_split",
"numpy.ones",
"Utils.kernels.Kernels.polynomial",
"numpy.linalg.norm",
"Utils.kernels.Kernels.linear",
"Utils.kernels.Kernels.chi",
"Utils.kernels.Kernels.etakernel",
"numpy.ones_like",
"Utils.kernels.Kernels.linrbf",
"Utils.kernels.Kernels.rbf",
"Util... | [((4963, 5005), 'sklearn.datasets.make_circles', 'make_circles', (['(1000)'], {'noise': '(0.07)', 'factor': '(0.3)'}), '(1000, noise=0.07, factor=0.3)\n', (4975, 5005), False, 'from sklearn.datasets import make_blobs, make_moons, make_circles\n'), ((5141, 5178), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (5157, 5178), False, 'from sklearn.model_selection import train_test_split\n'), ((5182, 5213), 'matplotlib.pyplot.scatter', 'plt.scatter', (['df[:, 0]', 'df[:, 1]'], {}), '(df[:, 0], df[:, 1])\n', (5193, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5214, 5270), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y', 'cmap': '"""coolwarm"""', 's': '(2)'}), "(X[:, 0], X[:, 1], c=y, cmap='coolwarm', s=2)\n", (5225, 5270), True, 'import matplotlib.pyplot as plt\n'), ((826, 840), 'numpy.outer', 'np.outer', (['y', 'y'], {}), '(y, y)\n', (834, 840), True, 'import numpy as np\n'), ((2485, 2504), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (2492, 2504), True, 'import numpy as np\n'), ((2527, 2549), 'numpy.outer', 'np.outer', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (2535, 2549), True, 'import numpy as np\n'), ((3318, 3343), 'numpy.zeros', 'np.zeros', (['self.iterations'], {}), '(self.iterations)\n', (3326, 3343), True, 'import numpy as np\n'), ((1011, 1033), 'Utils.kernels.Kernels.linear', 'Kernels.linear', (['x1', 'x2'], {}), '(x1, x2)\n', (1025, 1033), False, 'from Utils.kernels import Kernels\n'), ((3776, 3828), 'numpy.where', 'np.where', (['((self.alpha >= 0) & (self.alpha <= self.C))'], {}), '((self.alpha >= 0) & (self.alpha <= self.C))\n', (3784, 3828), True, 'import numpy as np\n'), ((1088, 1107), 'Utils.kernels.Kernels.rbf', 'Kernels.rbf', (['x1', 'x2'], {}), '(x1, x2)\n', (1099, 1107), False, 'from Utils.kernels import Kernels\n'), ((1166, 1189), 'Utils.kernels.Kernels.sigmoid', 'Kernels.sigmoid', (['x1', 'x2'], 
{}), '(x1, x2)\n', (1181, 1189), False, 'from Utils.kernels import Kernels\n'), ((1251, 1277), 'Utils.kernels.Kernels.polynomial', 'Kernels.polynomial', (['x1', 'x2'], {}), '(x1, x2)\n', (1269, 1277), False, 'from Utils.kernels import Kernels\n'), ((2288, 2312), 'numpy.ones_like', 'np.ones_like', (['self.alpha'], {}), '(self.alpha)\n', (2300, 2312), True, 'import numpy as np\n'), ((2313, 2330), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (2327, 2330), True, 'import numpy as np\n'), ((3617, 3654), 'numpy.dot', 'np.dot', (['self.knl', '(self.alpha * self.y)'], {}), '(self.knl, self.alpha * self.y)\n', (3623, 3654), True, 'import numpy as np\n'), ((1335, 1357), 'Utils.kernels.Kernels.cosine', 'Kernels.cosine', (['x1', 'x2'], {}), '(x1, x2)\n', (1349, 1357), False, 'from Utils.kernels import Kernels\n'), ((1420, 1447), 'Utils.kernels.Kernels.correlation', 'Kernels.correlation', (['x1', 'x2'], {}), '(x1, x2)\n', (1439, 1447), False, 'from Utils.kernels import Kernels\n'), ((1505, 1527), 'Utils.kernels.Kernels.linrbf', 'Kernels.linrbf', (['x1', 'x2'], {}), '(x1, x2)\n', (1519, 1527), False, 'from Utils.kernels import Kernels\n'), ((1586, 1609), 'Utils.kernels.Kernels.rbfpoly', 'Kernels.rbfpoly', (['x1', 'x2'], {}), '(x1, x2)\n', (1601, 1609), False, 'from Utils.kernels import Kernels\n'), ((1670, 1693), 'Utils.kernels.Kernels.rbfpoly', 'Kernels.rbfpoly', (['x1', 'x2'], {}), '(x1, x2)\n', (1685, 1693), False, 'from Utils.kernels import Kernels\n'), ((1754, 1779), 'Utils.kernels.Kernels.etakernel', 'Kernels.etakernel', (['x1', 'x2'], {}), '(x1, x2)\n', (1771, 1779), False, 'from Utils.kernels import Kernels\n'), ((1840, 1865), 'Utils.kernels.Kernels.alignment', 'Kernels.alignment', (['x1', 'x2'], {}), '(x1, x2)\n', (1857, 1865), False, 'from Utils.kernels import Kernels\n'), ((1924, 1949), 'Utils.kernels.Kernels.laplacian', 'Kernels.laplacian', (['x1', 'x2'], {}), '(x1, x2)\n', (1941, 1949), False, 'from Utils.kernels import Kernels\n'), ((2009, 2033), 
'Utils.kernels.Kernels.locguass', 'Kernels.locguass', (['x1', 'x2'], {}), '(x1, x2)\n', (2025, 2033), False, 'from Utils.kernels import Kernels\n'), ((2088, 2103), 'Utils.kernels.Kernels.chi', 'Kernels.chi', (['x1'], {}), '(x1)\n', (2099, 2103), False, 'from Utils.kernels import Kernels\n')] |
"""
CHAED includes
1) 1000 images, 10 for each of 100 Chinese characters
2) including 20 characters with single element and
3) 80 with multi components in different structure types including horizontal composition, vertical composition,
half surrounding composition and surrounding composition.
We invited users to
4) evaluate each image's visual quality by 3 levels, good, medium and bad
through the website http://5192.168.3.11/eval.
In "Aesthetic Visual Quality Evaluation of Chinese Handwritings", for every character,
5) the odd number of images are used to train the ANN and
6) the even number of images are used to test it.
That is to say, 001.jpg, 003.jpg, 005.jpg, 007.jpg and 009.jpg form the training set and
002.jpg, 004.jpg, 006.jpg, 008.jpg, 010.jpg are the test set.
"""
import os
import numpy as np
from PIL import Image
from torchvision.datasets import VisionDataset
from common.utils import join
from dataset import DatasetFactory
class CHAEDBaseDataset(VisionDataset):
    """
    CHAEDDataset that contains all the elements in CHAED datasets,
    which can be inherited into the following datasets:
    1) aesthetic ternary classification dataset
    2) aesthetic score regression dataset
    3) aesthetic score distribution dataset
    The elements in AVABaseDataset include:
    1) all images
    3) visual quality scores from 3 levels, good, medium and bad
    4) visual quality score range from 0 to 100
    Generally, the probabilities of good, medium and bad evaluation results of each image
    are denoted as pgood, pmedium and pbad.
    The aesthetic score is defined by S = 100 x pgood + 50 x pmedium + 0 x pbad
    """
    def __init__(self, root, split='train', transforms=None):
        """
        :param root: dataset root directory containing the image folders plus
            Characters.txt, EvaluationData.txt and ScoreAnalysisPerImage.txt
        :param split: 'train' (odd-numbered images) or 'test' (even-numbered)
        :param transforms: optional callable applied to each PIL image
        """
        super().__init__(root=root, transforms=transforms)
        assert split in ['train', 'test'], 'Got unsupported split: `%s`' % split
        self.split = split
        self.characters_txt_path = join(self.root, 'Characters.txt')
        self.evaluation_data_txt_path = join(self.root, 'EvaluationData.txt')
        self.score_analysis_per_image_txt_path = join(self.root, 'ScoreAnalysisPerImage.txt')
        self.characters = self.read_characters_txt()
        self.charID_to_scores = self.read_evaluation_data_txt()
        self.charID_to_tuple = self.read_score_analysis_txt()
        # charID -> absolute image path: root/<charName>/<imageID>
        self.charID_to_img_path = {k: join(self.root, v[0], v[1]) for k, v in self.charID_to_tuple.items()}
        self.charID_to_avg_score = {k: v[2] for k, v in self.charID_to_tuple.items()}
        self.charID_to_sigma = {k: v[3] for k, v in self.charID_to_tuple.items()}
        self.charID_to_distribution, self.charID_to_ternary_label = self.get_distribution_and_ternary()
        # Drop the character whose image files are absent from the release.
        self.remove_missing_character()
        self.split_charID = self.get_charID_according_to_split()
        self._images = {k: self.charID_to_img_path[k] for k in self.split_charID}
        # Subclasses assign the task-specific charID -> target mapping here.
        self._targets = None
    @property
    def images(self):
        # charID -> image path, restricted to the current split.
        return self._images
    @property
    def targets(self):
        # charID -> target; None in the base class, set by subclasses.
        return self._targets
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        """Return the (transformed) RGB image and its target for the index-th sample."""
        charID = self.split_charID[index]
        image_path = self.images[charID]
        target = self.targets[charID]
        image = Image.open(image_path).convert('RGB')
        if self.transforms:
            image = self.transforms(image)
        return image, target
    def get_distribution_and_ternary(self):
        """
        Build, per character image:
        1) the [p(0), p(50), p(100)] score distribution, and
        2) the ternary label = argmax of that distribution
           (index 0 = mostly bad, 1 = mostly medium, 2 = mostly good).
        """
        charID_to_distribution = {}
        charID_to_ternary_label = {}
        for k, v in self.charID_to_scores.items():
            p0 = list.count(v, 0) / len(v)
            p50 = list.count(v, 50) / len(v)
            p100 = list.count(v, 100) / len(v)
            charID_to_distribution[k] = [p0, p50, p100]
            charID_to_ternary_label[k] = np.argmax(charID_to_distribution[k])
        return charID_to_distribution, charID_to_ternary_label
    def read_characters_txt(self):
        """Read Characters.txt (GBK encoded) and return the individual characters."""
        with open(self.characters_txt_path, encoding='gbk') as f:
            lines = f.readlines()
        # strip line break
        lines = [line.strip('\n') for line in lines]
        # get characters
        characters = [character for line in lines for character in line]
        return characters
    def read_evaluation_data_txt(self):
        """Parse EvaluationData.txt into charID -> list of raw scores (0/50/100)."""
        with open(self.evaluation_data_txt_path) as f:
            lines = f.readlines()
        lines = [line.strip('\n') for line in lines]
        # remove 'charID score'
        lines = lines[1:]
        charID_to_scores = {}
        # retrieve 33 score for each char (some chars may only have 30, 32, 34 scores)
        for line in lines:
            charID, score = line.split('\t')
            score = int(score)
            if charID not in charID_to_scores:
                charID_to_scores[charID] = []
            charID_to_scores[charID].append(score)
        return charID_to_scores
    def read_score_analysis_txt(self):
        """Parse ScoreAnalysisPerImage.txt into charID -> (charName, imageID, avg_score, sigma)."""
        with open(self.score_analysis_per_image_txt_path) as f:
            lines = f.readlines()
        lines = [line.strip('\n') for line in lines]
        # remove 'charName	imageID	charID	average score	sigma'
        lines = lines[1:]
        charID_to_tuple = {}
        for line in lines:
            charName, imageID, charID, avg_score, sigma = line.split('\t')
            avg_score = float(avg_score)
            sigma = float(sigma)
            charID_to_tuple[charID] = (charName, imageID, avg_score, sigma)
        return charID_to_tuple
    def remove_missing_character(self):
        # character GB3632 is missing
        # correspond to key id range from 231 to 240
        for i in range(231, 240 + 1):
            self.charID_to_scores.pop(str(i))
            self.charID_to_tuple.pop(str(i))
            self.charID_to_img_path.pop(str(i))
            self.charID_to_avg_score.pop(str(i))
            self.charID_to_sigma.pop(str(i))
    def get_charID_according_to_split(self):
        """Select charIDs for this split: odd image numbers -> train, even -> test."""
        split = self.split
        split_charID = []
        for k, v in self.charID_to_img_path.items():
            filename = os.path.basename(v)
            # Image file names look like '001.jpg'; parity of the number
            # decides the split.
            file_number = int(filename.split('.')[0])
            if file_number % 2 == 0 and split == 'test':
                split_charID.append(k)
            if file_number % 2 != 0 and split == 'train':
                split_charID.append(k)
        return split_charID
@DatasetFactory.register('CHAEDClassificationDataset')
class CHAEDClassificationDataset(CHAEDBaseDataset):
    """
    CHAED Classification Dataset for ternary classification.

    Targets are the integer labels produced by the base class's
    get_distribution_and_ternary(): the argmax over [p(0), p(50), p(100)].
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # charID -> ternary class index computed by the base class.
        self._targets = self.charID_to_ternary_label
@DatasetFactory.register('CHAEDDistributionDataset')
class CHAEDDistributionDataset(CHAEDBaseDataset):
    """
    CHAED Classification Dataset for ternary score distribution matching.

    Targets are the [p(0), p(50), p(100)] probability lists computed by the
    base class's get_distribution_and_ternary().
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # charID -> 3-element probability distribution over score levels.
        self._targets = self.charID_to_distribution
@DatasetFactory.register('CHAERegressionDataset')
class CHAERegressionDataset(CHAEDBaseDataset):
    """
    CHAED Classification Dataset for score regression.

    Targets are the per-image average scores parsed from
    ScoreAnalysisPerImage.txt by the base class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # charID -> float average aesthetic score.
        self._targets = self.charID_to_avg_score
if __name__ == '__main__':
    def run_chaed():
        # Smoke test: construct the base dataset and print it.
        d = CHAEDBaseDataset(root='/home/liulizhao/datasets/CHAED', split='test')
        print("===> d ", d)
    def run_CHAEDDataset(CHAEDDataset, _break=True):
        # Iterate both splits of the given dataset class; when _break is
        # truthy, print each sample's size/target (the early-exit `break`
        # below is deliberately disabled).
        from tqdm import tqdm
        d_train = CHAEDDataset(root='/home/liulizhao/datasets/CHAED', split='train')
        d_test = CHAEDDataset(root='/home/liulizhao/datasets/CHAED', split='test')
        print("===> Train: \n", d_train)
        print("===> Test: \n", d_test)
        for d in [d_train, d_test]:
            for image, target in tqdm(d):
                if _break:
                    print("===> image: ", image.size)
                    print("===> target: ", target)
                # break
    def run_all_dataset():
        # Exercise every registered CHAED dataset variant.
        break_or_not = True
        run_CHAEDDataset(CHAEDClassificationDataset, break_or_not)
        run_CHAEDDataset(CHAEDDistributionDataset, break_or_not)
        run_CHAEDDataset(CHAERegressionDataset, break_or_not)
    def visualize_CHAEDDataset(CHAEDDataset, num=3):
        # Show `num` random (train, test) sample pairs side by side.
        from random import shuffle
        import matplotlib.pyplot as plt
        d_train = CHAEDDataset(root='/home/liulizhao/datasets/CHAED', split='train')
        d_test = CHAEDDataset(root='/home/liulizhao/datasets/CHAED', split='test')
        train_ids = list(range(len(d_train)))
        test_ids = list(range(len(d_test)))
        shuffle(train_ids)
        shuffle(test_ids)
        for train_id, test_id in zip(train_ids[:num], test_ids[:num]):
            image, target = d_train[train_id]
            if isinstance(target, list):
                target = [round(t, 2) for t in target]
            else:
                target = round(target, 2)
            plt.subplot(121)
            plt.axis('off')
            plt.imshow(image)
            plt.title("{0} Train\n Label: {1}".format(CHAEDDataset.__name__, target))
            plt.tight_layout()
            image, target = d_test[test_id]
            if isinstance(target, list):
                target = [round(t, 2) for t in target]
            else:
                target = round(target, 2)
            # FIX: a stray plt.subplot(121) here re-selected the train axes
            # only to be immediately replaced by plt.subplot(122); the
            # redundant call has been removed.
            plt.subplot(122)
            plt.axis('off')
            plt.imshow(image)
            plt.title("{0} Test\n Label: {1}".format(CHAEDDataset.__name__, target))
            plt.tight_layout()
            plt.show()
            plt.close()
    def visualize_all_dataset():
        visualize_CHAEDDataset(CHAEDClassificationDataset, 3)
        visualize_CHAEDDataset(CHAEDDistributionDataset, 3)
        visualize_CHAEDDataset(CHAERegressionDataset, 3)
    # run_chaed()
    run_all_dataset()
    # visualize_all_dataset()
| [
"matplotlib.pyplot.subplot",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"os.path.basename",
"numpy.argmax",
"random.shuffle",
"matplotlib.pyplot.imshow",
"common.utils.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"PIL.Image.open",
"dataset.DatasetFactory.register",
"matplotlib.pyp... | [((6582, 6635), 'dataset.DatasetFactory.register', 'DatasetFactory.register', (['"""CHAEDClassificationDataset"""'], {}), "('CHAEDClassificationDataset')\n", (6605, 6635), False, 'from dataset import DatasetFactory\n'), ((6905, 6956), 'dataset.DatasetFactory.register', 'DatasetFactory.register', (['"""CHAEDDistributionDataset"""'], {}), "('CHAEDDistributionDataset')\n", (6928, 6956), False, 'from dataset import DatasetFactory\n'), ((7236, 7284), 'dataset.DatasetFactory.register', 'DatasetFactory.register', (['"""CHAERegressionDataset"""'], {}), "('CHAERegressionDataset')\n", (7259, 7284), False, 'from dataset import DatasetFactory\n'), ((1979, 2012), 'common.utils.join', 'join', (['self.root', '"""Characters.txt"""'], {}), "(self.root, 'Characters.txt')\n", (1983, 2012), False, 'from common.utils import join\n'), ((2053, 2090), 'common.utils.join', 'join', (['self.root', '"""EvaluationData.txt"""'], {}), "(self.root, 'EvaluationData.txt')\n", (2057, 2090), False, 'from common.utils import join\n'), ((2140, 2184), 'common.utils.join', 'join', (['self.root', '"""ScoreAnalysisPerImage.txt"""'], {}), "(self.root, 'ScoreAnalysisPerImage.txt')\n", (2144, 2184), False, 'from common.utils import join\n'), ((9014, 9032), 'random.shuffle', 'shuffle', (['train_ids'], {}), '(train_ids)\n', (9021, 9032), False, 'from random import shuffle\n'), ((9041, 9058), 'random.shuffle', 'shuffle', (['test_ids'], {}), '(test_ids)\n', (9048, 9058), False, 'from random import shuffle\n'), ((2403, 2430), 'common.utils.join', 'join', (['self.root', 'v[0]', 'v[1]'], {}), '(self.root, v[0], v[1])\n', (2407, 2430), False, 'from common.utils import join\n'), ((3867, 3903), 'numpy.argmax', 'np.argmax', (['charID_to_distribution[k]'], {}), '(charID_to_distribution[k])\n', (3876, 3903), True, 'import numpy as np\n'), ((6217, 6236), 'os.path.basename', 'os.path.basename', (['v'], {}), '(v)\n', (6233, 6236), False, 'import os\n'), ((8100, 8107), 'tqdm.tqdm', 'tqdm', (['d'], {}), 
'(d)\n', (8104, 8107), False, 'from tqdm import tqdm\n'), ((9345, 9361), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (9356, 9361), True, 'import matplotlib.pyplot as plt\n'), ((9374, 9389), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9382, 9389), True, 'import matplotlib.pyplot as plt\n'), ((9402, 9419), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9412, 9419), True, 'import matplotlib.pyplot as plt\n'), ((9518, 9536), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9534, 9536), True, 'import matplotlib.pyplot as plt\n'), ((9749, 9765), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (9760, 9765), True, 'import matplotlib.pyplot as plt\n'), ((9778, 9794), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (9789, 9794), True, 'import matplotlib.pyplot as plt\n'), ((9807, 9822), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9815, 9822), True, 'import matplotlib.pyplot as plt\n'), ((9835, 9852), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9845, 9852), True, 'import matplotlib.pyplot as plt\n'), ((9950, 9968), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9966, 9968), True, 'import matplotlib.pyplot as plt\n'), ((9981, 9991), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9989, 9991), True, 'import matplotlib.pyplot as plt\n'), ((10004, 10015), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10013, 10015), True, 'import matplotlib.pyplot as plt\n'), ((3325, 3347), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (3335, 3347), False, 'from PIL import Image\n')] |
import numpy as np
import pandas as pd
import tensorflow as tf
import cv2
import io
import os
from PIL import Image
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from preprocess import preprocess, rgb2gray, show_image
data = []
labels = []
label_index = 0
DATAPATH = 'C:/Users/Arun/alex/flywave/gestures_data/'
width = None
height = None
scale = 3
# Load the 'ok' and 'rock' gesture folders and build the (image, label) lists.
for label in os.listdir(DATAPATH):
    if label == 'ok' or label == 'rock':
        img_dir = DATAPATH + label + '/'
        for fn in os.listdir(img_dir):
            print(img_dir + fn)
            # Skip hidden/metadata files such as '.DS_Store'
            # (the old check compared against "_DS_Store", which never matches).
            if not fn.startswith('.'):
                try:
                    img = cv2.imread(img_dir + fn, 1)
                    print(img.shape)
                    # Remember the native frame size from the first readable image.
                    if width is None or height is None:
                        height = img.shape[0]
                        width = img.shape[1]
                    img = preprocess(img, scale)
                    if fn == 'image_865':
                        cv2.imshow("ex", img)
                    data.append(img)
                    labels.append(label_index)
                except Exception:
                    # Unreadable/corrupt image files are skipped on purpose.
                    pass
        # BUGFIX: only advance the class index for folders that were actually
        # loaded; incrementing once per directory entry could produce label
        # values >= 2, which breaks to_categorical(num_classes=2) downstream.
        label_index += 1
print("Finished loading data")
data = np.array(data)
labels = np.array(labels)
np.save('arm_data.npy', data)
np.save('arm_labels.npy', labels)
# data = np.load('data.npy')
# labels = np.load('labels.npy')
# Hold out 20% of the samples for evaluation (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=6)
batch_size = 8
num_classes = 2
epochs = 12
channels = 1
# input image dimensions (frames were downscaled by `scale` during loading)
img_rows, img_cols = height // scale, width // scale
# Keras backends disagree on where the channel axis lives, so build the
# per-sample shape once and reshape both splits with it.
if K.image_data_format() == 'channels_first':
    input_shape = (channels, img_rows, img_cols)
else:
    input_shape = (img_rows, img_cols, channels)
x_train = x_train.reshape((x_train.shape[0],) + input_shape)
x_test = x_test.reshape((x_test.shape[0],) + input_shape)
# Scale pixel intensities into [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
from cnn_model import createModel
# Build, train and score the CNN on the held-out split.
model = createModel(input_shape, 2)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| [
"preprocess.preprocess",
"keras.optimizers.Adadelta",
"numpy.save",
"cnn_model.createModel",
"keras.backend.image_data_format",
"sklearn.model_selection.train_test_split",
"cv2.imread",
"numpy.array",
"cv2.imshow",
"os.listdir",
"keras.utils.to_categorical"
] | [((612, 632), 'os.listdir', 'os.listdir', (['DATAPATH'], {}), '(DATAPATH)\n', (622, 632), False, 'import os\n'), ((1415, 1429), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1423, 1429), True, 'import numpy as np\n'), ((1439, 1455), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1447, 1455), True, 'import numpy as np\n'), ((1457, 1486), 'numpy.save', 'np.save', (['"""arm_data.npy"""', 'data'], {}), "('arm_data.npy', data)\n", (1464, 1486), True, 'import numpy as np\n'), ((1487, 1520), 'numpy.save', 'np.save', (['"""arm_labels.npy"""', 'labels'], {}), "('arm_labels.npy', labels)\n", (1494, 1520), True, 'import numpy as np\n'), ((1620, 1681), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': '(6)'}), '(data, labels, test_size=0.2, random_state=6)\n', (1636, 1681), False, 'from sklearn.model_selection import train_test_split\n'), ((2596, 2644), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2622, 2644), False, 'import keras\n'), ((2654, 2701), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2680, 2701), False, 'import keras\n'), ((2745, 2772), 'cnn_model.createModel', 'createModel', (['input_shape', '(2)'], {}), '(input_shape, 2)\n', (2756, 2772), False, 'from cnn_model import createModel\n'), ((1864, 1885), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1883, 1885), True, 'from keras import backend as K\n'), ((734, 753), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (744, 753), False, 'import os\n'), ((2856, 2883), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (2881, 2883), False, 'import keras\n'), ((868, 895), 'cv2.imread', 'cv2.imread', (['(img_dir + fn)', '(1)'], {}), '(img_dir + fn, 1)\n', (878, 895), False, 'import cv2\n'), ((1106, 1128), 
'preprocess.preprocess', 'preprocess', (['img', 'scale'], {}), '(img, scale)\n', (1116, 1128), False, 'from preprocess import preprocess, rgb2gray, show_image\n'), ((1195, 1216), 'cv2.imshow', 'cv2.imshow', (['"""ex"""', 'img'], {}), "('ex', img)\n", (1205, 1216), False, 'import cv2\n')] |
#import required stuff
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
nltk.download('wordnet')
import re #for working with regular expression
import nltk #for natural language processing (nlp)
import string #This is a module, Python also has built-in class str, these are different
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from gensim.models.word2vec import Word2Vec
from gensim.models import KeyedVectors
# Load the merged dataframe and drop bibliographic columns that are not
# needed for text-based similarity.
df_cleaned = pd.read_csv('df_final.csv')
df_cleaned.drop(['Issue', 'Art. No.', 'Page start', 'Page end',
                 'Page count', 'Cited by', 'Document Type',
                 'Source'], axis=1, inplace=True)
# Quick sanity check of what remains.
print(df_cleaned.head(3))
df_cleaned.info()
# Normalise every text column: lower-case, then strip all punctuation
# with a single translation table.
punctuation = string.punctuation
mapping = str.maketrans("", "", punctuation)
for _column in ['Abstract', 'Source title', 'Author Keywords', 'Index Keywords']:
    df_cleaned[_column] = df_cleaned[_column].str.lower().str.translate(mapping)
# The keyword columns can contain missing values; make them empty strings.
for _column in ['Author Keywords', 'Index Keywords']:
    df_cleaned[_column] = df_cleaned[_column].fillna('')
nltk.download('stopwords')
# Base English stop-word list extended with corpus-specific filler tokens.
nltk_stopwords = stopwords.words('english')
nltk_stopwords += ['mentalhealthmatters','positive','mentalhealthawareness','like','life','amp','feeling','background','recent','however','also','use','using','used']
print(type(stopwords.words()), len(stopwords.words()))
print(type(stopwords.words('english')), len(stopwords.words('english')))
def remove_stopwords(in_str, stopwords=None):
    '''
    Remove stop words from a whitespace-separated string.

    :params: in_str: input text (already lower-cased / de-punctuated)
    :params: stopwords: optional iterable of words to drop; defaults to the
             module-level ``nltk_stopwords`` list (backward compatible)
    :return: the surviving words, each followed by a single space -- the
             same output format as before, including the trailing space;
             '' when nothing survives
    '''
    # A set gives O(1) membership tests instead of scanning the stop-word
    # list once per token; join avoids quadratic string concatenation.
    stop = set(nltk_stopwords if stopwords is None else stopwords)
    return ''.join(word + ' ' for word in in_str.split() if word not in stop)
# Strip stop words from the main free-text columns.
df_cleaned['Abstract'] = df_cleaned['Abstract'].apply(remove_stopwords)
df_cleaned['Source title'] = df_cleaned['Source title'].apply(remove_stopwords)
# Combined text field: abstract + venue + title + both keyword columns.
df_cleaned['All'] = (df_cleaned['Abstract'] + df_cleaned['Source title']
                     + df_cleaned['Title'] + df_cleaned['Index Keywords']
                     + df_cleaned['Author Keywords'])
df_cleaned['All'] = df_cleaned['All'].apply(remove_stopwords)
from nltk.stem.porter import PorterStemmer
# Single shared PorterStemmer instance reused across all calls.
stemmer = PorterStemmer()
def do_stemming(in_str):
    '''
    Porter-stem every whitespace-separated token in ``in_str``.

    :params: in_str: input text
    :return: stemmed tokens, each followed by a single space (same output
             format as before, including the trailing space; '' for empty input)
    '''
    # join builds the result in one pass instead of quadratic += concatenation.
    return ''.join(stemmer.stem(word) + ' ' for word in in_str.split())
# trdf["Stemmed"]=trdf["lowered_stop_freq_rare_removed"].apply(lambda x: do_stemming(x))
df_cleaned['Abstract']=df_cleaned['Abstract'].apply(lambda x: do_stemming(x))
df_cleaned['Source title']= df_cleaned['Source title'].apply(lambda x: do_stemming(x))
# df_cleaned['Author_Keywords_Title'] = df_cleaned['Author_Keywords_Title'].apply(lambda x: do_stemming(x))
# df_cleaned['Index_Keywords_Title'] = df_cleaned['Index_Keywords_Title'].apply(lambda x: do_stemming(x))
# df_cleaned['Author_Keywords_Source_title'] = df_cleaned['Author_Keywords_Source_title'].apply(lambda x: do_stemming(x))
# df_cleaned['Index_Keywords_Source_title'] = df_cleaned['Index_Keywords_Source_title'].apply(lambda x: do_stemming(x))
# df_cleaned['Author_Keywords_Abstract'] = df_cleaned['Author_Keywords_Abstract'].apply(lambda x: do_stemming(x))
# df_cleaned['Index_Keywords_Abstract'] = df_cleaned['Index_Keywords_Abstract'].apply(lambda x: do_stemming(x))
df_cleaned['All'] = df_cleaned['All'].apply(lambda x: do_stemming(x))
#Confirm after stemming
# print(trdf["Stemmed"].head(5))
#Note changes in the output, you may not be happy, another option is SnowballStemmer
#---------------
# def remove_frequentwords(in_str):
# new_str=''
# words = in_str.split() #string is splitted through white space in a list of words
# for tx in words:
# if tx not in nltk_stopwords:
# new_str=new_str + tx + " "
# return new_str
# #-------------------------
# from collections import Counter
# def CountFrequent(in_str):
# counter=Counter()
# for text in in_str:
# for word in text.split():
# counter[word]+=1
# print(type(counter))
# #list with 10 most frequent word. List is a list of (10) tuples
# most_cmn_list=counter.most_common(10)
# print(type(most_cmn_list), most_cmn_list) #type is list (list of tuples/word,frequency pair)
# most_cmn_words_list=[]
# for word, freq in most_cmn_list:
# most_cmn_words_list.append(word)
# return most_cmn_words_list
# #------------------------------------
# #Remove top 10 frequent words
# most_cmn_words_abstract = CountFrequent(df_cleaned['Abstract'])
# most_cmn_words_src = CountFrequent(df_cleaned['Source title'])
#function to remove words
# def remove_frequent(in_str, cmn_words):
# new_str=''
# for word in in_str.split():
# if word not in cmn_words:
# new_str=new_str + word + " "
# return new_str
# df_cleaned['Abstract']=df_cleaned['Abstract'].apply(lambda x: remove_frequent(x,most_cmn_words_abstract))
# df_cleaned['Source title']=df_cleaned['Source title'].apply(lambda x: remove_frequent(x,most_cmn_words_src))
#print(train["lowered_text_stop_removed_freq_removed"].head(10))
# df_cleaned.to_csv('cleaned_first_draft.csv')
# df = pd.read_csv('cleaned_first_draft.csv')
#------------------------
# import matplotlib.pyplot as plt
# import seaborn as sns
# nltk.download('stopwords')
# nltk.download('wordnet')
# nltk_stopwords = nltk.corpus.stopwords.words('english')
#-----------------------------WORDCLOUD--------------
# from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# def create_wordcloud(text_series, title):
# wc = WordCloud(background_color = 'white', stopwords=nltk_stopwords, height = 600, width = 600)
# words = ' '.join(w for w in text_series)
# wc.generate(words)
# plt.figure(figsize=(10,10))
# plt.imshow(wc, interpolation='bilinear')
# plt.title(title, fontsize= 20)
# plt.axis('off')
# plt.show()
# create_wordcloud(df['Abstract'], 'Cloud 1')
# #------------------------
# create_wordcloud(df['Author_Keywords_Title'], 'Cloud 1')
# #---------------NLTK---------------
# Fetch the Punkt sentence/word tokenizer models required by word_tokenize.
nltk.download('punkt')
from nltk.tokenize import word_tokenize
# Tokenise the combined text column: each row of 'All' becomes a list of tokens.
df_cleaned['All'] = df_cleaned['All'].apply(word_tokenize)
#------------------TFIDF APPROACH--------------
# from nltk.stem import WordNetLemmatizer
# def tokenizer(sentence, stopwords=nltk_stopwords, lemmatize=True):
# """
# Lemmatize, tokenize, crop and remove stop words.
# """
# if lemmatize:
# stemmer = WordNetLemmatizer()
# tokens = [stemmer.lemmatize(w) for w in word_tokenize(sentence)]
# else:
# tokens = [w for w in word_tokenize(sentence)]
# token = [w for w in tokens if (len(w) > 2 and len(w) < 400
# and w not in stopwords)]
# return tokens
#-------------------
# # Adapt stop words
# token_stop = tokenizer(' '.join(nltk_stopwords), lemmatize=False)
# # Fit TFIDF
# vectorizer = TfidfVectorizer(stop_words=token_stop, tokenizer=tokenizer)
# tfidf_mat = vectorizer.fit_transform(df_cleaned['All'].apply(lambda x: ' '.join(x)).values) # -> (num_sentences, num_vocabulary)
# tfidf_mat.shape
#-----------------------
# from sklearn.metrics.pairwise import cosine_similarity
# def extract_best_indices(m, topk, mask=None):
# # return the sum on all tokens of cosinus for each sentence
# if len(m.shape) > 1:
# cos_sim = np.mean(m, axis=0)
# else:
# cos_sim = m
# index = np.argsort(cos_sim)[::-1] # from highest idx to smallest score
# if mask is not None:
# assert mask.shape == m.shape
# mask = mask[index]
# else:
# mask = np.ones(len(cos_sim))
# mask = np.logical_or(cos_sim[index] != 0, mask) #eliminate 0 cosine distance
# best_index = index[mask][:topk]
# return best_index
# def get_recommendations_tfidf(sentence, tfidf_mat):
# # Embed the query sentence
# tokens = [str(tok) for tok in tokenizer(sentence)]
# vec = vectorizer.transform(tokens)
# # Create list with similarity between query and dataset
# mat = cosine_similarity(vec, tfidf_mat)
# # Best cosine distance for each token independantly
# # print(mat.shape)
# best_index = extract_best_indices(mat, topk=5)
# return best_index
# query_sentence= 'classification'
# Def to send few random recommendation initially
def sampling(n=5):
    """Return ``n`` randomly sampled papers to seed the UI before any query.

    Generalized from the original hard-coded 5; calling ``sampling()`` with no
    arguments keeps the original behavior.

    :param n: number of random rows to draw (default 5)
    :return: DataFrame with Title/Authors/Year/Link columns, index starting at 1
    """
    df = df_cleaned[['Title', 'Authors', 'Year', 'Link']].sample(n).reset_index(drop=True)
    # 1-based index reads better when shown to the user.
    df.index = df.index + 1
    return df
# Run this while training
#------------------WORD2VEC APPROACH--------------
# Create model
# NOTE(review): `size=` is the gensim < 4.0 keyword (renamed `vector_size`
# in gensim 4.x); min_count=0 keeps every token in the vocabulary --
# confirm the pinned gensim version before upgrading.
w2v_model = Word2Vec(min_count=0, workers = 8, size=400)
# Prepare vocab
w2v_model.build_vocab(df_cleaned['All'].values)
# Train
w2v_model.train(df_cleaned['All'].values, total_examples=w2v_model.corpus_count, epochs=30)
#Saving the model for backup
w2v_model.save('vectors.kv')
def prediction_w2v(query_sentence, dataset, model, topk=5):
    """Return indices of the ``topk`` dataset sentences most similar to the query.

    :param query_sentence: whitespace-separated query string (already lower-cased)
    :param dataset: iterable of token lists, one per document
    :param model: trained gensim Word2Vec model (similarity via ``model.wv``)
    :param topk: number of best indices to return
    :return: array of ``topk`` indices in order of decreasing similarity, or the
             list ``[0]*topk`` when no query word is in the model vocabulary
    """
    query_words = query_sentence.split()
    # BUGFIX: only keep words the model actually knows -- the original appended
    # every word, so any out-of-vocabulary query word made
    # model.wv.n_similarity raise a KeyError.
    in_vocab_list = [w for w in query_words if w in model.wv]
    best_index = [0] * topk
    if len(in_vocab_list) > 0:
        similarity_matrix = np.zeros(len(dataset))
        for i, data_sentence in enumerate(dataset):
            if data_sentence:
                # Cosine similarity between the two bags of words.
                similarity_matrix[i] = model.wv.n_similarity(in_vocab_list, data_sentence)
            else:
                similarity_matrix[i] = 0  # empty documents never match
        # Highest similarities first.
        best_index = np.argsort(similarity_matrix)[::-1][:topk]
    return best_index
def recommend(query_sentence):
    """Return the five papers best matching ``query_sentence`` (index starts at 1)."""
    query = query_sentence.lower()
    top = prediction_w2v(query, df_cleaned['All'].values, w2v_model)
    result = df_cleaned[['Title', 'Authors', 'Year', 'Link']].iloc[top]
    result = result.reset_index(drop=True)
    result.index = result.index + 1
    return result
def more(query_sentence, n):
    """Like recommend(), but fetch the top ``5*n`` matches ("show more" paging)."""
    query = query_sentence.lower()
    top = prediction_w2v(query, df_cleaned['All'].values, w2v_model, topk=5 * n)
    result = df_cleaned[['Title', 'Authors', 'Year', 'Link']].iloc[top]
    result = result.reset_index(drop=True)
    result.index = result.index + 1
    return result
# display(df[['Title']].iloc[best_index])
#----------------------AUTOENCODER APPROACH--------------------------------
# # defining constants / hyperparametrs
# num_words = 2000
# maxlen = 30
# embed_dim = 150
# batch_size = 16
# # preprocessing the input
# from keras.preprocessing.sequence import pad_sequences
# from keras.preprocessing.text import Tokenizer
# tokenizer = Tokenizer(num_words = num_words, split=' ')
# tokenizer.fit_on_texts(df_cleaned['All'].apply(lambda x: ' '.join(x)).values)
# seq = tokenizer.texts_to_sequences(df_cleaned['All'].apply(lambda x: ' '.join(x)).values)
# pad_seq = pad_sequences(seq, maxlen)
# import tensorflow as tf
# import keras
# from keras import Input
# from keras.layers import Embedding,Bidirectional,LSTM,Dense,RepeatVector,Dense
# from keras import Model
# print(pad_seq.shape)
# # creating the encoder model
# encoder_inputs = Input(shape=(maxlen,), name='Encoder-Input')
# emb_layer = Embedding(num_words, embed_dim,input_length = maxlen, name='Body-Word-Embedding', mask_zero=False)
# x = emb_layer(encoder_inputs)
# #encoder LSTM
# state_h = Bidirectional(LSTM(128, activation='relu', name='Encoder-Last-LSTM'))(x)
# encoder_model = Model(inputs=encoder_inputs, outputs=state_h, name='Encoder-Model')
# seq2seq_encoder_out = encoder_model(encoder_inputs)
# decoded = RepeatVector(maxlen)(seq2seq_encoder_out)
# decoder_lstm = Bidirectional(LSTM(128, return_sequences=True, name='Decoder-LSTM-before'))
# decoder_lstm_output = decoder_lstm(decoded)
# decoder_dense = Dense(num_words, activation='softmax', name='Final-Output-Dense-before')
# decoder_outputs = decoder_dense(decoder_lstm_output)
# # fitting the model
# seq2seq_Model = Model(encoder_inputs, decoder_outputs)
# seq2seq_Model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
# history = seq2seq_Model.fit(pad_seq, np.expand_dims(pad_seq, -1),
# batch_size=batch_size,
# epochs=10)
# vecs = encoder_model.predict(pad_seq)
# from sklearn.metrics.pairwise import cosine_similarity
# from sklearn.feature_extraction.text import TfidfVectorizer
# def extract_best_indices(m, topk, mask=None):
# # return the sum on all tokens of cosinus for each sentence
# if len(m.shape) > 1:
# cos_sim = np.mean(m, axis=0)
# else:
# cos_sim = m
# index = np.argsort(cos_sim)[::-1] # from highest idx to smallest score
# if mask is not None:
# assert mask.shape == m.shape
# mask = mask[index]
# else:
# mask = np.ones(len(cos_sim))
# mask = np.logical_or(cos_sim[index] != 0, mask) #eliminate 0 cosine distance
# best_index = index[mask][:topk]
# return best_index
# def get_recommendations_tfidf(vec, vecs):
# # Embed the query sentence
# # vec = vecs
# # Create list with similarity between query and dataset
# mat = cosine_similarity(vec, vecs)
# # Best cosine distance for each token independantly
# # print(mat.shape)
# best_index = extract_best_indices(mat, topk=5)
# return best_index
# # prediction
# query_sentence= 'deep learning using stock'
# seq = tokenizer.texts_to_sequences(query_sentence)
# pad_seq = pad_sequences(seq, maxlen)
# sentence_vec = encoder_model.predict(pad_seq)
# best_index = get_recommendations_tfidf(sentence_vec, vecs)
# display(df[['Title']].iloc[best_index])
| [
"pandas.read_csv",
"nltk.stem.porter.PorterStemmer",
"numpy.argsort",
"numpy.array",
"nltk.corpus.stopwords.words",
"nltk.download",
"gensim.models.word2vec.Word2Vec"
] | [((113, 137), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (126, 137), False, 'import nltk\n'), ((574, 601), 'pandas.read_csv', 'pd.read_csv', (['"""df_final.csv"""'], {}), "('df_final.csv')\n", (585, 601), True, 'import pandas as pd\n'), ((1836, 1862), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (1849, 1862), False, 'import nltk\n'), ((1885, 1911), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1900, 1911), False, 'from nltk.corpus import stopwords\n'), ((4391, 4406), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (4404, 4406), False, 'from nltk.stem.porter import PorterStemmer\n'), ((8363, 8385), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (8376, 8385), False, 'import nltk\n'), ((11844, 11886), 'gensim.models.word2vec.Word2Vec', 'Word2Vec', ([], {'min_count': '(0)', 'workers': '(8)', 'size': '(400)'}), '(min_count=0, workers=8, size=400)\n', (11852, 11886), False, 'from gensim.models.word2vec import Word2Vec\n'), ((2097, 2114), 'nltk.corpus.stopwords.words', 'stopwords.words', ([], {}), '()\n', (2112, 2114), False, 'from nltk.corpus import stopwords\n'), ((2121, 2138), 'nltk.corpus.stopwords.words', 'stopwords.words', ([], {}), '()\n', (2136, 2138), False, 'from nltk.corpus import stopwords\n'), ((2155, 2181), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2170, 2181), False, 'from nltk.corpus import stopwords\n'), ((2188, 2214), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2203, 2214), False, 'from nltk.corpus import stopwords\n'), ((12769, 12795), 'numpy.array', 'np.array', (['similar_sentence'], {}), '(similar_sentence)\n', (12777, 12795), True, 'import numpy as np\n'), ((12856, 12885), 'numpy.argsort', 'np.argsort', (['similarity_matrix'], {}), '(similarity_matrix)\n', (12866, 12885), True, 'import 
numpy as np\n')] |
import numpy as np
from pymor.basic import *
from pymor.bindings.dunext import DuneXTMatrixOperator, DuneXTVectorSpace
from pymor.bindings.dunegdt import DuneGDTVisualizer
from pymor.vectorarrays.list import ListVectorArray
from dune.xt.la import IstlDenseVectorDouble
from dune.xt.functions import ConstantFunction__2d_to_1x1 as ConstantFunction
from dune.xt.functions import ExpressionFunction__2d_to_1x1 as ExpressionFunction
# Module logger; greedy-step internals and pyMOR itself are muted to warnings.
logger = getLogger('main.main')
set_log_levels({'main': 'INFO', 'main.simulate_single_greedy_step': 'WARN', 'pymor': 'WARN'})
# Affinely decomposed diffusion: d(x; mu) = 1*lambda_1(x) + mu['switch']*lambda_2(x),
# i.e. 1 + (1 - mu['switch'])*cos(0.5*pi*x0)*cos(0.5*pi*x1).
diffusion = {
    'functions': (ExpressionFunction('x', ['1+cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'], 3, 'lambda_1'),
                  ExpressionFunction('x', ['-cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'], 3, 'lambda_2')),
    'coefficients': (ExpressionParameterFunctional('1', {'switch': ()}), ProjectionParameterFunctional('switch', ()))
}
# Non-affine scalar template for the diffusion at one fixed parameter value.
diffusion_expression = '1+(1-{})*cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'
# Fixed parameters for the error estimator: energy norm taken at mu_bar,
# flux reconstruction at mu_hat.
mu_bar = {'switch': 0.1}
mu_hat = {'switch': 0.1}
diffusion_bar = ExpressionFunction('x', [diffusion_expression.format(mu_bar['switch'])], 3, 'diffusion_mu_bar')
diffusion_hat = ExpressionFunction('x', [diffusion_expression.format(mu_hat['switch'])], 3, 'diffusion_mu_hat')
# Right-hand side of the ESV2007 test problem.
f = ExpressionFunction('x', ['0.5*pi*pi*cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'], 3, 'f')
zero = ConstantFunction(0.)
def alpha(mu, mu_bar):
    """Smallest coefficient ratio theta(mu)/theta(mu_bar) over the affine components."""
    ratios = [theta.evaluate(mu) / theta.evaluate(mu_bar)
              for theta in diffusion['coefficients']]
    return np.min(ratios)
def gamma(mu, mu_bar):
    """Largest coefficient ratio theta(mu)/theta(mu_bar) over the affine components."""
    ratios = [theta.evaluate(mu) / theta.evaluate(mu_bar)
              for theta in diffusion['coefficients']]
    return np.max(ratios)
from dune.gdt.gamm_2019_talk_on_conservative_rb import (
DiscontinuousLagrangeSpace,
GridProvider,
RaviartThomasSpace,
assemble_energy_semi_product_matrix,
assemble_DG_product_matrix,
assemble_Hdiv_product_matrix,
assemble_L2_vector,
assemble_SWIPDG_matrix,
compute_estimate,
compute_flux_reconstruction,
compute_local_conservation_error,
make_discrete_function,
prolong,
visualize,
)
def make_marix_operator(mat, ID):
    """Wrap a dune-xt matrix as a pyMOR operator whose source/range spaces are tagged ``ID``."""
    # NOTE(review): "marix" is a typo for "matrix"; name kept because call
    # sites throughout this script use it.
    return DuneXTMatrixOperator(mat, source_id=ID, range_id=ID)
logger.info('discretizing ...')
def discretize(num_refinements):
    """Build the grid, DG space, DG product and the parametric full-order model.

    :param num_refinements: number of uniform refinements applied to the 4x4 base grid
    :return: tuple ``(grid, dg_space, dg_product, fom)``
    """
    grid = GridProvider([-1, -1], [1, 1], [4, 4]) # The ESV2007 test is [-1, 1]^2, 4x4 elements, ...
    grid.refine(num_refinements)
    dg_space = DiscontinuousLagrangeSpace(grid, 1)
    # Affine decomposition of the SWIPDG bilinear form: one assembled matrix
    # per diffusion component, combined with the parameter functionals.
    lhs_op = LincombOperator(
        [make_marix_operator(assemble_SWIPDG_matrix(dg_space, diff), 'PRESSURE') for diff in diffusion['functions']],
        diffusion['coefficients'])
    rhs_func = VectorFunctional(lhs_op.range.make_array((assemble_L2_vector(dg_space, f),)))
    dg_product = make_marix_operator(assemble_DG_product_matrix(dg_space), 'PRESSURE')
    fom = StationaryDiscretization(
        lhs_op, rhs_func, products={'energy_penalty': dg_product}, visualizer=DuneGDTVisualizer(dg_space))
    # The 'switch' parameter ranges over [0.1, 1].
    fom = fom.with_(parameter_space=CubicParameterSpace(fom.parameter_type, 0.1, 1.))
    # Solutions are cached on disk, so repeated solves for the same mu are free.
    fom.enable_caching('disk')
    return grid, dg_space, dg_product, fom
# Coarse "work" discretization used for the reduced-basis construction.
grid, dg_space, dg_product, fom = discretize(2) # ... and 2 refinements with ALU_2D_SIMPLEX_CONFORMING
PressureVectorSpace = DuneXTVectorSpace(IstlDenseVectorDouble, dg_space.num_DoFs, 'PRESSURE')
logger.info('grid has {} elements'.format(grid.num_elements))
logger.info('space has {} DoFs'.format(dg_space.num_DoFs))
logger.info('computing reference discretization ...')
# Much finer reference discretization (2 + 3*2 refinements) used to measure
# "true" errors of the reduced solutions.
reference_grid, reference_dg_space, _, reference_fom = discretize(2 + 3 * 2)
# Energy semi-product at mu_bar on the reference space; it induces the norm
# used for the reference error below.
reference_energy_semi_product = make_marix_operator(
    assemble_energy_semi_product_matrix(reference_dg_space, diffusion_bar), 'PRESSURE')
ReferencePressureVectorSpace = DuneXTVectorSpace(IstlDenseVectorDouble, reference_dg_space.num_DoFs, 'PRESSURE')
def reference_dg_norm(u):
    """Energy semi-norm of ``u`` w.r.t. the reference product at ``mu_bar``.

    Accepts either a pyMOR ``ListVectorArray`` or a raw dune-xt vector, which
    is wrapped into the reference pressure space first.
    """
    vec = u if isinstance(u, ListVectorArray) else ReferencePressureVectorSpace.from_data([u])
    norm_sq = reference_energy_semi_product.apply2(vec, vec)[0][0]
    return np.sqrt(norm_sq)
logger.info('reference grid has {} elements'.format(reference_grid.num_elements))
logger.info('reference space has {} DoFs'.format(reference_dg_space.num_DoFs))
logger.info('assembling Hdiv product ...')
# Lowest-order Raviart-Thomas space on the work grid, holding reconstructed fluxes.
rtn_space = RaviartThomasSpace(grid, 0)
FluxVectorSpace = DuneXTVectorSpace(IstlDenseVectorDouble, rtn_space.num_DoFs, 'FLUX')
rtn_product = make_marix_operator(assemble_Hdiv_product_matrix(rtn_space), 'FLUX')
def rtn_norm(t):
    """H(div) norm of the flux ``t`` w.r.t. the assembled RTN product matrix.

    Accepts either a pyMOR ``ListVectorArray`` or a raw dune-xt vector, which
    is wrapped into the flux space first.
    """
    vec = t if isinstance(t, ListVectorArray) else FluxVectorSpace.from_data([t])
    norm_sq = rtn_product.apply2(vec, vec)[0][0]
    return np.sqrt(norm_sq)
logger.info('computing ESV2007 pressure and flux ...')
u_h_f = make_marix_operator(assemble_SWIPDG_matrix(dg_space, ConstantFunction(1.)),
'PRESSURE').apply_inverse(fom.rhs.as_source_array())
t_h_f = FluxVectorSpace.from_data([
compute_flux_reconstruction(grid, dg_space, rtn_space, ConstantFunction(1.), u_h_f._list[0].impl),
])
logger.info('computing [OS2015, table 1] estimates (should be 0.166, 0.723, 0.355) ...')
_, eta_NC, eta_R, eta_DF = compute_estimate(grid, make_discrete_function(dg_space, u_h_f._list[0].impl, 'u_h_f'),
make_discrete_function(rtn_space, t_h_f._list[0].impl, 't_h_f'), f,
ConstantFunction(1.), ConstantFunction(1.), ConstantFunction(1.), 1, 1, 1)
logger.info(' are {}, {}, {}'.format(eta_NC, eta_R, eta_DF))
logger.info('computing other OS2015 estimates (should be '
'[table 3, eta_NC] 0.182, <= [table 1, eta_R] 0.166, [table 2, eta_DF] 0.316) ...')
mu = {'switch': 1}
diffusion_mu = ExpressionFunction('x', [diffusion_expression.format(mu['switch'])], 3, 'diffusion_mu')
u_h = fom.solve(mu)
t_h = FluxVectorSpace.from_data([
compute_flux_reconstruction(grid, dg_space, rtn_space, diffusion_mu, u_h._list[0].impl),
])
_, eta_NC, eta_R, eta_DF = compute_estimate(grid, make_discrete_function(dg_space, u_h._list[0].impl, 'u_h'),
make_discrete_function(rtn_space, t_h._list[0].impl, 't_h'), f,
diffusion_mu, diffusion_bar, diffusion_hat, alpha(mu, mu_bar),
alpha(mu, mu_hat), gamma(mu, mu_bar))
logger.info(' are {}, {}, {}'.format(eta_NC, eta_R, eta_DF))
from gamm_2019_talk_on_conservative_rb_base import simulate_single_greedy_step
# Re-run the greedy from scratch with an increasing extension budget until an
# additional step no longer enlarges the reduced basis; rely on the FOM's disk
# cache to make repeated solves cheap.
RB_size = 0
for nn in range(1, 100):
    logger.info('simulating greedy step {} ...'.format(nn))
    greedy_data, estimate_data = simulate_single_greedy_step(
        fom,
        dg_product=fom.energy_penalty_product,
        FluxVectorSpace=FluxVectorSpace,
        rtn_product=rtn_product,
        t_h_f=t_h_f,
        # Reconstruct a conservative flux for the reduced pressure at mu.
        compute_flux_reconstruction=lambda mu, u_RB: compute_flux_reconstruction(
            grid, dg_space, rtn_space, ExpressionFunction('x', [diffusion_expression.format(mu['switch'])], 3, 'diffusion_mu'), u_RB),
        # Error estimator for the reduced pair (u_RB, t_RB_f) at mu.
        compute_estimate=lambda mu, u_RB, t_RB_f: compute_estimate(
            grid,
            make_discrete_function(dg_space, u_RB, 'u_RB'),
            make_discrete_function(rtn_space, t_RB_f, 't_RB_f'),
            f,
            ExpressionFunction('x', [diffusion_expression.format(mu['switch'])], 3, 'diffusion_mu'),
            diffusion_bar, diffusion_hat,
            alpha(mu, mu_bar), alpha(mu, mu_hat), gamma(mu, mu_bar)),
        # "True" error: prolong the reduced solution onto the reference space
        # and measure against the reference solve in the energy semi-norm.
        compute_reference_error=lambda mu, u_RB: reference_dg_norm(
            reference_fom.solve(mu)._list[0].impl
            - prolong(dg_space, u_RB, reference_dg_space)),
        max_extensions=nn,
        num_samples=10
    )
    if greedy_data['extensions'] > RB_size:
        RB_size = greedy_data['extensions']
    else:
        # The basis stopped growing, so the greedy has converged.
        logger.info('  finished')
        break
    logger.info('  max greedy error: {}'.format(greedy_data['max_errs'][-1]))
    logger.info('  worst error: {}'.format(np.max(estimate_data['errors'])))
    logger.info('  worst estimate: {}'.format(np.max(estimate_data['etas'])))
    logger.info('  worst efficiency: {}'.format(np.max(estimate_data['efficiencies'])))
| [
"dune.xt.functions.ConstantFunction__2d_to_1x1",
"dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_SWIPDG_matrix",
"pymor.bindings.dunegdt.DuneGDTVisualizer",
"dune.gdt.gamm_2019_talk_on_conservative_rb.compute_flux_reconstruction",
"dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_Hdiv_product_matrix... | [((1237, 1322), 'dune.xt.functions.ExpressionFunction__2d_to_1x1', 'ExpressionFunction', (['"""x"""', "['0.5*pi*pi*cos(0.5*pi*x[0])*cos(0.5*pi*x[1])']", '(3)', '"""f"""'], {}), "('x', ['0.5*pi*pi*cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'], 3, 'f'\n )\n", (1255, 1322), True, 'from dune.xt.functions import ExpressionFunction__2d_to_1x1 as ExpressionFunction\n'), ((1325, 1346), 'dune.xt.functions.ConstantFunction__2d_to_1x1', 'ConstantFunction', (['(0.0)'], {}), '(0.0)\n', (1341, 1346), True, 'from dune.xt.functions import ConstantFunction__2d_to_1x1 as ConstantFunction\n'), ((3204, 3275), 'pymor.bindings.dunext.DuneXTVectorSpace', 'DuneXTVectorSpace', (['IstlDenseVectorDouble', 'dg_space.num_DoFs', '"""PRESSURE"""'], {}), "(IstlDenseVectorDouble, dg_space.num_DoFs, 'PRESSURE')\n", (3221, 3275), False, 'from pymor.bindings.dunext import DuneXTMatrixOperator, DuneXTVectorSpace\n'), ((3702, 3787), 'pymor.bindings.dunext.DuneXTVectorSpace', 'DuneXTVectorSpace', (['IstlDenseVectorDouble', 'reference_dg_space.num_DoFs', '"""PRESSURE"""'], {}), "(IstlDenseVectorDouble, reference_dg_space.num_DoFs,\n 'PRESSURE')\n", (3719, 3787), False, 'from pymor.bindings.dunext import DuneXTMatrixOperator, DuneXTVectorSpace\n'), ((4222, 4249), 'dune.gdt.gamm_2019_talk_on_conservative_rb.RaviartThomasSpace', 'RaviartThomasSpace', (['grid', '(0)'], {}), '(grid, 0)\n', (4240, 4249), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((4268, 4336), 'pymor.bindings.dunext.DuneXTVectorSpace', 'DuneXTVectorSpace', (['IstlDenseVectorDouble', 'rtn_space.num_DoFs', '"""FLUX"""'], {}), 
"(IstlDenseVectorDouble, rtn_space.num_DoFs, 'FLUX')\n", (4285, 4336), False, 'from pymor.bindings.dunext import DuneXTMatrixOperator, DuneXTVectorSpace\n'), ((2094, 2146), 'pymor.bindings.dunext.DuneXTMatrixOperator', 'DuneXTMatrixOperator', (['mat'], {'source_id': 'ID', 'range_id': 'ID'}), '(mat, source_id=ID, range_id=ID)\n', (2114, 2146), False, 'from pymor.bindings.dunext import DuneXTMatrixOperator, DuneXTVectorSpace\n'), ((2227, 2265), 'dune.gdt.gamm_2019_talk_on_conservative_rb.GridProvider', 'GridProvider', (['[-1, -1]', '[1, 1]', '[4, 4]'], {}), '([-1, -1], [1, 1], [4, 4])\n', (2239, 2265), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((2369, 2404), 'dune.gdt.gamm_2019_talk_on_conservative_rb.DiscontinuousLagrangeSpace', 'DiscontinuousLagrangeSpace', (['grid', '(1)'], {}), '(grid, 1)\n', (2395, 2404), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((3587, 3657), 'dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_energy_semi_product_matrix', 'assemble_energy_semi_product_matrix', (['reference_dg_space', 'diffusion_bar'], {}), '(reference_dg_space, diffusion_bar)\n', (3622, 3657), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, 
assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((4371, 4410), 'dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_Hdiv_product_matrix', 'assemble_Hdiv_product_matrix', (['rtn_space'], {}), '(rtn_space)\n', (4399, 4410), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((5105, 5167), 'dune.gdt.gamm_2019_talk_on_conservative_rb.make_discrete_function', 'make_discrete_function', (['dg_space', 'u_h_f._list[0].impl', '"""u_h_f"""'], {}), "(dg_space, u_h_f._list[0].impl, 'u_h_f')\n", (5127, 5167), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((5213, 5276), 'dune.gdt.gamm_2019_talk_on_conservative_rb.make_discrete_function', 'make_discrete_function', (['rtn_space', 't_h_f._list[0].impl', '"""t_h_f"""'], {}), "(rtn_space, t_h_f._list[0].impl, 't_h_f')\n", (5235, 5276), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, 
visualize\n'), ((5325, 5346), 'dune.xt.functions.ConstantFunction__2d_to_1x1', 'ConstantFunction', (['(1.0)'], {}), '(1.0)\n', (5341, 5346), True, 'from dune.xt.functions import ConstantFunction__2d_to_1x1 as ConstantFunction\n'), ((5347, 5368), 'dune.xt.functions.ConstantFunction__2d_to_1x1', 'ConstantFunction', (['(1.0)'], {}), '(1.0)\n', (5363, 5368), True, 'from dune.xt.functions import ConstantFunction__2d_to_1x1 as ConstantFunction\n'), ((5369, 5390), 'dune.xt.functions.ConstantFunction__2d_to_1x1', 'ConstantFunction', (['(1.0)'], {}), '(1.0)\n', (5385, 5390), True, 'from dune.xt.functions import ConstantFunction__2d_to_1x1 as ConstantFunction\n'), ((5943, 6001), 'dune.gdt.gamm_2019_talk_on_conservative_rb.make_discrete_function', 'make_discrete_function', (['dg_space', 'u_h._list[0].impl', '"""u_h"""'], {}), "(dg_space, u_h._list[0].impl, 'u_h')\n", (5965, 6001), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((6047, 6106), 'dune.gdt.gamm_2019_talk_on_conservative_rb.make_discrete_function', 'make_discrete_function', (['rtn_space', 't_h._list[0].impl', '"""t_h"""'], {}), "(rtn_space, t_h._list[0].impl, 't_h')\n", (6069, 6106), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((591, 670), 'dune.xt.functions.ExpressionFunction__2d_to_1x1', 'ExpressionFunction', (['"""x"""', 
"['1+cos(0.5*pi*x[0])*cos(0.5*pi*x[1])']", '(3)', '"""lambda_1"""'], {}), "('x', ['1+cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'], 3, 'lambda_1')\n", (609, 670), True, 'from dune.xt.functions import ExpressionFunction__2d_to_1x1 as ExpressionFunction\n'), ((690, 768), 'dune.xt.functions.ExpressionFunction__2d_to_1x1', 'ExpressionFunction', (['"""x"""', "['-cos(0.5*pi*x[0])*cos(0.5*pi*x[1])']", '(3)', '"""lambda_2"""'], {}), "('x', ['-cos(0.5*pi*x[0])*cos(0.5*pi*x[1])'], 3, 'lambda_2')\n", (708, 768), True, 'from dune.xt.functions import ExpressionFunction__2d_to_1x1 as ExpressionFunction\n'), ((2719, 2755), 'dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_DG_product_matrix', 'assemble_DG_product_matrix', (['dg_space'], {}), '(dg_space)\n', (2745, 2755), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((5801, 5893), 'dune.gdt.gamm_2019_talk_on_conservative_rb.compute_flux_reconstruction', 'compute_flux_reconstruction', (['grid', 'dg_space', 'rtn_space', 'diffusion_mu', 'u_h._list[0].impl'], {}), '(grid, dg_space, rtn_space, diffusion_mu, u_h.\n _list[0].impl)\n', (5828, 5893), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((2884, 2911), 'pymor.bindings.dunegdt.DuneGDTVisualizer', 'DuneGDTVisualizer', (['dg_space'], {}), '(dg_space)\n', (2901, 2911), False, 'from pymor.bindings.dunegdt import 
DuneGDTVisualizer\n'), ((4917, 4938), 'dune.xt.functions.ConstantFunction__2d_to_1x1', 'ConstantFunction', (['(1.0)'], {}), '(1.0)\n', (4933, 4938), True, 'from dune.xt.functions import ConstantFunction__2d_to_1x1 as ConstantFunction\n'), ((8022, 8053), 'numpy.max', 'np.max', (["estimate_data['errors']"], {}), "(estimate_data['errors'])\n", (8028, 8053), True, 'import numpy as np\n'), ((8104, 8133), 'numpy.max', 'np.max', (["estimate_data['etas']"], {}), "(estimate_data['etas'])\n", (8110, 8133), True, 'import numpy as np\n'), ((8184, 8221), 'numpy.max', 'np.max', (["estimate_data['efficiencies']"], {}), "(estimate_data['efficiencies'])\n", (8190, 8221), True, 'import numpy as np\n'), ((2465, 2503), 'dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_SWIPDG_matrix', 'assemble_SWIPDG_matrix', (['dg_space', 'diff'], {}), '(dg_space, diff)\n', (2487, 2503), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((2646, 2677), 'dune.gdt.gamm_2019_talk_on_conservative_rb.assemble_L2_vector', 'assemble_L2_vector', (['dg_space', 'f'], {}), '(dg_space, f)\n', (2664, 2677), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((4718, 4739), 'dune.xt.functions.ConstantFunction__2d_to_1x1', 'ConstantFunction', (['(1.0)'], {}), '(1.0)\n', (4734, 4739), True, 'from dune.xt.functions import 
ConstantFunction__2d_to_1x1 as ConstantFunction\n'), ((7117, 7163), 'dune.gdt.gamm_2019_talk_on_conservative_rb.make_discrete_function', 'make_discrete_function', (['dg_space', 'u_RB', '"""u_RB"""'], {}), "(dg_space, u_RB, 'u_RB')\n", (7139, 7163), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((7181, 7232), 'dune.gdt.gamm_2019_talk_on_conservative_rb.make_discrete_function', 'make_discrete_function', (['rtn_space', 't_RB_f', '"""t_RB_f"""'], {}), "(rtn_space, t_RB_f, 't_RB_f')\n", (7203, 7232), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n'), ((7630, 7673), 'dune.gdt.gamm_2019_talk_on_conservative_rb.prolong', 'prolong', (['dg_space', 'u_RB', 'reference_dg_space'], {}), '(dg_space, u_RB, reference_dg_space)\n', (7637, 7673), False, 'from dune.gdt.gamm_2019_talk_on_conservative_rb import DiscontinuousLagrangeSpace, GridProvider, RaviartThomasSpace, assemble_energy_semi_product_matrix, assemble_DG_product_matrix, assemble_Hdiv_product_matrix, assemble_L2_vector, assemble_SWIPDG_matrix, compute_estimate, compute_flux_reconstruction, compute_local_conservation_error, make_discrete_function, prolong, visualize\n')] |
import numpy as np
from numpy.linalg import inv
import sys
if ".." not in sys.path:
sys.path.append("..")
# from PyCommon.modules import pydart2 as pydart
import pydart2 as pydart
from PyCommon.modules.Math import mmMath as mm
import math
class PDController:
"""
:type h : float
:type skel : pydart.Skeleton
:type qhat : np.array
"""
def __init__(self, skel, h, Kt=400., Dt=20.):
self.h = h
self.skel = skel
ndofs = self.skel.ndofs
# self.qhat = self.skel.q
# Kt = 1000.
# Dt = 2.*(Kt**.5)
# self.Kp = np.diagflat([0.0] * 6 + [400.0] * (ndofs - 6))
# self.Kd = np.diagflat([0.0] * 6 + [20.0] * (ndofs - 6))
self.Kp = np.diagflat([0.0] * 6 + [Kt] * (ndofs - 6))
self.Kd = np.diagflat([0.0] * 6 + [Dt] * (ndofs - 6))
self.preoffset = 0.0
self.Rs = None
self.vel = None
def setKpKd(self, index, Kp, Kd):
self.Kp[index, index] = Kp
self.Kd[index, index] = Kd
def compute(self):
skel = self.skel
deltaq = self.calcDeltaq()
invM = inv(skel.M + self.Kd * self.h)
# p = -self.Kp.dot(skel.q + skel.dq * self.h - self.qhat)
p = -self.Kp.dot(-deltaq + skel.dq * self.h)
d = -self.Kd.dot(skel.dq)
qddot = invM.dot(-skel.c + p + d + skel.constraint_forces())
tau = p + d - self.Kd.dot(qddot) * self.h
'''
# Check the balance
COP = skel.body('h_heel_left').to_world([0.05, 0, 0])
offset = skel.C[0] - COP[0]
# Adjust the target pose
k1 = 200.0 if 0.0 < offset and offset < 0.1 else 2000.0
k2 = 100.0
kd = 10.0 if 0.0 < offset and offset < 0.1 else 100.0
q_delta1 = np.array([-k1, -k2, -k1, -k2]) * offset
q_delta2 = np.ones(4) * kd * (self.preoffset - offset)
tau[np.array([17, 25, 19, 26])] += (q_delta1 + q_delta2)
self.preoffset = offset
#'''
# Make sure the first six are zero
tau[:6] = 0
# return qddot
# return p+d
return tau
def setTartgetPose(self, Rs):
self.Rs = Rs
def setTargetVel(self, vel):
self.vel = vel
def calcDeltaq(self):
deltaq = np.zeros(self.skel.q.shape)
if self.Rs is not None:
p_r0 = self.Rs[0][0]
p0 = self.skel.q[3:6]
th_r0 = self.Rs[0][1]
th0 = mm.exp(self.skel.q[:3])
deltaq[:6] = np.hstack((mm.logSO3(np.dot(th0.transpose(), th_r0)), p_r0 - p0))
# TODO:
# apply variety dofs
dofOffset = 6
for i in range(1, len(self.skel.joints)):
# for i in range(1, len(self.Rs)):
joint = self.skel.joints[i]
if joint.num_dofs() == 3:
deltaq[dofOffset:dofOffset+3] = mm.logSO3(np.dot(joint.get_local_transform()[:3, :3].transpose(), self.Rs[i]))
elif joint.num_dofs() == 2:
targetAngle1 = math.atan2(-self.Rs[i][1,2], self.Rs[i][2,2])
targetAngle2 = math.atan2(-self.Rs[i][0,1], self.Rs[i][0,0])
deltaq[dofOffset:dofOffset+2] = np.array([targetAngle1, targetAngle2])
elif joint.num_dofs() == 1:
deltaq[dofOffset] = math.atan2(self.Rs[i][2, 1], self.Rs[i][1,1])
dofOffset += joint.num_dofs()
# a_des0 = kt*(p_r0 - p0) + dt*(- v0) #+ a_r0
# ddth_des0 = kt*(mm.logSO3(np.dot(th0.transpose(), th_r0))) + dt*(- dth0) #+ ddth_r0
return deltaq
def calcDeltadq(self):
deltadq = np.zeros(self.skel.dq.shape)
if self.vel is not None:
dth0
deltadq[:6] = np.hstack(())
| [
"sys.path.append",
"numpy.diagflat",
"math.atan2",
"numpy.zeros",
"numpy.hstack",
"numpy.linalg.inv",
"numpy.array",
"PyCommon.modules.Math.mmMath.exp"
] | [((88, 109), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (103, 109), False, 'import sys\n'), ((718, 761), 'numpy.diagflat', 'np.diagflat', (['([0.0] * 6 + [Kt] * (ndofs - 6))'], {}), '([0.0] * 6 + [Kt] * (ndofs - 6))\n', (729, 761), True, 'import numpy as np\n'), ((780, 823), 'numpy.diagflat', 'np.diagflat', (['([0.0] * 6 + [Dt] * (ndofs - 6))'], {}), '([0.0] * 6 + [Dt] * (ndofs - 6))\n', (791, 823), True, 'import numpy as np\n'), ((1110, 1140), 'numpy.linalg.inv', 'inv', (['(skel.M + self.Kd * self.h)'], {}), '(skel.M + self.Kd * self.h)\n', (1113, 1140), False, 'from numpy.linalg import inv\n'), ((2247, 2274), 'numpy.zeros', 'np.zeros', (['self.skel.q.shape'], {}), '(self.skel.q.shape)\n', (2255, 2274), True, 'import numpy as np\n'), ((3644, 3672), 'numpy.zeros', 'np.zeros', (['self.skel.dq.shape'], {}), '(self.skel.dq.shape)\n', (3652, 3672), True, 'import numpy as np\n'), ((2427, 2450), 'PyCommon.modules.Math.mmMath.exp', 'mm.exp', (['self.skel.q[:3]'], {}), '(self.skel.q[:3])\n', (2433, 2450), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((3750, 3763), 'numpy.hstack', 'np.hstack', (['()'], {}), '(())\n', (3759, 3763), True, 'import numpy as np\n'), ((3024, 3071), 'math.atan2', 'math.atan2', (['(-self.Rs[i][1, 2])', 'self.Rs[i][2, 2]'], {}), '(-self.Rs[i][1, 2], self.Rs[i][2, 2])\n', (3034, 3071), False, 'import math\n'), ((3105, 3152), 'math.atan2', 'math.atan2', (['(-self.Rs[i][0, 1])', 'self.Rs[i][0, 0]'], {}), '(-self.Rs[i][0, 1], self.Rs[i][0, 0])\n', (3115, 3152), False, 'import math\n'), ((3203, 3241), 'numpy.array', 'np.array', (['[targetAngle1, targetAngle2]'], {}), '([targetAngle1, targetAngle2])\n', (3211, 3241), True, 'import numpy as np\n'), ((3326, 3372), 'math.atan2', 'math.atan2', (['self.Rs[i][2, 1]', 'self.Rs[i][1, 1]'], {}), '(self.Rs[i][2, 1], self.Rs[i][1, 1])\n', (3336, 3372), False, 'import math\n')] |
import json
import numpy
import numpy as np
import pkg_resources
import pickle
from .. import utils
datafile = pkg_resources.resource_filename(
"excursion", "testcases/data/checkmate_dense.json"
)
def modify(zv):
return np.log(zv) - np.log(0.05)
truthX, truthy_obs, truthy_exp = [], [], []
for p, _, result in json.load(open(datafile))["precomputed"]:
if p[0] < p[1] + 200:
continue
truthX.append(p)
truthy_obs.append(
max(float(result[1]["observed_CLs"]), 0.001) if result[1] else 0.001
)
truthy_exp.append(
max(float(result[1]["expected_CLs"]), 0.001) if result[1] else 0.001
)
truthX = np.array(truthX)
truthy_obs = np.array(truthy_obs)
truthy_obs = modify(truthy_obs)
truthy_exp = np.array(truthy_exp)
truthy_exp = modify(truthy_exp)
import sklearn.preprocessing
scaler = sklearn.preprocessing.MinMaxScaler()
scaler.fit(truthX)
truthX = scaler.transform(truthX)
picklefile = pkg_resources.resource_filename(
"excursion", "testcases/data/checkmate.pkl"
)
d = pickle.load(open(picklefile, "rb"))
def truth_obs(X):
return 2 * d["obs"].predict(X)
def truth_exp(X):
return 2 * d["exp"].predict(X)
thresholds = [modify(0.05)]
truth_functions = [truth_obs, truth_exp]
def invalid_region(x):
oX = scaler.inverse_transform(x)
return oX[:, 0] < oX[:, 1] + 202
plot_rangedef = np.array([[0.0, 1.0, 101], [0.0, 1.0, 101]])
plotG = utils.mgrid(plot_rangedef)
plotX = utils.mesh2points(plotG, plot_rangedef[:, 2])
plotX = plotX[~invalid_region(plotX)]
acq_rd = np.array([[0.0, 1.0, 41], [0.0, 1.0, 41]])
acqG = utils.mgrid(acq_rd)
acqX = utils.mesh2points(acqG, acq_rd[:, 2])
acqX = acqX[~invalid_region(acqX)]
mn_rd = np.array([[0.0, 1.0, 41], [0, 1.0, 41]])
mnG = utils.mgrid(mn_rd)
meanX = utils.mesh2points(mnG, mn_rd[:, 2])
meanX = meanX[~invalid_region(meanX)]
| [
"numpy.array",
"numpy.log",
"pkg_resources.resource_filename"
] | [((113, 200), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""excursion"""', '"""testcases/data/checkmate_dense.json"""'], {}), "('excursion',\n 'testcases/data/checkmate_dense.json')\n", (144, 200), False, 'import pkg_resources\n'), ((652, 668), 'numpy.array', 'np.array', (['truthX'], {}), '(truthX)\n', (660, 668), True, 'import numpy as np\n'), ((683, 703), 'numpy.array', 'np.array', (['truthy_obs'], {}), '(truthy_obs)\n', (691, 703), True, 'import numpy as np\n'), ((750, 770), 'numpy.array', 'np.array', (['truthy_exp'], {}), '(truthy_exp)\n', (758, 770), True, 'import numpy as np\n'), ((948, 1024), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""excursion"""', '"""testcases/data/checkmate.pkl"""'], {}), "('excursion', 'testcases/data/checkmate.pkl')\n", (979, 1024), False, 'import pkg_resources\n'), ((1369, 1413), 'numpy.array', 'np.array', (['[[0.0, 1.0, 101], [0.0, 1.0, 101]]'], {}), '([[0.0, 1.0, 101], [0.0, 1.0, 101]])\n', (1377, 1413), True, 'import numpy as np\n'), ((1551, 1593), 'numpy.array', 'np.array', (['[[0.0, 1.0, 41], [0.0, 1.0, 41]]'], {}), '([[0.0, 1.0, 41], [0.0, 1.0, 41]])\n', (1559, 1593), True, 'import numpy as np\n'), ((1710, 1750), 'numpy.array', 'np.array', (['[[0.0, 1.0, 41], [0, 1.0, 41]]'], {}), '([[0.0, 1.0, 41], [0, 1.0, 41]])\n', (1718, 1750), True, 'import numpy as np\n'), ((232, 242), 'numpy.log', 'np.log', (['zv'], {}), '(zv)\n', (238, 242), True, 'import numpy as np\n'), ((245, 257), 'numpy.log', 'np.log', (['(0.05)'], {}), '(0.05)\n', (251, 257), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
# pylint: disable=bad-whitespace
"""Example code to do convolution."""
import numpy as np
import tvm
import topi
import topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from topi.nn.util import get_pad_tuple
from topi.util import get_const_tuple
_conv2d_nhwc_winograd_tensorcore = {
"cuda": (topi.cuda.conv2d_nhwc_winograd_tensorcore,
topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore)
}
_conv2d_nhwc_winograd_direct = {
"cuda": (topi.cuda.conv2d_nhwc_winograd_direct,
topi.cuda.schedule_conv2d_nhwc_winograd_direct)
}
def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride,
padding, dilation=1, add_bias=False, add_relu=False,
devices='cuda', bgemm="direct"):
"""Test the conv2d with winograd for nhwc layout"""
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print("Workload: (%d, %d, %d, %d, %d, %d, %d, %d)" % (
batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation))
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name='A')
W = te.placeholder((kernel, kernel, in_channel, num_filter), name='W')
bias = te.placeholder((1, 1, 1, num_filter), name='bias')
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_nhwc.verify_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
if bgemm == "direct":
fcompute, fschedule = topi.testing.dispatch(device,
_conv2d_nhwc_winograd_direct)
elif bgemm == "tensorcore":
fcompute, fschedule = topi.testing.dispatch(device,
_conv2d_nhwc_winograd_tensorcore)
C = fcompute(A, W, stride, padding, dilation, 'float32')
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
if add_bias:
func = tvm.build(s, [A, W, bias, C], device, name="relu_%d_%d_%d_%d_%d_%d_%d_%d" % (
batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation))
func(a, w, b, c)
else:
func = tvm.build(s, [A, W, C], device, name="relu_%d_%d_%d_%d_%d_%d_%d_%d" % (
batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation))
func(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=2e-3)
check_device(devices)
def test_conv2d_nhwc_winograd_direct():
"""Test the conv2d with winograd for nhwc layout"""
# resnet 18 workloads
print("test_winograd_direct...")
verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1, bgemm="direct")
verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
verify_conv2d_nhwc(1, 48, 35, 64, 5, 1, 2)
# weird workloads
verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
# Asymmetric padding
verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, "SAME")
verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
verify_conv2d_nhwc(1, 48, 35, 48, 5, 1, "VALID")
def test_conv2d_nhwc_winograd_tensorcore():
"""Test the conv2d with winograd for nhwc layout"""
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if not nvcc.have_tensorcore(tvm.gpu(0).compute_version):
return
verify_conv2d_nhwc(8, 64, 56, 64, 3, 1, 1, bgemm="tensorcore")
verify_conv2d_nhwc(8, 128, 28, 128, 3, 1, 1, bgemm="tensorcore")
verify_conv2d_nhwc(8, 256, 14, 256, 3, 1, 1, bgemm="tensorcore")
verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, (1, 1), add_relu=True, bgemm="tensorcore")
verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, "SAME", add_relu=True, bgemm="tensorcore")
if __name__ == "__main__":
test_conv2d_nhwc_winograd_direct()
test_conv2d_nhwc_winograd_tensorcore()
| [
"numpy.random.uniform",
"tvm.te.placeholder",
"topi.testing.dispatch",
"topi.nn.util.get_pad_tuple",
"tvm.nd.array",
"numpy.maximum",
"topi.testing.dilate_python",
"tvm.context",
"tvm.target.create",
"tvm.runtime.enabled",
"topi.add",
"tvm.contrib.pickle_memoize.memoize",
"topi.testing.conv2... | [((1786, 1826), 'topi.nn.util.get_pad_tuple', 'get_pad_tuple', (['padding', '(kernel, kernel)'], {}), '(padding, (kernel, kernel))\n', (1799, 1826), False, 'from topi.nn.util import get_pad_tuple\n'), ((2081, 2147), 'tvm.te.placeholder', 'te.placeholder', (['(batch, in_height, in_width, in_channel)'], {'name': '"""A"""'}), "((batch, in_height, in_width, in_channel), name='A')\n", (2095, 2147), False, 'from tvm import te\n'), ((2156, 2222), 'tvm.te.placeholder', 'te.placeholder', (['(kernel, kernel, in_channel, num_filter)'], {'name': '"""W"""'}), "((kernel, kernel, in_channel, num_filter), name='W')\n", (2170, 2222), False, 'from tvm import te\n'), ((2234, 2284), 'tvm.te.placeholder', 'te.placeholder', (['(1, 1, 1, num_filter)'], {'name': '"""bias"""'}), "((1, 1, 1, num_filter), name='bias')\n", (2248, 2284), False, 'from tvm import te\n'), ((2300, 2324), 'topi.util.get_const_tuple', 'get_const_tuple', (['A.shape'], {}), '(A.shape)\n', (2315, 2324), False, 'from topi.util import get_const_tuple\n'), ((2339, 2363), 'topi.util.get_const_tuple', 'get_const_tuple', (['W.shape'], {}), '(W.shape)\n', (2354, 2363), False, 'from topi.util import get_const_tuple\n'), ((2381, 2408), 'topi.util.get_const_tuple', 'get_const_tuple', (['bias.shape'], {}), '(bias.shape)\n', (2396, 2408), False, 'from topi.util import get_const_tuple\n'), ((2435, 2497), 'tvm.contrib.pickle_memoize.memoize', 'memoize', (['"""topi.tests.test_topi_conv2d_nhwc.verify_conv2d_nhwc"""'], {}), "('topi.tests.test_topi_conv2d_nhwc.verify_conv2d_nhwc')\n", (2442, 2497), False, 'from tvm.contrib.pickle_memoize import memoize\n'), ((2724, 2784), 'topi.testing.dilate_python', 'topi.testing.dilate_python', (['w_np', '(dilation, dilation, 1, 1)'], {}), '(w_np, (dilation, dilation, 1, 1))\n', (2750, 2784), False, 'import topi\n'), ((2800, 2861), 'topi.testing.conv2d_nhwc_python', 'topi.testing.conv2d_nhwc_python', (['a_np', 'dw_np', 'stride', 'padding'], {}), '(a_np, dw_np, stride, 
padding)\n', (2831, 2861), False, 'import topi\n'), ((3164, 3186), 'tvm.context', 'tvm.context', (['device', '(0)'], {}), '(device, 0)\n', (3175, 3186), False, 'import tvm\n'), ((4012, 4035), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (4024, 4035), False, 'import tvm\n'), ((4048, 4071), 'tvm.nd.array', 'tvm.nd.array', (['w_np', 'ctx'], {}), '(w_np, ctx)\n', (4060, 4071), False, 'import tvm\n'), ((4084, 4107), 'tvm.nd.array', 'tvm.nd.array', (['b_np', 'ctx'], {}), '(b_np, ctx)\n', (4096, 4107), False, 'import tvm\n'), ((3016, 3035), 'numpy.maximum', 'np.maximum', (['c_np', '(0)'], {}), '(c_np, 0)\n', (3026, 3035), True, 'import numpy as np\n'), ((3354, 3379), 'tvm.target.create', 'tvm.target.create', (['device'], {}), '(device)\n', (3371, 3379), False, 'import tvm\n'), ((4229, 4393), 'tvm.build', 'tvm.build', (['s', '[A, W, bias, C]', 'device'], {'name': "('relu_%d_%d_%d_%d_%d_%d_%d_%d' % (batch, in_channel, in_size, num_filter,\n kernel, stride, padding_sum, dilation))"}), "(s, [A, W, bias, C], device, name='relu_%d_%d_%d_%d_%d_%d_%d_%d' %\n (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum,\n dilation))\n", (4238, 4393), False, 'import tvm\n'), ((4465, 4624), 'tvm.build', 'tvm.build', (['s', '[A, W, C]', 'device'], {'name': "('relu_%d_%d_%d_%d_%d_%d_%d_%d' % (batch, in_channel, in_size, num_filter,\n kernel, stride, padding_sum, dilation))"}), "(s, [A, W, C], device, name='relu_%d_%d_%d_%d_%d_%d_%d_%d' % (\n batch, in_channel, in_size, num_filter, kernel, stride, padding_sum,\n dilation))\n", (4474, 4624), False, 'import tvm\n'), ((5762, 5789), 'tvm.runtime.enabled', 'tvm.runtime.enabled', (['"""cuda"""'], {}), "('cuda')\n", (5781, 5789), False, 'import tvm\n'), ((2537, 2568), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'a_shape'}), '(size=a_shape)\n', (2554, 2568), True, 'import numpy as np\n'), ((2598, 2629), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'w_shape'}), '(size=w_shape)\n', 
(2615, 2629), True, 'import numpy as np\n'), ((2659, 2693), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'bias_shape'}), '(size=bias_shape)\n', (2676, 2693), True, 'import numpy as np\n'), ((3453, 3512), 'topi.testing.dispatch', 'topi.testing.dispatch', (['device', '_conv2d_nhwc_winograd_direct'], {}), '(device, _conv2d_nhwc_winograd_direct)\n', (3474, 3512), False, 'import topi\n'), ((3889, 3906), 'topi.add', 'topi.add', (['C', 'bias'], {}), '(C, bias)\n', (3897, 3906), False, 'import topi\n'), ((3952, 3967), 'topi.nn.relu', 'topi.nn.relu', (['C'], {}), '(C)\n', (3964, 3967), False, 'import topi\n'), ((4142, 4166), 'topi.util.get_const_tuple', 'get_const_tuple', (['C.shape'], {}), '(C.shape)\n', (4157, 4166), False, 'from topi.util import get_const_tuple\n'), ((5738, 5748), 'tvm.gpu', 'tvm.gpu', (['(0)'], {}), '(0)\n', (5745, 5748), False, 'import tvm\n'), ((5890, 5900), 'tvm.gpu', 'tvm.gpu', (['(0)'], {}), '(0)\n', (5897, 5900), False, 'import tvm\n'), ((2902, 2936), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'bias_shape'}), '(size=bias_shape)\n', (2919, 2936), True, 'import numpy as np\n'), ((3651, 3714), 'topi.testing.dispatch', 'topi.testing.dispatch', (['device', '_conv2d_nhwc_winograd_tensorcore'], {}), '(device, _conv2d_nhwc_winograd_tensorcore)\n', (3672, 3714), False, 'import topi\n')] |
from __future__ import division
import numpy as np
import scipy.linalg as la
from numpy.testing import assert_almost_equal
import multiprocessing as mp
import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
import random
import pandas as pd
import time
def distance(x,Y):
'''
Function to calculate the distance between a point x and a Y, a subset of X
Input: x, a single data point. Y, a collection of data points
Output: The minimum Euclidean norm of x and each element in Y
'''
distances=[la.norm(x-y) for y in Y]
return min(distances)
def k_means_pp(X,k):
'''
Function to initialize centers for the k-means++ algorithm
Input: X, an array of data. k, the number of clusters
Output: C, an array with length k of initial cluster centers.
'''
random.seed(22)
#randomly choose the first c
first_center = np.random.choice(X.shape[0], 1)
C = X[first_center,:]
for i in range(k-1):
#calculate the distance between each x in X and the currently initialized centers
dist_x=np.ones(len(X))
for i in range(len(X)):
if X[i,:] in C:
dist_x[i]=0
else:
dist_x[i]=distance(X[i,:],C)**2
#use dist_x to calculate the probability that each x is chose
probabilities=dist_x/sum(dist_x)
#randomly choose an x according to these probabilities
rand=np.random.choice(X.shape[0],1, p=probabilities)
C = np.vstack([C, X[rand,:]])
#finally, return the array of centers
return C
def weighted_clusters(weights, X,k):
'''
Function to return weighted centers for the k-means++ algorithm. To be used in kmeans||
Input: X, an array of data. k, the number of clusters. weights, a vector of length X
Output: C, an array with length k of initial cluster centers.
'''
first_center = np.random.choice(X.shape[0], 1)
weight_C = X[first_center,:]
for i in range(k-1):
#calculate the distance between each x in X and the currently initialized centers
dist_x=np.ones(len(X))
for i in range(len(X)):
if X[i,:] in weight_C:
dist_x[i]=0
else:
dist_x[i]=distance(X[i,:],weight_C)**2
#use dist_x to calculate the probability that each x is chose
probabilities=dist_x/sum(dist_x)
#randomly choose an x according to these probabilities
rand=np.random.choice(X.shape[0],1, p=probabilities)
weight_C = np.vstack([weight_C, X[rand,:]])
#finally, return the array of centers
return weight_C
def scalable_k_means_pp(X,k,ell):
'''
Function to initialize centers for the k-means|| algorithm
Input: X, an array of data. k, the number of clusters
Output: C, an array with length k of initial cluster centers.
'''
#randomly choose the first c
first_center = np.random.choice(X.shape[0], 1)
C = X[first_center,:]
#calculate the intitial cost. This will tell us how many times to loop.
cost_initial=sum([distance(x,C)**2 for x in X])
for i in range(int(round(np.log(cost_initial)))):
#calculate the distance
dist_x=[distance(x,C)**2 for x in X]
#calculate the probabilities for each x
probabilities=(np.array(dist_x)*ell)/sum(dist_x)
#iterate through each datapoint
for j in range(len(X)):
#draw a random uniform number.
rand=np.random.uniform()
#if rand<= the probability and that datapoint isn't already in C, add to C
if rand<=probabilities[j] and X[j,:] not in C:
C = np.vstack([C, X[j,:]])
#initialize weights
weights=np.zeros(C.shape[0])
#iterate through each item in C
for x in X:
c_no = -1
min_dist = np.inf
for i in range(C.shape[0]):
dist = la.norm(C[i]-x)
if min_dist > dist:
min_dist = dist
c_no = i
weights[c_no] = weights[c_no]+1
#normalize the weights
weights=np.array(weights)/sum(weights)
#return those weights as the chosen centers
return weighted_clusters(weights, C,k)
| [
"numpy.random.uniform",
"numpy.log",
"numpy.zeros",
"scipy.linalg.norm",
"random.seed",
"numpy.array",
"numpy.random.choice",
"numpy.vstack"
] | [((849, 864), 'random.seed', 'random.seed', (['(22)'], {}), '(22)\n', (860, 864), False, 'import random\n'), ((917, 948), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', '(1)'], {}), '(X.shape[0], 1)\n', (933, 948), True, 'import numpy as np\n'), ((1960, 1991), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', '(1)'], {}), '(X.shape[0], 1)\n', (1976, 1991), True, 'import numpy as np\n'), ((3016, 3047), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', '(1)'], {}), '(X.shape[0], 1)\n', (3032, 3047), True, 'import numpy as np\n'), ((3842, 3862), 'numpy.zeros', 'np.zeros', (['C.shape[0]'], {}), '(C.shape[0])\n', (3850, 3862), True, 'import numpy as np\n'), ((546, 560), 'scipy.linalg.norm', 'la.norm', (['(x - y)'], {}), '(x - y)\n', (553, 560), True, 'import scipy.linalg as la\n'), ((1474, 1522), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', '(1)'], {'p': 'probabilities'}), '(X.shape[0], 1, p=probabilities)\n', (1490, 1522), True, 'import numpy as np\n'), ((1534, 1560), 'numpy.vstack', 'np.vstack', (['[C, X[rand, :]]'], {}), '([C, X[rand, :]])\n', (1543, 1560), True, 'import numpy as np\n'), ((2529, 2577), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', '(1)'], {'p': 'probabilities'}), '(X.shape[0], 1, p=probabilities)\n', (2545, 2577), True, 'import numpy as np\n'), ((2596, 2629), 'numpy.vstack', 'np.vstack', (['[weight_C, X[rand, :]]'], {}), '([weight_C, X[rand, :]])\n', (2605, 2629), True, 'import numpy as np\n'), ((4204, 4221), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (4212, 4221), True, 'import numpy as np\n'), ((3590, 3609), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3607, 3609), True, 'import numpy as np\n'), ((4014, 4031), 'scipy.linalg.norm', 'la.norm', (['(C[i] - x)'], {}), '(C[i] - x)\n', (4021, 4031), True, 'import scipy.linalg as la\n'), ((3241, 3261), 'numpy.log', 'np.log', (['cost_initial'], {}), '(cost_initial)\n', (3247, 3261), True, 'import numpy as 
np\n'), ((3423, 3439), 'numpy.array', 'np.array', (['dist_x'], {}), '(dist_x)\n', (3431, 3439), True, 'import numpy as np\n'), ((3777, 3800), 'numpy.vstack', 'np.vstack', (['[C, X[j, :]]'], {}), '([C, X[j, :]])\n', (3786, 3800), True, 'import numpy as np\n')] |
"""RPN for fasterRCNN"""
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore.ops import functional as F
from src.CTPN.bbox_assign_sample import BboxAssignSample
class RpnRegClsBlock(nn.Cell):
"""
Rpn reg cls block for rpn layer
Args:
config(EasyDict) - Network construction config.
in_channels (int) - Input channels of shared convolution.
feat_channels (int) - Output channels of shared convolution.
num_anchors (int) - The anchor number.
cls_out_channels (int) - Output channels of classification convolution.
Returns:
Tensor, output tensor.
"""
def __init__(self,
config,
in_channels,
feat_channels,
num_anchors,
cls_out_channels):
super(RpnRegClsBlock, self).__init__()
self.shape = P.Shape()
self.reshape = P.Reshape()
self.shape = (-1, 2*config.hidden_size)
self.lstm_fc = nn.Dense(2*config.hidden_size, 512).to_float(mstype.float16)
self.rpn_cls = nn.Dense(in_channels=512, out_channels=num_anchors * cls_out_channels).to_float(mstype.float16)
self.rpn_reg = nn.Dense(in_channels=512, out_channels=num_anchors * 4).to_float(mstype.float16)
self.shape1 = (-1, config.num_step, config.rnn_batch_size)
self.shape2 = (config.batch_size, -1, config.rnn_batch_size, config.num_step)
self.transpose = P.Transpose()
self.print = P.Print()
def construct(self, x):
x = self.reshape(x, self.shape)
x = self.lstm_fc(x)
x1 = self.rpn_cls(x)
x1 = self.transpose(x1, (1, 0))
x1 = self.reshape(x1, self.shape1)
x1 = self.transpose(x1, (0, 2, 1))
x1 = self.reshape(x1, self.shape2)
x2 = self.rpn_reg(x)
x2 = self.transpose(x2, (1, 0))
x2 = self.reshape(x2, self.shape1)
x2 = self.transpose(x2, (0, 2, 1))
x2 = self.reshape(x2, self.shape2)
return x1, x2
class RPN(nn.Cell):
"""
ROI proposal network..
Args:
config (dict) - Config.
batch_size (int) - Batchsize.
in_channels (int) - Input channels of shared convolution.
feat_channels (int) - Output channels of shared convolution.
num_anchors (int) - The anchor number.
cls_out_channels (int) - Output channels of classification convolution.
Returns:
Tuple, tuple of output tensor.
Examples:
RPN(config=config, batch_size=2, in_channels=256, feat_channels=1024,
num_anchors=3, cls_out_channels=512)
"""
def __init__(self,
config,
batch_size,
in_channels,
feat_channels,
num_anchors,
cls_out_channels):
super(RPN, self).__init__()
cfg_rpn = config
self.cfg = config
self.num_bboxes = cfg_rpn.num_bboxes
self.feature_anchor_shape = cfg_rpn.feature_shapes
self.feature_anchor_shape = self.feature_anchor_shape[0] * \
self.feature_anchor_shape[1] * num_anchors * batch_size
self.num_anchors = num_anchors
self.batch_size = batch_size
self.test_batch_size = cfg_rpn.test_batch_size
self.num_layers = 1
self.real_ratio = Tensor(np.ones((1, 1)).astype(np.float16))
self.use_sigmoid_cls = config.use_sigmoid_cls
if config.use_sigmoid_cls:
self.reshape_shape_cls = (-1,)
self.loss_cls = P.SigmoidCrossEntropyWithLogits()
cls_out_channels = 1
else:
self.reshape_shape_cls = (-1, cls_out_channels)
self.loss_cls = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="none")
self.rpn_convs_list = self._make_rpn_layer(self.num_layers, in_channels, feat_channels,\
num_anchors, cls_out_channels)
self.transpose = P.Transpose()
self.reshape = P.Reshape()
self.concat = P.Concat(axis=0)
self.fill = P.Fill()
self.placeh1 = Tensor(np.ones((1,)).astype(np.float16))
self.trans_shape = (0, 2, 3, 1)
self.reshape_shape_reg = (-1, 4)
self.softmax = nn.Softmax()
self.rpn_loss_reg_weight = Tensor(np.array(cfg_rpn.rpn_loss_reg_weight).astype(np.float16))
self.rpn_loss_cls_weight = Tensor(np.array(cfg_rpn.rpn_loss_cls_weight).astype(np.float16))
self.num_expected_total = Tensor(np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(np.float16))
self.num_bboxes = cfg_rpn.num_bboxes
self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size, self.num_bboxes, False)
self.CheckValid = P.CheckValid()
self.sum_loss = P.ReduceSum()
self.loss_bbox = P.SmoothL1Loss(beta=1.0/9.0)
self.squeeze = P.Squeeze()
self.cast = P.Cast()
self.tile = P.Tile()
self.zeros_like = P.ZerosLike()
self.loss = Tensor(np.zeros((1,)).astype(np.float16))
self.clsloss = Tensor(np.zeros((1,)).astype(np.float16))
self.regloss = Tensor(np.zeros((1,)).astype(np.float16))
self.print = P.Print()
def _make_rpn_layer(self, num_layers, in_channels, feat_channels, num_anchors, cls_out_channels):
"""
make rpn layer for rpn proposal network
Args:
num_layers (int) - layer num.
in_channels (int) - Input channels of shared convolution.
feat_channels (int) - Output channels of shared convolution.
num_anchors (int) - The anchor number.
cls_out_channels (int) - Output channels of classification convolution.
Returns:
List, list of RpnRegClsBlock cells.
"""
rpn_layer = RpnRegClsBlock(self.cfg, in_channels, feat_channels, num_anchors, cls_out_channels)
return rpn_layer
def construct(self, inputs, img_metas, anchor_list, gt_bboxes, gt_labels, gt_valids):
'''
inputs(Tensor): Inputs tensor from lstm.
img_metas(Tensor): Image shape.
anchor_list(Tensor): Total anchor list.
gt_labels(Tensor): Ground truth labels.
gt_valids(Tensor): Whether ground truth is valid.
'''
rpn_cls_score_ori, rpn_bbox_pred_ori = self.rpn_convs_list(inputs)
rpn_cls_score = self.transpose(rpn_cls_score_ori, self.trans_shape)
rpn_cls_score = self.reshape(rpn_cls_score, self.reshape_shape_cls)
rpn_bbox_pred = self.transpose(rpn_bbox_pred_ori, self.trans_shape)
rpn_bbox_pred = self.reshape(rpn_bbox_pred, self.reshape_shape_reg)
output = ()
bbox_targets = ()
bbox_weights = ()
labels = ()
label_weights = ()
if self.training:
for i in range(self.batch_size):
valid_flag_list = self.cast(self.CheckValid(anchor_list, self.squeeze(img_metas[i:i + 1:1, ::])),\
mstype.int32)
gt_bboxes_i = self.squeeze(gt_bboxes[i:i + 1:1, ::])
gt_labels_i = self.squeeze(gt_labels[i:i + 1:1, ::])
gt_valids_i = self.squeeze(gt_valids[i:i + 1:1, ::])
bbox_target, bbox_weight, label, label_weight = self.get_targets(gt_bboxes_i,
gt_labels_i,
self.cast(valid_flag_list,
mstype.bool_),
anchor_list, gt_valids_i)
bbox_weight = self.cast(bbox_weight, mstype.float16)
label_weight = self.cast(label_weight, mstype.float16)
bbox_targets += (bbox_target,)
bbox_weights += (bbox_weight,)
labels += (label,)
label_weights += (label_weight,)
bbox_target_with_batchsize = self.concat(bbox_targets)
bbox_weight_with_batchsize = self.concat(bbox_weights)
label_with_batchsize = self.concat(labels)
label_weight_with_batchsize = self.concat(label_weights)
bbox_target_ = F.stop_gradient(bbox_target_with_batchsize)
bbox_weight_ = F.stop_gradient(bbox_weight_with_batchsize)
label_ = F.stop_gradient(label_with_batchsize)
label_weight_ = F.stop_gradient(label_weight_with_batchsize)
rpn_cls_score = self.cast(rpn_cls_score, mstype.float32)
if self.use_sigmoid_cls:
label_ = self.cast(label_, mstype.float32)
loss_cls = self.loss_cls(rpn_cls_score, label_)
loss_cls = loss_cls * label_weight_
loss_cls = self.sum_loss(loss_cls, (0,)) / self.num_expected_total
rpn_bbox_pred = self.cast(rpn_bbox_pred, mstype.float32)
bbox_target_ = self.cast(bbox_target_, mstype.float32)
loss_reg = self.loss_bbox(rpn_bbox_pred, bbox_target_)
bbox_weight_ = self.tile(self.reshape(bbox_weight_, (self.feature_anchor_shape, 1)), (1, 4))
loss_reg = loss_reg * bbox_weight_
loss_reg = self.sum_loss(loss_reg, (1,))
loss_reg = self.sum_loss(loss_reg, (0,)) / self.num_expected_total
loss_total = self.rpn_loss_cls_weight * loss_cls + self.rpn_loss_reg_weight * loss_reg
output = (loss_total, rpn_cls_score_ori, rpn_bbox_pred_ori, loss_cls, loss_reg)
else:
output = (self.placeh1, rpn_cls_score_ori, rpn_bbox_pred_ori, self.placeh1, self.placeh1)
return output
| [
"mindspore.ops.operations.SigmoidCrossEntropyWithLogits",
"mindspore.ops.operations.Cast",
"numpy.ones",
"mindspore.ops.operations.CheckValid",
"mindspore.ops.operations.Fill",
"mindspore.ops.operations.Transpose",
"mindspore.ops.operations.Concat",
"mindspore.ops.operations.ReduceSum",
"mindspore.o... | [((1013, 1022), 'mindspore.ops.operations.Shape', 'P.Shape', ([], {}), '()\n', (1020, 1022), True, 'from mindspore.ops import operations as P\n'), ((1046, 1057), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (1055, 1057), True, 'from mindspore.ops import operations as P\n'), ((1591, 1604), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (1602, 1604), True, 'from mindspore.ops import operations as P\n'), ((1626, 1635), 'mindspore.ops.operations.Print', 'P.Print', ([], {}), '()\n', (1633, 1635), True, 'from mindspore.ops import operations as P\n'), ((4073, 4086), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (4084, 4086), True, 'from mindspore.ops import operations as P\n'), ((4110, 4121), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (4119, 4121), True, 'from mindspore.ops import operations as P\n'), ((4144, 4160), 'mindspore.ops.operations.Concat', 'P.Concat', ([], {'axis': '(0)'}), '(axis=0)\n', (4152, 4160), True, 'from mindspore.ops import operations as P\n'), ((4181, 4189), 'mindspore.ops.operations.Fill', 'P.Fill', ([], {}), '()\n', (4187, 4189), True, 'from mindspore.ops import operations as P\n'), ((4360, 4372), 'mindspore.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (4370, 4372), True, 'import mindspore.nn as nn\n'), ((4759, 4825), 'src.CTPN.bbox_assign_sample.BboxAssignSample', 'BboxAssignSample', (['cfg_rpn', 'self.batch_size', 'self.num_bboxes', '(False)'], {}), '(cfg_rpn, self.batch_size, self.num_bboxes, False)\n', (4775, 4825), False, 'from src.CTPN.bbox_assign_sample import BboxAssignSample\n'), ((4852, 4866), 'mindspore.ops.operations.CheckValid', 'P.CheckValid', ([], {}), '()\n', (4864, 4866), True, 'from mindspore.ops import operations as P\n'), ((4891, 4904), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (4902, 4904), True, 'from mindspore.ops import operations as P\n'), ((4930, 4960), 
'mindspore.ops.operations.SmoothL1Loss', 'P.SmoothL1Loss', ([], {'beta': '(1.0 / 9.0)'}), '(beta=1.0 / 9.0)\n', (4944, 4960), True, 'from mindspore.ops import operations as P\n'), ((4982, 4993), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', ([], {}), '()\n', (4991, 4993), True, 'from mindspore.ops import operations as P\n'), ((5014, 5022), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (5020, 5022), True, 'from mindspore.ops import operations as P\n'), ((5043, 5051), 'mindspore.ops.operations.Tile', 'P.Tile', ([], {}), '()\n', (5049, 5051), True, 'from mindspore.ops import operations as P\n'), ((5078, 5091), 'mindspore.ops.operations.ZerosLike', 'P.ZerosLike', ([], {}), '()\n', (5089, 5091), True, 'from mindspore.ops import operations as P\n'), ((5305, 5314), 'mindspore.ops.operations.Print', 'P.Print', ([], {}), '()\n', (5312, 5314), True, 'from mindspore.ops import operations as P\n'), ((3674, 3707), 'mindspore.ops.operations.SigmoidCrossEntropyWithLogits', 'P.SigmoidCrossEntropyWithLogits', ([], {}), '()\n', (3705, 3707), True, 'from mindspore.ops import operations as P\n'), ((3843, 3906), 'mindspore.nn.SoftmaxCrossEntropyWithLogits', 'nn.SoftmaxCrossEntropyWithLogits', ([], {'sparse': '(True)', 'reduction': '"""none"""'}), "(sparse=True, reduction='none')\n", (3875, 3906), True, 'import mindspore.nn as nn\n'), ((8392, 8435), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['bbox_target_with_batchsize'], {}), '(bbox_target_with_batchsize)\n', (8407, 8435), True, 'from mindspore.ops import functional as F\n'), ((8463, 8506), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['bbox_weight_with_batchsize'], {}), '(bbox_weight_with_batchsize)\n', (8478, 8506), True, 'from mindspore.ops import functional as F\n'), ((8528, 8565), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['label_with_batchsize'], {}), '(label_with_batchsize)\n', (8543, 8565), True, 'from mindspore.ops import functional as F\n'), ((8594, 
8638), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['label_weight_with_batchsize'], {}), '(label_weight_with_batchsize)\n', (8609, 8638), True, 'from mindspore.ops import functional as F\n'), ((1129, 1166), 'mindspore.nn.Dense', 'nn.Dense', (['(2 * config.hidden_size)', '(512)'], {}), '(2 * config.hidden_size, 512)\n', (1137, 1166), True, 'import mindspore.nn as nn\n'), ((1213, 1283), 'mindspore.nn.Dense', 'nn.Dense', ([], {'in_channels': '(512)', 'out_channels': '(num_anchors * cls_out_channels)'}), '(in_channels=512, out_channels=num_anchors * cls_out_channels)\n', (1221, 1283), True, 'import mindspore.nn as nn\n'), ((1332, 1387), 'mindspore.nn.Dense', 'nn.Dense', ([], {'in_channels': '(512)', 'out_channels': '(num_anchors * 4)'}), '(in_channels=512, out_channels=num_anchors * 4)\n', (1340, 1387), True, 'import mindspore.nn as nn\n'), ((3478, 3493), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (3485, 3493), True, 'import numpy as np\n'), ((4220, 4233), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (4227, 4233), True, 'import numpy as np\n'), ((4415, 4452), 'numpy.array', 'np.array', (['cfg_rpn.rpn_loss_reg_weight'], {}), '(cfg_rpn.rpn_loss_reg_weight)\n', (4423, 4452), True, 'import numpy as np\n'), ((4515, 4552), 'numpy.array', 'np.array', (['cfg_rpn.rpn_loss_cls_weight'], {}), '(cfg_rpn.rpn_loss_cls_weight)\n', (4523, 4552), True, 'import numpy as np\n'), ((4614, 4666), 'numpy.array', 'np.array', (['(cfg_rpn.num_expected_neg * self.batch_size)'], {}), '(cfg_rpn.num_expected_neg * self.batch_size)\n', (4622, 4666), True, 'import numpy as np\n'), ((5119, 5133), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (5127, 5133), True, 'import numpy as np\n'), ((5184, 5198), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (5192, 5198), True, 'import numpy as np\n'), ((5249, 5263), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (5257, 5263), True, 'import numpy as np\n')] |
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This is the standard macro module"""
__all__ = ["ct", "mstate", "mv", "mvr", "pwa", "pwm", "repeat", "set_lim",
"set_lm", "set_pos", "settimer", "uct", "umv", "umvr", "wa", "wm",
"tw", "logmacro", "newfile"]
__docformat__ = 'restructuredtext'
import datetime
import os
import numpy as np
from taurus import Device
from taurus.console.table import Table
import PyTango
from PyTango import DevState
from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, \
ViewOption, iMacro, Hookable
from sardana.macroserver.msexception import StopException, UnknownEnv
from sardana.macroserver.scan.scandata import Record
from sardana.macroserver.macro import Optional
##########################################################################
#
# Motion related macros
#
##########################################################################
class _wm(Macro):
    """Show motor positions"""
    param_def = [
        ['motor_list',
         ParamRepeat(['motor', Type.Moveable, None, 'Motor to move']),
         None, 'List of motor to show'],
    ]
    def run(self, motor_list):
        """Read and print the positions of all motors in *motor_list*.

        The reads are issued asynchronously through the PyTango request
        model so that all motors are polled in parallel; the replies are
        then collected and rendered as a single table.  Depending on the
        view options, the dial position and the controller/axis info are
        shown as extra rows.
        """
        show_dial = self.getViewOption(ViewOption.ShowDial)
        show_ctrlaxis = self.getViewOption(ViewOption.ShowCtrlAxis)
        pos_format = self.getViewOption(ViewOption.PosFormat)
        motor_width = 9
        motors = {}  # dict(motor name: motor obj)
        requests = {}  # dict(motor name: request id)
        data = {}  # dict(motor name: list of motor data)
        # sending asynchronous requests: neither Taurus nor Sardana extensions
        # allow asynchronous requests - use PyTango asynchronous request model
        for motor in motor_list:
            name = motor.getName()
            motors[name] = motor
            args = ('position',)
            if show_dial:
                args += ('dialposition',)
            _id = motor.read_attributes_asynch(args)
            requests[name] = _id
            # widen the column to fit the longest motor name
            motor_width = max(motor_width, len(name))
            data[name] = []
        # get additional motor information (ctrl name & axis)
        if show_ctrlaxis:
            for name, motor in motors.items():
                ctrl_name = self.getController(motor.controller).name
                axis_nb = str(getattr(motor, "axis"))
                data[name].extend((ctrl_name, axis_nb))
                motor_width = max(motor_width, len(ctrl_name), len(axis_nb))
        # collect asynchronous replies; loop until every request answered
        while len(requests) > 0:
            req2delete = []
            for name, _id in requests.items():
                motor = motors[name]
                try:
                    attrs = motor.read_attributes_reply(_id)
                    for attr in attrs:
                        value = attr.value
                        if value is None:
                            value = float('NaN')
                        data[name].append(value)
                    req2delete.append(name)
                except PyTango.AsynReplyNotArrived:
                    # reply not ready yet - try again on the next pass
                    continue
                except PyTango.DevFailed:
                    # read failed - show NaN instead of aborting the macro
                    data[name].append(float('NaN'))
                    if show_dial:
                        data[name].append(float('NaN'))
                    req2delete.append(name)
                    self.debug('Error when reading %s position(s)' % name)
                    self.debug('Details:', exc_info=1)
                    continue
            # removing motors which already replied
            for name in req2delete:
                requests.pop(name)
        # define format for numerical values
        fmt = '%c*.%df' % ('%', motor_width - 5)
        if pos_format > -1:
            # PosFormat view option overrides the width-derived precision
            fmt = '%c*.%df' % ('%', int(pos_format))
        # prepare row headers and formats
        row_headers = []
        t_format = []
        if show_ctrlaxis:
            row_headers += ['Ctrl', 'Axis']
            t_format += ['%*s', '%*s']
        row_headers.append('User')
        t_format.append(fmt)
        if show_dial:
            row_headers.append('Dial')
            t_format.append(fmt)
        # sort the data dict by keys
        col_headers = []
        values = []
        for mot_name, mot_values in sorted(data.items()):
            col_headers.append([mot_name])  # convert name to list
            values.append(mot_values)
        # create and print table
        table = Table(values, elem_fmt=t_format,
                      col_head_str=col_headers, col_head_width=motor_width,
                      row_head_str=row_headers)
        for line in table.genOutput():
            self.output(line)
class _wum(Macro):
    """Show user motor positions"""
    param_def = [
        ['motor_list',
         ParamRepeat(['motor', Type.Moveable, None, 'Motor to move']),
         None, 'List of motor to show'],
    ]
    def prepare(self, motor_list, **opts):
        self.table_opts = {}
    def run(self, motor_list):
        """Print one column per motor with its current user position."""
        col_width = 9
        headers = []
        rows = []
        fmt_option = self.getViewOption(ViewOption.PosFormat)
        for motor in sorted(motor_list):
            label = motor.getName()
            position = motor.getPosition(force=True)
            if position is None:
                position = float('NAN')
            headers.append([label])
            rows.append((position,))
            col_width = max(col_width, len(label))
        # Numeric precision scales with the column width unless the
        # PosFormat view option requests a fixed number of decimals.
        if fmt_option > -1:
            value_fmt = '%c*.%df' % ('%', int(fmt_option))
        else:
            value_fmt = '%c*.%df' % ('%', col_width - 5)
        table = Table(rows, elem_fmt=[value_fmt],
                      col_head_str=headers, col_head_width=col_width,
                      **self.table_opts)
        for text_line in table.genOutput():
            self.output(text_line)
class wu(Macro):
    """Show all user motor positions"""
    def prepare(self, **opts):
        # Match every moveable in the pool; _wum does the actual printing.
        self.all_motors = self.findObjs('.*', type_class=Type.Moveable)
        self.table_opts = {}
    def run(self):
        if not self.all_motors:
            self.output('No motor defined')
            return
        timestamp = datetime.datetime.now().isoformat(' ')
        self.output('Current positions (user) on %s' % timestamp)
        self.output('')
        self.execMacro('_wum', self.all_motors, **self.table_opts)
class wa(Macro):
    """Show all motor positions"""
    # TODO: duplication of the default value definition is a workaround
    # for #427. See commit message cc3331a for more details.
    param_def = [
        ['filter',
         ParamRepeat(['filter', Type.String, '.*',
                      'a regular expression filter'], min=1),
         ['.*'], 'a regular expression filter'],
    ]
    def prepare(self, filter, **opts):
        # Resolve the moveables matching the filter once, before run().
        self.all_motors = self.findObjs(filter, type_class=Type.Moveable)
        self.table_opts = {}
    def run(self, filter):
        if not self.all_motors:
            self.output('No motor defined')
            return
        timestamp = datetime.datetime.now().isoformat(' ')
        if self.getViewOption(ViewOption.ShowDial):
            self.output('Current positions (user, dial) on %s' % timestamp)
        else:
            self.output('Current positions (user) on %s' % timestamp)
        self.output('')
        self.execMacro('_wm', self.all_motors, **self.table_opts)
class pwa(Macro):
    """Show all motor positions in a pretty table"""
    # TODO: duplication of the default value definition is a workaround
    # for #427. See commit message cc3331a for more details.
    param_def = [
        ['filter',
         ParamRepeat(['filter', Type.String, '.*',
                      'a regular expression filter'], min=1),
         ['.*'], 'a regular expression filter'],
    ]
    def run(self, filter):
        # Delegate to `wa`, forcing the pretty-table rendering options.
        pretty_opts = dict(Table.PrettyOpts)
        self.execMacro('wa', filter, **pretty_opts)
class set_lim(Macro):
    """Sets the software limits on the specified motor"""
    param_def = [
        ['motor', Type.Moveable, None, 'Motor name'],
        ['low', Type.Float, None, 'lower limit'],
        ['high', Type.Float, None, 'upper limit']
    ]
    def run(self, motor, low, high):
        """Apply *low* and *high* as the user-unit software limits of
        *motor* and report the new values."""
        name = motor.getName()
        self.debug("Setting user limits for %s" % name)
        # Limits live on the position attribute configuration (user units).
        motor.getPositionObj().setLimits(low, high)
        self.output("%s limits set to %.4f %.4f (user units)" %
                    (name, low, high))
class set_lm(Macro):
    """Sets the dial limits on the specified motor"""
    param_def = [
        ['motor', Type.Motor, None, 'Motor name'],
        ['low', Type.Float, None, 'lower limit'],
        ['high', Type.Float, None, 'upper limit']
    ]
    def run(self, motor, low, high):
        motor_name = motor.getName()
        self.debug("Setting dial limits for %s" % motor_name)
        # Dial limits are stored on the dial-position attribute.
        dial_pos_obj = motor.getDialPositionObj()
        dial_pos_obj.setLimits(low, high)
        msg = "%s limits set to %.4f %.4f (dial units)" % (motor_name,
                                                           low, high)
        self.output(msg)
class set_pos(Macro):
    """Sets the position of the motor to the specified value"""
    param_def = [
        ['motor', Type.Motor, None, 'Motor name'],
        ['pos', Type.Float, None, 'Position to move to']
    ]
    def run(self, motor, pos):
        motor_name = motor.getName()
        previous = motor.getPosition(force=True)
        # Redefine the current position without moving the motor.
        motor.definePosition(pos)
        self.output("%s reset from %.4f to %.4f" % (motor_name, previous,
                                                    pos))
class set_user_pos(Macro):
    """Sets the USER position of the motor to the specified value (by
    changing OFFSET and keeping DIAL)"""
    param_def = [
        ['motor', Type.Motor, None, 'Motor name'],
        ['pos', Type.Float, None, 'Position to move to']
    ]
    def run(self, motor, pos):
        motor_name = motor.getName()
        current_pos = motor.getPosition(force=True)
        offset_attr = motor.getAttribute('Offset')
        current_offset = offset_attr.read().value
        # Keep the dial position fixed: shifting the offset by
        # (pos - current_pos) makes the user position equal *pos*.
        new_offset = pos - (current_pos - current_offset)
        offset_attr.write(new_offset)
        self.output("%s reset from %.4f (offset %.4f) to %.4f (offset %.4f)"
                    % (motor_name, current_pos, current_offset, pos,
                       new_offset))
class wm(Macro):
    """Show the position of the specified motors."""
    param_def = [
        ['motor_list',
         ParamRepeat(['motor', Type.Moveable, None,
                      'Motor to see where it is']),
         None, 'List of motor to show'],
    ]
    def prepare(self, motor_list, **opts):
        self.table_opts = {}
    def run(self, motor_list):
        """Print user (and, per view options, dial) position plus the
        high/low limits of each motor, honoring the ShowDial,
        ShowCtrlAxis and PosFormat view options."""
        motor_width = 10
        motor_names = []
        motor_pos = []
        show_dial = self.getViewOption(ViewOption.ShowDial)
        show_ctrlaxis = self.getViewOption(ViewOption.ShowCtrlAxis)
        pos_format = self.getViewOption(ViewOption.PosFormat)
        for motor in motor_list:
            # compute the column width needed for this motor's cells
            max_len = 0
            if show_ctrlaxis:
                axis_nb = getattr(motor, "axis")
                ctrl_name = self.getController(motor.controller).name
                max_len = max(max_len, len(ctrl_name), len(str(axis_nb)))
            name = motor.getName()
            max_len = max(max_len, len(name))
            max_len = max_len + 5
            if max_len < 14:
                max_len = 14  # Length of 'Not specified'
            str_fmt = "%c%ds" % ('%', int(max_len))
            name = str_fmt % name
            motor_names.append([name])
            posObj = motor.getPositionObj()
            if pos_format > -1:
                fmt = '%c.%df' % ('%', int(pos_format))
            # NOTE: when PosFormat is unset (-1), `fmt` is never defined
            # and `fmt % ...` raises NameError, which the bare except
            # below deliberately absorbs, falling back to the raw value.
            try:
                val1 = fmt % motor.getPosition(force=True)
                val1 = str_fmt % val1
            except:
                val1 = str_fmt % motor.getPosition(force=True)
            val2 = str_fmt % posObj.getMaxValue()
            val3 = str_fmt % posObj.getMinValue()
            if show_ctrlaxis:
                valctrl = str_fmt % (ctrl_name)
                valaxis = str_fmt % str(axis_nb)
                upos = list(map(str, [valctrl, valaxis, ' ', val2, val1,
                                      val3]))
            else:
                upos = list(map(str, ['', val2, val1, val3]))
            pos_data = upos
            if show_dial:
                # same undefined-`fmt` fallback as for the user position
                try:
                    val1 = fmt % motor.getDialPosition(force=True)
                    val1 = str_fmt % val1
                except:
                    val1 = str_fmt % motor.getDialPosition(force=True)
                dPosObj = motor.getDialPositionObj()
                val2 = str_fmt % dPosObj.getMaxValue()
                val3 = str_fmt % dPosObj.getMinValue()
                dpos = list(map(str, [val2, val1, val3]))
                pos_data += [''] + dpos
            motor_pos.append(pos_data)
        elem_fmt = (['%*s'] + ['%*s'] * 5) * 2
        row_head_str = []
        if show_ctrlaxis:
            row_head_str += ['Ctrl', 'Axis']
        row_head_str += ['User', ' High', ' Current', ' Low']
        if show_dial:
            row_head_str += ['Dial', ' High', ' Current', ' Low']
        table = Table(motor_pos, elem_fmt=elem_fmt, row_head_str=row_head_str,
                      col_head_str=motor_names, col_head_width=motor_width,
                      **self.table_opts)
        for line in table.genOutput():
            self.output(line)
class wum(Macro):
    """Show the user position of the specified motors."""
    param_def = [
        ['motor_list',
         ParamRepeat(['motor', Type.Moveable, None,
                      'Motor to see where it is']),
         None, 'List of motor to show'],
    ]
    def prepare(self, motor_list, **opts):
        self.table_opts = {}
    def run(self, motor_list):
        """Print high limit, current user position and low limit for
        every requested motor."""
        col_width = 10
        headers = []
        rows = []
        for motor in motor_list:
            headers.append([motor.getName()])
            pos_obj = motor.getPositionObj()
            cells = [pos_obj.getMaxValue(),
                     motor.getPosition(force=True),
                     pos_obj.getMinValue()]
            rows.append([''] + [str(cell) for cell in cells])
        elem_fmt = (['%*s'] + ['%*s'] * 3) * 2
        row_head_str = ['User', ' High', ' Current', ' Low', ]
        table = Table(rows, elem_fmt=elem_fmt, row_head_str=row_head_str,
                      col_head_str=headers, col_head_width=col_width,
                      **self.table_opts)
        for line in table.genOutput():
            self.output(line)
class pwm(Macro):
    """Show the position of the specified motors in a pretty table"""
    param_def = [
        ['motor_list',
         ParamRepeat(['motor', Type.Moveable, None, 'Motor to move']),
         None, 'List of motor to show'],
    ]
    def run(self, motor_list):
        # Same as `wm` but rendered with the pretty-table options.
        pretty_opts = dict(Table.PrettyOpts)
        self.execMacro('wm', motor_list, **pretty_opts)
class mv(Macro):
    """Move motor(s) to the specified position(s)"""
    param_def = [
        ['motor_pos_list',
         ParamRepeat(['motor', Type.Moveable, None, 'Motor to move'],
                     ['pos', Type.Float, None, 'Position to move to']),
         None, 'List of motor/position pairs'],
    ]
    def run(self, motor_pos_list):
        """Move all given motors simultaneously to their targets and
        report the final state."""
        motors = []
        targets = []
        for motor, target in motor_pos_list:
            motors.append(motor)
            targets.append(target)
            self.debug("Starting %s movement to %s", motor.getName(), target)
        motion = self.getMotion(motors)
        final_state, _ = motion.move(targets)
        if final_state != DevState.ON:
            self.warning("Motion ended in %s", final_state.name)
        info_lines = [m.information() for m in motors]
        self.info("\n".join(info_lines))
class mstate(Macro):
    """Prints the state of a motor"""
    param_def = [['motor', Type.Moveable, None, 'Motor to check state']]
    def run(self, motor):
        current_state = motor.getState()
        self.info("Motor %s" % str(current_state))
class umv(Macro):
    """Move motor(s) to the specified position(s) and update"""
    # reuse the motor/position repeat parameter definition of `mv`
    param_def = mv.param_def
    def prepare(self, motor_pos_list, **opts):
        # Subscribe to position events *before* the motion starts so the
        # printed table refreshes live while the motors move.
        self.all_names = []
        self.all_pos = []
        self.print_pos = False
        for motor, pos in motor_pos_list:
            self.all_names.append([motor.getName()])
            pos, posObj = motor.getPosition(force=True), motor.getPositionObj()
            self.all_pos.append([pos])
            posObj.subscribeEvent(self.positionChanged, motor)
    def run(self, motor_pos_list):
        self.print_pos = True
        try:
            self.execMacro('mv', motor_pos_list)
        finally:
            # always unsubscribe and print the final positions, even when
            # the motion was stopped or failed
            self.finish()
    def finish(self):
        self._clean()
        self.printAllPos()
    def _clean(self):
        # remove the event subscriptions created in prepare()
        for motor, pos in self.getParameters()[0]:
            posObj = motor.getPositionObj()
            try:
                posObj.unsubscribeEvent(self.positionChanged, motor)
            except Exception as e:
                print(str(e))
                raise e
    def positionChanged(self, motor, position):
        # event callback: update the cached position of this motor and,
        # while the motion is running, refresh the output block
        idx = self.all_names.index([motor.getName()])
        self.all_pos[idx] = [position]
        if self.print_pos:
            self.printAllPos()
    def printAllPos(self):
        motor_width = 10
        table = Table(self.all_pos, elem_fmt=['%*.4f'],
                      col_head_str=self.all_names, col_head_width=motor_width)
        self.outputBlock(table.genOutput())
        self.flushOutput()
class mvr(Macro):
    """Move motor(s) relative to the current position(s)"""
    param_def = [
        ['motor_disp_list',
         ParamRepeat(['motor', Type.Moveable, None, 'Motor to move'],
                     ['disp', Type.Float, None, 'Relative displacement']),
         None, 'List of motor/displacement pairs'],
    ]
    def run(self, motor_disp_list):
        # Translate every relative displacement into an absolute target
        # and delegate the actual motion to the `mv` macro.
        targets = []
        for motor, displacement in motor_disp_list:
            current = motor.getPosition(force=True)
            if current is None:
                self.error("Cannot get %s position" % motor.getName())
                return
            targets.append([motor, current + displacement])
        self.execMacro('mv', targets)
class umvr(Macro):
    """Move motor(s) relative to the current position(s) and update"""
    param_def = mvr.param_def
    def run(self, motor_disp_list):
        # Same as `mvr` but delegates to the live-updating `umv` macro.
        targets = []
        for motor, displacement in motor_disp_list:
            current = motor.getPosition(force=True)
            if current is None:
                self.error("Cannot get %s position" % motor.getName())
                return
            targets.append([motor, current + displacement])
        self.execMacro('umv', targets)
# TODO: implement tw macro with param repeats in order to be able to pass
# multiple motors and multiple deltas. Also allow to pass the integration time
# in order to execute the measurement group acquisition after each move and
# print the results. Basically follow the SPEC's API:
# https://certif.com/spec_help/tw.html
class tw(iMacro):
    """Tweak motor by variable delta"""
    param_def = [
        ['motor', Type.Moveable, "test", 'Motor to move'],
        ['delta', Type.Float, None, 'Amount to tweak']
    ]
    def run(self, motor, delta):
        """Interactively move *motor* back and forth in steps of *delta*.

        At each prompt the user can answer ``+``/``p`` or ``-``/``n`` to
        choose the direction, type a new numeric step size, or type
        anything else (or Ctrl-C) to quit.
        """
        self.output(
            "Indicate direction with + (or p) or - (or n) or enter")
        self.output(
            "new step size. Type something else (or ctrl-C) to quit.")
        self.output("")
        # BUGFIX: the original set `a` only for np.sign(delta) == -1 or
        # == 1, so delta == 0 (sign 0) left `a` undefined and raised
        # UnboundLocalError below.  Treat 0 as positive.
        if np.sign(delta) == -1:
            a = "-"
        else:
            a = "+"
        while a in ('+', '-', 'p', 'n'):
            pos = motor.position
            a = self.input("%s = %s, which way? " % (
                motor, pos), default_value=a, data_type=Type.String)
            try:
                # check if the input is a new delta
                delta = float(a)
                # obtain the sign of the new delta
                if np.sign(delta) == -1:
                    a = "-"
                else:
                    a = "+"
            except ValueError:
                # the input was not a number: interpret it as a direction
                if a == "p":
                    a = "+"
                elif a == "n":
                    a = "-"
                elif a in ("+", "-"):
                    # the sign is already correct, just continue
                    pass
                else:
                    msg = "Typing '%s' caused 'tw' macro to stop." % a
                    self.info(msg)
                    raise StopException()
            # invert the delta if necessary so it matches the chosen sign
            if (a == "+" and np.sign(delta) < 0) or \
               (a == "-" and np.sign(delta) > 0):
                delta = -delta
            pos += delta
            self.mv(motor, pos)
##########################################################################
#
# Data acquisition related macros
#
##########################################################################
def _value_to_repr(data):
if data is None:
return "<nodata>"
elif np.rank(data) > 0:
return list(np.shape(data))
else:
return data
class ct(Macro, Hookable):
    """Count for the specified time on the measurement group
    or experimental channel given as second argument
    (if not given the active measurement group is used)"""
    hints = {'allowsHooks': ('pre-acq', 'post-acq')}
    param_def = [
        ['integ_time', Type.Float, 1.0, 'Integration time'],
        ['countable_elem', Type.Countable, Optional,
         'Countable element e.g. MeasurementGroup or ExpChannel']
    ]
    def prepare(self, integ_time, countable_elem, **opts):
        if countable_elem is None:
            # no element given: fall back to the ActiveMntGrp environment
            try:
                self.countable_elem_name = self.getEnv('ActiveMntGrp')
            except UnknownEnv:
                return
            self.countable_elem = self.getObj(
                self.countable_elem_name, type_class=Type.MeasurementGroup)
        else:
            self.countable_elem_name = countable_elem.name
            self.countable_elem = countable_elem
    def run(self, integ_time, countable_elem):
        """Acquire for *integ_time* seconds, run the pre/post-acq hooks
        and print one line per enabled channel."""
        if self.countable_elem is None:
            # BUGFIX: the original implicit string concatenation was
            # missing a space ("...parameter orActiveMntGrp...").
            msg = ('Unknown countable {0} element. Use macro parameter or '
                   'ActiveMntGrp environment variable'.format(
                       self.countable_elem_name))
            self.error(msg)
            return
        # integration time has to be accessible from within the hooks
        # so declare it also instance attribute
        self.integ_time = integ_time
        self.debug("Counting for %s sec", integ_time)
        self.outputDate()
        self.output('')
        self.flushOutput()
        for preAcqHook in self.getHooks('pre-acq'):
            preAcqHook()
        state, data = self.countable_elem.count(integ_time)
        for postAcqHook in self.getHooks('post-acq'):
            postAcqHook()
        names, counts = [], []
        if self.countable_elem.type == Type.MeasurementGroup:
            meas_grp = self.countable_elem
            for ch_info in meas_grp.getChannelsEnabledInfo():
                names.append(' %s' % ch_info.label)
                ch_data = data.get(ch_info.full_name)
                counts.append(_value_to_repr(ch_data))
        else:
            channel = self.countable_elem
            names.append(" %s" % channel.name)
            counts.append(_value_to_repr(data))
            # to be compatible with measurement group count
            data = {channel.full_name: data}
        self.setData(Record(data))
        table = Table([counts], row_head_str=names, row_head_fmt='%*s',
                      col_sep=' = ')
        for line in table.genOutput():
            self.output(line)
class uct(Macro):
    """Count on the active measurement group and update"""
    param_def = [
        ['integ_time', Type.Float, 1.0, 'Integration time'],
        ['countable_elem', Type.Countable, Optional,
         'Countable element e.g. MeasurementGroup or ExpChannel']
    ]
    def prepare(self, integ_time, countable_elem, **opts):
        # Resolve the countable element and subscribe to the value events
        # of every channel so the printout refreshes live while counting.
        self.print_value = False
        if countable_elem is None:
            try:
                self.countable_elem_name = self.getEnv('ActiveMntGrp')
            except UnknownEnv:
                return
            self.countable_elem = self.getObj(self.countable_elem_name)
        else:
            self.countable_elem_name = countable_elem.name
            self.countable_elem = countable_elem
        if self.countable_elem is None:
            return
        self.channels = []
        self.values = []
        if self.countable_elem.type == Type.MeasurementGroup:
            names = self.countable_elem.getChannelLabels()
            self.names = [[n] for n in names]
            for channel_info in self.countable_elem.getChannels():
                full_name = channel_info["full_name"]
                channel = Device(full_name)
                self.channels.append(channel)
                value = channel.getValue(force=True)
                self.values.append([value])
                valueObj = channel.getValueObj_()
                valueObj.subscribeEvent(self.counterChanged, channel)
        else:
            # single experimental channel
            channel = self.countable_elem
            self.names = [[channel.getName()]]
            channel = Device(channel.full_name)
            self.channels.append(channel)
            value = channel.getValue(force=True)
            self.values.append([value])
            valueObj = channel.getValueObj_()
            valueObj.subscribeEvent(self.counterChanged, channel)
    def run(self, integ_time, countable_elem):
        if self.countable_elem is None:
            # BUGFIX: the original implicit string concatenation was
            # missing a space ("...parameter orActiveMntGrp...").
            msg = ('Unknown countable {0} element. Use macro parameter or '
                   'ActiveMntGrp environment variable'.format(
                       self.countable_elem_name))
            self.error(msg)
            return
        self.print_value = True
        try:
            _, data = self.countable_elem.count(integ_time)
            self.setData(Record(data))
        finally:
            # always unsubscribe and print the final values
            self.finish()
    def finish(self):
        self._clean()
        self.printAllValues()
    def _clean(self):
        # remove the event subscriptions created in prepare()
        for channel in self.channels:
            valueObj = channel.getValueObj_()
            valueObj.unsubscribeEvent(self.counterChanged, channel)
    def counterChanged(self, channel, value):
        # event callback: cache the new value and refresh the printout
        idx = self.names.index([channel.getName()])
        self.values[idx] = [value]
        if self.print_value and not self.isStopped():
            self.printAllValues()
    def printAllValues(self):
        ch_width = 10
        table = Table(self.values, elem_fmt=['%*.4f'], col_head_str=self.names,
                      col_head_width=ch_width)
        self.outputBlock(table.genOutput())
        self.flushOutput()
class settimer(Macro):
    """Defines the timer channel for the active measurement group"""
    env = ('ActiveMntGrp',)
    param_def = [
        ['timer', Type.ExpChannel, None, 'Timer'],
    ]
    def run(self, timer):
        # Resolve the active measurement group first; without a valid one
        # there is nothing to configure.
        active_name = self.getEnv('ActiveMntGrp')
        mnt_grp = self.getObj(active_name, type_class=Type.MeasurementGroup)
        if mnt_grp is None:
            self.error('ActiveMntGrp is not defined or has invalid value.\n'
                       'please define a valid active measurement group '
                       'before setting a timer')
            return
        try:
            mnt_grp.setTimer(timer.getName())
        except Exception as exc:
            self.output(str(exc))
            self.output(
                "%s is not a valid channel in the active measurement group"
                % timer)
@macro([['message', ParamRepeat(['message_item', Type.String, None,
                                'message item to be reported']), None,
        'message to be reported']])
def report(self, message):
    """Logs a new record into the message report system (if active)"""
    # NOTE(review): `self.report` here appears to be the Macro logging
    # API, not a recursive call of this macro - confirm against the
    # Macro base class.
    self.report(' '.join(message))
class logmacro(Macro):
    """ Turn on/off logging of the spock output.
    .. note::
        The logmacro class has been included in Sardana
        on a provisional basis. Backwards incompatible changes
        (up to and including its removal) may occur if
        deemed necessary by the core developers
    """
    param_def = [
        ['offon', Type.Boolean, None, 'Unset/Set logging'],
        ['mode', Type.Integer, -1, 'Mode: 0 append, 1 new file'],
    ]
    def run(self, offon, mode):
        if not offon:
            self.setEnv('LogMacro', False)
            return
        # Only touch LogMacroMode when an explicit mode was requested;
        # the default (-1) keeps the previously configured mode.
        if mode == 1:
            self.setEnv('LogMacroMode', True)
        elif mode == 0:
            self.setEnv('LogMacroMode', False)
        self.setEnv('LogMacro', True)
class repeat(Hookable, Macro):
    """This macro executes as many repetitions of a set of macros as
    specified by nr parameter. The macros to be repeated can be
    given as parameters or as body hooks.
    If both are given first will be executed the ones given as
    parameters and then the ones given as body hooks.
    If nr has negative value, repetitions will be executed until you
    stop repeat macro.
    .. note::
        The repeat macro has been included in Sardana
        on a provisional basis. Backwards incompatible changes
        (up to and including removal of the macro) may occur if
        deemed necessary by the core developers."""
    hints = {'allowsHooks': ('body',)}
    param_def = [
        ['nr', Type.Integer, None, 'Nr of iterations'],
        ['macro_name_params', [
            ['token', Type.String,
             None, 'Macro name and parameters (if any)'],
            {'min': 0}
        ],
            None, "List with macro name and parameters (if any)"]
    ]
    def prepare(self, nr, macro_name_params):
        self.bodyHooks = self.getHooks("body")
        self.macro_name_params = macro_name_params
    def __loop(self):
        # One repetition: run the macros given as parameters first, then
        # the body hooks.  checkPoint() lets a stop request interrupt us.
        self.checkPoint()
        if len(self.macro_name_params) > 0:
            for macro_cmd in self.macro_name_params:
                self.execMacro(macro_cmd)
        for bodyHook in self.bodyHooks:
            bodyHook()
    def run(self, nr, macro_name_params):
        # NOTE: run() is a generator (it yields progress percentages);
        # with a negative nr it loops until the macro is stopped and
        # never reaches the yield.
        if nr < 0:
            while True:
                self.__loop()
        else:
            for i in range(nr):
                self.__loop()
                progress = ((i + 1) / nr) * 100
                yield progress
class newfile(Hookable, Macro):
    """ Sets the ScanDir and ScanFile as well as ScanID in the environment.
    If ScanFilePath is only a file name, the ScanDir must be set externally
    via `senv ScanDir <PathToScanFile>` or using the %expconf. Otherwise,
    the path in ScanFilePath must be absolute and existing on the
    MacroServer host.
    The ScanID should be set to the value before the upcoming scan number.
    Default value is 0.
    """

    # FIX: 'allowsHooks' must be a *sequence* of hook names.  The previous
    # value ('post-newfile') was missing the trailing comma, which made it a
    # plain string instead of a one-element tuple (cf. repeat's ('body',)).
    hints = {'allowsHooks': ('post-newfile',)}

    param_def = [
        ['ScanFilePath_list',
         [['ScanFilePath', Type.String, None, '(ScanDir/)ScanFile']],
         None, 'List of (ScanDir/)ScanFile'],
        ['ScanID', Type.Integer, 0, 'Scan ID'],
    ]

    def run(self, ScanFilePath_list, ScanID):
        # Validated directory and file-name components collected per entry.
        path_list = []
        fileName_list = []
        # traverse the repeat parameters for the ScanFilePath_list
        for i, ScanFilePath in enumerate(ScanFilePath_list):
            path = os.path.dirname(ScanFilePath)
            fileName = os.path.basename(ScanFilePath)
            if not path and i == 0:
                # first entry and no given ScanDir: check if ScanDir exists
                try:
                    ScanDir = self.getEnv('ScanDir')
                except UnknownEnv:
                    ScanDir = ''
                if not (isinstance(ScanDir, str) and len(ScanDir) > 0):
                    msg = ('Data is not stored until ScanDir is correctly '
                           'set! Provide ScanDir with newfile macro: '
                           '`newfile [<ScanDir>/<ScanFile>] <ScanID>` '
                           'or `senv ScanDir <ScanDir>` or with %expconf')
                    self.error(msg)
                    return
                else:
                    path = ScanDir
            elif not path and i > 0:
                # not first entry and no given path: use path of last iteration
                path = path_list[i-1]
            elif not os.path.isabs(path):
                # relative path
                self.error('Only absolute path are allowed!')
                return
            else:
                # absolute path
                path = os.path.normpath(path)
            if i > 0 and (path not in path_list):
                # All entries must share a single data directory.
                self.error('Multiple paths to the data files are not allowed')
                return
            elif not os.path.exists(path):
                # The directory must already exist on the MacroServer host.
                self.error('Path %s does not exists on the host of the '
                           'MacroServer and has to be created in '
                           'advance.' % path)
                return
            else:
                self.debug('Path %s appended.' % path)
                path_list.append(path)
            if not fileName:
                self.error('No filename is given.')
                return
            elif fileName in fileName_list:
                self.error('Duplicate filename %s is not allowed.' % fileName)
                return
            else:
                self.debug('Filename is %s.' % fileName)
                fileName_list.append(fileName)
        # Negative ScanIDs make no sense; the next scan then becomes #1.
        if ScanID < 1:
            ScanID = 0
        self.setEnv('ScanFile', fileName_list)
        self.setEnv('ScanDir', path_list[0])
        self.setEnv('ScanID', ScanID)
        self.output('ScanDir is\t: %s', path_list[0])
        for i, ScanFile in enumerate(fileName_list):
            if i == 0:
                self.output('ScanFile set to\t: %s', ScanFile)
            else:
                self.output('\t\t %s', ScanFile)
        self.output('Next scan is\t: #%d', ScanID+1)
        # Finally run any hooks attached at the 'post-newfile' position.
        for postNewfileHook in self.getHooks('post-newfile'):
            postNewfileHook()
| [
"os.path.isabs",
"sardana.macroserver.scan.scandata.Record",
"os.path.basename",
"os.path.dirname",
"numpy.rank",
"os.path.exists",
"sardana.macroserver.macro.ParamRepeat",
"numpy.shape",
"taurus.console.table.Table",
"os.path.normpath",
"sardana.macroserver.msexception.StopException",
"numpy.... | [((5312, 5429), 'taurus.console.table.Table', 'Table', (['values'], {'elem_fmt': 't_format', 'col_head_str': 'col_headers', 'col_head_width': 'motor_width', 'row_head_str': 'row_headers'}), '(values, elem_fmt=t_format, col_head_str=col_headers, col_head_width=\n motor_width, row_head_str=row_headers)\n', (5317, 5429), False, 'from taurus.console.table import Table\n'), ((6491, 6601), 'taurus.console.table.Table', 'Table', (['motor_pos'], {'elem_fmt': '[fmt]', 'col_head_str': 'motor_names', 'col_head_width': 'motor_width'}), '(motor_pos, elem_fmt=[fmt], col_head_str=motor_names, col_head_width=\n motor_width, **self.table_opts)\n', (6496, 6601), False, 'from taurus.console.table import Table\n'), ((14048, 14188), 'taurus.console.table.Table', 'Table', (['motor_pos'], {'elem_fmt': 'elem_fmt', 'row_head_str': 'row_head_str', 'col_head_str': 'motor_names', 'col_head_width': 'motor_width'}), '(motor_pos, elem_fmt=elem_fmt, row_head_str=row_head_str, col_head_str\n =motor_names, col_head_width=motor_width, **self.table_opts)\n', (14053, 14188), False, 'from taurus.console.table import Table\n'), ((15228, 15368), 'taurus.console.table.Table', 'Table', (['motor_pos'], {'elem_fmt': 'elem_fmt', 'row_head_str': 'row_head_str', 'col_head_str': 'motor_names', 'col_head_width': 'motor_width'}), '(motor_pos, elem_fmt=elem_fmt, row_head_str=row_head_str, col_head_str\n =motor_names, col_head_width=motor_width, **self.table_opts)\n', (15233, 15368), False, 'from taurus.console.table import Table\n'), ((18234, 18334), 'taurus.console.table.Table', 'Table', (['self.all_pos'], {'elem_fmt': "['%*.4f']", 'col_head_str': 'self.all_names', 'col_head_width': 'motor_width'}), "(self.all_pos, elem_fmt=['%*.4f'], col_head_str=self.all_names,\n col_head_width=motor_width)\n", (18239, 18334), False, 'from taurus.console.table import Table\n'), ((24660, 24732), 'taurus.console.table.Table', 'Table', (['[counts]'], {'row_head_str': 'names', 'row_head_fmt': '"""%*s"""', 'col_sep': 
'""" = """'}), "([counts], row_head_str=names, row_head_fmt='%*s', col_sep=' = ')\n", (24665, 24732), False, 'from taurus.console.table import Table\n'), ((27757, 27849), 'taurus.console.table.Table', 'Table', (['self.values'], {'elem_fmt': "['%*.4f']", 'col_head_str': 'self.names', 'col_head_width': 'ch_width'}), "(self.values, elem_fmt=['%*.4f'], col_head_str=self.names,\n col_head_width=ch_width)\n", (27762, 27849), False, 'from taurus.console.table import Table\n'), ((1927, 1987), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to move']"], {}), "(['motor', Type.Moveable, None, 'Motor to move'])\n", (1938, 1987), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((5646, 5706), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to move']"], {}), "(['motor', Type.Moveable, None, 'Motor to move'])\n", (5657, 5706), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((7495, 7580), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['filter', Type.String, '.*', 'a regular expression filter']"], {'min': '(1)'}), "(['filter', Type.String, '.*', 'a regular expression filter'], min=1\n )\n", (7506, 7580), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((8648, 8733), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['filter', Type.String, '.*', 'a regular expression filter']"], {'min': '(1)'}), "(['filter', Type.String, '.*', 'a regular expression filter'], min=1\n )\n", (8659, 8733), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((11273, 11344), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to see where it is']"], {}), "(['motor', Type.Moveable, None, 
'Motor to see where it is'])\n", (11284, 11344), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((14426, 14497), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to see where it is']"], {}), "(['motor', Type.Moveable, None, 'Motor to see where it is'])\n", (14437, 14497), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((15618, 15678), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to move']"], {}), "(['motor', Type.Moveable, None, 'Motor to move'])\n", (15629, 15678), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((15947, 16062), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to move']", "['pos', Type.Float, None, 'Position to move to']"], {}), "(['motor', Type.Moveable, None, 'Motor to move'], ['pos', Type.\n Float, None, 'Position to move to'])\n", (15958, 16062), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((18560, 18678), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['motor', Type.Moveable, None, 'Motor to move']", "['disp', Type.Float, None, 'Relative displacement']"], {}), "(['motor', Type.Moveable, None, 'Motor to move'], ['disp', Type.\n Float, None, 'Relative displacement'])\n", (18571, 18678), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((20490, 20504), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (20497, 20504), True, 'import numpy as np\n'), ((20543, 20557), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (20550, 20557), True, 'import numpy as np\n'), ((22099, 22112), 'numpy.rank', 'np.rank', (['data'], {}), '(data)\n', (22106, 22112), True, 
'import numpy as np\n'), ((24630, 24642), 'sardana.macroserver.scan.scandata.Record', 'Record', (['data'], {}), '(data)\n', (24636, 24642), False, 'from sardana.macroserver.scan.scandata import Record\n'), ((26407, 26432), 'taurus.Device', 'Device', (['channel.full_name'], {}), '(channel.full_name)\n', (26413, 26432), False, 'from taurus import Device\n'), ((28814, 28893), 'sardana.macroserver.macro.ParamRepeat', 'ParamRepeat', (["['message_item', Type.String, None, 'message item to be reported']"], {}), "(['message_item', Type.String, None, 'message item to be reported'])\n", (28825, 28893), False, 'from sardana.macroserver.macro import Macro, macro, Type, ParamRepeat, ViewOption, iMacro, Hookable\n'), ((32540, 32569), 'os.path.dirname', 'os.path.dirname', (['ScanFilePath'], {}), '(ScanFilePath)\n', (32555, 32569), False, 'import os\n'), ((32593, 32623), 'os.path.basename', 'os.path.basename', (['ScanFilePath'], {}), '(ScanFilePath)\n', (32609, 32623), False, 'import os\n'), ((22138, 22152), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (22146, 22152), True, 'import numpy as np\n'), ((26001, 26018), 'taurus.Device', 'Device', (['full_name'], {}), '(full_name)\n', (26007, 26018), False, 'from taurus import Device\n'), ((27159, 27171), 'sardana.macroserver.scan.scandata.Record', 'Record', (['data'], {}), '(data)\n', (27165, 27171), False, 'from sardana.macroserver.scan.scandata import Record\n'), ((20953, 20967), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (20960, 20967), True, 'import numpy as np\n'), ((33991, 34011), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (34005, 34011), False, 'import os\n'), ((7129, 7152), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7150, 7152), False, 'import datetime\n'), ((33540, 33559), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (33553, 33559), False, 'import os\n'), ((33751, 33773), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', 
(33767, 33773), False, 'import os\n'), ((8128, 8151), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8149, 8151), False, 'import datetime\n'), ((8265, 8288), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8286, 8288), False, 'import datetime\n'), ((21557, 21572), 'sardana.macroserver.msexception.StopException', 'StopException', ([], {}), '()\n', (21570, 21572), False, 'from sardana.macroserver.msexception import StopException, UnknownEnv\n'), ((21654, 21668), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (21661, 21668), True, 'import numpy as np\n'), ((21712, 21726), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (21719, 21726), True, 'import numpy as np\n')] |
# Implement BPR.
# <NAME>, et al. BPR: Bayesian personalized ranking from implicit feedback.
# Proceedings of the twenty-fifth conference on uncertainty in artificial intelligence. AUAI, 2009.
# @author <NAME>, <NAME>, <NAME>
import random
from collections import defaultdict
import numpy as np
from sklearn.metrics import roc_auc_score
import scores
class BPR:
    """Bayesian Personalized Ranking with matrix factorization.

    Learns latent user/item factor matrices U and V by SGD on sampled
    (user, positive item, negative item) triples, then evaluates the
    ranking with AUC and top-K scores on a held-out test set.
    Reference: Rendle et al., "BPR: Bayesian personalized ranking from
    implicit feedback", UAI 2009.
    """

    # MovieLens-100k dimensions; user/item ids in the data files are 1-based.
    user_count = 943
    item_count = 1682
    latent_factors = 20
    lr = 0.01    # SGD learning rate
    reg = 0.01   # L2 regularization strength
    train_count = 10000
    train_data_path = 'train.txt'
    test_data_path = 'test.txt'
    size_u_i = user_count * item_count
    # latent_factors of U & V, small random initialization
    U = np.random.rand(user_count, latent_factors) * 0.01
    V = np.random.rand(item_count, latent_factors) * 0.01
    test_data = np.zeros((user_count, item_count))
    test = np.zeros(size_u_i)
    predict_ = np.zeros(size_u_i)

    def load_data(self, path):
        """Read 'user item' pairs from *path* into {user: set(items)}."""
        user_ratings = defaultdict(set)
        # Use a context manager so the file is always closed.
        with open(path, 'r') as f:
            for line in f:
                u, i = line.split(" ")
                user_ratings[int(u)].add(int(i))
        return user_ratings

    def load_test_data(self, path):
        """Mark the pairs listed in *path* with 1 in self.test_data."""
        # FIX: the original opened the file without ever closing it.
        with open(path, 'r') as f:
            for line in f:
                parts = line.split(' ')
                user = int(parts[0])
                item = int(parts[1])
                self.test_data[user - 1][item - 1] = 1

    def train(self, user_ratings_train):
        """Run one epoch of `user_count` SGD updates on sampled triples."""
        for _ in range(self.user_count):
            # sample a user (1-based id)
            u = random.randint(1, self.user_count)
            if u not in user_ratings_train:
                continue
            # Sample a positive item from the observed items.
            # FIX: random.sample() on a set raises TypeError on Python >= 3.11,
            # so draw uniformly from a materialized list instead.
            i = random.choice(list(user_ratings_train[u]))
            # sample a negative item from the unobserved items
            j = random.randint(1, self.item_count)
            while j in user_ratings_train[u]:
                j = random.randint(1, self.item_count)
            # switch to 0-based indices for the factor matrices
            u -= 1
            i -= 1
            j -= 1
            # BPR-Opt gradient step for the triple (u, i, j)
            r_ui = np.dot(self.U[u], self.V[i].T)
            r_uj = np.dot(self.U[u], self.V[j].T)
            r_uij = r_ui - r_uj
            mid = 1.0 / (1 + np.exp(r_uij))
            # FIX: take a copy of the old user factors.  self.U[u] is a view,
            # so without .copy() the in-place update of U[u] below would leak
            # into the V[i]/V[j] updates that must use the *old* U[u].
            temp = self.U[u].copy()
            self.U[u] += -self.lr * (-mid * (self.V[i] - self.V[j]) + self.reg * self.U[u])
            self.V[i] += -self.lr * (-mid * temp + self.reg * self.V[i])
            self.V[j] += -self.lr * (-mid * (-temp) + self.reg * self.V[j])

    def predict(self, user, item):
        """Return the score matrix user @ item.T as a numpy matrix."""
        predict = np.mat(user) * np.mat(item.T)
        return predict

    def main(self):
        """Train for `train_count` epochs, then report AUC and top-K scores."""
        user_ratings_train = self.load_data(self.train_data_path)
        self.load_test_data(self.test_data_path)
        # Flatten the 0/1 test matrix into the label vector (row-major order
        # matches the original u * item_count + item indexing).
        self.test = self.test_data.reshape(self.size_u_i).copy()
        # training
        for _ in range(self.train_count):
            self.train(user_ratings_train)
        predict_matrix = self.predict(self.U, self.V)
        # prediction
        self.predict_ = predict_matrix.getA().reshape(-1)
        # Never recommend items already seen during training.
        self.predict_ = pre_handel(user_ratings_train, self.predict_, self.item_count)
        auc_score = roc_auc_score(self.test, self.predict_)
        print('AUC:', auc_score)
        # Top-K evaluation
        scores.topK_scores(self.test, self.predict_, 20, self.user_count, self.item_count)
def pre_handel(set, predict, item_count):
    """Zero out every training (positive) item in the flat score vector.

    Items a user has already rated must never be recommended, so their
    scores are forced to 0 before evaluation.  Mutates and returns
    *predict*.  (Note: ids in *set* are 1-based, *predict* is 0-based.)
    """
    for u, rated_items in set.items():
        row_offset = (u - 1) * item_count
        for j in rated_items:
            predict[row_offset + j - 1] = 0
    return predict
if __name__ == '__main__':
    # Script entry point: train the BPR model and print evaluation metrics.
    BPR().main()
| [
"random.randint",
"random.sample",
"numpy.zeros",
"sklearn.metrics.roc_auc_score",
"collections.defaultdict",
"numpy.exp",
"numpy.random.rand",
"numpy.dot",
"numpy.mat",
"scores.topK_scores"
] | [((776, 810), 'numpy.zeros', 'np.zeros', (['(user_count, item_count)'], {}), '((user_count, item_count))\n', (784, 810), True, 'import numpy as np\n'), ((823, 841), 'numpy.zeros', 'np.zeros', (['size_u_i'], {}), '(size_u_i)\n', (831, 841), True, 'import numpy as np\n'), ((858, 876), 'numpy.zeros', 'np.zeros', (['size_u_i'], {}), '(size_u_i)\n', (866, 876), True, 'import numpy as np\n'), ((650, 692), 'numpy.random.rand', 'np.random.rand', (['user_count', 'latent_factors'], {}), '(user_count, latent_factors)\n', (664, 692), True, 'import numpy as np\n'), ((709, 751), 'numpy.random.rand', 'np.random.rand', (['item_count', 'latent_factors'], {}), '(item_count, latent_factors)\n', (723, 751), True, 'import numpy as np\n'), ((935, 951), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (946, 951), False, 'from collections import defaultdict\n'), ((3688, 3727), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['self.test', 'self.predict_'], {}), '(self.test, self.predict_)\n', (3701, 3727), False, 'from sklearn.metrics import roc_auc_score\n'), ((1716, 1750), 'random.randint', 'random.randint', (['(1)', 'self.user_count'], {}), '(1, self.user_count)\n', (1730, 1750), False, 'import random\n'), ((2032, 2066), 'random.randint', 'random.randint', (['(1)', 'self.item_count'], {}), '(1, self.item_count)\n', (2046, 2066), False, 'import random\n'), ((2250, 2280), 'numpy.dot', 'np.dot', (['self.U[u]', 'self.V[i].T'], {}), '(self.U[u], self.V[i].T)\n', (2256, 2280), True, 'import numpy as np\n'), ((2301, 2331), 'numpy.dot', 'np.dot', (['self.U[u]', 'self.V[j].T'], {}), '(self.U[u], self.V[j].T)\n', (2307, 2331), True, 'import numpy as np\n'), ((2741, 2753), 'numpy.mat', 'np.mat', (['user'], {}), '(user)\n', (2747, 2753), True, 'import numpy as np\n'), ((2756, 2770), 'numpy.mat', 'np.mat', (['item.T'], {}), '(item.T)\n', (2762, 2770), True, 'import numpy as np\n'), ((3803, 3890), 'scores.topK_scores', 'scores.topK_scores', (['self.test', 'self.predict_', 
'(20)', 'self.user_count', 'self.item_count'], {}), '(self.test, self.predict_, 20, self.user_count, self.\n item_count)\n', (3821, 3890), False, 'import scores\n'), ((1908, 1947), 'random.sample', 'random.sample', (['user_ratings_train[u]', '(1)'], {}), '(user_ratings_train[u], 1)\n', (1921, 1947), False, 'import random\n'), ((2135, 2169), 'random.randint', 'random.randint', (['(1)', 'self.item_count'], {}), '(1, self.item_count)\n', (2149, 2169), False, 'import random\n'), ((2395, 2408), 'numpy.exp', 'np.exp', (['r_uij'], {}), '(r_uij)\n', (2401, 2408), True, 'import numpy as np\n')] |
"""
<NAME>
<EMAIL>
Image warping using per-pixel flow vectors.
Modified from:
https://github.com/tensorflow/addons/blob/v0.6.0/tensorflow_addons/image/dense_image_warp.py
Added Nearest-Neighbor interpolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
import keras.backend as K
def _interpolate_bilinear(grid,
                          query_points,
                          name='interpolate_bilinear',
                          indexing='ij'):
    """Similar to Matlab's interp2 function.
    Finds values for query points on a grid using bilinear interpolation.
    Args:
      grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
      query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
      name: a name for the operation (optional).
      indexing: whether the query points are specified as row and column (ij),
        or Cartesian coordinates (xy).
    Returns:
      values: a 3-D `Tensor` with shape `[batch, N, channels]`
    Raises:
      ValueError: if the indexing mode is invalid, or if the shape of the inputs
        invalid.
    """
    if indexing != 'ij' and indexing != 'xy':
        raise ValueError('Indexing mode must be \'ij\' or \'xy\'')
    with ops.name_scope(name):
        grid = ops.convert_to_tensor(grid)
        query_points = ops.convert_to_tensor(query_points)
        # The grid rank must be known statically so it can be validated here.
        shape = grid.get_shape().as_list()
        if len(shape) != 4:
            msg = 'Grid must be 4 dimensional. Received size: '
            raise ValueError(msg + str(grid.get_shape()))
        # Dynamic dimensions, resolved at graph execution time.
        batch_size, height, width, channels = (array_ops.shape(grid)[0],
                                               array_ops.shape(grid)[1],
                                               array_ops.shape(grid)[2],
                                               array_ops.shape(grid)[3])
        shape = [batch_size, height, width, channels]
        query_type = query_points.dtype
        grid_type = grid.dtype
        # Runtime assertions: query_points must have shape [batch, N, 2].
        with ops.control_dependencies([
            check_ops.assert_equal(
                len(query_points.get_shape()),
                3,
                message='Query points must be 3 dimensional.'),
            check_ops.assert_equal(
                array_ops.shape(query_points)[2],
                2,
                message='Query points must be size 2 in dim 2.')
        ]):
            num_queries = array_ops.shape(query_points)[1]
        # A 2x2 neighborhood is gathered below, so both spatial dims need >= 2.
        with ops.control_dependencies([
            check_ops.assert_greater_equal(
                height, 2, message='Grid height must be at least 2.'),
            check_ops.assert_greater_equal(
                width, 2, message='Grid width must be at least 2.')
        ]):
            alphas = []
            floors = []
            ceils = []
            index_order = [0, 1] if indexing == 'ij' else [1, 0]
            unstacked_query_points = array_ops.unstack(query_points, axis=2)
        # Per spatial axis: clamp the queries into the grid and split each into
        # an integer floor/ceil index pair plus a fractional blending weight.
        for dim in index_order:
            with ops.name_scope('dim-' + str(dim)):
                queries = unstacked_query_points[dim]
                size_in_indexing_dimension = shape[dim + 1]
                # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
                # is still a valid index into the grid.
                max_floor = math_ops.cast(size_in_indexing_dimension - 2, query_type)
                min_floor = constant_op.constant(0.0, dtype=query_type)
                floor = math_ops.minimum(
                    math_ops.maximum(min_floor, math_ops.floor(queries)), max_floor)
                int_floor = math_ops.cast(floor, dtypes.int32)
                floors.append(int_floor)
                ceil = int_floor + 1
                ceils.append(ceil)
                # alpha has the same type as the grid, as we will directly use alpha
                # when taking linear combinations of pixel values from the image.
                alpha = math_ops.cast(queries - floor, grid_type)
                min_alpha = constant_op.constant(0.0, dtype=grid_type)
                max_alpha = constant_op.constant(1.0, dtype=grid_type)
                alpha = math_ops.minimum(math_ops.maximum(min_alpha, alpha), max_alpha)
                # Expand alpha to [b, n, 1] so we can use broadcasting
                # (since the alpha values don't depend on the channel).
                alpha = array_ops.expand_dims(alpha, 2)
                alphas.append(alpha)
        with ops.control_dependencies([
            check_ops.assert_less_equal(
                math_ops.cast(batch_size * height * width, dtype=dtypes.float32),
                np.iinfo(np.int32).max / 8,
                message="""The image size or batch size is sufficiently large
               that the linearized addresses used by array_ops.gather
               may exceed the int32 limit.""")
        ]):
            flattened_grid = array_ops.reshape(
                grid, [batch_size * height * width, channels])
            batch_offsets = array_ops.reshape(
                math_ops.range(batch_size) * height * width, [batch_size, 1])
        # This wraps array_ops.gather. We reshape the image data such that the
        # batch, y, and x coordinates are pulled into the first dimension.
        # Then we gather. Finally, we reshape the output back. It's possible this
        # code would be made simpler by using array_ops.gather_nd.
        def gather(y_coords, x_coords, name):
            with ops.name_scope('gather-' + name):
                linear_coordinates = batch_offsets + y_coords * width + x_coords
                gathered_values = array_ops.gather(flattened_grid, linear_coordinates)
                return array_ops.reshape(gathered_values,
                                         [batch_size, num_queries, channels])
        # grab the pixel values in the 4 corners around each query point
        top_left = gather(floors[0], floors[1], 'top_left')
        top_right = gather(floors[0], ceils[1], 'top_right')
        bottom_left = gather(ceils[0], floors[1], 'bottom_left')
        bottom_right = gather(ceils[0], ceils[1], 'bottom_right')
        # now, do the actual interpolation
        with ops.name_scope('interpolate'):
            interp_top = alphas[1] * (top_right - top_left) + top_left
            interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
            interp = alphas[0] * (interp_bottom - interp_top) + interp_top
        return interp
def _interpolate_nearest(grid,
                         query_points,
                         name='interpolate_nearest',
                         indexing='ij'):
    """Similar to Matlab's interp2 function.
    Finds values for query points on a grid using nearest-neighbor
    interpolation, implemented by rounding the bilinear blending weights to
    0 or 1 so that each query snaps to its closest grid pixel.
    Args:
      grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
      query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
      name: a name for the operation (optional).
      indexing: whether the query points are specified as row and column (ij),
        or Cartesian coordinates (xy).
    Returns:
      values: a 3-D `Tensor` with shape `[batch, N, channels]`
    Raises:
      ValueError: if the indexing mode is invalid, or if the shape of the inputs
        invalid.
    """
    if indexing != 'ij' and indexing != 'xy':
        raise ValueError('Indexing mode must be \'ij\' or \'xy\'')
    with ops.name_scope(name):
        grid = ops.convert_to_tensor(grid)
        query_points = ops.convert_to_tensor(query_points)
        # The grid rank must be known statically so it can be validated here.
        shape = grid.get_shape().as_list()
        if len(shape) != 4:
            msg = 'Grid must be 4 dimensional. Received size: '
            raise ValueError(msg + str(grid.get_shape()))
        # Dynamic dimensions, resolved at graph execution time.
        batch_size, height, width, channels = (array_ops.shape(grid)[0],
                                               array_ops.shape(grid)[1],
                                               array_ops.shape(grid)[2],
                                               array_ops.shape(grid)[3])
        shape = [batch_size, height, width, channels]
        query_type = query_points.dtype
        grid_type = grid.dtype
        # Runtime assertions: query_points must have shape [batch, N, 2].
        with ops.control_dependencies([
            check_ops.assert_equal(
                len(query_points.get_shape()),
                3,
                message='Query points must be 3 dimensional.'),
            check_ops.assert_equal(
                array_ops.shape(query_points)[2],
                2,
                message='Query points must be size 2 in dim 2.')
        ]):
            num_queries = array_ops.shape(query_points)[1]
        # A 2x2 neighborhood is gathered below, so both spatial dims need >= 2.
        with ops.control_dependencies([
            check_ops.assert_greater_equal(
                height, 2, message='Grid height must be at least 2.'),
            check_ops.assert_greater_equal(
                width, 2, message='Grid width must be at least 2.')
        ]):
            alphas = []
            floors = []
            ceils = []
            index_order = [0, 1] if indexing == 'ij' else [1, 0]
            unstacked_query_points = array_ops.unstack(query_points, axis=2)
        # Per spatial axis: clamp the queries into the grid and split each into
        # an integer floor/ceil index pair plus a fractional blending weight.
        for dim in index_order:
            with ops.name_scope('dim-' + str(dim)):
                queries = unstacked_query_points[dim]
                size_in_indexing_dimension = shape[dim + 1]
                # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
                # is still a valid index into the grid.
                max_floor = math_ops.cast(size_in_indexing_dimension - 2, query_type)
                min_floor = constant_op.constant(0.0, dtype=query_type)
                floor = math_ops.minimum(
                    math_ops.maximum(min_floor, math_ops.floor(queries)), max_floor)
                int_floor = math_ops.cast(floor, dtypes.int32)
                floors.append(int_floor)
                ceil = int_floor + 1
                ceils.append(ceil)
                # alpha has the same type as the grid, as we will directly use alpha
                # when taking linear combinations of pixel values from the image.
                alpha = math_ops.cast(queries - floor, grid_type)
                min_alpha = constant_op.constant(0.0, dtype=grid_type)
                max_alpha = constant_op.constant(1.0, dtype=grid_type)
                alpha = math_ops.minimum(math_ops.maximum(min_alpha, alpha), max_alpha)
                # Expand alpha to [b, n, 1] so we can use broadcasting
                # (since the alpha values don't depend on the channel).
                alpha = array_ops.expand_dims(alpha, 2)
                alphas.append(alpha)
        with ops.control_dependencies([
            check_ops.assert_less_equal(
                math_ops.cast(batch_size * height * width, dtype=dtypes.float32),
                np.iinfo(np.int32).max / 8,
                message="""The image size or batch size is sufficiently large
               that the linearized addresses used by array_ops.gather
               may exceed the int32 limit.""")
        ]):
            flattened_grid = array_ops.reshape(
                grid, [batch_size * height * width, channels])
            batch_offsets = array_ops.reshape(
                math_ops.range(batch_size) * height * width, [batch_size, 1])
        # This wraps array_ops.gather. We reshape the image data such that the
        # batch, y, and x coordinates are pulled into the first dimension.
        # Then we gather. Finally, we reshape the output back. It's possible this
        # code would be made simpler by using array_ops.gather_nd.
        def gather(y_coords, x_coords, name):
            with ops.name_scope('gather-' + name):
                linear_coordinates = batch_offsets + y_coords * width + x_coords
                gathered_values = array_ops.gather(flattened_grid, linear_coordinates)
                return array_ops.reshape(gathered_values,
                                         [batch_size, num_queries, channels])
        # grab the pixel values in the 4 corners around each query point
        top_left = gather(floors[0], floors[1], 'top_left')
        top_right = gather(floors[0], ceils[1], 'top_right')
        bottom_left = gather(ceils[0], floors[1], 'bottom_left')
        bottom_right = gather(ceils[0], ceils[1], 'bottom_right')
        # now, do the actual interpolation
        # Rounding each alpha to 0 or 1 degenerates the bilinear blend into a
        # hard selection of the nearest of the 4 corner pixels.
        with ops.name_scope('interpolate'):
            interp_top = K.round(alphas[1]) * (top_right - top_left) + top_left
            interp_bottom = K.round(alphas[1]) * (bottom_right - bottom_left) + bottom_left
            interp = K.round(alphas[0]) * (interp_bottom - interp_top) + interp_top
        return interp
def image_warp(image, flow, interp_method, name='dense_image_warp'):
    """Warp an image with a dense per-pixel flow field.

    Each output pixel output[b, j, i, c] is sampled from the input at
    images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c], using the
    requested interpolation for non-integer sample locations.  Locations
    outside the image use the nearest boundary pixel values.

    Args:
      image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
      flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
      interp_method: 'bilinear'/'Bilinear' or 'nearest_neighbor'/'NN'; any
        other value falls back to bilinear (with a console notice).
      name: A name for the operation (optional).
    Note that image and flow can be of type tf.half, tf.float32, or tf.float64,
    and do not necessarily have to be the same type.

    Returns:
      A 4-D float `Tensor` with shape `[batch, height, width, channels]`
      and same type as input image.

    Raises:
      ValueError: if height < 2 or width < 2 or the inputs have the wrong
        number of dimensions.
    """
    with ops.name_scope(name):
        img_shape = array_ops.shape(image)
        batch_size = img_shape[0]
        height = img_shape[1]
        width = img_shape[2]
        channels = img_shape[3]
        # Build one (row, col) query point per output pixel, shifted
        # backwards by the flow vectors.
        grid_x, grid_y = array_ops.meshgrid(
            math_ops.range(width), math_ops.range(height))
        pixel_grid = math_ops.cast(
            array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)
        query_points = array_ops.expand_dims(pixel_grid, axis=0) - flow
        flat_queries = array_ops.reshape(
            query_points, [batch_size, height * width, 2])
        # Sample the image at the query points with the chosen method.
        if interp_method in ('bilinear', 'Bilinear'):
            sampled = _interpolate_bilinear(image, flat_queries)
        elif interp_method in ('nearest_neighbor', 'NN'):
            sampled = _interpolate_nearest(image, flat_queries)
        else:
            print('Running on bi-linear interpolation!')
            sampled = _interpolate_bilinear(image, flat_queries)
        # Restore the [batch, height, width, channels] layout.
        return array_ops.reshape(
            sampled, [batch_size, height, width, channels])
| [
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.math_ops.floor",
"tensorflow.python.ops.array_ops.reshape",
"numpy.iinfo",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.framework.const... | [((1554, 1574), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name'], {}), '(name)\n', (1568, 1574), False, 'from tensorflow.python.framework import ops\n'), ((1587, 1614), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['grid'], {}), '(grid)\n', (1608, 1614), False, 'from tensorflow.python.framework import ops\n'), ((1634, 1669), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['query_points'], {}), '(query_points)\n', (1655, 1669), False, 'from tensorflow.python.framework import ops\n'), ((7178, 7198), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name'], {}), '(name)\n', (7192, 7198), False, 'from tensorflow.python.framework import ops\n'), ((7211, 7238), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['grid'], {}), '(grid)\n', (7232, 7238), False, 'from tensorflow.python.framework import ops\n'), ((7258, 7293), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['query_points'], {}), '(query_points)\n', (7279, 7293), False, 'from tensorflow.python.framework import ops\n'), ((13293, 13313), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name'], {}), '(name)\n', (13307, 13313), False, 'from tensorflow.python.framework import ops\n'), ((13920, 13963), 'tensorflow.python.ops.array_ops.expand_dims', 'array_ops.expand_dims', (['stacked_grid'], {'axis': '(0)'}), '(stacked_grid, axis=0)\n', (13941, 13963), False, 'from tensorflow.python.ops import array_ops\n'), ((14040, 14112), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['query_points_on_grid', '[batch_size, height * width, 2]'], {}), '(query_points_on_grid, [batch_size, height * width, 2])\n', (14057, 14112), False, 'from tensorflow.python.ops import array_ops\n'), ((14703, 14773), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['interpolated', 
'[batch_size, height, width, channels]'], {}), '(interpolated, [batch_size, height, width, channels])\n', (14720, 14773), False, 'from tensorflow.python.ops import array_ops\n'), ((3035, 3074), 'tensorflow.python.ops.array_ops.unstack', 'array_ops.unstack', (['query_points'], {'axis': '(2)'}), '(query_points, axis=2)\n', (3052, 3074), False, 'from tensorflow.python.ops import array_ops\n'), ((4822, 4886), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['grid', '[batch_size * height * width, channels]'], {}), '(grid, [batch_size * height * width, channels])\n', (4839, 4886), False, 'from tensorflow.python.ops import array_ops\n'), ((6013, 6042), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""interpolate"""'], {}), "('interpolate')\n", (6027, 6042), False, 'from tensorflow.python.framework import ops\n'), ((8659, 8698), 'tensorflow.python.ops.array_ops.unstack', 'array_ops.unstack', (['query_points'], {'axis': '(2)'}), '(query_points, axis=2)\n', (8676, 8698), False, 'from tensorflow.python.ops import array_ops\n'), ((10446, 10510), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['grid', '[batch_size * height * width, channels]'], {}), '(grid, [batch_size * height * width, channels])\n', (10463, 10510), False, 'from tensorflow.python.ops import array_ops\n'), ((11637, 11666), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""interpolate"""'], {}), "('interpolate')\n", (11651, 11666), False, 'from tensorflow.python.framework import ops\n'), ((13757, 13778), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['width'], {}), '(width)\n', (13771, 13778), False, 'from tensorflow.python.ops import math_ops\n'), ((13780, 13802), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['height'], {}), '(height)\n', (13794, 13802), False, 'from tensorflow.python.ops import math_ops\n'), ((13846, 13887), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['[grid_y, grid_x]'], 
{'axis': '(2)'}), '([grid_y, grid_x], axis=2)\n', (13861, 13887), False, 'from tensorflow.python.ops import array_ops\n'), ((1887, 1908), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (1902, 1908), False, 'from tensorflow.python.ops import array_ops\n'), ((1956, 1977), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (1971, 1977), False, 'from tensorflow.python.ops import array_ops\n'), ((2025, 2046), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (2040, 2046), False, 'from tensorflow.python.ops import array_ops\n'), ((2094, 2115), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (2109, 2115), False, 'from tensorflow.python.ops import array_ops\n'), ((2603, 2632), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['query_points'], {}), '(query_points)\n', (2618, 2632), False, 'from tensorflow.python.ops import array_ops\n'), ((3394, 3451), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(size_in_indexing_dimension - 2)', 'query_type'], {}), '(size_in_indexing_dimension - 2, query_type)\n', (3407, 3451), False, 'from tensorflow.python.ops import math_ops\n'), ((3472, 3515), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {'dtype': 'query_type'}), '(0.0, dtype=query_type)\n', (3492, 3515), False, 'from tensorflow.python.framework import constant_op\n'), ((3647, 3681), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['floor', 'dtypes.int32'], {}), '(floor, dtypes.int32)\n', (3660, 3681), False, 'from tensorflow.python.ops import math_ops\n'), ((3939, 3980), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(queries - floor)', 'grid_type'], {}), '(queries - floor, grid_type)\n', (3952, 3980), False, 'from tensorflow.python.ops import math_ops\n'), ((4001, 4043), 'tensorflow.python.framework.constant_op.constant', 
'constant_op.constant', (['(0.0)'], {'dtype': 'grid_type'}), '(0.0, dtype=grid_type)\n', (4021, 4043), False, 'from tensorflow.python.framework import constant_op\n'), ((4064, 4106), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'dtype': 'grid_type'}), '(1.0, dtype=grid_type)\n', (4084, 4106), False, 'from tensorflow.python.framework import constant_op\n'), ((4331, 4362), 'tensorflow.python.ops.array_ops.expand_dims', 'array_ops.expand_dims', (['alpha', '(2)'], {}), '(alpha, 2)\n', (4352, 4362), False, 'from tensorflow.python.ops import array_ops\n'), ((5352, 5384), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (["('gather-' + name)"], {}), "('gather-' + name)\n", (5366, 5384), False, 'from tensorflow.python.framework import ops\n'), ((5485, 5537), 'tensorflow.python.ops.array_ops.gather', 'array_ops.gather', (['flattened_grid', 'linear_coordinates'], {}), '(flattened_grid, linear_coordinates)\n', (5501, 5537), False, 'from tensorflow.python.ops import array_ops\n'), ((5553, 5624), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['gathered_values', '[batch_size, num_queries, channels]'], {}), '(gathered_values, [batch_size, num_queries, channels])\n', (5570, 5624), False, 'from tensorflow.python.ops import array_ops\n'), ((7511, 7532), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (7526, 7532), False, 'from tensorflow.python.ops import array_ops\n'), ((7580, 7601), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (7595, 7601), False, 'from tensorflow.python.ops import array_ops\n'), ((7649, 7670), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (7664, 7670), False, 'from tensorflow.python.ops import array_ops\n'), ((7718, 7739), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['grid'], {}), '(grid)\n', (7733, 7739), False, 'from tensorflow.python.ops import 
array_ops\n'), ((8227, 8256), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['query_points'], {}), '(query_points)\n', (8242, 8256), False, 'from tensorflow.python.ops import array_ops\n'), ((9018, 9075), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(size_in_indexing_dimension - 2)', 'query_type'], {}), '(size_in_indexing_dimension - 2, query_type)\n', (9031, 9075), False, 'from tensorflow.python.ops import math_ops\n'), ((9096, 9139), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {'dtype': 'query_type'}), '(0.0, dtype=query_type)\n', (9116, 9139), False, 'from tensorflow.python.framework import constant_op\n'), ((9271, 9305), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['floor', 'dtypes.int32'], {}), '(floor, dtypes.int32)\n', (9284, 9305), False, 'from tensorflow.python.ops import math_ops\n'), ((9563, 9604), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(queries - floor)', 'grid_type'], {}), '(queries - floor, grid_type)\n', (9576, 9604), False, 'from tensorflow.python.ops import math_ops\n'), ((9625, 9667), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {'dtype': 'grid_type'}), '(0.0, dtype=grid_type)\n', (9645, 9667), False, 'from tensorflow.python.framework import constant_op\n'), ((9688, 9730), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'dtype': 'grid_type'}), '(1.0, dtype=grid_type)\n', (9708, 9730), False, 'from tensorflow.python.framework import constant_op\n'), ((9955, 9986), 'tensorflow.python.ops.array_ops.expand_dims', 'array_ops.expand_dims', (['alpha', '(2)'], {}), '(alpha, 2)\n', (9976, 9986), False, 'from tensorflow.python.ops import array_ops\n'), ((10976, 11008), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (["('gather-' + name)"], {}), "('gather-' + name)\n", (10990, 11008), False, 'from tensorflow.python.framework import ops\n'), ((11109, 
11161), 'tensorflow.python.ops.array_ops.gather', 'array_ops.gather', (['flattened_grid', 'linear_coordinates'], {}), '(flattened_grid, linear_coordinates)\n', (11125, 11161), False, 'from tensorflow.python.ops import array_ops\n'), ((11177, 11248), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['gathered_values', '[batch_size, num_queries, channels]'], {}), '(gathered_values, [batch_size, num_queries, channels])\n', (11194, 11248), False, 'from tensorflow.python.ops import array_ops\n'), ((13358, 13380), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['image'], {}), '(image)\n', (13373, 13380), False, 'from tensorflow.python.ops import array_ops\n'), ((13428, 13450), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['image'], {}), '(image)\n', (13443, 13450), False, 'from tensorflow.python.ops import array_ops\n'), ((13498, 13520), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['image'], {}), '(image)\n', (13513, 13520), False, 'from tensorflow.python.ops import array_ops\n'), ((13568, 13590), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['image'], {}), '(image)\n', (13583, 13590), False, 'from tensorflow.python.ops import array_ops\n'), ((2681, 2770), 'tensorflow.python.ops.check_ops.assert_greater_equal', 'check_ops.assert_greater_equal', (['height', '(2)'], {'message': '"""Grid height must be at least 2."""'}), "(height, 2, message=\n 'Grid height must be at least 2.')\n", (2711, 2770), False, 'from tensorflow.python.ops import check_ops\n'), ((2788, 2875), 'tensorflow.python.ops.check_ops.assert_greater_equal', 'check_ops.assert_greater_equal', (['width', '(2)'], {'message': '"""Grid width must be at least 2."""'}), "(width, 2, message=\n 'Grid width must be at least 2.')\n", (2818, 2875), False, 'from tensorflow.python.ops import check_ops\n'), ((4140, 4174), 'tensorflow.python.ops.math_ops.maximum', 'math_ops.maximum', (['min_alpha', 'alpha'], {}), '(min_alpha, alpha)\n', (4156, 
4174), False, 'from tensorflow.python.ops import math_ops\n'), ((8305, 8394), 'tensorflow.python.ops.check_ops.assert_greater_equal', 'check_ops.assert_greater_equal', (['height', '(2)'], {'message': '"""Grid height must be at least 2."""'}), "(height, 2, message=\n 'Grid height must be at least 2.')\n", (8335, 8394), False, 'from tensorflow.python.ops import check_ops\n'), ((8412, 8499), 'tensorflow.python.ops.check_ops.assert_greater_equal', 'check_ops.assert_greater_equal', (['width', '(2)'], {'message': '"""Grid width must be at least 2."""'}), "(width, 2, message=\n 'Grid width must be at least 2.')\n", (8442, 8499), False, 'from tensorflow.python.ops import check_ops\n'), ((9764, 9798), 'tensorflow.python.ops.math_ops.maximum', 'math_ops.maximum', (['min_alpha', 'alpha'], {}), '(min_alpha, alpha)\n', (9780, 9798), False, 'from tensorflow.python.ops import math_ops\n'), ((11687, 11705), 'keras.backend.round', 'K.round', (['alphas[1]'], {}), '(alphas[1])\n', (11694, 11705), True, 'import keras.backend as K\n'), ((11764, 11782), 'keras.backend.round', 'K.round', (['alphas[1]'], {}), '(alphas[1])\n', (11771, 11782), True, 'import keras.backend as K\n'), ((11843, 11861), 'keras.backend.round', 'K.round', (['alphas[0]'], {}), '(alphas[0])\n', (11850, 11861), True, 'import keras.backend as K\n'), ((3590, 3613), 'tensorflow.python.ops.math_ops.floor', 'math_ops.floor', (['queries'], {}), '(queries)\n', (3604, 3613), False, 'from tensorflow.python.ops import math_ops\n'), ((4478, 4542), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(batch_size * height * width)'], {'dtype': 'dtypes.float32'}), '(batch_size * height * width, dtype=dtypes.float32)\n', (4491, 4542), False, 'from tensorflow.python.ops import math_ops\n'), ((4949, 4975), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['batch_size'], {}), '(batch_size)\n', (4963, 4975), False, 'from tensorflow.python.ops import math_ops\n'), ((9214, 9237), 'tensorflow.python.ops.math_ops.floor', 
'math_ops.floor', (['queries'], {}), '(queries)\n', (9228, 9237), False, 'from tensorflow.python.ops import math_ops\n'), ((10102, 10166), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(batch_size * height * width)'], {'dtype': 'dtypes.float32'}), '(batch_size * height * width, dtype=dtypes.float32)\n', (10115, 10166), False, 'from tensorflow.python.ops import math_ops\n'), ((10573, 10599), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['batch_size'], {}), '(batch_size)\n', (10587, 10599), False, 'from tensorflow.python.ops import math_ops\n'), ((2465, 2494), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['query_points'], {}), '(query_points)\n', (2480, 2494), False, 'from tensorflow.python.ops import array_ops\n'), ((8089, 8118), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['query_points'], {}), '(query_points)\n', (8104, 8118), False, 'from tensorflow.python.ops import array_ops\n'), ((4556, 4574), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (4564, 4574), True, 'import numpy as np\n'), ((10180, 10198), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (10188, 10198), True, 'import numpy as np\n')] |
import numpy as np
#========================================================
#
# Environment-specific cost functions:
#
def cheetah_cost_fn(state, action, next_state):
    """Hand-crafted cost for the half-cheetah MPC task.

    Penalizes postures that tilt the robot forward (front leg / shin / foot
    raised past a threshold) and rewards forward progress, measured as the
    change of state index 17 over one 0.01 s timestep.

    Works on a single state vector of shape (state_dim,) or on a batch of
    shape (n, state_dim); `action` is accepted for interface compatibility
    but unused.
    """
    penalty = 10
    # (state index, threshold) for the three front joints that cause tilting.
    joint_limits = ((5, 0.2), (6, 0), (7, 0))

    if len(state.shape) > 1:
        # Batched evaluation: one cost per row.
        costs = np.zeros((state.shape[0],))
        for idx, threshold in joint_limits:
            costs[state[:, idx] >= threshold] += penalty
        # Reward forward velocity (negative cost for progress).
        costs -= (next_state[:, 17] - state[:, 17]) / 0.01
        return costs

    # Single-state evaluation.
    cost = 0
    for idx, threshold in joint_limits:
        if state[idx] >= threshold:
            cost += penalty
    cost -= (next_state[17] - state[17]) / 0.01
    return cost
def half_cheetah_reward(_, action, next_state):
    """Reward for the half-cheetah: negated velocity magnitude with a small
    action bonus.

    Returns -(sum(next_state_vel^2) - 0.05 * sum(action^2)), where the
    velocity components are next_state indices 8..16. Handles both a single
    step (1-D arrays) and a batch (2-D arrays, reduced along axis 1). The
    first argument (the current state) is ignored.
    """
    if action.ndim > 1:
        vel_sq = (next_state[:, 8:17] ** 2).sum(axis=1)
        act_sq = (action ** 2).sum(axis=1)
    else:
        vel_sq = (next_state[8:17] ** 2).sum()
        act_sq = (action ** 2).sum()
    return 0.05 * act_sq - vel_sq
#========================================================
#
# Cost function for a whole trajectory:
#
def trajectory_cost_fn(states, actions, next_states):
    """Accumulate the per-step half-cheetah reward over a whole trajectory.

    Iterates over the length of `actions`; the three sequences are indexed
    in lockstep, one call to half_cheetah_reward per step.
    """
    return sum(
        half_cheetah_reward(states[i], actions[i], next_states[i])
        for i in range(len(actions))
    )
| [
"numpy.square",
"numpy.zeros"
] | [((249, 276), 'numpy.zeros', 'np.zeros', (['(state.shape[0],)'], {}), '((state.shape[0],))\n', (257, 276), True, 'import numpy as np\n'), ((1487, 1508), 'numpy.square', 'np.square', (['next_s_vel'], {}), '(next_s_vel)\n', (1496, 1508), True, 'import numpy as np\n'), ((1626, 1647), 'numpy.square', 'np.square', (['next_s_vel'], {}), '(next_s_vel)\n', (1635, 1647), True, 'import numpy as np\n'), ((1530, 1547), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1539, 1547), True, 'import numpy as np\n'), ((1663, 1680), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1672, 1680), True, 'import numpy as np\n')] |
import torch
import math
import cv2 as cv
import torch.nn.functional as F
import numpy as np
import random
'''modified from the original test implementation
Replace cv.BORDER_REPLICATE with cv.BORDER_CONSTANT
Add a variable called att_mask for computing attention and positional encoding later'''
def iou(reference, proposals):
    """Compute the IoU between a reference box with multiple proposal boxes.

    Boxes are in (x, y, w, h) format.

    args:
        reference - Tensor of shape (1, 4).
        proposals - Tensor of shape (num_proposals, 4)
    returns:
        torch.Tensor - shape (num_proposals,), IoU of the reference box
            with each proposal.
    """
    ref_tl, ref_wh = reference[:, :2], reference[:, 2:]
    prop_tl, prop_wh = proposals[:, :2], proposals[:, 2:]

    # Overlap rectangle; non-intersecting boxes clamp to zero size.
    top_left = torch.max(ref_tl, prop_tl)
    bottom_right = torch.min(ref_tl + ref_wh, prop_tl + prop_wh)
    overlap_wh = (bottom_right - top_left).clamp(0)

    inter = overlap_wh[:, 0] * overlap_wh[:, 1]
    union = ref_wh.prod(dim=1) + prop_wh.prod(dim=1) - inter
    return inter / union
def centerness(reference, proposals):
    """FCOS-style centerness of grid points w.r.t. a reference box.

    `proposals[:, :2]` are treated as point locations (the w/h columns are
    ignored). For each point, centerness = sqrt-free product of
    min(left, right)/max(left, right) * min(top, bottom)/max(top, bottom),
    where left/right/top/bottom are the distances to the box edges.
    Points outside the box get 0 (the negative gap is clamped).
    """
    points = proposals[:, :2]
    top_left = reference[:, :2]
    bottom_right = top_left + reference[:, 2:]

    # Distances to the two pairs of opposite edges, stacked on dim 1.
    edge_gaps = torch.stack([points - top_left, bottom_right - points], 1)
    per_axis = edge_gaps.min(1)[0].clamp(0) / edge_gaps.max(1)[0]
    return per_axis.prod(1)
def rand_uniform(a, b, shape=1):
    """Sample numbers uniformly from the half-open interval [a, b).

    args:
        a - lower bound
        b - upper bound
        shape - shape of the output tensor
    returns:
        torch.Tensor - tensor of shape=shape
    """
    # torch.rand draws from [0, 1); scale and shift into [a, b).
    return a + (b - a) * torch.rand(shape)
def random_center(box, factor=2):
    """Sample a point uniformly from a region `factor` times the box size,
    centered on the box center.

    args:
        box: (x, y, w, h) in pixels (not normalized)
        factor - scale of the sampling region relative to the box
    returns:
        torch.Tensor - shape (2,), the sampled (x, y) point
    """
    x, y, w, h = box
    cx = x + 0.5 * w
    cy = y + 0.5 * h
    half_w = factor * w / 2.
    half_h = factor * h / 2.
    sample_x = rand_uniform(cx - half_w, cx + half_w)
    sample_y = rand_uniform(cy - half_h, cy + half_h)
    return torch.cat([sample_x, sample_y])
def sample_target_fast(im, target_bb, search_area_factor, output_sz=None):
    """ Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area
    args:
        im - cv image
        target_bb - target box [x, y, w, h]
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.
    returns:
        cv image - extracted crop
        float - the factor by which the crop has been resized to make the crop size equal output_size
               (1.0 when output_sz is None)
    """
    x, y, w, h = target_bb

    # Side length of the square crop: search_area_factor times the target scale.
    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)

    # Crop window centered on the target box.
    x1 = round(x + 0.5 * w - crop_sz * 0.5)
    x2 = x1 + crop_sz
    y1 = round(y + 0.5 * h - crop_sz * 0.5)
    y2 = y1 + crop_sz

    # How far the window extends past each image border.
    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)
    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)

    # Take the in-image part of the window, then zero-pad back to crop_sz
    # (BORDER_CONSTANT, deliberately not BORDER_REPLICATE -- see module note).
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)

    # FIX: honour the documented output_sz=None contract. Previously this
    # fell through to `output_sz / crop_sz` and raised a TypeError on None.
    # Mirrors the output_sz-is-None branch of sample_target().
    if output_sz is None:
        return im_crop_padded, 1.0

    resize_factor = output_sz / crop_sz
    im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))
    return im_crop_padded, resize_factor
def sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):
    """ Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area
    args:
        im - cv image
        target_bb - target box [x, y, w, h] (list, or tensor/array with .tolist())
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.
        mask - optional segmentation mask aligned with im; cropped/padded/resized alongside it
               (padded via F.pad -- presumably a torch tensor; TODO confirm against callers)
    returns:
        cv image - extracted crop
        float - the factor by which the crop has been resized to make the crop size equal output_size
        att_mask - boolean array marking the zero-padded region (1/True = padding)
        mask crop - only when mask is given (or in the output_sz=None branch, where a
                    placeholder 1.0 is returned instead; NOTE(review): the two branches
                    return a different number of values when mask is None)
    """
    if not isinstance(target_bb, list):
        x, y, w, h = target_bb.tolist()
    else:
        x, y, w, h = target_bb
    # Side length of the square crop: search_area_factor times the target scale.
    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)
    if crop_sz < 1:
        raise Exception('Too small bounding box.')
    # Crop window centered on the target box.
    x1 = round(x + 0.5 * w - crop_sz * 0.5)
    x2 = x1 + crop_sz
    y1 = round(y + 0.5 * h - crop_sz * 0.5)
    y2 = y1 + crop_sz
    # How far the window extends past each image border.
    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)
    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)
    # In-image part of the window (and of the mask, if given).
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]
    if mask is not None:
        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]
    # Zero-pad back to crop_sz (BORDER_CONSTANT, deliberately not REPLICATE).
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)
    # Attention mask: 1 on padded pixels, 0 on real image content.
    H, W, _ = im_crop_padded.shape
    att_mask = np.ones((H,W))
    # Negative end indices select up to the padding; a pad of 0 must map to
    # end=None (slicing with -0 would select an empty range).
    end_x, end_y = -x2_pad, -y2_pad
    if y2_pad == 0:
        end_y = None
    if x2_pad == 0:
        end_x = None
    att_mask[y1_pad:end_y, x1_pad:end_x] = 0
    if mask is not None:
        # Same zero-padding for the mask (F.pad order is left, right, top, bottom).
        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)
    if output_sz is not None:
        resize_factor = output_sz / crop_sz
        #print(output_sz, crop_sz)
        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))
        # Resize introduces interpolated values; threshold back to boolean.
        att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_)
        if mask is None:
            return im_crop_padded, resize_factor, att_mask
        # Bilinear resize of the mask keeps soft (fractional) values.
        mask_crop_padded = \
            F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]
        return im_crop_padded, resize_factor, att_mask, mask_crop_padded
    else:
        # No resizing requested: resize_factor is 1.0 by definition.
        if mask is None:
            return im_crop_padded, 1.0, att_mask.astype(np.bool_), 1.0
        return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded
def transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,
                            crop_sz: torch.Tensor, normalize=False) -> torch.Tensor:
    """Map a box from original-image co-ordinates into crop co-ordinates.

    args:
        box_in - (x, y, w, h) box to transform
        box_extract - the box about which the crop was extracted
        resize_factor - ratio between the crop scale and the original image scale
        crop_sz - size of the cropped image
        normalize - when True, divide the result by crop_sz[0]
    returns:
        torch.Tensor - box_in expressed in crop co-ordinates
    """
    # Both box centers in the original image.
    extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]
    in_center = box_in[0:2] + 0.5 * box_in[2:4]

    # The extraction center lands on the middle of the crop; center offsets
    # and the box size both scale with the resize factor.
    out_center = (crop_sz - 1) / 2 + (in_center - extract_center) * resize_factor
    out_wh = box_in[2:4] * resize_factor

    out_box = torch.cat((out_center - 0.5 * out_wh, out_wh))
    return out_box / crop_sz[0] if normalize else out_box
def jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):
    """Extract a jittered center crop from each frame and remap its GT box.

    For every frame, a square crop of area search_area_factor^2 times the
    box_extract area is taken (centered on box_extract) and resized to
    output_sz; the matching box_gt is then expressed in normalized crop
    co-ordinates.

    args:
        frames - list of frames
        box_extract - list of boxes (same length as frames) defining the crops
        box_gt - list of boxes (same length as frames) to remap into crop space
        search_area_factor - crop area is this factor squared times box area
        output_sz - side length the crops are resized to
        masks - optional list of segmentation masks, cropped alongside frames
    returns:
        tuple of image crops, list of normalized GT boxes in crop co-ordinates,
        tuple of attention masks, and the cropped masks (None when masks is None)
    """
    if masks is None:
        per_frame = [sample_target(im, bb, search_area_factor, output_sz)
                     for im, bb in zip(frames, box_extract)]
        frames_crop, resize_factors, att_mask = zip(*per_frame)
        masks_crop = None
    else:
        per_frame = [sample_target(im, bb, search_area_factor, output_sz, m)
                     for im, bb, m in zip(frames, box_extract, masks)]
        frames_crop, resize_factors, att_mask, masks_crop = zip(*per_frame)

    crop_sz = torch.Tensor([output_sz, output_sz])
    # GT boxes in normalized crop co-ordinates (x1, y1, w, h).
    box_crop = [transform_image_to_crop(gt, ex, rf, crop_sz, normalize=True)
                for gt, ex, rf in zip(box_gt, box_extract, resize_factors)]
    return frames_crop, box_crop, att_mask, masks_crop
def transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor, normalize=False) -> torch.Tensor:
    """Map a box from original-image co-ordinates into crop co-ordinates.

    args:
        box - (x, y, w, h) box to transform
        crop_box - bounding box defining the crop in the original image
        crop_sz - size of the cropped image
        normalize - when True, divide the result by crop_sz[0]
    returns:
        torch.Tensor - `box` expressed in crop co-ordinates (input is not modified)
    """
    # Translate into the crop's frame, then scale by crop_sz / crop size.
    scale = crop_sz / crop_box[2:]
    new_xy = (box[:2] - crop_box[:2]) * scale
    new_wh = box[2:] * scale
    out = torch.cat((new_xy, new_wh))
    return out / crop_sz[0] if normalize else out
| [
"torch.stack",
"math.sqrt",
"cv2.copyMakeBorder",
"torch.cat",
"numpy.ones",
"torch.Tensor",
"torch.max",
"torch.rand",
"torch.nn.functional.interpolate",
"torch.min",
"cv2.resize",
"torch.nn.functional.pad"
] | [((684, 729), 'torch.max', 'torch.max', (['reference[:, :2]', 'proposals[:, :2]'], {}), '(reference[:, :2], proposals[:, :2])\n', (693, 729), False, 'import torch\n'), ((739, 827), 'torch.min', 'torch.min', (['(reference[:, :2] + reference[:, 2:])', '(proposals[:, :2] + proposals[:, 2:])'], {}), '(reference[:, :2] + reference[:, 2:], proposals[:, :2] + proposals\n [:, 2:])\n', (748, 827), False, 'import torch\n'), ((1262, 1294), 'torch.stack', 'torch.stack', (['[tl_gap, br_gap]', '(1)'], {}), '([tl_gap, br_gap], 1)\n', (1273, 1294), False, 'import torch\n'), ((2235, 2266), 'torch.cat', 'torch.cat', (['[sample_x, sample_y]'], {}), '([sample_x, sample_y])\n', (2244, 2266), False, 'import torch\n'), ((3366, 3444), 'cv2.copyMakeBorder', 'cv.copyMakeBorder', (['im_crop', 'y1_pad', 'y2_pad', 'x1_pad', 'x2_pad', 'cv.BORDER_CONSTANT'], {}), '(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)\n', (3383, 3444), True, 'import cv2 as cv\n'), ((3538, 3587), 'cv2.resize', 'cv.resize', (['im_crop_padded', '(output_sz, output_sz)'], {}), '(im_crop_padded, (output_sz, output_sz))\n', (3547, 3587), True, 'import cv2 as cv\n'), ((5003, 5081), 'cv2.copyMakeBorder', 'cv.copyMakeBorder', (['im_crop', 'y1_pad', 'y2_pad', 'x1_pad', 'x2_pad', 'cv.BORDER_CONSTANT'], {}), '(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)\n', (5020, 5081), True, 'import cv2 as cv\n'), ((5163, 5178), 'numpy.ones', 'np.ones', (['(H, W)'], {}), '((H, W))\n', (5170, 5178), True, 'import numpy as np\n'), ((7194, 7252), 'torch.cat', 'torch.cat', (['(box_out_center - 0.5 * box_out_wh, box_out_wh)'], {}), '((box_out_center - 0.5 * box_out_wh, box_out_wh))\n', (7203, 7252), False, 'import torch\n'), ((8990, 9026), 'torch.Tensor', 'torch.Tensor', (['[output_sz, output_sz]'], {}), '([output_sz, output_sz])\n', (9002, 9026), False, 'import torch\n'), ((5393, 5478), 'torch.nn.functional.pad', 'F.pad', (['mask_crop'], {'pad': '(x1_pad, x2_pad, y1_pad, y2_pad)', 'mode': '"""constant"""', 'value': 
'(0)'}), "(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0\n )\n", (5398, 5478), True, 'import torch.nn.functional as F\n'), ((5609, 5658), 'cv2.resize', 'cv.resize', (['im_crop_padded', '(output_sz, output_sz)'], {}), '(im_crop_padded, (output_sz, output_sz))\n', (5618, 5658), True, 'import cv2 as cv\n'), ((1740, 1757), 'torch.rand', 'torch.rand', (['shape'], {}), '(shape)\n', (1750, 1757), False, 'import torch\n'), ((2936, 2952), 'math.sqrt', 'math.sqrt', (['(w * h)'], {}), '(w * h)\n', (2945, 2952), False, 'import math\n'), ((4401, 4417), 'math.sqrt', 'math.sqrt', (['(w * h)'], {}), '(w * h)\n', (4410, 4417), False, 'import math\n'), ((5860, 5970), 'torch.nn.functional.interpolate', 'F.interpolate', (['mask_crop_padded[None, None]', '(output_sz, output_sz)'], {'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(mask_crop_padded[None, None], (output_sz, output_sz), mode=\n 'bilinear', align_corners=False)\n", (5873, 5970), True, 'import torch.nn.functional as F\n'), ((5678, 5721), 'cv2.resize', 'cv.resize', (['att_mask', '(output_sz, output_sz)'], {}), '(att_mask, (output_sz, output_sz))\n', (5687, 5721), True, 'import cv2 as cv\n')] |
#%%
import warnings
warnings.simplefilter("ignore")
import random
import matplotlib.pyplot as plt
import tensorflow as tf
from itertools import product
import pandas as pd
import numpy as np
import pickle
import keras
from keras import layers
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
from proglearn.progressive_learner import ProgressiveLearner
from proglearn.deciders import SimpleArgmaxAverage
from proglearn.transformers import (
TreeClassificationTransformer,
NeuralClassificationTransformer,
)
from proglearn.voters import TreeClassificationVoter, KNNClassificationVoter
from joblib import Parallel, delayed
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
import time
#%%
def LF_experiment(
    data_x, data_y, ntrees, shift, slot, model, num_points_per_task, acorn=None
):
    """Run one lifelong-forest backward-transfer experiment on task 0.

    Trains a ProgressiveLearner on task 0, then adds 19 more tasks one at a
    time, re-evaluating task-0 accuracy after each addition. Per-step
    accuracies and train/inference times are written to
    result/<model><ntrees>_<shift>_<slot>.pickle as a pandas DataFrame.

    args:
        data_x, data_y - full dataset (features, labels)
        ntrees - number of trees per task (used only when model == "uf")
        shift - which data fold to use (forwarded to cross_val_data)
        slot - slot index (forwarded to cross_val_data, recorded in output)
        model - "dnn" (CNN transformer + KNN voter) or "uf" (decision trees)
        num_points_per_task - training points drawn per task
        acorn - NOTE(review): accepted but never used in this body; presumably
            a random seed -- confirm against other versions of this script.
    """
    df = pd.DataFrame()
    # Per-evaluation bookkeeping (one entry for task 0 plus one per added task).
    shifts = []
    slots = []
    accuracies_across_tasks = []
    train_times_across_tasks = []
    inference_times_across_tasks = []
    # Task 0 is the only task that gets a held-out test set; all later tasks
    # only contribute transformers for backward transfer.
    train_x_task0, train_y_task0, test_x_task0, test_y_task0 = cross_val_data(
        data_x, data_y, num_points_per_task, total_task=10, shift=shift, slot=slot
    )
    if model == "dnn":
        # CNN feature extractor: 5 conv layers, then 2 dense layers of 2000
        # units, with the penultimate layer used as the euclidean embedding.
        default_transformer_class = NeuralClassificationTransformer
        network = keras.Sequential()
        network.add(
            layers.Conv2D(
                filters=16,
                kernel_size=(3, 3),
                activation="relu",
                input_shape=np.shape(train_x_task0)[1:],
            )
        )
        network.add(
            layers.Conv2D(
                filters=32,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            )
        )
        network.add(
            layers.Conv2D(
                filters=64,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            )
        )
        network.add(
            layers.Conv2D(
                filters=128,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            )
        )
        network.add(
            layers.Conv2D(
                filters=254,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            )
        )
        network.add(layers.Flatten())
        network.add(layers.Dense(2000, activation="relu"))
        network.add(layers.Dense(2000, activation="relu"))
        network.add(layers.Dense(units=10, activation="softmax"))
        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "num_classes": 10,
            "optimizer": keras.optimizers.Adam(3e-4),
        }
        default_voter_class = KNNClassificationVoter
        # k scales with the (33%) voter split of the task's training points.
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task * 0.33))}
        default_decider_class = SimpleArgmaxAverage
    elif model == "uf":
        # Uncertainty forest: depth-capped decision trees, tree voters.
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}
        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}
        default_decider_class = SimpleArgmaxAverage
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
    )
    # --- Task 0: train, then record baseline accuracy and timings. ---
    train_start_time = time.time()
    progressive_learner.add_task(
        X=train_x_task0,
        y=train_y_task0,
        num_transformers=1 if model == "dnn" else ntrees,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(train_y_task0)},
    )
    train_end_time = time.time()
    inference_start_time = time.time()
    task_0_predictions = progressive_learner.predict(test_x_task0, task_id=0)
    inference_end_time = time.time()
    shifts.append(shift)
    slots.append(slot)
    accuracies_across_tasks.append(np.mean(task_0_predictions == test_y_task0))
    train_times_across_tasks.append(train_end_time - train_start_time)
    inference_times_across_tasks.append(inference_end_time - inference_start_time)
    # --- Tasks 1..19: add transformers and measure backward transfer to task 0. ---
    for task_ii in range(1, 20):
        train_x, train_y, _, _ = cross_val_data(
            data_x,
            data_y,
            num_points_per_task,
            total_task=10,
            shift=shift,
            slot=slot,
            task=task_ii,
        )
        print("Starting Task {} For Fold {} For Slot {}".format(task_ii, shift, slot))
        train_start_time = time.time()
        # backward_task_ids=[0]: only task 0's decider is updated with the
        # new transformer (this is the backward-transfer measurement).
        progressive_learner.add_transformer(
            X=train_x,
            y=train_y,
            transformer_data_proportion=1,
            num_transformers=1 if model == "dnn" else ntrees,
            backward_task_ids=[0],
        )
        train_end_time = time.time()
        inference_start_time = time.time()
        task_0_predictions = progressive_learner.predict(test_x_task0, task_id=0)
        inference_end_time = time.time()
        shifts.append(shift)
        slots.append(slot)
        accuracies_across_tasks.append(np.mean(task_0_predictions == test_y_task0))
        train_times_across_tasks.append(train_end_time - train_start_time)
        inference_times_across_tasks.append(inference_end_time - inference_start_time)
    print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))
    print("Train Times Across Tasks: {}".format(train_times_across_tasks))
    print("Inference Times Across Tasks: {}".format(inference_times_across_tasks))
    # Persist one row per evaluation; assumes the result/ directory exists.
    df["data_fold"] = shifts
    df["slot"] = slots
    df["accuracy"] = accuracies_across_tasks
    df["train_times"] = train_times_across_tasks
    df["inference_times"] = inference_times_across_tasks
    file_to_save = (
        "result/" + model + str(ntrees) + "_" + str(shift) + "_" + str(slot) + ".pickle"
    )
    with open(file_to_save, "wb") as f:
        pickle.dump(df, f)
#%%
def cross_val_data(
    data_x, data_y, num_points_per_task, total_task=10, shift=1, slot=0, task=0
):
    """Draw one task's train/test split: a stratified fold plus 10 random classes.

    Selects the `shift`-th stratified fold of the data, randomly picks 10 of
    the 100 classes, and subsamples num_points_per_task training points from
    them; the test set is every test-fold point of the selected classes.

    args:
        data_x, data_y - full dataset (features, labels)
        num_points_per_task - number of training points to sample
        total_task - NOTE(review): unused in this body
        shift - selects which of the 6 stratified folds to use (mod 6)
        slot, task - NOTE(review): accepted but unused here; the per-slot
            point count below divides by 10 but never indexes by slot --
            confirm intent against other variants of this function.
    returns:
        (data_x_train, data_y_train, data_x_test, data_y_test)
    """
    skf = StratifiedKFold(n_splits=6)
    # BUGFIX: the original looped `next(skf.split(data_x, data_y))`, which
    # built a fresh generator every iteration and therefore always returned
    # the FIRST fold regardless of `shift`. Materialize the folds once and
    # index by shift (mod n_splits keeps shift=6 from running off the end).
    splits = list(skf.split(data_x, data_y))
    train_idx, test_idx = splits[shift % len(splits)]
    data_x_train, data_y_train = data_x[train_idx], data_y[train_idx]
    data_x_test, data_y_test = data_x[test_idx], data_y[test_idx]

    # Pick this task's 10 classes at random (may repeat across calls).
    selected_classes = np.random.choice(range(0, 100), 10)
    train_idxs_of_selected_class = np.array(
        [np.where(data_y_train == y_val)[0] for y_val in selected_classes]
    )
    # A tenth of each class's training points are available per slot.
    num_points_per_class_per_slot = [
        int(len(train_idxs_of_selected_class[class_idx]) // 10)
        for class_idx in range(len(selected_classes))
    ]
    selected_idxs = np.concatenate(
        [
            np.random.choice(
                train_idxs_of_selected_class[class_idx],
                num_points_per_class_per_slot[class_idx],
            )
            for class_idx in range(len(selected_classes))
        ]
    )
    # Final subsample down to the requested task size.
    train_idxs = np.random.choice(selected_idxs, num_points_per_task)
    data_x_train = data_x_train[train_idxs]
    data_y_train = data_y_train[train_idxs]

    # Test set: every test-fold point belonging to the selected classes.
    test_idxs_of_selected_class = np.concatenate(
        [np.where(data_y_test == y_val)[0] for y_val in selected_classes]
    )
    data_x_test = data_x_test[test_idxs_of_selected_class]
    data_y_test = data_y_test[test_idxs_of_selected_class]

    return data_x_train, data_y_train, data_x_test, data_y_test
#%%
def run_parallel_exp(
    data_x, data_y, n_trees, model, num_points_per_task, slot=0, shift=1
):
    """Dispatch one LF_experiment run for the given model/shift/slot.

    For the "dnn" model the experiment is pinned to one of four GPUs,
    chosen round-robin from the shift index; otherwise it runs as-is.
    """
    experiment_args = (
        data_x,
        data_y,
        n_trees,
        shift,
        slot,
        model,
        num_points_per_task,
    )
    if model == "dnn":
        with tf.device("/gpu:" + str(shift % 4)):
            LF_experiment(*experiment_args, acorn=12345)
    else:
        LF_experiment(*experiment_args, acorn=12345)
#%%
### MAIN HYPERPARAMS ###
# model: "uf" (forest-based) or "dnn"; num_points_per_task: training
# points sampled per task in LF_experiment.
model = "uf"
num_points_per_task = 500
########################
# Pool the official CIFAR-100 train and test splits into one dataset;
# cross_val_data re-splits them per shift.
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
if model == "uf":
    # Forests take flat feature vectors: flatten H*W*C image dims.
    data_x = data_x.reshape(
        (data_x.shape[0], data_x.shape[1] * data_x.shape[2] * data_x.shape[3])
    )
data_y = np.concatenate([y_train, y_test])
# Labels load as shape (n, 1); keep the scalar class id only.
data_y = data_y[:, 0]
#%%
slot_fold = range(10)
if model == "uf":
    # Forest runs are CPU-bound: fan out all (tree, shift, slot) combos
    # across cores with joblib.
    shift_fold = range(1, 7, 1)
    n_trees = [10]
    iterable = product(n_trees, shift_fold, slot_fold)
    Parallel(n_jobs=-1, verbose=1)(
        delayed(run_parallel_exp)(
            data_x, data_y, ntree, model, num_points_per_task, slot=slot, shift=shift
        )
        for ntree, shift, slot in iterable
    )
elif model == "dnn":
    # DNN runs are GPU-bound: run at most 4 shifts at once (one per GPU,
    # see run_parallel_exp), in two stages of shifts 1-4 then 5-6.
    print("Performing Stage 1 Shifts")
    for slot in slot_fold:
        def perform_shift(shift):
            return run_parallel_exp(
                data_x, data_y, 0, model, num_points_per_task, slot=slot, shift=shift
            )
        stage_1_shifts = range(1, 5)
        with Pool(4) as p:
            p.map(perform_shift, stage_1_shifts)
    print("Performing Stage 2 Shifts")
    for slot in slot_fold:
        def perform_shift(shift):
            return run_parallel_exp(
                data_x, data_y, 0, model, num_points_per_task, slot=slot, shift=shift
            )
        stage_2_shifts = range(5, 7)
        with Pool(4) as p:
            p.map(perform_shift, stage_2_shifts)
# %%
| [
"pickle.dump",
"keras.Sequential",
"numpy.shape",
"numpy.mean",
"numpy.unique",
"pandas.DataFrame",
"warnings.simplefilter",
"keras.layers.Flatten",
"numpy.random.choice",
"itertools.product",
"numpy.log2",
"keras.optimizers.Adam",
"proglearn.progressive_learner.ProgressiveLearner",
"keras... | [((21, 52), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (42, 52), False, 'import warnings\n'), ((8525, 8560), 'keras.datasets.cifar100.load_data', 'keras.datasets.cifar100.load_data', ([], {}), '()\n', (8558, 8560), False, 'import keras\n'), ((8570, 8603), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_test]'], {}), '([X_train, X_test])\n', (8584, 8603), True, 'import numpy as np\n'), ((8745, 8778), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_test]'], {}), '([y_train, y_test])\n', (8759, 8778), True, 'import numpy as np\n'), ((883, 897), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (895, 897), True, 'import pandas as pd\n'), ((3422, 3690), 'proglearn.progressive_learner.ProgressiveLearner', 'ProgressiveLearner', ([], {'default_transformer_class': 'default_transformer_class', 'default_transformer_kwargs': 'default_transformer_kwargs', 'default_voter_class': 'default_voter_class', 'default_voter_kwargs': 'default_voter_kwargs', 'default_decider_class': 'default_decider_class'}), '(default_transformer_class=default_transformer_class,\n default_transformer_kwargs=default_transformer_kwargs,\n default_voter_class=default_voter_class, default_voter_kwargs=\n default_voter_kwargs, default_decider_class=default_decider_class)\n', (3440, 3690), False, 'from proglearn.progressive_learner import ProgressiveLearner\n'), ((3748, 3759), 'time.time', 'time.time', ([], {}), '()\n', (3757, 3759), False, 'import time\n'), ((4048, 4059), 'time.time', 'time.time', ([], {}), '()\n', (4057, 4059), False, 'import time\n'), ((4088, 4099), 'time.time', 'time.time', ([], {}), '()\n', (4097, 4099), False, 'import time\n'), ((4203, 4214), 'time.time', 'time.time', ([], {}), '()\n', (4212, 4214), False, 'import time\n'), ((6390, 6417), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(6)'}), '(n_splits=6)\n', (6405, 6417), False, 'from sklearn.model_selection import 
StratifiedKFold\n'), ((7292, 7344), 'numpy.random.choice', 'np.random.choice', (['selected_idxs', 'num_points_per_task'], {}), '(selected_idxs, num_points_per_task)\n', (7308, 7344), True, 'import numpy as np\n'), ((8912, 8951), 'itertools.product', 'product', (['n_trees', 'shift_fold', 'slot_fold'], {}), '(n_trees, shift_fold, slot_fold)\n', (8919, 8951), False, 'from itertools import product\n'), ((1313, 1331), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (1329, 1331), False, 'import keras\n'), ((4299, 4342), 'numpy.mean', 'np.mean', (['(task_0_predictions == test_y_task0)'], {}), '(task_0_predictions == test_y_task0)\n', (4306, 4342), True, 'import numpy as np\n'), ((4881, 4892), 'time.time', 'time.time', ([], {}), '()\n', (4890, 4892), False, 'import time\n'), ((5159, 5170), 'time.time', 'time.time', ([], {}), '()\n', (5168, 5170), False, 'import time\n'), ((5203, 5214), 'time.time', 'time.time', ([], {}), '()\n', (5212, 5214), False, 'import time\n'), ((5326, 5337), 'time.time', 'time.time', ([], {}), '()\n', (5335, 5337), False, 'import time\n'), ((6252, 6270), 'pickle.dump', 'pickle.dump', (['df', 'f'], {}), '(df, f)\n', (6263, 6270), False, 'import pickle\n'), ((8956, 8986), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(1)'}), '(n_jobs=-1, verbose=1)\n', (8964, 8986), False, 'from joblib import Parallel, delayed\n'), ((1593, 1688), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (1606, 1688), False, 'from keras import layers\n'), ((1823, 1918), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (1836, 1918), False, 'from keras 
import layers\n'), ((2053, 2149), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (2066, 2149), False, 'from keras import layers\n'), ((2284, 2380), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(254)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=254, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (2297, 2380), False, 'from keras import layers\n'), ((2503, 2519), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2517, 2519), False, 'from keras import layers\n'), ((2541, 2578), 'keras.layers.Dense', 'layers.Dense', (['(2000)'], {'activation': '"""relu"""'}), "(2000, activation='relu')\n", (2553, 2578), False, 'from keras import layers\n'), ((2600, 2637), 'keras.layers.Dense', 'layers.Dense', (['(2000)'], {'activation': '"""relu"""'}), "(2000, activation='relu')\n", (2612, 2637), False, 'from keras import layers\n'), ((2659, 2703), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (2671, 2703), False, 'from keras import layers\n'), ((2872, 2901), 'keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.0003)'], {}), '(0.0003)\n', (2893, 2901), False, 'import keras\n'), ((5434, 5477), 'numpy.mean', 'np.mean', (['(task_0_predictions == test_y_task0)'], {}), '(task_0_predictions == test_y_task0)\n', (5441, 5477), True, 'import numpy as np\n'), ((7054, 7157), 'numpy.random.choice', 'np.random.choice', (['train_idxs_of_selected_class[class_idx]', 'num_points_per_class_per_slot[class_idx]'], {}), '(train_idxs_of_selected_class[class_idx],\n num_points_per_class_per_slot[class_idx])\n', (7070, 7157), True, 'import numpy as np\n'), ((3006, 3041), 'numpy.log2', 'np.log2', 
(['(num_points_per_task * 0.33)'], {}), '(num_points_per_task * 0.33)\n', (3013, 3041), True, 'import numpy as np\n'), ((3994, 4018), 'numpy.unique', 'np.unique', (['train_y_task0'], {}), '(train_y_task0)\n', (4003, 4018), True, 'import numpy as np\n'), ((6762, 6793), 'numpy.where', 'np.where', (['(data_y_train == y_val)'], {}), '(data_y_train == y_val)\n', (6770, 6793), True, 'import numpy as np\n'), ((7493, 7523), 'numpy.where', 'np.where', (['(data_y_test == y_val)'], {}), '(data_y_test == y_val)\n', (7501, 7523), True, 'import numpy as np\n'), ((8996, 9021), 'joblib.delayed', 'delayed', (['run_parallel_exp'], {}), '(run_parallel_exp)\n', (9003, 9021), False, 'from joblib import Parallel, delayed\n'), ((9478, 9485), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (9482, 9485), False, 'from multiprocessing import Pool\n'), ((9831, 9838), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (9835, 9838), False, 'from multiprocessing import Pool\n'), ((1507, 1530), 'numpy.shape', 'np.shape', (['train_x_task0'], {}), '(train_x_task0)\n', (1515, 1530), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from priors import *
import sys
sys.path.insert(0,".")
############## Job Control ##################################
# Default values, overridden by vb_in.py
#
jobctl_0 = {
    # Per-trial counts used by loglik() and the plot titles:
    #   n_p: infections in the placebo arm, n_v: infections in the vaccine
    #   arm, v2p_ratio: vaccine/placebo person-year ratio, xlo: lower
    #   x-axis limit for that trial's plot.
    "trials" : {
        "Pfizer (Final)" : {"n_p":262, "n_v":8, "v2p_ratio":1.0, "xlo":0.75},
        "Pfizer (Severe)" : {"n_p": 9, "n_v": 1, "v2p_ratio":1.0, "xlo":0.0},
        "Moderna (Interim)" : {"n_p":90, "n_v":5, "v2p_ratio":1.0, "xlo":0.70},
        "Moderna (Final)" : {"n_p":185, "n_v":11, "v2p_ratio":1.0, "xlo":0.75},
        "Moderna (Severe)" : {"n_p": 30, "n_v":0, "v2p_ratio":1.0, "xlo":0.70},
        "Sputnik V (Interim)" : {"n_p":31, "n_v":8, "v2p_ratio":3.0, "xlo":0.45},
        "CoronaVac (Interim, Turkey)" : {"n_p":26, "n_v":3, "v2p_ratio":752.0/570.0, "xlo":0.3},
    },
    "cred": 0.90, # Probability level of credible regions
    "cred_lb" : 0.99, # Probability level of lower bound
    "nsamp" : 1000, # Number of equal-spaced samples in [0,1] for the posterior
    "prior" : uniform_prior, # Prior choice, from 'priors.py'
}
# If vb_in.py exists and contains a dict called 'jobctl', use it to update jobctl_0
try:
    # Optional user override: vb_in.py may define a dict 'jobctl' whose
    # keys replace the corresponding defaults in jobctl_0.
    from vb_in import jobctl
    jobctl_0.update(jobctl)
    print("Imported job from vb_in.py")
except ImportError:
    # No vb_in.py present — run with the defaults above.
    print("No job imported, using defaults")
    pass
# All keys in jobctl_0 now to become variables:
# NOTE(review): globals().update() injects config keys (trials, cred,
# cred_lb, nsamp, prior) as module-level names used by the code below.
globals().update(jobctl_0)
################## Done with job control #####################
# String form of the credible-region probability (e.g. "90.0").
cs="%4.1f"%(cred*100.0)
# Grid spacing, and the efficacy grid at cell midpoints on [0, 1].
de = 1.0 / nsamp
# FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float (an alias of np.float64 here) is the drop-in replacement.
eff = de * (np.arange(nsamp, dtype=float) + 0.5)
def loglik(e, trial):
    """Log-likelihood of vaccine efficacy ``e`` for a single trial.

    ``e`` may be a scalar or a numpy array of efficacy values; ``trial``
    is a dict with keys "n_v" (vaccine-arm infections), "n_p"
    (placebo-arm infections) and "v2p_ratio" (vaccine/placebo
    person-year ratio).
    """
    n_vaccine = trial["n_v"]
    n_total = trial["n_p"] + n_vaccine
    ratio = trial["v2p_ratio"]
    return n_vaccine * np.log(1.0 - e) - n_total * np.log(1.0 + (1 - e) * ratio)
# Work arrays reused across trials: the normalized posterior on the
# efficacy grid, and the [lo, hi] credible-interval endpoints.
posterior = np.zeros(nsamp)
eff_ci = np.zeros(2)
# Plot styling constants: font sizes, line widths, marker size.
fsize=16
fsize_l=12
lw_ci=2
lw_plot=3
msize=18
# For each trial: compute the normalized posterior over efficacy, the
# max-posterior point, the HPD credible interval, and a lower bound,
# then plot and save one figure per trial.
for trialname in trials.keys():
    trial = trials[trialname]
    ll = loglik(eff, trial)
    pr = prior(eff)
    # Subtract the max log-likelihood before exponentiating (numerical
    # stability), then normalize so the posterior integrates to 1.
    llmax = np.max(ll)
    posterior = np.exp(ll - llmax) * pr
    norm = posterior.sum() * de
    posterior /= norm
    # Grid indices sorted by descending posterior density; accumulating
    # probability in this order yields the highest-density region.
    inds = np.argsort(posterior)[-1::-1]
    cum = posterior[inds].cumsum() * de
    # Cumulative probability from the top of the efficacy grid downward;
    # the lower bound is where it first reaches cred_lb.
    lbcum = posterior[-1::-1].cumsum() * de
    lb_ind = nsamp-np.searchsorted(lbcum, cred_lb)
    eff_lb = eff[lb_ind]
    # Polygon vertices (closed at y=0) for shading the lower-bound tail.
    lb_x = list(eff[lb_ind:])
    lb_x.insert(0,eff[lb_ind])
    lb_x.append(eff[-1])
    lb_y = list(posterior[lb_ind:])
    lb_y.insert(0,0.0)
    lb_y.append(0.0)
    # Walk down the density-ordered samples, widening [eff_ci[0],
    # eff_ci[1]] until the accumulated probability exceeds cred.
    eff_mp = eff[inds[0]]
    eff_ci[0] = eff[inds[0]]
    eff_ci[1] = eff[inds[0]]
    ci_idx_lo = ci_idx_hi = inds[0]
    for samp in range(nsamp):
        if eff[inds[samp]] > eff_ci[1]:
            eff_ci[1] = eff[inds[samp]]
            ci_idx_hi = inds[samp]
        if eff[inds[samp]] < eff_ci[0]:
            eff_ci[0] = eff[inds[samp]]
            ci_idx_lo = inds[samp]
        if cum[samp] > cred:
            break
    # Polygon vertices (closed at y=0) for shading the credible region.
    ci_x = list(eff[ci_idx_lo:ci_idx_hi+1])
    ci_x.insert(0, eff[ci_idx_lo])
    ci_x.append(eff[ci_idx_hi])
    ci_y = list(posterior[ci_idx_lo:ci_idx_hi+1])
    ci_y.insert(0,0.0)
    ci_y.append(0.0)
    print(trialname +
          ": Max Posterior Effectiveness = %6.3f; %4.1f%% CI = [%6.3f, %6.3f]; %4.1f%% Lower Bound = %6.3f\n" %
          (eff_mp, cred*100.0, eff_ci[0], eff_ci[1], cred_lb*100.0, eff_lb) )
    # One figure per trial: posterior curve, max-posterior line, shaded
    # credible region, hatched lower-bound region.
    fig = plt.figure()
    fig.set_figwidth(8.0)
    fig.set_figheight(8.0)
    ax = fig.add_subplot(1,1,1)
    ax.set_xlim([trial["xlo"],1.0])
    ax.set_ylim(bottom=0.0, top=posterior[inds[0]]*1.2)
    ax.set_xlabel("Efficacy", size=fsize)
    ax.set_ylabel("Posterior Density", size=fsize)
    ax.tick_params(labelsize=fsize)
    ax.plot(eff,posterior,'b-', linewidth=lw_plot)
    ax.axvline(eff_mp, color="c", linewidth=lw_plot,
               linestyle="--",
               label='Max Posterior: Eff. = %5.3f'%(eff_mp) )
    ax.fill(ci_x, ci_y, color='r', alpha=0.4,
            label='%4.1f%% Credible Region:'%(cred*100) + ' Eff.$\in$'+'[%5.3f,%5.3f]'%
            (eff_ci[0],eff_ci[1]))
    # ax.axvline(eff_ci[0], color='r', linewidth=lw_ci, linestyle=":")
    # ax.axvline(eff_ci[1], color='r', linewidth=lw_ci, linestyle=":")
    #ax.axvline(eff_lb, color='g', linewidth=lw_ci, linestyle="-.")
    ax.fill(lb_x, lb_y, hatch="/", fill=False,
            label="%4.1f%% Lower Bound: Eff. = %5.3f" % (cred_lb*100, eff_lb))
    ax.legend(handlelength=4.0)
    ax.set_title(trialname + ": Placebo Infections = %d, Vaccine Infections = %d\n Vaccine/Placebo Person-Year Ratio = %4.2f" % (trial["n_p"], trial["n_v"], trial["v2p_ratio"]) )
    # Saved as "<trial name>.png" in the working directory.
    plt.savefig(trialname +".png", format="png")
    plt.clf()
| [
"numpy.log",
"matplotlib.pyplot.clf",
"numpy.zeros",
"sys.path.insert",
"numpy.searchsorted",
"numpy.argsort",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.savefig"
] | [((102, 125), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (117, 125), False, 'import sys\n'), ((1769, 1784), 'numpy.zeros', 'np.zeros', (['nsamp'], {}), '(nsamp)\n', (1777, 1784), True, 'import numpy as np\n'), ((1794, 1805), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1802, 1805), True, 'import numpy as np\n'), ((1978, 1988), 'numpy.max', 'np.max', (['ll'], {}), '(ll)\n', (1984, 1988), True, 'import numpy as np\n'), ((3307, 3319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3317, 3319), True, 'import matplotlib.pyplot as plt\n'), ((4566, 4611), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(trialname + '.png')"], {'format': '"""png"""'}), "(trialname + '.png', format='png')\n", (4577, 4611), True, 'import matplotlib.pyplot as plt\n'), ((4615, 4624), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4622, 4624), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1589), 'numpy.arange', 'np.arange', (['nsamp'], {'dtype': 'np.float'}), '(nsamp, dtype=np.float)\n', (1566, 1589), True, 'import numpy as np\n'), ((2005, 2023), 'numpy.exp', 'np.exp', (['(ll - llmax)'], {}), '(ll - llmax)\n', (2011, 2023), True, 'import numpy as np\n'), ((2094, 2115), 'numpy.argsort', 'np.argsort', (['posterior'], {}), '(posterior)\n', (2104, 2115), True, 'import numpy as np\n'), ((2227, 2258), 'numpy.searchsorted', 'np.searchsorted', (['lbcum', 'cred_lb'], {}), '(lbcum, cred_lb)\n', (2242, 2258), True, 'import numpy as np\n'), ((1644, 1659), 'numpy.log', 'np.log', (['(1.0 - e)'], {}), '(1.0 - e)\n', (1650, 1659), True, 'import numpy as np\n'), ((1703, 1745), 'numpy.log', 'np.log', (["(1.0 + (1 - e) * trial['v2p_ratio'])"], {}), "(1.0 + (1 - e) * trial['v2p_ratio'])\n", (1709, 1745), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Slicer test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import six
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from google.protobuf import text_format
def make_features_dict(features_dict):
  """Wraps each feature's value list as {'node': np.array(values)}."""
  return {
      name: {'node': np.array(values)}
      for name, values in features_dict.items()
  }
def create_fpls():
  """Returns two example FeaturesPredictionsLabels for slicer tests."""
  feature_sets = [
      {'gender': ['f'], 'age': [13], 'interest': ['cars']},
      {'gender': ['m'], 'age': [10], 'interest': ['cars']},
  ]
  fpls = []
  for features in feature_sets:
    fpls.append(
        types.FeaturesPredictionsLabels(
            input_ref=0,
            features=make_features_dict(features),
            predictions=make_features_dict({'kb': [1]}),
            labels=make_features_dict({'ad_risk_score': [0]})))
  return fpls
def wrap_fpl(fpl):
  """Wraps an FPL under both the input key and the FPL extract key."""
  wrapped = {constants.INPUT_KEY: fpl}
  wrapped[constants.FEATURES_PREDICTIONS_LABELS_KEY] = fpl
  return wrapped
class SlicerTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase):
  def setUp(self):
    """Enables verbose assertion messages for every test in this class."""
    super(SlicerTest, self).setUp()
    self.longMessage = True  # pylint: disable=invalid-name
def _makeFeaturesDict(self, features_dict):
result = {}
for key, value in features_dict.items():
result[key] = {'node': np.array(value)}
return result
  def assertSliceResult(self, name, features_dict, columns, features, expected):
    """Asserts slicing features_dict by (columns, features) yields expected.

    Builds a SingleSliceSpec from the given columns/features and checks the
    produced slice keys against `expected`, ignoring order; `name` labels
    the failing case in the assertion message.
    """
    spec = slicer.SingleSliceSpec(columns=columns, features=features)
    msg = 'Test case %s: slice on columns %s, features %s' % (name, columns,
                                                              features)
    six.assertCountEqual(
        self, expected,
        slicer.get_slices_for_features_dicts([features_dict], None, [spec]),
        msg)
  def testDeserializeSliceKey(self):
    """SliceKey protos deserialize to (column, value) tuples per type."""
    slice_metrics = text_format.Parse(
        """
          single_slice_keys {
            column: 'age'
            int64_value: 5
          }
          single_slice_keys {
            column: 'language'
            bytes_value: 'english'
          }
          single_slice_keys {
            column: 'price'
            float_value: 1.0
          }
        """, metrics_for_slice_pb2.SliceKey())
    got_slice_key = slicer.deserialize_slice_key(slice_metrics)
    self.assertCountEqual([('age', 5), ('language', 'english'), ('price', 1.0)],
                          got_slice_key)
  def testDeserializeCrossSliceKey(self):
    """CrossSliceKey protos deserialize to a (baseline, comparison) pair."""
    slice_metrics = text_format.Parse(
        """
          baseline_slice_key {
            single_slice_keys {
              column: 'age'
              int64_value: 5
            }
            single_slice_keys {
              column: 'language'
              bytes_value: 'english'
            }
            single_slice_keys {
              column: 'price'
              float_value: 1.0
            }
          }
          comparison_slice_key {
            single_slice_keys {
              column: 'age'
              int64_value: 8
            }
            single_slice_keys {
              column: 'language'
              bytes_value: 'hindi'
            }
          }
        """, metrics_for_slice_pb2.CrossSliceKey())
    got_slice_key = slicer.deserialize_cross_slice_key(slice_metrics)
    self.assertCountEqual(
        ((('age', 5), ('language', 'english'), ('price', 1.0)),
         (('age', 8), ('language', 'hindi'))), got_slice_key)
  def testSliceEquality(self):
    """SingleSliceSpec equality/hash depend on contents, not identity."""
    overall = slicer.SingleSliceSpec()
    age_column = slicer.SingleSliceSpec(columns=['age'])
    age_feature = slicer.SingleSliceSpec(features=[('age', 5)])
    age_and_gender = slicer.SingleSliceSpec(
        columns=['age'], features=[('gender', 'f')])
    # Note that we construct new instances of the slices to ensure that we
    # aren't just checking object identity.
    def check_equality_and_hash_equality(left, right):
      self.assertEqual(left, right)
      self.assertEqual(hash(left), hash(right))
    check_equality_and_hash_equality(overall, slicer.SingleSliceSpec())
    check_equality_and_hash_equality(age_column,
                                     slicer.SingleSliceSpec(columns=['age']))
    check_equality_and_hash_equality(
        age_feature, slicer.SingleSliceSpec(features=[('age', 5)]))
    check_equality_and_hash_equality(
        age_and_gender,
        slicer.SingleSliceSpec(columns=['age'], features=[('gender', 'f')]))
    self.assertNotEqual(overall, age_column)
    self.assertNotEqual(age_column, age_feature)
    self.assertNotEqual(age_column, age_and_gender)
    self.assertNotEqual(age_feature, age_and_gender)
    self.assertCountEqual([slicer.SingleSliceSpec()], [overall])
    self.assertCountEqual([
        slicer.SingleSliceSpec(columns=['age']),
        slicer.SingleSliceSpec(),
        slicer.SingleSliceSpec(features=[('age', 5)]),
        slicer.SingleSliceSpec(columns=['age'], features=[('gender', 'f')])
    ], [age_and_gender, age_feature, overall, age_column])
def testNoOverlappingColumns(self):
self.assertRaises(ValueError, slicer.SingleSliceSpec, ['age'], [('age', 5)])
  def testNonUTF8ValueRaisesValueError(self):
    """Slicing a feature with a non-UTF-8 bytes value raises ValueError."""
    column_name = 'column_name'
    invalid_value = b'\x8a'  # not decodable as UTF-8
    spec = slicer.SingleSliceSpec(columns=[column_name])
    features_dict = self._makeFeaturesDict({
        column_name: [invalid_value],
    })
    # The error message must mention the offending column.
    with self.assertRaisesRegex(ValueError, column_name):
      list(slicer.get_slices_for_features_dicts([features_dict], None, [spec]))
  def testGetSlicesForFeaturesDictUnivalent(self):
    """Slicing single-valued features: (name, columns, features, expected)."""
    test_cases = [
        ('Overall', [], [], [()]),
        ('Feature does not match', [], [('age', 99)], []),
        ('No such column', ['no_such_column'], [], []),
        ('Single column', ['age'], [], [(('age', 5),)]),
        ('Single feature', [], [('age', 5)], [(('age', 5),)]),
        ('Single feature type mismatch', [], [('age', '5')], [(('age', 5),)]),
        ('One column, one feature',
         ['gender'], [('age', 5)], [(('age', 5), ('gender', 'f'))]),
        ('Two features', ['interest', 'gender'], [('age', 5)],
         [(('age', 5), ('gender', 'f'), ('interest', 'cars'))]),
    ] # pyformat: disable
    features_dict = self._makeFeaturesDict({
        'gender': ['f'],
        'age': [5],
        'interest': ['cars']
    })
    for (name, columns, features, expected) in test_cases:
      self.assertSliceResult(name, features_dict, columns, features, expected)
  def testGetSlicesForFeaturesDictMultivalent(self):
    """Multi-valued features slice into the cross-product of their values."""
    # Each case: (name, columns, features, expected slice keys).
    test_cases = [
        (
            'One column',
            ['fruits'],
            [],
            [
                (('fruits', 'apples'),),
                (('fruits', 'pears'),)
            ],
        ),
        (
            'Two columns',
            ['fruits', 'interests'],
            [],
            [
                (('fruits', 'apples'), ('interests', 'cars')),
                (('fruits', 'apples'), ('interests', 'dogs')),
                (('fruits', 'pears'), ('interests', 'cars')),
                (('fruits', 'pears'), ('interests', 'dogs'))
            ],
        ),
        (
            'One feature',
            [],
            [('interests', 'cars')],
            [
                (('interests', 'cars'),)
            ],
        ),
        (
            'Two features',
            [],
            [('gender', 'f'), ('interests', 'cars')],
            [
                (('gender', 'f'), ('interests', 'cars'))
            ],
        ),
        (
            'One column, one feature',
            ['fruits'],
            [('interests', 'cars')],
            [
                (('fruits', 'apples'), ('interests', 'cars')),
                (('fruits', 'pears'), ('interests', 'cars'))
            ],
        ),
        (
            'One column, two features',
            ['fruits'],
            [('gender', 'f'), ('interests', 'cars')],
            [
                (('fruits', 'apples'), ('gender', 'f'), ('interests', 'cars')),
                (('fruits', 'pears'), ('gender', 'f'), ('interests', 'cars')),
            ],
        ),
        (
            'Two columns, one feature',
            ['interests', 'fruits'], [('gender', 'f')],
            [
                (('fruits', 'apples'), ('gender', 'f'), ('interests', 'cars')),
                (('fruits', 'pears'), ('gender', 'f'), ('interests', 'cars')),
                (('fruits', 'apples'), ('gender', 'f'), ('interests', 'dogs')),
                (('fruits', 'pears'), ('gender', 'f'), ('interests', 'dogs'))
            ],
        ),
        (
            'Two columns, two features',
            ['interests', 'fruits'],
            [('gender', 'f'), ('age', 5)],
            [
                (('age', 5), ('fruits', 'apples'), ('gender', 'f'),
                 ('interests', 'cars')),
                (('age', 5), ('fruits', 'pears'), ('gender', 'f'),
                 ('interests', 'cars')),
                (('age', 5), ('fruits', 'apples'), ('gender', 'f'),
                 ('interests', 'dogs')),
                (('age', 5), ('fruits', 'pears'), ('gender', 'f'),
                 ('interests', 'dogs'))
            ],
        )
    ] # pyformat: disable
    features_dict = self._makeFeaturesDict({
        'gender': ['f'],
        'age': [5],
        'interests': ['cars', 'dogs'],
        'fruits': ['apples', 'pears']
    })
    for (name, columns, features, expected) in test_cases:
      self.assertSliceResult(name, features_dict, columns, features, expected)
  def testGetSlicesForFeaturesDictMultipleSingleSliceSpecs(self):
    """Multiple specs produce the union of matching slice keys."""
    features_dict = self._makeFeaturesDict({
        'gender': ['f'],
        'age': [5],
        'interest': ['cars']
    })
    spec_overall = slicer.SingleSliceSpec()
    spec_age = slicer.SingleSliceSpec(columns=['age'])
    spec_age4 = slicer.SingleSliceSpec(features=[('age', 4)])
    spec_age5_gender = slicer.SingleSliceSpec(
        columns=['gender'], features=[('age', 5)])
    slice_spec = [spec_overall, spec_age, spec_age4, spec_age5_gender]
    # spec_age4 matches nothing (age is 5), so only three keys come back.
    expected = [(), (('age', 5),), (('age', 5), ('gender', 'f'))]
    self.assertCountEqual(
        expected,
        slicer.get_slices_for_features_dicts([features_dict], None, slice_spec))
  def testStringifySliceKey(self):
    """Slice keys stringify as 'col1_X_col2:val1_X_val2' (or 'Overall')."""
    test_cases = [
        ('overall', (), 'Overall'),
        ('one bytes feature', (('age_str', '5'),), 'age_str:5'),
        ('one int64 feature', (('age', 1),), 'age:1'),
        ('mixed', (('age', 1), ('gender', 'f')), 'age_X_gender:1_X_f'),
        ('more', (('age', 1), ('gender', 'f'), ('interest', 'cars')),
         'age_X_gender_X_interest:1_X_f_X_cars'),
        ('unicode', (('text', b'\xe4\xb8\xad\xe6\x96\x87'),), u'text:\u4e2d\u6587'),
    ] # pyformat: disable
    for (name, slice_key, stringified_key) in test_cases:
      self.assertEqual(
          stringified_key, slicer.stringify_slice_key(slice_key), msg=name)
  def testIsCrossSliceApplicable(self):
    """Cross-slice keys match CrossSlicingSpec baseline/comparison specs."""
    # Each case: (expected, name, (baseline_key, comparison_key), spec).
    test_cases = [
        (True, 'overall pass', ((), (('b', 2),)), config_pb2.CrossSlicingSpec(
            baseline_spec=config_pb2.SlicingSpec(),
            slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
        (True, 'value pass', ((('a', 1),), (('b', 2),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
             slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
        (True, 'baseline key pass', ((('a', 1),), (('b', 2),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_keys=['a']),
             slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
        (True, 'comparison key pass', ((('a', 1),), (('b', 2),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
             slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b'])])),
        (True, 'comparison multiple key pass', ((('a', 1),), (('c', 3),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
             slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b']),
                            config_pb2.SlicingSpec(feature_keys=['c'])])),
        (False, 'overall fail', ((('a', 1),), (('b', 2),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(),
             slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
        (False, 'value fail', ((('a', 1),), (('b', 3),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
             slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
        (False, 'baseline key fail', ((('c', 1),), (('b', 2),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_keys=['a']),
             slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
        (False, 'comparison key fail', ((('a', 1),), (('c', 3),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
             slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b'])])),
        (False, 'comparison multiple key fail', ((('a', 1),), (('d', 3),)),
         config_pb2.CrossSlicingSpec(
             baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
             slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b']),
                            config_pb2.SlicingSpec(feature_keys=['c'])])),
    ] # pyformat: disable
    for (expected_result, name, sliced_key, slicing_spec) in test_cases:
      self.assertEqual(
          expected_result,
          slicer.is_cross_slice_applicable(
              cross_slice_key=sliced_key, cross_slicing_spec=slicing_spec),
          msg=name)
  def testGetSliceKeyType(self):
    """Keys classify as SliceKeyType or CrossSliceKeyType; others raise."""
    test_cases = [
        (slicer.SliceKeyType, 'overall', ()),
        (slicer.SliceKeyType, 'one bytes feature', (('a', '5'),)),
        (slicer.SliceKeyType, 'one int64 feature', (('a', 1),)),
        (slicer.SliceKeyType, 'mixed', (('a', 1), ('b', 'f'))),
        (slicer.SliceKeyType, 'more', (('a', 1), ('b', 'f'), ('c', 'cars'))),
        (slicer.SliceKeyType, 'unicode',
         (('a', b'\xe4\xb8\xad\xe6\x96\x87'),)),
        (slicer.CrossSliceKeyType, 'CrossSlice overall', ((), ())),
        (slicer.CrossSliceKeyType, 'CrossSlice one slice key baseline',
         ((('a', '5'),), ())),
        (slicer.CrossSliceKeyType, 'CrossSlice one slice key comparison',
         ((), (('a', 1),))),
        (slicer.CrossSliceKeyType, 'CrossSlice two simple slice key',
         ((('a', 1),), (('b', 'f'),))),
        (slicer.CrossSliceKeyType, 'CrossSlice two multiple slice key',
         ((('a', 1), ('b', 'f'), ('c', '11')),
          (('a2', 1), ('b', 'm'), ('c', '11')))),
    ] # pyformat: disable
    for (expected_result, name, slice_key) in test_cases:
      self.assertEqual(
          expected_result, slicer.get_slice_key_type(slice_key), msg=name)
    # Malformed keys (wrong nesting / arity) must raise TypeError.
    unrecognized_test_cases = [
        ('Unrecognized 1: ', ('a')),
        ('Unrecognized 2: ', ('a',)),
        ('Unrecognized 3: ', ('a', 1)),
        ('Unrecognized 4: ', (('a'))),
        ('Unrecognized 5: ', (('a',))),
        ('Unrecognized 6: ', ((), (), ())),
        ('Unrecognized 7: ', ((('a', 1),), (('b', 1),), (('c', 1),))),
        ('Unrecognized 8: ', ((('a', 1),), ('b', 1))),
        ('Unrecognized 9: ', (('a', 1), (('b', 1),))),
    ] # pyformat: disable
    for (name, slice_key) in unrecognized_test_cases:
      with self.assertRaises(TypeError, msg=name + str(slice_key)):
        slicer.get_slice_key_type(slice_key)
  def testIsSliceApplicable(self):
    """A spec applies iff its columns and features all appear in the key."""
    # Each case: (name, columns, features, slice_key, expected result).
    test_cases = [
        ('applicable', ['column1'],
         [('column3', 'value3'), ('column4', 'value4')],
         (('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
         True),
        ('wrongcolumns', ['column1', 'column2'],
         [('column3', 'value3'), ('column4', 'value4')],
         (('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
         False),
        ('wrongfeatures', ['column1'], [('column3', 'value3')],
         (('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
         False),
        ('nocolumns', [], [('column3', 'value3')],
         (('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
         False),
        ('nofeatures', ['column1'], [], (('column1', 'value1'),), True),
        ('empty slice key', ['column1'], [('column2', 'value1')], (), False),
        ('overall', [], [], (), True)
    ] # pyformat: disable
    for (name, columns, features, slice_key, result) in test_cases:
      slice_spec = slicer.SingleSliceSpec(columns=columns, features=features)
      self.assertEqual(
          slice_spec.is_slice_applicable(slice_key), result, msg=name)
  def testSliceDefaultSlice(self):
    """FanoutSlices with the overall spec emits one ((), fpl) per example."""
    with beam.Pipeline() as pipeline:
      fpls = create_fpls()
      metrics = (
          pipeline
          | 'CreateTestInput' >> beam.Create(fpls)
          | 'WrapFpls' >> beam.Map(wrap_fpl)
          | 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
              [slicer.SingleSliceSpec()])
          | 'FanoutSlices' >> slicer.FanoutSlices())
      def check_result(got):
        try:
          self.assertLen(got, 2)
          expected_result = [
              ((), wrap_fpl(fpls[0])),
              ((), wrap_fpl(fpls[1])),
          ]
          self.assertEqual(len(got), len(expected_result))
          # Element order from the pipeline is unspecified; accept either.
          self.assertTrue(
              got[0] == expected_result[0] and got[1] == expected_result[1] or
              got[1] == expected_result[0] and got[0] == expected_result[1])
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(metrics, check_result)
  def testSliceOneSlice(self):
    """Overall + gender specs fan each example out to both slice keys."""
    with beam.Pipeline() as pipeline:
      fpls = create_fpls()
      metrics = (
          pipeline
          | 'CreateTestInput' >> beam.Create(fpls, reshuffle=False)
          | 'WrapFpls' >> beam.Map(wrap_fpl)
          | 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys([
              slicer.SingleSliceSpec(),
              slicer.SingleSliceSpec(columns=['gender'])
          ])
          | 'FanoutSlices' >> slicer.FanoutSlices())
      def check_result(got):
        try:
          self.assertLen(got, 4)
          expected_result = [
              ((), wrap_fpl(fpls[0])),
              ((), wrap_fpl(fpls[1])),
              ((('gender', 'f'),), wrap_fpl(fpls[0])),
              ((('gender', 'm'),), wrap_fpl(fpls[1])),
          ]
          self.assertCountEqual(got, expected_result)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(metrics, check_result)
  def testMultidimSlices(self):
    """FanoutSlices de-duplicates slice keys within a multi-dim extract."""
    # SLICE_KEY_TYPES_KEY holds per-row keys; both ndarray and plain
    # nested-list forms are exercised.
    data = [{
        'features': {
            'gender': [['f'], ['f']],
            'age': [[13], [13]],
            'interest': [['cars'], ['cars']]
        },
        'predictions': [[1], [1]],
        'labels': [[0], [0]],
        constants.SLICE_KEY_TYPES_KEY:
            np.array([[(), (('gender', 'f'),)], [(), (('gender', 'f'),)]])
    }, {
        'features': {
            'gender': [['f'], ['m']],
            'age': [[13], [10]],
            'interest': [['cars'], ['cars']]
        },
        'predictions': [[1], [1]],
        'labels': [[0], [0]],
        constants.SLICE_KEY_TYPES_KEY: [[(), (('gender', 'f'),)],
                                        [(), (('gender', 'm'),)]]
    }]
    with beam.Pipeline() as pipeline:
      result = (
          pipeline
          | 'CreateTestInput' >> beam.Create(data, reshuffle=False)
          | 'FanoutSlices' >> slicer.FanoutSlices())
      def check_result(got):
        try:
          self.assertLen(got, 5)
          # The slice-key column is consumed by FanoutSlices, so drop it
          # before comparing the remaining extract payloads.
          del data[0][constants.SLICE_KEY_TYPES_KEY]
          del data[1][constants.SLICE_KEY_TYPES_KEY]
          expected_result = [
              ((), data[0]),
              ((), data[1]),
              ((('gender', 'f'),), data[0]),
              ((('gender', 'f'),), data[1]),
              ((('gender', 'm'),), data[1]),
          ]
          self.assertCountEqual(got, expected_result)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result)
def testMultidimOverallSlices(self):
data = [{
constants.SLICE_KEY_TYPES_KEY: np.array([[()], [()]])
}, {
constants.SLICE_KEY_TYPES_KEY: np.array([[()], [()]])
}]
with beam.Pipeline() as pipeline:
result = (
pipeline
| 'CreateTestInput' >> beam.Create(data, reshuffle=False)
| 'FanoutSlices' >> slicer.FanoutSlices())
def check_result(got):
try:
self.assertLen(got, 2)
del data[0][constants.SLICE_KEY_TYPES_KEY]
del data[1][constants.SLICE_KEY_TYPES_KEY]
expected_result = [
((), data[0]),
((), data[1]),
]
self.assertCountEqual(got, expected_result)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result)
def testFilterOutSlices(self):
slice_key_1 = (('slice_key', 'slice1'),)
slice_key_2 = (('slice_key', 'slice2'),)
slice_key_3 = (('slice_key', 'slice3'),)
values_list = [(slice_key_1, {
'val11': 'val12'
}), (slice_key_2, {
'val21': 'val22'
})]
slice_counts_list = [(slice_key_1, 2), (slice_key_2, 1), (slice_key_3, 0)]
def check_output(got):
try:
self.assertLen(got, 2)
slices = {}
for (k, v) in got:
slices[k] = v
self.assertEqual(slices[slice_key_1], {'val11': 'val12'})
self.assertIn(metric_keys.ERROR_METRIC, slices[slice_key_2])
except AssertionError as err:
raise util.BeamAssertException(err)
with beam.Pipeline() as pipeline:
slice_counts_pcoll = (
pipeline | 'CreateSliceCountsPColl' >> beam.Create(slice_counts_list))
output_dict = (
pipeline
| 'CreateValuesPColl' >> beam.Create(values_list)
| 'FilterOutSlices' >> slicer.FilterOutSlices(
slice_counts_pcoll,
min_slice_size=2,
error_metric_key=metric_keys.ERROR_METRIC))
util.assert_that(output_dict, check_output)
  @parameterized.named_parameters(
      {
          'testcase_name': 'matching_single_spec',
          'slice_key': (('f1', 1),),
          'slice_specs': [slicer.SingleSliceSpec(features=[('f1', 1)])],
          'expected_result': True
      },
      {
          'testcase_name': 'matching_single_spec_with_float',
          'slice_key': (('f1', '1.0'),),
          'slice_specs': [slicer.SingleSliceSpec(features=[('f1', '1.0')])],
          'expected_result': True
      },
      {
          'testcase_name': 'non_matching_single_spec',
          'slice_key': (('f1', 1),),
          'slice_specs': [slicer.SingleSliceSpec(columns=['f2'])],
          'expected_result': False
      },
      {
          'testcase_name': 'matching_multiple_specs',
          'slice_key': (('f1', 1),),
          'slice_specs': [
              slicer.SingleSliceSpec(columns=['f1']),
              slicer.SingleSliceSpec(columns=['f2'])
          ],
          'expected_result': True
      },
      {
          'testcase_name': 'empty_specs',
          'slice_key': (('f1', 1),),
          'slice_specs': [],
          'expected_result': False
      },
  )
  def testSliceKeyMatchesSliceSpecs(self, slice_key, slice_specs,
                                    expected_result):
    """A slice key matches iff at least one spec matches; no specs -> False."""
    self.assertEqual(
        expected_result,
        slicer.slice_key_matches_slice_specs(slice_key, slice_specs))
# Run all tests in this module under the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
| [
"apache_beam.Map",
"apache_beam.Pipeline",
"apache_beam.testing.util.BeamAssertException",
"tensorflow_model_analysis.proto.metrics_for_slice_pb2.SliceKey",
"tensorflow_model_analysis.slicer.slicer_lib.deserialize_cross_slice_key",
"tensorflow.test.main",
"tensorflow_model_analysis.slicer.slicer_lib.get... | [((25246, 25260), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (25258, 25260), True, 'import tensorflow as tf\n'), ((2819, 2877), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': 'columns', 'features': 'features'}), '(columns=columns, features=features)\n', (2841, 2877), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((3626, 3669), 'tensorflow_model_analysis.slicer.slicer_lib.deserialize_slice_key', 'slicer.deserialize_slice_key', (['slice_metrics'], {}), '(slice_metrics)\n', (3654, 3669), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((4590, 4639), 'tensorflow_model_analysis.slicer.slicer_lib.deserialize_cross_slice_key', 'slicer.deserialize_cross_slice_key', (['slice_metrics'], {}), '(slice_metrics)\n', (4624, 4639), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((4839, 4863), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (4861, 4863), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((4881, 4920), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']"}), "(columns=['age'])\n", (4903, 4920), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((4939, 4984), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'features': "[('age', 5)]"}), "(features=[('age', 5)])\n", (4961, 4984), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((5006, 5073), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']", 'features': "[('gender', 'f')]"}), "(columns=['age'], features=[('gender', 'f')])\n", (5028, 5073), True, 'from tensorflow_model_analysis.slicer 
import slicer_lib as slicer\n'), ((6592, 6637), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': '[column_name]'}), '(columns=[column_name])\n', (6614, 6637), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((11055, 11079), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (11077, 11079), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((11095, 11134), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']"}), "(columns=['age'])\n", (11117, 11134), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((11151, 11196), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'features': "[('age', 4)]"}), "(features=[('age', 4)])\n", (11173, 11196), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((11220, 11285), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['gender']", 'features': "[('age', 5)]"}), "(columns=['gender'], features=[('age', 5)])\n", (11242, 11285), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((1529, 1544), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (1537, 1544), True, 'import numpy as np\n'), ((3085, 3152), 'tensorflow_model_analysis.slicer.slicer_lib.get_slices_for_features_dicts', 'slicer.get_slices_for_features_dicts', (['[features_dict]', 'None', '[spec]'], {}), '([features_dict], None, [spec])\n', (3121, 3152), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((3571, 3603), 'tensorflow_model_analysis.proto.metrics_for_slice_pb2.SliceKey', 'metrics_for_slice_pb2.SliceKey', ([], {}), '()\n', (3601, 3603), False, 'from tensorflow_model_analysis.proto import metrics_for_slice_pb2\n'), ((4530, 
4567), 'tensorflow_model_analysis.proto.metrics_for_slice_pb2.CrossSliceKey', 'metrics_for_slice_pb2.CrossSliceKey', ([], {}), '()\n', (4565, 4567), False, 'from tensorflow_model_analysis.proto import metrics_for_slice_pb2\n'), ((5389, 5413), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (5411, 5413), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((5501, 5540), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']"}), "(columns=['age'])\n", (5523, 5540), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((5601, 5646), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'features': "[('age', 5)]"}), "(features=[('age', 5)])\n", (5623, 5646), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((5718, 5785), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']", 'features': "[('gender', 'f')]"}), "(columns=['age'], features=[('gender', 'f')])\n", (5740, 5785), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((11486, 11557), 'tensorflow_model_analysis.slicer.slicer_lib.get_slices_for_features_dicts', 'slicer.get_slices_for_features_dicts', (['[features_dict]', 'None', 'slice_spec'], {}), '([features_dict], None, slice_spec)\n', (11522, 11557), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((18148, 18206), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': 'columns', 'features': 'features'}), '(columns=columns, features=features)\n', (18170, 18206), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((18347, 18362), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (18360, 18362), True, 'import apache_beam as 
beam\n'), ((19230, 19269), 'apache_beam.testing.util.assert_that', 'util.assert_that', (['metrics', 'check_result'], {}), '(metrics, check_result)\n', (19246, 19269), False, 'from apache_beam.testing import util\n'), ((19311, 19326), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (19324, 19326), True, 'import apache_beam as beam\n'), ((20201, 20240), 'apache_beam.testing.util.assert_that', 'util.assert_that', (['metrics', 'check_result'], {}), '(metrics, check_result)\n', (20217, 20240), False, 'from apache_beam.testing import util\n'), ((20988, 21003), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (21001, 21003), True, 'import apache_beam as beam\n'), ((21736, 21774), 'apache_beam.testing.util.assert_that', 'util.assert_that', (['result', 'check_result'], {}), '(result, check_result)\n', (21752, 21774), False, 'from apache_beam.testing import util\n'), ((21979, 21994), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (21992, 21994), True, 'import apache_beam as beam\n'), ((22592, 22630), 'apache_beam.testing.util.assert_that', 'util.assert_that', (['result', 'check_result'], {}), '(result, check_result)\n', (22608, 22630), False, 'from apache_beam.testing import util\n'), ((23364, 23379), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {}), '()\n', (23377, 23379), True, 'import apache_beam as beam\n'), ((23791, 23834), 'apache_beam.testing.util.assert_that', 'util.assert_that', (['output_dict', 'check_output'], {}), '(output_dict, check_output)\n', (23807, 23834), False, 'from apache_beam.testing import util\n'), ((25153, 25213), 'tensorflow_model_analysis.slicer.slicer_lib.slice_key_matches_slice_specs', 'slicer.slice_key_matches_slice_specs', (['slice_key', 'slice_specs'], {}), '(slice_key, slice_specs)\n', (25189, 25213), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((2691, 2706), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (2699, 2706), True, 'import numpy as np\n'), ((6015, 6039), 
'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (6037, 6039), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((6089, 6128), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']"}), "(columns=['age'])\n", (6111, 6128), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((6138, 6162), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (6160, 6162), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((6172, 6217), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'features': "[('age', 5)]"}), "(features=[('age', 5)])\n", (6194, 6217), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((6227, 6294), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['age']", 'features': "[('gender', 'f')]"}), "(columns=['age'], features=[('gender', 'f')])\n", (6249, 6294), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((6797, 6864), 'tensorflow_model_analysis.slicer.slicer_lib.get_slices_for_features_dicts', 'slicer.get_slices_for_features_dicts', (['[features_dict]', 'None', '[spec]'], {}), '([features_dict], None, [spec])\n', (6833, 6864), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((12183, 12220), 'tensorflow_model_analysis.slicer.slicer_lib.stringify_slice_key', 'slicer.stringify_slice_key', (['slice_key'], {}), '(slice_key)\n', (12209, 12220), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((15112, 15209), 'tensorflow_model_analysis.slicer.slicer_lib.is_cross_slice_applicable', 'slicer.is_cross_slice_applicable', ([], {'cross_slice_key': 'sliced_key', 'cross_slicing_spec': 'slicing_spec'}), 
'(cross_slice_key=sliced_key,\n cross_slicing_spec=slicing_spec)\n', (15144, 15209), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((16394, 16430), 'tensorflow_model_analysis.slicer.slicer_lib.get_slice_key_type', 'slicer.get_slice_key_type', (['slice_key'], {}), '(slice_key)\n', (16419, 16430), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((17051, 17087), 'tensorflow_model_analysis.slicer.slicer_lib.get_slice_key_type', 'slicer.get_slice_key_type', (['slice_key'], {}), '(slice_key)\n', (17076, 17087), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((20553, 20615), 'numpy.array', 'np.array', (["[[(), (('gender', 'f'),)], [(), (('gender', 'f'),)]]"], {}), "([[(), (('gender', 'f'),)], [(), (('gender', 'f'),)]])\n", (20561, 20615), True, 'import numpy as np\n'), ((21868, 21890), 'numpy.array', 'np.array', (['[[()], [()]]'], {}), '([[()], [()]])\n', (21876, 21890), True, 'import numpy as np\n'), ((21939, 21961), 'numpy.array', 'np.array', (['[[()], [()]]'], {}), '([[()], [()]])\n', (21947, 21961), True, 'import numpy as np\n'), ((23993, 24037), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'features': "[('f1', 1)]"}), "(features=[('f1', 1)])\n", (24015, 24037), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((24220, 24268), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'features': "[('f1', '1.0')]"}), "(features=[('f1', '1.0')])\n", (24242, 24268), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((24440, 24478), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['f2']"}), "(columns=['f2'])\n", (24462, 24478), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((24665, 24703), 
'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['f1']"}), "(columns=['f1'])\n", (24687, 24703), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((24719, 24757), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['f2']"}), "(columns=['f2'])\n", (24741, 24757), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((18678, 18699), 'tensorflow_model_analysis.slicer.slicer_lib.FanoutSlices', 'slicer.FanoutSlices', ([], {}), '()\n', (18697, 18699), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((19727, 19748), 'tensorflow_model_analysis.slicer.slicer_lib.FanoutSlices', 'slicer.FanoutSlices', ([], {}), '()\n', (19746, 19748), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((21151, 21172), 'tensorflow_model_analysis.slicer.slicer_lib.FanoutSlices', 'slicer.FanoutSlices', ([], {}), '()\n', (21170, 21172), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((22142, 22163), 'tensorflow_model_analysis.slicer.slicer_lib.FanoutSlices', 'slicer.FanoutSlices', ([], {}), '()\n', (22161, 22163), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((23324, 23353), 'apache_beam.testing.util.BeamAssertException', 'util.BeamAssertException', (['err'], {}), '(err)\n', (23348, 23353), False, 'from apache_beam.testing import util\n'), ((23471, 23501), 'apache_beam.Create', 'beam.Create', (['slice_counts_list'], {}), '(slice_counts_list)\n', (23482, 23501), True, 'import apache_beam as beam\n'), ((23637, 23744), 'tensorflow_model_analysis.slicer.slicer_lib.FilterOutSlices', 'slicer.FilterOutSlices', (['slice_counts_pcoll'], {'min_slice_size': '(2)', 'error_metric_key': 'metric_keys.ERROR_METRIC'}), '(slice_counts_pcoll, min_slice_size=2,\n error_metric_key=metric_keys.ERROR_METRIC)\n', (23659, 23744), True, 
'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((12397, 12421), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {}), '()\n', (12419, 12421), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((12626, 12675), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'a': '1'}"}), "(feature_values={'a': '1'})\n", (12648, 12675), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((12888, 12930), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['a']"}), "(feature_keys=['a'])\n", (12910, 12930), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13145, 13194), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'a': '1'}"}), "(feature_values={'a': '1'})\n", (13167, 13194), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13411, 13460), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'a': '1'}"}), "(feature_values={'a': '1'})\n", (13433, 13460), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13734, 13758), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {}), '()\n', (13756, 13758), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13965, 14014), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'a': '1'}"}), "(feature_values={'a': '1'})\n", (13987, 14014), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((14228, 14270), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['a']"}), "(feature_keys=['a'])\n", (14250, 14270), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), 
((14486, 14535), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'a': '1'}"}), "(feature_values={'a': '1'})\n", (14508, 14535), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((14753, 14802), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'a': '1'}"}), "(feature_values={'a': '1'})\n", (14775, 14802), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((19193, 19222), 'apache_beam.testing.util.BeamAssertException', 'util.BeamAssertException', (['err'], {}), '(err)\n', (19217, 19222), False, 'from apache_beam.testing import util\n'), ((20164, 20193), 'apache_beam.testing.util.BeamAssertException', 'util.BeamAssertException', (['err'], {}), '(err)\n', (20188, 20193), False, 'from apache_beam.testing import util\n'), ((21086, 21120), 'apache_beam.Create', 'beam.Create', (['data'], {'reshuffle': '(False)'}), '(data, reshuffle=False)\n', (21097, 21120), True, 'import apache_beam as beam\n'), ((21699, 21728), 'apache_beam.testing.util.BeamAssertException', 'util.BeamAssertException', (['err'], {}), '(err)\n', (21723, 21728), False, 'from apache_beam.testing import util\n'), ((22077, 22111), 'apache_beam.Create', 'beam.Create', (['data'], {'reshuffle': '(False)'}), '(data, reshuffle=False)\n', (22088, 22111), True, 'import apache_beam as beam\n'), ((22555, 22584), 'apache_beam.testing.util.BeamAssertException', 'util.BeamAssertException', (['err'], {}), '(err)\n', (22579, 22584), False, 'from apache_beam.testing import util\n'), ((23579, 23603), 'apache_beam.Create', 'beam.Create', (['values_list'], {}), '(values_list)\n', (23590, 23603), True, 'import apache_beam as beam\n'), ((12450, 12499), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'b': '2'}"}), "(feature_values={'b': '2'})\n", (12472, 12499), False, 'from tensorflow_model_analysis.proto 
import config_pb2\n'), ((12705, 12754), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'b': '2'}"}), "(feature_values={'b': '2'})\n", (12727, 12754), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((12960, 13009), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'b': '2'}"}), "(feature_values={'b': '2'})\n", (12982, 13009), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13224, 13266), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['b']"}), "(feature_keys=['b'])\n", (13246, 13266), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13490, 13532), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['b']"}), "(feature_keys=['b'])\n", (13512, 13532), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13562, 13604), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['c']"}), "(feature_keys=['c'])\n", (13584, 13604), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((13788, 13837), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'b': '2'}"}), "(feature_values={'b': '2'})\n", (13810, 13837), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((14044, 14093), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'b': '2'}"}), "(feature_values={'b': '2'})\n", (14066, 14093), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((14300, 14349), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_values': "{'b': '2'}"}), "(feature_values={'b': '2'})\n", (14322, 14349), False, 'from 
tensorflow_model_analysis.proto import config_pb2\n'), ((14565, 14607), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['b']"}), "(feature_keys=['b'])\n", (14587, 14607), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((14832, 14874), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['b']"}), "(feature_keys=['b'])\n", (14854, 14874), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((14904, 14946), 'tensorflow_model_analysis.proto.config_pb2.SlicingSpec', 'config_pb2.SlicingSpec', ([], {'feature_keys': "['c']"}), "(feature_keys=['c'])\n", (14926, 14946), False, 'from tensorflow_model_analysis.proto import config_pb2\n'), ((18518, 18536), 'apache_beam.Map', 'beam.Map', (['wrap_fpl'], {}), '(wrap_fpl)\n', (18526, 18536), True, 'import apache_beam as beam\n'), ((19498, 19516), 'apache_beam.Map', 'beam.Map', (['wrap_fpl'], {}), '(wrap_fpl)\n', (19506, 19516), True, 'import apache_beam as beam\n'), ((18474, 18491), 'apache_beam.Create', 'beam.Create', (['fpls'], {}), '(fpls)\n', (18485, 18491), True, 'import apache_beam as beam\n'), ((18621, 18645), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (18643, 18645), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((19437, 19471), 'apache_beam.Create', 'beam.Create', (['fpls'], {'reshuffle': '(False)'}), '(fpls, reshuffle=False)\n', (19448, 19471), True, 'import apache_beam as beam\n'), ((19601, 19625), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {}), '()\n', (19623, 19625), True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n'), ((19641, 19683), 'tensorflow_model_analysis.slicer.slicer_lib.SingleSliceSpec', 'slicer.SingleSliceSpec', ([], {'columns': "['gender']"}), "(columns=['gender'])\n", (19663, 19683), 
True, 'from tensorflow_model_analysis.slicer import slicer_lib as slicer\n')] |
# Copyright (c) 2021, <NAME>
# Licensed under BSD 3-Clause License. See LICENSE.txt for details.
import math

import numpy as np

import gm
def poisson(k, lam, cumulative=False):
    """Poisson pmf P(X = k) (or cdf P(X <= k)) with rate ``lam``.

    Vectorised in ``lam`` (a numpy array of rates works), but ``k`` must be a
    scalar non-negative int.  For large ``k`` the distribution is approximated
    by a normal N(lam, lam) via ``gm.normal``.

    :param k: number of events (non-negative int).
    :param lam: rate parameter(s); scalar or numpy array.
    :param cumulative: if True, return the cdf instead of the pmf.
    :return: probability value(s), same shape as ``lam``.
    """
    threshold = 23  # beyond this k, switch to the normal approximation
    if k < threshold:  # exact Poisson
        if cumulative:
            # P(X <= k) = exp(-lam) * sum_{i=0}^{k} lam^i / i!
            res = 0.
            for i in range(k + 1):
                # math.factorial: np.math was a private alias removed in
                # NumPy >= 2.0, so use the stdlib directly.
                res += (lam**i) / math.factorial(i)
            return np.exp(-lam) * res
        else:
            return lam**k * np.exp(-lam) / math.factorial(k)
    else:  # normal approximation with mean lam and std sqrt(lam)
        if cumulative:
            return gm.normal(lam, np.sqrt(lam), k, cumulative=True)
        else:
            return gm.normal(lam, np.sqrt(lam), k)
#
# import matplotlib.pyplot as plt
#
# if __name__ == '__main__':
#
# ns=1000
# x=np.linspace(0,100,ns)
#     fig, (ax1, ax2) = plt.subplots(1, 2)
# for i in range(1,99):
# ax1.plot(x,poisson(i,x),color='0' if i<30 else '0.')
# plt.show()
| [
"numpy.math.factorial",
"numpy.exp",
"numpy.sqrt"
] | [((460, 472), 'numpy.exp', 'np.exp', (['(-lam)'], {}), '(-lam)\n', (466, 472), True, 'import numpy as np\n'), ((533, 553), 'numpy.math.factorial', 'np.math.factorial', (['k'], {}), '(k)\n', (550, 553), True, 'import numpy as np\n'), ((640, 652), 'numpy.sqrt', 'np.sqrt', (['lam'], {}), '(lam)\n', (647, 652), True, 'import numpy as np\n'), ((718, 730), 'numpy.sqrt', 'np.sqrt', (['lam'], {}), '(lam)\n', (725, 730), True, 'import numpy as np\n'), ((422, 442), 'numpy.math.factorial', 'np.math.factorial', (['i'], {}), '(i)\n', (439, 442), True, 'import numpy as np\n'), ((518, 530), 'numpy.exp', 'np.exp', (['(-lam)'], {}), '(-lam)\n', (524, 530), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 09:32:12 2020
REF: https://github.com/sichkar-valentyn/Reinforcement_Learning_in_Python/blob/master/RL_Q-Learning_E1/env.py
@author: HG19230
"""
import numpy as np # To deal with data in form of matrices
import math
import random
#import tkinter as tk # To build GUI
import gym
from gym import spaces
from drone import drone
from mobileTarget import mobileTarget
from MobilityPatterns.RPGM_Mobility import RPGM_Mobility
route = {}
class OneDroneMobileTargetsEnv (gym.Env):
metadata = {'render.modes': ['human']}
class cell(object):
def __init__(self, ID, x, y, numTs):
self.cell= (x,y)
self.cellID= ID
self.numTargets=numTs
self.numUnCoveredTargets= 0
self.TargetsIndicators=[0]*numTs
self.TargetsCoverageIndicators=[0]*numTs
def addTarget(self, tID):
self.numUnCoveredTargets+=1
self.TargetsIndicators[tID]=1
print("cell "+ str(self.cell[0])+", "+str(self.cell[1])+": "+ str(self.numUnCoveredTargets))
def removeTarget(self, tID):
self.TargetsIndicators[tID]=1
self.TargetsCoverageIndicators=[0]*self.numTargets
def cellCovered(self):
self.numUnCoveredTargets= 0
print("cell covered "+ str(self.numUnCoveredTargets))
def setTargetCovered(self, tID):
self.TargetsCoverageIndicators[tID]=1
def resetTargetsIndicators(self):
self.TargetsIndicators=[0]*self.numTargets
self.TargetsCoverageIndicators=[0]*self.numTargets
    def __init__(self, stepsLimitPerEpisode, areaSize, numTargets, droneCoverageDiameter, numCells, cellSideSize, mobilityModel, reward_scale, testing, testDatFolder, stateRep):
        """Build the single-drone, mobile-targets gym environment.

        :param stepsLimitPerEpisode: episode step budget (used as-is in both
            branches below; the training alternative is commented out).
        :param areaSize: total area; assumed square, so side = sqrt(areaSize).
        :param numTargets: number of mobile targets.
        :param droneCoverageDiameter: NOTE(review): unused here — the cell
            size comes from ``cellSideSize`` (see commented
            calculateGridCellSize call). Confirm before removing.
        :param numCells: number of grid cells.
        :param cellSideSize: side length of each square grid cell.
        :param mobilityModel: target mobility model name (e.g. 'RPGM').
        :param reward_scale: scaling factor for rewards.
        :param testing: True when evaluating rather than training.
        :param testDatFolder: folder with per-target test mobility data.
        :param stateRep: state-representation selector.
        """
        self.testing= testing
        self.testFilesFolder= testDatFolder
        self.MobilityModel= mobilityModel
        super(OneDroneMobileTargetsEnv, self).__init__()
        self.trainingStarted= False
        # Side length of the (square) operation area.
        self.areaDimLimit= np.sqrt(areaSize)
        self.stateRep= stateRep
        print("areaDimLimit: "+ str(self.areaDimLimit))
        self.targets= []
        self.route=[]
        #self.targetsVelocity= 1.4 #m/s
        self.gridlowCorners=[]
        self.reward_scale= reward_scale
        self.cells={}
        self.numTargets= numTargets
        self.cellSideSize= cellSideSize #self.calculateGridCellSize(droneCoverageDiameter)
        self.numCells= numCells
        print("side:"+ str(self.cellSideSize))
        # Populates self.cells / self.gridlowCorners before spaces are built.
        self.buildGrid(areaSize)
        self.n_actions = 9#9
        self.action_space= spaces.Discrete(self.n_actions)
        # Observation: one value per target plus 2 (drone x, y presumably —
        # TODO(review): confirm against the step()/state construction).
        self.state_size= numTargets+2
        self.observation_space= spaces.Box(low=0, high= 200000, shape=(1,(self.numTargets+2)))#shape=(1,(2))) #
        #shape=(1,(2))) #shape=(1,(self.numTargets+2)))
        self.timeStep=0
        self.totalSteps=0
        self.TargetsCoverageIndicators=[0]*self.numTargets
        print(self.cells)
        #self.createTargets()
        self.epReward=0
        self.numEpisodes=0
        print(self.targets)
        # Both branches currently assign the same value; the training-time
        # alternatives were left as trailing comments.
        if(testing):
            self.stepsLimit= stepsLimitPerEpisode
        else:
            self.stepsLimit= stepsLimitPerEpisode #math.ceil(len(self.cells)+0.5*(len(self.cells))) #len(self.cells)# math.ceil(len(self.cells)+0.5*(len(self.cells))) #math.ceil(len(self.cells)+0.5*(len(self.cells))) #len(self.cells) # math.ceil(len(self.cells)+0.5*(len(self.cells)))
        # Per-run stats file; header row is written immediately.
        self.trainingFileName= "TrainingFiles/Training_A"+str(int(self.areaDimLimit))+"_T"+str(numTargets)+"_"+self.MobilityModel +"_"+str(self.stepsLimit)+".dat"
        with open(self.trainingFileName, "w") as statsfile:
            statsfile.write('EpNum'+'\t'+'EpSteps'+'\t'+'Reward'+'\t'+'ener'+'\t'+'dist'+'\t'+'NumCovTargets'+'\n')
        #add hovering later
        #self.action_space = ['up', 'down', 'left', 'right', 'dUpL', 'dUpR', 'dDwnL', 'dDwnR','hover']
        #for i in range(len(self.gridlowCorners)):
        #    self.action_space.append(''+str(i))
        print(self.action_space)
        self.n_actions =9#9#len(self.action_space)
        print(self.n_actions)
        # Dictionaries to draw the final route
        self.d = {}
        self.f = {}
        # Key for the dictionaries
        self.i = 0
        # Writing the final dictionary first time
        self.c = True
        # Showing the steps for longest found route
        self.longest = 0
        # Showing the steps for the shortest route
        self.shortest = 0
        # with open("RPGM_Locs.dat", "w") as rpgm:
        #     rpgm.write('EpNum'+'\t'+'EpSteps'+'\t'+'NodeLocs'+'\n')
def findCellForPoint(self, xp, yp):
xCell= math.floor(xp/self.cellSideSize)
yCell= math.floor(yp/self.cellSideSize)
xCell*=self.cellSideSize
yCell*=self.cellSideSize
return(xCell, yCell)
    def createTargets(self, randomLoc):
        """Create the mobile targets and register each one in its grid cell.

        Each target starts at a uniformly random location in the area; RPGM
        targets additionally get their initial position from the precomputed
        RPGM trace and a per-target trace log file.

        :param randomLoc: forwarded to mobileTarget — presumably controls
            random vs. fixed initial placement (TODO(review): confirm).
        """
        for i in range(0, self.numTargets):
            #(self, initiLocX, initialLocY, gridLowCorners, numCellsPerSide, cellSideSize, dimLimit):
            t= mobileTarget(i, random.random()*self.areaDimLimit, random.random()*self.areaDimLimit, self.gridlowCorners, self.numCellsPerSide, self.cellSideSize, self.areaDimLimit, self.MobilityModel, randomLoc, self.testing, self.testFilesFolder, self.timeStepsScale, self.stepsLimit)
            print(t.currentLocation)
            if(self.MobilityModel=='RPGM'):
                # NOTE(review): this file handle is never closed here —
                # confirm the target closes it when the run ends.
                t.realMobFile= open("RealMobTesting/RPGMRealMobility_N"+str(t.ID)+".dat", "w")
                t.currentLocation= self.RPGM.moveNode(t.ID, self.timeStep)
                t.currentCell= t.findCell(self.gridlowCorners, self.numCellsPerSide, self.cellSideSize)
            # Register the target with the cell its location falls into.
            tCell= self.findCellForPoint(t.currentLocation[0], t.currentLocation[1])
            print('finding cell for target'+ str(t.currentLocation))
            print(tCell)
            cell= self.cells[tCell]
            cell.addTarget(i)
            self.targets.append(t)
def placeDrone(self, drone):
drone.setRandomInitialLocation(self.gridlowCorners, self.cellSideSize, self.targets)
[covTPerc, covTsIDs, oldCovTsIDs]= drone.filterCoveredTargets(self.targets, False, self.cellSideSize)
drone.numTargetsCoveredInitially= len(drone.coveredTargets)
self.drone=drone
self.timeStepsScale= self.cellSideSize/ self.drone.speed
if(self.MobilityModel=='RPGM'):
self.maxStepsPerEpisode= self.stepsLimit+1
maxTargetsPerGroup= 2
numGroups= math.ceil(int(self.numTargets/maxTargetsPerGroup))
print(numGroups)
self.RPGM= RPGM_Mobility(self.areaDimLimit, self.areaDimLimit, numGroups, self.numTargets, self.maxStepsPerEpisode, self.timeStepsScale)
self.RPGM.generateMobility()
def buildGrid(self, areaSize):
# self.areaNumCells= int(round(areaSize / np.power(self.cellSideSize, 2)))
print(self.numCells)
areaDimMax= np.sqrt(areaSize)
print(areaDimMax)
self.maxLowerDim= self.areaDimLimit-self.cellSideSize
self.cornerCells= [(0,0), (0, self.maxLowerDim), (self.maxLowerDim, 0), (self.maxLowerDim, self.maxLowerDim)]
self.OtherSideCells= []
self.numCellsPerSide= int(areaDimMax/self.cellSideSize)
indx=0
for x in range (0, self.numCellsPerSide):
for y in range (0, self.numCellsPerSide):
print(x, y)
cell= (x*self.cellSideSize,y*self.cellSideSize)
self.gridlowCorners.append(cell)
currState= self.cell( self.gridlowCorners.index(cell), x*self.cellSideSize,y*self.cellSideSize, self.numTargets)
self.cells[(x*self.cellSideSize,y*self.cellSideSize)]=currState
indx+=1
if(x ==0 or y==0 or x==self.numCellsPerSide or y==self.numCellsPerSide):
self.OtherSideCells.append(cell)
self.OtherSideCells = list(set(self.OtherSideCells) - set(self.cornerCells))
# xlin = np.linspace(0+self.cellSideSize, areaDimMax, self.areaNumCells);
# [X,Y] = np.meshgrid(xlin, xlin, sparse=True)
# #print(X)
# Y= np.rollaxis(Y, 0, Y.ndim)
# #print(Y)
# self.gridTopCorners= list(zip(X[0], Y[0]))
# gridCenters = np.mgrid[self.cellSideSize/2:self.areaNumCells*self.cellSideSize:self.cellSideSize,
# self.cellSideSize/2:self.areaNumCells*self.cellSideSize:self.cellSideSize]
# gridCenters = np.rollaxis(gridCenters, 0, gridCenters.ndim)
# print(gridCenters)
print(self.gridlowCorners)
def calculateGridCellSize(self, droneCoverageDiameter):
squareDiagonal= droneCoverageDiameter
squareSide= squareDiagonal / np.sqrt(2)
return int(round(squareSide))
# Function to refresh the environment
def render(self):
#time.sleep(0.03)
self.update()
# Function to reset the environment and start new Episode
def getState(self, state, newCovTsIDs, oldCovTsIDs):
print('getting STATE INFO ===== '+ str(state.cellID))
currentSeenTs= [0]*self.numTargets
for t in self.targets:
if(t.ID in newCovTsIDs or t.ID in oldCovTsIDs):
currentSeenTs[t.ID]=1
if(self.stateRep=='TargetCov'):
stateInfo = [state.cellID, self.timeStep] + self.TargetsCoverageIndicators #+ list(self.drone.coveredTargets.keys())+[-1]*remTs
else:
stateInfo = [state.cellID, self.timeStep] + currentSeenTs #+ list(self.drone.coveredTargets.keys())+[-1]*remTs
reshapedStateInfo= np.reshape(stateInfo, [1, self.state_size])
print('((((((((((((((( State ))))))))'+ str(reshapedStateInfo))
return reshapedStateInfo
    def reset(self):
        """Start a new episode.

        During training (timeStep > 0, trainingStarted, not testing) the finished
        episode's statistics are appended to the training file first. Then the
        drone, all cells and all targets are reset, per-episode counters are
        cleared, and the initial observation vector is returned.
        """
        #self.update()
        #time.sleep(0.1)
        print('ENV RESET ================')
        self.route=[]
        # log the completed episode's stats (training runs only)
        if(self.timeStep> 0 and self.trainingStarted and not self.testing):
            self.numEpisodes+=1
            with open(self.trainingFileName, "a") as statsfile:
                statsfile.write(str(self.numEpisodes)+'\t'+str(self.timeStep)+'\t'+str(self.epReward)+'\t'+str(self.drone.totalEnergy)+'\t'+str(self.drone.totalTravelledDistance)+'\t'+str(len(self.drone.coveredTargets))+'\t'+str(self.drone.route)+'\n')
        self.drone.reset(self.gridlowCorners, self.cellSideSize, self.targets)
        self.drone.resetLocation()
        self.timeStep=0
        self.epReward=0
        # reset per-cell bookkeeping (cellCovered/resetTargetsIndicators
        # semantics live in the cell class)
        for s in self.cells:
            self.cells[s].cellCovered()
            self.cells[s].resetTargetsIndicators()
        # re-seed every target at its initial cell
        for t in self.targets:
            t.reset(self.testing, self.testFilesFolder)
            cell= t.initialCell
            self.cells[cell].addTarget(t.ID)
        self.TargetsCoverageIndicators=[0]*self.numTargets
        #
        # Clearing the dictionary and the i
        self.d = {}
        self.i = 0
        if(self.MobilityModel=='RPGM'):
            # regenerate the group-mobility trace for the new episode
            self.RPGM.generateMobility()
        # with open("RPGM_Locs.dat", "w") as rpgm:
        #     rpgm.write('EpNum'+'\t'+'EpSteps'+'\t'+'NodeLocs'+'\n')
        currState= self.cells[self.drone.currCell]
        # stateInfo = [currState.cellID, self.timeStep] + self.TargetsCoverageIndicators #+ list(self.drone.coveredTargets.keys())+[-1]*remTs
        # reshapedStateInfo= np.reshape(stateInfo, [1, self.state_size])
        reshapedStateInfo= self.getState(currState, [], [])
        #return [currState.cellID, currState.numUnCoveredTargets, len(self.drone.coveredTargets)]#[currState.cellID, self.timeStep]
        return reshapedStateInfo#[currState.cellID, self.timeStep]
        #return drone.currentCell
#return drone.currentCell
# Function to get the next observation and reward by doing next step
    def step(self, action):
        """Advance the environment by one time step.

        On the very first step the targets already covered at the initial drone
        position are rewarded; if that already covers all targets the episode
        ends immediately. Otherwise: every target is moved according to the
        mobility model, the action is applied (0-7 move the drone to one of the
        8 neighbouring cells, anything else hovers; moves that would leave the
        area are treated as hovering in place), the coverage reward and energy
        cost are computed, and [observation, reward, done, info] is returned in
        gym style.

        :param action: integer action index (0-8)
        """
        if(self.timeStep == 0):
            # reward targets covered by the drone's random initial placement
            [covTPerc, newCovTsIDs, oldCovTsIDs]= self.drone.filterCoveredTargets(self.targets, False, self.cellSideSize)
            self.drone.numTargetsCoveredInitially= len(self.drone.coveredTargets)
            for t in newCovTsIDs:
                self.TargetsCoverageIndicators[t]= 1
            print("numTargets covered initially: "+ str(self.drone.numTargetsCoveredInitially))
            initReward= self.drone.numTargetsCoveredInitially * self.reward_scale
            # everything covered at spawn: episode over before any action
            if(self.drone.numTargetsCoveredInitially == len(self.targets)):
                done=True
                reward= initReward
                currState= self.cells[self.drone.currCell]
                newState= self.cell(currState.cellID, currState.cell[0], currState.cell[1], self.timeStep)
                reshapedStateInfo= self.getState(newState, newCovTsIDs, oldCovTsIDs)
                self.epReward+=reward
                return [reshapedStateInfo, reward, done, {}]
        else:
            initReward=0
        self.timeStep+=1 ############# increment before stepping
        currState= self.cells[self.drone.currCell]
        self.route.append(self.drone.currCell)
        print("env step "+str(self.timeStep)+" / "+ str(self.stepsLimit)+" function +++++++++++++State: ("+str(self.drone.currLocation[0])+","+str(self.drone.currLocation[1])+","+str(len(self.drone.coveredTargets))+") >>> Action: "+ str(action)+" +++++++++++++++++++++++++++++++++++++++++++++++++++++++")
        for cell in self.cells:
            self.cells[cell].resetTargetsIndicators()
        # move every target one step according to the mobility model
        for t in self.targets:
            #t.move(self.cellSideSize, self.areaDimLimit)
            # with open("RPGM_Locs.dat", "a") as rpgm:
            # rpgm.write(self.timeStep)
            tPrevLoc= t.currentLocation
            if(self.MobilityModel== 'RPGM'):
                t.currentLocation= self.RPGM.moveNode(t.ID, self.timeStep)
                t.currentCell= t.findCell(self.gridlowCorners, self.numCellsPerSide, self.cellSideSize)
                t.realMobFile.write(str(t.currentLocation[0])+'\t'+str(t.currentLocation[1])+'\n')
                # rpgm.write('\t'+t.currentCell)
            else:
                t.step(self.gridlowCorners, self.testing)
            print('=================== MOVED TARGET '+ str(t.ID)+" >> "+ str(t.currentLocation))
            # mitigate time Scale effect ###################################################
            #distance betwen the two points
            # dist= math.sqrt((t.currentLocation[0]-tPrevLoc[0])**2 + (t.currentLocation[1]-tPrevLoc[1])**2)
            # if(dist > 0):
            # newDist= self.timeStepsScale * dist
            # ratio= newDist/dist
            # newX= (1-ratio)*tPrevLoc[0] + ratio * t.currentLocation[0]
            # newY= (1-ratio)*tPrevLoc[1] + ratio * t.currentLocation[1]
            # t.currentLocation = (newX, newY)
            # t.currentCell= t.findCell(self.gridlowCorners, self.numCellsPerSide, self.cellSideSize)
            # rpgm.write('\n')
            #self.cells[t.currentCell].addTarget(t.ID)
        # Current state of the drone
        base_action = np.array([0, 0])
        # Updating next state according to the action
        # Action 'up'
        #if action == 0:
        # [cov, ener]= drone.rotate(self.targets, self.cellSideSize)
        # Action 'hover'
        # elif action == 1:
        # [cov, ener]= drone.hover(self.targetsVelocity, self.cellSideSize*2, self.targets, self.cellSideSize)
        # # Action 'goToCell action-2'
        # else:
        # cell= self.gridlowCorners[action-1] #[action-2]
        #[0'up', 1'down', 2'left', 3'right', 4'dUpL', 5'dUpR', 6'dDwnL', 7'dDwnR', 8'hover']
        # map the action index to the candidate destination cell
        if action == 0: #up: increase y
            cell= (self.drone.currCell[0], self.drone.currCell[1]+ self.cellSideSize)
        elif action == 1: #down: decrease y
            cell= (self.drone.currCell[0], self.drone.currCell[1]- self.cellSideSize)
        elif action == 2: #left: decrease x
            cell= (self.drone.currCell[0]-self.cellSideSize , self.drone.currCell[1])
        elif action == 3: #right: increase x
            cell= (self.drone.currCell[0]+self.cellSideSize , self.drone.currCell[1])
        elif action == 4: #dUpL: increase y , decrease x
            cell= (self.drone.currCell[0]-self.cellSideSize , self.drone.currCell[1]+self.cellSideSize)
        elif action == 5: #dUpR: increase y , icrease x
            cell= (self.drone.currCell[0]+self.cellSideSize , self.drone.currCell[1]+self.cellSideSize)
        elif action == 6: #dDwnL: decrease y , decrease x
            cell= (self.drone.currCell[0]-self.cellSideSize , self.drone.currCell[1]-self.cellSideSize)
        elif action == 7: #dDwnR: decrease y , increase x
            cell= (self.drone.currCell[0]+self.cellSideSize , self.drone.currCell[1]-self.cellSideSize)
        else: # hovering
            cell= self.drone.currCell
            #targetsCoveredBefore= drone.coveredTargets
            # hovering: stay in place, pay hovering energy, reward new coverage
            [reward, newCovTsIDs, oldCovTsIDs]= self.drone.filterCoveredTargets(self.targets, False, self.cellSideSize)
            #reward= reward/self.numTargets
            #targetsCoveredAfter= drone.coveredTargets
            for t in newCovTsIDs:
                self.TargetsCoverageIndicators[t]= 1
            # NOTE(review): local name `time` shadows any time module in this scope
            time= self.cellSideSize/self.drone.speed
            ener= (time * self.drone.hoveringEnergyPerSec_J)
            self.drone.totalEnergy+= ener
            if(reward==0):
                reward = -1
            else:
                reward*=self.reward_scale
            print("Calculating Reward ++++ ("+str(reward)+") +++ "+str(ener)+ " ++++ "+ str(self.drone.totalEnergy))
            #(self.drone.totalEnergy > self.drone.energyCapacity) or
            if(len(self.drone.coveredTargets) == len(self.targets) or self.timeStep >= self.stepsLimit):
                # reward= -100
                done= True
                print("done at hovering")
            else:
                done= False
            #self.timeStep+=1
            currState= self.cells[cell]
            newState= self.cell(currState.cellID, currState.cell[0], currState.cell[1], self.timeStep)
            self.epReward+=(reward+initReward)
            # stateInfo = [newState.cellID, self.timeStep] + self.TargetsCoverageIndicators #list(self.drone.coveredTargets.keys())+[-1]*remTs
            # reshapedStateInfo= np.reshape(stateInfo, [1, self.state_size])
            reshapedStateInfo= self.getState(newState, newCovTsIDs, oldCovTsIDs)
            print("nextStateInfo "+str(currState.cell))
            print(reshapedStateInfo)
            print("action: "+str(action)+"...reward: "+ str(reward)+'-----------------------------------')
            return [reshapedStateInfo, initReward+reward, done, {}]
            #return [cell, reward, done]
        ########## finished hovering action ###############
        #if action is not hovering
        nextX= cell[0]
        nextY= cell[1]
        # a move that would leave the area keeps the drone in place
        if(nextY >= self.areaDimLimit or nextX>=self.areaDimLimit or nextX<0 or nextY<0):
            print('Invalid Action ---------------------------')
            cell= self.drone.currCell
            currState= self.cells[cell]
            print('nextCell: ('+ str(nextX)+", "+ str(nextY)+')'+ str(currState.numUnCoveredTargets))
            [reward, newCovTsIDs, oldCovTsIDs]= self.drone.filterCoveredTargets(self.targets, False, self.cellSideSize)
            #reward=reward/self.numTargets
            print(newCovTsIDs)
            for t in newCovTsIDs:
                print(t)
                print(self.TargetsCoverageIndicators[t])
                self.TargetsCoverageIndicators[t]=1
            # NOTE(review): local name `time` shadows any time module in this scope
            time= self.cellSideSize/self.drone.speed
            ener=0
            ener= (time * self.drone.hoveringEnergyPerSec_J)
            self.drone.totalEnergy+= ener
            if(reward==0):
                reward = -1
            else:
                reward*=self.reward_scale
            # currState= self.states[cell]
            # currState.cellCovered()
            #reward -=10 #adding extra punishment for invalid action
            print("Calculating Reward ++++ ("+str(reward)+") +++ "+str(ener)+ " ++++ "+ str(self.drone.totalEnergy))
            #self.drone.totalEnergy > self.drone.energyCapacity) or
            if (len(self.drone.coveredTargets) == len(self.targets) or self.timeStep >= self.stepsLimit):
                # reward= -100
                done= True
                print("done at invalid action")
            else:
                done= False
            currState= self.cells[cell]
            newState= self.cell(currState.cellID, currState.cell[0], currState.cell[1], self.timeStep)
            self.epReward+=(reward+initReward)
            # stateInfo = [newState.cellID, self.timeStep] + self.TargetsCoverageIndicators #list(self.drone.coveredTargets.keys())+[-1]*remTs
            # reshapedStateInfo= np.reshape(stateInfo, [1, self.state_size])
            reshapedStateInfo= self.getState(newState, newCovTsIDs, oldCovTsIDs)
            print("nextStateInfo "+str(newState.cell))
            print(reshapedStateInfo)
            print("action: "+str(action)+"...reward: "+ str(reward)+'-----------------------')
            return [reshapedStateInfo, initReward+reward, done, {}]
            #return [cell, reward, done]
        else: ########### valid transition ####################
            currState= self.cells[cell]
            print('valid action ++++ nextCell: ('+ str(nextX)+", "+ str(nextY)+')'+ str(currState.numUnCoveredTargets))
            actionValid= True
            # the drone's move() returns coverage, new/old covered IDs and energy cost
            [cov, newCovTsIDs, oldCovTsIDs, ener]= self.drone.move(cell, self.cellSideSize, self.targets )
            print(newCovTsIDs)
            for t in newCovTsIDs:
                print(t)
                print(self.TargetsCoverageIndicators[t])
                self.TargetsCoverageIndicators[t]=1
            # Calculating the reward for the agent
            #self.drone.totalEnergy > self.drone.energyCapacity) or
            if(len(self.drone.coveredTargets) == len(self.targets) or self.timeStep >= self.stepsLimit):
                # reward= -100
                done= True
                print("done at other actions totalEner: "+ str(self.drone.totalEnergy> self.drone.energyCapacity)+ ", "+ str((len(self.drone.coveredTargets) == len(self.targets))))
            else:
                done= False
            #elif len(agent.coveredTargets) == len(self.targets):
            # reward= +100
            # done= True
            # elif cov==0:
            if cov==0:
                # reward= -0.1
                reward= -1
                # done= False
            else:
                #reward = (cov/ener)
                reward = cov*self.reward_scale#*len(self.targets)
                # currState= self.states[cell]
                # currState.cellCovered()
                # done = False
            print("Calculating Reward ++++ ("+str(reward)+") +++ "+str(ener)+ " ++++ "+ str(self.drone.totalEnergy))
            next_state = self.drone.currCell
            print("done?????????????"+ str(done))
            print(self.drone.totalEnergy > self.drone.energyCapacity)
            print("num covered targets: "+ str(len(self.drone.coveredTargets)))
            print(len(self.drone.coveredTargets) == len(self.targets))
            currState= self.cells[next_state]
            newState= self.cell(currState.cellID, currState.cell[0], currState.cell[1], self.timeStep)
            self.epReward+=(reward+initReward)
            #return [[newState.cellID, self.timeStep], reward, done, {}]
            #return [[newState.cellID, reward, len(self.drone.coveredTargets)], reward, done, {}]
            remTs= self.numTargets - len(self.drone.coveredTargets)
            # stateInfo = [newState.cellID, self.timeStep] + self.TargetsCoverageIndicators #list(self.drone.coveredTargets.keys())+[-1]*remTs
            # reshapedStateInfo= np.reshape(stateInfo, [1, self.state_size])
            reshapedStateInfo= self.getState(newState, newCovTsIDs, oldCovTsIDs)
            print("nextStateInfo "+str(newState.cell))
            print(reshapedStateInfo)
            print("action: "+str(action)+"...reward: "+ str(reward)+'-----------------------------')
            return [reshapedStateInfo, initReward+reward, done, {}]
            #return [next_state, reward, done]
def final(self, drone):
# for cell in agent.route:
# print(str(self.gridTopCorners.index(cell)) + " >> ")
print(drone.route)
def final_states(self, drone):
return drone.route | [
"drone.drone.filterCoveredTargets",
"math.floor",
"gym.spaces.Discrete",
"random.random",
"numpy.array",
"gym.spaces.Box",
"numpy.reshape",
"MobilityPatterns.RPGM_Mobility.RPGM_Mobility",
"drone.drone.setRandomInitialLocation",
"numpy.sqrt"
] | [((2092, 2109), 'numpy.sqrt', 'np.sqrt', (['areaSize'], {}), '(areaSize)\n', (2099, 2109), True, 'import numpy as np\n'), ((2711, 2742), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.n_actions'], {}), '(self.n_actions)\n', (2726, 2742), False, 'from gym import spaces\n'), ((2813, 2875), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(200000)', 'shape': '(1, self.numTargets + 2)'}), '(low=0, high=200000, shape=(1, self.numTargets + 2))\n', (2823, 2875), False, 'from gym import spaces\n'), ((4935, 4969), 'math.floor', 'math.floor', (['(xp / self.cellSideSize)'], {}), '(xp / self.cellSideSize)\n', (4945, 4969), False, 'import math\n'), ((4983, 5017), 'math.floor', 'math.floor', (['(yp / self.cellSideSize)'], {}), '(yp / self.cellSideSize)\n', (4993, 5017), False, 'import math\n'), ((6328, 6417), 'drone.drone.setRandomInitialLocation', 'drone.setRandomInitialLocation', (['self.gridlowCorners', 'self.cellSideSize', 'self.targets'], {}), '(self.gridlowCorners, self.cellSideSize, self\n .targets)\n', (6358, 6417), False, 'from drone import drone\n'), ((6456, 6522), 'drone.drone.filterCoveredTargets', 'drone.filterCoveredTargets', (['self.targets', '(False)', 'self.cellSideSize'], {}), '(self.targets, False, self.cellSideSize)\n', (6482, 6522), False, 'from drone import drone\n'), ((7291, 7308), 'numpy.sqrt', 'np.sqrt', (['areaSize'], {}), '(areaSize)\n', (7298, 7308), True, 'import numpy as np\n'), ((10059, 10102), 'numpy.reshape', 'np.reshape', (['stateInfo', '[1, self.state_size]'], {}), '(stateInfo, [1, self.state_size])\n', (10069, 10102), True, 'import numpy as np\n'), ((15866, 15882), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (15874, 15882), True, 'import numpy as np\n'), ((6954, 7084), 'MobilityPatterns.RPGM_Mobility.RPGM_Mobility', 'RPGM_Mobility', (['self.areaDimLimit', 'self.areaDimLimit', 'numGroups', 'self.numTargets', 'self.maxStepsPerEpisode', 'self.timeStepsScale'], {}), '(self.areaDimLimit, self.areaDimLimit, 
numGroups, self.\n numTargets, self.maxStepsPerEpisode, self.timeStepsScale)\n', (6967, 7084), False, 'from MobilityPatterns.RPGM_Mobility import RPGM_Mobility\n'), ((9174, 9184), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9181, 9184), True, 'import numpy as np\n'), ((5356, 5371), 'random.random', 'random.random', ([], {}), '()\n', (5369, 5371), False, 'import random\n'), ((5391, 5406), 'random.random', 'random.random', ([], {}), '()\n', (5404, 5406), False, 'import random\n')] |
"""
Copyright Eskapade:
License Apache-2: https://github.com/KaveIO/Eskapade-Core/blob/master/LICENSE
Reference link:
https://github.com/KaveIO/Eskapade/blob/master/python/eskapade/analysis/histogram_filling.py
All modifications copyright ING WBAA.
"""
import copy
import logging
from collections import defaultdict
import numpy as np
import pandas as pd
from ..primitives.average import Average
from ..primitives.bag import Bag
from ..primitives.bin import Bin
from ..primitives.categorize import Categorize
from ..primitives.centrallybin import CentrallyBin
from ..primitives.deviate import Deviate
from ..primitives.fraction import Fraction
from ..primitives.irregularlybin import IrregularlyBin
from ..primitives.minmax import Maximize, Minimize
from ..primitives.select import Select
from ..primitives.sparselybin import SparselyBin
from ..primitives.stack import Stack
from ..primitives.sum import Sum
from .filling_utils import check_column, check_dtype
class HistogramFillerBase(object):
"""Base class link to fill histograms.
Timestamp features are
converted to nanoseconds before the binning is applied.
Semi-clever auto-binning is applied in case no bin specifications are provided.
Final histograms are stored in the datastore.
"""
def __init__(
self,
features=None,
binning="unit",
bin_specs=None,
time_axis="",
var_dtype=None,
read_key=None,
store_key=None,
nbins_1d=40,
nbins_2d=20,
nbins_3d=10,
max_nunique=500,
):
"""Initialize module instance.
Store and do basic check on the attributes HistogramFillerBase.
:param list features: colums to pick up from input data. (default is all features)
For multi-dimensional histograms, separate the column names with a ":"
Example features list is:
.. code-block:: python
features = ['x', 'date', 'date:x', 'date:y', 'date:x:y']
:param str binning: default binning to revert to in case bin_specs not supplied. options are:
"unit" or "auto", default is "unit". When using "auto", semi-clever binning is automatically done.
:param dict bin_specs: dictionaries used for rebinning numeric or timestamp features
Example bin_specs dictionary is:
.. code-block:: python
bin_specs = {'x': {'binWidth': 1, 'origin': 0},
'y': {'num': 10, 'low': 0.0, 'high': 2.0},
'x:y': [{}, {'num': 5, 'low': 0.0, 'high': 1.0}],
'a': {'edges': [0, 2, 10, 11, 21, 101]},
'b': {'centers': [1, 6, 10.5, 16, 20, 100]},
'c': {'max': True},
'd': {'min': True},
'e': {'sum': True},
'f': {'average': True},
'a:f': [{'edges': [0, 10, 101]}, {'average': True}],
'g': {'thresholds': [0, 2, 10, 11, 21, 101]},
'h': {'bag': True},
}
In the bin specs for x:y, x reverts to the 1-dim setting.
:param str time_axis: name of datetime feature, used as time axis, eg 'date'. if True, will be guessed.
If time_axis is set, if no features given, features becomes: ['date:x', 'date:y', 'date:z'] etc.
:param dict var_dtype: dictionary with specified datatype per feature (optional)
:param str read_key: key of input histogram-dict to read from data store .
(only required when calling transform(datastore) as module)
:param str store_key: key of output data to store in data store
(only required when calling transform(datastore) as module)
:param int nbins_1d: auto-binning number of bins for 1d histograms. default is 40.
:param int nbins_2d: auto-binning number of bins for 2d histograms. default is 20.
:param int nbins_3d: auto-binning number of bins for 3d histograms. default is 10.
:param int max_nunique: auto-binning threshold for unique categorical values. default is 500.
"""
# common logger for histogram filling
self.logger = logging.getLogger()
features = features or []
self.features = [check_column(c) for c in features]
if not any([binning == opt for opt in ["auto", "unit"]]):
raise TypeError('binning should be "auto" or "unit".')
self.binning = binning
self.bin_specs = bin_specs or {}
self.time_axis = time_axis
var_dtype = var_dtype or {}
self.var_dtype = {k: check_dtype(v) for k, v in var_dtype.items()}
self.read_key = read_key
self.store_key = store_key
# several default unit values
self._unit_bin_specs = {"binWidth": 1.0, "origin": 0.0}
self._unit_timestamp_specs = {
"binWidth": pd.Timedelta(days=30).value,
"origin": pd.Timestamp("2010-01-04").value,
}
self._auto_n_bins_1d = nbins_1d
self._auto_n_bins_2d = nbins_2d
self._auto_n_bins_3d = nbins_3d
self._nunique_threshold = max_nunique
# these get filled during execution
self._hists = {}
def set_logger(self, logger):
"""Set logger of module
:param logger: input logger
"""
self.logger = logger
    def assert_dataframe(self, df):
        """Check that df is a dataframe of the expected backend type (abstract).

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("assert_dataframe not implemented!")
    def get_features(self, df):
        """Return the column names of the dataframe (abstract).

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("get_features not implemented!")
    def get_quantiles(self, df, quantiles, columns):
        """return dict with quantiles for given columns (abstract)

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("get_quantiles not implemented!")
    def get_nunique(self, df, columns):
        """return dict with number of unique entries for given columns (abstract)

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("get_nunique not implemented!")
    def process_features(self, df, cols_by_type):
        """Prepare the dataframe for filling, e.g. convert timestamps (abstract).

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("process_features not implemented!")
    def fill_histograms(self, idf):
        """Fill the histograms from the prepared dataframe (abstract).

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("fill_histograms not implemented!")
    def construct_empty_hist(self, features):
        """Create an empty histogram for the given features (abstract).

        Must be overridden by backend-specific subclasses.
        """
        raise NotImplementedError("construct_empty_hist not implemented!")
def _auto_n_bins(self, c):
"""Return number of bins for this histogram
:param list c: list of columns for this histogram
:return: number of bins to use for this histogram
"""
if isinstance(c, str):
c = [c]
if len(self.time_axis) > 0 and c[0] == self.time_axis:
# in case of time-axis, use fine-grained binning
# do this by removing first element, decreasing size of c.
# note that affects original input c, so copy first
c = copy.copy(c)
del c[0]
if len(c) <= 1:
return self._auto_n_bins_1d
elif len(c) == 2:
return self._auto_n_bins_2d
elif len(c) == 3:
return self._auto_n_bins_3d
else:
return self._auto_n_bins_3d
def _execute(self, df):
"""
_execute() does five things:
* check presence and data type of requested features
* timestamp variables are converted to nanosec (integers)
* clever auto-binning is done in case no bin-specs have been provided
* do the actual value counting based on categories and created indices
* then convert to histograms
"""
df = self.assert_dataframe(df)
# 1. check presence and data type of requested features
# sort features into numerical, timestamp and category based
cols_by_type = self.categorize_features(df)
# 2. assign features to make histograms of (if not already provided)
# and figure out time-axis if provided
# check if all features are present in dataframe
self.assign_and_check_features(df, cols_by_type)
# 3. timestamp variables are converted to ns here
idf = self.process_features(df, cols_by_type)
# 4. complete bin-specs that have not been provided in case of 'auto' binning option
if self.binning == "auto":
self.auto_complete_bin_specs(idf, cols_by_type)
# 5. do the actual histogram/counter filling
self.logger.info(
f"Filling {len(self.features)} specified histograms. {self.binning}-binning."
)
self.fill_histograms(idf)
return self._hists
    def assign_and_check_features(self, df, cols_by_type):
        """Auto-assign the features to histogram and run basic checks on them.

        Resolves the time axis (explicit, guessed, or none), builds the default
        feature list when the user supplied none, verifies that every selected
        column exists in the dataframe, and drops huge auto-selected categorical
        columns (>= self._nunique_threshold unique values).

        :param df: input dataframe
        :param cols_by_type: dict of columns classified by type
        """
        # user leaves feature selection up to us
        no_initial_features = len(self.features) == 0
        # note: the "int" set is omitted on purpose — integer columns are also
        # members of "num" (see categorize_features)
        all_cols = (
            list(cols_by_type["num"]) +
            list(cols_by_type["dt"]) +
            list(cols_by_type["bool"]) +
            list(cols_by_type["str"])
        )
        # 1. assign / figure out a time axis
        if isinstance(self.time_axis, str) and len(self.time_axis) > 0:
            # a) specified time axis
            if self.time_axis not in all_cols:
                raise RuntimeError(
                    f'Specified time-axis "{self.time_axis}" not found in dataframe.'
                )
        elif isinstance(self.time_axis, bool) and self.time_axis:
            # b) try to figure out time axis
            self.time_axis = ""
            num = len(cols_by_type["dt"])
            if num == 1:
                # the obvious choice
                self.time_axis = list(cols_by_type["dt"])[0]
                self.logger.info(f'Time-axis automatically set to "{self.time_axis}"')
            elif num == 0:
                self.logger.warning(
                    "No obvious time-axes found to choose from. So not used."
                )
            else:
                self.logger.warning(
                    f'Found {num} time-axes: {cols_by_type["dt"]}. Set *one* time_axis manually! Now NOT used.'
                )
        else:
            # c) no time axis
            self.time_axis = ""
        # 2. assign all features to make histograms of, in case not provided by user
        if no_initial_features:
            if len(self.time_axis) > 0:
                # time-axis is selected: make histograms of all columns in dataframe vs time-axis
                self.features = [
                    [self.time_axis, c]
                    for c in sorted(self.get_features(df))
                    if c != self.time_axis
                ]
            else:
                # make histograms of all columns in dataframe
                self.features = [[c] for c in sorted(self.get_features(df))]
        # 3. check presence of all features (in case provided by user)
        all_selected_cols = np.unique([j for i in self.features for j in i])
        for c in all_selected_cols:
            if c not in self.get_features(df):
                raise RuntimeError(f"Requested feature {c} not in dataframe.")
        # 4. check number of unique entries for categorical features
        # this can be an expensive call, so avoid if possible. do run however when debugging.
        if no_initial_features or self.logger.level == logging.DEBUG:
            str_cols = [c for c in all_selected_cols if c in cols_by_type["str"]]
            nuniq = self.get_nunique(df, str_cols)
            huge_cats = []
            for c in str_cols:
                if nuniq[c] < self._nunique_threshold:
                    continue
                if no_initial_features:
                    # we're the boss. we're not going to histogram this ...
                    huge_cats.append(c)
                else:  # debug mode
                    self.logger.warning(
                        f"Column {c} has {nuniq[c]} unique entries (large). Really histogram it?"
                    )
            # scrub self.features of huge categories.
            self.features = [
                cols
                for cols in self.features
                if not any([c in huge_cats for c in cols])
            ]
    def auto_complete_bin_specs(self, df, cols_by_type):
        """Auto-complete the bin-specs that have not been provided by the user.

        Numeric/timestamp columns without explicit specs get binning derived
        from their data: floats from the 5-95% quantile range (widened by 10%),
        integers from min/max with integer-centred bins. Already-provided specs
        (looked up by the joined "a:b" feature name or the single column name)
        are left untouched.

        :param df: input dataframe
        :param cols_by_type: dict of columns classified by type
        """
        # auto-determine binning of numerical and time features for which no bin_specs exist
        # do this based on range of 5-95% quantiles, so extreme outliers are binned separately
        # otherwise, the idea is to always reuse 1-dim binning for high n-dim, if those exist.
        bs_keys = list(self.bin_specs.keys())  # create initial unchanging list of keys
        all_selected_cols = np.unique([j for i in self.features for j in i])
        cols = list(cols_by_type["num"]) + list(cols_by_type["dt"])
        num_cols = [c for c in all_selected_cols if c in cols and c not in bs_keys]
        # quantiles for bin specs
        int_cols = [c for c in num_cols if c in cols_by_type["int"]]
        quantiles_i = self.get_quantiles(df, quantiles=[0.0, 1.0], columns=int_cols)
        float_cols = [c for c in num_cols if c not in cols_by_type["int"]]
        quantiles_f = self.get_quantiles(df, quantiles=[0.05, 0.95], columns=float_cols)
        for cols in self.features:
            n = ":".join(cols)
            if len(cols) == 1 and n not in num_cols:
                continue
            if n in bs_keys:
                # already provided; will pick that one up
                continue
            # get default number of bins for n-dim histogram
            n_bins = self._auto_n_bins(cols)
            specs = []
            for idx, c in enumerate(cols):
                if c not in num_cols or c in bs_keys:
                    # skip categorical; revert to what is already provided by user at 1dim-level
                    specs.append({})
                    continue
                if c in float_cols:
                    q = quantiles_f[c]
                    # by default, n_bins covers range 5-95% quantiles + we add 10%
                    # basically this gives a nice plot when plotted
                    # specs for Bin and Sparselybin histograms
                    if q[1] == q[0]:
                        # in case of highly imbalanced data it can happen that q05=q95. If so use min and max instead.
                        q = (self.get_quantiles(df, quantiles=[0.0, 1.0], columns=[c]))[
                            c
                        ]
                    qdiff = (q[1] - q[0]) * (1.0 / 0.9) if q[1] > q[0] else 1.0
                    bin_width = qdiff / float(n_bins)
                    bin_offset = q[0] - qdiff * 0.05
                    low = q[0] - qdiff * 0.05
                    high = q[1] + qdiff * 0.05
                elif c in int_cols:
                    # for ints use bins around integer values
                    low = quantiles_i[c][0]
                    high = quantiles_i[c][1]
                    bin_width = np.max((np.round((high - low) / float(n_bins)), 1.0))
                    bin_offset = low = np.floor(low - 0.5) + 0.5
                    # NOTE(review): n_bins is reassigned here and carries over to
                    # the remaining dimensions of this histogram — confirm intended.
                    n_bins = int((high - low) // bin_width) + int(
                        (high - low) % bin_width > 0.0
                    )
                    high = low + n_bins * bin_width
                if c == self.time_axis and idx == 0:
                    # time axis is always sparselybin (unbound)
                    specs.append({"binWidth": bin_width, "origin": bin_offset})
                elif len(cols) >= 3:
                    # always binned histogram for high n-dim histograms, avoid potentially exploding histograms
                    specs.append({"num": n_bins, "low": low, "high": high})
                else:
                    # sparse allowed for low dimensional histograms (1 and 2 dim)
                    specs.append({"binWidth": bin_width, "origin": bin_offset})
            if len(cols) == 1:
                specs = specs[0]
            self.bin_specs[n] = specs
def get_data_type(self, df, col):
"""Get data type of dataframe column.
:param df: input data frame
:param str col: column
"""
if col not in self.get_features(df):
raise KeyError(f'column "{col:s}" not in input dataframe')
return df[col].dtype
def categorize_features(self, df):
"""Categorize features of dataframe by data type.
:param df: input (pandas) data frame
"""
# check presence and data type of requested features
# sort features into numerical, timestamp and category based
cols_by_type = defaultdict(set)
features = (
self.features if self.features else [[c] for c in self.get_features(df)]
)
for col_list in features:
for col in col_list:
dt = self.var_dtype.get(col, check_dtype(self.get_data_type(df, col)))
if col not in self.var_dtype:
self.var_dtype[col] = dt
if np.issubdtype(dt, np.integer):
colset = cols_by_type["int"]
if col not in colset:
colset.add(col)
if np.issubdtype(dt, np.number):
colset = cols_by_type["num"]
if col not in colset:
colset.add(col)
elif np.issubdtype(dt, np.datetime64):
colset = cols_by_type["dt"]
if col not in colset:
colset.add(col)
elif np.issubdtype(dt, np.bool_):
colset = cols_by_type["bool"]
if col not in colset:
colset.add(col)
else:
colset = cols_by_type["str"]
if col not in colset:
colset.add(col)
self.logger.debug(
'Data type of column "{col}" is "{type}".'.format(
col=col, type=self.var_dtype[col]
)
)
return cols_by_type
def var_bin_specs(self, c, idx=0):
"""Determine bin_specs to use for variable c.
:param list c: list of variables, or string variable
:param int idx: index of the variable in c, for which to return the bin specs. default is 0.
:return: selected bin_specs of variable
"""
if isinstance(c, str):
c = [c]
n = ":".join(c)
# determine default bin specs
dt = np.dtype(self.var_dtype[c[idx]])
is_timestamp = isinstance(dt.type(), np.datetime64)
default = (
self._unit_bin_specs if not is_timestamp else self._unit_timestamp_specs
)
# get bin specs
if n in self.bin_specs and len(c) > 1 and len(c) == len(self.bin_specs[n]):
result = self.bin_specs[n][idx]
if not result:
result = self.bin_specs.get(c[idx], default)
else:
result = self.bin_specs.get(c[idx], default)
return result
def get_histograms(self, input_df):
"""Handy function to directly get dict of histograms corresponding to input dataframe.
:param input_df: spark/pandas input dataframe
:return: dict of histograms
"""
return self._execute(input_df)
def get_features_specs(self):
"""Return bin specifications used to generate histograms
Can then be passed on to other histogram filler to get identical histograms.
"""
features = [":".join(c) for c in self.features] # rejoin substrings
return features, self.bin_specs, self.var_dtype, self.time_axis
def transform(self, datastore):
"""Transform function called when used as module in a (popmon) pipeline
This function is for usage in popmon:
https://github.com/ing-bank/popmon
:param dict datastore: input datastore
:return: datastore
"""
if not isinstance(self.read_key, str) and len(self.read_key) > 0:
raise ValueError("read_key has not been properly set.")
if not isinstance(self.store_key, str) and len(self.store_key) > 0:
raise ValueError("store_key has not been properly set.")
if self.read_key not in datastore:
raise KeyError("read_key not found in datastore")
df = datastore[self.read_key]
hists = self.get_histograms(df)
datastore[self.store_key] = hists
return datastore
    def get_hist_bin(self, hist, features, quant, col, dt):
        """Wrap `hist` in the histogrammar binning primitive chosen for one column.

        The primitive is selected from the column's bin specifications (see
        var_bin_specs).  Numeric and timestamp columns map onto one of the
        binned/aggregating primitives; everything else (strings, booleans)
        becomes a categorical histogram.  The order of the checks below fixes
        the precedence when a spec dict contains several recognized keys.

        :param hist: histogram object to nest inside the new binning layer
        :param features: list of column names making up the full histogram
        :param quant: quantity (lookup) function for this column
        :param col: column name
        :param dt: numpy data type of the column
        :return: histogrammar primitive wrapping `hist`
        """
        is_number = np.issubdtype(dt, np.number)
        is_timestamp = np.issubdtype(dt, np.datetime64)
        is_bool = np.issubdtype(dt, np.bool_)
        # bin specs for this column within the (possibly multi-dim) feature set
        specs = self.var_bin_specs(features, features.index(col))
        if is_number or is_timestamp:
            # numbers and timestamps are put in a sparse binned histogram
            if "binWidth" in specs or "bin_width" in specs:
                hist = SparselyBin(
                    binWidth=specs.get("binWidth", specs.get("bin_width", 1.)),
                    origin=specs.get("origin", specs.get("bin_offset", 0.)),
                    quantity=quant,
                    value=hist,
                )
            elif "num" in specs and "low" in specs and "high" in specs:
                # fixed-range regular binning
                hist = Bin(
                    num=specs["num"],
                    low=specs["low"],
                    high=specs["high"],
                    quantity=quant,
                    value=hist,
                )
            elif "edges" in specs or "bin_edges" in specs:
                # user-supplied (irregular) bin edges
                hist = IrregularlyBin(
                    edges=specs.get('edges', specs.get('bin_edges', [])),
                    quantity=quant,
                    value=hist,
                )
            elif "maximize" in specs or "max" in specs:
                hist = Maximize(quantity=quant)
            elif "minimize" in specs or "min" in specs:
                hist = Minimize(quantity=quant)
            elif "average" in specs or "mean" in specs:
                hist = Average(quantity=quant)
            elif "deviate" in specs:
                hist = Deviate(quantity=quant)
            elif "sum" in specs:
                hist = Sum(quantity=quant)
            elif "centers" in specs or "bin_centers" in specs:
                hist = CentrallyBin(
                    centers=specs.get('centers', specs.get('bin_centers', [])),
                    quantity=quant,
                    value=hist,
                )
            elif "thresholds" in specs:
                # cumulative stacked histogram above each threshold
                hist = Stack(
                    thresholds=specs['thresholds'],
                    quantity=quant,
                    value=hist,
                )
            elif "bag" in specs or "range" in specs:
                hist = Bag(
                    range=specs.get('range', 'N'),
                    quantity=quant,
                )
            elif "fraction" in specs:
                hist = Fraction(
                    quantity=quant,
                    value=hist,
                )
            elif "cut" in specs:
                hist = Select(
                    quantity=quant,
                    cut=hist,
                )
            else:
                raise RuntimeError("Do not know how to interpret bin specifications.")
        else:
            if not is_bool and ("bag" in specs or "range" in specs):
                hist = Bag(range=specs.get('range', 'S'), quantity=quant)
            else:
                # string and booleans are treated as categories
                hist = Categorize(quantity=quant, value=hist)
        return hist
| [
"pandas.Timestamp",
"numpy.floor",
"numpy.dtype",
"copy.copy",
"numpy.issubdtype",
"logging.getLogger",
"collections.defaultdict",
"pandas.Timedelta",
"numpy.unique"
] | [((4323, 4342), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4340, 4342), False, 'import logging\n'), ((11120, 11168), 'numpy.unique', 'np.unique', (['[j for i in self.features for j in i]'], {}), '([j for i in self.features for j in i])\n', (11129, 11168), True, 'import numpy as np\n'), ((13052, 13100), 'numpy.unique', 'np.unique', (['[j for i in self.features for j in i]'], {}), '([j for i in self.features for j in i])\n', (13061, 13100), True, 'import numpy as np\n'), ((16985, 17001), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (16996, 17001), False, 'from collections import defaultdict\n'), ((18926, 18958), 'numpy.dtype', 'np.dtype', (['self.var_dtype[c[idx]]'], {}), '(self.var_dtype[c[idx]])\n', (18934, 18958), True, 'import numpy as np\n'), ((21006, 21034), 'numpy.issubdtype', 'np.issubdtype', (['dt', 'np.number'], {}), '(dt, np.number)\n', (21019, 21034), True, 'import numpy as np\n'), ((21058, 21090), 'numpy.issubdtype', 'np.issubdtype', (['dt', 'np.datetime64'], {}), '(dt, np.datetime64)\n', (21071, 21090), True, 'import numpy as np\n'), ((21109, 21136), 'numpy.issubdtype', 'np.issubdtype', (['dt', 'np.bool_'], {}), '(dt, np.bool_)\n', (21122, 21136), True, 'import numpy as np\n'), ((6999, 7011), 'copy.copy', 'copy.copy', (['c'], {}), '(c)\n', (7008, 7011), False, 'import copy\n'), ((5023, 5044), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(30)'}), '(days=30)\n', (5035, 5044), True, 'import pandas as pd\n'), ((5074, 5100), 'pandas.Timestamp', 'pd.Timestamp', (['"""2010-01-04"""'], {}), "('2010-01-04')\n", (5086, 5100), True, 'import pandas as pd\n'), ((17387, 17416), 'numpy.issubdtype', 'np.issubdtype', (['dt', 'np.integer'], {}), '(dt, np.integer)\n', (17400, 17416), True, 'import numpy as np\n'), ((17568, 17596), 'numpy.issubdtype', 'np.issubdtype', (['dt', 'np.number'], {}), '(dt, np.number)\n', (17581, 17596), True, 'import numpy as np\n'), ((17750, 17782), 'numpy.issubdtype', 'np.issubdtype', (['dt', 
'np.datetime64'], {}), '(dt, np.datetime64)\n', (17763, 17782), True, 'import numpy as np\n'), ((17935, 17962), 'numpy.issubdtype', 'np.issubdtype', (['dt', 'np.bool_'], {}), '(dt, np.bool_)\n', (17948, 17962), True, 'import numpy as np\n'), ((15435, 15454), 'numpy.floor', 'np.floor', (['(low - 0.5)'], {}), '(low - 0.5)\n', (15443, 15454), True, 'import numpy as np\n')] |
#!/usr/bin/env python
u"""
reduce_ICESat2_ATL07_raster.py
Written by <NAME> (11/2021)
Create masks for reducing ICESat-2 ATL07 data using raster imagery
COMMAND LINE OPTIONS:
-R X, --raster X: Input raster file
-F X, --format X: Input raster file format
netCDF4
HDF5
geotiff
-v X, --variables X: variable names of data in HDF5 or netCDF4 file
x, y and data variable names
-P X, --projection X: spatial projection as EPSG code or PROJ4 string
4326: latitude and longitude coordinates on WGS84 reference ellipsoid
-O X, --output X: Output mask file name
-V, --verbose: Output information about each created file
-M X, --mode X: Permission mode of directories and files created
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
    h5py: Python interface for Hierarchical Data Format 5 (HDF5)
https://h5py.org
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)
https://pypi.python.org/pypi/GDAL/
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
PROGRAM DEPENDENCIES:
read_ICESat2_ATL07.py: reads ICESat-2 sea ice height data files
convert_delta_time.py: converts from delta time into Julian and year-decimal
spatial.py: utilities for reading and writing spatial data
time.py: Utilities for calculating time operations
utilities.py: download and management utilities for syncing files
UPDATE HISTORY:
Written 11/2021
"""
from __future__ import print_function
import sys
import os
import re
import h5py
import pyproj
import logging
import argparse
import datetime
import warnings
import numpy as np
import scipy.spatial
import scipy.interpolate
import icesat2_toolkit.spatial
import icesat2_toolkit.time
import icesat2_toolkit.utilities
from icesat2_toolkit.convert_delta_time import convert_delta_time
from icesat2_toolkit.read_ICESat2_ATL07 import read_HDF5_ATL07
warnings.filterwarnings("ignore")
#-- PURPOSE: try to get the projection information for the input file
#-- PURPOSE: try to get the projection information for the input file
def get_projection(attributes, PROJECTION):
    """Get the coordinate reference system for the raster image.

    Candidate definitions are tried in priority order: the raster file's own
    'projection' attribute, an EPSG code, and finally a PROJ4/CRS string.

    :param dict attributes: spatial attributes of the input raster file
    :param PROJECTION: EPSG code or PROJ4 string supplied on the command line
    :return: pyproj CRS object
    :raises pyproj.exceptions.CRSError: if no valid projection can be parsed
    """
    #-- coordinate reference system string from file
    #-- KeyError added: raster attributes may not contain a 'projection' entry
    try:
        crs = pyproj.CRS.from_string(attributes['projection'])
    except (KeyError,ValueError,pyproj.exceptions.CRSError):
        pass
    else:
        return crs
    #-- EPSG projection code
    #-- TypeError added: int(None) would otherwise escape the handler
    try:
        crs = pyproj.CRS.from_string("epsg:{0:d}".format(int(PROJECTION)))
    except (ValueError,TypeError,pyproj.exceptions.CRSError):
        pass
    else:
        return crs
    #-- coordinate reference system string
    try:
        crs = pyproj.CRS.from_string(PROJECTION)
    except (ValueError,pyproj.exceptions.CRSError):
        pass
    else:
        return crs
    #-- no projection can be made
    raise pyproj.exceptions.CRSError
#-- PURPOSE: find a valid Delaunay triangulation for coordinates x0 and y0
#-- http://www.qhull.org/html/qhull.htm#options
#-- Attempt 1: standard qhull options Qt Qbb Qc Qz
#-- Attempt 2: rescale and center the inputs with option QbB
#-- Attempt 3: joggle the inputs to find a triangulation with option QJ
#-- if no passing triangulations: exit with empty list
def find_valid_triangulation(x0,y0,max_points=1e6):
    """Attempt a valid Delaunay triangulation of points (x0, y0).

    Tries successively more permissive qhull option sets
    (http://www.qhull.org/html/qhull.htm#options):
    1. standard options (Qt Qbb Qc Qz)
    2. rescale and center the inputs (Qt Qc QbB)
    3. joggle the inputs (QJ QbB)

    :param x0: x-coordinates of points
    :param y0: y-coordinates of points
    :param max_points: skip triangulation above this number of points
    :return: tuple of (attempt number or None, triangulation or empty list)
    """
    #-- don't attempt triangulation if there are a large number of points
    if (len(x0) > max_points):
        #-- if too many points: set triangle as an empty list
        logging.info('Too many points for triangulation')
        return (None,[])
    #-- QhullError moved out of the private scipy.spatial.qhull module in
    #-- scipy 1.8 (the private module was later removed); resolve it in a
    #-- version-independent way so the except clause cannot itself fail
    try:
        QhullError = scipy.spatial.QhullError
    except AttributeError:
        QhullError = scipy.spatial.qhull.QhullError
    #-- try each set of qhull_options
    points = np.concatenate((x0[:,None],y0[:,None]),axis=1)
    for i,opt in enumerate(['Qt Qbb Qc Qz','Qt Qc QbB','QJ QbB']):
        logging.info('qhull option: {0}'.format(opt))
        try:
            triangle = scipy.spatial.Delaunay(points.data, qhull_options=opt)
        except QhullError:
            pass
        else:
            return (i+1,triangle)
    #-- if still errors: set triangle as an empty list
    return (None,[])
#-- PURPOSE: read ICESat-2 sea ice data (ATL07) from NSIDC
#-- reduce to a masked region using raster imagery
def reduce_ICESat2_ATL07_raster(FILE,
    MASK=None,
    FORMAT=None,
    VARIABLES=[],
    OUTPUT=None,
    PROJECTION=None,
    VERBOSE=False,
    MODE=0o775):
    """Create a subsetting mask for an ICESat-2 ATL07 file using raster imagery.

    Reads the ATL07 sea ice heights, interpolates the raster mask image to the
    segment locations of each beam, and writes an output HDF5 file containing
    the interpolated mask alongside the segment geolocation variables.

    Arguments
    ---------
    FILE: input ICESat-2 ATL07 file

    Keyword arguments
    -----------------
    MASK: input raster file containing the mask values
    FORMAT: input raster file format (netCDF4, HDF5 or geotiff)
    VARIABLES: x, y and data variable names within the raster file
    OUTPUT: output mask file name (default: derived from input file name)
    PROJECTION: spatial projection of raster as EPSG code or PROJ4 string
    VERBOSE: output information about each created file
    MODE: permission mode of the output file

    NOTE(review): the mutable default VARIABLES=[] is shared across calls;
    callers always pass it explicitly here, but confirm before reuse.
    """
    #-- create logger
    loglevel = logging.INFO if VERBOSE else logging.CRITICAL
    logging.basicConfig(level=loglevel)
    #-- read data from input file
    logging.info('{0} -->'.format(os.path.basename(FILE)))
    IS2_atl07_mds,IS2_atl07_attrs,IS2_atl07_beams = read_HDF5_ATL07(FILE,
        ATTRIBUTES=True)
    DIRECTORY = os.path.dirname(FILE)
    #-- extract parameters from ICESat-2 ATLAS HDF5 sea ice file name
    rx = re.compile(r'(processed_)?(ATL\d{2})-(\d{2})_(\d{4})(\d{2})(\d{2})'
        r'(\d{2})(\d{2})(\d{2})_(\d{4})(\d{2})(\d{2})_(\d{3})_(\d{2})(.*?).h5$')
    SUB,PRD,HMN,YY,MM,DD,HH,MN,SS,TRK,CYCL,SN,RL,VERS,AUX=rx.findall(FILE).pop()
    #-- read raster image for spatial coordinates and data
    dinput = icesat2_toolkit.spatial.from_file(MASK, FORMAT,
        xname=VARIABLES[0], yname=VARIABLES[1], varname=VARIABLES[2])
    #-- raster extents
    xmin,xmax,ymin,ymax = np.copy(dinput['attributes']['extent'])
    #-- check that x and y are strictly increasing
    #-- (flip axes with negative spacing so the spline fit below is valid)
    if (np.sign(dinput['attributes']['spacing'][0]) == -1):
        dinput['x'] = dinput['x'][::-1]
        dinput['data'] = dinput['data'][:,::-1]
    if (np.sign(dinput['attributes']['spacing'][1]) == -1):
        dinput['y'] = dinput['y'][::-1]
        dinput['data'] = dinput['data'][::-1,:]
    #-- find valid points within mask
    indy,indx = np.nonzero(dinput['data'])
    #-- check that input points are within convex hull of valid model points
    gridx,gridy = np.meshgrid(dinput['x'],dinput['y'])
    v,triangle = find_valid_triangulation(gridx[indy,indx],gridy[indy,indx])
    #-- create an interpolator for input raster data
    logging.info('Building Spline Interpolator')
    SPL = scipy.interpolate.RectBivariateSpline(dinput['x'], dinput['y'],
        dinput['data'].T, kx=1, ky=1)
    #-- convert projection from input coordinates (EPSG) to data coordinates
    crs1 = pyproj.CRS.from_string("epsg:{0:d}".format(4326))
    crs2 = get_projection(dinput['attributes'], PROJECTION)
    transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
    logging.info(crs2.to_proj4())
    #-- copy variables for outputting to HDF5 file
    IS2_atl07_mask = {}
    IS2_atl07_fill = {}
    IS2_atl07_dims = {}
    IS2_atl07_mask_attrs = {}
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    #-- Add this value to delta time parameters to compute full gps_seconds
    IS2_atl07_mask['ancillary_data'] = {}
    IS2_atl07_mask_attrs['ancillary_data'] = {}
    for key in ['atlas_sdp_gps_epoch']:
        #-- get each HDF5 variable
        IS2_atl07_mask['ancillary_data'][key] = IS2_atl07_mds['ancillary_data'][key]
        #-- Getting attributes of group and included variables
        IS2_atl07_mask_attrs['ancillary_data'][key] = {}
        for att_name,att_val in IS2_atl07_attrs['ancillary_data'][key].items():
            IS2_atl07_mask_attrs['ancillary_data'][key][att_name] = att_val
    #-- for each input beam within the file
    for gtx in sorted(IS2_atl07_beams):
        #-- output data dictionaries for beam
        IS2_atl07_mask[gtx] = dict(sea_ice_segments={})
        IS2_atl07_fill[gtx] = dict(sea_ice_segments={})
        IS2_atl07_dims[gtx] = dict(sea_ice_segments={})
        IS2_atl07_mask_attrs[gtx] = dict(sea_ice_segments={})
        #-- number of segments
        val = IS2_atl07_mds[gtx]['sea_ice_segments']
        n_seg = len(val['height_segment_id'])
        #-- convert latitude/longitude to raster image projection
        X,Y = transformer.transform(val['longitude'], val['latitude'])
        #-- check where points are within complex hull of triangulation
        #-- or within the bounds of the input raster image
        if v:
            interp_points = np.concatenate((X[:,None],Y[:,None]),axis=1)
            valid = (triangle.find_simplex(interp_points) >= 0)
        else:
            valid = (X >= xmin) & (X <= xmax) & (Y >= ymin) & (Y <= ymax)
        #-- interpolate raster mask to points
        interp_mask = np.zeros((n_seg),dtype=bool)
        #-- skip beam interpolation if no data within bounds of raster image
        if np.any(valid):
            interp_mask[valid] = SPL.ev(X[valid], Y[valid])
        #-- group attributes for beam
        IS2_atl07_mask_attrs[gtx]['Description'] = IS2_atl07_attrs[gtx]['Description']
        IS2_atl07_mask_attrs[gtx]['atlas_pce'] = IS2_atl07_attrs[gtx]['atlas_pce']
        IS2_atl07_mask_attrs[gtx]['atlas_beam_type'] = IS2_atl07_attrs[gtx]['atlas_beam_type']
        IS2_atl07_mask_attrs[gtx]['groundtrack_id'] = IS2_atl07_attrs[gtx]['groundtrack_id']
        IS2_atl07_mask_attrs[gtx]['atmosphere_profile'] = IS2_atl07_attrs[gtx]['atmosphere_profile']
        IS2_atl07_mask_attrs[gtx]['atlas_spot_number'] = IS2_atl07_attrs[gtx]['atlas_spot_number']
        IS2_atl07_mask_attrs[gtx]['sc_orientation'] = IS2_atl07_attrs[gtx]['sc_orientation']
        #-- group attributes for sea_ice_segments
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['Description'] = ("Top group for sea "
            "ice segments as computed by the ATBD algorithm.")
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['data_rate'] = ("Data within this "
            "group are stored at the variable segment rate.")
        #-- geolocation, time and segment ID
        #-- delta time
        IS2_atl07_mask[gtx]['sea_ice_segments']['delta_time'] = val['delta_time'].copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['delta_time'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['delta_time'] = None
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['units'] = "seconds since 2018-01-01"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['long_name'] = "Elapsed GPS seconds"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['standard_name'] = "time"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['source'] = "telemetry"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['calendar'] = "standard"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['description'] = ("Number of "
            "GPS seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch "
            "offset is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS "
            "seconds between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP "
            "epoch. By adding the offset contained within atlas_sdp_gps_epoch to delta time "
            "parameters, the time in gps_seconds relative to the GPS epoch can be computed.")
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['delta_time']['coordinates'] = \
            "height_segment_id latitude longitude"
        #-- latitude
        IS2_atl07_mask[gtx]['sea_ice_segments']['latitude'] = val['latitude'].copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['latitude'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['latitude'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['units'] = "degrees_north"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['contentType'] = "physicalMeasurement"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['long_name'] = "Latitude"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['standard_name'] = "latitude"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['description'] = ("Latitude of "
            "segment center")
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['valid_min'] = -90.0
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['valid_max'] = 90.0
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['latitude']['coordinates'] = \
            "height_segment_id delta_time longitude"
        #-- longitude
        IS2_atl07_mask[gtx]['sea_ice_segments']['longitude'] = val['longitude'].copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['longitude'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['longitude'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['units'] = "degrees_east"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['contentType'] = "physicalMeasurement"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['long_name'] = "Longitude"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['standard_name'] = "longitude"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['description'] = ("Longitude of "
            "segment center")
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['valid_min'] = -180.0
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['valid_max'] = 180.0
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['longitude']['coordinates'] = \
            "height_segment_id delta_time latitude"
        #-- segment ID
        IS2_atl07_mask[gtx]['sea_ice_segments']['height_segment_id'] = val['height_segment_id']
        IS2_atl07_fill[gtx]['sea_ice_segments']['height_segment_id'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['height_segment_id'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['height_segment_id'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['height_segment_id']['units'] = "1"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['height_segment_id']['contentType'] = "referenceInformation"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['height_segment_id']['long_name'] = \
            "Identifier of each height segment"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['height_segment_id']['description'] = \
            "Identifier of each height segment"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['height_segment_id']['coordinates'] = \
            "delta_time latitude longitude"
        #-- geolocation segment beginning
        IS2_atl07_mask[gtx]['sea_ice_segments']['geoseg_beg'] = val['geoseg_beg'].copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['geoseg_beg'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['geoseg_beg'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_beg'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_beg']['units'] = "1"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_beg']['contentType'] = "referenceInformation"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_beg']['long_name'] = "Beginning GEOSEG"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_beg']['description'] = \
            "Geolocation segment (geoseg) ID associated with the first photon used in this sea ice segment"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_beg']['coordinates'] = \
            "height_segment_id delta_time latitude longitude"
        #-- geolocation segment ending
        IS2_atl07_mask[gtx]['sea_ice_segments']['geoseg_end'] = val['geoseg_end'].copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['geoseg_end'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['geoseg_end'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_end'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_end']['units'] = "1"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_end']['contentType'] = "referenceInformation"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_end']['long_name'] = "Ending GEOSEG"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_end']['description'] = \
            "Geolocation segment (geoseg) ID associated with the last photon used in this sea ice segment"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['geoseg_end']['coordinates'] = \
            "height_segment_id delta_time latitude longitude"
        #-- along track distance
        IS2_atl07_mask[gtx]['sea_ice_segments']['seg_dist_x'] = val['seg_dist_x'].copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['seg_dist_x'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['seg_dist_x'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['seg_dist_x'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['seg_dist_x']['units'] = "meters"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['seg_dist_x']['contentType'] = "referenceInformation"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['seg_dist_x']['long_name'] = "Along track distance"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['seg_dist_x']['description'] = \
            "Along-track distance from the equator crossing to the segment center."
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['seg_dist_x']['coordinates'] = \
            "height_segment_id delta_time latitude longitude"
        #-- subsetting variables
        IS2_atl07_mask[gtx]['sea_ice_segments']['subsetting'] = {}
        IS2_atl07_fill[gtx]['sea_ice_segments']['subsetting'] = {}
        IS2_atl07_dims[gtx]['sea_ice_segments']['subsetting'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['Description'] = ("The subsetting group "
            "contains parameters used to reduce sea ice segments to specific regions of interest.")
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['data_rate'] = ("Data within this "
            "group are stored at the variable segment rate.")
        #-- output mask to HDF5
        IS2_atl07_mask[gtx]['sea_ice_segments']['subsetting']['mask'] = interp_mask.copy()
        IS2_atl07_fill[gtx]['sea_ice_segments']['subsetting']['mask'] = None
        IS2_atl07_dims[gtx]['sea_ice_segments']['subsetting']['mask'] = ['delta_time']
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['mask'] = {}
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['mask']['contentType'] = \
            "referenceInformation"
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['mask']['long_name'] = 'Mask'
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['mask']['description'] = \
            'Mask calculated using raster image'
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['mask']['source'] = \
            os.path.basename(MASK)
        IS2_atl07_mask_attrs[gtx]['sea_ice_segments']['subsetting']['mask']['coordinates'] = \
            "../height_segment_id ../delta_time ../latitude ../longitude"
    #-- use default output file name and path
    if OUTPUT:
        output_file = os.path.expanduser(OUTPUT)
    else:
        fargs = (PRD,HMN,'MASK',YY,MM,DD,HH,MN,SS,TRK,CYCL,SN,RL,VERS,AUX)
        file_format = '{0}-{1}_{2}_{3}{4}{5}{6}{7}{8}_{9}{10}{11}_{12}_{13}{14}.h5'
        output_file = os.path.join(DIRECTORY,file_format.format(*fargs))
    #-- print file information
    logging.info('\t{0}'.format(output_file))
    #-- write to output HDF5 file
    HDF5_ATL07_mask_write(IS2_atl07_mask, IS2_atl07_mask_attrs,
        CLOBBER=True, INPUT=os.path.basename(FILE),
        FILL_VALUE=IS2_atl07_fill, DIMENSIONS=IS2_atl07_dims,
        FILENAME=output_file)
    #-- change the permissions mode
    os.chmod(output_file, MODE)
#-- PURPOSE: outputting the masks for ICESat-2 data to HDF5
def HDF5_ATL07_mask_write(IS2_atl07_mask, IS2_atl07_attrs, INPUT=None,
    FILENAME='', FILL_VALUE=None, DIMENSIONS=None, CLOBBER=False):
    """Write the interpolated masks for ICESat-2 ATL07 data to an HDF5 file.

    Arguments
    ---------
    IS2_atl07_mask: dict of output variables (per beam and ancillary data)
    IS2_atl07_attrs: dict of attributes for each output variable

    Keyword arguments
    -----------------
    INPUT: name of the input ATL07 file (stored as a file attribute)
    FILENAME: full path of the output HDF5 file
    FILL_VALUE: dict of fill values for each output variable
    DIMENSIONS: dict of dimension names to attach for each output variable
    CLOBBER: overwrite an existing output file
    """
    #-- setting HDF5 clobber attribute
    if CLOBBER:
        clobber = 'w'
    else:
        clobber = 'w-'
    #-- open output HDF5 file
    fileID = h5py.File(os.path.expanduser(FILENAME), clobber)
    #-- create HDF5 records
    h5 = {}
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    h5['ancillary_data'] = {}
    for k,v in IS2_atl07_mask['ancillary_data'].items():
        #-- Defining the HDF5 dataset variables
        val = 'ancillary_data/{0}'.format(k)
        h5['ancillary_data'][k] = fileID.create_dataset(val, np.shape(v), data=v,
            dtype=v.dtype, compression='gzip')
        #-- add HDF5 variable attributes
        for att_name,att_val in IS2_atl07_attrs['ancillary_data'][k].items():
            h5['ancillary_data'][k].attrs[att_name] = att_val
    #-- write each output beam
    beams = [k for k in IS2_atl07_mask.keys() if bool(re.match(r'gt\d[lr]',k))]
    for gtx in beams:
        fileID.create_group(gtx)
        #-- add HDF5 group attributes for beam
        for att_name in ['Description','atlas_pce','atlas_beam_type',
            'groundtrack_id','atmosphere_profile','atlas_spot_number',
            'sc_orientation']:
            fileID[gtx].attrs[att_name] = IS2_atl07_attrs[gtx][att_name]
        #-- create sea_ice_segments group
        fileID[gtx].create_group('sea_ice_segments')
        h5[gtx] = dict(sea_ice_segments={})
        for att_name in ['Description','data_rate']:
            att_val = IS2_atl07_attrs[gtx]['sea_ice_segments'][att_name]
            fileID[gtx]['sea_ice_segments'].attrs[att_name] = att_val
        #-- delta_time, geolocation and segment identification variables
        for k in ['delta_time','latitude','longitude','height_segment_id',
            'geoseg_beg','geoseg_end','seg_dist_x']:
            #-- values and attributes
            v = IS2_atl07_mask[gtx]['sea_ice_segments'][k]
            attrs = IS2_atl07_attrs[gtx]['sea_ice_segments'][k]
            fillvalue = FILL_VALUE[gtx]['sea_ice_segments'][k]
            #-- Defining the HDF5 dataset variables
            val = '{0}/{1}/{2}'.format(gtx,'sea_ice_segments',k)
            if fillvalue:
                h5[gtx]['sea_ice_segments'][k] = fileID.create_dataset(val,
                    np.shape(v), data=v, dtype=v.dtype, fillvalue=fillvalue,
                    compression='gzip')
            else:
                h5[gtx]['sea_ice_segments'][k] = fileID.create_dataset(val,
                    np.shape(v), data=v, dtype=v.dtype, compression='gzip')
            #-- create or attach dimensions for HDF5 variable
            if DIMENSIONS[gtx]['sea_ice_segments'][k]:
                #-- attach dimensions
                for i,dim in enumerate(DIMENSIONS[gtx]['sea_ice_segments'][k]):
                    h5[gtx]['sea_ice_segments'][k].dims[i].attach_scale(
                        h5[gtx]['sea_ice_segments'][dim])
            else:
                #-- make dimension
                h5[gtx]['sea_ice_segments'][k].make_scale(k)
            #-- add HDF5 variable attributes
            for att_name,att_val in attrs.items():
                h5[gtx]['sea_ice_segments'][k].attrs[att_name] = att_val
        #-- add to subsetting variables
        key = 'subsetting'
        fileID[gtx]['sea_ice_segments'].create_group(key)
        h5[gtx]['sea_ice_segments'][key] = {}
        for att_name in ['Description','data_rate']:
            att_val=IS2_atl07_attrs[gtx]['sea_ice_segments'][key][att_name]
            fileID[gtx]['sea_ice_segments'][key].attrs[att_name] = att_val
        for k,v in IS2_atl07_mask[gtx]['sea_ice_segments'][key].items():
            #-- attributes
            attrs = IS2_atl07_attrs[gtx]['sea_ice_segments'][key][k]
            fillvalue = FILL_VALUE[gtx]['sea_ice_segments'][key][k]
            #-- Defining the HDF5 dataset variables
            val = '{0}/{1}/{2}/{3}'.format(gtx,'sea_ice_segments',key,k)
            if fillvalue:
                h5[gtx]['sea_ice_segments'][key][k] = \
                    fileID.create_dataset(val, np.shape(v), data=v,
                    dtype=v.dtype, fillvalue=fillvalue, compression='gzip')
            else:
                h5[gtx]['sea_ice_segments'][key][k] = \
                    fileID.create_dataset(val, np.shape(v), data=v,
                    dtype=v.dtype, compression='gzip')
            #-- attach dimensions
            for i,dim in enumerate(DIMENSIONS[gtx]['sea_ice_segments'][key][k]):
                h5[gtx]['sea_ice_segments'][key][k].dims[i].attach_scale(
                    h5[gtx]['sea_ice_segments'][dim])
            #-- add HDF5 variable attributes
            for att_name,att_val in attrs.items():
                h5[gtx]['sea_ice_segments'][key][k].attrs[att_name] = att_val
    #-- HDF5 file title
    fileID.attrs['featureType'] = 'trajectory'
    fileID.attrs['title'] = 'ATLAS/ICESat-2 L3A Sea Ice Height'
    fileID.attrs['summary'] = ('Subsetting masks for sea ice segments needed '
        'to interpret and assess the quality of the height estimates.')
    fileID.attrs['description'] = ('The data set (ATL07) contains along-track '
        'heights for sea ice and open water leads (at varying length scales) '
        'relative to the WGS84 ellipsoid (ITRF2014 reference frame) after '
        'adjustment for geoidal and tidal variations, and inverted barometer '
        'effects.')
    date_created = datetime.datetime.today()
    fileID.attrs['date_created'] = date_created.isoformat()
    project = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'
    fileID.attrs['project'] = project
    platform = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'
    #-- bug fix: was written to the 'project' key, silently overwriting it
    fileID.attrs['platform'] = platform
    #-- add attribute for elevation instrument and designated processing level
    instrument = 'ATLAS > Advanced Topographic Laser Altimeter System'
    fileID.attrs['instrument'] = instrument
    fileID.attrs['source'] = 'Spacecraft'
    fileID.attrs['references'] = 'https://nsidc.org/data/icesat-2'
    fileID.attrs['processing_level'] = '4'
    #-- add attributes for input ATL07 file
    fileID.attrs['input_files'] = os.path.basename(INPUT)
    #-- find geospatial and temporal ranges
    lnmn,lnmx,ltmn,ltmx,tmn,tmx = (np.inf,-np.inf,np.inf,-np.inf,np.inf,-np.inf)
    for gtx in beams:
        lon = IS2_atl07_mask[gtx]['sea_ice_segments']['longitude']
        lat = IS2_atl07_mask[gtx]['sea_ice_segments']['latitude']
        delta_time = IS2_atl07_mask[gtx]['sea_ice_segments']['delta_time']
        #-- setting the geospatial and temporal ranges
        lnmn = lon.min() if (lon.min() < lnmn) else lnmn
        lnmx = lon.max() if (lon.max() > lnmx) else lnmx
        ltmn = lat.min() if (lat.min() < ltmn) else ltmn
        ltmx = lat.max() if (lat.max() > ltmx) else ltmx
        tmn = delta_time.min() if (delta_time.min() < tmn) else tmn
        tmx = delta_time.max() if (delta_time.max() > tmx) else tmx
    #-- add geospatial and temporal attributes
    fileID.attrs['geospatial_lat_min'] = ltmn
    fileID.attrs['geospatial_lat_max'] = ltmx
    fileID.attrs['geospatial_lon_min'] = lnmn
    fileID.attrs['geospatial_lon_max'] = lnmx
    fileID.attrs['geospatial_lat_units'] = "degrees_north"
    fileID.attrs['geospatial_lon_units'] = "degrees_east"
    fileID.attrs['geospatial_ellipsoid'] = "WGS84"
    fileID.attrs['date_type'] = 'UTC'
    fileID.attrs['time_type'] = 'CCSDS UTC-A'
    #-- convert start and end time from ATLAS SDP seconds into GPS seconds
    atlas_sdp_gps_epoch=IS2_atl07_mask['ancillary_data']['atlas_sdp_gps_epoch']
    gps_seconds = atlas_sdp_gps_epoch + np.array([tmn,tmx])
    #-- calculate leap seconds
    leaps = icesat2_toolkit.time.count_leap_seconds(gps_seconds)
    #-- convert from seconds since 1980-01-06T00:00:00 to Modified Julian days
    MJD = icesat2_toolkit.time.convert_delta_time(gps_seconds - leaps,
        epoch1=(1980,1,6,0,0,0), epoch2=(1858,11,17,0,0,0), scale=1.0/86400.0)
    #-- convert to calendar date
    YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(MJD + 2400000.5,
        FORMAT='tuple')
    #-- add attributes with measurement date start, end and duration
    tcs = datetime.datetime(int(YY[0]), int(MM[0]), int(DD[0]),
        int(HH[0]), int(MN[0]), int(SS[0]), int(1e6*(SS[0] % 1)))
    fileID.attrs['time_coverage_start'] = tcs.isoformat()
    tce = datetime.datetime(int(YY[1]), int(MM[1]), int(DD[1]),
        int(HH[1]), int(MN[1]), int(SS[1]), int(1e6*(SS[1] % 1)))
    fileID.attrs['time_coverage_end'] = tce.isoformat()
    fileID.attrs['time_coverage_duration'] = '{0:0.0f}'.format(tmx-tmn)
    #-- Closing the HDF5 file
    fileID.close()
#-- Main program that calls reduce_ICESat2_ATL07_raster()
def main():
    """Command-line entry point.

    Builds the argument parser, reads the program options (arguments may
    also be supplied from a file via the ``@file`` prefix), and hands the
    parsed parameters to ``reduce_ICESat2_ATL07_raster()``.
    """
    #-- Read the system arguments listed after the program
    parser = argparse.ArgumentParser(
        description="""Create masks for reducing ICESat-2 data
            using raster imagery
            """,
        fromfile_prefix_chars="@"
    )
    parser.convert_arg_line_to_args = \
        icesat2_toolkit.utilities.convert_arg_line_to_args
    #-- expand tilde and relative segments in filesystem path arguments
    def expanded_path(p):
        return os.path.abspath(os.path.expanduser(p))
    #-- interpret a string as an octal permissions number (e.g. "775")
    def octal_mode(x):
        return int(x,base=8)
    #-- positional: input ICESat-2 ATL07 granule
    parser.add_argument('file',
        type=expanded_path,
        help='ICESat-2 ATL07 file to run')
    #-- use default output file name
    parser.add_argument('--output','-O',
        type=expanded_path,
        help='Name and path of output file')
    #-- input raster file and file format
    parser.add_argument('--raster','-R',
        type=expanded_path,
        help='Input raster file')
    parser.add_argument('--format','-F',
        type=str, default='geotiff', choices=('netCDF4','HDF5','geotiff'),
        help='Input raster file format')
    #-- variable names of data in HDF5 or netCDF4 file
    parser.add_argument('--variables','-v',
        type=str, nargs='+', default=['x','y','data'],
        help='Variable names of data in HDF5 or netCDF4 files')
    #-- spatial projection (EPSG code or PROJ4 string)
    parser.add_argument('--projection','-P',
        type=str, default='4326',
        help='Spatial projection as EPSG code or PROJ4 string')
    #-- verbosity settings
    #-- verbose will output information about each output file
    parser.add_argument('--verbose','-V',
        default=False, action='store_true',
        help='Verbose output of run')
    #-- permissions mode of the local files (number in octal)
    parser.add_argument('--mode','-M',
        type=octal_mode, default=0o775,
        help='permissions mode of output files')
    #-- parse the known arguments; anything unrecognized is ignored
    args, _ = parser.parse_known_args()
    #-- run raster mask program with parameters
    keywords = dict(MASK=args.raster, FORMAT=args.format,
        VARIABLES=args.variables, PROJECTION=args.projection,
        OUTPUT=args.output, VERBOSE=args.verbose, MODE=args.mode)
    reduce_ICESat2_ATL07_raster(args.file, **keywords)
#-- run main program when this module is executed as a script
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.shape",
"numpy.meshgrid",
"numpy.copy",
"os.path.dirname",
"os.chmod",
"datetime.datetime.today",
"os.path.basename",
"pyproj.CRS.from_string",
"re.match",
"pyproj.Transformer.from_crs",
"numpy.concatenate",
"re.compile",
"logging.basicConfig",
"warnings... | [((2252, 2285), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2275, 2285), False, 'import warnings\n'), ((4543, 4593), 'numpy.concatenate', 'np.concatenate', (['(x0[:, None], y0[:, None])'], {'axis': '(1)'}), '((x0[:, None], y0[:, None]), axis=1)\n', (4557, 4593), True, 'import numpy as np\n'), ((5352, 5387), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevel'}), '(level=loglevel)\n', (5371, 5387), False, 'import logging\n'), ((5534, 5572), 'icesat2_toolkit.read_ICESat2_ATL07.read_HDF5_ATL07', 'read_HDF5_ATL07', (['FILE'], {'ATTRIBUTES': '(True)'}), '(FILE, ATTRIBUTES=True)\n', (5549, 5572), False, 'from icesat2_toolkit.read_ICESat2_ATL07 import read_HDF5_ATL07\n'), ((5597, 5618), 'os.path.dirname', 'os.path.dirname', (['FILE'], {}), '(FILE)\n', (5612, 5618), False, 'import os\n'), ((5698, 5856), 're.compile', 're.compile', (['"""(processed_)?(ATL\\\\d{2})-(\\\\d{2})_(\\\\d{4})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})_(\\\\d{4})(\\\\d{2})(\\\\d{2})_(\\\\d{3})_(\\\\d{2})(.*?).h5$"""'], {}), "(\n '(processed_)?(ATL\\\\d{2})-(\\\\d{2})_(\\\\d{4})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})_(\\\\d{4})(\\\\d{2})(\\\\d{2})_(\\\\d{3})_(\\\\d{2})(.*?).h5$'\n )\n", (5708, 5856), False, 'import re\n'), ((6168, 6207), 'numpy.copy', 'np.copy', (["dinput['attributes']['extent']"], {}), "(dinput['attributes']['extent'])\n", (6175, 6207), True, 'import numpy as np\n'), ((6609, 6635), 'numpy.nonzero', 'np.nonzero', (["dinput['data']"], {}), "(dinput['data'])\n", (6619, 6635), True, 'import numpy as np\n'), ((6731, 6768), 'numpy.meshgrid', 'np.meshgrid', (["dinput['x']", "dinput['y']"], {}), "(dinput['x'], dinput['y'])\n", (6742, 6768), True, 'import numpy as np\n'), ((6902, 6946), 'logging.info', 'logging.info', (['"""Building Spline Interpolator"""'], {}), "('Building Spline Interpolator')\n", (6914, 6946), False, 'import logging\n'), ((7276, 7331), 'pyproj.Transformer.from_crs', 
'pyproj.Transformer.from_crs', (['crs1', 'crs2'], {'always_xy': '(True)'}), '(crs1, crs2, always_xy=True)\n', (7303, 7331), False, 'import pyproj\n'), ((20831, 20858), 'os.chmod', 'os.chmod', (['output_file', 'MODE'], {}), '(output_file, MODE)\n', (20839, 20858), False, 'import os\n'), ((26544, 26569), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (26567, 26569), False, 'import datetime\n'), ((27272, 27295), 'os.path.basename', 'os.path.basename', (['INPUT'], {}), '(INPUT)\n', (27288, 27295), False, 'import os\n'), ((29935, 30100), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create masks for reducing ICESat-2 data\n using raster imagery\n """', 'fromfile_prefix_chars': '"""@"""'}), '(description=\n """Create masks for reducing ICESat-2 data\n using raster imagery\n """\n , fromfile_prefix_chars=\'@\')\n', (29958, 30100), False, 'import argparse\n'), ((2477, 2525), 'pyproj.CRS.from_string', 'pyproj.CRS.from_string', (["attributes['projection']"], {}), "(attributes['projection'])\n", (2499, 2525), False, 'import pyproj\n'), ((2893, 2927), 'pyproj.CRS.from_string', 'pyproj.CRS.from_string', (['PROJECTION'], {}), '(PROJECTION)\n', (2915, 2927), False, 'import pyproj\n'), ((3683, 3732), 'logging.info', 'logging.info', (['"""Too many points for triangulation"""'], {}), "('Too many points for triangulation')\n", (3695, 3732), False, 'import logging\n'), ((6267, 6310), 'numpy.sign', 'np.sign', (["dinput['attributes']['spacing'][0]"], {}), "(dinput['attributes']['spacing'][0])\n", (6274, 6310), True, 'import numpy as np\n'), ((6415, 6458), 'numpy.sign', 'np.sign', (["dinput['attributes']['spacing'][1]"], {}), "(dinput['attributes']['spacing'][1])\n", (6422, 6458), True, 'import numpy as np\n'), ((9350, 9377), 'numpy.zeros', 'np.zeros', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (9358, 9377), True, 'import numpy as np\n'), ((9467, 9480), 'numpy.any', 'np.any', (['valid'], {}), '(valid)\n', (9473, 
9480), True, 'import numpy as np\n'), ((19927, 19949), 'os.path.basename', 'os.path.basename', (['MASK'], {}), '(MASK)\n', (19943, 19949), False, 'import os\n'), ((20203, 20229), 'os.path.expanduser', 'os.path.expanduser', (['OUTPUT'], {}), '(OUTPUT)\n', (20221, 20229), False, 'import os\n'), ((21222, 21250), 'os.path.expanduser', 'os.path.expanduser', (['FILENAME'], {}), '(FILENAME)\n', (21240, 21250), False, 'import os\n'), ((28748, 28768), 'numpy.array', 'np.array', (['[tmn, tmx]'], {}), '([tmn, tmx])\n', (28756, 28768), True, 'import numpy as np\n'), ((5457, 5479), 'os.path.basename', 'os.path.basename', (['FILE'], {}), '(FILE)\n', (5473, 5479), False, 'import os\n'), ((9084, 9132), 'numpy.concatenate', 'np.concatenate', (['(X[:, None], Y[:, None])'], {'axis': '(1)'}), '((X[:, None], Y[:, None]), axis=1)\n', (9098, 9132), True, 'import numpy as np\n'), ((20675, 20697), 'os.path.basename', 'os.path.basename', (['FILE'], {}), '(FILE)\n', (20691, 20697), False, 'import os\n'), ((21702, 21713), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (21710, 21713), True, 'import numpy as np\n'), ((22037, 22061), 're.match', 're.match', (['"""gt\\\\d[lr]"""', 'k'], {}), "('gt\\\\d[lr]', k)\n", (22045, 22061), False, 'import re\n'), ((23410, 23421), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (23418, 23421), True, 'import numpy as np\n'), ((23621, 23632), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (23629, 23632), True, 'import numpy as np\n'), ((25193, 25204), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (25201, 25204), True, 'import numpy as np\n'), ((25411, 25422), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (25419, 25422), True, 'import numpy as np\n'), ((30315, 30336), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (30333, 30336), False, 'import os\n'), ((30499, 30520), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (30517, 30520), False, 'import os\n'), ((30690, 30711), 'os.path.expanduser', 
'os.path.expanduser', (['p'], {}), '(p)\n', (30708, 30711), False, 'import os\n')] |
# coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generated random variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from absl.testing import parameterized
import edward2 as ed
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability import distributions as tfd
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
class GeneratedRandomVariablesTest(parameterized.TestCase, tf.test.TestCase):
  """Checks ed.* random variables against their tfp distribution counterparts."""
  @test_util.run_in_graph_and_eager_modes
  def testBernoulliDoc(self):
    # ed.Bernoulli must carry a non-empty docstring that includes the
    # cleaned docstring of tfd.Bernoulli.__init__, and keep the name.
    self.assertGreater(len(ed.Bernoulli.__doc__), 0)
    self.assertIn(inspect.cleandoc(tfd.Bernoulli.__init__.__doc__),
                  ed.Bernoulli.__doc__)
    self.assertEqual(ed.Bernoulli.__name__, "Bernoulli")
  # Cases vary the logits vector length (1 or 5) and the sample count n.
  @parameterized.named_parameters(
      {"testcase_name": "1d_rv_1d_event", "logits": np.zeros(1), "n": [1]},
      {"testcase_name": "1d_rv_5d_event", "logits": np.zeros(1), "n": [5]},
      {"testcase_name": "5d_rv_1d_event", "logits": np.zeros(5), "n": [1]},
      {"testcase_name": "5d_rv_5d_event", "logits": np.zeros(5), "n": [5]},
  )
  @test_util.run_in_graph_and_eager_modes
  def testBernoulliLogProb(self, logits, n):
    # log_prob from the RV's underlying distribution must equal the
    # log_prob of an independently constructed tfd.Bernoulli.
    rv = ed.Bernoulli(logits)
    dist = tfd.Bernoulli(logits)
    x = rv.distribution.sample(n)
    rv_log_prob, dist_log_prob = self.evaluate(
        [rv.distribution.log_prob(x), dist.log_prob(x)])
    self.assertAllEqual(rv_log_prob, dist_log_prob)
  # Cases vary logits rank (scalar, 1-d, 2-d) and the sample shape n.
  @parameterized.named_parameters(
      {"testcase_name": "0d_rv_0d_sample",
       "logits": 0.,
       "n": 1},
      {"testcase_name": "0d_rv_1d_sample",
       "logits": 0.,
       "n": [1]},
      {"testcase_name": "1d_rv_1d_sample",
       "logits": np.array([0.]),
       "n": [1]},
      {"testcase_name": "1d_rv_5d_sample",
       "logits": np.array([0.]),
       "n": [5]},
      {"testcase_name": "2d_rv_1d_sample",
       "logits": np.array([-0.2, 0.8]),
       "n": [1]},
      {"testcase_name": "2d_rv_5d_sample",
       "logits": np.array([-0.2, 0.8]),
       "n": [5]},
  )
  @test_util.run_in_graph_and_eager_modes
  def testBernoulliSample(self, logits, n):
    # Sampling through the RV must yield the same shape as sampling the
    # equivalent tfd.Bernoulli directly.
    rv = ed.Bernoulli(logits)
    dist = tfd.Bernoulli(logits)
    self.assertEqual(rv.distribution.sample(n).shape, dist.sample(n).shape)
  # Each case declares a constructed RV and its expected sample/batch/event
  # shape decomposition.
  @parameterized.named_parameters(
      {"testcase_name": "0d_bernoulli",
       "rv": ed.Bernoulli(probs=0.5),
       "sample_shape": [],
       "batch_shape": [],
       "event_shape": []},
      {"testcase_name": "2d_bernoulli",
       "rv": ed.Bernoulli(tf.zeros([2, 3])),
       "sample_shape": [],
       "batch_shape": [2, 3],
       "event_shape": []},
      {"testcase_name": "2x0d_bernoulli",
       "rv": ed.Bernoulli(probs=0.5, sample_shape=2),
       "sample_shape": [2],
       "batch_shape": [],
       "event_shape": []},
      {"testcase_name": "2x1d_bernoulli",
       "rv": ed.Bernoulli(probs=0.5, sample_shape=[2, 1]),
       "sample_shape": [2, 1],
       "batch_shape": [],
       "event_shape": []},
      {"testcase_name": "3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros(3)),
       "sample_shape": [],
       "batch_shape": [],
       "event_shape": [3]},
      {"testcase_name": "2x3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros([2, 3])),
       "sample_shape": [],
       "batch_shape": [2],
       "event_shape": [3]},
      {"testcase_name": "1x3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros(3), sample_shape=1),
       "sample_shape": [1],
       "batch_shape": [],
       "event_shape": [3]},
      {"testcase_name": "2x1x3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros(3), sample_shape=[2, 1]),
       "sample_shape": [2, 1],
       "batch_shape": [],
       "event_shape": [3]},
  )
  @test_util.run_in_graph_and_eager_modes
  def testShape(self, rv, sample_shape, batch_shape, event_shape):
    # An RV's overall shape is the concatenation sample + batch + event.
    self.assertEqual(rv.shape, sample_shape + batch_shape + event_shape)
    self.assertEqual(rv.sample_shape, sample_shape)
    self.assertEqual(rv.distribution.batch_shape, batch_shape)
    self.assertEqual(rv.distribution.event_shape, event_shape)
  @parameterized.parameters(
      {"cls": ed.Normal, "value": 2, "loc": 0.5, "scale": 1.0},
      {"cls": ed.Normal, "value": [2], "loc": [0.5], "scale": [1.0]},
      {"cls": ed.Poisson, "value": 2, "rate": 0.5},
  )
  @test_util.run_in_graph_and_eager_modes
  def testValueShapeAndDtype(self, cls, value, **kwargs):
    # A user-supplied `value` must take the RV's full shape
    # (sample + batch + event) and the distribution's dtype.
    rv = cls(value=value, **kwargs)
    value_shape = rv.value.shape
    expected_shape = rv.sample_shape.concatenate(
        rv.distribution.batch_shape).concatenate(rv.distribution.event_shape)
    self.assertEqual(value_shape, expected_shape)
    self.assertEqual(rv.distribution.dtype, rv.value.dtype)
  # Each case supplies a `value` whose shape conflicts with the
  # distribution's parameters.
  @parameterized.parameters(
      {"cls": ed.Normal, "value": 2, "loc": [0.5, 0.5], "scale": 1.0},
      {"cls": ed.Normal, "value": 2, "loc": [0.5], "scale": [1.0]},
      {"cls": ed.Normal, "value": np.zeros([10, 3]), "loc": [0.5, 0.5],
       "scale": [1.0, 1.0]},
  )
  @test_util.run_in_graph_and_eager_modes
  def testValueMismatchRaises(self, cls, value, **kwargs):
    # Shape-incompatible values must be rejected at construction time.
    with self.assertRaises(ValueError):
      cls(value=value, **kwargs)
  @test_util.run_v1_only("Graph mode-only test.")
  def testValueUnknownShape(self):
    # should not raise error
    ed.Bernoulli(probs=0.5, value=tf1.placeholder(tf.int32))
  @test_util.run_in_graph_and_eager_modes
  def testMakeRandomVariable(self):
    """Tests that manual wrapping is the same as the built-in solution."""
    custom_normal = ed.make_random_variable(tfd.Normal)
    def model_builtin():
      return ed.Normal(1., 0.1, name="x")
    def model_wrapped():
      return custom_normal(1., 0.1, name="x")
    # Both models must produce identical log-joint values for the same x.
    log_joint_builtin = ed.make_log_joint_fn(model_builtin)
    log_joint_wrapped = ed.make_log_joint_fn(model_wrapped)
    self.assertEqual(self.evaluate(log_joint_builtin(x=7.)),
                     self.evaluate(log_joint_wrapped(x=7.)))
if __name__ == "__main__":
  # Run the TensorFlow test runner when executed as a script.
  tf.test.main()
| [
"tensorflow_probability.distributions.Bernoulli",
"edward2.Normal",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v1.placeholder",
"numpy.zeros",
"absl.testing.parameterized.parameters",
"edward2.make_log_joint_fn",
"tensorflow.python.framework.test_util.run_v1_o... | [((4738, 4939), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'cls': ed.Normal, 'value': 2, 'loc': 0.5, 'scale': 1.0}", "{'cls': ed.Normal, 'value': [2], 'loc': [0.5], 'scale': [1.0]}", "{'cls': ed.Poisson, 'value': 2, 'rate': 0.5}"], {}), "({'cls': ed.Normal, 'value': 2, 'loc': 0.5, 'scale':\n 1.0}, {'cls': ed.Normal, 'value': [2], 'loc': [0.5], 'scale': [1.0]}, {\n 'cls': ed.Poisson, 'value': 2, 'rate': 0.5})\n", (4762, 4939), False, 'from absl.testing import parameterized\n'), ((5813, 5859), 'tensorflow.python.framework.test_util.run_v1_only', 'test_util.run_v1_only', (['"""Graph mode-only test."""'], {}), "('Graph mode-only test.')\n", (5834, 5859), False, 'from tensorflow.python.framework import test_util\n'), ((6608, 6622), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (6620, 6622), True, 'import tensorflow.compat.v2 as tf\n'), ((1883, 1903), 'edward2.Bernoulli', 'ed.Bernoulli', (['logits'], {}), '(logits)\n', (1895, 1903), True, 'import edward2 as ed\n'), ((1915, 1936), 'tensorflow_probability.distributions.Bernoulli', 'tfd.Bernoulli', (['logits'], {}), '(logits)\n', (1928, 1936), True, 'from tensorflow_probability import distributions as tfd\n'), ((2815, 2835), 'edward2.Bernoulli', 'ed.Bernoulli', (['logits'], {}), '(logits)\n', (2827, 2835), True, 'import edward2 as ed\n'), ((2847, 2868), 'tensorflow_probability.distributions.Bernoulli', 'tfd.Bernoulli', (['logits'], {}), '(logits)\n', (2860, 2868), True, 'from tensorflow_probability import distributions as tfd\n'), ((6159, 6194), 'edward2.make_random_variable', 'ed.make_random_variable', (['tfd.Normal'], {}), '(tfd.Normal)\n', (6182, 6194), True, 'import edward2 as ed\n'), ((6360, 6395), 'edward2.make_log_joint_fn', 'ed.make_log_joint_fn', (['model_builtin'], {}), '(model_builtin)\n', (6380, 6395), True, 'import edward2 as ed\n'), ((6420, 6455), 'edward2.make_log_joint_fn', 'ed.make_log_joint_fn', 
(['model_wrapped'], {}), '(model_wrapped)\n', (6440, 6455), True, 'import edward2 as ed\n'), ((1296, 1344), 'inspect.cleandoc', 'inspect.cleandoc', (['tfd.Bernoulli.__init__.__doc__'], {}), '(tfd.Bernoulli.__init__.__doc__)\n', (1312, 1344), False, 'import inspect\n'), ((1531, 1542), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1539, 1542), True, 'import numpy as np\n'), ((1607, 1618), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1615, 1618), True, 'import numpy as np\n'), ((1683, 1694), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (1691, 1694), True, 'import numpy as np\n'), ((1759, 1770), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (1767, 1770), True, 'import numpy as np\n'), ((2386, 2401), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (2394, 2401), True, 'import numpy as np\n'), ((2480, 2495), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (2488, 2495), True, 'import numpy as np\n'), ((2574, 2595), 'numpy.array', 'np.array', (['[-0.2, 0.8]'], {}), '([-0.2, 0.8])\n', (2582, 2595), True, 'import numpy as np\n'), ((2675, 2696), 'numpy.array', 'np.array', (['[-0.2, 0.8]'], {}), '([-0.2, 0.8])\n', (2683, 2696), True, 'import numpy as np\n'), ((3034, 3057), 'edward2.Bernoulli', 'ed.Bernoulli', ([], {'probs': '(0.5)'}), '(probs=0.5)\n', (3046, 3057), True, 'import edward2 as ed\n'), ((3363, 3402), 'edward2.Bernoulli', 'ed.Bernoulli', ([], {'probs': '(0.5)', 'sample_shape': '(2)'}), '(probs=0.5, sample_shape=2)\n', (3375, 3402), True, 'import edward2 as ed\n'), ((3540, 3584), 'edward2.Bernoulli', 'ed.Bernoulli', ([], {'probs': '(0.5)', 'sample_shape': '[2, 1]'}), '(probs=0.5, sample_shape=[2, 1])\n', (3552, 3584), True, 'import edward2 as ed\n'), ((5564, 5581), 'numpy.zeros', 'np.zeros', (['[10, 3]'], {}), '([10, 3])\n', (5572, 5581), True, 'import numpy as np\n'), ((6234, 6263), 'edward2.Normal', 'ed.Normal', (['(1.0)', '(0.1)'], {'name': '"""x"""'}), "(1.0, 0.1, name='x')\n", (6243, 6263), True, 'import edward2 as 
ed\n'), ((3205, 3221), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (3213, 3221), True, 'import tensorflow.compat.v2 as tf\n'), ((3736, 3747), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['(3)'], {}), '(3)\n', (3744, 3747), True, 'import tensorflow.compat.v2 as tf\n'), ((3899, 3915), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (3907, 3915), True, 'import tensorflow.compat.v2 as tf\n'), ((4068, 4079), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['(3)'], {}), '(3)\n', (4076, 4079), True, 'import tensorflow.compat.v2 as tf\n'), ((4250, 4261), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['(3)'], {}), '(3)\n', (4258, 4261), True, 'import tensorflow.compat.v2 as tf\n'), ((5958, 5983), 'tensorflow.compat.v1.placeholder', 'tf1.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (5973, 5983), True, 'import tensorflow.compat.v1 as tf1\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.