code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import tensorflow as tf
from Autoencoders import add_gaussian_noise
mnist = tf.keras.datasets.mnist
def load_auto_encoder_mnist_data(noise_sigma=None, with_test=False):
    """Load MNIST for autoencoder training, scaled to [0, 1].

    :param noise_sigma: If truthy, Gaussian noise with this sigma is added to
        the returned "input" images; targets stay clean.
    :param with_test: If True, keep train/test separate and also return labels.
    :return: With ``with_test``: (noisy_train, clean_train, y_train,
        noisy_test, clean_test, y_test). Otherwise: (noisy_images, images)
        where train and test sets are concatenated.
    """
    def _noisy(clean):
        # Per-image Gaussian noise when a sigma is given; clean data otherwise.
        if noise_sigma:
            return np.array([add_gaussian_noise(img, noise_sigma) for img in clean])
        return clean

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train / 255.0
    x_test = x_test / 255.0
    if with_test:
        return (_noisy(x_train), x_train, y_train,
                _noisy(x_test), x_test, y_test)
    images = np.concatenate((x_train, x_test), axis=0)
    return _noisy(images), images
def load_gan_mnist_data():
    """Load MNIST training images for GAN training, normalized to [-1, 1].

    :return: Training images of shape (n_samples, 28, 28, 1) with pixel
        values mapped from [0, 255] to [-1, 1].
    """
    # Only the training images are used; labels and the test set are dropped.
    (x_train, _), _ = mnist.load_data()
    mean = 255.0 / 2
    x_train = (x_train - mean) / mean  # map [0, 255] -> [-1, 1]
    # Add the trailing channel axis expected by convolutional networks.
    # NOTE: the original also concatenated the (normalized) test set into an
    # unused local variable; that dead code has been removed.
    return x_train.reshape(x_train.shape[0], 28, 28, 1)
| [
"Autoencoders.add_gaussian_noise",
"numpy.concatenate"
] | [((691, 732), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {'axis': '(0)'}), '((x_train, x_test), axis=0)\n', (705, 732), True, 'import numpy as np\n'), ((1132, 1173), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {'axis': '(0)'}), '((x_train, x_test), axis=0)\n', (1146, 1173), True, 'import numpy as np\n'), ((812, 850), 'Autoencoders.add_gaussian_noise', 'add_gaussian_noise', (['image', 'noise_sigma'], {}), '(image, noise_sigma)\n', (830, 850), False, 'from Autoencoders import add_gaussian_noise\n'), ((381, 415), 'Autoencoders.add_gaussian_noise', 'add_gaussian_noise', (['x', 'noise_sigma'], {}), '(x, noise_sigma)\n', (399, 415), False, 'from Autoencoders import add_gaussian_noise\n'), ((514, 548), 'Autoencoders.add_gaussian_noise', 'add_gaussian_noise', (['x', 'noise_sigma'], {}), '(x, noise_sigma)\n', (532, 548), False, 'from Autoencoders import add_gaussian_noise\n')] |
"""
Possible bug in fb prophet for multiplicative seasonalities.
"""
# %%
import numpy as np
import pandas as pd
import prophet
# %%
df = pd.DataFrame()
df['ds'] = pd.date_range(start='2019-01-01',end='2021-01-01',freq='4H')
df['daily_effect'] = np.cos(df['ds'].dt.hour/24 * 2*np.pi)*-0.5+0.5
df['yearly_effect'] = np.cos(df['ds'].dt.dayofyear/365 * 2*np.pi)*-0.3 + 0.7
df['y'] = df['daily_effect'] * df['yearly_effect']
df.set_index('ds')['y'].plot()
df.set_index('ds').loc['2020-01-01':'2020-01-05',:].plot()
df.set_index('ds').loc['2020-07-01':'2020-07-05',:].plot()
# %%
m = prophet.Prophet(growth='flat',weekly_seasonality=False,seasonality_mode='multiplicative')
m.fit(df)
# %%
df_future = m.make_future_dataframe(periods=365*6,freq='4H', include_history=False)
df_forecast = m.predict(df_future)
df_forecast['yhat2'] = df_forecast['trend'] * (1 + df_forecast['yearly']) * (1 + df_forecast['daily'])
# %%
m.plot(df_forecast)
fig = m.plot_components(df_forecast)
# %%
df_forecast.set_index('ds')['yhat'].plot()
df_forecast.set_index('ds')['yhat2'].plot()
# %%
df['y'] = np.log(df['y']+1)
# %%
m = prophet.Prophet(growth='flat',weekly_seasonality=False,seasonality_mode='additive')
m.fit(df)
# %%
df_future = m.make_future_dataframe(periods=365*6,freq='4H', include_history=False)
df_forecast = m.predict(df_future)
df_forecast['yhat'] = np.exp(df_forecast['yhat']) - 1
df_forecast['yhat_lower'] = np.exp(df_forecast['yhat_lower']) - 1
df_forecast['yhat_upper'] = np.exp(df_forecast['yhat_upper']) - 1
# proposed aggregation
df_forecast['yhat2'] = df_forecast['trend'] * (1 + df_forecast['yearly']) * (1 + df_forecast['daily'])
# %%
m.plot(df_forecast)
fig = m.plot_components(df_forecast)
# %%
df_forecast.set_index('ds')['yhat'].plot()
df_forecast.set_index('ds')['yhat2'].plot()
| [
"pandas.DataFrame",
"pandas.date_range",
"numpy.log",
"prophet.Prophet",
"numpy.exp",
"numpy.cos"
] | [((140, 154), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (152, 154), True, 'import pandas as pd\n'), ((166, 228), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2019-01-01"""', 'end': '"""2021-01-01"""', 'freq': '"""4H"""'}), "(start='2019-01-01', end='2021-01-01', freq='4H')\n", (179, 228), True, 'import pandas as pd\n'), ((584, 680), 'prophet.Prophet', 'prophet.Prophet', ([], {'growth': '"""flat"""', 'weekly_seasonality': '(False)', 'seasonality_mode': '"""multiplicative"""'}), "(growth='flat', weekly_seasonality=False, seasonality_mode=\n 'multiplicative')\n", (599, 680), False, 'import prophet\n'), ((1087, 1106), 'numpy.log', 'np.log', (["(df['y'] + 1)"], {}), "(df['y'] + 1)\n", (1093, 1106), True, 'import numpy as np\n'), ((1114, 1204), 'prophet.Prophet', 'prophet.Prophet', ([], {'growth': '"""flat"""', 'weekly_seasonality': '(False)', 'seasonality_mode': '"""additive"""'}), "(growth='flat', weekly_seasonality=False, seasonality_mode=\n 'additive')\n", (1129, 1204), False, 'import prophet\n'), ((1355, 1382), 'numpy.exp', 'np.exp', (["df_forecast['yhat']"], {}), "(df_forecast['yhat'])\n", (1361, 1382), True, 'import numpy as np\n'), ((1415, 1448), 'numpy.exp', 'np.exp', (["df_forecast['yhat_lower']"], {}), "(df_forecast['yhat_lower'])\n", (1421, 1448), True, 'import numpy as np\n'), ((1481, 1514), 'numpy.exp', 'np.exp', (["df_forecast['yhat_upper']"], {}), "(df_forecast['yhat_upper'])\n", (1487, 1514), True, 'import numpy as np\n'), ((248, 289), 'numpy.cos', 'np.cos', (["(df['ds'].dt.hour / 24 * 2 * np.pi)"], {}), "(df['ds'].dt.hour / 24 * 2 * np.pi)\n", (254, 289), True, 'import numpy as np\n'), ((317, 364), 'numpy.cos', 'np.cos', (["(df['ds'].dt.dayofyear / 365 * 2 * np.pi)"], {}), "(df['ds'].dt.dayofyear / 365 * 2 * np.pi)\n", (323, 364), True, 'import numpy as np\n')] |
import numpy as np
from ravel.schema import fields
class Array(fields.List):
    """List field whose processed value is a ``numpy.ndarray``."""

    def process(self, obj):
        """Delegate processing to ``fields.List`` and cast the result.

        :param obj: Raw value to process.
        :return: ``(ndarray, None)`` on success, ``(None, error)`` on failure.
        """
        value, err = super().process(obj)
        if err:
            # Propagate the parent's validation error unchanged.
            return (None, err)
        # The nested field dictates the element dtype of the array.
        return (np.array(value, dtype=self.nested.np_dtype), None)
"numpy.array"
] | [((225, 276), 'numpy.array', 'np.array', (['processed_obj'], {'dtype': 'self.nested.np_dtype'}), '(processed_obj, dtype=self.nested.np_dtype)\n', (233, 276), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Supported by <NAME> <<EMAIL>>
# License: BSD 3 clause
import numbers
from warnings import warn
import numpy as np
from sklearn.base import BaseEstimator, OutlierMixin
from sklearn.metrics import euclidean_distances
from sklearn.utils.validation import check_is_fitted, check_random_state
MAX_INT = np.iinfo(np.int32).max
MIN_FLOAT = np.finfo(float).eps
class IsolationNNE(OutlierMixin, BaseEstimator):
    """ Isolation-based anomaly detection using nearest-neighbor ensembles.

    The INNE algorithm uses the nearest neighbour ensemble to isolate anomalies.
    It partitions the data space into regions using a subsample and determines an
    isolation score for each region. As each region adapts to local distribution,
    the calculated isolation score is a local measure that is relative to the local
    neighbourhood, enabling it to detect both global and local anomalies. INNE has
    linear time complexity to efficiently handle large and high-dimensional datasets
    with complex distributions.

    Parameters
    ----------
    n_estimators : int, default=200
        The number of base estimators in the ensemble.
    max_samples : int, default="auto"
        The number of samples to draw from X to train each base estimator.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples` * X.shape[0]` samples.
        - If "auto", then `max_samples=min(16, n_samples)`.
    contamination : "auto" or float, default="auto"
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Used when fitting to define the threshold
        on the scores of the samples.
        - If "auto", the threshold is determined as in the original paper.
        - If float, the contamination should be in the range (0, 0.5].
    random_state : int, RandomState instance or None, default=None
        Controls the pseudo-randomness of the selection of the feature
        and split values for each branching step and each tree in the forest.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
       "Isolation-based anomaly detection using nearest-neighbor ensembles." In Computational
       Intelligence, vol. 34, 2018, pp. 968-998.

    Examples
    --------
    >>> from inne import IsolationNNE
    >>> import numpy as np
    >>> X = [[-1.1], [0.3], [0.5], [100]]
    >>> clf = IsolationNNE().fit(X)
    >>> clf.predict([[0.1], [0], [90]])
    array([ 1,  1, -1])
    """
    def __init__(self, n_estimators=200, max_samples="auto", contamination="auto", random_state=None):
        self.n_estimators = n_estimators
        self.max_samples = max_samples
        self.random_state = random_state
        self.contamination = contamination
    def fit(self, X, y=None):
        """
        Fit estimator.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Check data
        X = self._validate_data(X, accept_sparse=False)
        n_samples = X.shape[0]
        # Resolve the max_samples parameter into a concrete integer.
        if isinstance(self.max_samples, str):
            if self.max_samples == "auto":
                max_samples = min(16, n_samples)
            else:
                raise ValueError(
                    "max_samples (%s) is not supported."
                    'Valid choices are: "auto", int or'
                    "float"
                    % self.max_samples
                )
        elif isinstance(self.max_samples, numbers.Integral):
            if self.max_samples > n_samples:
                warn(
                    "max_samples (%s) is greater than the "
                    "total number of samples (%s). max_samples "
                    "will be set to n_samples for estimation."
                    % (self.max_samples, n_samples)
                )
                max_samples = n_samples
            else:
                max_samples = self.max_samples
        else:  # float: interpret as a fraction of the data set size
            if not 0.0 < self.max_samples <= 1.0:
                raise ValueError(
                    "max_samples must be in (0, 1], got %r" % self.max_samples
                )
            max_samples = int(self.max_samples * X.shape[0])
        self.max_samples_ = max_samples
        self._fit(X)
        self.is_fitted_ = True
        # contamination is validated only after fitting; a float outside
        # (0, 0.5] raises here.
        if self.contamination != "auto":
            if not (0.0 < self.contamination <= 0.5):
                raise ValueError(
                    "contamination must be in (0, 0.5], got: %f" % self.contamination
                )
        if self.contamination == "auto":
            # 0.5 plays a special role as described in the original paper.
            # we take the opposite as we consider the opposite of their score.
            self.offset_ = -0.5
        else:
            # else, define offset_ wrt contamination parameter
            self.offset_ = np.percentile(
                self.score_samples(X), 100.0 * self.contamination)
        return self
    def _fit(self, X):
        """Build the ensemble of hypersphere sets from random subsamples.

        For each of the ``n_estimators`` members, ``max_samples_`` centroids
        are drawn without replacement; each centroid's radius is the distance
        to its nearest neighbouring centroid.
        """
        n_samples, n_features = X.shape
        self._centroids = np.empty(
            [self.n_estimators, self.max_samples_, n_features])
        self._ratio = np.empty([self.n_estimators, self.max_samples_])
        self._centroids_radius = np.empty(
            [self.n_estimators, self.max_samples_])
        random_state = check_random_state(self.random_state)
        # One seed per ensemble member keeps the subsampling reproducible.
        self._seeds = random_state.randint(MAX_INT, size=self.n_estimators)
        for i in range(self.n_estimators):
            rnd = check_random_state(self._seeds[i])
            center_index = rnd.choice(
                n_samples, self.max_samples_, replace=False)
            self._centroids[i] = X[center_index]
            center_dist = euclidean_distances(
                self._centroids[i], self._centroids[i], squared=True)
            # a centroid is not its own nearest neighbour
            np.fill_diagonal(center_dist, np.inf)
            # radius of each hypersphere is the Nearest Neighbors distance of centroid.
            self._centroids_radius[i] = np.amin(center_dist, axis=1)
            # Nearest Neighbors of centroids
            cnn_index = np.argmin(center_dist, axis=1)
            cnn_radius = self._centroids_radius[i][cnn_index]
            # isolation score of each hypersphere (MIN_FLOAT guards /0)
            self._ratio[i] = 1 - (cnn_radius + MIN_FLOAT) / \
                (self._centroids_radius[i] + MIN_FLOAT)
        return self
    def predict(self, X):
        """
        Predict if a particular sample is an outlier or not.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            For each observation, tells whether or not (+1 or -1) it should
            be considered as an inlier according to the fitted model.
        """
        check_is_fitted(self)
        decision_func = self.decision_function(X)
        is_inlier = np.ones_like(decision_func, dtype=int)
        # negative decision values are outliers
        is_inlier[decision_func < 0] = -1
        return is_inlier
    def decision_function(self, X):
        """
        Average anomaly score of X of the base classifiers.

        The anomaly score of an input sample is computed as
        the mean anomaly score over the ensemble members.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32``.

        Returns
        -------
        scores : ndarray of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more abnormal. Negative scores represent outliers,
            positive scores represent inliers.
        """
        # We subtract self.offset_ to make 0 be the threshold value for being
        # an outlier.
        return self.score_samples(X) - self.offset_
    def score_samples(self, X):
        """
        Opposite of the anomaly score defined in the original paper.

        The anomaly score of an input sample is computed as
        the mean anomaly score of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        scores : ndarray of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more abnormal.
        """
        check_is_fitted(self, 'is_fitted_')
        # Check data
        X = self._validate_data(X, accept_sparse=False, reset=False)
        # initialized to 1: samples not covered by any hypersphere keep the
        # maximal (most anomalous) per-estimator score
        isolation_scores = np.ones([self.n_estimators, X.shape[0]])
        # each test instance is evaluated against n_estimators sets of hyperspheres
        for i in range(self.n_estimators):
            x_dists = euclidean_distances(X, self._centroids[i], squared=True)
            # find instances that are covered by at least one hypersphere.
            cover_radius = np.where(
                x_dists <= self._centroids_radius[i], self._centroids_radius[i], np.nan)
            x_covered = np.where(~np.isnan(cover_radius).all(axis=1))
            # the centroid of the hypersphere covering x and having the smallest radius
            cnn_x = np.nanargmin(cover_radius[x_covered], axis=1)
            isolation_scores[i][x_covered] = self._ratio[i][cnn_x]
        # the isolation scores are averaged to produce the anomaly score
        scores = np.mean(isolation_scores, axis=0)
        # negated so that *lower* values mean *more* abnormal (sklearn style)
        return -scores
| [
"numpy.nanargmin",
"numpy.fill_diagonal",
"numpy.ones_like",
"numpy.amin",
"numpy.empty",
"numpy.iinfo",
"numpy.ones",
"sklearn.utils.validation.check_is_fitted",
"numpy.argmin",
"numpy.finfo",
"sklearn.utils.validation.check_random_state",
"numpy.mean",
"sklearn.metrics.euclidean_distances"... | [((368, 386), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (376, 386), True, 'import numpy as np\n'), ((403, 418), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (411, 418), True, 'import numpy as np\n'), ((5547, 5607), 'numpy.empty', 'np.empty', (['[self.n_estimators, self.max_samples_, n_features]'], {}), '([self.n_estimators, self.max_samples_, n_features])\n', (5555, 5607), True, 'import numpy as np\n'), ((5643, 5691), 'numpy.empty', 'np.empty', (['[self.n_estimators, self.max_samples_]'], {}), '([self.n_estimators, self.max_samples_])\n', (5651, 5691), True, 'import numpy as np\n'), ((5725, 5773), 'numpy.empty', 'np.empty', (['[self.n_estimators, self.max_samples_]'], {}), '([self.n_estimators, self.max_samples_])\n', (5733, 5773), True, 'import numpy as np\n'), ((5811, 5848), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (5829, 5848), False, 'from sklearn.utils.validation import check_is_fitted, check_random_state\n'), ((7430, 7451), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (7445, 7451), False, 'from sklearn.utils.validation import check_is_fitted, check_random_state\n'), ((7522, 7560), 'numpy.ones_like', 'np.ones_like', (['decision_func'], {'dtype': 'int'}), '(decision_func, dtype=int)\n', (7534, 7560), True, 'import numpy as np\n'), ((9010, 9045), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""is_fitted_"""'], {}), "(self, 'is_fitted_')\n", (9025, 9045), False, 'from sklearn.utils.validation import check_is_fitted, check_random_state\n'), ((9164, 9204), 'numpy.ones', 'np.ones', (['[self.n_estimators, X.shape[0]]'], {}), '([self.n_estimators, X.shape[0]])\n', (9171, 9204), True, 'import numpy as np\n'), ((9994, 10027), 'numpy.mean', 'np.mean', (['isolation_scores'], {'axis': '(0)'}), '(isolation_scores, axis=0)\n', (10001, 10027), 
True, 'import numpy as np\n'), ((5987, 6021), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['self._seeds[i]'], {}), '(self._seeds[i])\n', (6005, 6021), False, 'from sklearn.utils.validation import check_is_fitted, check_random_state\n'), ((6198, 6271), 'sklearn.metrics.euclidean_distances', 'euclidean_distances', (['self._centroids[i]', 'self._centroids[i]'], {'squared': '(True)'}), '(self._centroids[i], self._centroids[i], squared=True)\n', (6217, 6271), False, 'from sklearn.metrics import euclidean_distances\n'), ((6301, 6338), 'numpy.fill_diagonal', 'np.fill_diagonal', (['center_dist', 'np.inf'], {}), '(center_dist, np.inf)\n', (6317, 6338), True, 'import numpy as np\n'), ((6467, 6495), 'numpy.amin', 'np.amin', (['center_dist'], {'axis': '(1)'}), '(center_dist, axis=1)\n', (6474, 6495), True, 'import numpy as np\n'), ((6565, 6595), 'numpy.argmin', 'np.argmin', (['center_dist'], {'axis': '(1)'}), '(center_dist, axis=1)\n', (6574, 6595), True, 'import numpy as np\n'), ((9354, 9410), 'sklearn.metrics.euclidean_distances', 'euclidean_distances', (['X', 'self._centroids[i]'], {'squared': '(True)'}), '(X, self._centroids[i], squared=True)\n', (9373, 9410), False, 'from sklearn.metrics import euclidean_distances\n'), ((9514, 9599), 'numpy.where', 'np.where', (['(x_dists <= self._centroids_radius[i])', 'self._centroids_radius[i]', 'np.nan'], {}), '(x_dists <= self._centroids_radius[i], self._centroids_radius[i],\n np.nan)\n', (9522, 9599), True, 'import numpy as np\n'), ((9791, 9836), 'numpy.nanargmin', 'np.nanargmin', (['cover_radius[x_covered]'], {'axis': '(1)'}), '(cover_radius[x_covered], axis=1)\n', (9803, 9836), True, 'import numpy as np\n'), ((4060, 4229), 'warnings.warn', 'warn', (["('max_samples (%s) is greater than the total number of samples (%s). max_samples will be set to n_samples for estimation.'\n % (self.max_samples, n_samples))"], {}), "(\n 'max_samples (%s) is greater than the total number of samples (%s). 
max_samples will be set to n_samples for estimation.'\n % (self.max_samples, n_samples))\n", (4064, 4229), False, 'from warnings import warn\n'), ((9647, 9669), 'numpy.isnan', 'np.isnan', (['cover_radius'], {}), '(cover_radius)\n', (9655, 9669), True, 'import numpy as np\n')] |
"""Processes a CRD file.
Note: Interfacing with external files is done in the `interfacer.py` library.
"""
from pathlib import Path
from typing import List, Tuple
import warnings
from iniabu import ini
import numpy as np
from . import processor_utils
from .data_io.crd_reader import CRDReader
from .utilities import peirce, string_transformer, utils
class CRDFileProcessor:
    """Process a CRD file in this class: dead time corrections, filtering, etc.

    Computationally expensive routines are sourced out into
    ``processor_utils.py`` for jitting.

    .. todo:: Add a usage example.
    """
    def __init__(self, fname: Path) -> None:
        """Initialize the processor and read the CRD file that is wanted.

        :param fname: Filename of CRD file to be processed
        :type fname: Path
        """
        # read in the CRD file
        self.fname = fname
        self.crd = CRDReader(fname)
        self.ions_per_shot = self.crd.ions_per_shot
        self.ions_to_tof_map = self.crd.ions_to_tof_map
        self.all_tofs = self.crd.all_tofs
        # Data, ToF, and Masses (filled in by later processing steps)
        self.tof = None
        self.mass = None
        self.data = None
        self.data_pkg = None
        # variables for filtered packages
        self._filter_max_ion_per_pkg_applied = False  # was max ions per pkg run?
        self._pkg_size = None  # max ions filtered with
        self._filter_max_ion_per_pkg_ind = None  # indices of pkgs that were trashed
        # Integrals
        self.integrals = None
        self.integrals_pkg = None
        # parameters for calibration and evaluation
        self._params_mcal = None  # mass calibration
        self._params_integrals = None  # integral definitions
        self._params_backgrounds = None  # bg_correction
        self._peak_fwhm = 0.0646  # peak fwhm in us
        self._us_to_chan = None  # how to change microseconds to channel / bin number
        # file info
        self.nof_shots = self.crd.nof_shots
        self.nof_shots_pkg = None
    # PROPERTIES #
    @property
    def def_backgrounds(self) -> Tuple[List[str], np.ndarray]:
        """Background definitions for integrals.

        The definitions consist of a tuple of a list and a np.ndarray.
        The list contains first the names of the integrals.
        The np.ndarray then contains in each row the lower and upper limit in amu of
        the peak that needs to be integrated.

        .. note:: The format for defining backgrounds is the same as the format for
            defining integrals, except that peaks can occur multiple times for
            multiple backgrounds.

        :return: Background definitions.

        :raise ValueError: Data shape is wrong (raised by the setter).

        Example:
            >>> data = CRDFileProcessor("my_data.crd")
            >>> peak_names = ["54Fe", "54Fe"]
            >>> peak_limits = np.array([[53.4, 53.6], [54.4, 54.6]])
            >>> data.def_backgrounds = (peak_names, peak_limits)
        """
        return self._params_backgrounds
@def_backgrounds.setter
def def_backgrounds(self, value):
if not value: # empty list is passed
self._params_backgrounds = None
else:
if len(value) != 2:
raise ValueError("Data tuple must be of length 2.")
if len(value[0]) != len(value[1]):
raise ValueError("Name and data array must have the same length.")
if value[1].shape[1] != 2:
raise ValueError("The data array must have 2 entries for every line.")
self._params_backgrounds = value
    @property
    def def_mcal(self) -> np.ndarray:
        """Mass calibration definitions.

        :return: Mass calibration definitions. The columns are as following:
            1st: ToF (us)
            2nd: Mass (amu)

        :raise TypeError: Value is not a numpy ndarray (raised by the setter).
        :raise ValueError: At least two parameters must be given for a mass calibration.
        :raise ValueError: The array is of the wrong shape.
        """
        return self._params_mcal
@def_mcal.setter
def def_mcal(self, value):
if not isinstance(value, np.ndarray):
raise TypeError(
f"Mass calibration definition must be given as a numpy "
f"ndarray but is a {type(value)}."
)
if value.shape[0] < 2:
raise ValueError("At least two mass calibration points must be given.")
if value.shape[1] != 2:
raise ValueError("The mass calibration definition is of the wrong shape.")
self._params_mcal = value
    @property
    def def_integrals(self) -> Tuple[List[str], np.ndarray]:
        """Integral definitions.

        The definitions consist of a tuple of a list and a np.ndarray.
        The list contains first the names of the integrals.
        The np.ndarray then contains in each row the lower and upper limit in amu of
        the peak that needs to be integrated.

        :return: Integral definitions.

        :raise ValueError: Data shape is wrong (raised by the setter).
        :raise ValueError: More than one definition exist for a given peak.

        Example:
            >>> data = CRDFileProcessor("my_data.crd")
            >>> peak_names = ["54Fe", "64Ni"]
            >>> peak_limits = np.array([[53.8, 54.2], [63.5, 64.5]])
            >>> data.def_integrals = (peak_names, peak_limits)
        """
        return self._params_integrals
@def_integrals.setter
def def_integrals(self, value):
if not value: # empty list is passed
self._params_integrals = None
else:
if len(value) != 2:
raise ValueError("Data tuple must be of length 2.")
if len(value[0]) != len(value[1]):
raise ValueError("Name and data array must have the same length.")
if value[1].shape[1] != 2:
raise ValueError("The data array must have 2 entries for every line.")
if len(value[0]) != len(set(value[0])):
raise ValueError(
"The peak names for integral definitions must be unique."
)
self._params_integrals = value
    @property
    def peak_fwhm(self) -> float:
        """Get / Set the FWHM of the peak.

        :return: FWHM of the peak in us.
        """
        return self._peak_fwhm
    @peak_fwhm.setter
    def peak_fwhm(self, value: float) -> None:
        # FWHM in microseconds; no validation is performed here.
        self._peak_fwhm = value
    @property
    def us_to_chan(self) -> float:
        """Conversion factor for microseconds to channel / bin number.

        :return: Conversion factor
        """
        return self._us_to_chan
    @us_to_chan.setter
    def us_to_chan(self, value: float) -> None:
        # Factor multiplied onto times in us to get channel numbers.
        self._us_to_chan = value
    # METHODS #
    # fixme make sure that the following docstring is actually correct
    def dead_time_correction(self, dbins: int) -> None:
        """Perform a dead time correction on the whole spectrum.

        If packages were set, the dead time correction is performed on each package
        individually as well.

        :param dbins: Number of dead bins after original bin (total - 1).

        :warning.warn: There are no shots left in the package. No deadtime
            correction can be applied.
        """
        if self.nof_shots == 0:
            warnings.warn("No data available; maybe all shots were filtered out?")
            return
        # The correction routine expects 2D input (packages x channels), so the
        # single spectrum is wrapped into a one-row array and unwrapped after.
        self.data = processor_utils.dead_time_correction(
            self.data.reshape(1, self.data.shape[0]),
            np.array(self.nof_shots).reshape(1),
            dbins,
        )[
            0
        ]  # want to shape it back the way it was!
        if self.data_pkg is not None:
            self.data_pkg = processor_utils.dead_time_correction(
                self.data_pkg, self.nof_shots_pkg, dbins
            )
def filter_max_ions_per_pkg(self, max_ions: int) -> None:
"""Filter out packages with too many ions.
.. note:: Only run more than once if filtering out more. Otherwise, you need
to reset the dataset first.
:param max_ions: Maximum number of ions per package.
:raises ValueError: Invalid range for number of ions.
:raises IOError: No package data available.
"""
if max_ions < 1:
raise ValueError("The maximum number of ions must be larger than 1.")
if self.data_pkg is None:
raise IOError("There is no packaged data. Please create packages first.")
# update helper variables
self._filter_max_ion_per_pkg_applied = True
total_ions_per_pkg = np.sum(self.data_pkg, axis=1)
self._filter_max_ion_per_pkg_ind = np.where(total_ions_per_pkg > max_ions)[0]
self.data_pkg = np.delete(
self.data_pkg, self._filter_max_ion_per_pkg_ind, axis=0
)
self.nof_shots_pkg = np.delete(
self.nof_shots_pkg, self._filter_max_ion_per_pkg_ind, axis=0
)
self.data = np.sum(self.data_pkg, axis=0)
self.nof_shots = np.sum(self.nof_shots_pkg)
def filter_max_ions_per_shot(self, max_ions: int) -> None:
"""Filter out shots that have more than the max_ions defined.
.. note:: Only run more than once if filtering out more. Otherwise, you need
to reset the dataset first.
:param max_ions: Maximum number of ions allowed in a shot.
:raises ValueError: Invalid range for number of ions.
"""
if max_ions < 1:
raise ValueError("The maximum number of ions must be >=1.")
shots_rejected = np.where(self.ions_per_shot > max_ions)[0]
self._filter_individual_shots(shots_rejected)
def filter_max_ions_per_time(self, max_ions: int, time_us: float) -> None:
"""Filter shots with >= max ions per time, i.e., due to ringing.
:param max_ions: Maximum number of ions that is allowed within a time window.
:param time_us: Width of the time window in microseconds (us)
"""
time_chan = time_us * self.us_to_chan
shots_to_check = np.where(self.ions_per_shot > max_ions)[0]
if shots_to_check.shape == (0,): # nothing needs to be done
return
all_tofs_filtered = self._all_tofs_filtered(shots_to_check)
shot_mask = processor_utils.mask_filter_max_ions_per_time(
self.ions_per_shot[shots_to_check], all_tofs_filtered, max_ions, time_chan
)
shots_rejected = shots_to_check[shot_mask]
if shots_rejected.shape != (0,):
self._filter_individual_shots(shots_rejected)
def filter_max_ions_per_tof_window(
self, max_ions: int, tof_window: np.array
) -> None:
"""Filer out maximum number of ions in a given ToF time window.
:param max_ions: Maximum number of ions in the time window.
:param tof_window: The time of flight window that the ions would have to be in.
Array of start and stop time of flight (2 entries).
:raises ValueError: Length of `tof_window` is wrong.
"""
if len(tof_window) != 2:
raise ValueError(
"ToF window must be specified with two entries: the start "
"and the stop time of the window."
)
# convert to int to avoid weird float issues
channel_window = np.array(tof_window * self.us_to_chan, dtype=int)
shots_to_check = np.where(self.ions_per_shot > max_ions)[0]
if shots_to_check.shape == (0,): # nothing needs to be done
return
all_tofs_filtered = self._all_tofs_filtered(shots_to_check)
shot_mask = processor_utils.mask_filter_max_ions_per_tof_window(
self.ions_per_shot[shots_to_check],
all_tofs_filtered,
max_ions,
channel_window,
)
shots_rejected = shots_to_check[shot_mask]
if shots_rejected.shape != (0,):
self._filter_individual_shots(shots_rejected)
    def filter_pkg_peirce_countrate(self) -> None:
        """Filter out packages based on Peirce criterion for total count rate.

        .. warning:: Running this more than once might lead to weird results. You have
            been warned!

        Now we are going to directly use all the integrals to get the sum of the counts,
        which we will then feed to the rejection routine. Maybe this can detect blasts.
        """
        # total counts per package, summed over all defined integrals
        sum_integrals = self.integrals_pkg[:, :, 0].sum(axis=1)
        _, _, _, rejected_indexes = peirce.reject_outliers(sum_integrals)
        print(
            f"Peirce criterion rejected "
            f"{len(rejected_indexes)} / {len(self.integrals_pkg)} "
            f"packages"
        )
        index_list = list(map(int, rejected_indexes))
        # drop the rejected packages from all package-level data
        integrals_pkg = np.delete(self.integrals_pkg, index_list, axis=0)
        self.nof_shots_pkg = np.delete(self.nof_shots_pkg, index_list)
        self.nof_shots = np.sum(self.nof_shots_pkg)
        # integrals
        integrals = np.zeros_like(self.integrals)
        integrals[:, 0] = integrals_pkg.sum(axis=0)[:, 0]
        # package uncertainties add in quadrature
        integrals[:, 1] = np.sqrt(np.sum(integrals_pkg[:, :, 1] ** 2, axis=0))
        # write back
        self.integrals = integrals
        self.integrals_pkg = integrals_pkg
    def filter_pkg_peirce_delta(self, ratios: List[Tuple[str, str]]) -> None:
        """Filter out packages based on Peirce criterion for delta values.

        # fixme: this routine needs to be deleted!

        For the given ratios, calculate the isotope ratio for each package, then
        calculate the delta values using Solar System abundances. Based on these values,
        apply Peirce's criterion to reject outliers and delete them from the packages.
        Ultimately, create the total sum of the integrals again.

        # fixme there's a lot to do here...

        :param ratios: Ratios to consider, e.g., (("46Ti", "48Ti"), ("47Ti", "48Ti")).
            These ratios must have the same names as the integrals and must be valid
            isotope names.
        """
        # NOTE(review): assumes ``ratios`` is non-empty; an empty input raises
        # an IndexError further down -- confirm intended behavior.
        peaks = self.def_integrals[0]
        integrals = self.integrals_pkg[:, :, 0]
        rejected_indexes = []  # we will append numpy arrays here
        for ratio in ratios:
            # per-package isotope ratio of nominator over denominator peak
            int_ratio = (
                integrals[:, peaks.index(ratio[0])]
                / integrals[:, peaks.index(ratio[1])]
            )
            # delta values w.r.t. Solar System abundances via iniabu
            int_delta = ini.iso_delta(
                string_transformer.iso_to_iniabu(ratio[0]),
                string_transformer.iso_to_iniabu(ratio[1]),
                int_ratio,
            )
            _, _, _, indexes = peirce.reject_outliers(int_delta)
            rejected_indexes.append(indexes)
        # now create a set out of the indexes and then sort them to become a list
        index_set = set(rejected_indexes[0])
        for ind in range(1, len(rejected_indexes)):
            index_set = index_set.union(rejected_indexes[ind])
        index_list = sorted(map(int, index_set))
        print(
            f"Peirce criterion rejected {len(index_list)} / {len(self.integrals_pkg)} "
            f"packages"
        )
        # drop the rejected packages from all package-level data
        integrals_pkg = np.delete(self.integrals_pkg, index_list, axis=0)
        self.nof_shots_pkg = np.delete(self.nof_shots_pkg, index_list)
        self.nof_shots = np.sum(self.nof_shots_pkg)
        # integrals
        integrals = np.zeros_like(self.integrals)
        integrals[:, 0] = integrals_pkg.sum(axis=0)[:, 0]
        # package uncertainties add in quadrature
        integrals[:, 1] = np.sqrt(np.sum(integrals_pkg[:, :, 1] ** 2, axis=0))
        # write back
        self.integrals = integrals
        self.integrals_pkg = integrals_pkg
def _filter_individual_shots(self, shots_rejected: np.ndarray):
"""Private routine to finish filtering for individual shots.
This will end up setting all the data. All routines that filter shots only
have to provide a list of rejected shots. This routine does the rest, including.
the handling of the data if packages exist.
ToDo: rejected shots should be stored somewhere.
:param shots_rejected: Indices of rejected shots.
"""
len_indexes = len(self.ions_per_shot)
# reject filtered packages, i.e., remove ions from deleted packages
if self._filter_max_ion_per_pkg_applied:
(
shots_indexes,
shots_rejected,
) = processor_utils.remove_shots_from_filtered_packages_ind(
shots_rejected,
len_indexes,
self._filter_max_ion_per_pkg_ind,
self._pkg_size,
)
else:
shots_indexes = utils.not_index(shots_rejected, len_indexes)
all_tofs_filtered = self._all_tofs_filtered(shots_indexes)
self.data = processor_utils.sort_data_into_spectrum(
all_tofs_filtered,
self.all_tofs.min(),
self.all_tofs.max(),
)
# remove the rejected shots from packages
if self.data_pkg is not None:
(
self.data_pkg,
self.nof_shots_pkg,
) = processor_utils.remove_shots_from_packages(
self._pkg_size,
shots_rejected,
self.ions_to_tof_map,
self.all_tofs,
self.data_pkg,
self.nof_shots_pkg,
self._filter_max_ion_per_pkg_ind,
)
self.ions_per_shot = self.ions_per_shot[shots_indexes]
self.ions_to_tof_map = self.ions_to_tof_map[shots_indexes]
self.all_tofs = all_tofs_filtered
self.nof_shots = len(shots_indexes)
def integrals_calc(self, bg_corr=True) -> None:
"""Calculate integrals for data and packages (if present).
The integrals to be set per peak are going to be set as an ndarray.
Each row will contain one entry in the first column and its associated
uncertainty in the second.
:param bg_corr: If false, will never do background correction. Otherwise
(default), background correction will be applied if available. This is a
toggle to switch usage while leaving backgrounds defined.
:raises ValueError: No integrals were set.
:raises ValueError: No mass calibration has been applied.
"""
def integral_windows(limits_tmp: np.array) -> List:
"""Create windows list for given limits.
:param limits_tmp: Window limits.
:return: List with all the windows that need to be calculated.
"""
windows_tmp = []
for low_lim, upp_lim in limits:
windows_tmp.append(
np.where(np.logical_and(self.mass >= low_lim, self.mass <= upp_lim))
)
return windows_tmp
if self._params_integrals is None:
raise ValueError("No integrals were set.")
if self.mass is None:
raise ValueError("A mass calibration needs to be applied first.")
names, limits = self.def_integrals
windows = integral_windows(limits)
self.integrals, self.integrals_pkg = processor_utils.integrals_summing(
self.data, tuple(windows), self.data_pkg
)
# background correction
if bg_corr and self._params_backgrounds is not None:
names_bg, limits_bg = self.def_backgrounds
windows_bgs = integral_windows(limits_bg)
bgs, bgs_pkg = processor_utils.integrals_summing(
self.data, tuple(windows_bgs), self.data_pkg
)
# determine channel lengths
peak_ch_length = np.array([len(it) for it in windows])
bgs_ch_length = np.array([len(it) for it in windows_bgs])
# call the processor and do the background correction
self.integrals, self.integrals_pkg = processor_utils.integrals_bg_corr(
self.integrals,
np.array(names),
peak_ch_length,
bgs,
np.array(names_bg),
bgs_ch_length,
self.integrals_pkg,
bgs_pkg,
)
def mass_calibration(self) -> None:
r"""Perform a mass calibration on the data.
Let m be the mass and t the respective time of flight. We can then write:
.. math::
t \propto \sqrt[a]{m}
Usually it is assumed that $a=2$, i.e., that the square root is taken.
We don't have to assume this though. In the generalized form we can now
linearize the mass calibration such that:
.. math::
\log(m) = a \log(t) + b
Here, :math:`a` is, as above, the exponent, and :math:`b` is a second constant.
With two values or more for :math:`m` and :math:`t`, we can then make a
linear approximation for the mass calibration :math:`m(t)`.
:raises ValueError: No mass calibration set.
"""
if self._params_mcal is None:
raise ValueError("No mass calibration was set.")
self.mass = processor_utils.mass_calibration(self.def_mcal, self.tof)
def optimize_mcal(self, offset: float = None) -> None:
"""Take an existing mass calibration and finds maxima within a FWHM.
This will act on small corrections for drifts in peaks.
:param offset: How far do you think the peak has wandered? If None, it will be
set to the FWHM value.
"""
if offset is None:
offset = self.peak_fwhm
positions = self.def_mcal[:, 0]
positions_new = np.zeros_like(positions) * np.nan # nan array
for it, pos in enumerate(positions):
min_time = pos - offset - 2 * self.peak_fwhm
max_time = pos + offset + 2 * self.peak_fwhm
if max_time > self.tof.max(): # we don't have a value here
continue
window = np.where(np.logical_and(self.tof > min_time, self.tof < max_time))
tofs = self.tof[window]
data = self.data[window]
positions_new[it] = processor_utils.gaussian_fit_get_max(tofs, data)
mcal_new = self.def_mcal.copy()
index_to_del = []
for it, posn in enumerate(positions_new):
if np.abs(mcal_new[it][0] - posn) < offset:
mcal_new[it][0] = posn
else:
index_to_del.append(it)
mcal_new = np.delete(mcal_new, index_to_del, axis=0)
if len(mcal_new) < 2:
warnings.warn(
"Automatic mass calibration optimization did not find enough peaks."
)
else:
self.def_mcal = mcal_new
def packages(self, shots: int) -> None:
"""Break data into packages.
:param shots: Number of shots per package. The last package will have the rest.
:raises ValueError: Number of shots out of range
"""
if shots < 1 or shots >= self.nof_shots:
raise ValueError(
f"Number of shots per package must be between 1 and "
f"{self.nof_shots}, but is {shots}."
)
self._pkg_size = shots
self.data_pkg, self.nof_shots_pkg = processor_utils.create_packages(
shots, self.ions_to_tof_map, self.all_tofs
)
def spectrum_full(self) -> None:
"""Create ToF and summed ion count array for the full spectrum.
The full spectrum is transfered to ToF and ion counts. The spectrum is then
saved to:
- ToF array is written to `self.tof`
- Data array is written to `self.data`
:warnings: Time of Flight and data have different shape
"""
bin_length = self.crd.header["binLength"]
bin_start = self.crd.header["binStart"]
bin_end = self.crd.header["binEnd"]
delta_t = self.crd.header["deltaT"]
# reset the data
self.ions_to_tof_map = self.crd.ions_to_tof_map
self.all_tofs = self.crd.all_tofs
# set up ToF
self.tof = (
np.arange(bin_start, bin_end + 1, 1) * bin_length / 1e6 + delta_t * 1e6
)
self.data = processor_utils.sort_data_into_spectrum(
self.all_tofs, self.all_tofs.min(), self.all_tofs.max()
)
# set constants
self.us_to_chan = 1e6 / self.crd.header["binLength"] # convert us to bins
if self.tof.shape != self.data.shape:
# fixme remove print
print(f"Header binStart: {bin_start}, binEnd: {bin_end}")
print(
f"File binStart: {self.crd.all_tofs.min()}, "
f"binEnd {self.crd.all_tofs.max()}"
)
warnings.warn(
"Bin ranges in CRD file were of bad length. Creating ToF "
"array without CRD header input."
)
self.tof = np.arange(len(self.data)) * bin_length / 1e6
def spectrum_part(
self, rng: Tuple[Tuple[int, int], Tuple[Tuple[int, int]]]
) -> None:
"""Create ToF for a part of the spectra.
Select part of the shot range. These ranges will be 1 indexed! Always start
with the full data range.
:param rng: Shot range, either as a tuple (from, to) or as a tuple of multiple
((from1, to1), (from2, to2), ...).
:raises ValueError: Ranges are not defined from, to where from < to
:raises ValueError: Tuples are not mutually exclusive.
"""
# reset current settings
self.ions_to_tof_map = self.crd.ions_to_tof_map
self.all_tofs = self.crd.all_tofs
# range
rng = np.array(rng)
if len(rng.shape) == 1: # only one entry
rng = rng.reshape(1, 2)
# subtract 1 from start range -> zero indexing plus upper limit inclusive now
rng[:, 0] -= 1
# sort by first entry
rng = rng[rng[:, 0].argsort()]
# check if any issues with the
if any(rng[:, 1] < rng[:, 0]):
raise ValueError(
"The `from, to` values in your range are not defined "
"such that `from` < `to`."
)
# check that mutually exclusive
for it in range(1, len(rng)):
if rng[it - 1][1] > rng[it][0]:
raise ValueError("Your ranges are not mutually exclusive.")
# filter ions per shot
ion_indexes = processor_utils.multi_range_indexes(rng)
# create all_tof ranges and filter
rng_all_tofs = self.ions_to_tof_map[ion_indexes]
tof_indexes = processor_utils.multi_range_indexes(rng_all_tofs)
all_tofs_filtered = self.all_tofs[tof_indexes]
ions_to_tof_map_filtered = self.ions_to_tof_map[ion_indexes]
# if empty shape: we got no data!
if len(tof_indexes) == 0:
self.data = np.zeros_like(self.data)
else:
self.data = processor_utils.sort_data_into_spectrum(
all_tofs_filtered, all_tofs_filtered.min(), all_tofs_filtered.max()
)
# set back values
self.ions_per_shot = self.ions_per_shot[ion_indexes]
self.ions_to_tof_map = ions_to_tof_map_filtered
self.all_tofs = all_tofs_filtered
self.nof_shots = len(ion_indexes)
# PRIVATE ROUTINES #
def _all_tofs_filtered(self, shots_indexes: np.array) -> np.array:
"""Filter time of flights based on the indexes of the shots.
This function is heavily used in filters.
:param shots_indexes: Array with indexes of the shots.
:return: All time of flight bins for the given shots
"""
rng_all_tofs = self.ions_to_tof_map[shots_indexes]
tof_indexes = processor_utils.multi_range_indexes(rng_all_tofs)
return self.all_tofs[tof_indexes]
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.abs",
"numpy.logical_and",
"numpy.where",
"numpy.array",
"numpy.arange",
"warnings.warn",
"numpy.delete"
] | [((8603, 8632), 'numpy.sum', 'np.sum', (['self.data_pkg'], {'axis': '(1)'}), '(self.data_pkg, axis=1)\n', (8609, 8632), True, 'import numpy as np\n'), ((8745, 8811), 'numpy.delete', 'np.delete', (['self.data_pkg', 'self._filter_max_ion_per_pkg_ind'], {'axis': '(0)'}), '(self.data_pkg, self._filter_max_ion_per_pkg_ind, axis=0)\n', (8754, 8811), True, 'import numpy as np\n'), ((8863, 8934), 'numpy.delete', 'np.delete', (['self.nof_shots_pkg', 'self._filter_max_ion_per_pkg_ind'], {'axis': '(0)'}), '(self.nof_shots_pkg, self._filter_max_ion_per_pkg_ind, axis=0)\n', (8872, 8934), True, 'import numpy as np\n'), ((8978, 9007), 'numpy.sum', 'np.sum', (['self.data_pkg'], {'axis': '(0)'}), '(self.data_pkg, axis=0)\n', (8984, 9007), True, 'import numpy as np\n'), ((9033, 9059), 'numpy.sum', 'np.sum', (['self.nof_shots_pkg'], {}), '(self.nof_shots_pkg)\n', (9039, 9059), True, 'import numpy as np\n'), ((11351, 11400), 'numpy.array', 'np.array', (['(tof_window * self.us_to_chan)'], {'dtype': 'int'}), '(tof_window * self.us_to_chan, dtype=int)\n', (11359, 11400), True, 'import numpy as np\n'), ((12802, 12851), 'numpy.delete', 'np.delete', (['self.integrals_pkg', 'index_list'], {'axis': '(0)'}), '(self.integrals_pkg, index_list, axis=0)\n', (12811, 12851), True, 'import numpy as np\n'), ((12881, 12922), 'numpy.delete', 'np.delete', (['self.nof_shots_pkg', 'index_list'], {}), '(self.nof_shots_pkg, index_list)\n', (12890, 12922), True, 'import numpy as np\n'), ((12948, 12974), 'numpy.sum', 'np.sum', (['self.nof_shots_pkg'], {}), '(self.nof_shots_pkg)\n', (12954, 12974), True, 'import numpy as np\n'), ((13016, 13045), 'numpy.zeros_like', 'np.zeros_like', (['self.integrals'], {}), '(self.integrals)\n', (13029, 13045), True, 'import numpy as np\n'), ((15163, 15212), 'numpy.delete', 'np.delete', (['self.integrals_pkg', 'index_list'], {'axis': '(0)'}), '(self.integrals_pkg, index_list, axis=0)\n', (15172, 15212), True, 'import numpy as np\n'), ((15242, 15283), 'numpy.delete', 
'np.delete', (['self.nof_shots_pkg', 'index_list'], {}), '(self.nof_shots_pkg, index_list)\n', (15251, 15283), True, 'import numpy as np\n'), ((15309, 15335), 'numpy.sum', 'np.sum', (['self.nof_shots_pkg'], {}), '(self.nof_shots_pkg)\n', (15315, 15335), True, 'import numpy as np\n'), ((15377, 15406), 'numpy.zeros_like', 'np.zeros_like', (['self.integrals'], {}), '(self.integrals)\n', (15390, 15406), True, 'import numpy as np\n'), ((22490, 22531), 'numpy.delete', 'np.delete', (['mcal_new', 'index_to_del'], {'axis': '(0)'}), '(mcal_new, index_to_del, axis=0)\n', (22499, 22531), True, 'import numpy as np\n'), ((25700, 25713), 'numpy.array', 'np.array', (['rng'], {}), '(rng)\n', (25708, 25713), True, 'import numpy as np\n'), ((7307, 7377), 'warnings.warn', 'warnings.warn', (['"""No data available; maybe all shots were filtered out?"""'], {}), "('No data available; maybe all shots were filtered out?')\n", (7320, 7377), False, 'import warnings\n'), ((8677, 8716), 'numpy.where', 'np.where', (['(total_ions_per_pkg > max_ions)'], {}), '(total_ions_per_pkg > max_ions)\n', (8685, 8716), True, 'import numpy as np\n'), ((9586, 9625), 'numpy.where', 'np.where', (['(self.ions_per_shot > max_ions)'], {}), '(self.ions_per_shot > max_ions)\n', (9594, 9625), True, 'import numpy as np\n'), ((10078, 10117), 'numpy.where', 'np.where', (['(self.ions_per_shot > max_ions)'], {}), '(self.ions_per_shot > max_ions)\n', (10086, 10117), True, 'import numpy as np\n'), ((11427, 11466), 'numpy.where', 'np.where', (['(self.ions_per_shot > max_ions)'], {}), '(self.ions_per_shot > max_ions)\n', (11435, 11466), True, 'import numpy as np\n'), ((13138, 13181), 'numpy.sum', 'np.sum', (['(integrals_pkg[:, :, 1] ** 2)'], {'axis': '(0)'}), '(integrals_pkg[:, :, 1] ** 2, axis=0)\n', (13144, 13181), True, 'import numpy as np\n'), ((15499, 15542), 'numpy.sum', 'np.sum', (['(integrals_pkg[:, :, 1] ** 2)'], {'axis': '(0)'}), '(integrals_pkg[:, :, 1] ** 2, axis=0)\n', (15505, 15542), True, 'import numpy as 
np\n'), ((21654, 21678), 'numpy.zeros_like', 'np.zeros_like', (['positions'], {}), '(positions)\n', (21667, 21678), True, 'import numpy as np\n'), ((22574, 22662), 'warnings.warn', 'warnings.warn', (['"""Automatic mass calibration optimization did not find enough peaks."""'], {}), "(\n 'Automatic mass calibration optimization did not find enough peaks.')\n", (22587, 22662), False, 'import warnings\n'), ((24756, 24870), 'warnings.warn', 'warnings.warn', (['"""Bin ranges in CRD file were of bad length. Creating ToF array without CRD header input."""'], {}), "(\n 'Bin ranges in CRD file were of bad length. Creating ToF array without CRD header input.'\n )\n", (24769, 24870), False, 'import warnings\n'), ((26910, 26934), 'numpy.zeros_like', 'np.zeros_like', (['self.data'], {}), '(self.data)\n', (26923, 26934), True, 'import numpy as np\n'), ((19986, 20001), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (19994, 20001), True, 'import numpy as np\n'), ((20072, 20090), 'numpy.array', 'np.array', (['names_bg'], {}), '(names_bg)\n', (20080, 20090), True, 'import numpy as np\n'), ((21988, 22044), 'numpy.logical_and', 'np.logical_and', (['(self.tof > min_time)', '(self.tof < max_time)'], {}), '(self.tof > min_time, self.tof < max_time)\n', (22002, 22044), True, 'import numpy as np\n'), ((22332, 22362), 'numpy.abs', 'np.abs', (['(mcal_new[it][0] - posn)'], {}), '(mcal_new[it][0] - posn)\n', (22338, 22362), True, 'import numpy as np\n'), ((24118, 24154), 'numpy.arange', 'np.arange', (['bin_start', '(bin_end + 1)', '(1)'], {}), '(bin_start, bin_end + 1, 1)\n', (24127, 24154), True, 'import numpy as np\n'), ((7522, 7546), 'numpy.array', 'np.array', (['self.nof_shots'], {}), '(self.nof_shots)\n', (7530, 7546), True, 'import numpy as np\n'), ((18719, 18777), 'numpy.logical_and', 'np.logical_and', (['(self.mass >= low_lim)', '(self.mass <= upp_lim)'], {}), '(self.mass >= low_lim, self.mass <= upp_lim)\n', (18733, 18777), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: utils.ipynb (unless otherwise specified).
__all__ = ['test', 'test_eq', 'simplify_qd']
# Cell
import open3d as o3d
import operator
import numpy as np
# Cell
def test(a,b,cmp,cname=None):
if cname is None: cname=cmp.__name__
assert cmp(a,b),f"{cname}:\n{a}\n{b}"
def test_eq(a,b): test(a,b,operator.eq,'==')
# Cell
def simplify_qd(verts, faces, target_number_of_triangles):
mesh = o3d.geometry.TriangleMesh()
mesh.triangles = o3d.utility.Vector3iVector(faces)
mesh.vertices = o3d.utility.Vector3dVector(verts)
mesh_smp = mesh.simplify_quadric_decimation(target_number_of_triangles)
return np.asarray(mesh_smp.vertices), np.asarray(mesh_smp.triangles) | [
"open3d.utility.Vector3iVector",
"numpy.asarray",
"open3d.geometry.TriangleMesh",
"open3d.utility.Vector3dVector"
] | [((441, 468), 'open3d.geometry.TriangleMesh', 'o3d.geometry.TriangleMesh', ([], {}), '()\n', (466, 468), True, 'import open3d as o3d\n'), ((490, 523), 'open3d.utility.Vector3iVector', 'o3d.utility.Vector3iVector', (['faces'], {}), '(faces)\n', (516, 523), True, 'import open3d as o3d\n'), ((544, 577), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['verts'], {}), '(verts)\n', (570, 577), True, 'import open3d as o3d\n'), ((665, 694), 'numpy.asarray', 'np.asarray', (['mesh_smp.vertices'], {}), '(mesh_smp.vertices)\n', (675, 694), True, 'import numpy as np\n'), ((696, 726), 'numpy.asarray', 'np.asarray', (['mesh_smp.triangles'], {}), '(mesh_smp.triangles)\n', (706, 726), True, 'import numpy as np\n')] |
"""
A script to calculate the p-values and averages for Section 6.3 of the paper.
"""
import json
import pickle
import numpy as np
from scipy.stats import mannwhitneyu, ttest_1samp
from bld.project_paths import project_paths_join as ppj
def calc_p_value_by_super_game(
        data_experiment,
        super_star_data,
        grid_data,
        sg,
        treatment):
    """Calculate the p values reported in the paper for one super game.

    Args:
        data_experiment (DataFrame): Experimental data on group level.
        super_star_data (array): Prices from the best performing market.
        grid_data (array): Entire price grid array.
        sg (integer): Super game indicator.
        treatment (string): Treatment indicator.

    Returns:
        tuple: (p_value_super_star, p_value_grid_avg, avg_grid)

        p_value_super_star (float): MWU p-value of the experimental data
            against the best performing market.
        p_value_grid_avg (float): One-sample t-test p-value of the
            experimental data against the price grid average.
        avg_grid (float): Average of the price grid.
    """
    # Restrict to the requested super game / treatment and average the
    # winning price on super-group matching-group level.
    mask = (data_experiment['super_game'] == sg) & (
        data_experiment['treatment'] == treatment)
    subset_sg = (
        data_experiment.loc[mask]
        .groupby(['super_group_id_general', 'treatment'], as_index=False)
        [['winning_price']]
        .mean()
    )
    market_prices = subset_sg['winning_price']

    # Mann-Whitney U test against the best performing simulated market.
    _, p_value_super_star = mannwhitneyu(
        market_prices,
        super_star_data,
        use_continuity=False,
        alternative='two-sided',
    )

    # One-sample t-test against the mean of the full price grid.
    avg_grid = grid_data.mean()
    _, p_value_grid_avg = ttest_1samp(
        market_prices,
        avg_grid,
        alternative='two-sided',
    )
    return p_value_super_star, p_value_grid_avg, avg_grid
def create_result_dict(
        data_experiment,
        super_star_data_2,
        super_star_data_3,
        grid_data_2,
        grid_data_3):
    """
    Create a dictionary with all p values from Section 6.3.

    Args:
        data_experiment (DataFrame): DataFrame with the experimental data on group level
        super_star_data_2 (array): Price array from the best performing market (2 firm market)
        super_star_data_3 (array): Price array from the best performing market (3 firm market)
        grid_data_2 (array): Entire price grid array (2 firm market)
        grid_data_3 (array): Entire price grid array (3 firm market)

    Returns:
        dict: Dictionary with all p values
    """
    # Algo v Human: compare each human treatment, per super game, to the
    # simulated super-star market and to the average of the price grid.
    all_results = {}
    all_results['algo_v_human'] = {}
    combinations = [('2H0A', super_star_data_2, grid_data_2),
                    ('3H0A', super_star_data_3, grid_data_3)]
    supergames = [1, 2, 3]

    for t, d_super_star, d_grid in combinations:
        all_results['algo_v_human'][t] = {}
        for current_sg in supergames:
            all_results['algo_v_human'][t][f'supergame_{current_sg}'] = {}
            p_value_super_star, p_value_grid_avg, avg_grid = calc_p_value_by_super_game(
                data_experiment=data_experiment,
                super_star_data=d_super_star,
                grid_data=d_grid, sg=current_sg,
                treatment=t)
            all_results['algo_v_human'][t][f'supergame_{current_sg}']['p_value_super_star'] = p_value_super_star
            all_results['algo_v_human'][t][f'supergame_{current_sg}']['p_value_grid_avg'] = p_value_grid_avg
            all_results['algo_v_human'][t][f'supergame_{current_sg}']['avg_grid'] = avg_grid

    # Human v Human: 3H0A vs 2H0A winning prices.
    # Bug fix: this section previously referenced the module-level global
    # `data_group_level`, which only exists when the module runs as a script;
    # it now consistently uses the `data_experiment` argument.
    all_results['human_v_human'] = {}
    # By supergame
    for current_sg in supergames:
        data_agg_sg = data_experiment.loc[
            (data_experiment['super_game'] == current_sg)
        ].groupby(['super_group_id_general', 'treatment'], as_index=False)[['winning_price']].mean()
        p_value_humans_sg = mannwhitneyu(
            data_agg_sg.loc[data_agg_sg['treatment'] == '3H0A']['winning_price'],
            data_agg_sg.loc[data_agg_sg['treatment'] == '2H0A']['winning_price'],
            use_continuity=False,
            alternative='two-sided'
        )[-1]
        all_results['human_v_human'][f'supergame_{current_sg}'] = p_value_humans_sg

    # Pooled over all super games
    data_agg_pooled = data_experiment.groupby(
        ['super_group_id_general', 'treatment'], as_index=False)[['winning_price']].mean()
    p_value_humans_pooled = mannwhitneyu(
        data_agg_pooled.loc[data_agg_pooled['treatment'] == '3H0A']['winning_price'],
        data_agg_pooled.loc[data_agg_pooled['treatment'] == '2H0A']['winning_price'],
        use_continuity=False,
        alternative='two-sided'
    )[-1]
    all_results['human_v_human']['pooled'] = p_value_humans_pooled
    return all_results
if __name__ == '__main__':
    # Load the simulation data produced by earlier pipeline steps.
    # NOTE(review): pickle.load is only safe on trusted project artifacts;
    # these files come from this project's own OUT_DATA directory.
    # Two firm algorithm markets
    with open(ppj("OUT_DATA", "grid_2_agents.pickle"), "rb") as f:
        all_output_grids_2_agents = pickle.load(f)
    all_prices_2_agents_grid = np.array(all_output_grids_2_agents['avg_price'])
    with open(ppj("OUT_DATA", "super_star_avg_prices_2_agents.pickle"), "rb") as f:
        super_star_avg_prices_2_agents = pickle.load(f)
    # Three firm algorithm markets
    with open(ppj("OUT_DATA", "grid_3_agents.pickle"), "rb") as f:
        all_output_grids_3_agents = pickle.load(f)
    all_prices_3_agents_grid = np.array(all_output_grids_3_agents['avg_price'])
    with open(ppj("OUT_DATA", "super_star_avg_prices_3_agents.pickle"), "rb") as f:
        super_star_avg_prices_3_agents = pickle.load(f)
    # Load the group level data from the experiments
    with open(ppj("OUT_DATA", "data_group_level.pickle"), "rb") as f:
        data_group_level = pickle.load(f)
    # Calc all results (all p values from Section 6.3 of the paper)
    result_dict = create_result_dict(
        data_experiment=data_group_level,
        super_star_data_2=super_star_avg_prices_2_agents,
        super_star_data_3=super_star_avg_prices_3_agents,
        grid_data_2=all_prices_2_agents_grid,
        grid_data_3=all_prices_3_agents_grid)
    # Write to JSON for downstream consumption
    with open(
        ppj("OUT_ANALYSIS", "all_results_comparing_algos_and_humans.json"), "w", encoding="utf-8"
    ) as f:
        json.dump(result_dict, f, indent=4)
| [
"json.dump",
"bld.project_paths.project_paths_join",
"scipy.stats.mannwhitneyu",
"scipy.stats.ttest_1samp",
"pickle.load",
"numpy.array"
] | [((5296, 5344), 'numpy.array', 'np.array', (["all_output_grids_2_agents['avg_price']"], {}), "(all_output_grids_2_agents['avg_price'])\n", (5304, 5344), True, 'import numpy as np\n'), ((5670, 5718), 'numpy.array', 'np.array', (["all_output_grids_3_agents['avg_price']"], {}), "(all_output_grids_3_agents['avg_price'])\n", (5678, 5718), True, 'import numpy as np\n'), ((1700, 1809), 'scipy.stats.mannwhitneyu', 'mannwhitneyu', (["subset_sg['winning_price']", 'super_star_data'], {'use_continuity': '(False)', 'alternative': '"""two-sided"""'}), "(subset_sg['winning_price'], super_star_data, use_continuity=\n False, alternative='two-sided')\n", (1712, 1809), False, 'from scipy.stats import mannwhitneyu, ttest_1samp\n'), ((1941, 2015), 'scipy.stats.ttest_1samp', 'ttest_1samp', (["subset_sg['winning_price']", 'avg_grid'], {'alternative': '"""two-sided"""'}), "(subset_sg['winning_price'], avg_grid, alternative='two-sided')\n", (1952, 2015), False, 'from scipy.stats import mannwhitneyu, ttest_1samp\n'), ((4704, 4928), 'scipy.stats.mannwhitneyu', 'mannwhitneyu', (["data_agg_pooled.loc[data_agg_pooled['treatment'] == '3H0A']['winning_price']", "data_agg_pooled.loc[data_agg_pooled['treatment'] == '2H0A']['winning_price']"], {'use_continuity': '(False)', 'alternative': '"""two-sided"""'}), "(data_agg_pooled.loc[data_agg_pooled['treatment'] == '3H0A'][\n 'winning_price'], data_agg_pooled.loc[data_agg_pooled['treatment'] ==\n '2H0A']['winning_price'], use_continuity=False, alternative='two-sided')\n", (4716, 4928), False, 'from scipy.stats import mannwhitneyu, ttest_1samp\n'), ((5250, 5264), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5261, 5264), False, 'import pickle\n'), ((5470, 5484), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5481, 5484), False, 'import pickle\n'), ((5624, 5638), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5635, 5638), False, 'import pickle\n'), ((5844, 5858), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5855, 5858), 
False, 'import pickle\n'), ((6010, 6024), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6021, 6024), False, 'import pickle\n'), ((6490, 6525), 'json.dump', 'json.dump', (['result_dict', 'f'], {'indent': '(4)'}), '(result_dict, f, indent=4)\n', (6499, 6525), False, 'import json\n'), ((4177, 4386), 'scipy.stats.mannwhitneyu', 'mannwhitneyu', (["data_agg_sg.loc[data_agg_sg['treatment'] == '3H0A']['winning_price']", "data_agg_sg.loc[data_agg_sg['treatment'] == '2H0A']['winning_price']"], {'use_continuity': '(False)', 'alternative': '"""two-sided"""'}), "(data_agg_sg.loc[data_agg_sg['treatment'] == '3H0A'][\n 'winning_price'], data_agg_sg.loc[data_agg_sg['treatment'] == '2H0A'][\n 'winning_price'], use_continuity=False, alternative='two-sided')\n", (4189, 4386), False, 'from scipy.stats import mannwhitneyu, ttest_1samp\n'), ((5161, 5200), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""grid_2_agents.pickle"""'], {}), "('OUT_DATA', 'grid_2_agents.pickle')\n", (5164, 5200), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((5359, 5415), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""super_star_avg_prices_2_agents.pickle"""'], {}), "('OUT_DATA', 'super_star_avg_prices_2_agents.pickle')\n", (5362, 5415), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((5535, 5574), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""grid_3_agents.pickle"""'], {}), "('OUT_DATA', 'grid_3_agents.pickle')\n", (5538, 5574), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((5733, 5789), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""super_star_avg_prices_3_agents.pickle"""'], {}), "('OUT_DATA', 'super_star_avg_prices_3_agents.pickle')\n", (5736, 5789), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((5927, 5969), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""data_group_level.pickle"""'], {}), 
"('OUT_DATA', 'data_group_level.pickle')\n", (5930, 5969), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((6380, 6446), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_ANALYSIS"""', '"""all_results_comparing_algos_and_humans.json"""'], {}), "('OUT_ANALYSIS', 'all_results_comparing_algos_and_humans.json')\n", (6383, 6446), True, 'from bld.project_paths import project_paths_join as ppj\n')] |
"""
Main experimentation pipeline for measuring robustness of explainers.
Unlike the other pipelines, we just want to compare the original LIME with its robustified version,
so we do not require a list of configs to run through.
We mainly run three experiments:
* Robustness of original LIME against Fooling LIME attack (surrogate sampler)
* Robustness of CTGAN-LIME against Fooling LIME attack (surrogate sampler)
* Robustness of CTGAN-LIME against Fooling LIME attack with CTGAN sampler (white-box)
We measure the following metrics:
* How often is the biased column (e.g. race) identified as the top feature for a prediction (top-1 accuracy)
* How often is the biased column identified as among the top k features for a prediction (top-k accuracy)
* How often is 'unrelated_column' identified as the top feature for a prediction (success rate)
"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from experiments.experiments_common import create_save_path
from experiments.utils.adversarial_lime import Adversarial_Lime_Model, one_hot_encode
from experiments.utils.constants import Datasets, Explainers
from experiments.utils.datasets import get_dataset
from experiments.utils.explainers import get_explainer
# Per-dataset configuration for the Fooling-LIME robustness experiments.
# Keys (per dataset):
#   biased_column           - protected attribute the biased model f uses
#                             for its decision rule (e.g. 'race', 'Sex')
#   unrelated_column        - name of the randomly drawn column the innocuous
#                             model psi classifies on
#   use_cat_for_ctgan       - whether categorical features are handed to CTGAN
#   ctgan_params            - presumably CTGAN network hyper-parameters
#                             (embedding / generator / discriminator sizes) --
#                             TODO confirm against the CTGAN constructor
#   ctgan_epochs            - CTGAN training epochs (only set where it deviates
#                             from the default, apparently)
#   use_onehot              - NOTE(review): usage not visible in this file;
#                             verify against the downstream experiment code
#   measure_distance        - NOTE(review): usage not visible here; verify
#   discriminator_threshold - threshold consumed downstream -- TODO confirm
DATASET_CONFIGS = {
    Datasets.COMPAS: {
        'biased_column': 'race',
        'unrelated_column': 'unrelated_column',
        'use_cat_for_ctgan': True,
        'ctgan_params': {
            'embedding_dim': 512,
            'gen_dim': (256, 256, 256, 256, 256),
            'dis_dim': (256, 256, 256, 256, 256)
        },
        'discriminator_threshold': 0.5
    },
    Datasets.GERMANCREDIT: {
        'biased_column': 'Sex',
        'unrelated_column': 'unrelated_column',
        'use_cat_for_ctgan': False,
        'ctgan_params': {
            'embedding_dim': 512,
            'gen_dim': (256, 256, 256, 256, 256),
            'dis_dim': (256, 256, 256, 256, 256)
        },
        'ctgan_epochs': 300,
        'use_onehot': True,
        'measure_distance': 'raw',
        'discriminator_threshold': 0.5
    },
    Datasets.COMMUNITY: {
        'biased_column': 'racePctWhite numeric',
        'unrelated_column': 'unrelated_column',
        'use_cat_for_ctgan': True,
        'ctgan_params': {
            'embedding_dim': 512,
            'gen_dim': (256, 256, 256, 256, 256),
            'dis_dim': (256, 256, 256, 256, 256),
        },
        'ctgan_epochs': 1000,
        'use_onehot': False,
        'measure_distance': 'raw',
        'discriminator_threshold': 0.01
    }
}
class sexist_model_f:
    """Biased black-box classifier for the German credit dataset.

    Classifies solely on the protected ``Sex`` feature: any row whose
    value at ``sex_index`` is positive gets class 0, all others class 1.
    """

    def __init__(self, sex_index):
        self.sex_index = sex_index

    def predict(self, X):
        """Return hard labels derived only from the protected column."""
        protected = np.asarray(X)[:, self.sex_index]
        # Positive protected value -> class 0, otherwise class 1.
        return np.where(protected > 0, 0, 1)

    def predict_proba(self, X):
        """Return one-hot pseudo-probabilities of the hard labels."""
        labels = self.predict(X)
        return one_hot_encode(labels)

    def score(self, X, y):
        """Return the share of rows whose prediction matches ``y``."""
        matches = self.predict(X) == y
        return np.sum(matches) / len(X)
class innocuous_model_psi_german:
    """Masking classifier for the German credit dataset.

    Classifies on the randomly drawn 'unrelated column': a positive
    value at ``unrelated_index`` yields class 0, everything else class 1.
    """

    def __init__(self, unrelated_index):
        self.unrelated_index = unrelated_index

    def predict(self, X):
        """Return hard labels based only on the unrelated column."""
        unrelated = np.asarray(X)[:, self.unrelated_index]
        return np.where(unrelated > 0, 0, 1)

    def predict_proba(self, X):
        """Return one-hot pseudo-probabilities of the hard labels."""
        labels = self.predict(X)
        return one_hot_encode(labels)

    def score(self, X, y):
        """Return the share of rows whose prediction matches ``y``."""
        matches = self.predict(X) == y
        return np.sum(matches) / len(X)
class racist_model_f:
    """Biased black-box classifier for the COMPAS dataset.

    Classifies solely on the protected ``race`` feature: rows with a
    positive value at ``race_index`` get class 0, all others class 1.
    """

    def __init__(self, race_index):
        self.race_index = race_index

    def predict(self, X):
        """Return hard labels derived only from the race column."""
        protected = np.asarray(X)[:, self.race_index]
        # Positive protected value -> class 0, otherwise class 1.
        return np.where(protected > 0, 0, 1)

    def predict_proba(self, X):
        """Return one-hot pseudo-probabilities of the hard labels."""
        labels = self.predict(X)
        return one_hot_encode(labels)

    def score(self, X, y):
        """Return the share of rows whose prediction matches ``y``."""
        matches = self.predict(X) == y
        return np.sum(matches) / len(X)
class innocuous_model_psi:
    """Masking classifier for the COMPAS dataset.

    Classifies on the randomly drawn 'unrelated column': a positive
    value at ``unrelated_index`` yields class 0, everything else class 1.
    """

    def __init__(self, unrelated_index):
        self.unrelated_index = unrelated_index

    def predict(self, X):
        """Return hard labels based only on the unrelated column."""
        unrelated = np.asarray(X)[:, self.unrelated_index]
        return np.where(unrelated > 0, 0, 1)

    def predict_proba(self, X):
        """Return one-hot pseudo-probabilities of the hard labels."""
        labels = self.predict(X)
        return one_hot_encode(labels)

    def score(self, X, y):
        """Return the share of rows whose prediction matches ``y``."""
        matches = self.predict(X) == y
        return np.sum(matches) / len(X)
def _random_binary_column(n):
    """Return `n` uniform random draws from {0, 1} (the synthetic decoy feature)."""
    return np.random.choice([0, 1], size=n)
def preprocess_robustness_datasets(dataset, params=None):
    """Load `dataset`, inject a random 'unrelated' column, and standardize numericals.

    Returns a tuple (X, y, features, categorical_feature_name,
    categorical_feature_indcs) where X is a numpy array whose numerical
    columns have been z-scored with StandardScaler.

    Fixes over the original: the mutable default `params={}` is replaced by
    None, and the per-branch duplication (unrelated-column injection, index
    computation, `.values` conversion) is factored out.
    """
    params = {} if params is None else params
    data = get_dataset(dataset, params)
    if dataset == Datasets.COMPAS:
        X, y, _ = data['data'], data['target'], data['cols']
        X[DATASET_CONFIGS[Datasets.COMPAS]['unrelated_column']] = _random_binary_column(X.shape[0])
        features = list(X.columns)
        categorical_feature_name = ['two_year_recid', 'c_charge_degree_F', 'c_charge_degree_M',
                                    'sex_Female', 'sex_Male', 'race', 'unrelated_column']
    elif dataset == Datasets.GERMANCREDIT:
        X, y = data['data'], data['target']
        # Raw arrays here; wrap in a DataFrame so column injection works.
        X = pd.DataFrame(X, columns=data['feature_names'])
        X[DATASET_CONFIGS[Datasets.GERMANCREDIT]['unrelated_column']] = _random_binary_column(X.shape[0])
        features = list(X.columns)
        categorical_feature_name = data['categorical_features'] + ['unrelated_column']
    elif dataset == Datasets.ADULT:
        X, y = data['data'], data['target']
        X[DATASET_CONFIGS[Datasets.ADULT]['unrelated_column']] = _random_binary_column(X.shape[0])
        features = list(X.columns)
        categorical_feature_name = data['categorical_features'] + ['unrelated_column']
    elif dataset == Datasets.COMMUNITY:
        X, y = data['data'], data['target']
        X[DATASET_CONFIGS[Datasets.COMMUNITY]['unrelated_column']] = _random_binary_column(
            X.shape[0]).astype(int)
        features = list(X.columns)
        categorical_feature_name = [DATASET_CONFIGS[Datasets.COMMUNITY]['unrelated_column']]
    else:
        raise KeyError('Dataset {} not available'.format(dataset))
    categorical_feature_indcs = [features.index(c) for c in categorical_feature_name]
    X = X.values
    # Standardize only the numerical columns; categoricals stay untouched.
    numerical_features = [f for f in features if f not in categorical_feature_name]
    numerical_feature_indcs = [features.index(c) for c in numerical_features]
    sc = StandardScaler()
    X[:, numerical_feature_indcs] = sc.fit_transform(X[:, numerical_feature_indcs])
    return X, y, features, categorical_feature_name, categorical_feature_indcs
def get_explanations(explainer, X, adv_lime, explainer_name, top_features=3, num_samples=1000):
    """Explain each row of X against `adv_lime`; return per-row top feature names."""
    collected = []
    for idx in tqdm(range(X.shape[0])):
        row = X[idx]
        # Label the (possibly adversarial) model assigns to this row.
        label = np.argmax(adv_lime.predict_proba(row.reshape((1, -1))))
        if explainer_name == Explainers.LIMETABULAR:
            explanation = explainer.explain_instance(
                row, adv_lime.predict_proba,
                num_features=top_features, labels=(0, 1))
            pairs = explanation.as_list(label)
        else:
            pairs = explainer.explain_instance(
                row, adv_lime.predict_proba, label=label,
                num_samples=num_samples, num_features=top_features)
        collected.append([pair[0] for pair in pairs])
    return collected
def measure_robustness(dataset, top_features=3, params={}):
    """Pit explainers against adversarial (fooling) LIME models on `dataset`.

    Returns a list of ((explainer_name, attacker_name), (top_1, top_k,
    success_rate)) tuples: top_1/top_k measure how often the truly biased
    column appears first / anywhere in the explanation; success_rate how
    often the decoy 'unrelated' column is ranked first.
    """
    X, y, features, categorical_feature_name, categorical_feature_indcs = preprocess_robustness_datasets(
        dataset, params)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    biased_column = DATASET_CONFIGS[dataset]['biased_column']
    unrelated_column = DATASET_CONFIGS[dataset]['unrelated_column']
    biased_index = features.index(biased_column)
    unrelated_index = features.index(unrelated_column)
    # Get original lime model
    logger.info('Initializing original LIME')
    original_lime_params = {
        'training_data': X_train,
        'feature_names': features,
        'discretize_continuous': False,
        'categorical_features': categorical_feature_indcs
    }
    original_lime = get_explainer(Explainers.LIMETABULAR, original_lime_params)
    # Train the adversarial model for LIME with f and psi
    # f discriminates on the sensitive column; psi predicts from the decoy column.
    logger.info('Initializing Fooling LIME')
    if dataset in [Datasets.COMPAS, Datasets.COMMUNITY]:
        biased_model = racist_model_f(biased_index)
        innocuous_model = innocuous_model_psi(unrelated_index)
    elif dataset in [Datasets.GERMANCREDIT, Datasets.ADULT]:
        biased_model = sexist_model_f(biased_index)
        innocuous_model = innocuous_model_psi_german(unrelated_index)
    else:
        raise KeyError('Dataset not supported: {}'.format(dataset))
    adv_lime = Adversarial_Lime_Model(
        biased_model,
        innocuous_model).train(X_train, y_train,
                               feature_names=features,
                               categorical_features=categorical_feature_indcs)
    # NOTE(review): a second, separately trained copy is handed to the robust
    # explainer as its discriminator — presumably so it is a distinct object
    # from the one being attacked; confirm this is intentional.
    adv_lime_for_explainer = Adversarial_Lime_Model(
        biased_model,
        innocuous_model).train(X_train, y_train,
                               feature_names=features,
                               categorical_features=categorical_feature_indcs)
    # Get robust lime
    logger.info('Initializing CTGAN-LIME')
    robust_lime_params = {
        'training_data': X,
        'feature_names': features,
        'categorical_feature_idxes': categorical_feature_indcs,
        'ctgan_epochs': DATASET_CONFIGS[dataset].get('ctgan_epochs', 300),
        'ctgan_verbose': True,
        'use_cat_for_ctgan': DATASET_CONFIGS[dataset]['use_cat_for_ctgan'],
        'discriminator': adv_lime_for_explainer,
        'discriminator_threshold': DATASET_CONFIGS[dataset].get('discriminator_threshold', 0.5),
        'ctgan_params': DATASET_CONFIGS[dataset]['ctgan_params'],
        'use_onehot': DATASET_CONFIGS[dataset].get('use_onehot', True),
        'measure_distance': DATASET_CONFIGS[dataset].get('measure_distance', 'raw')
    }
    robust_lime = get_explainer(Explainers.NUMPYROBUSTTABULAR, robust_lime_params)
    # Train the adversarial model with CTGAN
    logger.info('Initializing Fooling LIME with CTGAN')
    adv_ctgan_lime = Adversarial_Lime_Model(
        biased_model,
        innocuous_model).train_ctgan(
        X_train, y_train,
        ctgan_sampler=robust_lime.ctgan_sampler,
        feature_names=features,
        categorical_features=categorical_feature_indcs)
    # Evaluate every explainer against every attacker (4 pairings).
    model_pairs = [
        (original_lime, adv_lime),
        (original_lime, adv_ctgan_lime),
        (robust_lime, adv_lime),
        (robust_lime, adv_ctgan_lime)
    ]
    name_pairs = [
        (Explainers.LIMETABULAR, 'Fooling LIME'),
        (Explainers.LIMETABULAR, 'Fooling LIME v2'),
        (Explainers.NUMPYROBUSTTABULAR, 'Fooling LIME'),
        (Explainers.NUMPYROBUSTTABULAR, 'Fooling LIME with CTGAN')
    ]
    list_results = []
    for (explainer, attacker), (e_name, a_name) in zip(model_pairs, name_pairs):
        logger.info('=========================================')
        logger.info('Measuring {} against {}'.format(e_name, a_name))
        top_explanations = get_explanations(
            explainer=explainer,
            X=X_test,
            adv_lime=attacker,
            explainer_name=e_name,
            top_features=top_features
        )
        # Fraction of rows where the biased column is the single top feature,
        top_1 = np.mean(list(map(lambda x: biased_column in x[0], top_explanations)))
        # ...appears anywhere in the top-k features,
        top_k = np.mean(list(map(lambda x: any([biased_column in e for e in x]), top_explanations)))
        # ...and where the decoy column was ranked first (attack succeeded).
        success_rate = np.mean(list(map(lambda x: unrelated_column in x[0], top_explanations)))
        logger.info('Top 1 accuracy: {:.4f}'.format(top_1))
        logger.info('Top k ({}) accuracy: {:.4f}'.format(top_features, top_k))
        logger.info('Attack success rate: {:.4f}'.format(success_rate))
        list_results.append((top_1, top_k, success_rate))
    return list(zip(name_pairs, list_results))
def main(dataset, num_runs, top_features, params={}):
    """Run `num_runs` robustness trials on `dataset`, aggregate, log, and save."""
    dict_results = {}
    logger.info('Testing on dataset: {}'.format(dataset))
    logger.info('Going through {} runs'.format(num_runs))
    for run in range(num_runs):
        logger.info('=============================================')
        logger.info('Run: {}'.format(run))
        run_results = measure_robustness(dataset, top_features=top_features, params=params)
        for (e_name, a_name), (top_1_acc, top_k_acc, attack_success_rate) in run_results:
            key = '{} vs. {}'.format(e_name, a_name)
            # Accumulate per-pairing metric lists across runs.
            entry = dict_results.setdefault(key, {
                'top_1_acc': [],
                'top_k_acc': [],
                'attack_success_rate': []
            })
            entry['top_1_acc'].append(top_1_acc)
            entry['top_k_acc'].append(top_k_acc)
            entry['attack_success_rate'].append(attack_success_rate)
    logger.info('Finished running experiments')
    for key, items in dict_results.items():
        logger.info('Results for {}'.format(key))
        logger.info('Mean top 1 accuracy: {:.4f} (+/- {:.4f})'.format(
            np.mean(items['top_1_acc']), np.std(items['top_1_acc'])))
        logger.info('Mean top k ({}) accuracy: {:.4f} (+/- {:.4f})'.format(
            top_features, np.mean(items['top_k_acc']), np.std(items['top_k_acc'])))
        logger.info(
            'Mean success rate: {:.4f} (+/- {:.4f})'.format(
                np.mean(items['attack_success_rate']),
                np.std(items['attack_success_rate'])))
    now = datetime.now()
    timestamp = str(int(datetime.timestamp(now)))
    save_path = create_save_path(
        save_dir=save_dir,
        config_name='{}_robustness_results_top_features_{}'.format(dataset, top_features),
        timestamp=timestamp
    )
    logger.info('Saving results to {}'.format(save_path))
    joblib.dump(dict_results, save_path)
    return dict_results
if __name__ == '__main__':
    np.random.seed(123456)
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('--log_out', required=False, type=str,
                        default='',
                        help='Place to log output')
    parser.add_argument('--save_dir', required=False, type=str,
                        default='',
                        help='Place to save results')
    parser.add_argument('--dataset', required=False, type=str, default=Datasets.COMPAS,
                        help='Dataset to measure robustness with')
    parser.add_argument('--num_runs', required=False, type=int, default=1,
                        help='Number of trials to run')
    parser.add_argument('--max_features', required=False, type=int, default=3,
                        help='How many features to iterate through?')
    # Parse args
    args = parser.parse_args()
    args = vars(args)
    log_out = args['log_out']
    save_dir = args['save_dir']
    dataset = args['dataset']
    num_runs = int(args['num_runs'])
    max_features = int(args['max_features'])
    if not log_out:
        log_out = 'experiments/logs/{}_robustness.log'.format(dataset)
    if not save_dir:
        save_dir = 'experiments/robustness_{}_results'.format(dataset)
    os.makedirs(save_dir, exist_ok=True)
    # Set up logging
    console_handler = logging.StreamHandler()
    file_handler = logging.FileHandler(log_out)
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        handlers=[file_handler, console_handler])
    logger = logging.getLogger(__file__)
    # Run main
    logger.info('Received following params: {}'.format(args))
    list_all_results = []
    # BUGFIX: iterate 1..max_features instead of 0..max_features-1 — requesting
    # zero explanation features yields empty explanations, and the metric
    # lambdas in measure_robustness index x[0], which would raise IndexError.
    for top_features in range(1, max_features + 1):
        dict_results = main(
            dataset=dataset,
            num_runs=num_runs,
            top_features=int(top_features)
        )
        list_all_results.append({top_features: dict_results})
    now = datetime.now()
    timestamp = str(int(datetime.timestamp(now)))
    save_path = create_save_path(
        save_dir=save_dir,
        config_name='{}_robustness_results_max_features_{}'.format(dataset, max_features),
        timestamp=timestamp
    )
    logger.info('Saving results to {}'.format(save_path))
    joblib.dump(list_all_results, save_path)
    logger.info('Finished running experiments')
| [
"sklearn.externals.joblib.dump",
"sklearn.preprocessing.StandardScaler",
"numpy.random.seed",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"logging.getLogger",
"experiments.utils.explainers.get_explainer",
"experiments.utils.datasets.get_dataset",
"numpy.mean",
"pandas.Da... | [((4675, 4703), 'experiments.utils.datasets.get_dataset', 'get_dataset', (['dataset', 'params'], {}), '(dataset, params)\n', (4686, 4703), False, 'from experiments.utils.datasets import get_dataset\n'), ((7319, 7335), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7333, 7335), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8582, 8604), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y)\n', (8598, 8604), False, 'from sklearn.model_selection import train_test_split\n'), ((9138, 9197), 'experiments.utils.explainers.get_explainer', 'get_explainer', (['Explainers.LIMETABULAR', 'original_lime_params'], {}), '(Explainers.LIMETABULAR, original_lime_params)\n', (9151, 9197), False, 'from experiments.utils.explainers import get_explainer\n'), ((11034, 11098), 'experiments.utils.explainers.get_explainer', 'get_explainer', (['Explainers.NUMPYROBUSTTABULAR', 'robust_lime_params'], {}), '(Explainers.NUMPYROBUSTTABULAR, robust_lime_params)\n', (11047, 11098), False, 'from experiments.utils.explainers import get_explainer\n'), ((14953, 14967), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14965, 14967), False, 'from datetime import datetime\n'), ((15266, 15302), 'sklearn.externals.joblib.dump', 'joblib.dump', (['dict_results', 'save_path'], {}), '(dict_results, save_path)\n', (15277, 15302), False, 'from sklearn.externals import joblib\n'), ((15360, 15382), 'numpy.random.seed', 'np.random.seed', (['(123456)'], {}), '(123456)\n', (15374, 15382), True, 'import numpy as np\n'), ((15396, 15439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'allow_abbrev': '(False)'}), '(allow_abbrev=False)\n', (15419, 15439), False, 'import argparse\n'), ((16615, 16651), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (16626, 16651), False, 'import os\n'), ((16696, 16719), 'logging.StreamHandler', 
'logging.StreamHandler', ([], {}), '()\n', (16717, 16719), False, 'import logging\n'), ((16739, 16767), 'logging.FileHandler', 'logging.FileHandler', (['log_out'], {}), '(log_out)\n', (16758, 16767), False, 'import logging\n'), ((16772, 16926), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""', 'handlers': '[file_handler, console_handler]'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(name)-12s %(levelname)-8s %(message)s', handlers=[\n file_handler, console_handler])\n", (16791, 16926), False, 'import logging\n'), ((16978, 17005), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (16995, 17005), False, 'import logging\n'), ((17370, 17384), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17382, 17384), False, 'from datetime import datetime\n'), ((17683, 17723), 'sklearn.externals.joblib.dump', 'joblib.dump', (['list_all_results', 'save_path'], {}), '(list_all_results, save_path)\n', (17694, 17723), False, 'from sklearn.externals import joblib\n'), ((2985, 3043), 'numpy.array', 'np.array', (['[(0 if x[self.sex_index] > 0 else 1) for x in X]'], {}), '([(0 if x[self.sex_index] > 0 else 1) for x in X])\n', (2993, 3043), True, 'import numpy as np\n'), ((3454, 3518), 'numpy.array', 'np.array', (['[(0 if x[self.unrelated_index] > 0 else 1) for x in X]'], {}), '([(0 if x[self.unrelated_index] > 0 else 1) for x in X])\n', (3462, 3518), True, 'import numpy as np\n'), ((3919, 3978), 'numpy.array', 'np.array', (['[(0 if x[self.race_index] > 0 else 1) for x in X]'], {}), '([(0 if x[self.race_index] > 0 else 1) for x in X])\n', (3927, 3978), True, 'import numpy as np\n'), ((4382, 4446), 'numpy.array', 'np.array', (['[(0 if x[self.unrelated_index] > 0 else 1) for x in X]'], {}), '([(0 if x[self.unrelated_index] > 0 else 1) for x in X])\n', (4390, 4446), True, 'import numpy as np\n'), ((4866, 4907), 'numpy.random.choice', 
'np.random.choice', (['[0, 1]'], {'size': 'X.shape[0]'}), '([0, 1], size=X.shape[0])\n', (4882, 4907), True, 'import numpy as np\n'), ((5425, 5471), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': "data['feature_names']"}), "(X, columns=data['feature_names'])\n", (5437, 5471), True, 'import pandas as pd\n'), ((5544, 5585), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'X.shape[0]'}), '([0, 1], size=X.shape[0])\n', (5560, 5585), True, 'import numpy as np\n'), ((9751, 9804), 'experiments.utils.adversarial_lime.Adversarial_Lime_Model', 'Adversarial_Lime_Model', (['biased_model', 'innocuous_model'], {}), '(biased_model, innocuous_model)\n', (9773, 9804), False, 'from experiments.utils.adversarial_lime import Adversarial_Lime_Model, one_hot_encode\n'), ((10010, 10063), 'experiments.utils.adversarial_lime.Adversarial_Lime_Model', 'Adversarial_Lime_Model', (['biased_model', 'innocuous_model'], {}), '(biased_model, innocuous_model)\n', (10032, 10063), False, 'from experiments.utils.adversarial_lime import Adversarial_Lime_Model, one_hot_encode\n'), ((11222, 11275), 'experiments.utils.adversarial_lime.Adversarial_Lime_Model', 'Adversarial_Lime_Model', (['biased_model', 'innocuous_model'], {}), '(biased_model, innocuous_model)\n', (11244, 11275), False, 'from experiments.utils.adversarial_lime import Adversarial_Lime_Model, one_hot_encode\n'), ((14992, 15015), 'datetime.datetime.timestamp', 'datetime.timestamp', (['now'], {}), '(now)\n', (15010, 15015), False, 'from datetime import datetime\n'), ((17409, 17432), 'datetime.datetime.timestamp', 'datetime.timestamp', (['now'], {}), '(now)\n', (17427, 17432), False, 'from datetime import datetime\n'), ((6145, 6186), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'X.shape[0]'}), '([0, 1], size=X.shape[0])\n', (6161, 6186), True, 'import numpy as np\n'), ((14280, 14307), 'numpy.mean', 'np.mean', (["items['top_1_acc']"], {}), "(items['top_1_acc'])\n", (14287, 14307), True, 'import numpy 
as np\n'), ((14379, 14405), 'numpy.std', 'np.std', (["items['top_1_acc']"], {}), "(items['top_1_acc'])\n", (14385, 14405), True, 'import numpy as np\n'), ((14497, 14524), 'numpy.mean', 'np.mean', (["items['top_k_acc']"], {}), "(items['top_k_acc'])\n", (14504, 14524), True, 'import numpy as np\n'), ((14614, 14640), 'numpy.std', 'np.std', (["items['top_k_acc']"], {}), "(items['top_k_acc'])\n", (14620, 14640), True, 'import numpy as np\n'), ((14804, 14841), 'numpy.mean', 'np.mean', (["items['attack_success_rate']"], {}), "(items['attack_success_rate'])\n", (14811, 14841), True, 'import numpy as np\n'), ((14903, 14939), 'numpy.std', 'np.std', (["items['attack_success_rate']"], {}), "(items['attack_success_rate'])\n", (14909, 14939), True, 'import numpy as np\n'), ((6739, 6780), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'X.shape[0]'}), '([0, 1], size=X.shape[0])\n', (6755, 6780), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import gtn
import numpy as np
def gen_transitions(num_classes, calc_grad=False):
    """Make a bigram transition graph."""
    graph = gtn.Graph(calc_grad)
    # One accepting node per class label.
    for _ in range(num_classes):
        graph.add_node(False, True)
    # Start node; its index is num_classes.
    graph.add_node(True, True)
    start = num_classes
    for src in range(num_classes):
        graph.add_arc(start, src, src)  # s(<s>, src)
        for dst in range(num_classes):
            graph.add_arc(src, dst, dst)  # s(src, dst)
    return graph
def gen_potentials(num_features, num_classes, calc_grad=False):
    """Make the unary potential graph"""
    graph = gtn.Graph(calc_grad)
    # A single node; every arc is a self-loop transducing feature -> class.
    graph.add_node(True, True)
    for feat in range(num_features):
        for cls in range(num_classes):
            graph.add_arc(0, 0, feat, cls)  # weight f(feat, cls)
    return graph
def gen_model(num_features, num_classes, calc_grad=False, init=True):
    """Build (potentials, transitions); optionally randomize their arc weights."""
    transitions = gen_transitions(num_classes, calc_grad)
    potentials = gen_potentials(num_features, num_classes, calc_grad)
    if init:
        # Randomly set the arc weights (transitions first, then potentials,
        # to keep the RNG consumption order fixed).
        for graph in (transitions, potentials):
            graph.set_weights(10 * np.random.randn(graph.num_arcs()))
    return potentials, transitions
def make_chain_graph(seq, calc_grad=False):
    """Make a simple chain graph from an iterable of integers."""
    graph = gtn.Graph(calc_grad)
    graph.add_node(True)
    last = len(seq)
    for pos, label in enumerate(seq):
        # Only the final node accepts.
        graph.add_node(False, pos + 1 == last)
        graph.add_arc(pos, pos + 1, label)
    return graph
def sample_model(
        num_features, num_classes,
        potentials, transitions,
        num_samples, max_len=20):
    """
    Sample `num_samples` from a linear-chain CRF specified
    by a `potentials` graph and a `transitions` graph. The
    samples will have a random length in `[1, max_len]`.
    """
    model = gtn.compose(potentials, transitions)
    # Draw a random X with length randomly from [1, max_len] and find the
    # most likely Y under the model:
    samples = []
    while len(samples) < num_samples:
        # Sample X:
        T = np.random.randint(1, max_len + 1)
        X = np.random.randint(0, num_features, size=(T,))
        X = make_chain_graph(X)
        # Find the most likely Y given X:
        Y = gtn.viterbi_path(gtn.compose(X, model))
        # Clean up Y:
        # project to output labels and zero the weights so Y is a pure
        # label sequence (no score information leaks into the dataset).
        Y = gtn.project_output(Y)
        Y.set_weights(np.zeros(Y.num_arcs()))
        samples.append((X, Y))
    return samples
def crf_loss(X, Y, potentials, transitions):
    """Negative log-likelihood of (X, Y) under the CRF: logZ - score(X, Y)."""
    emissions = gtn.compose(X, potentials)
    # Unnormalized score of the target path (X, Y).
    clamped = gtn.compose(emissions, gtn.intersect(Y, transitions))
    target_score = gtn.forward_score(clamped)
    # Log partition function over all label sequences.
    free = gtn.compose(emissions, transitions)
    log_z = gtn.forward_score(free)
    return gtn.subtract(log_z, target_score)
def update_params(learning_rate, *graphs):
    """Take a gradient step on each graph in `graphs`."""
    for graph in graphs:
        weights = graph.weights_to_numpy()
        # In-place step: w <- w + lr * dw (callers pass a negative lr to descend).
        weights += learning_rate * graph.grad().weights_to_numpy()
        graph.set_weights(weights)
def sampler(dataset):
    """Iterator which randomly samples from a dataset."""
    while True:
        # Yield one full random pass over the dataset, then reshuffle.
        order = np.random.permutation(len(dataset))
        for i in order:
            yield dataset[i]
def main():
    """Train a linear-chain CRF with SGD on data sampled from a random
    ground-truth model, then report test accuracy.

    Fixes over the original: a duplicated gen_model() call that created and
    silently discarded a first pair of learnable graphs is removed, and the
    SGD loop now stops after exactly max_iter iterations (the original
    `it == max_iter` ran one extra step).
    """
    num_features = 3  # number of input features
    num_classes = 2  # number of output classes
    num_train = 1000  # size of the training set
    num_test = 200  # size of the testing set
    # Setup ground-truth model:
    gt_potentials, gt_transitions = gen_model(num_features, num_classes)
    # Sample training and test datasets:
    samples = sample_model(
        num_features, num_classes,
        gt_potentials, gt_transitions,
        num_train + num_test)
    train, test = samples[:num_train], samples[num_train:]
    print(f"Using {len(train)} samples for the training set")
    print(f"Using {len(test)} samples for the test set")
    # Make the graphs to be learned (init=False: weights start unrandomized):
    potentials, transitions = gen_model(
        num_features, num_classes, calc_grad=True, init=False)
    print("Unary potential graph has {} nodes and {} arcs".format(
        potentials.num_nodes(), potentials.num_arcs()))
    print("Transition graph has {} nodes and {} arcs".format(
        transitions.num_nodes(), transitions.num_arcs()))
    # Run the SGD loop:
    learning_rate = 1e-2
    max_iter = 10000
    losses = []
    for it, (X, Y) in enumerate(sampler(train)):
        # Compute the loss and take a gradient step:
        loss = crf_loss(X, Y, potentials, transitions)
        gtn.backward(loss)
        update_params(-learning_rate, potentials, transitions)
        # Clear the gradients:
        transitions.zero_grad()
        potentials.zero_grad()
        losses.append(loss.item())
        if (it + 1) % 1000 == 0:
            print("=" * 50)
            print(f"Iteration {it + 1}, Avg. Loss {np.mean(losses):.3f}")
            losses = []
        if it + 1 >= max_iter:
            break
    # Evaluate on the test set:
    correct = 0.0
    total = 0
    for X, Y in test:
        full_graph = gtn.compose(gtn.compose(X, potentials), transitions)
        prediction = gtn.viterbi_path(full_graph).labels_to_list(False)
        correct += np.sum(np.array(Y.labels_to_list()) == prediction)
        total += len(prediction)
    print("Test: Accuracy {:.3f}".format(correct / total))
if __name__ == "__main__":
main()
| [
"gtn.viterbi_path",
"gtn.backward",
"gtn.forward_score",
"gtn.subtract",
"numpy.random.randint",
"gtn.intersect",
"gtn.Graph",
"gtn.project_output",
"numpy.mean",
"gtn.compose"
] | [((335, 355), 'gtn.Graph', 'gtn.Graph', (['calc_grad'], {}), '(calc_grad)\n', (344, 355), False, 'import gtn\n'), ((738, 758), 'gtn.Graph', 'gtn.Graph', (['calc_grad'], {}), '(calc_grad)\n', (747, 758), False, 'import gtn\n'), ((1514, 1534), 'gtn.Graph', 'gtn.Graph', (['calc_grad'], {}), '(calc_grad)\n', (1523, 1534), False, 'import gtn\n'), ((2002, 2038), 'gtn.compose', 'gtn.compose', (['potentials', 'transitions'], {}), '(potentials, transitions)\n', (2013, 2038), False, 'import gtn\n'), ((2675, 2701), 'gtn.compose', 'gtn.compose', (['X', 'potentials'], {}), '(X, potentials)\n', (2686, 2701), False, 'import gtn\n'), ((2848, 2879), 'gtn.forward_score', 'gtn.forward_score', (['target_graph'], {}), '(target_graph)\n', (2865, 2879), False, 'import gtn\n'), ((2935, 2974), 'gtn.compose', 'gtn.compose', (['feature_graph', 'transitions'], {}), '(feature_graph, transitions)\n', (2946, 2974), False, 'import gtn\n'), ((2992, 3021), 'gtn.forward_score', 'gtn.forward_score', (['norm_graph'], {}), '(norm_graph)\n', (3009, 3021), False, 'import gtn\n'), ((3034, 3072), 'gtn.subtract', 'gtn.subtract', (['norm_score', 'target_score'], {}), '(norm_score, target_score)\n', (3046, 3072), False, 'import gtn\n'), ((2238, 2271), 'numpy.random.randint', 'np.random.randint', (['(1)', '(max_len + 1)'], {}), '(1, max_len + 1)\n', (2255, 2271), True, 'import numpy as np\n'), ((2284, 2329), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_features'], {'size': '(T,)'}), '(0, num_features, size=(T,))\n', (2301, 2329), True, 'import numpy as np\n'), ((2490, 2511), 'gtn.project_output', 'gtn.project_output', (['Y'], {}), '(Y)\n', (2508, 2511), False, 'import gtn\n'), ((2798, 2827), 'gtn.intersect', 'gtn.intersect', (['Y', 'transitions'], {}), '(Y, transitions)\n', (2811, 2827), False, 'import gtn\n'), ((5019, 5037), 'gtn.backward', 'gtn.backward', (['loss'], {}), '(loss)\n', (5031, 5037), False, 'import gtn\n'), ((2433, 2454), 'gtn.compose', 'gtn.compose', (['X', 'model'], {}), '(X, 
model)\n', (2444, 2454), False, 'import gtn\n'), ((5556, 5582), 'gtn.compose', 'gtn.compose', (['X', 'potentials'], {}), '(X, potentials)\n', (5567, 5582), False, 'import gtn\n'), ((5618, 5646), 'gtn.viterbi_path', 'gtn.viterbi_path', (['full_graph'], {}), '(full_graph)\n', (5634, 5646), False, 'import gtn\n'), ((5344, 5359), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (5351, 5359), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import norm
def ci_test_fisher_z(data_matrix, x, y, s, **kwargs):
    """Fisher-z conditional-independence test of x and y given s.

    Requires the precomputed correlation matrix in kwargs['corr_matrix'];
    returns the two-sided p-value.
    """
    assert 'corr_matrix' in kwargs
    corr = kwargs['corr_matrix']
    sample_size = data_matrix.shape[0]
    stat = zstat(x, y, list(s), corr, sample_size)
    # Two-sided p-value from the standard-normal survival function.
    return 2.0 * norm.sf(np.absolute(stat))
def zstat(x, y, s, cm, n):
    """Fisher z-statistic for the partial correlation of x and y given s."""
    r = pcor_order(x, y, s, cm)
    z = np.sqrt(n - len(s) - 3) * 0.5 * log_q1pm(r)
    # Degenerate inputs (|r| == 1, too-small n) yield NaN; report 0 instead.
    return 0 if np.isnan(z) else z
def log_q1pm(r):
    """Return log((1 + r) / (1 - r)); r == 1 is nudged just below 1 to stay finite."""
    clamped = 1 - 1e-10 if r == 1 else r
    return np.log1p(2 * clamped / (1 - clamped))
def pcor_order(x, y, s, cm):
    """Partial correlation of x and y given the variables in s, from corr matrix cm."""
    if len(s) == 0:
        # Nothing to condition on: plain pairwise correlation.
        return cm[x, y]
    sub = cm[[x, y] + s, :][:, [x, y] + s]
    precision = np.linalg.pinv(sub)
    return -precision[0, 1] / np.sqrt(precision[0, 0] * precision[1, 1])
| [
"numpy.absolute",
"numpy.isnan",
"numpy.linalg.pinv",
"numpy.log1p",
"numpy.sqrt"
] | [((413, 425), 'numpy.isnan', 'np.isnan', (['zv'], {}), '(zv)\n', (421, 425), True, 'import numpy as np\n'), ((538, 563), 'numpy.log1p', 'np.log1p', (['(2 * r / (1 - r))'], {}), '(2 * r / (1 - r))\n', (546, 563), True, 'import numpy as np\n'), ((662, 710), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cm[[x, y] + s, :][:, [x, y] + s]'], {}), '(cm[[x, y] + s, :][:, [x, y] + s])\n', (676, 710), True, 'import numpy as np\n'), ((260, 274), 'numpy.absolute', 'np.absolute', (['z'], {}), '(z)\n', (271, 274), True, 'import numpy as np\n'), ((739, 769), 'numpy.sqrt', 'np.sqrt', (['(pim[0, 0] * pim[1, 1])'], {}), '(pim[0, 0] * pim[1, 1])\n', (746, 769), True, 'import numpy as np\n')] |
from math import sqrt
from random import choice
from re import escape
from socket import socket, AF_INET, SOCK_STREAM
from time import sleep
import cv2 as cv
import numpy as np
# ------------------------------- CONSTANTS -----------------------------------
IP = "172.16.17.32"  # remote challenge server address
PORT = 3456  # remote challenge server port
BUFFER_SIZE = 2**16  # max bytes per socket recv
SOCKET = None  # lazily-opened persistent TCP connection (see say())
SOI = b'\xff\xd8'  # JPEG start-of-image marker
EOI = b'\xff\xd9'  # JPEG end-of-image marker
# -------------------------------- NETWORK ------------------------------------
_id = 0  # counter used to name received image files (img<N>.jpg)
def say(message = ""):
global _id, SOCKET
res_image = None
res_text = None
# connect
if SOCKET == None:
SOCKET = socket(AF_INET, SOCK_STREAM)
SOCKET.connect((IP, PORT))
#SOCKET.settimeout(None)
# send message
print("[OUT] %s" % repr(message))
SOCKET.send((str(message)+"\n").encode())
# receive response
data = SOCKET.recv(BUFFER_SIZE)
# process response
if SOI in data:
while data.find(EOI) == -1:
data += SOCKET.recv(BUFFER_SIZE)
image = data[data.find(SOI):data.find(EOI)+2]
with open("img%s.jpg" % _id, "wb") as f:
f.write(image)
msg = "[IN] Image of %s bytes stored in img%s.jpg." % (len(image), _id)
tail = data[data.find(EOI)+2:]
if len(tail) > 2:
res_text = tail.decode(errors = 'ignore')
msg += " It also contained: '%s'." % res_text.strip()
res_image = "img%s.jpg" % _id
else:
print("[IN] %s" % repr(data.decode()))
res_text = data.decode()
# return
_id += 1
return res_image, res_text
def notify():
    """Best-effort side-channel ping: start a non-blocking connect and ignore failures.

    BUGFIX: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; socket failures are all OSError subclasses, so catch only those.
    """
    try:
        dummy = socket(AF_INET, SOCK_STREAM)
        dummy.setblocking(False)
        dummy.connect(('1.2.3.4', 80))
    except OSError:
        # A non-blocking connect normally raises BlockingIOError (an OSError);
        # any other socket failure is deliberately ignored too.
        pass
    #data = SOCKET.recv(BUFFER_SIZE)
# --------------------------------- IMAGE -------------------------------------
def show(img, name = "Image", scale = 5):
#print(name)
return
cv.imshow(name, cv.resize(img, (0,0), fx = scale, fy = scale, interpolation = 0))
cv.waitKey(0)
cv.destroyAllWindows()
def trim_borders(img, white_threshold = 255):
    """Remove border columns/rows whose mean intensity reaches `white_threshold`.

    BUGFIX: the original used a strict `>` comparison, so the default
    threshold of 255 could never match (the mean of 8-bit pixels cannot
    exceed 255) and the function was a no-op; `>=` makes pure-white
    borders actually get trimmed.
    """
    white_cols = [n for n, v in enumerate(np.mean(img, 0)) if np.mean(v) >= white_threshold]
    img = np.delete(img, white_cols, 1)
    white_rows = [n for n, v in enumerate(np.mean(img, 1)) if np.mean(v) >= white_threshold]
    img = np.delete(img, white_rows, 0)
    return img
def fix_columns(img, blur_radius = 2, hor_threshold = 400, ver_threshold = 100, outlier_factor = 3):
    """Detect strong vertical-edge columns and mirror every other segment in place."""
    blurred = cv.blur(img, (blur_radius, blur_radius))
    edge_density = np.mean(cv.Canny(blurred, hor_threshold, ver_threshold), 0)
    # Columns whose edge density is an outlier mark the segment boundaries.
    limit = np.mean(edge_density) + outlier_factor * np.std(edge_density)
    cuts = [col for col, value in enumerate(edge_density) if value > limit]
    cuts.append(len(edge_density))
    #print("Points to cut image: %s" % cuts)
    for seg, (lo, hi) in enumerate(zip(cuts, cuts[1:])):
        if seg % 2 == 0:
            img[:, lo:hi] = np.fliplr(img[:, lo:hi])
    return img
def match_coins(img, similarity_threshold = 10**7):
    """Template-match each coin image against `img` and return the summed value.

    Templates live in ./coins/<value>.jpg; values appear to be denominations
    in cents. Uses squared-difference matching, so lower scores mean better
    matches.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    res = 0
    for coin in coins:
        template = cv.imread("./coins/%s.jpg" % coin)
        radius = len(template)  # template height, used as a min-distance between hits
        comparison = cv.matchTemplate(img, template, cv.TM_SQDIFF)
        matches = []
        for r in range(len(comparison)):
            for c in range(len(comparison[r])):
                # Accept a hit only if it is a good match AND not within
                # radius/2 of an earlier hit (deduplicates overlapping matches).
                if comparison[r][c] < similarity_threshold and not any(sqrt((r2-r)**2 + (c2-c)**2) < radius/2 for r2, c2 in matches):
                    matches.append((r,c))
        res += len(matches)*coin
    return res
def identify(img_path):
    """Load the image at `img_path`, clean it up, and return the total coin value."""
    picture = cv.imread(img_path)
    show(picture, "Before")
    # Crop away the white frame.
    picture = trim_borders(picture)
    show(picture, "After crop")
    # Un-mirror the flipped column segments.
    picture = fix_columns(picture)
    show(picture, "After column flips")
    # Template-match the coins and sum their values.
    return match_coins(picture)
# ---------------------------- MAIN -------------------------------------------
#"""
# Main solve loop: ping the server, fetch an image, identify the coin total,
# and send the answer back. Runs forever; toggle the #""" markers to disable.
while True:
    notify()
    img, tail = say()
    if tail == None:
        # Reply carried no text portion; poke the server again before answering.
        say()
    amount = identify(img)
    say(amount)
    #sleep(1)
#"""
#print(identify("img9.jpg")) | [
"math.sqrt",
"cv2.waitKey",
"numpy.std",
"socket.socket",
"cv2.imread",
"numpy.fliplr",
"numpy.mean",
"cv2.destroyAllWindows",
"numpy.delete",
"cv2.resize",
"cv2.matchTemplate"
] | [((1804, 1817), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1814, 1817), True, 'import cv2 as cv\n'), ((1819, 1841), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1839, 1841), True, 'import cv2 as cv\n'), ((1984, 2013), 'numpy.delete', 'np.delete', (['img', 'white_cols', '(1)'], {}), '(img, white_cols, 1)\n', (1993, 2013), True, 'import numpy as np\n'), ((2111, 2140), 'numpy.delete', 'np.delete', (['img', 'white_rows', '(0)'], {}), '(img, white_rows, 0)\n', (2120, 2140), True, 'import numpy as np\n'), ((3276, 3295), 'cv2.imread', 'cv.imread', (['img_path'], {}), '(img_path)\n', (3285, 3295), True, 'import cv2 as cv\n'), ((571, 599), 'socket.socket', 'socket', (['AF_INET', 'SOCK_STREAM'], {}), '(AF_INET, SOCK_STREAM)\n', (577, 599), False, 'from socket import socket, AF_INET, SOCK_STREAM\n'), ((1434, 1462), 'socket.socket', 'socket', (['AF_INET', 'SOCK_STREAM'], {}), '(AF_INET, SOCK_STREAM)\n', (1440, 1462), False, 'from socket import socket, AF_INET, SOCK_STREAM\n'), ((1737, 1796), 'cv2.resize', 'cv.resize', (['img', '(0, 0)'], {'fx': 'scale', 'fy': 'scale', 'interpolation': '(0)'}), '(img, (0, 0), fx=scale, fy=scale, interpolation=0)\n', (1746, 1796), True, 'import cv2 as cv\n'), ((2376, 2392), 'numpy.mean', 'np.mean', (['borders'], {}), '(borders)\n', (2383, 2392), True, 'import numpy as np\n'), ((2831, 2865), 'cv2.imread', 'cv.imread', (["('./coins/%s.jpg' % coin)"], {}), "('./coins/%s.jpg' % coin)\n", (2840, 2865), True, 'import cv2 as cv\n'), ((2906, 2951), 'cv2.matchTemplate', 'cv.matchTemplate', (['img', 'template', 'cv.TM_SQDIFF'], {}), '(img, template, cv.TM_SQDIFF)\n', (2922, 2951), True, 'import cv2 as cv\n'), ((2410, 2425), 'numpy.std', 'np.std', (['borders'], {}), '(borders)\n', (2416, 2425), True, 'import numpy as np\n'), ((2647, 2685), 'numpy.fliplr', 'np.fliplr', (['img[:, cuts[n]:cuts[n + 1]]'], {}), '(img[:, cuts[n]:cuts[n + 1]])\n', (2656, 2685), True, 'import numpy as np\n'), ((1927, 1942), 'numpy.mean', 
'np.mean', (['img', '(0)'], {}), '(img, 0)\n', (1934, 1942), True, 'import numpy as np\n'), ((1947, 1957), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (1954, 1957), True, 'import numpy as np\n'), ((2052, 2067), 'numpy.mean', 'np.mean', (['img', '(1)'], {}), '(img, 1)\n', (2059, 2067), True, 'import numpy as np\n'), ((2072, 2082), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (2079, 2082), True, 'import numpy as np\n'), ((3101, 3136), 'math.sqrt', 'sqrt', (['((r2 - r) ** 2 + (c2 - c) ** 2)'], {}), '((r2 - r) ** 2 + (c2 - c) ** 2)\n', (3105, 3136), False, 'from math import sqrt\n')] |
# https://www.hackerrank.com/challenges/np-linear-algebra/problem
import numpy
# Inputs
standard_input = """2
1.1 1.1
1.1 1.1"""
n = int(input())
# 2
a = numpy.array([input().split() for _ in range(n)], float)
# 1.1 1.1
# 1.1 1.1
numpy.set_printoptions(legacy="1.13")
print(numpy.linalg.det(a))
# 0.0
""" Reference
[linalg.det]
The linalg.det tool computes the determinant of an array.
print(numpy.linalg.det([[1, 2], [2, 1]]))
# -3.0
[linalg.eig]
The linalg.eig computes the eigenvalues and right eigenvectors of a square array.
vals, vecs = numpy.linalg.eig([[1, 2], [2, 1]])
print(vals)
# [ 3. -1.]
print(vecs)
# [[ 0.70710678 -0.70710678]
# [ 0.70710678 0.70710678]]
[linalg.inv]
The linalg.inv tool computes the (multiplicative) inverse of a matrix.
print(numpy.linalg.inv([[1, 2], [2, 1]]))
# [[-0.33333333 0.66666667]
# [ 0.66666667 -0.33333333]]
"""
| [
"numpy.linalg.det",
"numpy.set_printoptions"
] | [((238, 275), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (260, 275), False, 'import numpy\n'), ((282, 301), 'numpy.linalg.det', 'numpy.linalg.det', (['a'], {}), '(a)\n', (298, 301), False, 'import numpy\n')] |
#!/usr/bin/env python
import click
import matplotlib
import matplotlib.pyplot as plt
#matplotlib.style.use('ggplot')
import numpy as np
import pandas as pd
from projections.pd_utils import load_pandas
import projections.modelr as modelr
LU2 = {'annual': 'c3ann + c4ann',
'nitrogen': 'c3nfx',
'pasture': 'pastr',
'perennial': 'c3per + c4per',
'primary': 'primf + primn',
'rangelands': 'range',
'timber': '0',
'urban': 'urban',
'young_secondary': 'secdy',
'intermediate_secondary': 'secdi',
'mature_secondary': 'secdm',
}
LU = {'cropland': 'cropland',
'pasture': 'pastr',
'primary': 'primf + primn',
'urban': 'urban',
'secondary': 'secdy',
}
@click.command()
@click.argument('model-file', type=click.Path(dir_okay=False))
@click.option('-m', '--max-x', type=float, default=1.2)
@click.option('-s', '--steps', type=int, default=20)
def main(model_file, max_x=1.2, steps=20):
mod = modelr.load(model_file)
mod.intercept
out = {}
for lu in LU.keys():
out[lu] = np.exp(mod.partial({'logHPD_rs': np.linspace(0, max_x, steps),
lu: np.full(steps, 1.0),
lu + '_intense': np.full(steps, 0.04),
lu + '_light': np.full(steps, 0.07),
lu + '_minimal': np.full(steps, 0.35)
}))
df = pd.DataFrame(out, index=np.linspace(0, max_x, steps))
print(df)
df.plot()
ax.set_title('')
ax.set_ylabel('Species Richness')
ax.set_xlabel('log(HPD + 1) [rescaled by 10.02083]')
#ax.xaxis.set_major_locator(plt.NullLocator())
plt.savefig('log-abund.png')
plt.show()
if __name__ == '__main__':
main()
| [
"numpy.full",
"projections.modelr.load",
"matplotlib.pyplot.show",
"click.option",
"click.command",
"numpy.linspace",
"click.Path",
"matplotlib.pyplot.savefig"
] | [((731, 746), 'click.command', 'click.command', ([], {}), '()\n', (744, 746), False, 'import click\n'), ((811, 865), 'click.option', 'click.option', (['"""-m"""', '"""--max-x"""'], {'type': 'float', 'default': '(1.2)'}), "('-m', '--max-x', type=float, default=1.2)\n", (823, 865), False, 'import click\n'), ((867, 918), 'click.option', 'click.option', (['"""-s"""', '"""--steps"""'], {'type': 'int', 'default': '(20)'}), "('-s', '--steps', type=int, default=20)\n", (879, 918), False, 'import click\n'), ((970, 993), 'projections.modelr.load', 'modelr.load', (['model_file'], {}), '(model_file)\n', (981, 993), True, 'import projections.modelr as modelr\n'), ((1624, 1652), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""log-abund.png"""'], {}), "('log-abund.png')\n", (1635, 1652), True, 'import matplotlib.pyplot as plt\n'), ((1655, 1665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1663, 1665), True, 'import matplotlib.pyplot as plt\n'), ((782, 808), 'click.Path', 'click.Path', ([], {'dir_okay': '(False)'}), '(dir_okay=False)\n', (792, 808), False, 'import click\n'), ((1408, 1436), 'numpy.linspace', 'np.linspace', (['(0)', 'max_x', 'steps'], {}), '(0, max_x, steps)\n', (1419, 1436), True, 'import numpy as np\n'), ((1092, 1120), 'numpy.linspace', 'np.linspace', (['(0)', 'max_x', 'steps'], {}), '(0, max_x, steps)\n', (1103, 1120), True, 'import numpy as np\n'), ((1153, 1172), 'numpy.full', 'np.full', (['steps', '(1.0)'], {}), '(steps, 1.0)\n', (1160, 1172), True, 'import numpy as np\n'), ((1218, 1238), 'numpy.full', 'np.full', (['steps', '(0.04)'], {}), '(steps, 0.04)\n', (1225, 1238), True, 'import numpy as np\n'), ((1282, 1302), 'numpy.full', 'np.full', (['steps', '(0.07)'], {}), '(steps, 0.07)\n', (1289, 1302), True, 'import numpy as np\n'), ((1348, 1368), 'numpy.full', 'np.full', (['steps', '(0.35)'], {}), '(steps, 0.35)\n', (1355, 1368), True, 'import numpy as np\n')] |
"""
@author: <NAME>
@description : Rest pose of a skeleton
File Format:
numpy file (npy)
Content:
1 numpy array
- float_array(num_bones, size(BONE_ATTRIBUTES))
"""
import deep_deformation.utils.common as common
import os
import numpy as np
class RestPoseData:
def __init__(self):
self.bone_data = None
def allocate(self, num_bones):
bone_data_shape = (num_bones, len(common.BONE_ATTRIBUTES))
self.bone_data = np.zeros(bone_data_shape, dtype=float)
def save(self, overwrite = False):
rest_path = common.get_rest_pose_path()
if os.path.exists(rest_path) and overwrite == False:
return
np.save(rest_path, self.bone_data)
def load(self):
rest_path = common.get_rest_pose_path()
self.bone_data = np.load(rest_path)
| [
"numpy.load",
"numpy.save",
"numpy.zeros",
"os.path.exists",
"deep_deformation.utils.common.get_rest_pose_path"
] | [((451, 489), 'numpy.zeros', 'np.zeros', (['bone_data_shape'], {'dtype': 'float'}), '(bone_data_shape, dtype=float)\n', (459, 489), True, 'import numpy as np\n'), ((550, 577), 'deep_deformation.utils.common.get_rest_pose_path', 'common.get_rest_pose_path', ([], {}), '()\n', (575, 577), True, 'import deep_deformation.utils.common as common\n'), ((666, 700), 'numpy.save', 'np.save', (['rest_path', 'self.bone_data'], {}), '(rest_path, self.bone_data)\n', (673, 700), True, 'import numpy as np\n'), ((742, 769), 'deep_deformation.utils.common.get_rest_pose_path', 'common.get_rest_pose_path', ([], {}), '()\n', (767, 769), True, 'import deep_deformation.utils.common as common\n'), ((795, 813), 'numpy.load', 'np.load', (['rest_path'], {}), '(rest_path)\n', (802, 813), True, 'import numpy as np\n'), ((589, 614), 'os.path.exists', 'os.path.exists', (['rest_path'], {}), '(rest_path)\n', (603, 614), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Environment
Functions:
initialize(unit, fovea_center, fovea_size, objects) - initialize unit size
environment containing objects and fovea with size fovea_size and center in
fovea_center.
redraw(environment, unit, objects) - redraw the unit size environment
containing objects.
"""
import numpy as np
from geometricshapes import Fovea, Square, Circle, Rectangle
def initialize(unit, fovea_center, fovea_size, objects):
"""Initialize environment
Keyword arguments:
- unit -- the size (int) of the sides of the quadratic environment
- fovea_center -- center coordinates (floats tuple) of fovea
- fovea_size -- size (float) of fovea
- objects -- list of objects
Creates enviroment image array and draws objects in it. Returns
enviroment image, fovea image and list of objects.
The environment is RGB color coded pixels. That is each pixel has
three dimensions.
"""
environment = np.zeros([unit, unit, 3])
fovea = Fovea(fovea_center, fovea_size, [0, 0, 0], unit)
environment = redraw(environment, unit, objects)
return environment, fovea, objects
def redraw(environment, unit, objects):
"""Redraw an environment image.
Keyword arguments:
- environment -- the image array of the environment
- unit -- the size of the sides of the quadratic environment
- objects -- a list containing the objects in the environment
"""
environment.fill(0)
for obj in objects:
if obj.center.all():
obj.draw(environment)
return environment
def get_object_images(unit, fovea_size):
"""Get images of all object shape/color combinations
Keyword arguments:
- unit -- the size of the sides of the quadratic environment
- fovea_size -- size (float) of fovea
Generates focus images for all possible combinations of object
shape and color. Returns a list containing these images.
"""
env = np.zeros([unit, unit, 3])
fov = Fovea([0.35, 0.65], fovea_size, [1, 1, 1], unit)
s1 = Square([0.2, 0.2], 0.14, [1, 0, 0], unit)
s2 = Square([0.2, 0.5], 0.14, [0, 1, 0], unit)
s3 = Square([0.2, 0.8], 0.14, [0, 0, 1], unit)
c1 = Circle([0.5, 0.2], 0.14, [1, 0, 0], unit)
c2 = Circle([0.5, 0.5], 0.14, [0, 1, 0], unit)
c3 = Circle([0.5, 0.8], 0.14, [0, 0, 1], unit)
b1 = Rectangle([0.8, 0.2], 0.14, [1, 0, 0], unit, 0)
b2 = Rectangle([0.8, 0.5], 0.14, [0, 1, 0], unit, 0)
b3 = Rectangle([0.8, 0.8], 0.14, [0, 0, 1], unit, 0)
object_images = []
objects = [s1, s2, s3, c1, c2, c3, b1, b2, b3]
for obj in objects:
obj.draw(env)
center = obj.center
fov.center = np.array(center)
fov_im = fov.get_focus_image(env)
if obj.type_ == 'Square':
object_type = 0
elif obj.type_ == 'Circle':
object_type = 1
elif obj.type_ == 'Rectangle':
object_type = 2
object_color = obj.color.index(1)
object_image = np.concatenate((np.array([object_type, object_color]),
fov_im.flatten('F').T))
object_images.append(object_image)
# centers = ([0.2, 0.2], [0.2, 0.5], [0.2, 0.8], [0.5, 0.2], [0.5, 0.5],
# [0.5, 0.8], [0.8, 0.2], [0.8, 0.5], [0.8, 0.8])
#
#
# for center in centers:
# fov.center = np.array(center)
# fov_im = fov.get_focus_image(env)
# object_images.append(fov_im)
return object_images
if __name__ == '__main__':
"""Main"""
object_images = get_object_images(100, 0.2)
| [
"geometricshapes.Square",
"geometricshapes.Circle",
"numpy.zeros",
"geometricshapes.Fovea",
"numpy.array",
"geometricshapes.Rectangle"
] | [((972, 997), 'numpy.zeros', 'np.zeros', (['[unit, unit, 3]'], {}), '([unit, unit, 3])\n', (980, 997), True, 'import numpy as np\n'), ((1010, 1058), 'geometricshapes.Fovea', 'Fovea', (['fovea_center', 'fovea_size', '[0, 0, 0]', 'unit'], {}), '(fovea_center, fovea_size, [0, 0, 0], unit)\n', (1015, 1058), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((1959, 1984), 'numpy.zeros', 'np.zeros', (['[unit, unit, 3]'], {}), '([unit, unit, 3])\n', (1967, 1984), True, 'import numpy as np\n'), ((1995, 2043), 'geometricshapes.Fovea', 'Fovea', (['[0.35, 0.65]', 'fovea_size', '[1, 1, 1]', 'unit'], {}), '([0.35, 0.65], fovea_size, [1, 1, 1], unit)\n', (2000, 2043), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2054, 2095), 'geometricshapes.Square', 'Square', (['[0.2, 0.2]', '(0.14)', '[1, 0, 0]', 'unit'], {}), '([0.2, 0.2], 0.14, [1, 0, 0], unit)\n', (2060, 2095), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2105, 2146), 'geometricshapes.Square', 'Square', (['[0.2, 0.5]', '(0.14)', '[0, 1, 0]', 'unit'], {}), '([0.2, 0.5], 0.14, [0, 1, 0], unit)\n', (2111, 2146), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2156, 2197), 'geometricshapes.Square', 'Square', (['[0.2, 0.8]', '(0.14)', '[0, 0, 1]', 'unit'], {}), '([0.2, 0.8], 0.14, [0, 0, 1], unit)\n', (2162, 2197), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2207, 2248), 'geometricshapes.Circle', 'Circle', (['[0.5, 0.2]', '(0.14)', '[1, 0, 0]', 'unit'], {}), '([0.5, 0.2], 0.14, [1, 0, 0], unit)\n', (2213, 2248), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2258, 2299), 'geometricshapes.Circle', 'Circle', (['[0.5, 0.5]', '(0.14)', '[0, 1, 0]', 'unit'], {}), '([0.5, 0.5], 0.14, [0, 1, 0], unit)\n', (2264, 2299), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2309, 2350), 'geometricshapes.Circle', 'Circle', (['[0.5, 0.8]', 
'(0.14)', '[0, 0, 1]', 'unit'], {}), '([0.5, 0.8], 0.14, [0, 0, 1], unit)\n', (2315, 2350), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2360, 2407), 'geometricshapes.Rectangle', 'Rectangle', (['[0.8, 0.2]', '(0.14)', '[1, 0, 0]', 'unit', '(0)'], {}), '([0.8, 0.2], 0.14, [1, 0, 0], unit, 0)\n', (2369, 2407), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2417, 2464), 'geometricshapes.Rectangle', 'Rectangle', (['[0.8, 0.5]', '(0.14)', '[0, 1, 0]', 'unit', '(0)'], {}), '([0.8, 0.5], 0.14, [0, 1, 0], unit, 0)\n', (2426, 2464), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2474, 2521), 'geometricshapes.Rectangle', 'Rectangle', (['[0.8, 0.8]', '(0.14)', '[0, 0, 1]', 'unit', '(0)'], {}), '([0.8, 0.8], 0.14, [0, 0, 1], unit, 0)\n', (2483, 2521), False, 'from geometricshapes import Fovea, Square, Circle, Rectangle\n'), ((2692, 2708), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (2700, 2708), True, 'import numpy as np\n'), ((3028, 3065), 'numpy.array', 'np.array', (['[object_type, object_color]'], {}), '([object_type, object_color])\n', (3036, 3065), True, 'import numpy as np\n')] |
""" Implementation of filters for images and texts"""
import numpy as np
from jina import Executor, DocumentArray, requests
class ImageReader(Executor):
@requests(on='/index')
def index_read(self, docs: 'DocumentArray', **kwargs):
array = DocumentArray(list(filter(lambda doc: doc.modality=='image', docs)))
for doc in array:
doc.convert_image_buffer_to_blob()
doc.blob = np.array(doc.blob).astype(np.uint8)
return array
@requests(on='/search')
def search_read(self, docs: 'DocumentArray', **kwargs):
image_docs = DocumentArray(list(filter(lambda doc: doc.mime_type in ('image/jpeg', 'image/png'), docs)))
if not image_docs:
return DocumentArray([])
for doc in image_docs:
doc.convert_uri_to_buffer()
doc.convert_image_buffer_to_blob()
doc.blob = doc.blob.astype(np.uint8)
return image_docs
class TextFilter(Executor):
@requests
def filter_text(self, docs: 'DocumentArray', **kwargs):
docs = DocumentArray(list(filter(lambda doc: doc.mime_type == 'text/plain', docs)))
return docs
| [
"jina.DocumentArray",
"numpy.array",
"jina.requests"
] | [((161, 182), 'jina.requests', 'requests', ([], {'on': '"""/index"""'}), "(on='/index')\n", (169, 182), False, 'from jina import Executor, DocumentArray, requests\n'), ((486, 508), 'jina.requests', 'requests', ([], {'on': '"""/search"""'}), "(on='/search')\n", (494, 508), False, 'from jina import Executor, DocumentArray, requests\n'), ((728, 745), 'jina.DocumentArray', 'DocumentArray', (['[]'], {}), '([])\n', (741, 745), False, 'from jina import Executor, DocumentArray, requests\n'), ((423, 441), 'numpy.array', 'np.array', (['doc.blob'], {}), '(doc.blob)\n', (431, 441), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import nibabel as nib
import numpy as np
import glob
import nipype
from nipype.interfaces import niftyreg
import os
import tempfile
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
def get_args():
parser = argparse.ArgumentParser(description='performs 12-DOF affine registration of a T1 image to ICBM152 template')
parser.add_argument('-a', '--anat', type=str, help='input T1w nifti image')
parser.add_argument('-o', '--out_dir', type=str, help='output directory')
return parser.parse_args()
def registration(in_file, out_file, reference):
temp = tempfile.TemporaryDirectory()
ala = niftyreg.RegAladin(gpuid_val=0, platform_val=1, omp_core_val=20)
ala.inputs.ref_file = reference
ala.inputs.flo_file = in_file
ala.inputs.res_file = os.path.join(temp.name, 'res.nii.gz')
ala.inputs.aff_file = os.path.join(temp.name, 'aff.txt')
result0 = ala.run()
res = niftyreg.RegResample(inter_val="SINC")
res.inputs.ref_file = reference
res.inputs.flo_file = in_file
res.inputs.trans_file = result0.outputs.aff_file
res.inputs.out_file = out_file
result = res.run()
temp.cleanup()
def normalization(scan):
scan = (scan - np.mean(scan)) / np.std(scan)
return scan
def clip(scan):
return np.clip(scan, -1, 2.5)
def back_remove(file, temp, new_path):
if not os.path.exists(new_path):
os.mkdir(new_path)
data = np.load(file)
new_data = data[:,:,:]
stack = [(0, 0, 0), (180, 0, 0), (0, 216, 0), (180, 216, 0)]
visited = set([(0, 0, 0), (180, 0, 0), (0, 216, 0), (180, 216, 0)])
def valid(x, y, z):
if x < 0 or x >= 181:
return False
if y < 0 or y >= 217:
return False
if z < 0 or z >= 181:
return False
return True
while stack:
x, y, z = stack.pop()
for dx, dy, dz in [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]:
new_x, new_y, new_z = x + dx, y + dy, z + dz
if valid(new_x, new_y, new_z) and (new_x, new_y, new_z) not in visited \
and data[new_x, new_y, new_z] < -0.6 and temp[new_x, new_y, new_z] < 0.8:
visited.add((new_x, new_y, new_z))
new_data[new_x, new_y, new_z] = -10
stack.append((new_x, new_y, new_z))
filename = file.split('/')[-1]
plt.subplot(131)
plt.imshow(new_data[100, :, :])
plt.subplot(132)
plt.imshow(new_data[:, 100, :])
plt.subplot(133)
plt.imshow(new_data[:, :, 100])
plt.savefig(os.path.join(new_path, filename.replace('npy', 'jpg')))
plt.close()
new_data = np.where(new_data==-10, -np.ones((181, 217, 181)), new_data).astype(np.float32)
np.save(os.path.join(new_path, filename), new_data)
if __name__ == "__main__":
reference = '/Meliora_DeepLearning/mni_icbm152_t1_tal_nlin_sym_09c_1mm_181x217x181.nii.gz'
tmp = tempfile.TemporaryDirectory()
args = get_args()
in_anat = args.anat
out_dir = args.out_dir
os.makedirs(out_dir, exist_ok=True)
out_name_nifti = os.path.basename(in_anat).split('.', 1)[0] + '.nii.gz'
out_anat_nifti = os.path.join(tmp.name, out_name_nifti)
registration(in_anat, out_anat_nifti, reference)
in_img = nib.load(out_anat_nifti)
out_name_npy = os.path.basename(in_anat).split('.', 1)[0] + '.npy'
out_npy = os.path.join(out_dir, out_name_npy)
data = np.array(in_img.dataobj)
data = normalization(data)
data = clip(data)
np.save(out_npy, data)
tmp.cleanup()
template = np.load('/home/medetax/Applications/brain2020/brain_region.npy')
back_remove(out_npy, template, out_dir)
| [
"os.mkdir",
"numpy.load",
"argparse.ArgumentParser",
"numpy.ones",
"numpy.clip",
"numpy.mean",
"os.path.join",
"tempfile.TemporaryDirectory",
"numpy.std",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"os.path.exists",
"nipype.interfaces.niftyreg.RegAladin",
"numpy.save",
"os.pa... | [((195, 216), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (209, 216), False, 'import matplotlib\n'), ((296, 409), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""performs 12-DOF affine registration of a T1 image to ICBM152 template"""'}), "(description=\n 'performs 12-DOF affine registration of a T1 image to ICBM152 template')\n", (319, 409), False, 'import argparse\n'), ((654, 683), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (681, 683), False, 'import tempfile\n'), ((694, 758), 'nipype.interfaces.niftyreg.RegAladin', 'niftyreg.RegAladin', ([], {'gpuid_val': '(0)', 'platform_val': '(1)', 'omp_core_val': '(20)'}), '(gpuid_val=0, platform_val=1, omp_core_val=20)\n', (712, 758), False, 'from nipype.interfaces import niftyreg\n'), ((855, 892), 'os.path.join', 'os.path.join', (['temp.name', '"""res.nii.gz"""'], {}), "(temp.name, 'res.nii.gz')\n", (867, 892), False, 'import os\n'), ((919, 953), 'os.path.join', 'os.path.join', (['temp.name', '"""aff.txt"""'], {}), "(temp.name, 'aff.txt')\n", (931, 953), False, 'import os\n'), ((988, 1026), 'nipype.interfaces.niftyreg.RegResample', 'niftyreg.RegResample', ([], {'inter_val': '"""SINC"""'}), "(inter_val='SINC')\n", (1008, 1026), False, 'from nipype.interfaces import niftyreg\n'), ((1346, 1368), 'numpy.clip', 'np.clip', (['scan', '(-1)', '(2.5)'], {}), '(scan, -1, 2.5)\n', (1353, 1368), True, 'import numpy as np\n'), ((1486, 1499), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (1493, 1499), True, 'import numpy as np\n'), ((2443, 2459), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (2454, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2495), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_data[100, :, :]'], {}), '(new_data[100, :, :])\n', (2474, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2516), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), 
'(132)\n', (2511, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2552), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_data[:, 100, :]'], {}), '(new_data[:, 100, :])\n', (2531, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2557, 2573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2568, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2609), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_data[:, :, 100]'], {}), '(new_data[:, :, 100])\n', (2588, 2609), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2697), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2695, 2697), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3017), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3015, 3017), False, 'import tempfile\n'), ((3095, 3130), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (3106, 3130), False, 'import os\n'), ((3228, 3266), 'os.path.join', 'os.path.join', (['tmp.name', 'out_name_nifti'], {}), '(tmp.name, out_name_nifti)\n', (3240, 3266), False, 'import os\n'), ((3333, 3357), 'nibabel.load', 'nib.load', (['out_anat_nifti'], {}), '(out_anat_nifti)\n', (3341, 3357), True, 'import nibabel as nib\n'), ((3443, 3478), 'os.path.join', 'os.path.join', (['out_dir', 'out_name_npy'], {}), '(out_dir, out_name_npy)\n', (3455, 3478), False, 'import os\n'), ((3490, 3514), 'numpy.array', 'np.array', (['in_img.dataobj'], {}), '(in_img.dataobj)\n', (3498, 3514), True, 'import numpy as np\n'), ((3572, 3594), 'numpy.save', 'np.save', (['out_npy', 'data'], {}), '(out_npy, data)\n', (3579, 3594), True, 'import numpy as np\n'), ((3628, 3692), 'numpy.load', 'np.load', (['"""/home/medetax/Applications/brain2020/brain_region.npy"""'], {}), "('/home/medetax/Applications/brain2020/brain_region.npy')\n", (3635, 3692), True, 'import numpy as np\n'), ((1289, 1301), 'numpy.std', 'np.std', (['scan'], {}), '(scan)\n', (1295, 1301), True, 
'import numpy as np\n'), ((1421, 1445), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (1435, 1445), False, 'import os\n'), ((1455, 1473), 'os.mkdir', 'os.mkdir', (['new_path'], {}), '(new_path)\n', (1463, 1473), False, 'import os\n'), ((2810, 2842), 'os.path.join', 'os.path.join', (['new_path', 'filename'], {}), '(new_path, filename)\n', (2822, 2842), False, 'import os\n'), ((1272, 1285), 'numpy.mean', 'np.mean', (['scan'], {}), '(scan)\n', (1279, 1285), True, 'import numpy as np\n'), ((2743, 2767), 'numpy.ones', 'np.ones', (['(181, 217, 181)'], {}), '((181, 217, 181))\n', (2750, 2767), True, 'import numpy as np\n'), ((3152, 3177), 'os.path.basename', 'os.path.basename', (['in_anat'], {}), '(in_anat)\n', (3168, 3177), False, 'import os\n'), ((3377, 3402), 'os.path.basename', 'os.path.basename', (['in_anat'], {}), '(in_anat)\n', (3393, 3402), False, 'import os\n')] |
import copy
from fv3fit._shared.config import SliceConfig
from fv3fit._shared.packer import (
pack_tfdataset,
clip_sample,
)
from fv3fit.tfdataset import tfdataset_from_batches
import tensorflow as tf
from typing import Mapping, Sequence
import numpy as np
import pytest
import xarray as xr
import fv3fit.tfdataset
def assert_datasets_equal(dataset1, dataset2, rtol=1e-6):
sample = next(iter(dataset1))
if isinstance(sample, dict):
for name in sample:
result = tf.data.Dataset.zip((dataset1, dataset2)).map(
lambda x1, x2: tf.reduce_any(
tf.abs(x2[name] - x1[name]) / (0.5 * (x2[name] + x1[name])) > rtol
)
)
assert not tf.reduce_any(any(result)), name
else:
result = tf.data.Dataset.zip((dataset1, dataset2)).map(
lambda x1, x2: tf.reduce_any(tf.abs(x2 - x1) / (0.5 * (x2 + x1)) > rtol)
)
assert not tf.reduce_any(any(result))
def get_tfdataset(
n_dims: int, n_samples: int, entry_features: Mapping[str, int],
):
if n_dims < 2:
raise ValueError(
"n_dims must be at least 2 (requires sample, feature dimensions)"
)
data = {}
for name, n_features in entry_features.items():
shape = list(range(2, n_dims + 2))
shape[-1] = n_features
shape[0] = n_samples
data[name] = tf.convert_to_tensor(
np.random.uniform(size=shape).astype(np.float32)
)
return get_tfdataset_from_data(data)
def get_tfdataset_from_data(data):
def gen():
yield data
sample = next(iter(gen()))
if isinstance(sample, dict):
output_signature = {
key: tf.TensorSpec(val.shape, dtype=val.dtype)
for key, val in sample.items()
}
else:
output_signature = tf.TensorSpec(sample.shape, dtype=sample.dtype)
return tf.data.Dataset.from_generator(
gen, output_signature=output_signature
).unbatch()
def test_assert_datasets_equal_raises():
dataset_1 = get_tfdataset_from_data(tf.convert_to_tensor(np.asarray([0.0])))
dataset_2 = get_tfdataset_from_data(tf.convert_to_tensor(np.asarray([1.0])))
with pytest.raises(AssertionError):
assert_datasets_equal(dataset_1, dataset_2, rtol=0.1)
def test_tfdataset_from_batches_empty():
with pytest.raises(NotImplementedError):
tfdataset_from_batches([])
def test_tfdataset_from_batches_single():
n_samples = 3
xr_dataset = xr.Dataset(
data_vars={
"a": xr.DataArray(np.random.uniform(size=[n_samples, 3, 4])),
"b": xr.DataArray(np.random.uniform(size=[n_samples, 3])),
}
)
tfdataset = tfdataset_from_batches([xr_dataset])
result = next(iter(tfdataset))
assert isinstance(result, dict)
np.testing.assert_array_equal(result["a"], xr_dataset["a"].values)
np.testing.assert_array_equal(result["b"], xr_dataset["b"].values)
def assert_tfdataset_equals_batches(tfdataset, batches):
for i_batch, result in enumerate(iter(tfdataset)):
assert isinstance(result, dict)
np.testing.assert_array_equal(result["a"], batches[i_batch]["a"].values)
np.testing.assert_array_equal(result["b"], batches[i_batch]["b"].values)
def test_tfdataset_from_batches_multiple():
n_samples = 3
batches = [
xr.Dataset(
data_vars={
"a": xr.DataArray(np.random.uniform(size=[n_samples, 3, 4])),
"b": xr.DataArray(np.random.uniform(size=[n_samples, 3])),
}
)
for _ in range(3)
]
tfdataset = tfdataset_from_batches(batches)
assert_tfdataset_equals_batches(tfdataset, batches)
def test_tfdataset_from_batches_multiple_different_samples():
n_samples = 3
batches = [
xr.Dataset(
data_vars={
"a": xr.DataArray(np.random.uniform(size=[n_samples * 2, 3, 4])),
"b": xr.DataArray(np.random.uniform(size=[n_samples * 2, 3])),
}
),
xr.Dataset(
data_vars={
"a": xr.DataArray(np.random.uniform(size=[n_samples, 3, 4])),
"b": xr.DataArray(np.random.uniform(size=[n_samples, 3])),
}
),
]
tfdataset = tfdataset_from_batches(batches)
assert_tfdataset_equals_batches(tfdataset, batches)
@pytest.mark.parametrize("n_dims", [2, 3, 5])
@pytest.mark.parametrize(
"variable_names",
[
pytest.param(["a", "b", "c", "d"], id="all"),
pytest.param(["b"], id="one"),
pytest.param(["a", "c"], id="half"),
],
)
def test_pack_tfdataset(n_dims: int, variable_names: Sequence[str]):
entry_features = {
"a": 5,
"b": 7,
"c": 1,
"d": 1,
}
dataset = get_tfdataset(n_dims=n_dims, n_samples=10, entry_features=entry_features)
packed, packing_info = pack_tfdataset(dataset, variable_names=variable_names)
packed_sum = tf.reduce_sum(sum(packed))
expected_sum = sum(
tf.reduce_sum(sum(dataset.map(lambda x: x[name]))) for name in variable_names
)
np.testing.assert_allclose(packed_sum.numpy(), expected_sum.numpy(), rtol=1e-6)
assert packing_info.names == variable_names
assert packing_info.features == [entry_features[name] for name in variable_names]
sample = next(iter(packed))
assert sample.shape[-1] == sum(
n for (name, n) in entry_features.items() if name in variable_names
)
@pytest.mark.parametrize("n_dims", [2, 3, 5])
@pytest.mark.parametrize(
"config, clipped_features",
[
pytest.param(
{"a": SliceConfig(start=1)},
{"a": 4, "b": 7, "c": 1, "d": 1},
id="a_start_1",
),
pytest.param(
{"a": SliceConfig(stop=-1)},
{"a": 4, "b": 7, "c": 1, "d": 1},
id="a_stop_1",
),
pytest.param(
{"b": SliceConfig(start=2, stop=-1)},
{"a": 5, "b": 4, "c": 1, "d": 1},
id="b_start_stop_1",
),
],
)
def test_clip_tfdataset(
n_dims, config: Mapping[str, SliceConfig], clipped_features: Mapping[str, int]
):
entry_features = {
"a": 5,
"b": 7,
"c": 1,
"d": 1,
}
dataset = get_tfdataset(n_dims=n_dims, n_samples=10, entry_features=entry_features)
clipped = dataset.map(clip_sample(config))
sample_in = next(iter(dataset))
sample_out = next(iter(clipped))
assert sample_out.keys() == sample_in.keys()
for name, value in sample_out.items():
assert value.shape[-1] == clipped_features[name], name
if name in config:
np.testing.assert_array_equal(
sample_in[name][..., config[name].slice], sample_out[name]
)
else:
np.testing.assert_array_equal(sample_in[name], sample_out[name])
def test__seq_to_tfdataset():
batches = [{"a": np.arange(30).reshape(10, 3)} for _ in range(3)]
def transform(batch):
out = copy.deepcopy(batch)
out["a"] = out["a"] * 2
return out
tf_ds = fv3fit.tfdataset.seq_to_tfdataset(batches, transform)
assert isinstance(tf_ds, tf.data.Dataset)
result = next(tf_ds.as_numpy_iterator())
assert isinstance(result, dict)
np.testing.assert_equal(result["a"], batches[0]["a"] * 2)
| [
"fv3fit._shared.packer.pack_tfdataset",
"fv3fit.tfdataset.tfdataset_from_batches",
"copy.deepcopy",
"numpy.random.uniform",
"tensorflow.abs",
"numpy.testing.assert_array_equal",
"numpy.asarray",
"fv3fit._shared.config.SliceConfig",
"pytest.param",
"pytest.raises",
"fv3fit._shared.packer.clip_sam... | [((4379, 4423), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_dims"""', '[2, 3, 5]'], {}), "('n_dims', [2, 3, 5])\n", (4402, 4423), False, 'import pytest\n'), ((5488, 5532), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_dims"""', '[2, 3, 5]'], {}), "('n_dims', [2, 3, 5])\n", (5511, 5532), False, 'import pytest\n'), ((2713, 2749), 'fv3fit.tfdataset.tfdataset_from_batches', 'tfdataset_from_batches', (['[xr_dataset]'], {}), '([xr_dataset])\n', (2735, 2749), False, 'from fv3fit.tfdataset import tfdataset_from_batches\n'), ((2825, 2891), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['a']", "xr_dataset['a'].values"], {}), "(result['a'], xr_dataset['a'].values)\n", (2854, 2891), True, 'import numpy as np\n'), ((2896, 2962), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['b']", "xr_dataset['b'].values"], {}), "(result['b'], xr_dataset['b'].values)\n", (2925, 2962), True, 'import numpy as np\n'), ((3628, 3659), 'fv3fit.tfdataset.tfdataset_from_batches', 'tfdataset_from_batches', (['batches'], {}), '(batches)\n', (3650, 3659), False, 'from fv3fit.tfdataset import tfdataset_from_batches\n'), ((4288, 4319), 'fv3fit.tfdataset.tfdataset_from_batches', 'tfdataset_from_batches', (['batches'], {}), '(batches)\n', (4310, 4319), False, 'from fv3fit.tfdataset import tfdataset_from_batches\n'), ((4902, 4956), 'fv3fit._shared.packer.pack_tfdataset', 'pack_tfdataset', (['dataset'], {'variable_names': 'variable_names'}), '(dataset, variable_names=variable_names)\n', (4916, 4956), False, 'from fv3fit._shared.packer import pack_tfdataset, clip_sample\n'), ((7294, 7351), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["result['a']", "(batches[0]['a'] * 2)"], {}), "(result['a'], batches[0]['a'] * 2)\n", (7317, 7351), True, 'import numpy as np\n'), ((1841, 1888), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['sample.shape'], {'dtype': 'sample.dtype'}), 
'(sample.shape, dtype=sample.dtype)\n', (1854, 1888), True, 'import tensorflow as tf\n'), ((2209, 2238), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2222, 2238), False, 'import pytest\n'), ((2354, 2388), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2367, 2388), False, 'import pytest\n'), ((2398, 2424), 'fv3fit.tfdataset.tfdataset_from_batches', 'tfdataset_from_batches', (['[]'], {}), '([])\n', (2420, 2424), False, 'from fv3fit.tfdataset import tfdataset_from_batches\n'), ((3125, 3197), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['a']", "batches[i_batch]['a'].values"], {}), "(result['a'], batches[i_batch]['a'].values)\n", (3154, 3197), True, 'import numpy as np\n'), ((3206, 3278), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['b']", "batches[i_batch]['b'].values"], {}), "(result['b'], batches[i_batch]['b'].values)\n", (3235, 3278), True, 'import numpy as np\n'), ((4486, 4530), 'pytest.param', 'pytest.param', (["['a', 'b', 'c', 'd']"], {'id': '"""all"""'}), "(['a', 'b', 'c', 'd'], id='all')\n", (4498, 4530), False, 'import pytest\n'), ((4540, 4569), 'pytest.param', 'pytest.param', (["['b']"], {'id': '"""one"""'}), "(['b'], id='one')\n", (4552, 4569), False, 'import pytest\n'), ((4579, 4614), 'pytest.param', 'pytest.param', (["['a', 'c']"], {'id': '"""half"""'}), "(['a', 'c'], id='half')\n", (4591, 4614), False, 'import pytest\n'), ((6381, 6400), 'fv3fit._shared.packer.clip_sample', 'clip_sample', (['config'], {}), '(config)\n', (6392, 6400), False, 'from fv3fit._shared.packer import pack_tfdataset, clip_sample\n'), ((7023, 7043), 'copy.deepcopy', 'copy.deepcopy', (['batch'], {}), '(batch)\n', (7036, 7043), False, 'import copy\n'), ((1709, 1750), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['val.shape'], {'dtype': 'val.dtype'}), '(val.shape, dtype=val.dtype)\n', (1722, 1750), True, 'import tensorflow as tf\n'), 
((1900, 1970), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen'], {'output_signature': 'output_signature'}), '(gen, output_signature=output_signature)\n', (1930, 1970), True, 'import tensorflow as tf\n'), ((2099, 2116), 'numpy.asarray', 'np.asarray', (['[0.0]'], {}), '([0.0])\n', (2109, 2116), True, 'import numpy as np\n'), ((2180, 2197), 'numpy.asarray', 'np.asarray', (['[1.0]'], {}), '([1.0])\n', (2190, 2197), True, 'import numpy as np\n'), ((6669, 6762), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sample_in[name][..., config[name].slice]', 'sample_out[name]'], {}), '(sample_in[name][..., config[name].slice],\n sample_out[name])\n', (6698, 6762), True, 'import numpy as np\n'), ((6815, 6879), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sample_in[name]', 'sample_out[name]'], {}), '(sample_in[name], sample_out[name])\n', (6844, 6879), True, 'import numpy as np\n'), ((790, 831), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(dataset1, dataset2)'], {}), '((dataset1, dataset2))\n', (809, 831), True, 'import tensorflow as tf\n'), ((5637, 5657), 'fv3fit._shared.config.SliceConfig', 'SliceConfig', ([], {'start': '(1)'}), '(start=1)\n', (5648, 5657), False, 'from fv3fit._shared.config import SliceConfig\n'), ((5785, 5805), 'fv3fit._shared.config.SliceConfig', 'SliceConfig', ([], {'stop': '(-1)'}), '(stop=-1)\n', (5796, 5805), False, 'from fv3fit._shared.config import SliceConfig\n'), ((5932, 5961), 'fv3fit._shared.config.SliceConfig', 'SliceConfig', ([], {'start': '(2)', 'stop': '(-1)'}), '(start=2, stop=-1)\n', (5943, 5961), False, 'from fv3fit._shared.config import SliceConfig\n'), ((499, 540), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(dataset1, dataset2)'], {}), '((dataset1, dataset2))\n', (518, 540), True, 'import tensorflow as tf\n'), ((1427, 1456), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (1444, 1456), 
True, 'import numpy as np\n'), ((2566, 2607), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples, 3, 4]'}), '(size=[n_samples, 3, 4])\n', (2583, 2607), True, 'import numpy as np\n'), ((2640, 2678), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples, 3]'}), '(size=[n_samples, 3])\n', (2657, 2678), True, 'import numpy as np\n'), ((6933, 6946), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (6942, 6946), True, 'import numpy as np\n'), ((3437, 3478), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples, 3, 4]'}), '(size=[n_samples, 3, 4])\n', (3454, 3478), True, 'import numpy as np\n'), ((3515, 3553), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples, 3]'}), '(size=[n_samples, 3])\n', (3532, 3553), True, 'import numpy as np\n'), ((3892, 3937), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples * 2, 3, 4]'}), '(size=[n_samples * 2, 3, 4])\n', (3909, 3937), True, 'import numpy as np\n'), ((3974, 4016), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples * 2, 3]'}), '(size=[n_samples * 2, 3])\n', (3991, 4016), True, 'import numpy as np\n'), ((4122, 4163), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples, 3, 4]'}), '(size=[n_samples, 3, 4])\n', (4139, 4163), True, 'import numpy as np\n'), ((4200, 4238), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n_samples, 3]'}), '(size=[n_samples, 3])\n', (4217, 4238), True, 'import numpy as np\n'), ((878, 893), 'tensorflow.abs', 'tf.abs', (['(x2 - x1)'], {}), '(x2 - x1)\n', (884, 893), True, 'import tensorflow as tf\n'), ((612, 639), 'tensorflow.abs', 'tf.abs', (['(x2[name] - x1[name])'], {}), '(x2[name] - x1[name])\n', (618, 639), True, 'import tensorflow as tf\n')] |
from math import ceil, floor, log2
import numpy as np
from CQT_Toolbox.winfuns import winfuns
def nsgcqwin(*args):
    """Construct constant-Q windows for a non-stationary Gabor frame.

    Python port of the NSGToolbox routine of the same name.  MATLAB-style
    calling convention: five required positional values followed by optional
    name/value pairs.

    Positional args (required, in order):
        fmin: minimum (lowest filter centre) frequency in Hz.
        fmax: maximum frequency in Hz; clipped to the Nyquist frequency.
        bins: number of bins per octave.
        sr:   sampling rate in Hz.
        Ls:   signal length in samples.

    Optional name/value pairs (appended after the five positional values):
        'min_win':    minimum admissible window length (default 4).
        'gamma':      additive bandwidth offset in Hz (default 0).
        'bwfac':      window lengths are rounded up to multiples of this
                      factor (default 1).
        'fractional': non-zero selects fractional (non-integer) bandwidths
                      (default 0) -- see NOTE(review) below.
        'winfun':     window prototype name passed to `winfuns`
                      (default "hann").

    Returns:
        g:     object array of window (filter) vectors, one per channel.
        shift: array of shifts between consecutive filter centre positions
               in the DFT frame (first entry wraps around the signal end).
        M:     array of window lengths.

    Raises:
        ValueError: fewer than five positional arguments, an odd number of
            optional values, or an unrecognised option name.
    """
    fmin, fmax, bins, sr, Ls = args[:5]
    # Defaults for the optional name/value parameters.
    bwfac = 1
    min_win = 4
    fractional = 0
    winfun = "hann"
    gamma = 0
    nargin = len(args)
    if nargin < 5:
        raise ValueError("Not enough input arguments")
    if nargin >= 6:
        # Parse MATLAB-style name/value pairs: names at even indices,
        # values at the following odd indices.
        varargin = args[5:]
        Lvar = len(varargin)
        if Lvar % 2:
            raise ValueError("Invalid input arguments")
        for i in range(0, Lvar, 2):
            if not isinstance(varargin[i], str):
                raise ValueError("Invalid input arguments")
            if varargin[i] == "min_win":
                min_win = varargin[i + 1]
            elif varargin[i] == "gamma":
                gamma = varargin[i + 1]
            elif varargin[i] == "bwfac":
                bwfac = varargin[i + 1]
            elif varargin[i] == "fractional":
                fractional = varargin[i + 1]
            elif varargin[i] == "winfun":
                winfun = varargin[i + 1]
            else:
                raise ValueError("Invalid input arguments " + varargin[i])
    # Nyquist frequency; the requested fmax may not exceed it.
    nf = sr // 2
    if fmax > nf:
        fmax = nf
    # NOTE(review): this integer fftres is never read before being
    # overwritten by the float `fftres = sr / Ls` below -- dead assignment.
    fftres = sr // Ls
    # Number of CQT channels between fmin and fmax, and their geometrically
    # spaced centre frequencies.
    b = floor(bins * log2(fmax / fmin))
    fbas = fmin * pow(2, np.arange(b).T / bins)
    # Constant-Q bandwidth factor; gamma adds a constant offset per channel.
    Q = pow(2, 1 / bins) - pow(2, -1 / bins)
    cqtbw = Q * fbas + gamma
    # NOTE(review): for a numpy array `cqtbw[:]` is a *view*, not a copy --
    # confirm a copy was not intended here.
    cqtbw = cqtbw[:]
    # Make sure the support of highest filter won't exceed nf
    tmpIdx = np.where(fbas + cqtbw / 2 > nf)[0]
    if not np.all(tmpIdx == 0):
        fbas = fbas[: tmpIdx[0]]
        cqtbw = cqtbw[: tmpIdx[0]]
    # Make sure the support of lowest filter won't exceed DC
    tmpIdx = np.where(fbas - cqtbw / 2 < 0)[0]
    if not np.all(tmpIdx == 0):
        # NOTE(review): tmpIdx is an ndarray used as a slice start; this only
        # works when it holds a single element -- confirm against the MATLAB
        # original (which uses tmpIdx(end)+1).
        fbas = fbas[tmpIdx:]
        cqtbw = cqtbw[tmpIdx:]
    Lfbas = len(fbas)
    # Mirror the frequency axis: [0, fbas..., nf, sr - fbas reversed] so the
    # frame covers DC, positive frequencies, Nyquist, and negative frequencies.
    fbas = np.insert(fbas, 0, 0)
    fbas = np.insert(fbas, len(fbas), nf)
    fbas = np.insert(fbas, len(fbas), sr - fbas[Lfbas:0:-1])
    # Matching bandwidths; entry 0 covers DC, entry Lfbas+1 covers Nyquist.
    bw = cqtbw[::-1]
    bw = np.insert(bw, 0, fbas[Lfbas + 2] - fbas[Lfbas])
    bw = np.insert(bw, 0, cqtbw)
    bw = np.insert(bw, 0, 2 * fmin)
    # Convert frequencies/bandwidths from Hz to DFT bins.
    fftres = sr / Ls
    bw = bw / fftres
    fbas = fbas / fftres
    # Centre positions of filters in DFT frame
    posit = np.zeros(fbas.shape)
    posit[: Lfbas + 2] = np.array(
        [floor(fbas_num) for fbas_num in list(fbas[: Lfbas + 2])]
    )
    posit[Lfbas + 2 :] = np.array(
        [ceil(fbas_num) for fbas_num in list(fbas[Lfbas + 2 :])]
    )
    # Hop sizes between adjacent filters; the first entry wraps modulo Ls.
    shift = np.diff(posit)
    shift = np.insert(shift, 0, -posit[-1] % Ls)
    if fractional:
        # NOTE(review): in this branch bw is an ndarray, so math.ceil/floor
        # below will raise for more than one channel -- likely np.ceil/np.floor
        # were intended; confirm against the MATLAB original before using
        # fractional=1.
        corr_shift = fbas - posit
        M = ceil(bw + 1)
    else:
        bw = np.round(bw)
        # NOTE(review): M aliases bw here (same ndarray object); the paired
        # updates in the loop below are therefore redundant but harmless.
        M = bw
    # Enforce the minimum admissible window length.
    for i in range(2 * (Lfbas + 1)):
        if bw[i] < min_win:
            bw[i] = min_win
            M[i] = bw[i]
    if fractional:
        # Sample the prototype window on a fractional grid centred on each
        # filter (see NOTE(review) above regarding ceil/floor on arrays).
        temp = (
            (np.arange(ceil(M / 2) + 1) + np.arange(-floor(M / 2), 0)).conj().T
        )
        g = winfuns(winfun, (temp - corr_shift) / bw) / np.sqrt(bw)
    else:
        # One window vector per channel; lengths differ, hence dtype=object.
        g = np.empty_like(bw, dtype=object)
        for i in range(len(bw)):
            g[i] = winfuns(winfun, bw[i])
    # Round window lengths up to multiples of bwfac.
    M = bwfac * np.ceil(M / bwfac)
    # Set up Tukey-style windows for the 0- and Nyquist-frequency filters by
    # embedding a shorter Hann window with a flat centre region.
    # NOTE(review): the MATLAB reference applies this only to the DC and
    # Nyquist channels, whereas this loop visits every i in range(Lfbas + 2)
    # whose length exceeds its neighbour's -- confirm this is intended.
    for i in range(Lfbas + 2):
        if M[i] > M[i + 1]:
            start = int((np.floor(M[i] / 2) - np.floor(M[i + 1] / 2) + 1))
            end = int((np.floor(M[i] / 2) + np.ceil(M[i + 1] / 2)))
            g[i][start - 1 : end] = winfuns("hann", M[i + 1])
            g[i] = g[i] / np.sqrt(M[i])
    return g, shift, M
| [
"numpy.ceil",
"math.ceil",
"numpy.floor",
"numpy.zeros",
"numpy.empty_like",
"math.floor",
"numpy.insert",
"numpy.diff",
"numpy.where",
"numpy.arange",
"CQT_Toolbox.winfuns.winfuns",
"math.log2",
"numpy.round",
"numpy.all",
"numpy.sqrt"
] | [((1861, 1882), 'numpy.insert', 'np.insert', (['fbas', '(0)', '(0)'], {}), '(fbas, 0, 0)\n', (1870, 1882), True, 'import numpy as np\n'), ((2017, 2064), 'numpy.insert', 'np.insert', (['bw', '(0)', '(fbas[Lfbas + 2] - fbas[Lfbas])'], {}), '(bw, 0, fbas[Lfbas + 2] - fbas[Lfbas])\n', (2026, 2064), True, 'import numpy as np\n'), ((2074, 2097), 'numpy.insert', 'np.insert', (['bw', '(0)', 'cqtbw'], {}), '(bw, 0, cqtbw)\n', (2083, 2097), True, 'import numpy as np\n'), ((2107, 2133), 'numpy.insert', 'np.insert', (['bw', '(0)', '(2 * fmin)'], {}), '(bw, 0, 2 * fmin)\n', (2116, 2133), True, 'import numpy as np\n'), ((2262, 2282), 'numpy.zeros', 'np.zeros', (['fbas.shape'], {}), '(fbas.shape)\n', (2270, 2282), True, 'import numpy as np\n'), ((2509, 2523), 'numpy.diff', 'np.diff', (['posit'], {}), '(posit)\n', (2516, 2523), True, 'import numpy as np\n'), ((2536, 2572), 'numpy.insert', 'np.insert', (['shift', '(0)', '(-posit[-1] % Ls)'], {}), '(shift, 0, -posit[-1] % Ls)\n', (2545, 2572), True, 'import numpy as np\n'), ((1491, 1522), 'numpy.where', 'np.where', (['(fbas + cqtbw / 2 > nf)'], {}), '(fbas + cqtbw / 2 > nf)\n', (1499, 1522), True, 'import numpy as np\n'), ((1537, 1556), 'numpy.all', 'np.all', (['(tmpIdx == 0)'], {}), '(tmpIdx == 0)\n', (1543, 1556), True, 'import numpy as np\n'), ((1701, 1731), 'numpy.where', 'np.where', (['(fbas - cqtbw / 2 < 0)'], {}), '(fbas - cqtbw / 2 < 0)\n', (1709, 1731), True, 'import numpy as np\n'), ((1746, 1765), 'numpy.all', 'np.all', (['(tmpIdx == 0)'], {}), '(tmpIdx == 0)\n', (1752, 1765), True, 'import numpy as np\n'), ((2639, 2651), 'math.ceil', 'ceil', (['(bw + 1)'], {}), '(bw + 1)\n', (2643, 2651), False, 'from math import ceil, floor, log2\n'), ((2675, 2687), 'numpy.round', 'np.round', (['bw'], {}), '(bw)\n', (2683, 2687), True, 'import numpy as np\n'), ((3039, 3070), 'numpy.empty_like', 'np.empty_like', (['bw'], {'dtype': 'object'}), '(bw, dtype=object)\n', (3052, 3070), True, 'import numpy as np\n'), ((3163, 3181), 
'numpy.ceil', 'np.ceil', (['(M / bwfac)'], {}), '(M / bwfac)\n', (3170, 3181), True, 'import numpy as np\n'), ((1252, 1269), 'math.log2', 'log2', (['(fmax / fmin)'], {}), '(fmax / fmin)\n', (1256, 1269), False, 'from math import ceil, floor, log2\n'), ((2327, 2342), 'math.floor', 'floor', (['fbas_num'], {}), '(fbas_num)\n', (2332, 2342), False, 'from math import ceil, floor, log2\n'), ((2434, 2448), 'math.ceil', 'ceil', (['fbas_num'], {}), '(fbas_num)\n', (2438, 2448), False, 'from math import ceil, floor, log2\n'), ((2961, 3002), 'CQT_Toolbox.winfuns.winfuns', 'winfuns', (['winfun', '((temp - corr_shift) / bw)'], {}), '(winfun, (temp - corr_shift) / bw)\n', (2968, 3002), False, 'from CQT_Toolbox.winfuns import winfuns\n'), ((3005, 3016), 'numpy.sqrt', 'np.sqrt', (['bw'], {}), '(bw)\n', (3012, 3016), True, 'import numpy as np\n'), ((3123, 3145), 'CQT_Toolbox.winfuns.winfuns', 'winfuns', (['winfun', 'bw[i]'], {}), '(winfun, bw[i])\n', (3130, 3145), False, 'from CQT_Toolbox.winfuns import winfuns\n'), ((3477, 3502), 'CQT_Toolbox.winfuns.winfuns', 'winfuns', (['"""hann"""', 'M[i + 1]'], {}), "('hann', M[i + 1])\n", (3484, 3502), False, 'from CQT_Toolbox.winfuns import winfuns\n'), ((3529, 3542), 'numpy.sqrt', 'np.sqrt', (['M[i]'], {}), '(M[i])\n', (3536, 3542), True, 'import numpy as np\n'), ((1296, 1308), 'numpy.arange', 'np.arange', (['b'], {}), '(b)\n', (1305, 1308), True, 'import numpy as np\n'), ((3396, 3414), 'numpy.floor', 'np.floor', (['(M[i] / 2)'], {}), '(M[i] / 2)\n', (3404, 3414), True, 'import numpy as np\n'), ((3417, 3438), 'numpy.ceil', 'np.ceil', (['(M[i + 1] / 2)'], {}), '(M[i + 1] / 2)\n', (3424, 3438), True, 'import numpy as np\n'), ((3323, 3341), 'numpy.floor', 'np.floor', (['(M[i] / 2)'], {}), '(M[i] / 2)\n', (3331, 3341), True, 'import numpy as np\n'), ((3344, 3366), 'numpy.floor', 'np.floor', (['(M[i + 1] / 2)'], {}), '(M[i + 1] / 2)\n', (3352, 3366), True, 'import numpy as np\n'), ((2882, 2893), 'math.ceil', 'ceil', (['(M / 2)'], {}), '(M / 
2)\n', (2886, 2893), False, 'from math import ceil, floor, log2\n'), ((2912, 2924), 'math.floor', 'floor', (['(M / 2)'], {}), '(M / 2)\n', (2917, 2924), False, 'from math import ceil, floor, log2\n')] |
import numpy as np
import os, sys
import pickle, functools, operator
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.externals import joblib
from keras.utils import to_categorical
from keras.models import Model, load_model
from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten
from keras.callbacks import EarlyStopping, ModelCheckpoint
import argparse, json
def parse():
    """Build and parse the command-line arguments for the video-to-text model.

    Defines the training/testing options (uid, data paths, learning rate,
    batch size, epoch count, and a --test flag) and optionally lets a
    project-local `argument.add_arguments` hook extend the parser.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Video to Text Model")
    parser.add_argument('--uid', type=str, help='training uid', required=True)
    parser.add_argument('--train_path', default='data/training_data', type=str, help='training data path')
    parser.add_argument('--test_path', default='data/testing_data', type=str, help='test data path')
    parser.add_argument('--learning_rate', type=float, default=0.0007, help='learning rate for training')
    parser.add_argument('--batch_size', type=int, default=320, help='batch size for training')
    parser.add_argument('--epoch', type=int, default=100, help='epochs for training')
    parser.add_argument('--test', action='store_true', help='use this flag for testing')
    # Best-effort extension hook.  A bare `except:` here would also swallow
    # SystemExit/KeyboardInterrupt; `except Exception` still covers a missing
    # `argument` module (ImportError) and any failure inside the hook.
    try:
        from argument import add_arguments
        parser = add_arguments(parser)
    except Exception:
        pass
    args = parser.parse_args()
    return args
class Video2Text(object):
    """Sequence-to-sequence video captioning model (encoder-decoder LSTM).

    Encodes per-frame CNN features (80 time steps x 4096 features, per the
    reshape in `test`) with an LSTM encoder and decodes captions token by
    token with an LSTM decoder.  Hyper-parameters come from the argparse
    namespace produced by `parse()`; trained models and the tokenizer are
    saved under models/<uid>/.
    """
    def __init__(self, args):
        # Run identity and data locations from the command line.
        self.uid = args.uid
        self.train_path = args.train_path
        self.test_path = args.test_path
        self.batch_size = args.batch_size
        self.lr = args.learning_rate
        self.epochs = args.epoch
        # --test on the command line means inference-only (no training).
        self.trainable = not args.test
        # Fixed architecture constants.
        self.latent_dim = 512
        self.num_encoder_tokens = 4096
        self.num_decoder_tokens = 1500
        self.time_steps_encoder = 80
        # Decoder length is only known after the data is loaded.
        self.time_steps_decoder = None
        # When True, load_data() reads pre-processed arrays from
        # `preload_data_path` instead of rebuilding them from raw data.
        self.preload = True
        self.preload_data_path = 'preload_data'
        # Best beam-search joint probability seen so far (reset per video).
        self.max_propablity = -1
        # processed data
        self.encoder_input_data = []
        self.decoder_input_data = []
        self.decoder_target_data = []
        self.tokenizer = None
        # models
        self.encoder_model = None
        self.decoder_model = None
        self.inf_encoder_model = None
        self.inf_decoder_model = None
        self.save_model_path = os.path.join('models', self.uid)
    def load_data(self):
        """Load (or build) the training tensors and tokenizer.

        Preload path: reads joblib-serialized arrays/tokenizer from
        `preload_data_path`.  Otherwise rebuilds everything from
        training_label.json and the per-video .npy feature files
        (keeping at most 2 captions per video).

        Returns:
            ([encoder_input_data, decoder_input_data], decoder_target_data,
             tokenizer) -- decoder input/target are the caption shifted by
            one token (teacher forcing).
        """
        if(self.preload):
            with open(os.path.join(self.preload_data_path, 'X_data1024.jlib'), 'rb') as file:
                self.encoder_input_data = joblib.load(file)
                print(self.encoder_input_data.shape)
            with open(os.path.join(self.preload_data_path, 'y_data1024.jlib'), 'rb') as file:
                decoder_data = joblib.load(file)
                print(decoder_data.shape)
            with open(os.path.join(self.preload_data_path, 'tokenizer1024'), 'rb') as file:
                self.tokenizer = joblib.load(file)
                print(len(self.tokenizer.word_index))
            # Teacher forcing: decoder input is the caption minus its last
            # token, target is the caption minus its first token.
            for e in decoder_data:
                i = e[:-1]
                o = e[1:]
                self.decoder_input_data.append(i)
                self.decoder_target_data.append(o)
            self.decoder_input_data = np.array(self.decoder_input_data)
            self.decoder_target_data = np.array(self.decoder_target_data)
        else:
            TRAIN_LABEL_PATH = os.path.join(self.train_path, 'training_label.json')
            with open(TRAIN_LABEL_PATH) as data_file:
                y_data = json.load(data_file)
            # One (videoId, caption) pair per caption, wrapped with the
            # <bos>/<eos> sentinel tokens used during decoding.
            videoId = []
            videoSeq = []
            for y in y_data:
                for idx, cap in enumerate(y['caption']):
                    cap = "<bos> " + cap + " <eos>"
                    videoId.append(y['id'])
                    videoSeq.append(cap)
            # Load per-video CNN features keyed by video id (filename minus
            # its 4-char '.npy' extension).
            TRAIN_FEATURE_DIR = os.path.join(self.train_path, 'feat')
            x_data = {}
            for filename in os.listdir(TRAIN_FEATURE_DIR):
                f = np.load(os.path.join(TRAIN_FEATURE_DIR, filename))
                x_data[filename[:-4]] = f
            # Fit the tokenizer on all captions, capped at num_decoder_tokens
            # distinct words.
            self.tokenizer = Tokenizer(num_words=self.num_decoder_tokens)
            self.tokenizer.fit_on_texts(videoSeq)
            word_index = self.tokenizer.word_index
            print ('Convert to index sequences.')
            train_sequences = self.tokenizer.texts_to_sequences(videoSeq)
            train_sequences = np.array(train_sequences)
            train_sequences = pad_sequences(train_sequences, padding='post',truncating='post')
            print(train_sequences.shape)
            max_seq_length = train_sequences.shape[1]
            filesize = len(train_sequences)
            X_data = []
            y_data = []
            # Keep at most 2 captions per video (captions for the same video
            # are assumed to be contiguous in the label file).
            vCount = 0
            curFilename = videoId[0]
            for idx in range(0,filesize):
                if(videoId[idx] == curFilename):
                    vCount = vCount + 1
                    if(vCount > 2):
                        continue
                else:
                    vCount = 1
                    curFilename = videoId[idx]
                self.encoder_input_data.append(x_data[videoId[idx]])
                # One-hot encode the caption, then shift by one token for
                # teacher forcing (input drops <eos>, target drops <bos>).
                y = to_categorical(train_sequences[idx], self.num_decoder_tokens)
                self.decoder_input_data.append(y[:-1])
                self.decoder_target_data.append(y[1:])
            self.encoder_input_data = np.array(self.encoder_input_data)
            self.decoder_input_data = np.array(self.decoder_input_data)
            self.decoder_target_data = np.array(self.decoder_target_data)
        # init decoder max length
        self.time_steps_decoder = self.decoder_input_data.shape[1]
        return [self.encoder_input_data, self.decoder_input_data], self.decoder_target_data, self.tokenizer
    def load_inference_models(self):
        """Load the saved tokenizer and build the inference-time models.

        Rebuilds the decoder graph (variable-length input, softmax output,
        explicit state inputs) and loads the weights saved by `train`.
        Sets `inf_encoder_model` and `inf_decoder_model`.
        """
        # load tokenizer
        with open(os.path.join(self.save_model_path, 'tokenizer' + str(self.num_decoder_tokens)), 'rb') as file:
            self.tokenizer = joblib.load(file)
        # inference encoder model
        self.inf_encoder_model = load_model(os.path.join(self.save_model_path, 'encoder_model.h5'))
        # inference decoder model
        decoder_inputs = Input(shape=(None, self.num_decoder_tokens))
        decoder_dense = Dense(self.num_decoder_tokens, activation='softmax')
        decoder_lstm = LSTM(self.latent_dim, return_sequences=True, return_state=True)
        # The decoder is fed its previous LSTM state explicitly at each step.
        decoder_state_input_h = Input(shape=(self.latent_dim,))
        decoder_state_input_c = Input(shape=(self.latent_dim,))
        decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
        decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)
        decoder_states = [state_h, state_c]
        decoder_outputs = decoder_dense(decoder_outputs)
        self.inf_decoder_model = Model(
            [decoder_inputs] + decoder_states_inputs,
            [decoder_outputs] + decoder_states)
        self.inf_decoder_model.load_weights(os.path.join(self.save_model_path, 'decoder_model_weights.h5'))
    def train(self):
        """Build, train, and save the encoder-decoder model.

        Trains on the arrays populated by `load_data` with early stopping on
        validation loss, then saves the encoder model, the decoder weights,
        and the tokenizer under `save_model_path`.  A KeyboardInterrupt
        during fitting stops training but still saves the models.
        """
        # Define an input sequence and process it.
        encoder_inputs = Input(shape=(self.time_steps_encoder, self.num_encoder_tokens), name="encoder_inputs")
        encoder = LSTM(self.latent_dim, return_state=True,return_sequences=True, name='endcoder_lstm')
        # Only the final encoder states are used to seed the decoder.
        _, state_h, state_c = encoder(encoder_inputs)
        encoder_states = [state_h, state_c]
        # Attention mechanism
        # attention = keras.layers.Permute((2,1))(encoder_output)
        # attention = keras.layers.Dense(TIME_STEPS_ENCODER, activation='softmax')(attention)
        # attention = keras.layers.Permute((2,1))(attention)
        # hidden = keras.layers.Multiply()([encoder_output, attention])
        # hidden = keras.layers.Permute((2,1))(hidden)
        # hidden = keras.layers.Dense(DECODER_MAX_LENGTH, activation='relu')(hidden)
        # hidden = keras.layers.Permute((2,1))(hidden)
        # hidden = keras.layers.Dense(num_decoder_tokens, activation='relu')(hidden)
        # Set up the decoder
        decoder_inputs = Input(shape=(self.time_steps_decoder, self.num_decoder_tokens), name= "decoder_inputs")
        decoder_lstm = LSTM(self.latent_dim, return_sequences=True, return_state=True, name='decoder_lstm')
        decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
        # NOTE(review): training head uses 'relu' while the inference decoder
        # in load_inference_models uses 'softmax' -- confirm this mismatch is
        # intentional.
        decoder_dense = Dense(self.num_decoder_tokens, activation='relu', name='decoder_relu')
        decoder_outputs = decoder_dense(decoder_outputs)
        # Define the model that will turn
        model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        model.summary()
        # Early Stopping
        earlystopping = EarlyStopping(monitor='val_loss', patience = 4, verbose=1, mode='min')
        # Run training
        opt = keras.optimizers.adam(lr = self.lr)
        model.compile(metrics=['accuracy'], optimizer=opt, loss='categorical_crossentropy')
        try:
            model.fit([self.encoder_input_data, self.decoder_input_data], self.decoder_target_data,
                batch_size=self.batch_size,
                epochs=self.epochs,
                validation_split=0.15,
                callbacks=[earlystopping])
        except KeyboardInterrupt:
            # Ctrl-C aborts fitting but we still fall through and save.
            print("\nW: interrupt received, stopping")
        finally:
            pass
        # saving process
        if not os.path.exists(self.save_model_path):
            os.makedirs(self.save_model_path)
        # Separate encoder/decoder models for step-by-step inference; the
        # decoder graph mirrors load_inference_models.
        self.encoder_model = Model(encoder_inputs, encoder_states)
        decoder_state_input_h = Input(shape=(self.latent_dim,))
        decoder_state_input_c = Input(shape=(self.latent_dim,))
        decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
        decoder_outputs, state_h, state_c = decoder_lstm(
            decoder_inputs, initial_state=decoder_states_inputs)
        decoder_states = [state_h, state_c]
        decoder_outputs = decoder_dense(decoder_outputs)
        self.decoder_model = Model(
            [decoder_inputs] + decoder_states_inputs,
            [decoder_outputs] + decoder_states)
        self.encoder_model.summary()
        self.decoder_model.summary()
        # save models
        self.encoder_model.save(os.path.join(self.save_model_path, 'encoder_model.h5'))
        self.decoder_model.save_weights(os.path.join(self.save_model_path, 'decoder_model_weights.h5'))
        with open(os.path.join(self.save_model_path,'tokenizer'+ str(self.num_decoder_tokens) ),'wb') as file:
            joblib.dump(self.tokenizer, file)
        # attention_model.save(os.path.join(directory, 'attention_model.h5'))
    def decode_sequence2bs(self, input_seq):
        """Decode one video's features into a caption via beam search.

        Encodes `input_seq`, seeds the decoder with the 'bos' token, then
        runs the recursive `beam_search`, which stores its best path in the
        module-level global `decode_seq` (returned here).

        NOTE(review): relies on `beam_search` having completed at least one
        path; otherwise `decode_seq` may be undefined/stale.
        """
        states_value = self.inf_encoder_model.predict(input_seq)
        # One-hot 'bos' token as the first decoder input.
        target_seq = np.zeros((1, 1, self.num_decoder_tokens))
        target_seq[0, 0, self.tokenizer.word_index['bos']] = 1
        self.beam_search(target_seq, states_value,[],[],0)
        return decode_seq
    def beam_search(self, target_seq, states_value, prob, path, lens):
        """Recursive beam search (beam width 2, max depth 9).

        Expands the top-2 next tokens at each step; a branch terminates on
        'eos' or at MAX_LEN, at which point the joint probability (product of
        step probabilities) is compared against `self.max_propablity` and the
        best path is written to the module-level global `decode_seq`.

        Args:
            target_seq: one-hot (1, 1, num_decoder_tokens) previous token.
            states_value: [h, c] decoder LSTM states.
            prob: per-step probabilities accumulated along this branch.
            path: tokens accumulated along this branch.
            lens: current branch depth.
        """
        global decode_seq
        # Beam width: expand the 2 most probable next tokens.
        node = 2
        output_tokens, h, c = self.inf_decoder_model.predict(
            [target_seq] + states_value)
        output_tokens = output_tokens.reshape((self.num_decoder_tokens))
        sampled_token_index = output_tokens.argsort()[-node:][::-1]
        states_value = [h, c]
        for i in range(node):
            if sampled_token_index[i] == 0:
                # Index 0 is the padding slot -- no word maps to it.
                sampled_char = ''
            else:
                # Reverse-lookup the word for this index in the tokenizer.
                sampled_char = list(self.tokenizer.word_index.keys())[list(self.tokenizer.word_index.values()).index(sampled_token_index[i])]
            MAX_LEN = 9
            if(sampled_char != 'eos' and lens <= MAX_LEN):
                p = output_tokens[sampled_token_index[i]]
                if(sampled_char == ''):
                    # Padding carries no information; don't penalize the path.
                    p = 1
                prob_new = list(prob)
                prob_new.append(p)
                path_new = list(path)
                path_new.append(sampled_char)
                # Feed the chosen token back in and recurse one step deeper.
                target_seq = np.zeros((1, 1, self.num_decoder_tokens))
                target_seq[0, 0, sampled_token_index[i]] = 1.
                self.beam_search(target_seq, states_value, prob_new, path_new, lens+1)
            else:
                # Terminal branch: score it and keep the best path globally.
                p = output_tokens[sampled_token_index[i]]
                prob_new = list(prob)
                prob_new.append(p)
                p = functools.reduce(operator.mul, prob_new, 1)
                if(p > self.max_propablity):
                    decode_seq = path
                    self.max_propablity = p
    def decoded_sentence_tuning(self, decoded_sentence):
        """Clean a decoded token list into the final caption tokens.

        Drops the 'bos'/'eos' sentinels, empty tokens, and immediate
        repetitions of the same word; returns the filtered token list.
        """
        decode_str = []
        filter_string = ['bos', 'eos']
        # Word frequency counter (computed but not used for filtering).
        unigram = {}
        last_string = ""
        for idx2, c in enumerate(decoded_sentence):
            if c in unigram:
                unigram[c] += 1
            else:
                unigram[c] = 1
            # Skip immediate duplicates of the previous kept token.
            if(last_string == c and idx2 > 0):
                continue
            if c in filter_string:
                continue
            if len(c) > 0:
                decode_str.append(c)
            if idx2 > 0:
                last_string = c
        return decode_str
    def get_test_data(self, path):
        """Load test features listed in <path>/testing_id.txt.

        Returns:
            (X_test, X_test_filename): stacked feature array and the matching
            ids.  NOTE(review): ids are truncated with [:-4], assuming each
            line carries a 4-character suffix (e.g. '.avi') -- confirm the
            format of testing_id.txt.
        """
        X_test = []
        X_test_filename = []
        with open (os.path.join(path, 'testing_id.txt')) as testing_file:
            lines = testing_file.readlines()
            for filename in lines:
                filename = filename.strip()
                f = np.load(os.path.join(path , 'feat', filename + '.npy'))
                X_test.append(f)
                X_test_filename.append(filename[:-4])
        X_test = np.array(X_test)
        return X_test, X_test_filename
    def test(self):
        """Run inference on the test set and write captions to disk.

        Writes one '<id>,<caption words>' line per video to
        <save_model_path>/test_output.txt, resetting the beam-search best
        probability between videos.
        """
        # os.path.join with a single argument is a no-op; kept as-is.
        X_test, X_test_filename = self.get_test_data(os.path.join(self.test_path))
        # generate inference test outputs
        with open(os.path.join(self.save_model_path, 'test_output.txt'), 'w') as file:
            for idx, x in enumerate(X_test):
                file.write(X_test_filename[idx]+',')
                # Features are flattened on disk; restore (batch, 80, 4096).
                decoded_sentence = self.decode_sequence2bs(x.reshape(-1, 80, 4096))
                decode_str = self.decoded_sentence_tuning(decoded_sentence)
                for d in decode_str:
                    file.write(d + ' ')
                file.write('\n')
                # re-init max prob
                self.max_propablity = -1
if __name__ == "__main__":
    # Script entry point: build the model from CLI args, optionally train,
    # then always run inference over the test set.
    app = Video2Text(parse())
    if app.trainable:
        app.load_data()
        app.train()
    app.load_inference_models()
    app.test()
| [
"sklearn.externals.joblib.dump",
"argparse.ArgumentParser",
"keras.preprocessing.sequence.pad_sequences",
"keras.models.Model",
"argument.add_arguments",
"keras.layers.Input",
"os.path.join",
"keras.optimizers.adam",
"os.path.exists",
"keras.preprocessing.text.Tokenizer",
"keras.utils.to_categor... | [((494, 552), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Video to Text Model"""'}), "(description='Video to Text Model')\n", (517, 552), False, 'import argparse, json\n'), ((1285, 1306), 'argument.add_arguments', 'add_arguments', (['parser'], {}), '(parser)\n', (1298, 1306), False, 'from argument import add_arguments\n'), ((2403, 2435), 'os.path.join', 'os.path.join', (['"""models"""', 'self.uid'], {}), "('models', self.uid)\n", (2415, 2435), False, 'import os, sys\n'), ((6263, 6307), 'keras.layers.Input', 'Input', ([], {'shape': '(None, self.num_decoder_tokens)'}), '(shape=(None, self.num_decoder_tokens))\n', (6268, 6307), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((6332, 6384), 'keras.layers.Dense', 'Dense', (['self.num_decoder_tokens'], {'activation': '"""softmax"""'}), "(self.num_decoder_tokens, activation='softmax')\n", (6337, 6384), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((6408, 6471), 'keras.layers.LSTM', 'LSTM', (['self.latent_dim'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(self.latent_dim, return_sequences=True, return_state=True)\n', (6412, 6471), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((6504, 6535), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (6509, 6535), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((6568, 6599), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (6573, 6599), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((6923, 7010), 'keras.models.Model', 'Model', (['([decoder_inputs] + decoder_states_inputs)', '([decoder_outputs] + decoder_states)'], {}), '([decoder_inputs] + 
decoder_states_inputs, [decoder_outputs] +\n decoder_states)\n', (6928, 7010), False, 'from keras.models import Model, load_model\n'), ((7238, 7329), 'keras.layers.Input', 'Input', ([], {'shape': '(self.time_steps_encoder, self.num_encoder_tokens)', 'name': '"""encoder_inputs"""'}), "(shape=(self.time_steps_encoder, self.num_encoder_tokens), name=\n 'encoder_inputs')\n", (7243, 7329), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((7343, 7433), 'keras.layers.LSTM', 'LSTM', (['self.latent_dim'], {'return_state': '(True)', 'return_sequences': '(True)', 'name': '"""endcoder_lstm"""'}), "(self.latent_dim, return_state=True, return_sequences=True, name=\n 'endcoder_lstm')\n", (7347, 7433), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((8185, 8276), 'keras.layers.Input', 'Input', ([], {'shape': '(self.time_steps_decoder, self.num_decoder_tokens)', 'name': '"""decoder_inputs"""'}), "(shape=(self.time_steps_decoder, self.num_decoder_tokens), name=\n 'decoder_inputs')\n", (8190, 8276), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((8296, 8385), 'keras.layers.LSTM', 'LSTM', (['self.latent_dim'], {'return_sequences': '(True)', 'return_state': '(True)', 'name': '"""decoder_lstm"""'}), "(self.latent_dim, return_sequences=True, return_state=True, name=\n 'decoder_lstm')\n", (8300, 8385), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((8496, 8566), 'keras.layers.Dense', 'Dense', (['self.num_decoder_tokens'], {'activation': '"""relu"""', 'name': '"""decoder_relu"""'}), "(self.num_decoder_tokens, activation='relu', name='decoder_relu')\n", (8501, 8566), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((8683, 8739), 'keras.models.Model', 'Model', (['[encoder_inputs, decoder_inputs]', 'decoder_outputs'], {}), '([encoder_inputs, 
decoder_inputs], decoder_outputs)\n', (8688, 8739), False, 'from keras.models import Model, load_model\n'), ((8814, 8882), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(4)', 'verbose': '(1)', 'mode': '"""min"""'}), "(monitor='val_loss', patience=4, verbose=1, mode='min')\n", (8827, 8882), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((8923, 8956), 'keras.optimizers.adam', 'keras.optimizers.adam', ([], {'lr': 'self.lr'}), '(lr=self.lr)\n', (8944, 8956), False, 'import keras\n'), ((9620, 9657), 'keras.models.Model', 'Model', (['encoder_inputs', 'encoder_states'], {}), '(encoder_inputs, encoder_states)\n', (9625, 9657), False, 'from keras.models import Model, load_model\n'), ((9690, 9721), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (9695, 9721), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((9754, 9785), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (9759, 9785), False, 'from keras.layers import Input, LSTM, Dense, Permute, Reshape, Activation, Flatten\n'), ((10118, 10205), 'keras.models.Model', 'Model', (['([decoder_inputs] + decoder_states_inputs)', '([decoder_outputs] + decoder_states)'], {}), '([decoder_inputs] + decoder_states_inputs, [decoder_outputs] +\n decoder_states)\n', (10123, 10205), False, 'from keras.models import Model, load_model\n'), ((10883, 10924), 'numpy.zeros', 'np.zeros', (['(1, 1, self.num_decoder_tokens)'], {}), '((1, 1, self.num_decoder_tokens))\n', (10891, 10924), True, 'import numpy as np\n'), ((3305, 3338), 'numpy.array', 'np.array', (['self.decoder_input_data'], {}), '(self.decoder_input_data)\n', (3313, 3338), True, 'import numpy as np\n'), ((3378, 3412), 'numpy.array', 'np.array', (['self.decoder_target_data'], {}), '(self.decoder_target_data)\n', (3386, 3412), True, 'import numpy as np\n'), 
((3458, 3510), 'os.path.join', 'os.path.join', (['self.train_path', '"""training_label.json"""'], {}), "(self.train_path, 'training_label.json')\n", (3470, 3510), False, 'import os, sys\n'), ((3921, 3958), 'os.path.join', 'os.path.join', (['self.train_path', '"""feat"""'], {}), "(self.train_path, 'feat')\n", (3933, 3958), False, 'import os, sys\n'), ((4011, 4040), 'os.listdir', 'os.listdir', (['TRAIN_FEATURE_DIR'], {}), '(TRAIN_FEATURE_DIR)\n', (4021, 4040), False, 'import os, sys\n'), ((4184, 4228), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'self.num_decoder_tokens'}), '(num_words=self.num_decoder_tokens)\n', (4193, 4228), False, 'from keras.preprocessing.text import Tokenizer\n'), ((4487, 4512), 'numpy.array', 'np.array', (['train_sequences'], {}), '(train_sequences)\n', (4495, 4512), True, 'import numpy as np\n'), ((4543, 4608), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'padding': '"""post"""', 'truncating': '"""post"""'}), "(train_sequences, padding='post', truncating='post')\n", (4556, 4608), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5455, 5488), 'numpy.array', 'np.array', (['self.encoder_input_data'], {}), '(self.encoder_input_data)\n', (5463, 5488), True, 'import numpy as np\n'), ((5527, 5560), 'numpy.array', 'np.array', (['self.decoder_input_data'], {}), '(self.decoder_input_data)\n', (5535, 5560), True, 'import numpy as np\n'), ((5600, 5634), 'numpy.array', 'np.array', (['self.decoder_target_data'], {}), '(self.decoder_target_data)\n', (5608, 5634), True, 'import numpy as np\n'), ((6050, 6067), 'sklearn.externals.joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (6061, 6067), False, 'from sklearn.externals import joblib\n'), ((6147, 6201), 'os.path.join', 'os.path.join', (['self.save_model_path', '"""encoder_model.h5"""'], {}), "(self.save_model_path, 'encoder_model.h5')\n", (6159, 6201), False, 'import os, sys\n'), ((7076, 7138), 'os.path.join', 
'os.path.join', (['self.save_model_path', '"""decoder_model_weights.h5"""'], {}), "(self.save_model_path, 'decoder_model_weights.h5')\n", (7088, 7138), False, 'import os, sys\n'), ((9498, 9534), 'os.path.exists', 'os.path.exists', (['self.save_model_path'], {}), '(self.save_model_path)\n', (9512, 9534), False, 'import os, sys\n'), ((9548, 9581), 'os.makedirs', 'os.makedirs', (['self.save_model_path'], {}), '(self.save_model_path)\n', (9559, 9581), False, 'import os, sys\n'), ((10356, 10410), 'os.path.join', 'os.path.join', (['self.save_model_path', '"""encoder_model.h5"""'], {}), "(self.save_model_path, 'encoder_model.h5')\n", (10368, 10410), False, 'import os, sys\n'), ((10452, 10514), 'os.path.join', 'os.path.join', (['self.save_model_path', '"""decoder_model_weights.h5"""'], {}), "(self.save_model_path, 'decoder_model_weights.h5')\n", (10464, 10514), False, 'import os, sys\n'), ((10639, 10672), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.tokenizer', 'file'], {}), '(self.tokenizer, file)\n', (10650, 10672), False, 'from sklearn.externals import joblib\n'), ((13730, 13746), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (13738, 13746), True, 'import numpy as np\n'), ((13860, 13888), 'os.path.join', 'os.path.join', (['self.test_path'], {}), '(self.test_path)\n', (13872, 13888), False, 'import os, sys\n'), ((2625, 2642), 'sklearn.externals.joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (2636, 2642), False, 'from sklearn.externals import joblib\n'), ((2821, 2838), 'sklearn.externals.joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (2832, 2838), False, 'from sklearn.externals import joblib\n'), ((3006, 3023), 'sklearn.externals.joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (3017, 3023), False, 'from sklearn.externals import joblib\n'), ((3594, 3614), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (3603, 3614), False, 'import argparse, json\n'), ((5245, 5306), 'keras.utils.to_categorical', 
'to_categorical', (['train_sequences[idx]', 'self.num_decoder_tokens'], {}), '(train_sequences[idx], self.num_decoder_tokens)\n', (5259, 5306), False, 'from keras.utils import to_categorical\n'), ((12124, 12165), 'numpy.zeros', 'np.zeros', (['(1, 1, self.num_decoder_tokens)'], {}), '((1, 1, self.num_decoder_tokens))\n', (12132, 12165), True, 'import numpy as np\n'), ((12484, 12527), 'functools.reduce', 'functools.reduce', (['operator.mul', 'prob_new', '(1)'], {}), '(operator.mul, prob_new, 1)\n', (12500, 12527), False, 'import pickle, functools, operator\n'), ((13367, 13403), 'os.path.join', 'os.path.join', (['path', '"""testing_id.txt"""'], {}), "(path, 'testing_id.txt')\n", (13379, 13403), False, 'import os, sys\n'), ((13950, 14003), 'os.path.join', 'os.path.join', (['self.save_model_path', '"""test_output.txt"""'], {}), "(self.save_model_path, 'test_output.txt')\n", (13962, 14003), False, 'import os, sys\n'), ((2511, 2566), 'os.path.join', 'os.path.join', (['self.preload_data_path', '"""X_data1024.jlib"""'], {}), "(self.preload_data_path, 'X_data1024.jlib')\n", (2523, 2566), False, 'import os, sys\n'), ((2718, 2773), 'os.path.join', 'os.path.join', (['self.preload_data_path', '"""y_data1024.jlib"""'], {}), "(self.preload_data_path, 'y_data1024.jlib')\n", (2730, 2773), False, 'import os, sys\n'), ((2903, 2956), 'os.path.join', 'os.path.join', (['self.preload_data_path', '"""tokenizer1024"""'], {}), "(self.preload_data_path, 'tokenizer1024')\n", (2915, 2956), False, 'import os, sys\n'), ((4070, 4111), 'os.path.join', 'os.path.join', (['TRAIN_FEATURE_DIR', 'filename'], {}), '(TRAIN_FEATURE_DIR, filename)\n', (4082, 4111), False, 'import os, sys\n'), ((13574, 13619), 'os.path.join', 'os.path.join', (['path', '"""feat"""', "(filename + '.npy')"], {}), "(path, 'feat', filename + '.npy')\n", (13586, 13619), False, 'import os, sys\n')] |
"""Various one off plots.
Usage:
./plots.py
Author:
<NAME> - 2021-08-30
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from typing import List, Optional
from scipy.stats import norm
from warzone.base import normalize, running_mean, cumulative_mean
from warzone.document_filter import DocumentFilter
def personal_plot(doc_filter: DocumentFilter) -> None:
    """
    Build and show a 3x2 grid of per-player trend plots: daily win/loss ratio,
    team placement, kill/death ratio, kills vs deaths, damage done/taken, and
    a misc panel (headshots).

    :param doc_filter: A DocumentFilter.
    :type doc_filter: DocumentFilter
    :return: *None*
    :example: *None*
    :note: This is intended to be used with map_choice, mode_choice and a Gamertag inputted into the DocumentFilter.
    """
    data = doc_filter.df
    # Unique play dates, in order of first appearance in the filtered frame.
    dates = list(data['startDate'].unique())
    fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(30, 30))
    plt.title('Personal Data for: ' + doc_filter.username, fontsize='xx-large')
    # win/loss: per-date win count, total game count and win ratio.
    win_count_lst = []
    game_count_lst = []
    wl_ratio_lst = []
    for i in dates:
        temp = data[data['startDate'] == i]
        # teamPlacement == 1 is a win; anything greater counts as a loss.
        wins = len(temp[temp['teamPlacement'] == 1])
        losses = len(temp[temp['teamPlacement'] > 1])
        win_count_lst.append(wins)
        game_count_lst.append(losses + wins)
        wl_ratio_lst.append(wins / (wins + losses))
    wl_df = pd.DataFrame(wl_ratio_lst, columns=['ratio'], index=dates)
    wl_df['wins'] = win_count_lst
    # NOTE(review): despite its name, the 'losses' column holds the TOTAL
    # daily game count (wins + losses); it is plotted below as 'Daily Game Count'.
    wl_df['losses'] = game_count_lst
    # cumulative_mean / running_mean are project helpers from warzone.base;
    # window of 50 is used for all running means in this module.
    cm_wl = cumulative_mean(np.array(wl_df['ratio']))
    rm_wl = running_mean(np.array(wl_df['ratio']), 50)
    ax[0, 0].set_title('Daily Win / Loss Ratio', fontsize='xx-large')
    ax[0, 0].plot(cm_wl, label='W/L Ratio Cumulative Mean', color='tab:blue')
    ax[0, 0].plot(rm_wl, label='W/L Ratio Running Mean', color='tab:blue', alpha=0.25)
    ax[0, 0].legend(loc='lower left', fontsize='large', frameon=True, framealpha=0.85)
    # Second y-axis so game volume can be overlaid on the ratio curve.
    ax2 = ax[0, 0].twinx()
    ax2.plot(np.array(wl_df['losses']), label='Daily Game Count', color='black', alpha=0.25)
    ax2.legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # ax2.set_xticks(np.arange(min(range(len(wl_df))), max(range(len(wl_df))) + 1, 100.0))
    # placement: per-match placement percent trend.
    cm_p = cumulative_mean(np.array(data['placementPercent']))
    rm_p = running_mean(np.array(data['placementPercent']), 50)
    ax[0, 1].set_title('Team Placement', fontsize='xx-large')
    ax[0, 1].plot(cm_p, label='Placement Cumulative Mean', color='tab:blue')
    ax[0, 1].plot(rm_p, label='Placement Running Mean', color='tab:blue', alpha=0.25)
    ax[0, 1].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
    ax[0, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # kd: per-match kill/death ratio trend.
    cm_kd = cumulative_mean(np.array(data['kdRatio']))
    rm_kd = running_mean(np.array(data['kdRatio']), 50)
    ax[1, 0].set_title('Kill Death Ratio', fontsize='xx-large')
    ax[1, 0].plot(cm_kd, label='Kd Ratio Cumulative Mean', color='tab:blue')
    ax[1, 0].plot(rm_kd, label='Kd Ratio Running Mean', color='tab:blue', alpha=0.25)
    ax[1, 0].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
    ax[1, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Kills and Deaths overlaid on one panel (green = kills, red = deaths).
    ax[1, 1].set_title('Kills and Deaths Per Game', fontsize='xx-large')
    cm_kills = cumulative_mean(np.array(data['kills']))
    cm_deaths = cumulative_mean(np.array(data['deaths']))
    rm_kills = running_mean(np.array(data['kills']), 50)
    rm_deaths = running_mean(np.array(data['deaths']), 50)
    # NOTE(review): this second set_title overwrites the one two lines above.
    ax[1, 1].set_title('Kills and Deaths', fontsize='xx-large')
    ax[1, 1].plot(cm_kills, label='Kills Cumulative Mean', color='green')
    ax[1, 1].plot(cm_deaths, label='Deaths Cumulative Mean', color='red')
    ax[1, 1].plot(rm_kills, label='Kills Running Mean', color='green', alpha=0.25)
    ax[1, 1].plot(rm_deaths, label='Deaths Running Mean', color='red', alpha=0.25)
    ax[1, 1].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
    ax[1, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Damage done vs taken (green = done, red = taken).
    cm_dam_d = cumulative_mean(np.array(data['damageDone']))
    cm_dam_t = cumulative_mean(np.array(data['damageTaken']))
    rm_dam_d = running_mean(np.array(data['damageDone']), 50)
    rm_dam_t = running_mean(np.array(data['damageTaken']), 50)
    ax[2, 0].set_title('Damage', fontsize='xx-large')
    ax[2, 0].plot(cm_dam_d, label='Damage Done Cumulative Mean', color='green')
    ax[2, 0].plot(cm_dam_t, label='Damage Taken Cumulative Mean', color='red')
    ax[2, 0].plot(rm_dam_d, label='Damage Done Running Mean', color='green', alpha=0.25)
    ax[2, 0].plot(rm_dam_t, label='Damage Taken Running Mean', color='red', alpha=0.25)
    ax[2, 0].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
    ax[2, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Misc: raw per-match headshot counts.
    ax[2, 1].set_title('Misc', fontsize='xx-large')
    ax[2, 1].plot(data['headshots'])
    ax[2, 1].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
    # NOTE(review): the plot above has no label, so this legend call produces
    # a "no handles with labels" warning and draws nothing.
    ax[2, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    plt.show()
def lobby_plot(doc_filter: DocumentFilter) -> None:
    """
    Build and show an 8x2 grid of lobby-wide trend plots. For each stat the
    left column aggregates per day and the right column per game; each panel
    shows a cumulative mean plus a window-50 running mean.

    :param doc_filter: A DocumentFilter.
    :type doc_filter: DocumentFilter
    :return: *None*
    :example: *None*
    :note: This is intended to be used with map_choice and mode_choice inputted into the DocumentFilter.
    """
    data = doc_filter.df
    games = doc_filter.unique_ids
    dates = list(data['startDate'].unique())
    # Stats aggregated below; each becomes one row pair in the figure.
    col_lst = ['kdRatio', 'kills', 'deaths', 'damageDone', 'damageTaken', 'percentTimeMoving', 'distanceTraveled',
               'objectiveTeamWiped', 'objectiveReviver', 'missionsComplete']
    # Per-day mean of every stat (NaNs treated as 0 before averaging).
    day_dic = {}
    for date in dates:
        temp_df = data[data['startDate'] == date].fillna(0)
        day_dic[date] = [np.mean(temp_df[col]) for col in col_lst]
    # Per-game (matchID) mean of every stat.
    game_dic = {}
    for game in games:
        temp_df = data[data['matchID'] == game].fillna(0)
        game_dic[game] = [np.mean(temp_df[col]) for col in col_lst]
    day_df = pd.DataFrame.from_dict(day_dic, orient='index', columns=col_lst).fillna(0)
    game_df = pd.DataFrame.from_dict(game_dic, orient='index', columns=col_lst).fillna(0)
    fig, ax = plt.subplots(nrows=8, ncols=2, figsize=(30, 50))
    # NOTE(review): this title looks truncated ("Lobby Data for" + nothing).
    plt.title('Lobby Data for', fontsize='xx-large')
    # kd
    cm_kd = cumulative_mean(np.array(day_df['kdRatio']))
    rm_kd = running_mean(np.array(day_df['kdRatio']), 50)
    ax[0, 0].set_title('Kill Death Ratio Per Day', fontsize='xx-large')
    ax[0, 0].plot(cm_kd, label='Kd Ratio Cumulative Mean', color='tab:blue')
    ax[0, 0].plot(rm_kd, label='Kd Ratio Running Mean', color='tab:blue', alpha=0.25)
    # ax[1, 0].set_xticks(np.arange(min(range(len(day_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[0, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_kd = cumulative_mean(np.array(game_df['kdRatio']))
    rm_kd = running_mean(np.array(game_df['kdRatio']), 50)
    ax[0, 1].set_title('Kill Death Ratio Per Game', fontsize='xx-large')
    ax[0, 1].plot(cm_kd, label='Kd Ratio Cumulative Mean', color='tab:blue')
    ax[0, 1].plot(rm_kd, label='Kd Ratio Running Mean', color='tab:blue', alpha=0.25)
    # ax[0, 1].set_xticks(np.arange(min(range(len(day_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[0, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Kills and Deaths
    # NOTE(review): this set_title is immediately overwritten by the
    # 'Kills and Deaths Per Day' title a few lines below.
    ax[1, 0].set_title('Kills and Deaths Per Game', fontsize='xx-large')
    cm_kills = cumulative_mean(np.array(day_df['kills']))
    cm_deaths = cumulative_mean(np.array(day_df['deaths']))
    rm_kills = running_mean(np.array(day_df['kills']), 50)
    rm_deaths = running_mean(np.array(day_df['deaths']), 50)
    ax[1, 0].set_title('Kills and Deaths Per Day', fontsize='xx-large')
    ax[1, 0].plot(cm_kills, label='Kills Cumulative Mean', color='green')
    ax[1, 0].plot(cm_deaths, label='Deaths Cumulative Mean', color='red')
    ax[1, 0].plot(rm_kills, label='Kills Running Mean', color='green', alpha=0.25)
    ax[1, 0].plot(rm_deaths, label='Deaths Running Mean', color='red', alpha=0.25)
    # ax[1, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[1, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    ax[1, 1].set_title('Kills and Deaths Per Game', fontsize='xx-large')
    cm_kills = cumulative_mean(np.array(game_df['kills']))
    cm_deaths = cumulative_mean(np.array(game_df['deaths']))
    rm_kills = running_mean(np.array(game_df['kills']), 50)
    rm_deaths = running_mean(np.array(game_df['deaths']), 50)
    ax[1, 1].set_title('Kills and Deaths Per Game', fontsize='xx-large')
    ax[1, 1].plot(cm_kills, label='Kills Cumulative Mean', color='green')
    ax[1, 1].plot(cm_deaths, label='Deaths Cumulative Mean', color='red')
    ax[1, 1].plot(rm_kills, label='Kills Running Mean', color='green', alpha=0.25)
    ax[1, 1].plot(rm_deaths, label='Deaths Running Mean', color='red', alpha=0.25)
    # ax[1, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[1, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Damage
    cm_dam_d = cumulative_mean(np.array(day_df['damageDone']))
    cm_dam_t = cumulative_mean(np.array(day_df['damageTaken']))
    rm_dam_d = running_mean(np.array(day_df['damageDone']), 50)
    rm_dam_t = running_mean(np.array(day_df['damageTaken']), 50)
    ax[2, 0].set_title('Damage Per Day', fontsize='xx-large')
    ax[2, 0].plot(cm_dam_d, label='Damage Done Cumulative Mean', color='green')
    ax[2, 0].plot(cm_dam_t, label='Damage Taken Cumulative Mean', color='red')
    ax[2, 0].plot(rm_dam_d, label='Damage Done Running Mean', color='green', alpha=0.25)
    ax[2, 0].plot(rm_dam_t, label='Damage Taken Running Mean', color='red', alpha=0.25)
    # ax[2, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[2, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_dam_d = cumulative_mean(np.array(game_df['damageDone']))
    cm_dam_t = cumulative_mean(np.array(game_df['damageTaken']))
    rm_dam_d = running_mean(np.array(game_df['damageDone']), 50)
    rm_dam_t = running_mean(np.array(game_df['damageTaken']), 50)
    ax[2, 1].set_title('Damage Per Game', fontsize='xx-large')
    ax[2, 1].plot(cm_dam_d, label='Damage Done Cumulative Mean', color='green')
    ax[2, 1].plot(cm_dam_t, label='Damage Taken Cumulative Mean', color='red')
    ax[2, 1].plot(rm_dam_d, label='Damage Done Running Mean', color='green', alpha=0.25)
    ax[2, 1].plot(rm_dam_t, label='Damage Taken Running Mean', color='red', alpha=0.25)
    # ax[2, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[2, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Time Moving
    cm_kd = cumulative_mean(np.array(day_df['percentTimeMoving']))
    rm_kd = running_mean(np.array(day_df['percentTimeMoving']), 50)
    ax[3, 0].set_title('Time Moving Per Day', fontsize='xx-large')
    ax[3, 0].plot(cm_kd, label='Time Moving Cumulative Mean', color='tab:blue')
    ax[3, 0].plot(rm_kd, label='Time Moving Running Mean', color='tab:blue', alpha=0.25)
    # ax[3, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[3, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_kd = cumulative_mean(np.array(game_df['percentTimeMoving']))
    rm_kd = running_mean(np.array(game_df['percentTimeMoving']), 50)
    ax[3, 1].set_title('Time Moving Per Game', fontsize='xx-large')
    ax[3, 1].plot(cm_kd, label='Time Moving Cumulative Mean', color='tab:blue')
    ax[3, 1].plot(rm_kd, label='Time Moving Running Mean', color='tab:blue', alpha=0.25)
    # ax[3, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[3, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Distance Traveled
    cm_kd = cumulative_mean(np.array(day_df['distanceTraveled']))
    rm_kd = running_mean(np.array(day_df['distanceTraveled']), 50)
    ax[4, 0].set_title('Distance Traveled Per Day', fontsize='xx-large')
    ax[4, 0].plot(cm_kd, label='Distance Traveled Cumulative Mean', color='tab:blue')
    ax[4, 0].plot(rm_kd, label='Distance Traveled Running Mean', color='tab:blue', alpha=0.25)
    # ax[4, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[4, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_kd = cumulative_mean(np.array(game_df['distanceTraveled']))
    rm_kd = running_mean(np.array(game_df['distanceTraveled']), 50)
    ax[4, 1].set_title('Distance Traveled Per Game', fontsize='xx-large')
    ax[4, 1].plot(cm_kd, label='Distance Traveled Cumulative Mean', color='tab:blue')
    ax[4, 1].plot(rm_kd, label='Distance Traveled Running Mean', color='tab:blue', alpha=0.25)
    # ax[4, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[4, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Team Wipes
    cm_kd = cumulative_mean(np.array(day_df['objectiveTeamWiped']))
    rm_kd = running_mean(np.array(day_df['objectiveTeamWiped']), 50)
    ax[5, 0].set_title('Team Wipes Per Day', fontsize='xx-large')
    ax[5, 0].plot(cm_kd, label='Team Wipes Cumulative Mean', color='tab:blue')
    ax[5, 0].plot(rm_kd, label='Team Wipes Running Mean', color='tab:blue', alpha=0.25)
    # ax[5, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[5, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_kd = cumulative_mean(np.array(game_df['objectiveTeamWiped']))
    rm_kd = running_mean(np.array(game_df['objectiveTeamWiped']), 50)
    ax[5, 1].set_title('Team Wipes Per Game', fontsize='xx-large')
    ax[5, 1].plot(cm_kd, label='Team Wipes Cumulative Mean', color='tab:blue')
    ax[5, 1].plot(rm_kd, label='Team Wipes Running Mean', color='tab:blue', alpha=0.25)
    # ax[5, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[5, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Revives
    cm_kd = cumulative_mean(np.array(day_df['objectiveReviver']))
    rm_kd = running_mean(np.array(day_df['objectiveReviver']), 50)
    ax[6, 0].set_title('Revives Per Day', fontsize='xx-large')
    ax[6, 0].plot(cm_kd, label='Revives Cumulative Mean', color='tab:blue')
    ax[6, 0].plot(rm_kd, label='Revives Running Mean', color='tab:blue', alpha=0.25)
    # ax[6, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[6, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_kd = cumulative_mean(np.array(game_df['objectiveReviver']))
    rm_kd = running_mean(np.array(game_df['objectiveReviver']), 50)
    ax[6, 1].set_title('Revives Per Game', fontsize='xx-large')
    ax[6, 1].plot(cm_kd, label='Revives Cumulative Mean', color='tab:blue')
    ax[6, 1].plot(rm_kd, label='Revives Running Mean', color='tab:blue', alpha=0.25)
    # ax[6, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[6, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    # Missions Complete
    cm_kd = cumulative_mean(np.array(day_df['missionsComplete']))
    rm_kd = running_mean(np.array(day_df['missionsComplete']), 50)
    ax[7, 0].set_title('Missions Complete Per Day', fontsize='xx-large')
    ax[7, 0].plot(cm_kd, label='Missions Complete Cumulative Mean', color='tab:blue')
    ax[7, 0].plot(rm_kd, label='Missions Complete Running Mean', color='tab:blue', alpha=0.25)
    # ax[7, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[7, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    cm_kd = cumulative_mean(np.array(game_df['missionsComplete']))
    rm_kd = running_mean(np.array(game_df['missionsComplete']), 50)
    ax[7, 1].set_title('Missions Complete Per Game', fontsize='xx-large')
    ax[7, 1].plot(cm_kd, label='Missions Complete Cumulative Mean', color='tab:blue')
    ax[7, 1].plot(rm_kd, label='Missions Complete Running Mean', color='tab:blue', alpha=0.25)
    # ax[5, 0].set_xticks(np.arange(min(range(len(base_df['matchID']))), max(range(len(base_df['matchID']))) + 1, 100.0))
    ax[7, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
    plt.show()
def squad_plot(doc_filter: DocumentFilter, col_lst: Optional[List[str]] = None,
               reference_username: str = 'Claim') -> None:
    """
    Build and show a polar (radar) plot for visualizing squad stats.

    Each squad member's per-column mean is normalized against the reference
    player's mean, so the reference player sits at 1.0 on every axis and is
    drawn as a thick dashed orange line.

    :param doc_filter: A DocumentFilter.
    :type doc_filter: DocumentFilter
    :param col_lst: Columns to analyze; defaults to a standard stat set.
    :type col_lst: Optional[List[str]]
    :param reference_username: Username used as the normalization baseline.
    :type reference_username: str
    :return: *None*
    :example: *None*
    :note: This is intended to be used with map_choice and mode_choice inputted into the DocumentFilter.
        ``reference_username`` must appear in ``doc_filter.username_lst``.
    """
    data = doc_filter.df
    if col_lst is None:
        col_lst = ['kdRatio', 'kills', 'deaths', 'damageDone', 'damageTaken', 'objectiveTeamWiped', 'objectiveReviver',
                   'missionsComplete']
    # Mean of each requested stat column, per squad member.
    people_dic = {}
    for i in doc_filter.username_lst:
        temp_df = data[data['uno'] == doc_filter.username_dic[i]]
        people_dic[i] = {j: np.mean(temp_df[j]) for j in col_lst}
    people_df = pd.DataFrame.from_dict(people_dic, orient='index')
    # Normalize so the reference player equals 1.0 on every axis.
    baseline = people_df.loc[reference_username]
    normalized_df = (people_df - baseline) / baseline + 1
    fig = plt.figure(figsize=(15, 10))
    ax = fig.add_subplot(111, polar=True)
    # One distinct viridis color per non-reference squad member.
    n = len(doc_filter.username_lst) - 1
    cmap = [plt.get_cmap('viridis')(1. * i / n) for i in range(n)]
    # Angles for each stat axis; the extra point closes the polygon.
    theta = np.linspace(0, 2 * np.pi, len(col_lst) + 1)
    count = 0
    for person in doc_filter.username_lst:
        # Repeat the first value so the plotted line closes on itself.
        row = list(normalized_df.loc[person])
        if person == reference_username:
            ax.plot(theta, row + [row[0]], color='tab:orange', linewidth=4, alpha=1, linestyle=(0, (4, 2)))
        else:
            ax.plot(theta, row + [row[0]], color=cmap[count], linewidth=2, alpha=0.50)
            count += 1
    ax.legend(labels=doc_filter.username_lst,
              loc='upper left',
              fontsize='large',
              frameon=True,
              bbox_to_anchor=(1.05, 1)).get_frame().set_linewidth(2)
    ax.xaxis.set_ticks(theta)
    # Last tick (the closing angle) gets an empty label.
    ax.xaxis.set_ticklabels(col_lst + [''], fontsize='large')
    ax.grid(linewidth=1, linestyle=(0, (5, 5)), alpha=.75)
    plt.show()
| [
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((758, 806), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'figsize': '(30, 30)'}), '(nrows=3, ncols=2, figsize=(30, 30))\n', (770, 806), True, 'import matplotlib.pyplot as plt\n'), ((811, 886), 'matplotlib.pyplot.title', 'plt.title', (["('Personal Data for: ' + doc_filter.username)"], {'fontsize': '"""xx-large"""'}), "('Personal Data for: ' + doc_filter.username, fontsize='xx-large')\n", (820, 886), True, 'import matplotlib.pyplot as plt\n'), ((1288, 1346), 'pandas.DataFrame', 'pd.DataFrame', (['wl_ratio_lst'], {'columns': "['ratio']", 'index': 'dates'}), "(wl_ratio_lst, columns=['ratio'], index=dates)\n", (1300, 1346), True, 'import pandas as pd\n'), ((5333, 5343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5341, 5343), True, 'import matplotlib.pyplot as plt\n'), ((6498, 6546), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(8)', 'ncols': '(2)', 'figsize': '(30, 50)'}), '(nrows=8, ncols=2, figsize=(30, 50))\n', (6510, 6546), True, 'import matplotlib.pyplot as plt\n'), ((6551, 6599), 'matplotlib.pyplot.title', 'plt.title', (['"""Lobby Data for"""'], {'fontsize': '"""xx-large"""'}), "('Lobby Data for', fontsize='xx-large')\n", (6560, 6599), True, 'import matplotlib.pyplot as plt\n'), ((17294, 17304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17302, 17304), True, 'import matplotlib.pyplot as plt\n'), ((18197, 18247), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['people_dic'], {'orient': '"""index"""'}), "(people_dic, orient='index')\n", (18219, 18247), True, 'import pandas as pd\n'), ((18345, 18373), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (18355, 18373), True, 'import matplotlib.pyplot as plt\n'), ((20177, 20187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20185, 20187), True, 'import matplotlib.pyplot as plt\n'), ((1446, 1470), 'numpy.array', 'np.array', (["wl_df['ratio']"], {}), 
"(wl_df['ratio'])\n", (1454, 1470), True, 'import numpy as np\n'), ((1497, 1521), 'numpy.array', 'np.array', (["wl_df['ratio']"], {}), "(wl_df['ratio'])\n", (1505, 1521), True, 'import numpy as np\n'), ((1889, 1914), 'numpy.array', 'np.array', (["wl_df['losses']"], {}), "(wl_df['losses'])\n", (1897, 1914), True, 'import numpy as np\n'), ((2187, 2221), 'numpy.array', 'np.array', (["data['placementPercent']"], {}), "(data['placementPercent'])\n", (2195, 2221), True, 'import numpy as np\n'), ((2247, 2281), 'numpy.array', 'np.array', (["data['placementPercent']"], {}), "(data['placementPercent'])\n", (2255, 2281), True, 'import numpy as np\n'), ((2752, 2777), 'numpy.array', 'np.array', (["data['kdRatio']"], {}), "(data['kdRatio'])\n", (2760, 2777), True, 'import numpy as np\n'), ((2804, 2829), 'numpy.array', 'np.array', (["data['kdRatio']"], {}), "(data['kdRatio'])\n", (2812, 2829), True, 'import numpy as np\n'), ((3392, 3415), 'numpy.array', 'np.array', (["data['kills']"], {}), "(data['kills'])\n", (3400, 3415), True, 'import numpy as np\n'), ((3449, 3473), 'numpy.array', 'np.array', (["data['deaths']"], {}), "(data['deaths'])\n", (3457, 3473), True, 'import numpy as np\n'), ((3503, 3526), 'numpy.array', 'np.array', (["data['kills']"], {}), "(data['kills'])\n", (3511, 3526), True, 'import numpy as np\n'), ((3561, 3585), 'numpy.array', 'np.array', (["data['deaths']"], {}), "(data['deaths'])\n", (3569, 3585), True, 'import numpy as np\n'), ((4216, 4244), 'numpy.array', 'np.array', (["data['damageDone']"], {}), "(data['damageDone'])\n", (4224, 4244), True, 'import numpy as np\n'), ((4277, 4306), 'numpy.array', 'np.array', (["data['damageTaken']"], {}), "(data['damageTaken'])\n", (4285, 4306), True, 'import numpy as np\n'), ((4336, 4364), 'numpy.array', 'np.array', (["data['damageDone']"], {}), "(data['damageDone'])\n", (4344, 4364), True, 'import numpy as np\n'), ((4398, 4427), 'numpy.array', 'np.array', (["data['damageTaken']"], {}), "(data['damageTaken'])\n", (4406, 
4427), True, 'import numpy as np\n'), ((6638, 6665), 'numpy.array', 'np.array', (["day_df['kdRatio']"], {}), "(day_df['kdRatio'])\n", (6646, 6665), True, 'import numpy as np\n'), ((6692, 6719), 'numpy.array', 'np.array', (["day_df['kdRatio']"], {}), "(day_df['kdRatio'])\n", (6700, 6719), True, 'import numpy as np\n'), ((7198, 7226), 'numpy.array', 'np.array', (["game_df['kdRatio']"], {}), "(game_df['kdRatio'])\n", (7206, 7226), True, 'import numpy as np\n'), ((7253, 7281), 'numpy.array', 'np.array', (["game_df['kdRatio']"], {}), "(game_df['kdRatio'])\n", (7261, 7281), True, 'import numpy as np\n'), ((7860, 7885), 'numpy.array', 'np.array', (["day_df['kills']"], {}), "(day_df['kills'])\n", (7868, 7885), True, 'import numpy as np\n'), ((7919, 7945), 'numpy.array', 'np.array', (["day_df['deaths']"], {}), "(day_df['deaths'])\n", (7927, 7945), True, 'import numpy as np\n'), ((7975, 8000), 'numpy.array', 'np.array', (["day_df['kills']"], {}), "(day_df['kills'])\n", (7983, 8000), True, 'import numpy as np\n'), ((8035, 8061), 'numpy.array', 'np.array', (["day_df['deaths']"], {}), "(day_df['deaths'])\n", (8043, 8061), True, 'import numpy as np\n'), ((8768, 8794), 'numpy.array', 'np.array', (["game_df['kills']"], {}), "(game_df['kills'])\n", (8776, 8794), True, 'import numpy as np\n'), ((8828, 8855), 'numpy.array', 'np.array', (["game_df['deaths']"], {}), "(game_df['deaths'])\n", (8836, 8855), True, 'import numpy as np\n'), ((8885, 8911), 'numpy.array', 'np.array', (["game_df['kills']"], {}), "(game_df['kills'])\n", (8893, 8911), True, 'import numpy as np\n'), ((8946, 8973), 'numpy.array', 'np.array', (["game_df['deaths']"], {}), "(game_df['deaths'])\n", (8954, 8973), True, 'import numpy as np\n'), ((9621, 9651), 'numpy.array', 'np.array', (["day_df['damageDone']"], {}), "(day_df['damageDone'])\n", (9629, 9651), True, 'import numpy as np\n'), ((9684, 9715), 'numpy.array', 'np.array', (["day_df['damageTaken']"], {}), "(day_df['damageTaken'])\n", (9692, 9715), True, 'import 
numpy as np\n'), ((9745, 9775), 'numpy.array', 'np.array', (["day_df['damageDone']"], {}), "(day_df['damageDone'])\n", (9753, 9775), True, 'import numpy as np\n'), ((9809, 9840), 'numpy.array', 'np.array', (["day_df['damageTaken']"], {}), "(day_df['damageTaken'])\n", (9817, 9840), True, 'import numpy as np\n'), ((10486, 10517), 'numpy.array', 'np.array', (["game_df['damageDone']"], {}), "(game_df['damageDone'])\n", (10494, 10517), True, 'import numpy as np\n'), ((10550, 10582), 'numpy.array', 'np.array', (["game_df['damageTaken']"], {}), "(game_df['damageTaken'])\n", (10558, 10582), True, 'import numpy as np\n'), ((10612, 10643), 'numpy.array', 'np.array', (["game_df['damageDone']"], {}), "(game_df['damageDone'])\n", (10620, 10643), True, 'import numpy as np\n'), ((10677, 10709), 'numpy.array', 'np.array', (["game_df['damageTaken']"], {}), "(game_df['damageTaken'])\n", (10685, 10709), True, 'import numpy as np\n'), ((11371, 11408), 'numpy.array', 'np.array', (["day_df['percentTimeMoving']"], {}), "(day_df['percentTimeMoving'])\n", (11379, 11408), True, 'import numpy as np\n'), ((11435, 11472), 'numpy.array', 'np.array', (["day_df['percentTimeMoving']"], {}), "(day_df['percentTimeMoving'])\n", (11443, 11472), True, 'import numpy as np\n'), ((11953, 11991), 'numpy.array', 'np.array', (["game_df['percentTimeMoving']"], {}), "(game_df['percentTimeMoving'])\n", (11961, 11991), True, 'import numpy as np\n'), ((12018, 12056), 'numpy.array', 'np.array', (["game_df['percentTimeMoving']"], {}), "(game_df['percentTimeMoving'])\n", (12026, 12056), True, 'import numpy as np\n'), ((12562, 12598), 'numpy.array', 'np.array', (["day_df['distanceTraveled']"], {}), "(day_df['distanceTraveled'])\n", (12570, 12598), True, 'import numpy as np\n'), ((12625, 12661), 'numpy.array', 'np.array', (["day_df['distanceTraveled']"], {}), "(day_df['distanceTraveled'])\n", (12633, 12661), True, 'import numpy as np\n'), ((13160, 13197), 'numpy.array', 'np.array', (["game_df['distanceTraveled']"], 
{}), "(game_df['distanceTraveled'])\n", (13168, 13197), True, 'import numpy as np\n'), ((13224, 13261), 'numpy.array', 'np.array', (["game_df['distanceTraveled']"], {}), "(game_df['distanceTraveled'])\n", (13232, 13261), True, 'import numpy as np\n'), ((13778, 13816), 'numpy.array', 'np.array', (["day_df['objectiveTeamWiped']"], {}), "(day_df['objectiveTeamWiped'])\n", (13786, 13816), True, 'import numpy as np\n'), ((13843, 13881), 'numpy.array', 'np.array', (["day_df['objectiveTeamWiped']"], {}), "(day_df['objectiveTeamWiped'])\n", (13851, 13881), True, 'import numpy as np\n'), ((14359, 14398), 'numpy.array', 'np.array', (["game_df['objectiveTeamWiped']"], {}), "(game_df['objectiveTeamWiped'])\n", (14367, 14398), True, 'import numpy as np\n'), ((14425, 14464), 'numpy.array', 'np.array', (["game_df['objectiveTeamWiped']"], {}), "(game_df['objectiveTeamWiped'])\n", (14433, 14464), True, 'import numpy as np\n'), ((14957, 14993), 'numpy.array', 'np.array', (["day_df['objectiveReviver']"], {}), "(day_df['objectiveReviver'])\n", (14965, 14993), True, 'import numpy as np\n'), ((15020, 15056), 'numpy.array', 'np.array', (["day_df['objectiveReviver']"], {}), "(day_df['objectiveReviver'])\n", (15028, 15056), True, 'import numpy as np\n'), ((15525, 15562), 'numpy.array', 'np.array', (["game_df['objectiveReviver']"], {}), "(game_df['objectiveReviver'])\n", (15533, 15562), True, 'import numpy as np\n'), ((15589, 15626), 'numpy.array', 'np.array', (["game_df['objectiveReviver']"], {}), "(game_df['objectiveReviver'])\n", (15597, 15626), True, 'import numpy as np\n'), ((16120, 16156), 'numpy.array', 'np.array', (["day_df['missionsComplete']"], {}), "(day_df['missionsComplete'])\n", (16128, 16156), True, 'import numpy as np\n'), ((16183, 16219), 'numpy.array', 'np.array', (["day_df['missionsComplete']"], {}), "(day_df['missionsComplete'])\n", (16191, 16219), True, 'import numpy as np\n'), ((16718, 16755), 'numpy.array', 'np.array', (["game_df['missionsComplete']"], {}), 
"(game_df['missionsComplete'])\n", (16726, 16755), True, 'import numpy as np\n'), ((16782, 16819), 'numpy.array', 'np.array', (["game_df['missionsComplete']"], {}), "(game_df['missionsComplete'])\n", (16790, 16819), True, 'import numpy as np\n'), ((6094, 6115), 'numpy.mean', 'np.mean', (['temp_df[col]'], {}), '(temp_df[col])\n', (6101, 6115), True, 'import numpy as np\n'), ((6262, 6283), 'numpy.mean', 'np.mean', (['temp_df[col]'], {}), '(temp_df[col])\n', (6269, 6283), True, 'import numpy as np\n'), ((6318, 6382), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['day_dic'], {'orient': '"""index"""', 'columns': 'col_lst'}), "(day_dic, orient='index', columns=col_lst)\n", (6340, 6382), True, 'import pandas as pd\n'), ((6407, 6472), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['game_dic'], {'orient': '"""index"""', 'columns': 'col_lst'}), "(game_dic, orient='index', columns=col_lst)\n", (6429, 6472), True, 'import pandas as pd\n'), ((18142, 18161), 'numpy.mean', 'np.mean', (['temp_df[j]'], {}), '(temp_df[j])\n', (18149, 18161), True, 'import numpy as np\n'), ((18470, 18493), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (18482, 18493), True, 'import matplotlib.pyplot as plt\n')] |
# coding: utf-8
import tensorflow as tf
import numpy as np
class TCNNConfig(object):
    """Hyper-parameter configuration for the TextCNN classifier."""
    embedding_dim = 64  # word-embedding dimension
    seq_length = 30  # input sequence length
    num_classes = 8  # number of output classes
    num_filters = 128  # number of convolution filters
    vocab_size = 5000  # vocabulary size
    l2_reg_lambda = 0.0  # L2 regularization coefficient (0.0 disables it)
    filter_sizes = [2, 3, 4, 5]  # convolution kernel widths (n-gram sizes)
    kernel_size = 3  # kernel size
    hidden_dim = 128  # neurons in the fully connected layer
    decay_steps = 1000  # steps between learning-rate decays
    decay_rate = 0.64  # learning-rate decay factor
    dropout_keep_prob = 0.8  # dropout keep probability
    # learning_rate = 1e-3  # learning rate (previous value)
    learning_rate = 0.001  # learning rate
    batch_size = 128  # training batch size
    num_epochs = 100  # total number of training epochs
    print_per_batch = 100  # print results every N batches
    save_per_batch = 10  # write to tensorboard every N batches
class TextCNN(object):
"""文本分类,CNN模型"""
    def __init__(self, config):
        """Build the TF1 graph inputs and kick off model construction.

        :param config: a TCNNConfig instance supplying all hyper-parameters.
        """
        self.config = config

        # The three input placeholders: token ids, one-hot labels, dropout keep prob.
        self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='rate')
        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.decay_steps, self.decay_rate = self.config.decay_steps, self.config.decay_rate
        self.l2_loss = tf.constant(0.0)  # accumulator for L2 regularization terms

        def gelu(input_tensor):
            """Gaussian Error Linear Unit.

            This is a smoother version of the RELU.
            Original paper: https://arxiv.org/abs/1606.08415
            Args:
              input_tensor: float Tensor to perform activation.
            Returns:
              `input_tensor` with the GELU activation applied.
            """
            cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
            return input_tensor * cdf

        def swish(x):
            # Swish activation: x * sigmoid(x).
            return x * tf.nn.sigmoid(x)

        # Expose the activations on the instance for use by the rest of the graph.
        self.gelu = gelu
        self.swish = swish
        self.cnn()
def cbam_module(self, inputs, reduction_ratio=0.5, name=""):
with tf.variable_scope("cbam_" + name, reuse=tf.AUTO_REUSE):
batch_size, hidden_num = inputs.get_shape().as_list()[0], inputs.get_shape().as_list()[3]
maxpool_channel = tf.reduce_max(tf.reduce_max(inputs, axis=1, keepdims=True), axis=2, keepdims=True)
avgpool_channel = tf.reduce_mean(tf.reduce_mean(inputs, axis=1, keepdims=True), axis=2, keepdims=True)
maxpool_channel = tf.layers.Flatten()(maxpool_channel)
avgpool_channel = tf.layers.Flatten()(avgpool_channel)
mlp_1_max = tf.layers.dense(inputs=maxpool_channel, units=int(hidden_num * reduction_ratio), name="mlp_1",
reuse=None, activation=tf.nn.relu)
mlp_2_max = tf.layers.dense(inputs=mlp_1_max, units=hidden_num, name="mlp_2", reuse=None)
mlp_2_max = tf.reshape(mlp_2_max, [batch_size, 1, 1, hidden_num])
mlp_1_avg = tf.layers.dense(inputs=avgpool_channel, units=int(hidden_num * reduction_ratio), name="mlp_1",
reuse=True, activation=tf.nn.relu)
mlp_2_avg = tf.layers.dense(inputs=mlp_1_avg, units=hidden_num, name="mlp_2", reuse=True)
mlp_2_avg = tf.reshape(mlp_2_avg, [batch_size, 1, 1, hidden_num])
channel_attention = tf.nn.sigmoid(mlp_2_max + mlp_2_avg)
channel_refined_feature = inputs * channel_attention
maxpool_spatial = tf.reduce_max(inputs, axis=3, keepdims=True)
avgpool_spatial = tf.reduce_mean(inputs, axis=3, keepdims=True)
max_avg_pool_spatial = tf.concat([maxpool_spatial, avgpool_spatial], axis=3)
conv_layer = tf.layers.conv2d(inputs=max_avg_pool_spatial, filters=1, kernel_size=(7, 7), padding="same",
activation=None)
spatial_attention = tf.nn.sigmoid(conv_layer)
refined_feature = channel_refined_feature * spatial_attention
return refined_feature
@staticmethod
def Global_Average_Pooling(x, stride=1):
width = np.shape(x)[1]
height = np.shape(x)[2]
pool_size = [width, height]
return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
@staticmethod
def Fully_connected(x, units=None, layer_name='fully_connected'):
with tf.name_scope(layer_name):
return tf.layers.dense(inputs=x, use_bias=True, units=units)
def Squeeze_excitation_layer(self, input_x, out_dim, ratio, layer_name):
with tf.name_scope(layer_name):
squeeze = self.Global_Average_Pooling(input_x)
excitation = self.Fully_connected(squeeze, units=out_dim / ratio,
layer_name=layer_name + '_fully_connected1')
excitation = tf.nn.relu(excitation)
excitation = self.Fully_connected(excitation, units=out_dim, layer_name=layer_name + '_fully_connected2')
excitation = tf.nn.sigmoid(excitation)
excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
scale = input_x * excitation
return scale
def attention_encoder(self, X, stddev=0.1):
"""
attention encoder layer
"""
M = X.get_shape().as_list()[1]
N = X.get_shape().as_list()[2]
reshaped_x = tf.reshape(X, [-1, N, M])
attention = tf.layers.dense(reshaped_x, M, activation='softmax')
attention = tf.reshape(attention, [-1, M, N])
outputs = tf.multiply(X, attention)
return outputs
def conv2d_block(self, X, W):
"""
gated dilation conv1d layer
"""
glu = tf.sigmoid(tf.nn.conv2d(
X,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv"))
conv1 = tf.nn.conv2d(
X,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
gated_conv = tf.multiply(conv1, glu)
return gated_conv
def cnn_block(self, num_filters, h, j, i):
# W1 = tf.Variable(tf.truncated_normal([3, 1, num_filters, num_filters], stddev=0.1), name="W1")
W1 = tf.get_variable(
"W1_" + str(i) + str(j),
shape=[3, 1, num_filters, num_filters],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
b1 = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b1_" + str(i) + str(j))
conv1 = tf.nn.conv2d(
h,
W1,
strides=[1, 1, 1, 1],
padding="SAME")
h1 = tf.nn.relu(tf.nn.bias_add(conv1, b1), name="relu1")
# W2 = tf.Variable(tf.truncated_normal([3, 1, num_filters, num_filters], stddev=0.1), name="W2")
W2 = tf.get_variable(
"W2_" + str(i) + str(j),
shape=[3, 1, num_filters, num_filters],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
b2 = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b2_" + str(i) + str(j))
conv2 = tf.nn.conv2d(
h1,
W2,
strides=[1, 1, 1, 1],
padding="SAME")
h2 = tf.nn.relu(tf.nn.bias_add(conv2, b2), name="relu2")
self.l2_loss += tf.nn.l2_loss(W2)
self.l2_loss += tf.nn.l2_loss(b2)
return h2
def cnn(self):
"""CNN模型"""
# 词向量映射
with tf.device('/cpu:0'), tf.name_scope("embedding"):
embedding = tf.Variable(tf.random_uniform([self.config.vocab_size, self.config.embedding_dim], -1.0, 1.0),
name='embedding')
self.embedding_inputs = tf.expand_dims(tf.nn.embedding_lookup(embedding, self.input_x), -1)
with tf.name_scope("cnn"):
pooled_outputs = []
for i, filter_size in enumerate(self.config.filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, self.config.embedding_dim, 1, self.config.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[self.config.num_filters]), name="b")
conv = self.conv2d_block(self.embedding_inputs, W)
conv = tf.layers.batch_normalization(conv, name='cnn_bn_%s' % filter_size)
# Apply nonlinearity
h = self.swish(tf.nn.bias_add(conv, b))
channel = int(np.shape(h)[-1])
h = self.Squeeze_excitation_layer(h, out_dim=channel, ratio=4,
layer_name='SE_B_%s' % filter_size)
# 残差结构
# for j in range(4):
# h2 = self.cnn_block(self.config.num_filters, h, j, i)
# h = h2 + h
# Maxpooling over the outputs
linear_max_pooled = tf.nn.max_pool(
h,
ksize=[1, self.config.seq_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
gated_max_pooled = tf.nn.max_pool(
h,
ksize=[1, self.config.seq_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="avg")
pooled = linear_max_pooled * tf.sigmoid(gated_max_pooled)
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = self.config.num_filters * len(self.config.filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.keep_prob)
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, self.config.num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[self.config.num_classes]), name="b")
self.l2_loss += tf.nn.l2_loss(W)
self.l2_loss += tf.nn.l2_loss(b)
self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name="logits")
self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1, name="predictions")
self.y_pred_prob = tf.reduce_max(tf.nn.softmax(self.logits), axis=1, name='prediction_prob')
# Calculate mean cross-entropy loss
with tf.name_scope("optimize"):
# learning_rate = tf.train.exponential_decay(self.config.learning_rate, self.global_step, self.decay_steps,
# self.decay_rate, staircase=True)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_mean(cross_entropy) + self.config.l2_reg_lambda * self.l2_loss
self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
# Accuracy
with tf.name_scope("accuracy"):
correct_pred = tf.equal(self.y_pred_cls, tf.argmax(self.input_y, 1))
self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")
| [
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"numpy.shape",
"tensorflow.multiply",
"tensorflow.sigmoid",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.reduce_max",
"tensorflow.sqrt",
"tensorflow.layers.batch_normalization",
... | [((817, 889), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.config.seq_length]'], {'name': '"""input_x"""'}), "(tf.int32, [None, self.config.seq_length], name='input_x')\n", (831, 889), True, 'import tensorflow as tf\n'), ((913, 988), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.config.num_classes]'], {'name': '"""input_y"""'}), "(tf.float32, [None, self.config.num_classes], name='input_y')\n", (927, 988), True, 'import tensorflow as tf\n'), ((1014, 1053), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""rate"""'}), "(tf.float32, name='rate')\n", (1028, 1053), True, 'import tensorflow as tf\n'), ((1082, 1133), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""Global_Step"""'}), "(0, trainable=False, name='Global_Step')\n", (1093, 1133), True, 'import tensorflow as tf\n'), ((1250, 1266), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (1261, 1266), True, 'import tensorflow as tf\n'), ((4129, 4203), 'tensorflow.layers.average_pooling2d', 'tf.layers.average_pooling2d', ([], {'inputs': 'x', 'pool_size': 'pool_size', 'strides': 'stride'}), '(inputs=x, pool_size=pool_size, strides=stride)\n', (4156, 4203), True, 'import tensorflow as tf\n'), ((5312, 5337), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, N, M]'], {}), '(X, [-1, N, M])\n', (5322, 5337), True, 'import tensorflow as tf\n'), ((5358, 5410), 'tensorflow.layers.dense', 'tf.layers.dense', (['reshaped_x', 'M'], {'activation': '"""softmax"""'}), "(reshaped_x, M, activation='softmax')\n", (5373, 5410), True, 'import tensorflow as tf\n'), ((5431, 5464), 'tensorflow.reshape', 'tf.reshape', (['attention', '[-1, M, N]'], {}), '(attention, [-1, M, N])\n', (5441, 5464), True, 'import tensorflow as tf\n'), ((5483, 5508), 'tensorflow.multiply', 'tf.multiply', (['X', 'attention'], {}), '(X, attention)\n', (5494, 5508), True, 'import tensorflow as tf\n'), ((5801, 5871), 
'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['X', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""conv"""'}), "(X, W, strides=[1, 1, 1, 1], padding='VALID', name='conv')\n", (5813, 5871), True, 'import tensorflow as tf\n'), ((5954, 5977), 'tensorflow.multiply', 'tf.multiply', (['conv1', 'glu'], {}), '(conv1, glu)\n', (5965, 5977), True, 'import tensorflow as tf\n'), ((6458, 6515), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['h', 'W1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(h, W1, strides=[1, 1, 1, 1], padding='SAME')\n", (6470, 6515), True, 'import tensorflow as tf\n'), ((7035, 7093), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['h1', 'W2'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(h1, W2, strides=[1, 1, 1, 1], padding='SAME')\n", (7047, 7093), True, 'import tensorflow as tf\n'), ((7232, 7249), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W2'], {}), '(W2)\n', (7245, 7249), True, 'import tensorflow as tf\n'), ((7274, 7291), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['b2'], {}), '(b2)\n', (7287, 7291), True, 'import tensorflow as tf\n'), ((1961, 2015), 'tensorflow.variable_scope', 'tf.variable_scope', (["('cbam_' + name)"], {'reuse': 'tf.AUTO_REUSE'}), "('cbam_' + name, reuse=tf.AUTO_REUSE)\n", (1978, 2015), True, 'import tensorflow as tf\n'), ((2702, 2779), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'mlp_1_max', 'units': 'hidden_num', 'name': '"""mlp_2"""', 'reuse': 'None'}), "(inputs=mlp_1_max, units=hidden_num, name='mlp_2', reuse=None)\n", (2717, 2779), True, 'import tensorflow as tf\n'), ((2804, 2857), 'tensorflow.reshape', 'tf.reshape', (['mlp_2_max', '[batch_size, 1, 1, hidden_num]'], {}), '(mlp_2_max, [batch_size, 1, 1, hidden_num])\n', (2814, 2857), True, 'import tensorflow as tf\n'), ((3077, 3154), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'mlp_1_avg', 'units': 'hidden_num', 'name': '"""mlp_2"""', 'reuse': '(True)'}), "(inputs=mlp_1_avg, units=hidden_num, 
name='mlp_2', reuse=True)\n", (3092, 3154), True, 'import tensorflow as tf\n'), ((3179, 3232), 'tensorflow.reshape', 'tf.reshape', (['mlp_2_avg', '[batch_size, 1, 1, hidden_num]'], {}), '(mlp_2_avg, [batch_size, 1, 1, hidden_num])\n', (3189, 3232), True, 'import tensorflow as tf\n'), ((3266, 3302), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(mlp_2_max + mlp_2_avg)'], {}), '(mlp_2_max + mlp_2_avg)\n', (3279, 3302), True, 'import tensorflow as tf\n'), ((3399, 3443), 'tensorflow.reduce_max', 'tf.reduce_max', (['inputs'], {'axis': '(3)', 'keepdims': '(True)'}), '(inputs, axis=3, keepdims=True)\n', (3412, 3443), True, 'import tensorflow as tf\n'), ((3474, 3519), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': '(3)', 'keepdims': '(True)'}), '(inputs, axis=3, keepdims=True)\n', (3488, 3519), True, 'import tensorflow as tf\n'), ((3555, 3608), 'tensorflow.concat', 'tf.concat', (['[maxpool_spatial, avgpool_spatial]'], {'axis': '(3)'}), '([maxpool_spatial, avgpool_spatial], axis=3)\n', (3564, 3608), True, 'import tensorflow as tf\n'), ((3634, 3747), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'max_avg_pool_spatial', 'filters': '(1)', 'kernel_size': '(7, 7)', 'padding': '"""same"""', 'activation': 'None'}), "(inputs=max_avg_pool_spatial, filters=1, kernel_size=(7, 7),\n padding='same', activation=None)\n", (3650, 3747), True, 'import tensorflow as tf\n'), ((3818, 3843), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['conv_layer'], {}), '(conv_layer)\n', (3831, 3843), True, 'import tensorflow as tf\n'), ((4031, 4042), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (4039, 4042), True, 'import numpy as np\n'), ((4063, 4074), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (4071, 4074), True, 'import numpy as np\n'), ((4306, 4331), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (4319, 4331), True, 'import tensorflow as tf\n'), ((4352, 4405), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'x', 
'use_bias': '(True)', 'units': 'units'}), '(inputs=x, use_bias=True, units=units)\n', (4367, 4405), True, 'import tensorflow as tf\n'), ((4497, 4522), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (4510, 4522), True, 'import tensorflow as tf\n'), ((4778, 4800), 'tensorflow.nn.relu', 'tf.nn.relu', (['excitation'], {}), '(excitation)\n', (4788, 4800), True, 'import tensorflow as tf\n'), ((4944, 4969), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['excitation'], {}), '(excitation)\n', (4957, 4969), True, 'import tensorflow as tf\n'), ((4996, 5039), 'tensorflow.reshape', 'tf.reshape', (['excitation', '[-1, 1, 1, out_dim]'], {}), '(excitation, [-1, 1, 1, out_dim])\n', (5006, 5039), True, 'import tensorflow as tf\n'), ((5652, 5722), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['X', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""conv"""'}), "(X, W, strides=[1, 1, 1, 1], padding='VALID', name='conv')\n", (5664, 5722), True, 'import tensorflow as tf\n'), ((6373, 6410), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[num_filters]'}), '(0.1, shape=[num_filters])\n', (6384, 6410), True, 'import tensorflow as tf\n'), ((6589, 6614), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv1', 'b1'], {}), '(conv1, b1)\n', (6603, 6614), True, 'import tensorflow as tf\n'), ((6950, 6987), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[num_filters]'}), '(0.1, shape=[num_filters])\n', (6961, 6987), True, 'import tensorflow as tf\n'), ((7167, 7192), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv2', 'b2'], {}), '(conv2, b2)\n', (7181, 7192), True, 'import tensorflow as tf\n'), ((7379, 7398), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7388, 7398), True, 'import tensorflow as tf\n'), ((7400, 7426), 'tensorflow.name_scope', 'tf.name_scope', (['"""embedding"""'], {}), "('embedding')\n", (7413, 7426), True, 'import tensorflow as tf\n'), ((7719, 7739), 'tensorflow.name_scope', 
'tf.name_scope', (['"""cnn"""'], {}), "('cnn')\n", (7732, 7739), True, 'import tensorflow as tf\n'), ((9841, 9869), 'tensorflow.concat', 'tf.concat', (['pooled_outputs', '(3)'], {}), '(pooled_outputs, 3)\n', (9850, 9869), True, 'import tensorflow as tf\n'), ((9901, 9949), 'tensorflow.reshape', 'tf.reshape', (['self.h_pool', '[-1, num_filters_total]'], {}), '(self.h_pool, [-1, num_filters_total])\n', (9911, 9949), True, 'import tensorflow as tf\n'), ((9964, 9988), 'tensorflow.name_scope', 'tf.name_scope', (['"""dropout"""'], {}), "('dropout')\n", (9977, 9988), True, 'import tensorflow as tf\n'), ((10016, 10063), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.h_pool_flat', 'self.keep_prob'], {}), '(self.h_pool_flat, self.keep_prob)\n', (10029, 10063), True, 'import tensorflow as tf\n'), ((10078, 10101), 'tensorflow.name_scope', 'tf.name_scope', (['"""output"""'], {}), "('output')\n", (10091, 10101), True, 'import tensorflow as tf\n'), ((10410, 10426), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W'], {}), '(W)\n', (10423, 10426), True, 'import tensorflow as tf\n'), ((10455, 10471), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['b'], {}), '(b)\n', (10468, 10471), True, 'import tensorflow as tf\n'), ((10498, 10547), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['self.h_drop', 'W', 'b'], {'name': '"""logits"""'}), "(self.h_drop, W, b, name='logits')\n", (10513, 10547), True, 'import tensorflow as tf\n'), ((10802, 10827), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimize"""'], {}), "('optimize')\n", (10815, 10827), True, 'import tensorflow as tf\n'), ((11067, 11155), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'self.logits', 'labels': 'self.input_y'}), '(logits=self.logits, labels=self.\n input_y)\n', (11109, 11155), True, 'import tensorflow as tf\n'), ((11390, 11415), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (11403, 11415), True, 'import 
tensorflow as tf\n'), ((1793, 1809), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (1806, 1809), True, 'import tensorflow as tf\n'), ((2164, 2208), 'tensorflow.reduce_max', 'tf.reduce_max', (['inputs'], {'axis': '(1)', 'keepdims': '(True)'}), '(inputs, axis=1, keepdims=True)\n', (2177, 2208), True, 'import tensorflow as tf\n'), ((2278, 2323), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': '(1)', 'keepdims': '(True)'}), '(inputs, axis=1, keepdims=True)\n', (2292, 2323), True, 'import tensorflow as tf\n'), ((2379, 2398), 'tensorflow.layers.Flatten', 'tf.layers.Flatten', ([], {}), '()\n', (2396, 2398), True, 'import tensorflow as tf\n'), ((2446, 2465), 'tensorflow.layers.Flatten', 'tf.layers.Flatten', ([], {}), '()\n', (2463, 2465), True, 'import tensorflow as tf\n'), ((6301, 6346), 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (6344, 6346), True, 'import tensorflow as tf\n'), ((6878, 6923), 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (6921, 6923), True, 'import tensorflow as tf\n'), ((7464, 7549), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.config.vocab_size, self.config.embedding_dim]', '(-1.0)', '(1.0)'], {}), '([self.config.vocab_size, self.config.embedding_dim], -1.0,\n 1.0)\n', (7481, 7549), True, 'import tensorflow as tf\n'), ((7652, 7699), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'self.input_x'], {}), '(embedding, self.input_x)\n', (7674, 7699), True, 'import tensorflow as tf\n'), ((10321, 10370), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[self.config.num_classes]'}), '(0.1, shape=[self.config.num_classes])\n', (10332, 10370), True, 'import tensorflow as tf\n'), ((10588, 10614), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits'], {}), '(self.logits)\n', (10601, 10614), True, 'import tensorflow as 
tf\n'), ((10684, 10710), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits'], {}), '(self.logits)\n', (10697, 10710), True, 'import tensorflow as tf\n'), ((11175, 11204), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (11189, 11204), True, 'import tensorflow as tf\n'), ((11470, 11496), 'tensorflow.argmax', 'tf.argmax', (['self.input_y', '(1)'], {}), '(self.input_y, 1)\n', (11479, 11496), True, 'import tensorflow as tf\n'), ((11536, 11569), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (11543, 11569), True, 'import tensorflow as tf\n'), ((7865, 7911), 'tensorflow.name_scope', 'tf.name_scope', (["('conv-maxpool-%s' % filter_size)"], {}), "('conv-maxpool-%s' % filter_size)\n", (7878, 7911), True, 'import tensorflow as tf\n'), ((8345, 8412), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['conv'], {'name': "('cnn_bn_%s' % filter_size)"}), "(conv, name='cnn_bn_%s' % filter_size)\n", (8374, 8412), True, 'import tensorflow as tf\n'), ((9015, 9147), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['h'], {'ksize': '[1, self.config.seq_length - filter_size + 1, 1, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""pool"""'}), "(h, ksize=[1, self.config.seq_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n", (9029, 9147), True, 'import tensorflow as tf\n'), ((9304, 9435), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['h'], {'ksize': '[1, self.config.seq_length - filter_size + 1, 1, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""avg"""'}), "(h, ksize=[1, self.config.seq_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='avg')\n", (9318, 9435), True, 'import tensorflow as tf\n'), ((10253, 10291), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10289, 10291), True, 'import tensorflow as 
tf\n'), ((11273, 11336), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.config.learning_rate'}), '(learning_rate=self.config.learning_rate)\n', (11295, 11336), True, 'import tensorflow as tf\n'), ((8093, 8138), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['filter_shape'], {'stddev': '(0.1)'}), '(filter_shape, stddev=0.1)\n', (8112, 8138), True, 'import tensorflow as tf\n'), ((8186, 8235), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[self.config.num_filters]'}), '(0.1, shape=[self.config.num_filters])\n', (8197, 8235), True, 'import tensorflow as tf\n'), ((8490, 8513), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {}), '(conv, b)\n', (8504, 8513), True, 'import tensorflow as tf\n'), ((9602, 9630), 'tensorflow.sigmoid', 'tf.sigmoid', (['gated_max_pooled'], {}), '(gated_max_pooled)\n', (9612, 9630), True, 'import tensorflow as tf\n'), ((1694, 1706), 'tensorflow.sqrt', 'tf.sqrt', (['(2.0)'], {}), '(2.0)\n', (1701, 1706), True, 'import tensorflow as tf\n'), ((8549, 8560), 'numpy.shape', 'np.shape', (['h'], {}), '(h)\n', (8557, 8560), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import math
from torch.nn import functional as F
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, boxlist_iou
from maskrcnn_benchmark.modeling.python_nms import cyto_nms
import pdb
# F.kl_div
class RelationModule(nn.Module):
    """Multi-head self-attention relation module over per-class ROI features.

    For each foreground class, every ROI attends to the top-k most related
    ROIs of the same class; the attention logits combine appearance
    similarity (scaled dot product), a learned geometric prior (WG over a
    pairwise position embedding) and optionally a pairwise-IoU term.
    """
    def __init__(self, appearance_feature_dim=1024, geo_feature_dim = 64 ,
                 fc_dim=(64, 16), group=16, dim=(1024, 1024, 1024), topk = 10, iou_method = 'b'):
        """
        Args:
            appearance_feature_dim: per-ROI appearance feature size.
            geo_feature_dim: size of the pairwise geometric embedding.
            fc_dim: (geometric embedding dim, number of geometric heads);
                fc_dim[1] must equal `group` (asserted below).
            group: number of attention heads.
            dim: (query dim, key dim, output dim) of the attention.
            topk: number of neighbours kept per ROI in the attention.
            iou_method: how IoU enters the logits — 's' uses log(1+iou),
                'h' uses log(iou), anything else binarises iou first.
        """
        super(RelationModule, self).__init__()
        self.fc_dim = fc_dim
        # Per-head query/key/output dimensions.
        self.dim_group = (int(dim[0] / group), int(dim[1] / group), int( dim[2] / group))
        self.dim = dim
        self.group = group
        # Geometric prior: position embedding -> one weight per head pair.
        self.WG = nn.Linear(geo_feature_dim, fc_dim[1], bias=True)
        # 1024, 1024
        self.WK = nn.Linear(appearance_feature_dim, dim[1], bias=True)
        self.WQ = nn.Linear(appearance_feature_dim, dim[0], bias=True)
        # self.WV = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
        self.relu = nn.ReLU(inplace=True)
        # 1x1 grouped conv fuses the per-head outputs back to dim[2].
        self.conv1 = nn.Conv2d(self.fc_dim[1] * appearance_feature_dim, dim[2], 1, groups=group )
        self.topk = topk
        self.iou_method = iou_method
        assert fc_dim[1] == group, 'Check the dimensions in attention!'
    def forward(self, f_a, position_embedding, iou):
        """Compute relation features.

        Args:
            f_a: [num_rois, num_fg_classes, feat_dim] appearance features.
            position_embedding: pairwise geometric embedding, reshaped
                below to [num_fg_classes * num_rois * num_rois, fc_dim[0]].
            iou: pairwise IoU matrices or None. NOTE(review): the
                torch.cat([iou[0]..., iou[1]...]) below assumes exactly two
                foreground classes — confirm for other class counts.
        Returns:
            [num_rois, num_fg_classes, dim[2]] attended features.
        """
        # f_a: [num_rois, num_fg_classes, feat_dim]
        N, num_fg_class, feat_dim = f_a.size()
        # f_a = f_a.transpose(0, 1)
        f_a = f_a.permute(1,0,2)
        # f_a_reshape [num-roi*num-fg-cls, feat-dim]
        f_a_reshape = f_a.contiguous().view( N*num_fg_class, feat_dim)
        # [num_fg_classes * num_rois * num_rois, fc_dim[0]]
        position_embedding = position_embedding.view(-1,self.fc_dim[0])
        # Geometric attention weights (ReLU keeps them non-negative).
        w_g = self.relu(self.WG(position_embedding))
        w_k = self.WK(f_a_reshape)
        # [ num_rpi, 16, 64 ]
        w_k = w_k.view(-1, N, self.group ,self.dim_group[1])
        w_k = w_k.permute(0,2,3,1)
        # k_data_batch, [num_fg_classes * group, dim_group[1], num_rois,]
        w_k = w_k.contiguous().view(-1, self.dim_group[1], N )
        w_q = self.WQ(f_a_reshape)
        w_q = w_q.view(-1, N,self.group, self.dim_group[0])
        w_q = w_q.transpose( 1, 2)
        # q_data_batch, [num_fg_classes * group, num_rois, dim_group[0]]
        w_q = w_q.contiguous().view(-1, N, self.dim_group[0] )
        # aff, [num_fg_classes * group, num_rois, num_rois]
        # Scaled dot-product appearance affinity.
        aff = (1.0 / math.sqrt(float(self.dim_group[1]))) * torch.bmm(w_q, w_k)
        w_g = w_g.view(-1, N, N, self.fc_dim[1])
        # [num_fg_classes, fc_dim[1], num_rois, num_rois]
        w_g = w_g.permute(0,3,1,2)
        # [num_fg_classes * fc_dim[1], num_rois, num_rois]
        w_g = w_g.contiguous().view(-1 ,N ,N)
        if iou is not None:
            iou = torch.cat([iou[0][None, :, :], iou[1][None, :, :]])
            if self.iou_method == 's':
                # Soft: log(1 + iou).
                log_iou = torch.log(iou + 1)
            elif self.iou_method == 'h':
                # Hard: log(iou), clamped away from zero.
                log_iou = torch.log(torch.clamp(iou, min = 1e-6))
            else:
                # Binarised: any overlap >= 1e-6 counts as 1 (log -> 0).
                iou[iou>=1e-6] = 1
                log_iou = torch.log(torch.clamp(iou, min = 1e-6))
            # Broadcast the IoU term to every attention head.
            log_iou = log_iou[:,None,:,:].repeat(1, self.group,1,1).view(-1, N, N)
            w_mn = torch.log(torch.clamp(w_g, min = 1e-6)) + aff + log_iou
        else:
            w_mn = torch.log(torch.clamp(w_g, min=1e-6)) + aff
        # Keep only the top-k logits per ROI; everything else gets zero
        # weight after the scatter below.
        top_k = min(N, self.topk)
        w_mn_topk, indices = torch.topk(w_mn, top_k, dim=2,
                                           largest=True, sorted=True)
        res = torch.zeros_like(w_mn).to(w_mn.device)
        w_mn_topk = F.softmax(w_mn_topk, dim = 2)
        w_mn_topk = res.scatter(2, indices, w_mn_topk)
        w_mn_topk = w_mn_topk.view(num_fg_class, -1, N)
        # Weighted sum of the per-class appearance features.
        output = torch.bmm(w_mn_topk, f_a)
        output = output.view(num_fg_class, self.fc_dim[1], N, feat_dim)
        output = output.permute(1, 3, 2, 0)
        # output_t_reshape, [1, fc_dim[1] * feat_dim, num_rois, num_fg_classes]
        output = output.contiguous().view(1, self.fc_dim[1] * feat_dim, N, -1)
        # [1, 128, nroi, cls]
        output = self.conv1(output)
        output = output.squeeze()
        output = output.permute(1,2,0)
        return output
#
class DuplicationRemovalNetwork(nn.Module):
    def __init__(self, cfg, is_teacher=False, ):
        """Build the learned-NMS (duplicate removal) head from `cfg`.

        Args:
            cfg: maskrcnn-benchmark config node; cloned so later mutation of
                the caller's config does not affect this module.
            is_teacher: NOTE(review): accepted but not used in this
                constructor — confirm whether it is still needed.
        """
        super(DuplicationRemovalNetwork, self).__init__()
        self.cfg = cfg.clone()
        # if reg_iou = True, then this network is used to regress
        # the iou to the GT. if not True, this predict
        # true-object/duplicate
        self.reg_iou = self.cfg.MODEL.RELATION_NMS.REG_IOU
        # Keep only the top first_n boxes per image for relation reasoning.
        self.first_n = cfg.MODEL.RELATION_NMS.FIRST_N
        self.NMS_thread = cfg.MODEL.RELATION_NMS.THREAD
        # Embeddings: rank embedding and ROI-feature embedding share the
        # appearance feature dimension expected by RelationModule.
        self.nms_rank_fc = nn.Linear(cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM, cfg.MODEL.RELATION_NMS.APPEARANCE_FEAT_DIM, bias=True)
        self.roi_feat_embedding_fc = nn.Linear(cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM, cfg.MODEL.RELATION_NMS.APPEARANCE_FEAT_DIM, bias=True)
        self.target_thresh = cfg.MODEL.RELATION_NMS.THREAD
        self.geo_feature_dim = cfg.MODEL.RELATION_NMS.GEO_FEAT_DIM
        if cfg.MODEL.RELATION_NMS.USE_IOU:
            # Extend the geometric embedding by one extra quarter for IoU.
            self.geo_feature_dim = int(self.geo_feature_dim/4 * 5)
        self.relation_module = RelationModule(cfg.MODEL.RELATION_NMS.APPEARANCE_FEAT_DIM,
                                              geo_feature_dim=self.geo_feature_dim,
                                              fc_dim= (self.geo_feature_dim, 16),
                                              group=cfg.MODEL.RELATION_NMS.GROUP,
                                              dim=cfg.MODEL.RELATION_NMS.HID_DIM,
                                              topk = cfg.MODEL.RELATION_NMS.TOPK,
                                              iou_method= cfg.MODEL.RELATION_NMS.IOU_METHOD)
        # Class weights for the NMS / mean-teacher losses.
        self.nms_fg_weight = torch.tensor([1., cfg.MODEL.RELATION_NMS.WEIGHT])
        self.mt_fg_weight = torch.tensor([1.,10.])
        self.alpha = cfg.MODEL.RELATION_NMS.ALPHA
        self.gamma = cfg.MODEL.RELATION_NMS.GAMMA
        # Standard R-CNN box-regression weights.
        self.boxcoder = BoxCoder(weights=(10., 10., 5., 5.))
        self.class_agnostic = cfg.MODEL.RELATION_NMS.CLASS_AGNOSTIC
        self.fg_class = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES -1
        # One binary score per target IoU threshold.
        self.classifier = nn.Linear(128, len(self.target_thresh), bias= True)
        self.relu1 = nn.ReLU(inplace=True)
        self.fg_thread = cfg.MODEL.RELATION_NMS.FG_THREAD
        self.detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
        self.nms = cfg.MODEL.RELATION_NMS.POS_NMS
        self.nms_loss_type = cfg.MT.NMS_LOSS_TYPE
        # Teacher/student mode; set later via set_teacher_mode().
        self.mode = None
    def set_teacher_mode(self, mode):
        """Set self.mode (initialised to None in __init__)."""
        self.mode = mode
def forward(self, x):
appearance_feature, proposals, cls_score, box_reg, targets = x
self.device = appearance_feature.device
appearance_feature = appearance_feature
cls_score = cls_score
box_reg = box_reg
with torch.no_grad():
sorted_boxlists = self.prepare_ranking(cls_score,
box_reg,
proposals,
targets,
reg_iou=self.reg_iou)
# concate value from different images
boxes_per_image = [len(f) for f in proposals]
idxs = [f.get_field('sorted_idx') for f in sorted_boxlists]
scores = torch.cat([f.get_field('scores') for f in sorted_boxlists])
bboxes = torch.cat([f.bbox.reshape(-1,self.fg_class,4) for f in sorted_boxlists])
objectness = torch.cat([f.get_field('objectness').reshape(-1, self.fg_class) for f in sorted_boxlists])
all_scores = torch.cat([f.get_field('all_scores') for f in
sorted_boxlists])
# add iou information
image_sizes = [f.size for f in sorted_boxlists]
sorted_boxes_per_image = [[*f.shape][0] for f in idxs]
appearance_feature = self.roi_feat_embedding_fc(appearance_feature)
appearance_feature = appearance_feature.split(boxes_per_image, dim=0)
sorted_features = []
nms_rank_embedding = []
for id, feature, box_per_image in zip(idxs, appearance_feature, boxes_per_image):
feature = feature[id]
size = feature.size()
if size[0] <= self.first_n:
first_n = size[0]
else:
first_n = self.first_n
sorted_features.append(feature)
#[rank_dim * batch , feat_dim]
nms_rank_embedding.append( extract_rank_embedding(first_n, self.cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM, device = feature.device))
# [first_n * batchsize, num_fg_classes, 128]
sorted_features = torch.cat(sorted_features, dim= 0)
nms_rank_embedding = torch.cat(nms_rank_embedding, dim = 0)
nms_rank_embedding = self.nms_rank_fc(nms_rank_embedding)
sorted_features = sorted_features + nms_rank_embedding[:,None,:]
boxes_cls_1 = BoxList(bboxes[ :, 0, :], image_sizes[0])
boxes_cls_2 = BoxList(bboxes[ :, 1, :], image_sizes[0])
iou_1 = boxlist_iou(boxes_cls_1, boxes_cls_1)
iou_2 = boxlist_iou(boxes_cls_2, boxes_cls_2)
if self.cfg.MODEL.RELATION_NMS.USE_IOU:
iou = [iou_1, iou_2]
else:
iou = None
nms_position_matrix = extract_multi_position_matrix(bboxes, None, self.geo_feature_dim, 1000,
clswise = self.cfg.MODEL.RELATION_NMS.CLS_WISE_RELATION,)
nms_attention_1 = self.relation_module(sorted_features, nms_position_matrix, iou)
sorted_features = sorted_features + nms_attention_1
sorted_features = self.relu1(sorted_features)
# [first_n * num_fg_classes, 128]
sorted_features = sorted_features.view(-1, self.cfg.MODEL.RELATION_NMS.APPEARANCE_FEAT_DIM)
sorted_features = self.classifier(sorted_features)
# logit_reshape, [first_n, num_fg_classes, num_thread]
sorted_features = sorted_features.view(-1, self.fg_class, len(self.target_thresh))
if not self.reg_iou:
sorted_features = torch.sigmoid(sorted_features)
scores = torch.cat([scores[:,:,None]]*len(self.target_thresh), dim = -1)
loss_dict = {}
if self.training:
if self.reg_iou:
# when use regression donot do sorted_features = scores * sorted_features
reg_label = torch.cat([f.get_field('labels_iou_reg') for f in sorted_boxlists])
reg_label = reg_label.to(scores.device)
reg_label = reg_label.type(torch.cuda.FloatTensor)
sorted_features = sorted_features.to(scores.device)
sorted_features = sorted_features.type(torch.cuda.FloatTensor)
if reg_label.shape is not None:
reg_iou_loss = F.mse_loss(reg_label,sorted_features)
else:
reg_iou_loss = torch.tensor(0.).to(scores.device)
loss_dict['nms_loss'] = reg_iou_loss
else:
sorted_features = scores * sorted_features
labels = torch.cat([f.get_field('labels') for f in sorted_boxlists])
labels = labels.to(scores.device)
labels = labels.type(torch.cuda.FloatTensor)
# WEIGHTED NMS
nms_loss = F.binary_cross_entropy(scores*sorted_features,labels)
loss_dict['nms_loss']=nms_loss
return None, loss_dict
else:
input_scores = scores
if self.reg_iou:
scores = sorted_features* (scores>self.fg_thread).float()
else:
scores = sorted_features * scores
scores = self.merge_multi_thread_score_test(scores)
scores = scores.split(sorted_boxes_per_image, dim = 0)
bboxes = bboxes.split(sorted_boxes_per_image, dim = 0)
input_scores = input_scores.split(sorted_boxes_per_image, dim = 0)
objectness = objectness.split(sorted_boxes_per_image, dim = 0)
all_scores = all_scores.split(sorted_boxes_per_image,
dim = 0)
result = []
for i_score, score, bbox, obj, image_size, prob_boxhead \
in zip(
input_scores,
scores,
bboxes,
objectness,
image_sizes, all_scores):
result_per_image = []
# for nuclei
index = (score[:, 1] >= self.fg_thread).nonzero()[:,
0]
# cls_scores = i_score[index, i,0]
cls_scores = score[index, 1]
cls_scores_all = prob_boxhead[index, 1]
cls_boxes = bbox[index, 1, :]
cls_obj = obj[index, 1]
boxlist_for_class = BoxList(cls_boxes, image_size,
mode='xyxy')
boxlist_for_class.add_field('scores', cls_scores)
boxlist_for_class.add_field('objectness', cls_obj)
boxlist_for_class.add_field('all_scores',
cls_scores_all)
boxlist_for_class = boxlist_nms(
boxlist_for_class, 0.5, score_field="scores"
)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field("labels",
torch.full((
num_labels,),
2,
dtype=torch.int64).to(
self.device))
result_per_image.append(boxlist_for_class)
index = (score[:, 0 ] >=
self.fg_thread).nonzero()[:,0]
# cls_scores = i_score[index, i,0]
cls_scores = score[index,0]
# pdb.set_trace()
cls_scores_all = prob_boxhead[index, 0]
cls_boxes = bbox[index, 0, :]
cls_obj = obj[index, 0 ]
boxlist_for_class = BoxList(cls_boxes, image_size, mode='xyxy')
# Pos greedy NMS if POS_NMS!=-1
# boxlist_for_class.add_field('idx', index)
boxlist_for_class.add_field('scores', cls_scores)
boxlist_for_class.add_field('objectness', cls_obj)
boxlist_for_class.add_field('all_scores', cls_scores_all)
# pdb.set_trace()
if self.nms:
# for nuclei
boxlist_for_class = boxlist_nms(
boxlist_for_class, self.nms, score_field="scores"
)
# pdb.set_trace()
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field("labels",
torch.full((
num_labels,),
1,
dtype=torch.int64).to(self.device))
result_per_image.append(boxlist_for_class)
result_per_image = cat_boxlist(result_per_image)
number_of_detections = len(result_per_image)
# Limit to max_per_image detections **over all classes**
if number_of_detections > self.detections_per_img > 0:
cls_scores = result_per_image.get_field("scores")
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result_per_image = result_per_image[keep]
result.append(result_per_image)
return result, {}
def prepare_reg_label(self, sorted_boxes, sorted_score, targets):
'''
:param sorted_boxes: [ first n, fg_cls_num, 4]
:param indice: [first n, fg_cls_num]
:param sorted_score: [first n, fg_cls_num]
:param targets: Boxlist obj
:return: label [first n, num_thread * fg_cls_num]
'''
TO_REMOVE = 1
labels = targets.get_field('labels')
# output = np.zeros((sorted_boxes.shape[0].numpy(),))
# pdb.set_trace()
# output_list = []
output_reg_list = []
for i in range(self.fg_class):
cls_label_indice = torch.nonzero(labels == (i+1))
cls_target_bbox = targets.bbox[cls_label_indice[:,0]]
# todo: avoid None gt situation
num_valid_gt = len(cls_label_indice)
if num_valid_gt == 0:
output = np.zeros(([*sorted_boxes.shape][0],len(self.target_thresh)))
# output_reg = output.copy()
# output_list.append(output)
output_reg_list.append(output)
else:
output_list_per_class = []
output_reg_list_per_class = []
eye_matrix = np.eye(num_valid_gt)
score_per_class = sorted_score[:, i: i + 1].cpu().numpy()
boxes = sorted_boxes[:, i, :]
boxes = boxes.view(-1, 4)
area1 = (boxes[:, 2] - boxes[:, 0] + TO_REMOVE) * (boxes[:, 3] - boxes[:, 1] + TO_REMOVE)
area2 = (cls_target_bbox[:, 2] - cls_target_bbox[:, 0] + TO_REMOVE) * (cls_target_bbox[:, 3] - cls_target_bbox[:, 1] + TO_REMOVE)
lt = torch.max( boxes[:,None,:2],cls_target_bbox[:, :2]) # [N,M,2]
rb = torch.min( boxes[:,None,2:],cls_target_bbox[:, 2:]) # [N,M,2]
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
# [num_gt, first_n]
iou = inter / (area1[:, None] + area2 - inter)
iou = iou.cpu().numpy()
try:
for thresh in self.target_thresh:
# pdb.set_trace()
output_reg = np.max(iou, 1)
# todo: temp comment
overlap_mask = (iou > thresh)
overlap_iou = iou * overlap_mask
valid_bbox_indices = np.where(overlap_mask)[0]
overlap_score = np.tile(score_per_class, (1, num_valid_gt))
overlap_score *= overlap_mask
max_overlap_indices = np.argmax(iou, axis=1)
max_overlap_mask = eye_matrix[max_overlap_indices]
overlap_score *= max_overlap_mask
overlap_iou =overlap_iou * max_overlap_mask
max_score_indices = np.argmax(overlap_score, axis=0)
max_overlap_iou = overlap_iou[max_score_indices, np.arange(overlap_score.shape[1])]
# output = np.zeros(([*sorted_boxes.shape][0],))
output_reg = np.zeros(([*sorted_boxes.shape][0],))
output_idx, inter_1, inter_2 = np.intersect1d(max_score_indices, valid_bbox_indices,return_indices=True)
# output[output_idx] = 1
output_reg[output_idx] = max_overlap_iou[inter_1]
# output_list_per_class.append(output)
output_reg_list_per_class.append(output_reg)
except:
pdb.set_trace()
# output_per_class = np.stack(output_list_per_class, axis=-1)
output_reg_per_class =np.stack(output_reg_list_per_class, axis=-1)
# pdb.set_trace()
# output_list.append(output_per_class.view())
output_reg_list.append(output_reg_per_class)
# output = np.stack(output_list, axis=1).astype(np.float32, copy=False)
output_reg = np.stack(output_reg_list, axis=1).astype(np.float32, copy=False)
return output_reg
# return (output, output_reg)
def prepare_label(self, sorted_boxes, sorted_score, targets):
'''
:param sorted_boxes: [ first n, fg_cls_num, 4]
:param indice: [first n, fg_cls_num]
:param sorted_score: [first n, fg_cls_num]
:param targets: Boxlist obj
:return: label [first n, num_thread * fg_cls_num]
'''
TO_REMOVE = 1
labels = targets.get_field('labels')
# output = np.zeros((sorted_boxes.shape[0].numpy(),))
output_list = []
for i in range(self.fg_class):
cls_label_indice = torch.nonzero(labels == (i+1))
cls_target_bbox = targets.bbox[cls_label_indice[:,0]]
# todo: avoid None gt situation
num_valid_gt = len(cls_label_indice)
if num_valid_gt == 0:
output = np.zeros(([*sorted_boxes.shape][0],len(self.target_thresh)))
output_list.append(output)
else:
output_list_per_class = []
eye_matrix = np.eye(num_valid_gt)
score_per_class = sorted_score[:, i: i + 1].cpu().numpy()
boxes = sorted_boxes[:, i, :]
boxes = boxes.view(-1, 4)
area1 = (boxes[:, 2] - boxes[:, 0] + TO_REMOVE) * (boxes[:, 3] - boxes[:, 1] + TO_REMOVE)
area2 = (cls_target_bbox[:, 2] - cls_target_bbox[:, 0] + TO_REMOVE) * (cls_target_bbox[:, 3] - cls_target_bbox[:, 1] + TO_REMOVE)
lt = torch.max( boxes[:,None,:2],cls_target_bbox[:, :2]) # [N,M,2]
rb = torch.min( boxes[:,None,2:],cls_target_bbox[:, 2:]) # [N,M,2]
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
# [num_gt, first_n]
iou = inter / (area1[:, None] + area2 - inter)
iou = iou.cpu().numpy()
for thresh in self.target_thresh:
overlap_mask = (iou > thresh)
valid_bbox_indices = np.where(overlap_mask)[0]
overlap_score = np.tile(score_per_class, (1, num_valid_gt))
overlap_score *= overlap_mask
max_overlap_indices = np.argmax(iou, axis=1)
max_overlap_mask = eye_matrix[max_overlap_indices]
overlap_score *= max_overlap_mask
max_score_indices = np.argmax(overlap_score, axis=0)
output = np.zeros(([*sorted_boxes.shape][0],))
output[np.intersect1d(max_score_indices, valid_bbox_indices)] = 1
output_list_per_class.append(output)
output_per_class = np.stack(output_list_per_class, axis=-1)
output_list.append(output_per_class)
output = np.stack(output_list, axis=1).astype(np.float32, copy=False)
return output
def prepare_ranking(self, cls_score, box_regression,
proposals, targets, reg_iou = False):
'''
:param score:[num_per_img*batchsize, class]
:param proposal: list of boxlist
:return:
'''
# if is not train, targets is None which should be set into a none list
boxes_per_image = [len(box) for box in proposals]
concat_boxes = torch.cat([a.bbox for a in proposals], dim=0)
image_shapes = [box.size for box in proposals]
objectness = [f.get_field('objectness') for f in proposals]
proposals = self.boxcoder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
proposals = proposals.split(boxes_per_image, dim=0)
cls_score = cls_score.split(boxes_per_image, dim=0)
results = []
if self.training:
# if idx_t is None:
for prob, boxes_per_img, image_shape, target, obj in zip(
cls_score, proposals, image_shapes, targets, objectness):
boxlist = self.filter_results(boxes_per_img,
target, prob,
image_shape,
self.fg_class+1, obj, reg_iou)
results.append(boxlist)
else:
# test do not have target
for prob, boxes_per_img, image_shape, obj in zip(
cls_score, proposals, image_shapes, objectness
):
boxlist = self.filter_results(boxes_per_img, None, prob, image_shape, self.fg_class+1, obj,reg_iou=reg_iou)
results.append(boxlist)
return results
def filter_results(self, boxes, targets, scores, image_shape ,
num_classes , obj, reg_iou = False):
"""return the sorted boxlist and sorted idx
"""
# unwrap the boxlist to avoid additional overhead.
# if we had multi-class NMS, we could perform this directly on the boxlist
# boxes = boxlist.bbox.reshape(-1, num_classes * 4)
#[n_roi, 4, cls]
# boxes = boxlist.bbox.reshape(-1, 4, num_classes)
boxes = boxes.reshape(-1, 4 * num_classes)
scores = scores.reshape(-1, num_classes)
# pdb.set_trace()
if scores.shape[0] == 0:
pdb.set_trace()
cat_boxes = []
for j in range(1,num_classes):
# skip class 0, because it is the background class
cls_boxes = boxes[:, j* 4 : (j + 1) * 4]
cat_boxes.append(cls_boxes)
boxes = torch.cat([bbox[:,:,None] for bbox in cat_boxes], dim = 2)
# scores = torch.cat([s for s in cat_score])
scores = scores[:,1:]
ori_scores = scores
num_roi = boxes.shape[0]
if num_roi<= self.first_n:
first_n = num_roi
# pdb.set_trace()
else:
first_n = self.first_n
sorted_scores, indices = torch.topk(scores, first_n, dim= 0, largest = True, sorted = True)
if obj.shape[0]<first_n:
indices = indices[:obj.shape[0]]
sorted_scores = sorted_scores[:obj.shape[0]]
if indices.shape[1] !=2:
pdb.set_trace()
cp_s = ori_scores.clone().cpu().numpy()
cp_o = obj.clone().cpu().numpy()
box = boxes.clone().cpu().numpy()
ori_scores = ori_scores[indices]
sorted_obj = obj[indices]
sorted_boxes = boxes[indices]
if sorted_boxes.shape[0] == 0:
pdb.set_trace()
if self.class_agnostic:
# [first_n, num_fg_class, 4]
sorted_boxes = torch.squeeze(sorted_boxes, dim = -1)
else:
try:
mask = torch.arange(0, num_classes - 1).to(device=self.device)
except:
pdb.set_trace()
try:
mask = mask.view(1,-1,1,1).expand(first_n, num_classes-1, 4 , 1)
except:
pdb.set_trace()
sorted_boxes = torch.gather(sorted_boxes, dim = 3, index = mask).squeeze(dim = 3)
if self.training:
labels = self.prepare_label(sorted_boxes,
sorted_scores, targets)
labels_cls = torch.from_numpy(labels).to(sorted_scores.device)
if reg_iou:
labels_reg = self.prepare_reg_label(sorted_boxes, sorted_scores,
targets)
labels_reg = torch.from_numpy(labels_reg).to(sorted_scores.device)
sorted_boxes = sorted_boxes.view(first_n * (num_classes-1), -1)
sorted_obj = sorted_obj.view(first_n * (num_classes-1))
boxlist = BoxList(sorted_boxes, image_shape , mode="xyxy",)
boxlist.add_field('sorted_idx', indices)
boxlist.add_field('objectness', sorted_obj)
boxlist.extra_fields['scores'] =sorted_scores
boxlist.extra_fields["all_scores"] = ori_scores
# boxlist.extra_fields[""]
if self.training:
if reg_iou:
boxlist.extra_fields['labels_iou_reg'] = labels_reg
else:
boxlist.extra_fields['labels'] = labels_cls
boxlist = boxlist.clip_to_image(remove_empty=False)
return boxlist
def merge_multi_thread_score_test(self, scores):
if self.cfg.MODEL.RELATION_NMS.MERGE_METHOD == -1:
scores = torch.mean(scores, -1)
elif self.cfg.MODEL.RELATION_NMS.MERGE_METHOD == -2:
scores = torch.max(scores, -1)
else:
idx = self.cfg.MODEL.RELATION_NMS.MERGE_METHOD
idx = min(max(idx, 0), len(self.target_thresh))
scores = scores[:, :, idx]
return scores
### help function ###
def extract_rank_embedding(rank_dim, feat_dim, wave_length=1000, device = 'cpu'):
    """Build a sinusoidal embedding for the ranks 0 .. rank_dim-1.

    Each rank is encoded with feat_dim/2 sine channels followed by
    feat_dim/2 cosine channels whose frequencies decay geometrically with
    base ``wave_length`` (the Transformer positional-encoding scheme).

    Args:
        rank_dim: number of ranks to embed.
        feat_dim: embedding dimensionality (split evenly between sin/cos).
        wave_length: base of the frequency progression.
        device: device the result is placed on.

    Returns:
        Tensor of shape [rank_dim, feat_dim].
    """
    half = feat_dim / 2
    # Inverse frequencies: wave_length ** -(k / half) for k = 0 .. half-1.
    freq_idx = torch.arange(half).to(device)
    inv_freq = 1. / torch.pow(wave_length, freq_idx / half)
    # Outer product of ranks and inverse frequencies -> [rank_dim, half].
    ranks = torch.arange(0, rank_dim).to(device).float()
    phases = ranks.view(-1, 1) * inv_freq.view(1, -1)
    embedding = torch.cat((torch.sin(phases), torch.cos(phases)), -1)
    return embedding.to(device)
def extract_multi_position_matrix(boxes,iou, dim_g, wave_len, clswise = False):
if iou is not None:
iou = torch.cat([iou[0][None,:,:], iou[1][None,:,:]], 0)[:,:,:,None]
boxes = boxes.permute(1, 0, 2)
if clswise:
# [cls * nroi, 1, 4]
boxes = boxes.reshape(1, -1, 4)
x_min, y_min, x_max, y_max = torch.chunk(boxes, 4, dim=2)
cx = (x_min + x_max) * 0.5
cy = (y_min + y_max) * 0.5
w = (x_max - x_min) + 1.
h = (y_max - y_min) + 1.
delta_x = cx - cx.permute(0,2,1)
delta_x = torch.clamp(torch.abs(delta_x/ w), min = 1e-3)
delta_x = torch.log(delta_x)
delta_y = cy - cy.permute(0,2,1)
delta_y = torch.clamp(torch.abs(delta_y/ h), min = 1e-3)
delta_y = torch.log(delta_y)
delta_w = w / w.permute(0,2,1)
delta_w = torch.log(delta_w)
delta_h = h / h.permute(0,2,1)
delta_h = torch.log(delta_h)
size = delta_h.size()
delta_x = delta_x.view(size[0], size[1],size[2], 1)
delta_y = delta_y.view(size[0], size[1],size[2], 1)
delta_w = delta_w.view(size[0], size[1],size[2], 1)
delta_h = delta_h.view(size[0], size[1],size[2], 1)
# clsn, nrio, nrio, 4
if iou is not None:
position_mat = torch.cat((delta_x, delta_y, delta_w, delta_h, iou), -1)
else:
position_mat = torch.cat((delta_x, delta_y, delta_w, delta_h), -1)
dev = 10 if iou is not None else 8
# pdb.set_trace()
feat_range = torch.arange(dim_g / dev).to(boxes.device)
dim_mat = feat_range / (dim_g / dev)
dim_mat = 1. / (torch.pow(wave_len, dim_mat))
dim_mat = dim_mat.view(1, 1, 1, 1,-1)
if iou is not None:
position_mat = position_mat.view(size[0], size[1], size[2], 5, -1)
else:
position_mat = position_mat.view(size[0], size[1], size[2], 4, -1)
position_mat = 100. * position_mat
mul_mat = position_mat * dim_mat
mul_mat = mul_mat.view(size[0], size[1], size[2],-1)
sin_mat = torch.sin(mul_mat)
cos_mat = torch.cos(mul_mat)
embedding = torch.cat((sin_mat, cos_mat), -1)
return embedding
| [
"torch.nn.functional.binary_cross_entropy",
"maskrcnn_benchmark.structures.bounding_box.BoxList",
"torch.bmm",
"numpy.argmax",
"maskrcnn_benchmark.structures.boxlist_ops.boxlist_iou",
"torch.cat",
"torch.full",
"torch.cos",
"numpy.arange",
"torch.arange",
"numpy.tile",
"torch.no_grad",
"torc... | [((30157, 30175), 'torch.sin', 'torch.sin', (['mul_mat'], {}), '(mul_mat)\n', (30166, 30175), False, 'import torch\n'), ((30190, 30208), 'torch.cos', 'torch.cos', (['mul_mat'], {}), '(mul_mat)\n', (30199, 30208), False, 'import torch\n'), ((30225, 30258), 'torch.cat', 'torch.cat', (['(sin_mat, cos_mat)', '(-1)'], {}), '((sin_mat, cos_mat), -1)\n', (30234, 30258), False, 'import torch\n'), ((30654, 30682), 'torch.chunk', 'torch.chunk', (['boxes', '(4)'], {'dim': '(2)'}), '(boxes, 4, dim=2)\n', (30665, 30682), False, 'import torch\n'), ((30916, 30934), 'torch.log', 'torch.log', (['delta_x'], {}), '(delta_x)\n', (30925, 30934), False, 'import torch\n'), ((31048, 31066), 'torch.log', 'torch.log', (['delta_y'], {}), '(delta_y)\n', (31057, 31066), False, 'import torch\n'), ((31117, 31135), 'torch.log', 'torch.log', (['delta_w'], {}), '(delta_w)\n', (31126, 31135), False, 'import torch\n'), ((31186, 31204), 'torch.log', 'torch.log', (['delta_h'], {}), '(delta_h)\n', (31195, 31204), False, 'import torch\n'), ((32258, 32276), 'torch.sin', 'torch.sin', (['mul_mat'], {}), '(mul_mat)\n', (32267, 32276), False, 'import torch\n'), ((32291, 32309), 'torch.cos', 'torch.cos', (['mul_mat'], {}), '(mul_mat)\n', (32300, 32309), False, 'import torch\n'), ((32326, 32359), 'torch.cat', 'torch.cat', (['(sin_mat, cos_mat)', '(-1)'], {}), '((sin_mat, cos_mat), -1)\n', (32335, 32359), False, 'import torch\n'), ((949, 997), 'torch.nn.Linear', 'nn.Linear', (['geo_feature_dim', 'fc_dim[1]'], {'bias': '(True)'}), '(geo_feature_dim, fc_dim[1], bias=True)\n', (958, 997), True, 'import torch.nn as nn\n'), ((1037, 1089), 'torch.nn.Linear', 'nn.Linear', (['appearance_feature_dim', 'dim[1]'], {'bias': '(True)'}), '(appearance_feature_dim, dim[1], bias=True)\n', (1046, 1089), True, 'import torch.nn as nn\n'), ((1108, 1160), 'torch.nn.Linear', 'nn.Linear', (['appearance_feature_dim', 'dim[0]'], {'bias': '(True)'}), '(appearance_feature_dim, dim[0], bias=True)\n', (1117, 1160), True, 'import 
torch.nn as nn\n'), ((1263, 1284), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1270, 1284), True, 'import torch.nn as nn\n'), ((1306, 1381), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.fc_dim[1] * appearance_feature_dim)', 'dim[2]', '(1)'], {'groups': 'group'}), '(self.fc_dim[1] * appearance_feature_dim, dim[2], 1, groups=group)\n', (1315, 1381), True, 'import torch.nn as nn\n'), ((3710, 3767), 'torch.topk', 'torch.topk', (['w_mn', 'top_k'], {'dim': '(2)', 'largest': '(True)', 'sorted': '(True)'}), '(w_mn, top_k, dim=2, largest=True, sorted=True)\n', (3720, 3767), False, 'import torch\n'), ((3881, 3908), 'torch.nn.functional.softmax', 'F.softmax', (['w_mn_topk'], {'dim': '(2)'}), '(w_mn_topk, dim=2)\n', (3890, 3908), True, 'from torch.nn import functional as F\n'), ((4039, 4064), 'torch.bmm', 'torch.bmm', (['w_mn_topk', 'f_a'], {}), '(w_mn_topk, f_a)\n', (4048, 4064), False, 'import torch\n'), ((5036, 5142), 'torch.nn.Linear', 'nn.Linear', (['cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM', 'cfg.MODEL.RELATION_NMS.APPEARANCE_FEAT_DIM'], {'bias': '(True)'}), '(cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM, cfg.MODEL.RELATION_NMS.\n APPEARANCE_FEAT_DIM, bias=True)\n', (5045, 5142), True, 'import torch.nn as nn\n'), ((5175, 5281), 'torch.nn.Linear', 'nn.Linear', (['cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM', 'cfg.MODEL.RELATION_NMS.APPEARANCE_FEAT_DIM'], {'bias': '(True)'}), '(cfg.MODEL.RELATION_NMS.ROI_FEAT_DIM, cfg.MODEL.RELATION_NMS.\n APPEARANCE_FEAT_DIM, bias=True)\n', (5184, 5281), True, 'import torch.nn as nn\n'), ((6163, 6213), 'torch.tensor', 'torch.tensor', (['[1.0, cfg.MODEL.RELATION_NMS.WEIGHT]'], {}), '([1.0, cfg.MODEL.RELATION_NMS.WEIGHT])\n', (6175, 6213), False, 'import torch\n'), ((6241, 6266), 'torch.tensor', 'torch.tensor', (['[1.0, 10.0]'], {}), '([1.0, 10.0])\n', (6253, 6266), False, 'import torch\n'), ((6388, 6428), 'maskrcnn_benchmark.modeling.box_coder.BoxCoder', 'BoxCoder', ([], {'weights': '(10.0, 10.0, 5.0, 5.0)'}), '(weights=(10.0, 
10.0, 5.0, 5.0))\n', (6396, 6428), False, 'from maskrcnn_benchmark.modeling.box_coder import BoxCoder\n'), ((6655, 6676), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6662, 6676), True, 'import torch.nn as nn\n'), ((9102, 9135), 'torch.cat', 'torch.cat', (['sorted_features'], {'dim': '(0)'}), '(sorted_features, dim=0)\n', (9111, 9135), False, 'import torch\n'), ((9166, 9202), 'torch.cat', 'torch.cat', (['nms_rank_embedding'], {'dim': '(0)'}), '(nms_rank_embedding, dim=0)\n', (9175, 9202), False, 'import torch\n'), ((9367, 9407), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['bboxes[:, 0, :]', 'image_sizes[0]'], {}), '(bboxes[:, 0, :], image_sizes[0])\n', (9374, 9407), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((9431, 9471), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['bboxes[:, 1, :]', 'image_sizes[0]'], {}), '(bboxes[:, 1, :], image_sizes[0])\n', (9438, 9471), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((9489, 9526), 'maskrcnn_benchmark.structures.boxlist_ops.boxlist_iou', 'boxlist_iou', (['boxes_cls_1', 'boxes_cls_1'], {}), '(boxes_cls_1, boxes_cls_1)\n', (9500, 9526), False, 'from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, boxlist_iou\n'), ((9543, 9580), 'maskrcnn_benchmark.structures.boxlist_ops.boxlist_iou', 'boxlist_iou', (['boxes_cls_2', 'boxes_cls_2'], {}), '(boxes_cls_2, boxes_cls_2)\n', (9554, 9580), False, 'from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, boxlist_iou\n'), ((24182, 24227), 'torch.cat', 'torch.cat', (['[a.bbox for a in proposals]'], {'dim': '(0)'}), '([a.bbox for a in proposals], dim=0)\n', (24191, 24227), False, 'import torch\n'), ((26369, 26427), 'torch.cat', 'torch.cat', (['[bbox[:, :, None] for bbox in cat_boxes]'], {'dim': '(2)'}), '([bbox[:, :, None] for bbox in cat_boxes], dim=2)\n', (26378, 26427), False, 'import torch\n'), ((26753, 26814), 'torch.topk', 
'torch.topk', (['scores', 'first_n'], {'dim': '(0)', 'largest': '(True)', 'sorted': '(True)'}), '(scores, first_n, dim=0, largest=True, sorted=True)\n', (26763, 26814), False, 'import torch\n'), ((28483, 28530), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['sorted_boxes', 'image_shape'], {'mode': '"""xyxy"""'}), "(sorted_boxes, image_shape, mode='xyxy')\n", (28490, 28530), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((30005, 30036), 'torch.pow', 'torch.pow', (['wave_length', 'dim_mat'], {}), '(wave_length, dim_mat)\n', (30014, 30036), False, 'import torch\n'), ((30867, 30889), 'torch.abs', 'torch.abs', (['(delta_x / w)'], {}), '(delta_x / w)\n', (30876, 30889), False, 'import torch\n'), ((30999, 31021), 'torch.abs', 'torch.abs', (['(delta_y / h)'], {}), '(delta_y / h)\n', (31008, 31021), False, 'import torch\n'), ((31531, 31587), 'torch.cat', 'torch.cat', (['(delta_x, delta_y, delta_w, delta_h, iou)', '(-1)'], {}), '((delta_x, delta_y, delta_w, delta_h, iou), -1)\n', (31540, 31587), False, 'import torch\n'), ((31621, 31672), 'torch.cat', 'torch.cat', (['(delta_x, delta_y, delta_w, delta_h)', '(-1)'], {}), '((delta_x, delta_y, delta_w, delta_h), -1)\n', (31630, 31672), False, 'import torch\n'), ((31855, 31883), 'torch.pow', 'torch.pow', (['wave_len', 'dim_mat'], {}), '(wave_len, dim_mat)\n', (31864, 31883), False, 'import torch\n'), ((2735, 2754), 'torch.bmm', 'torch.bmm', (['w_q', 'w_k'], {}), '(w_q, w_k)\n', (2744, 2754), False, 'import torch\n'), ((3050, 3101), 'torch.cat', 'torch.cat', (['[iou[0][None, :, :], iou[1][None, :, :]]'], {}), '([iou[0][None, :, :], iou[1][None, :, :]])\n', (3059, 3101), False, 'import torch\n'), ((7261, 7276), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7274, 7276), False, 'import torch\n'), ((10538, 10568), 'torch.sigmoid', 'torch.sigmoid', (['sorted_features'], {}), '(sorted_features)\n', (10551, 10568), False, 'import torch\n'), ((17300, 17330), 'torch.nonzero', 
'torch.nonzero', (['(labels == i + 1)'], {}), '(labels == i + 1)\n', (17313, 17330), False, 'import torch\n'), ((21451, 21481), 'torch.nonzero', 'torch.nonzero', (['(labels == i + 1)'], {}), '(labels == i + 1)\n', (21464, 21481), False, 'import torch\n'), ((26119, 26134), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (26132, 26134), False, 'import pdb\n'), ((27001, 27016), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (27014, 27016), False, 'import pdb\n'), ((27315, 27330), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (27328, 27330), False, 'import pdb\n'), ((27432, 27467), 'torch.squeeze', 'torch.squeeze', (['sorted_boxes'], {'dim': '(-1)'}), '(sorted_boxes, dim=-1)\n', (27445, 27467), False, 'import torch\n'), ((29193, 29215), 'torch.mean', 'torch.mean', (['scores', '(-1)'], {}), '(scores, -1)\n', (29203, 29215), False, 'import torch\n'), ((29905, 29931), 'torch.arange', 'torch.arange', (['(feat_dim / 2)'], {}), '(feat_dim / 2)\n', (29917, 29931), False, 'import torch\n'), ((30436, 30490), 'torch.cat', 'torch.cat', (['[iou[0][None, :, :], iou[1][None, :, :]]', '(0)'], {}), '([iou[0][None, :, :], iou[1][None, :, :]], 0)\n', (30445, 30490), False, 'import torch\n'), ((31751, 31776), 'torch.arange', 'torch.arange', (['(dim_g / dev)'], {}), '(dim_g / dev)\n', (31763, 31776), False, 'import torch\n'), ((3167, 3185), 'torch.log', 'torch.log', (['(iou + 1)'], {}), '(iou + 1)\n', (3176, 3185), False, 'import torch\n'), ((3822, 3844), 'torch.zeros_like', 'torch.zeros_like', (['w_mn'], {}), '(w_mn)\n', (3838, 3844), False, 'import torch\n'), ((11783, 11839), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['(scores * sorted_features)', 'labels'], {}), '(scores * sorted_features, labels)\n', (11805, 11839), True, 'from torch.nn import functional as F\n'), ((13503, 13546), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['cls_boxes', 'image_size'], {'mode': '"""xyxy"""'}), "(cls_boxes, image_size, mode='xyxy')\n", 
(13510, 13546), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((13879, 13936), 'maskrcnn_benchmark.structures.boxlist_ops.boxlist_nms', 'boxlist_nms', (['boxlist_for_class', '(0.5)'], {'score_field': '"""scores"""'}), "(boxlist_for_class, 0.5, score_field='scores')\n", (13890, 13936), False, 'from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms\n'), ((14850, 14893), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['cls_boxes', 'image_size'], {'mode': '"""xyxy"""'}), "(cls_boxes, image_size, mode='xyxy')\n", (14857, 14893), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((15942, 15971), 'maskrcnn_benchmark.structures.boxlist_ops.cat_boxlist', 'cat_boxlist', (['result_per_image'], {}), '(result_per_image)\n', (15953, 15971), False, 'from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, boxlist_iou\n'), ((17887, 17907), 'numpy.eye', 'np.eye', (['num_valid_gt'], {}), '(num_valid_gt)\n', (17893, 17907), True, 'import numpy as np\n'), ((18343, 18396), 'torch.max', 'torch.max', (['boxes[:, None, :2]', 'cls_target_bbox[:, :2]'], {}), '(boxes[:, None, :2], cls_target_bbox[:, :2])\n', (18352, 18396), False, 'import torch\n'), ((18427, 18480), 'torch.min', 'torch.min', (['boxes[:, None, 2:]', 'cls_target_bbox[:, 2:]'], {}), '(boxes[:, None, 2:], cls_target_bbox[:, 2:])\n', (18436, 18480), False, 'import torch\n'), ((20454, 20498), 'numpy.stack', 'np.stack', (['output_reg_list_per_class'], {'axis': '(-1)'}), '(output_reg_list_per_class, axis=-1)\n', (20462, 20498), True, 'import numpy as np\n'), ((20759, 20792), 'numpy.stack', 'np.stack', (['output_reg_list'], {'axis': '(1)'}), '(output_reg_list, axis=1)\n', (20767, 20792), True, 'import numpy as np\n'), ((21897, 21917), 'numpy.eye', 'np.eye', (['num_valid_gt'], {}), '(num_valid_gt)\n', (21903, 21917), True, 'import numpy as np\n'), ((22353, 22406), 'torch.max', 'torch.max', (['boxes[:, None, :2]', 'cls_target_bbox[:, :2]'], 
{}), '(boxes[:, None, :2], cls_target_bbox[:, :2])\n', (22362, 22406), False, 'import torch\n'), ((22437, 22490), 'torch.min', 'torch.min', (['boxes[:, None, 2:]', 'cls_target_bbox[:, 2:]'], {}), '(boxes[:, None, 2:], cls_target_bbox[:, 2:])\n', (22446, 22490), False, 'import torch\n'), ((23571, 23611), 'numpy.stack', 'np.stack', (['output_list_per_class'], {'axis': '(-1)'}), '(output_list_per_class, axis=-1)\n', (23579, 23611), True, 'import numpy as np\n'), ((23683, 23712), 'numpy.stack', 'np.stack', (['output_list'], {'axis': '(1)'}), '(output_list, axis=1)\n', (23691, 23712), True, 'import numpy as np\n'), ((29298, 29319), 'torch.max', 'torch.max', (['scores', '(-1)'], {}), '(scores, -1)\n', (29307, 29319), False, 'import torch\n'), ((3613, 3640), 'torch.clamp', 'torch.clamp', (['w_g'], {'min': '(1e-06)'}), '(w_g, min=1e-06)\n', (3624, 3640), False, 'import torch\n'), ((11267, 11305), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['reg_label', 'sorted_features'], {}), '(reg_label, sorted_features)\n', (11277, 11305), True, 'from torch.nn import functional as F\n'), ((15345, 15407), 'maskrcnn_benchmark.structures.boxlist_ops.boxlist_nms', 'boxlist_nms', (['boxlist_for_class', 'self.nms'], {'score_field': '"""scores"""'}), "(boxlist_for_class, self.nms, score_field='scores')\n", (15356, 15407), False, 'from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms\n'), ((22969, 23012), 'numpy.tile', 'np.tile', (['score_per_class', '(1, num_valid_gt)'], {}), '(score_per_class, (1, num_valid_gt))\n', (22976, 23012), True, 'import numpy as np\n'), ((23105, 23127), 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(1)'}), '(iou, axis=1)\n', (23114, 23127), True, 'import numpy as np\n'), ((23293, 23325), 'numpy.argmax', 'np.argmax', (['overlap_score'], {'axis': '(0)'}), '(overlap_score, axis=0)\n', (23302, 23325), True, 'import numpy as np\n'), ((23355, 23392), 'numpy.zeros', 'np.zeros', (['([*sorted_boxes.shape][0],)'], {}), '(([*sorted_boxes.shape][0],))\n', 
(23363, 23392), True, 'import numpy as np\n'), ((27616, 27631), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (27629, 27631), False, 'import pdb\n'), ((27766, 27781), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (27779, 27781), False, 'import pdb\n'), ((27809, 27854), 'torch.gather', 'torch.gather', (['sorted_boxes'], {'dim': '(3)', 'index': 'mask'}), '(sorted_boxes, dim=3, index=mask)\n', (27821, 27854), False, 'import torch\n'), ((28045, 28069), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (28061, 28069), False, 'import torch\n'), ((29843, 29868), 'torch.arange', 'torch.arange', (['(0)', 'rank_dim'], {}), '(0, rank_dim)\n', (29855, 29868), False, 'import torch\n'), ((3263, 3290), 'torch.clamp', 'torch.clamp', (['iou'], {'min': '(1e-06)'}), '(iou, min=1e-06)\n', (3274, 3290), False, 'import torch\n'), ((3382, 3409), 'torch.clamp', 'torch.clamp', (['iou'], {'min': '(1e-06)'}), '(iou, min=1e-06)\n', (3393, 3409), False, 'import torch\n'), ((3524, 3551), 'torch.clamp', 'torch.clamp', (['w_g'], {'min': '(1e-06)'}), '(w_g, min=1e-06)\n', (3535, 3551), False, 'import torch\n'), ((18909, 18923), 'numpy.max', 'np.max', (['iou', '(1)'], {}), '(iou, 1)\n', (18915, 18923), True, 'import numpy as np\n'), ((19191, 19234), 'numpy.tile', 'np.tile', (['score_per_class', '(1, num_valid_gt)'], {}), '(score_per_class, (1, num_valid_gt))\n', (19198, 19234), True, 'import numpy as np\n'), ((19335, 19357), 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(1)'}), '(iou, axis=1)\n', (19344, 19357), True, 'import numpy as np\n'), ((19603, 19635), 'numpy.argmax', 'np.argmax', (['overlap_score'], {'axis': '(0)'}), '(overlap_score, axis=0)\n', (19612, 19635), True, 'import numpy as np\n'), ((19854, 19891), 'numpy.zeros', 'np.zeros', (['([*sorted_boxes.shape][0],)'], {}), '(([*sorted_boxes.shape][0],))\n', (19862, 19891), True, 'import numpy as np\n'), ((19948, 20022), 'numpy.intersect1d', 'np.intersect1d', (['max_score_indices', 'valid_bbox_indices'], 
{'return_indices': '(True)'}), '(max_score_indices, valid_bbox_indices, return_indices=True)\n', (19962, 20022), True, 'import numpy as np\n'), ((20321, 20336), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (20334, 20336), False, 'import pdb\n'), ((22907, 22929), 'numpy.where', 'np.where', (['overlap_mask'], {}), '(overlap_mask)\n', (22915, 22929), True, 'import numpy as np\n'), ((23420, 23473), 'numpy.intersect1d', 'np.intersect1d', (['max_score_indices', 'valid_bbox_indices'], {}), '(max_score_indices, valid_bbox_indices)\n', (23434, 23473), True, 'import numpy as np\n'), ((27524, 27556), 'torch.arange', 'torch.arange', (['(0)', '(num_classes - 1)'], {}), '(0, num_classes - 1)\n', (27536, 27556), False, 'import torch\n'), ((28275, 28303), 'torch.from_numpy', 'torch.from_numpy', (['labels_reg'], {}), '(labels_reg)\n', (28291, 28303), False, 'import torch\n'), ((11362, 11379), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (11374, 11379), False, 'import torch\n'), ((14125, 14172), 'torch.full', 'torch.full', (['(num_labels,)', '(2)'], {'dtype': 'torch.int64'}), '((num_labels,), 2, dtype=torch.int64)\n', (14135, 14172), False, 'import torch\n'), ((15638, 15685), 'torch.full', 'torch.full', (['(num_labels,)', '(1)'], {'dtype': 'torch.int64'}), '((num_labels,), 1, dtype=torch.int64)\n', (15648, 15685), False, 'import torch\n'), ((16505, 16524), 'torch.nonzero', 'torch.nonzero', (['keep'], {}), '(keep)\n', (16518, 16524), False, 'import torch\n'), ((19125, 19147), 'numpy.where', 'np.where', (['overlap_mask'], {}), '(overlap_mask)\n', (19133, 19147), True, 'import numpy as np\n'), ((19709, 19742), 'numpy.arange', 'np.arange', (['overlap_score.shape[1]'], {}), '(overlap_score.shape[1])\n', (19718, 19742), True, 'import numpy as np\n')] |
import json
import os
import numpy as np
from absl import flags, app
from sklearn.metrics import (
confusion_matrix,
f1_score,
precision_recall_fscore_support,
accuracy_score,
)
import seaborn as sns
import matplotlib.pyplot as plt
from dataloader.audio import EmotionDataset
# This scoring assumes only one emotion per file in the reference
# Command-line interface. `ref`/`pred` point either at single files or at
# filelists depending on --single; `output` is where metrics are written.
flags.DEFINE_string("ref", None, "Path to reference emotions")
flags.DEFINE_string("pred", None, "Path to predicted emotions")
# Fixed copy-pasted help text: this flag is the output directory, not a
# predictions path.
flags.DEFINE_string("output", None, "Directory to write scoring outputs")
flags.DEFINE_string("emotion_set_path", None, "path to emotion set")
flags.DEFINE_boolean("single", False, "Score only a single file")
flags.DEFINE_list("actor_ignore", [], "actors to ignore in scoring")
FLAGS = flags.FLAGS
flags.mark_flag_as_required("ref")
flags.mark_flag_as_required("pred")
flags.mark_flag_as_required("output")
flags.mark_flag_as_required("emotion_set_path")
def get_stats(refs, preds, emotion2id, acc_type):
    """Compute, print and persist classification metrics.

    :param refs: List of reference emotion class ids
    :param preds: List of predicted emotion class ids
    :param emotion2id: Mapping from emotion name to class id; per-class
        metrics are reported in this mapping's order.
    :param acc_type: String used in the confusion-matrix file names
    :return: Dict with overall accuracy and per-emotion precision/recall/f1
    """
    # Pin the label order explicitly so the per-class arrays line up with
    # emotion2id.keys() even when some classes never occur in refs/preds
    # (sklearn otherwise only reports labels that are present).
    label_ids = list(emotion2id.values())
    cm = confusion_matrix(refs, preds, labels=label_ids)
    f1_scores = f1_score(refs, preds, labels=label_ids, average=None)
    precisions, recalls, _, _ = precision_recall_fscore_support(
        refs, preds, labels=label_ids, average=None)
    acc = accuracy_score(refs, preds)
    # Report which granularity this accuracy belongs to instead of always
    # claiming "Frame-wise" (this function is also called file-wise).
    print(f"{acc_type} accuracy: {acc:.4f}")
    results = {
        "accuracy": acc,
    }
    for f1, precision, recall, emotion in zip(f1_scores, precisions, recalls,
                                              emotion2id.keys()):
        print(f"{emotion}, precision={precision:.3f}, recall={recall:.3f}, f1={f1:.3f}")
        results[emotion] = {
            "precision": precision,
            "recall": recall,
            "f1": f1,
        }
    print(cm)
    np.savetxt(f"{FLAGS.output}/confusion_matrix_{acc_type}.txt", cm)
    np.save(f"{FLAGS.output}/confusion_matrix_{acc_type}.npy", cm)
    fig, ax = plt.subplots()
    sns.heatmap(cm, annot=False, ax=ax)
    ax.set_xlabel("Predicted labels")
    ax.set_ylabel("True labels")
    ax.set_title("Confusion Matrix")
    plt.savefig(f"{FLAGS.output}/confusion_matrix_{acc_type}.png")
    # Close the figure (not just clf) so repeated calls don't leak figures.
    plt.close(fig)
    return results
def overall_stats(ref, pred, emotion_set_path, single=False):
    """
    Score predicted emotion timings against single-emotion references.

    :param ref: Path to file(s) containing reference emotion timings
    :param pred: Path to file(s) containing predicted emotion timings
    :param emotion_set_path: Path to emotion set
    :param single: If true, paths are to single files (assume they are
        filelists otherwise)
    :return: Dict with "frame_wise" and "file_wise" metric summaries
    """
    emotion2id = EmotionDataset.get_emotion_to_id_mapping(emotion_set_path)
    # prepare files
    if single:
        ref_emotions = [ref]
        pred_files = [pred]
    else:
        with open(ref) as inf:
            ref_emotions = [line.strip().split()[1] for line in inf]
        with open(pred) as inf:
            pred_files = [line.strip() for line in inf]
    # loop over files, gathering frame-level and file-level predictions
    results = {}
    all_preds = []
    all_refs = []
    file_preds = []
    file_refs = []
    for ref_emotion, pred_file in zip(ref_emotions, pred_files):
        # Actor id is encoded in the prediction file name: ...-<actor>.<ext>
        actor = pred_file.split("-")[-1].split(".")[0]
        if actor in FLAGS.actor_ignore:
            continue
        emotion_id = emotion2id[ref_emotion]
        frame_preds = []
        with open(pred_file) as in_f:
            for line in in_f:
                frame_preds.append(int(line.strip().split()[2]))
        # Repeat the single file-level reference once per predicted frame so
        # the frame-wise refs/preds lists stay the same length (the previous
        # append() added only one ref per file, mismatching len(all_preds)).
        all_refs.extend([emotion_id] * len(frame_preds))
        all_preds.extend(frame_preds)
        # File-level prediction: majority vote over the frame predictions.
        counts = np.bincount(np.array(frame_preds))
        file_preds.append(np.argmax(counts))
        file_refs.append(emotion_id)
    results["frame_wise"] = get_stats(all_refs, all_preds, emotion2id, "frame_wise")
    results["file_wise"] = get_stats(file_refs, file_preds, emotion2id, "file_wise")
    return results
def score(unused_argv):
    """absl entry point: run scoring and write results to a JSON report."""
    os.makedirs(FLAGS.output, exist_ok=True)
    results = overall_stats(FLAGS.ref, FLAGS.pred, FLAGS.emotion_set_path, FLAGS.single)
    serialized = json.dumps(results, indent=4)
    print(serialized)
    with open(f"{FLAGS.output}/score_results.json", "w") as outfile:
        outfile.write(serialized)
# Dispatch through absl so flags are parsed before score() runs.
if __name__ == "__main__":
    app.run(score)
| [
"seaborn.heatmap",
"matplotlib.pyplot.clf",
"numpy.argmax",
"sklearn.metrics.accuracy_score",
"json.dumps",
"sklearn.metrics.f1_score",
"absl.flags.DEFINE_boolean",
"absl.flags.DEFINE_list",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.savetxt",
"absl.flags.mark_flag_as_required",
... | [((359, 421), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ref"""', 'None', '"""Path to reference emotions"""'], {}), "('ref', None, 'Path to reference emotions')\n", (378, 421), False, 'from absl import flags, app\n'), ((422, 485), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pred"""', 'None', '"""Path to predicted emotions"""'], {}), "('pred', None, 'Path to predicted emotions')\n", (441, 485), False, 'from absl import flags, app\n'), ((486, 551), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output"""', 'None', '"""Path to predicted emotions"""'], {}), "('output', None, 'Path to predicted emotions')\n", (505, 551), False, 'from absl import flags, app\n'), ((552, 620), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""emotion_set_path"""', 'None', '"""path to emotion set"""'], {}), "('emotion_set_path', None, 'path to emotion set')\n", (571, 620), False, 'from absl import flags, app\n'), ((621, 686), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""single"""', '(False)', '"""Score only a single file"""'], {}), "('single', False, 'Score only a single file')\n", (641, 686), False, 'from absl import flags, app\n'), ((687, 755), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""actor_ignore"""', '[]', '"""actors to ignore in scoring"""'], {}), "('actor_ignore', [], 'actors to ignore in scoring')\n", (704, 755), False, 'from absl import flags, app\n'), ((776, 810), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""ref"""'], {}), "('ref')\n", (803, 810), False, 'from absl import flags, app\n'), ((811, 846), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""pred"""'], {}), "('pred')\n", (838, 846), False, 'from absl import flags, app\n'), ((847, 884), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""output"""'], {}), "('output')\n", (874, 884), False, 'from absl import flags, app\n'), ((885, 932), 'absl.flags.mark_flag_as_required', 
'flags.mark_flag_as_required', (['"""emotion_set_path"""'], {}), "('emotion_set_path')\n", (912, 932), False, 'from absl import flags, app\n'), ((1175, 1204), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['refs', 'preds'], {}), '(refs, preds)\n', (1191, 1204), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_recall_fscore_support, accuracy_score\n'), ((1221, 1256), 'sklearn.metrics.f1_score', 'f1_score', (['refs', 'preds'], {'average': 'None'}), '(refs, preds, average=None)\n', (1229, 1256), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_recall_fscore_support, accuracy_score\n'), ((1289, 1347), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['refs', 'preds'], {'average': 'None'}), '(refs, preds, average=None)\n', (1320, 1347), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_recall_fscore_support, accuracy_score\n'), ((1358, 1385), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['refs', 'preds'], {}), '(refs, preds)\n', (1372, 1385), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_recall_fscore_support, accuracy_score\n'), ((1811, 1876), 'numpy.savetxt', 'np.savetxt', (['f"""{FLAGS.output}/confusion_matrix_{acc_type}.txt"""', 'cm'], {}), "(f'{FLAGS.output}/confusion_matrix_{acc_type}.txt', cm)\n", (1821, 1876), True, 'import numpy as np\n'), ((1881, 1943), 'numpy.save', 'np.save', (['f"""{FLAGS.output}/confusion_matrix_{acc_type}.npy"""', 'cm'], {}), "(f'{FLAGS.output}/confusion_matrix_{acc_type}.npy', cm)\n", (1888, 1943), True, 'import numpy as np\n'), ((1954, 1967), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (1965, 1967), True, 'import matplotlib.pyplot as plt\n'), ((1972, 2007), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(False)', 'ax': 'ax'}), '(cm, annot=False, ax=ax)\n', (1983, 2007), True, 'import seaborn as sns\n'), ((2120, 2182), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['f"""{FLAGS.output}/confusion_matrix_{acc_type}.png"""'], {}), "(f'{FLAGS.output}/confusion_matrix_{acc_type}.png')\n", (2131, 2182), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2196), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2194, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2607, 2665), 'dataloader.audio.EmotionDataset.get_emotion_to_id_mapping', 'EmotionDataset.get_emotion_to_id_mapping', (['emotion_set_path'], {}), '(emotion_set_path)\n', (2647, 2665), False, 'from dataloader.audio import EmotionDataset\n'), ((3954, 3994), 'os.makedirs', 'os.makedirs', (['FLAGS.output'], {'exist_ok': '(True)'}), '(FLAGS.output, exist_ok=True)\n', (3965, 3994), False, 'import os\n'), ((4273, 4287), 'absl.app.run', 'app.run', (['score'], {}), '(score)\n', (4280, 4287), False, 'from absl import flags, app\n'), ((4094, 4123), 'json.dumps', 'json.dumps', (['results'], {'indent': '(4)'}), '(results, indent=4)\n', (4104, 4123), False, 'import json\n'), ((4202, 4239), 'json.dump', 'json.dump', (['results', 'outfile'], {'indent': '(4)'}), '(results, outfile, indent=4)\n', (4211, 4239), False, 'import json\n'), ((3628, 3649), 'numpy.array', 'np.array', (['frame_preds'], {}), '(frame_preds)\n', (3636, 3649), True, 'import numpy as np\n'), ((3677, 3694), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (3686, 3694), True, 'import numpy as np\n')] |
import glob
import pandas as pd
from datetime import datetime
import xml.etree.ElementTree as ET
import numpy as np
# Define a a function to extract .csv files
def extract_from_csv(file_to_process):
    """Read one CSV file into a DataFrame, using its first column as the index."""
    return pd.read_csv(file_to_process, index_col=0)
# Define a a function to extract .json files
def extract_from_json(file_to_process):
    """Read one JSON-lines file (one object per line) into a DataFrame."""
    return pd.read_json(file_to_process, lines=True)
# Define a a function to extract .xml files
def extract_from_xml(file_to_process):
    """Parse one XML file of car records into a DataFrame.

    Each child element of the root must provide car_model,
    year_of_manufacture, price and fuel sub-elements.
    """
    rows = []
    root = ET.parse(file_to_process).getroot()
    for car in root:
        rows.append({
            "car_model": car.find("car_model").text,
            "year_of_manufacture": int(car.find("year_of_manufacture").text),
            "price": float(car.find("price").text),
            "fuel": str(car.find("fuel").text),
        })
    # Build the frame in one shot: DataFrame.append was removed in pandas 2.0
    # and appending row-by-row was quadratic anyway.
    return pd.DataFrame(rows, columns=["car_model", "year_of_manufacture", "price", "fuel"])
# Define a a function to extract entire files
def extract():
    """Extract car records from every *.csv, *.json and *.xml file in the CWD.

    :return: One DataFrame holding all records; when no files are found the
        frame is empty but still carries the canonical columns.
    """
    # Seed with an empty, correctly-columned frame so the canonical column
    # order is kept even when source files add extra columns.
    frames = [pd.DataFrame(columns=["car_model", "year_of_manufacture", "price", "fuel"])]
    # process all csv files
    for csvfile in glob.glob("*.csv"):
        frames.append(extract_from_csv(csvfile))
    # process all json files
    for jsonfile in glob.glob("*.json"):
        frames.append(extract_from_json(jsonfile))
    # process all xml files
    for xmlfile in glob.glob("*.xml"):
        frames.append(extract_from_xml(xmlfile))
    # DataFrame.append was removed in pandas 2.0; concatenate once instead of
    # re-copying the accumulated frame on every file.
    return pd.concat(frames, ignore_index=True)
# Define a function to transform fuel price from USD to GBP and drop USD price column
def transform(data):
    """Add a GBP price column (rounded to 3 dp) and drop the USD 'price' column.

    Note: the GBP column is added to *data* in place; the returned frame is a
    copy without the USD column.
    """
    gbp = data['price'] * 0.732398
    data['price in GBP'] = np.round(np.float64(gbp), 3)
    return data.drop(['price'], axis=1)
# Define a function to load the transformed data to a csv file
def load(data_to_load):
    """Persist the transformed data as transformed_data.csv (no index column)."""
    data_to_load.to_csv('transformed_data.csv', index=False)
# Define a function to log the ETL process
def log(message):
    """Append *message* to logfile.txt with a Year-Monthname-Day-H:M:S stamp."""
    stamp = datetime.now().strftime('%Y-%h-%d-%H:%M:%S')
    with open("logfile.txt", "a") as logfile:
        logfile.write(stamp + ',' + message + '\n')
# Run ETL
if __name__ == '__main__':
    # Each phase is bracketed by log() calls so logfile.txt keeps a
    # timestamped audit trail of the pipeline run.
    log("ETL Job Started")
    log("Extract phase Started")
    extracted_data = extract()
    log("Extract phase Ended")
    log("Transform phase Started")
    transformed_data = transform(extracted_data)
    log("Transform phase Ended")
    log("Load phase Started")
    load(transformed_data)
    log("Load phase Ended")
    log("ETL Job Ended")
| [
"pandas.DataFrame",
"xml.etree.ElementTree.parse",
"pandas.read_csv",
"pandas.read_json",
"glob.glob",
"numpy.float64",
"datetime.datetime.now"
] | [((216, 257), 'pandas.read_csv', 'pd.read_csv', (['file_to_process'], {'index_col': '(0)'}), '(file_to_process, index_col=0)\n', (227, 257), True, 'import pandas as pd\n'), ((381, 422), 'pandas.read_json', 'pd.read_json', (['file_to_process'], {'lines': '(True)'}), '(file_to_process, lines=True)\n', (393, 422), True, 'import pandas as pd\n'), ((543, 618), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['car_model', 'year_of_manufacture', 'price', 'fuel']"}), "(columns=['car_model', 'year_of_manufacture', 'price', 'fuel'])\n", (555, 618), True, 'import pandas as pd\n'), ((630, 655), 'xml.etree.ElementTree.parse', 'ET.parse', (['file_to_process'], {}), '(file_to_process)\n', (638, 655), True, 'import xml.etree.ElementTree as ET\n'), ((1231, 1306), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['car_model', 'year_of_manufacture', 'price', 'fuel']"}), "(columns=['car_model', 'year_of_manufacture', 'price', 'fuel'])\n", (1243, 1306), True, 'import pandas as pd\n'), ((1358, 1376), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (1367, 1376), False, 'import glob\n'), ((1528, 1547), 'glob.glob', 'glob.glob', (['"""*.json"""'], {}), "('*.json')\n", (1537, 1547), False, 'import glob\n'), ((1695, 1713), 'glob.glob', 'glob.glob', (['"""*.xml"""'], {}), "('*.xml')\n", (1704, 1713), False, 'import glob\n'), ((2472, 2486), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2484, 2486), False, 'from datetime import datetime\n'), ((2025, 2061), 'numpy.float64', 'np.float64', (["(data['price'] * 0.732398)"], {}), "(data['price'] * 0.732398)\n", (2035, 2061), True, 'import numpy as np\n')] |
import numpy as np
from scipy import sparse
def _preprare_data_in_groups(X, y=None, sample_weights=None):
"""
Takes the first column of the feature Matrix X given and
transforms the data into groups accordingly.
Parameters
----------
X : (2d-array like) Feature matrix with the first column the group label
y : (optional, 1d-array like) target values
sample_weights : (optional, 1d-array like) sample weights
Returns
-------
sizes: (1d-array) group sizes
X_features : (2d-array) features sorted per group
y : (None or 1d-array) Target sorted per group
sample_weights: (None or 1d-array) sample weights sorted per group
"""
if sparse.issparse(X):
group_labels = X.getcol(0).toarray()[:,0]
else:
group_labels = X[:,0]
group_indices = group_labels.argsort()
group_labels = group_labels[group_indices]
_, sizes = np.unique(group_labels, return_counts=True)
X_sorted = X[group_indices]
X_features = X_sorted[:, 1:]
if y is not None:
y = y[group_indices]
if sample_weights is not None:
sample_weights = sample_weights[group_indices]
return sizes, X_sorted, X_features, y, sample_weights
| [
"scipy.sparse.issparse",
"numpy.unique"
] | [((699, 717), 'scipy.sparse.issparse', 'sparse.issparse', (['X'], {}), '(X)\n', (714, 717), False, 'from scipy import sparse\n'), ((916, 959), 'numpy.unique', 'np.unique', (['group_labels'], {'return_counts': '(True)'}), '(group_labels, return_counts=True)\n', (925, 959), True, 'import numpy as np\n')] |
from rdkit import Chem
from rdkit.Chem import MACCSkeys
from random import shuffle
import numpy as np
import pandas as pd
#import torch
import scipy
from scipy import sparse
from rdkit.Chem.AllChem import GetMorganFingerprintAsBitVect as Morgan
from functools import partial
import os
#print(os.listdir())
# MCF and PAINS structural-alert SMARTS patterns; molecules matching any of
# them are rejected by mol_passes_filters.
_mcf = pd.read_csv('pt_metrics_utils/mcf.csv')
_pains = pd.read_csv('pt_metrics_utils/wehi_pains.csv', names=['smarts', 'names'])
# DataFrame.append was removed in pandas 2.0; concat is the supported spelling.
_filters = [Chem.MolFromSmarts(x)
            for x in pd.concat([_mcf, _pains], sort=True)['smarts'].values]
def get_mol(smiles_or_mol):
    '''
    Loads SMILES/molecule into RDKit's object.
    Returns None for empty or unparseable SMILES and for molecules that
    fail sanitization; non-string inputs are passed through unchanged.
    '''
    if not isinstance(smiles_or_mol, str):
        return smiles_or_mol
    if len(smiles_or_mol) == 0:
        return None
    mol = Chem.MolFromSmiles(smiles_or_mol)
    if mol is None:
        return None
    try:
        Chem.SanitizeMol(mol)
    except ValueError:
        return None
    return mol
"""
def average_agg_tanimoto(stock_vecs, gen_vecs,
batch_size=5000, agg='max',
device='cpu', p=1):
#
#For each molecule in gen_vecs finds closest molecule in stock_vecs.
#Returns average tanimoto score for between these molecules
#Parameters:
#stock_vecs: numpy array <n_vectors x dim>
#gen_vecs: numpy array <n_vectors' x dim>
#agg: max or mean
#p: power for averaging: (mean x^p)^(1/p)
#
assert agg in ['max', 'mean'], "Can aggregate only max or mean"
agg_tanimoto = np.zeros(len(gen_vecs))
total = np.zeros(len(gen_vecs))
for j in range(0, stock_vecs.shape[0], batch_size):
x_stock = torch.tensor(stock_vecs[j:j + batch_size]).to(device).float()
for i in range(0, gen_vecs.shape[0], batch_size):
y_gen = torch.tensor(gen_vecs[i:i + batch_size]).to(device).float()
y_gen = y_gen.transpose(0, 1)
tp = torch.mm(x_stock, y_gen)
jac = (tp / (x_stock.sum(1, keepdim=True) +
y_gen.sum(0, keepdim=True) - tp)).cpu().numpy()
jac[np.isnan(jac)] = 1
if p != 1:
jac = jac**p
if agg == 'max':
agg_tanimoto[i:i + y_gen.shape[1]] = np.maximum(
agg_tanimoto[i:i + y_gen.shape[1]], jac.max(0))
elif agg == 'mean':
agg_tanimoto[i:i + y_gen.shape[1]] += jac.sum(0)
total[i:i + y_gen.shape[1]] += jac.shape[0]
if agg == 'mean':
agg_tanimoto /= total
if p != 1:
agg_tanimoto = (agg_tanimoto)**(1/p)
return np.mean(agg_tanimoto)
"""
def fingerprint(smiles_or_mol, fp_type='maccs', dtype=None, morgan__r=2,
                morgan__n=1024, *args, **kwargs):
    """
    Generates fingerprint for SMILES.
    If smiles is invalid, returns None.
    Returns numpy array of fingerprint bits.
    Parameters:
        smiles: SMILES string
        type: type of fingerprint: [MACCS|morgan]
        dtype: if not None, specifies the dtype of returned array
    """
    kind = fp_type.lower()
    molecule = get_mol(smiles_or_mol, *args, **kwargs)
    if molecule is None:
        return None
    if kind == 'maccs':
        on_bits = np.array(MACCSkeys.GenMACCSKeys(molecule).GetOnBits())
        fp = np.zeros(166, dtype='uint8')
        if len(on_bits) != 0:
            # Drop the 0-th key, which is always zero.
            fp[on_bits - 1] = 1
    elif kind == 'morgan':
        fp = np.asarray(Morgan(molecule, morgan__r, nBits=morgan__n),
                        dtype='uint8')
    else:
        raise ValueError("Unknown fingerprint type {}".format(kind))
    return fp if dtype is None else fp.astype(dtype)
def fingerprints(smiles_mols_array, already_unique=False, *args,
                 **kwargs):
    '''
    Computes fingerprints of smiles np.array/list/pd.Series,
    e.g. fingerprints(smiles_mols_array, type='morgan')
    Inserts a NaN row for each incorrect smiles.
    IMPORTANT: if there is at least one NaN, the dtype would be float.
    Parameters:
        smiles_mols_array: list/array/pd.Series of smiles or already computed
            RDKit molecules
        already_unique: flag for performance reasons, if smiles array is big
            and already unique. Its value is set to True if smiles_mols_array
            contain RDKit molecules already.
    '''
    if isinstance(smiles_mols_array, pd.Series):
        smiles_mols_array = smiles_mols_array.values
    else:
        smiles_mols_array = np.asarray(smiles_mols_array)
    if not isinstance(smiles_mols_array[0], str):
        already_unique = True
    if not already_unique:
        smiles_mols_array, inv_index = np.unique(smiles_mols_array,
                                                  return_inverse=True)
    fps = [fingerprint(i, *args, **kwargs) for i in smiles_mols_array]
    # Find the first successfully computed fingerprint to learn the bit
    # length; previously `first_fp` stayed undefined when every SMILES was
    # invalid, raising NameError below.
    first_fp = next((fp for fp in fps if fp is not None), None)
    length = first_fp.shape[-1] if first_fp is not None else 1
    # np.nan (not np.NaN, removed in numpy 2.0) marks failed fingerprints.
    fps = [fp if fp is not None else np.array([np.nan]).repeat(length)[None, :]
           for fp in fps]
    if first_fp is not None and sparse.issparse(first_fp):
        fps = sparse.vstack(fps).tocsr()
    else:
        fps = np.vstack(fps)
    if not already_unique:
        return fps[inv_index]
    return fps
def mol_passes_filters(smi,
                       isomericSmiles=False):
    """
    Checks that the molecule parsed from *smi*
    * passes the MCF and PAINS structural-alert filters,
    * has no ring with 8 or more atoms,
    * is not charged,
    * round-trips to a non-empty canonical SMILES.
    Returns False on any parse failure.
    """
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; unparseable input is reported via the None check below.
    try:
        mol = Chem.MolFromSmiles(smi)
    except Exception:
        return False
    if mol is None:
        return False
    ring_info = mol.GetRingInfo()
    if ring_info.NumRings() != 0 and any(
            len(x) >= 8 for x in ring_info.AtomRings()):
        return False
    if any(atom.GetFormalCharge() != 0 for atom in mol.GetAtoms()):
        return False
    # Substructure matching is done on the explicit-hydrogen molecule.
    h_mol = Chem.AddHs(mol)
    if any(h_mol.HasSubstructMatch(smarts) for smarts in _filters):
        return False
    smiles = Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
    if not smiles:
        return False
    return Chem.MolFromSmiles(smiles) is not None
| [
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"scipy.sparse.vstack",
"pandas.read_csv",
"scipy.sparse.issparse",
"rdkit.Chem.MolFromSmarts",
"numpy.asarray",
"numpy.zeros",
"rdkit.Chem.SanitizeMol",
"numpy.vstack",
"numpy.array",
"rdkit.Chem.MolToSmiles",
"rdkit.Chem.AddHs",
"rdkit.Che... | [((313, 352), 'pandas.read_csv', 'pd.read_csv', (['"""pt_metrics_utils/mcf.csv"""'], {}), "('pt_metrics_utils/mcf.csv')\n", (324, 352), True, 'import pandas as pd\n'), ((362, 435), 'pandas.read_csv', 'pd.read_csv', (['"""pt_metrics_utils/wehi_pains.csv"""'], {'names': "['smarts', 'names']"}), "('pt_metrics_utils/wehi_pains.csv', names=['smarts', 'names'])\n", (373, 435), True, 'import pandas as pd\n'), ((448, 469), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['x'], {}), '(x)\n', (466, 469), False, 'from rdkit import Chem\n'), ((5332, 5357), 'scipy.sparse.issparse', 'sparse.issparse', (['first_fp'], {}), '(first_fp)\n', (5347, 5357), False, 'from scipy import sparse\n'), ((6095, 6110), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (6105, 6110), False, 'from rdkit import Chem\n'), ((6411, 6463), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {'isomericSmiles': 'isomericSmiles'}), '(mol, isomericSmiles=isomericSmiles)\n', (6427, 6463), False, 'from rdkit import Chem\n'), ((735, 768), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles_or_mol'], {}), '(smiles_or_mol)\n', (753, 768), False, 'from rdkit import Chem\n'), ((3257, 3289), 'rdkit.Chem.MACCSkeys.GenMACCSKeys', 'MACCSkeys.GenMACCSKeys', (['molecule'], {}), '(molecule)\n', (3279, 3289), False, 'from rdkit.Chem import MACCSkeys\n'), ((3354, 3382), 'numpy.zeros', 'np.zeros', (['(166)'], {'dtype': '"""uint8"""'}), "(166, dtype='uint8')\n", (3362, 3382), True, 'import numpy as np\n'), ((4732, 4761), 'numpy.asarray', 'np.asarray', (['smiles_mols_array'], {}), '(smiles_mols_array)\n', (4742, 4761), True, 'import numpy as np\n'), ((4909, 4958), 'numpy.unique', 'np.unique', (['smiles_mols_array'], {'return_inverse': '(True)'}), '(smiles_mols_array, return_inverse=True)\n', (4918, 4958), True, 'import numpy as np\n'), ((5424, 5438), 'numpy.vstack', 'np.vstack', (['fps'], {}), '(fps)\n', (5433, 5438), True, 'import numpy as np\n'), ((5803, 5826), 
'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (5821, 5826), False, 'from rdkit import Chem\n'), ((6535, 6561), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (6553, 6561), False, 'from rdkit import Chem\n'), ((842, 863), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {}), '(mol)\n', (858, 863), False, 'from rdkit import Chem\n'), ((3551, 3595), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'Morgan', (['molecule', 'morgan__r'], {'nBits': 'morgan__n'}), '(molecule, morgan__r, nBits=morgan__n)\n', (3557, 3595), True, 'from rdkit.Chem.AllChem import GetMorganFingerprintAsBitVect as Morgan\n'), ((5373, 5391), 'scipy.sparse.vstack', 'sparse.vstack', (['fps'], {}), '(fps)\n', (5386, 5391), False, 'from scipy import sparse\n'), ((5256, 5274), 'numpy.array', 'np.array', (['[np.NaN]'], {}), '([np.NaN])\n', (5264, 5274), True, 'import numpy as np\n')] |
#Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam, ParallelSGD
from tpstorch.ml.nn import BKELossEXP, BKELossFTS
#Import model-specific classes
from brownian_ml import CommittorNet, BrownianParticle
import numpy as np
#Save the rank and world size
from tpstorch import _rank, _world_size
rank = _rank
world_size = _world_size
#Import any other things
import tqdm, sys
# Fix RNG seeds so torch and numpy sampling is reproducible across runs.
torch.manual_seed(0)
np.random.seed(0)
# Prefix shared by every output file (configs, loss logs, parameters).
prefix = 'vanilla'
#Set initial configuration and BP simulator
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    # Linear interpolation between the reactant state (s=0) and the product
    # state (s=1); each rank is placed at its own fraction along the string.
    return (1-s)*start+s*end
initial_config = initializer(_rank/(_world_size-1))
kT = 1/15.0
# The kappa=50 sampler drives the main (biased) simulation; the kappa=0.0
# copy supplies unbiased samples for the boundary-condition loss terms.
bp_sampler = BrownianParticle(dt=5e-3,gamma=1.0,kT=kT, kappa=50,initial = initial_config,prefix=prefix,save_config=True)
bp_sampler_bc = BrownianParticle(dt=5e-3,gamma=1.0,kT=kT, kappa=0.0,initial = initial_config,prefix=prefix,save_config=True)
#Initialize neural net
committor = CommittorNet(d=1,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("initial_nn"))
#Construct EXPSimulation
batch_size = 4
datarunner = EXPReweightSimulation(bp_sampler, committor, period=100, batch_size=batch_size, dimN=1)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP( bc_sampler = bp_sampler_bc,
            committor = committor,
            lambda_A = 1e4,
            lambda_B = 1e4,
            start_react = start,
            start_prod = end,
            n_bc_samples = 100,
            bc_period = 10,
            batch_size_bc = 0.5)
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)#, momentum=0.90,weight_decay=1e-3
optimizer = ParallelSGD(committor.parameters(), lr=5e-4,momentum=0.95)
#Save loss function statistics
# Only rank 0 writes the shared loss log; other ranks keep the placeholder.
loss_io = []
if _rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Save timing statistics
import time
time_io = open("{}_timing_{}.txt".format(prefix,rank),'w')
#Training loop
# Main training loop: sample with the biased simulator, apply the EXP
# reweighted BKE loss, and checkpoint parameters every iteration.
for epoch in range(1):
    if _rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 2500:
        t0 = time.time()
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        t1 = time.time()
        sampling_time = t1-t0
        t0 = time.time()
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        cost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost.backward()
        optimizer.step()
        t1 = time.time()
        optimization_time = t1-t0
        # Per-iteration wall-clock bookkeeping: sampling vs. optimization.
        time_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,sampling_time, optimization_time))
        time_io.flush()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            if _rank == 0:
                # Print statistics
                print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
                # Also print the reweighting factors
                print(loss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
            # Per-iteration checkpoint so training can be resumed/inspected.
            torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
        # Fixed invalid indentation in the original: the counter increment was
        # placed at a deeper level than the preceding dedented statements,
        # which is a Python IndentationError. The increment belongs in the
        # while body; the final save happens once the while loop finishes.
        actual_counter += 1
    # Save the final parameters for this rank (one file per rank).
    torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
| [
"brownian_ml.CommittorNet",
"numpy.random.seed",
"torch.manual_seed",
"brownian_ml.BrownianParticle",
"torch.load",
"time.time",
"tpstorch.ml.nn.BKELossEXP",
"tpstorch.ml.data.EXPReweightSimulation",
"torch.no_grad",
"torch.tensor"
] | [((528, 548), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (545, 548), False, 'import torch\n'), ((549, 566), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (563, 566), True, 'import numpy as np\n'), ((640, 662), 'torch.tensor', 'torch.tensor', (['[[-1.0]]'], {}), '([[-1.0]])\n', (652, 662), False, 'import torch\n'), ((669, 690), 'torch.tensor', 'torch.tensor', (['[[1.0]]'], {}), '([[1.0]])\n', (681, 690), False, 'import torch\n'), ((818, 934), 'brownian_ml.BrownianParticle', 'BrownianParticle', ([], {'dt': '(0.005)', 'gamma': '(1.0)', 'kT': 'kT', 'kappa': '(50)', 'initial': 'initial_config', 'prefix': 'prefix', 'save_config': '(True)'}), '(dt=0.005, gamma=1.0, kT=kT, kappa=50, initial=\n initial_config, prefix=prefix, save_config=True)\n', (834, 934), False, 'from brownian_ml import CommittorNet, BrownianParticle\n'), ((942, 1059), 'brownian_ml.BrownianParticle', 'BrownianParticle', ([], {'dt': '(0.005)', 'gamma': '(1.0)', 'kT': 'kT', 'kappa': '(0.0)', 'initial': 'initial_config', 'prefix': 'prefix', 'save_config': '(True)'}), '(dt=0.005, gamma=1.0, kT=kT, kappa=0.0, initial=\n initial_config, prefix=prefix, save_config=True)\n', (958, 1059), False, 'from brownian_ml import CommittorNet, BrownianParticle\n'), ((1235, 1327), 'tpstorch.ml.data.EXPReweightSimulation', 'EXPReweightSimulation', (['bp_sampler', 'committor'], {'period': '(100)', 'batch_size': 'batch_size', 'dimN': '(1)'}), '(bp_sampler, committor, period=100, batch_size=\n batch_size, dimN=1)\n', (1256, 1327), False, 'from tpstorch.ml.data import EXPReweightSimulation\n'), ((1428, 1615), 'tpstorch.ml.nn.BKELossEXP', 'BKELossEXP', ([], {'bc_sampler': 'bp_sampler_bc', 'committor': 'committor', 'lambda_A': '(10000.0)', 'lambda_B': '(10000.0)', 'start_react': 'start', 'start_prod': 'end', 'n_bc_samples': '(100)', 'bc_period': '(10)', 'batch_size_bc': '(0.5)'}), '(bc_sampler=bp_sampler_bc, committor=committor, lambda_A=10000.0,\n lambda_B=10000.0, start_react=start, 
start_prod=end, n_bc_samples=100,\n bc_period=10, batch_size_bc=0.5)\n', (1438, 1615), False, 'from tpstorch.ml.nn import BKELossEXP, BKELossFTS\n'), ((1155, 1179), 'torch.load', 'torch.load', (['"""initial_nn"""'], {}), "('initial_nn')\n", (1165, 1179), False, 'import torch\n'), ((1087, 1119), 'brownian_ml.CommittorNet', 'CommittorNet', ([], {'d': '(1)', 'num_nodes': '(200)'}), '(d=1, num_nodes=200)\n', (1099, 1119), False, 'from brownian_ml import CommittorNet, BrownianParticle\n'), ((2338, 2349), 'time.time', 'time.time', ([], {}), '()\n', (2347, 2349), False, 'import time\n'), ((2482, 2493), 'time.time', 'time.time', ([], {}), '()\n', (2491, 2493), False, 'import time\n'), ((2547, 2558), 'time.time', 'time.time', ([], {}), '()\n', (2556, 2558), False, 'import time\n'), ((2797, 2808), 'time.time', 'time.time', ([], {}), '()\n', (2806, 2808), False, 'import time\n'), ((3064, 3079), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3077, 3079), False, 'import torch\n')] |
import numpy as np
from PIL import Image
from tqdm import tqdm
import paddle
from paddle.vision.datasets import Cifar10
from paddle.vision import transforms
def config_dataset(config):
    """Fill in dataset-dependent settings on `config` and return it.

    Sets "topK" and "n_class" for known datasets, resolves "data_path",
    and builds the "data" section with list paths and batch sizes for the
    train_set/database/test splits.
    """
    dataset = config["dataset"]

    # (topK, n_class) per known dataset; any cifar variant shares one entry
    if "cifar" in dataset:
        config["topK"], config["n_class"] = -1, 10
    else:
        class_table = {
            "nuswide_21": (5000, 21),
            "nuswide_21_m": (5000, 21),
            "nuswide_81_m": (5000, 81),
            "coco": (5000, 80),
            "imagenet": (1000, 100),
            "mirflickr": (-1, 38),
            "voc2012": (-1, 20),
        }
        entry = class_table.get(dataset)
        if entry is not None:
            config["topK"], config["n_class"] = entry

    # dataset root on disk; a few datasets override the default location
    path_overrides = {
        "nuswide_21": "/dataset/NUS-WIDE/",
        "nuswide_21_m": "/dataset/nus_wide_m/",
        "nuswide_81_m": "/dataset/nus_wide_m/",
        "coco": "/dataset/COCO_2014/",
        "voc2012": "/dataset/",
    }
    config["data_path"] = path_overrides.get(dataset, "/dataset/" + dataset + "/")

    # image-list files and batch sizes for each split
    splits = (("train_set", "train.txt"), ("database", "database.txt"), ("test", "test.txt"))
    config["data"] = {
        split: {"list_path": "./data/" + dataset + "/" + list_name,
                "batch_size": config["batch_size"]}
        for split, list_name in splits
    }
    return config
class ImageList(object):
    """Image dataset backed by a text list of "<relative_path> <label bits...>" lines.

    Each line is parsed into (absolute path, multi-hot label vector); images
    are loaded lazily in __getitem__ and passed through `transform`.
    """
    def __init__(self, data_path, image_list, transform):
        entries = []
        for line in image_list:
            tokens = line.split()
            labels = np.array([int(bit) for bit in tokens[1:]])
            entries.append((data_path + tokens[0], labels))
        self.imgs = entries
        self.transform = transform

    def __getitem__(self, index):
        path, target = self.imgs[index]
        image = Image.open(path).convert('RGB')
        return self.transform(image), target, index

    def __len__(self):
        return len(self.imgs)
def image_transform(resize_size, crop_size, data_set):
    """Build the paddle preprocessing pipeline for one split.

    The training split gets a random flip + random crop for augmentation;
    evaluation splits get a deterministic center crop.
    """
    if data_set == "train_set":
        crop_steps = [transforms.RandomHorizontalFlip(),
                      transforms.RandomCrop(crop_size)]
    else:
        crop_steps = [transforms.CenterCrop(crop_size)]
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    pipeline = [transforms.Resize(resize_size)] + crop_steps + [transforms.ToTensor(), normalize]
    return transforms.Compose(pipeline)
class MyCIFAR10(Cifar10):
    """Cifar10 variant returning (transformed image, one-hot float label, index)."""
    def __getitem__(self, index):
        raw, target = self.data[index]
        # raw is stored flat; reshape to CHW then reorder to HWC for PIL
        pixels = np.reshape(raw, [3, 32, 32]).transpose([1, 2, 0]).astype("uint8")
        image = self.transform(Image.fromarray(pixels))
        onehot = np.eye(10)[np.array(target)]
        return image, onehot.astype("float32"), index
def get_index(dataset, label):
    """Return (as a numpy array) the indices of samples whose label equals `label`.

    `dataset` is a sequence of (image, label) pairs.
    """
    matches = [pos for pos, sample in enumerate(dataset) if sample[1] == label]
    return np.array(matches)
def cifar_dataset(config):
    """Build train/test/database CIFAR-10 loaders with a per-class random split.

    The official train and test partitions are pooled and re-split per class
    into train/test/database subsets; the subset sizes and the database
    composition depend on the dataset variant ("cifar10", "cifar10-1",
    "cifar10-2").

    Returns:
        (train_loader, test_loader, database_loader,
         num_train, num_test, num_database)
    """
    batch_size = config["batch_size"]
    # per-class sample counts for the train/test carve-out
    train_size = 500
    test_size = 100
    if config["dataset"] == "cifar10-2":
        train_size = 5000
        test_size = 1000
    transform = transforms.Compose([
        transforms.Resize(config["crop_size"]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # three dataset objects sharing the same transform; the `data` attribute
    # of each is overwritten below with the re-split samples
    train_dataset = MyCIFAR10(data_file=None,
                              mode="train",
                              transform=transform,
                              download=True)
    test_dataset = MyCIFAR10(data_file=None,
                             mode="test",
                             transform=transform)
    database_dataset = MyCIFAR10(data_file=None,
                                 mode="test",
                                 transform=transform)
    # pool the official train and test partitions before re-splitting
    X = train_dataset.data
    X.extend(test_dataset.data)
    first = True
    for label in range(10):
        # shuffle this class's indices, then carve out the three subsets
        index = get_index(X, label)
        N = index.shape[0]
        perm = np.random.permutation(N)
        index = index[perm]
        if first:
            test_index = index[:test_size]
            train_index = index[test_size: train_size + test_size]
            database_index = index[train_size + test_size:]
        else:
            test_index = np.concatenate((test_index, index[:test_size]))
            train_index = np.concatenate((train_index, index[test_size: train_size + test_size]))
            database_index = np.concatenate((database_index, index[train_size + test_size:]))
        first = False
    if config["dataset"] == "cifar10":
        # test:1000, train:5000, database:54000
        pass
    elif config["dataset"] == "cifar10-1":
        # test:1000, train:5000, database:59000
        database_index = np.concatenate((train_index, database_index))
    elif config["dataset"] == "cifar10-2":
        # test:10000, train:50000, database:50000
        database_index = train_index
    train_dataset.data = np.array(X)[train_index]
    test_dataset.data = np.array(X)[test_index]
    database_dataset.data = np.array(X)[database_index]
    print("train_dataset", train_dataset.data.shape[0])
    print("test_dataset", test_dataset.data.shape[0])
    print("database_dataset", database_dataset.data.shape[0])
    # convert back to plain lists (presumably what the paddle Cifar10
    # dataset expects for `data`) — TODO confirm
    train_dataset.data = list(train_dataset.data)
    test_dataset.data = list(test_dataset.data)
    database_dataset.data = list(database_dataset.data)
    train_loader = paddle.io.DataLoader(dataset=train_dataset,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         num_workers=1)
    test_loader = paddle.io.DataLoader(dataset=test_dataset,
                                        batch_size=batch_size,
                                        shuffle=False,
                                        num_workers=1)
    database_loader = paddle.io.DataLoader(dataset=database_dataset,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=1)
    return train_loader, test_loader, database_loader, \
           train_index.shape[0], test_index.shape[0], database_index.shape[0]
def get_data(config):
    """Return data loaders and dataset sizes for the configured dataset.

    CIFAR variants are delegated to cifar_dataset(); for all other datasets
    the image lists named in config["data"] are read from disk and wrapped
    in ImageList datasets.

    Returns:
        (train_loader, test_loader, database_loader,
         num_train, num_test, num_database)
    """
    if "cifar" in config["dataset"]:
        return cifar_dataset(config)

    dsets = {}
    dset_loaders = {}
    data_config = config["data"]

    for data_set in ["train_set", "test", "database"]:
        # read the image list with a context manager so the file handle is
        # closed promptly (the original `open(...).readlines()` leaked it)
        with open(data_config[data_set]["list_path"]) as list_file:
            image_list = list_file.readlines()
        dsets[data_set] = ImageList(config["data_path"],
                                    image_list,
                                    transform=image_transform(config["resize_size"], config["crop_size"], data_set))
        dset_loaders[data_set] = paddle.io.DataLoader(dsets[data_set],
                                                      batch_size=data_config[data_set]["batch_size"],
                                                      shuffle=True, num_workers=4)

    return dset_loaders["train_set"], dset_loaders["test"], dset_loaders["database"], \
           len(dsets["train_set"]), len(dsets["test"]), len(dsets["database"])
@paddle.no_grad()
def compute_result(dataloader, net, device):
    """Run `net` over every batch and return binary hash codes plus labels.

    Each batch output is binarized with sign(). Per-batch tensors are
    collected in lists and concatenated once at the end, instead of
    re-concatenating the growing tensors inside the loop (which copies all
    previous data on every iteration, i.e. quadratic in the batch count).

    Args:
        dataloader: iterable yielding (img, label, index) batches.
        net: the hashing network; switched to eval mode here.
        device: unused; kept for backward compatibility with callers.

    Returns:
        (codes, labels) concatenated over all batches, or (None, None) when
        the dataloader yields nothing (matches the original behavior).
    """
    net.eval()
    code_chunks = []
    label_chunks = []
    for img, label, index in tqdm(dataloader):
        # sign() turns the real-valued network output into +/-1 hash codes
        codes = paddle.sign(net(img)).astype("float32")
        code_chunks.append(codes)
        label_chunks.append(label)
    if not code_chunks:
        # original returned (None, None) for an empty dataloader
        return None, None
    return paddle.concat(code_chunks), paddle.concat(label_chunks)
# Retrieval cut-off ranks at which the precision-recall curve is sampled
# (default grid for pr_curve below).
draw_range = [1, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500,
              9000, 9500, 10000]
def pr_curve(rF, qF, rL, qL, draw_range=draw_range):
    """Precision/recall, averaged over queries, at each cut-off rank.

    rF/qF are retrieval/query hash codes, rL/qL the corresponding label
    matrices; a retrieval item is relevant to a query when the labels share
    at least one positive bit.
    (Reference: https://blog.csdn.net/HackerTom/article/details/89425729)
    """
    num_queries = qF.shape[0]
    # relevance[q, r] = 1 if query q and retrieval item r share a label
    relevance = (np.dot(qL, rL.transpose()) > 0).astype(np.float32)
    # per-query ranking of retrieval items by Hamming distance (ascending)
    ranking = np.argsort(CalcHammingDist(qF, rF))
    precisions, recalls = [], []
    for cutoff in tqdm(draw_range):
        prec = np.zeros(num_queries)
        rec = np.zeros(num_queries)
        for query in range(num_queries):
            total_relevant = np.sum(relevance[query])
            if total_relevant == 0:
                # query has no relevant items; contributes 0 to both means
                continue
            retrieved = relevance[query][ranking[query][:cutoff]]
            hits = np.sum(retrieved)
            prec[query] = hits / cutoff
            rec[query] = hits / total_relevant
        precisions.append(np.mean(prec))
        recalls.append(np.mean(rec))
    return precisions, recalls
def CalcHammingDist(B1, B2):
    """Pairwise Hamming distance between rows of +/-1 binary code matrices.

    For q-bit codes, the inner product of two rows is q - 2*(#differing bits),
    so the distance is 0.5 * (q - <b1, b2>).
    """
    code_length = B2.shape[1]
    return 0.5 * (code_length - np.dot(B1, B2.transpose()))
def CalcTopMap(rB, qB, retrievalL, queryL, topk):
    """Mean average precision over the top-`topk` items retrieved per query.

    rB/qB are retrieval/query binary codes, retrievalL/queryL the label
    matrices; relevance means sharing at least one positive label bit.
    """
    num_query = queryL.shape[0]
    total_map = 0
    for qi in tqdm(range(num_query)):
        relevant = (np.dot(queryL[qi, :], retrievalL.transpose()) > 0).astype(np.float32)
        # rank retrieval items by Hamming distance to this query
        order = np.argsort(CalcHammingDist(qB[qi, :], rB))
        top_relevant = relevant[order][0:topk]
        hit_count = np.sum(top_relevant).astype(int)
        if hit_count == 0:
            # no relevant item in the top-k: AP contribution is zero
            continue
        # 1-based ranks at which the hits occur
        ranks_of_hits = np.asarray(np.where(top_relevant == 1)) + 1.0
        # average precision: mean of (i / rank of i-th hit)
        total_map += np.mean(np.linspace(1, hit_count, hit_count) / ranks_of_hits)
    return total_map / num_query
# Smoke test: build the loaders for the hard-coded configuration when this
# module is run as a script. The commented-out entries document alternative
# optimizers, backbones, and datasets.
if __name__=="__main__":
    import paddle.optimizer as optim
    config = {
        "alpha": 0.1,
        # "optimizer":{"type": optim.SGD, "optim_params": {"lr": 0.05, "weight_decay": 10 ** -5}},
        "optimizer": {"type": optim.RMSProp, "optim_params": {"lr": 1e-5, "weight_decay": 10 ** -5}},
        "info": "[DSH]",
        "resize_size": 256,
        "crop_size": 224,
        "batch_size": 64,
        #"net": AlexNet,
        #"net": ResNet,
        "dataset": "cifar10",
        # "dataset": "cifar10-1",
        # "dataset": "cifar10-2",
        # "dataset": "coco",
        # "dataset": "mirflickr",
        # "dataset": "voc2012",
        # "dataset": "imagenet",
        # "dataset": "nuswide_21",
        # "dataset": "nuswide_21_m",
        # "dataset": "nuswide_81_m",
        "epoch": 250,
        "test_map": 15,
        "save_path": "save/DSH_resnet",
        "bit_list": [48],
    }
    get_data(config)
| [
"numpy.sum",
"paddle.concat",
"numpy.argsort",
"numpy.mean",
"paddle.no_grad",
"paddle.vision.transforms.CenterCrop",
"numpy.reshape",
"numpy.linspace",
"paddle.vision.transforms.RandomCrop",
"tqdm.tqdm",
"paddle.vision.transforms.ToTensor",
"numpy.random.permutation",
"paddle.sign",
"nump... | [((7717, 7733), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (7731, 7733), False, 'import paddle\n'), ((3369, 3383), 'numpy.array', 'np.array', (['rslt'], {}), '(rslt)\n', (3377, 3383), True, 'import numpy as np\n'), ((5903, 6003), 'paddle.io.DataLoader', 'paddle.io.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=\n True, num_workers=1)\n', (5923, 6003), False, 'import paddle\n'), ((6159, 6259), 'paddle.io.DataLoader', 'paddle.io.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(1)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=\n False, num_workers=1)\n', (6179, 6259), False, 'import paddle\n'), ((6416, 6519), 'paddle.io.DataLoader', 'paddle.io.DataLoader', ([], {'dataset': 'database_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(1)'}), '(dataset=database_dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n', (6436, 6519), False, 'import paddle\n'), ((7863, 7879), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (7867, 7879), False, 'from tqdm import tqdm\n'), ((8692, 8708), 'tqdm.tqdm', 'tqdm', (['draw_range'], {}), '(draw_range)\n', (8696, 8708), False, 'from tqdm import tqdm\n'), ((2959, 2987), 'numpy.reshape', 'np.reshape', (['img', '[3, 32, 32]'], {}), '(img, [3, 32, 32])\n', (2969, 2987), True, 'import numpy as np\n'), ((3062, 3082), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3077, 3082), False, 'from PIL import Image\n'), ((4452, 4476), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (4473, 4476), True, 'import numpy as np\n'), ((5417, 5428), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5425, 5428), True, 'import numpy as np\n'), ((5467, 5478), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5475, 5478), True, 'import numpy 
as np\n'), ((5519, 5530), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5527, 5530), True, 'import numpy as np\n'), ((7324, 7443), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['dsets[data_set]'], {'batch_size': "data_config[data_set]['batch_size']", 'shuffle': '(True)', 'num_workers': '(4)'}), "(dsets[data_set], batch_size=data_config[data_set][\n 'batch_size'], shuffle=True, num_workers=4)\n", (7344, 7443), False, 'import paddle\n'), ((8722, 8739), 'numpy.zeros', 'np.zeros', (['n_query'], {}), '(n_query)\n', (8730, 8739), True, 'import numpy as np\n'), ((8752, 8769), 'numpy.zeros', 'np.zeros', (['n_query'], {}), '(n_query)\n', (8760, 8769), True, 'import numpy as np\n'), ((9560, 9576), 'numpy.argsort', 'np.argsort', (['hamm'], {}), '(hamm)\n', (9570, 9576), True, 'import numpy as np\n'), ((9727, 9753), 'numpy.linspace', 'np.linspace', (['(1)', 'tsum', 'tsum'], {}), '(1, tsum, tsum)\n', (9738, 9753), True, 'import numpy as np\n'), ((9829, 9852), 'numpy.mean', 'np.mean', (['(count / tindex)'], {}), '(count / tindex)\n', (9836, 9852), True, 'import numpy as np\n'), ((2365, 2398), 'paddle.vision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2396, 2398), False, 'from paddle.vision import transforms\n'), ((2400, 2432), 'paddle.vision.transforms.RandomCrop', 'transforms.RandomCrop', (['crop_size'], {}), '(crop_size)\n', (2421, 2432), False, 'from paddle.vision import transforms\n'), ((2460, 2492), 'paddle.vision.transforms.CenterCrop', 'transforms.CenterCrop', (['crop_size'], {}), '(crop_size)\n', (2481, 2492), False, 'from paddle.vision import transforms\n'), ((3134, 3144), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (3140, 3144), True, 'import numpy as np\n'), ((3145, 3161), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (3153, 3161), True, 'import numpy as np\n'), ((3638, 3676), 'paddle.vision.transforms.Resize', 'transforms.Resize', (["config['crop_size']"], {}), "(config['crop_size'])\n", (3655, 
3676), False, 'from paddle.vision import transforms\n'), ((3686, 3707), 'paddle.vision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3705, 3707), False, 'from paddle.vision import transforms\n'), ((3717, 3783), 'paddle.vision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (3737, 3783), False, 'from paddle.vision import transforms\n'), ((4736, 4783), 'numpy.concatenate', 'np.concatenate', (['(test_index, index[:test_size])'], {}), '((test_index, index[:test_size]))\n', (4750, 4783), True, 'import numpy as np\n'), ((4810, 4880), 'numpy.concatenate', 'np.concatenate', (['(train_index, index[test_size:train_size + test_size])'], {}), '((train_index, index[test_size:train_size + test_size]))\n', (4824, 4880), True, 'import numpy as np\n'), ((4911, 4975), 'numpy.concatenate', 'np.concatenate', (['(database_index, index[train_size + test_size:])'], {}), '((database_index, index[train_size + test_size:]))\n', (4925, 4975), True, 'import numpy as np\n'), ((5215, 5260), 'numpy.concatenate', 'np.concatenate', (['(train_index, database_index)'], {}), '((train_index, database_index))\n', (5229, 5260), True, 'import numpy as np\n'), ((8108, 8145), 'paddle.concat', 'paddle.concat', (['[all_feas, batch_feas]'], {}), '([all_feas, batch_feas])\n', (8121, 8145), False, 'import paddle\n'), ((8173, 8209), 'paddle.concat', 'paddle.concat', (['[all_image_id, label]'], {}), '([all_image_id, label])\n', (8186, 8209), False, 'import paddle\n'), ((8852, 8863), 'numpy.sum', 'np.sum', (['gnd'], {}), '(gnd)\n', (8858, 8863), True, 'import numpy as np\n'), ((9002, 9013), 'numpy.sum', 'np.sum', (['gnd'], {}), '(gnd)\n', (9008, 9013), True, 'import numpy as np\n'), ((9097, 9107), 'numpy.mean', 'np.mean', (['p'], {}), '(p)\n', (9104, 9107), True, 'import numpy as np\n'), ((9126, 9136), 'numpy.mean', 'np.mean', (['r'], {}), '(r)\n', (9133, 9136), True, 'import numpy as 
np\n'), ((2104, 2120), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2114, 2120), False, 'from PIL import Image\n'), ((2628, 2649), 'paddle.vision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2647, 2649), False, 'from paddle.vision import transforms\n'), ((2682, 2757), 'paddle.vision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2702, 2757), False, 'from paddle.vision import transforms\n'), ((7932, 7955), 'paddle.sign', 'paddle.sign', (['batch_feas'], {}), '(batch_feas)\n', (7943, 7955), False, 'import paddle\n'), ((9643, 9655), 'numpy.sum', 'np.sum', (['tgnd'], {}), '(tgnd)\n', (9649, 9655), True, 'import numpy as np\n'), ((9783, 9802), 'numpy.where', 'np.where', (['(tgnd == 1)'], {}), '(tgnd == 1)\n', (9791, 9802), True, 'import numpy as np\n'), ((2526, 2556), 'paddle.vision.transforms.Resize', 'transforms.Resize', (['resize_size'], {}), '(resize_size)\n', (2543, 2556), False, 'from paddle.vision import transforms\n')] |
#! /usr/bin/env python
from __future__ import print_function
import numpy as np
from scipy.special import gamma, gammaincc
from scipy.interpolate import RegularGridInterpolator
from k_correction import GAMA_KCorrection
class LuminosityFunction(object):
    """Base class for luminosity functions.

    Subclasses implement Phi (differential LF) and Phi_cumulative
    (cumulative LF); this base provides magnitude/luminosity conversions
    and an interpolator that maps (redshift, log10 number density) back to
    a magnitude threshold.
    """
    def __init__(self):
        pass

    def __initialize_interpolator(self):
        """Build a RegularGridInterpolator converting a cumulative number
        density at a given redshift into the corresponding magnitude.
        """
        # grid axes: redshift and log10 number density; magnitudes filled below
        z_grid = np.arange(0, 1, 0.01)
        logn_grid = np.arange(-12, -0.5, 0.01)
        mag_grid = np.zeros((len(z_grid), len(logn_grid)))
        # dense magnitude sampling used to numerically invert Phi_cumulative
        mag_samples = np.arange(-25, -10, 0.001)
        for row, z in enumerate(z_grid):
            # cumulative log10 number density at every sampled magnitude
            logn_at_mag = np.log10(self.Phi_cumulative(mag_samples, z))
            # bracket each target density within the sampled densities
            pos = np.searchsorted(logn_at_mag, logn_grid)
            # linear interpolation between the bracketing samples
            frac = (logn_grid - logn_at_mag[pos-1]) / \
                   (logn_at_mag[pos] - logn_at_mag[pos-1])
            mag_grid[row, :] = mag_samples[pos-1] + frac * (mag_samples[pos] - mag_samples[pos-1])
        return RegularGridInterpolator((z_grid, logn_grid), mag_grid,
                                       bounds_error=False, fill_value=None)

    def Phi(self, magnitude, redshift):
        pass

    def Phi_cumulative(self, magnitude, redshift):
        pass

    def mag2lum(self, magnitude):
        """Convert absolute magnitude to luminosity (relative to the
        4.76 reference magnitude — presumably the Sun's; confirm with caller).
        """
        return 10**((4.76 - magnitude)/2.5)

    def lum2mag(self, luminosity):
        """Convert luminosity to absolute magnitude (inverse of mag2lum)."""
        return 4.76 - 2.5*np.log10(luminosity)

    def magnitude(self, number_density, redshift):
        """Convert cumulative number densities at redshifts to magnitudes
        using the precomputed interpolator (set up by a subclass).
        """
        query = np.array(list(zip(redshift, np.log10(number_density))))
        return self._interpolator(query)
class LuminosityFunctionSchechter(LuminosityFunction):
"""
Schecter luminosity function with evolution
"""
def __init__(self, Phi_star, M_star, alpha, P, Q):
# Evolving Shechter luminosity function parameters
self.Phi_star = Phi_star
self.M_star = M_star
self.alpha = alpha
self.P = P
self.Q = Q
def Phi(self, magnitude, redshift):
# evolve M_star and Phi_star to redshift
M_star = self.M_star - self.Q * (redshift - 0.1)
Phi_star = self.Phi_star * 10**(0.4*self.P*redshift)
# calculate luminosity function
lf = 0.4 * np.log(10) * Phi_star
lf *= (10**(0.4*(M_star-magnitude)))**(self.alpha+1)
lf *= np.exp(-10**(0.4*(M_star-magnitude)))
return lf
def Phi_cumulative(self, magnitude, redshift):
# evolve M_star and Phi_star to redshift
M_star = self.M_star - self.Q * (redshift - 0.1)
Phi_star = self.Phi_star * 10**(0.4*self.P*redshift)
# calculate cumulative luminosity function
t = 10**(0.4 * (M_star-magnitude))
lf = Phi_star*(gammaincc(self.alpha+2, t)*gamma(self.alpha+2) - \
t**(self.alpha+1)*np.exp(-t)) / (self.alpha+1)
return lf
class LuminosityFunctionTabulated(LuminosityFunction):
"""
Luminosity function from tabulated file, with evolution
"""
def __init__(self, filename, P, Q):
self.magnitude, self.log_number_density = \
np.loadtxt(filename, unpack=True)
self.P = P
self.Q = Q
self.__lf_interpolator = \
RegularGridInterpolator((self.magnitude,), self.log_number_density,
bounds_error=False, fill_value=None)
def Phi(self, magnitude, redshift):
pass
def Phi_cumulative(self, magnitude, redshift):
# shift magnitudes to redshift z=0.1
magnitude01 = magnitude + self.Q * (redshift - 0.1)
# find interpolated number density at z=0.1
log_lf01 = self.__lf_interpolator(magnitude01)
# shift back to redshift
log_lf = log_lf01 + 0.4 * self.P * (redshift - 0.1)
return 10**log_lf
class LuminosityFunctionTarget(LuminosityFunction):
def __init__(self, filename, Phi_star, M_star, alpha, P, Q):
self.lf_sdss = LuminosityFunctionTabulated(filename, P, Q)
self.lf_gama = \
LuminosityFunctionSchechter(Phi_star, M_star, alpha, P, Q)
self._interpolator = \
self._LuminosityFunction__initialize_interpolator()
def transition(self, redshift):
"""
Function which describes the transition between the SDSS LF
at low z and the GAMA LF at high z
"""
return 1. / (1. + np.exp(-100*(redshift-0.15)))
def Phi(self, magnitude, redshift):
pass
def Phi_cumulative(self, magnitude, redshift):
w = self.transition(redshift)
lf_sdss = self.lf_sdss.Phi_cumulative(magnitude, redshift)
lf_gama = self.lf_gama.Phi_cumulative(magnitude, redshift)
return w*lf_sdss + (1-w)*lf_gama
def test():
    """Visual sanity checks: plot the target LF, a magnitude round-trip,
    and the SDSS/GAMA/target components at several redshifts.
    """
    import matplotlib.pyplot as plt
    import parameters as par

    mags = np.arange(0, -25, -0.1)
    redshifts = np.ones(len(mags)) * 0.0005
    lf_targ = LuminosityFunctionTarget(par.lf_file, par.Phi_star, par.M_star,
                                     par.alpha, par.P, par.Q)

    # round-trip: magnitude -> cumulative density -> magnitude (dashed)
    logn = np.log10(lf_targ.Phi_cumulative(mags, redshifts))
    recovered = lf_targ.magnitude(10**logn, redshifts)
    plt.plot(mags, logn)
    plt.plot(recovered, logn, ls="--")
    plt.show()

    lf_gama = LuminosityFunctionSchechter(par.Phi_star, par.M_star, par.alpha,
                                          par.P, par.Q)
    lf_sdss = LuminosityFunctionTabulated(par.lf_file, par.P, par.Q)
    lf_targ = LuminosityFunctionTarget(par.lf_file, par.Phi_star, par.M_star,
                                     par.alpha, par.P, par.Q)

    # SDSS (blue), GAMA (green) and blended target (red dashed) per redshift
    for z in np.arange(0, 0.26, 0.05):
        plt.plot(mags, lf_sdss.Phi_cumulative(mags, z), c="b")
        plt.plot(mags, lf_gama.Phi_cumulative(mags, z), c="g")
        plt.plot(mags, lf_targ.Phi_cumulative(mags, z), c="r", ls="--")
        plt.yscale("log")
        plt.title("z = %.2f" % z)
        plt.xlabel("mag")
        plt.ylabel("cumulative LF")
        plt.xlim(-18, -23)
        plt.ylim(1e-6, 3e-2)
        plt.show()
# Run the plotting sanity checks when executed as a script.
if __name__ == "__main__":
    test()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"scipy.special.gamma",
"numpy.searchsorted",
"scipy.interpolate.RegularGridInterpolator",
"numpy.arange",
"numpy.exp",
"... | [((5543, 5566), 'numpy.arange', 'np.arange', (['(0)', '(-25)', '(-0.1)'], {}), '(0, -25, -0.1)\n', (5552, 5566), True, 'import numpy as np\n'), ((5843, 5863), 'matplotlib.pyplot.plot', 'plt.plot', (['mags', 'logn'], {}), '(mags, logn)\n', (5851, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5868, 5896), 'matplotlib.pyplot.plot', 'plt.plot', (['mag', 'logn'], {'ls': '"""--"""'}), "(mag, logn, ls='--')\n", (5876, 5896), True, 'import matplotlib.pyplot as plt\n'), ((5901, 5911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5909, 5911), True, 'import matplotlib.pyplot as plt\n'), ((6275, 6299), 'numpy.arange', 'np.arange', (['(0)', '(0.26)', '(0.05)'], {}), '(0, 0.26, 0.05)\n', (6284, 6299), True, 'import numpy as np\n'), ((580, 601), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (589, 601), True, 'import numpy as np\n'), ((633, 659), 'numpy.arange', 'np.arange', (['(-12)', '(-0.5)', '(0.01)'], {}), '(-12, -0.5, 0.01)\n', (642, 659), True, 'import numpy as np\n'), ((803, 829), 'numpy.arange', 'np.arange', (['(-25)', '(-10)', '(0.001)'], {}), '(-25, -10, 0.001)\n', (812, 829), True, 'import numpy as np\n'), ((1481, 1592), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(redshifts, log_number_densities)', 'magnitudes'], {'bounds_error': '(False)', 'fill_value': 'None'}), '((redshifts, log_number_densities), magnitudes,\n bounds_error=False, fill_value=None)\n', (1504, 1592), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((2985, 3028), 'numpy.exp', 'np.exp', (['(-10 ** (0.4 * (M_star - magnitude)))'], {}), '(-10 ** (0.4 * (M_star - magnitude)))\n', (2991, 3028), True, 'import numpy as np\n'), ((3787, 3820), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'unpack': '(True)'}), '(filename, unpack=True)\n', (3797, 3820), True, 'import numpy as np\n'), ((3908, 4016), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(self.magnitude,)', 
'self.log_number_density'], {'bounds_error': '(False)', 'fill_value': 'None'}), '((self.magnitude,), self.log_number_density,\n bounds_error=False, fill_value=None)\n', (3931, 4016), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((6355, 6381), 'matplotlib.pyplot.plot', 'plt.plot', (['mags', 'phi'], {'c': '"""b"""'}), "(mags, phi, c='b')\n", (6363, 6381), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6463), 'matplotlib.pyplot.plot', 'plt.plot', (['mags', 'phi'], {'c': '"""g"""'}), "(mags, phi, c='g')\n", (6445, 6463), True, 'import matplotlib.pyplot as plt\n'), ((6519, 6554), 'matplotlib.pyplot.plot', 'plt.plot', (['mags', 'phi'], {'c': '"""r"""', 'ls': '"""--"""'}), "(mags, phi, c='r', ls='--')\n", (6527, 6554), True, 'import matplotlib.pyplot as plt\n'), ((6564, 6581), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6574, 6581), True, 'import matplotlib.pyplot as plt\n'), ((6590, 6615), 'matplotlib.pyplot.title', 'plt.title', (["('z = %.2f' % z)"], {}), "('z = %.2f' % z)\n", (6599, 6615), True, 'import matplotlib.pyplot as plt\n'), ((6622, 6639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mag"""'], {}), "('mag')\n", (6632, 6639), True, 'import matplotlib.pyplot as plt\n'), ((6648, 6675), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cumulative LF"""'], {}), "('cumulative LF')\n", (6658, 6675), True, 'import matplotlib.pyplot as plt\n'), ((6684, 6702), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-18)', '(-23)'], {}), '(-18, -23)\n', (6692, 6702), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6732), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-06)', '(0.03)'], {}), '(1e-06, 0.03)\n', (6719, 6732), True, 'import matplotlib.pyplot as plt\n'), ((6740, 6750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6748, 6750), True, 'import matplotlib.pyplot as plt\n'), ((1105, 1150), 'numpy.searchsorted', 'np.searchsorted', (['log_ns', 'log_number_densities'], {}), '(log_ns, log_number_densities)\n', 
(1120, 1150), True, 'import numpy as np\n'), ((2000, 2020), 'numpy.log10', 'np.log10', (['luminosity'], {}), '(luminosity)\n', (2008, 2020), True, 'import numpy as np\n'), ((2888, 2898), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (2894, 2898), True, 'import numpy as np\n'), ((5090, 5122), 'numpy.exp', 'np.exp', (['(-100 * (redshift - 0.15))'], {}), '(-100 * (redshift - 0.15))\n', (5096, 5122), True, 'import numpy as np\n'), ((2186, 2210), 'numpy.log10', 'np.log10', (['number_density'], {}), '(number_density)\n', (2194, 2210), True, 'import numpy as np\n'), ((3388, 3416), 'scipy.special.gammaincc', 'gammaincc', (['(self.alpha + 2)', 't'], {}), '(self.alpha + 2, t)\n', (3397, 3416), False, 'from scipy.special import gamma, gammaincc\n'), ((3415, 3436), 'scipy.special.gamma', 'gamma', (['(self.alpha + 2)'], {}), '(self.alpha + 2)\n', (3420, 3436), False, 'from scipy.special import gamma, gammaincc\n'), ((3484, 3494), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (3490, 3494), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
import argparse
import logging
import os
import pickle
import sys
import numpy as np
import pandas as pd
import misc.logging_utils as logging_utils
import misc.parallel as parallel
import misc.utils as utils
import misc.pandas_utils as pandas_utils
from misc.suppress_stdout_stderr import suppress_stdout_stderr
logger = logging.getLogger(__name__)
# default values for the command-line arguments defined in main()
default_num_cpus = 1
# pickled StanModel files representing periodic / nonperiodic profiles
default_periodic_models = []
default_nonperiodic_models = []
# offsets (relative to the translation initiation site) over which Bayes
# factors are estimated, and the signal window length fed to the models
default_periodic_offset_start = -20
default_periodic_offset_end = 0
default_metagene_profile_length = 21
# column names expected in the metagene profile file
default_type_field = 'type'
default_position_field = 'position'
default_count_field = 'count'
# MCMC sampling settings
default_iterations = 500
default_chains = 2
default_seed = 8675309
class NullDevice:
    """Minimal write-only sink that silently discards everything written
    to it (usable in place of a real stream to suppress output).
    """
    def write(self, s):
        # intentionally discard `s`
        pass
def estimate_marginal_likelihoods(signal, periodic_models, nonperiodic_models, iterations, chains, seed):
    """Fit every candidate model to the frame-split signal.

    The signal is split into its three reading frames (every 3rd value),
    packaged with its length and maximum, and passed to each model's
    sampling() method.

    Returns:
        (periodic_fits, nonperiodic_fits): lists of sampling results, in
        the same order as the input model lists.
    """
    # split the signal by reading frame and build the model input
    data = {
        "x_1": signal[0::3],
        "x_2": signal[1::3],
        "x_3": signal[2::3],
    }
    data["T"] = len(data["x_1"])
    data["very_high_prior_location"] = max(signal)

    def _fit(model):
        # one MCMC run per model; refresh=-1 silences the sampler output
        return model.sampling(data=data, iter=iterations, chains=chains,
                              n_jobs=1, seed=seed, refresh=-1)

    periodic_fits = [_fit(pm) for pm in periodic_models]
    nonperiodic_fits = [_fit(nm) for nm in nonperiodic_models]
    return (periodic_fits, nonperiodic_fits)
def estimate_profile_bayes_factors(profile, args):
    """Estimate periodicity Bayes factors for one metagene profile (one read length).

    For each start offset in [args.periodic_offset_start, args.periodic_offset_end],
    a window of args.metagene_profile_length counts is extracted and fit with
    all periodic and nonperiodic models; the best model of each class (by mean
    log posterior) forms the Bayes factor estimate for that offset.

    Args:
        profile (pd.DataFrame): metagene profile rows for a single read length;
            must contain a 'length' column and the type/position/count fields
            named in args.
        args (argparse.Namespace): parsed arguments, including model paths,
            offsets, window length, and MCMC settings.

    Returns:
        pd.DataFrame with one row per offset, or None if the requested start
        or end offset is not present in the profile.
    """
    length = profile['length'].iloc[0]

    # read in the relevant models; use context managers so the file handles
    # are closed promptly (pickle.load(open(...)) leaked them)
    periodic_models = []
    for pm in args.periodic_models:
        with open(pm, 'rb') as model_file:
            periodic_models.append(pickle.load(model_file))

    nonperiodic_models = []
    for npm in args.nonperiodic_models:
        with open(npm, 'rb') as model_file:
            nonperiodic_models.append(pickle.load(model_file))

    # pull out the start offsets ("position" field) and counts
    mask_start = profile[args.type_field] == 'start'
    start_profile_df = profile.loc[mask_start]
    start_profile_df = start_profile_df.sort_values(args.position_field)

    start_positions = start_profile_df[args.position_field].values
    start_counts = start_profile_df[args.count_field].values

    # find the positions of the offsets of interest within the arrays
    begin_start_pos = np.where(start_positions == args.periodic_offset_start)[0]
    if len(begin_start_pos) == 0:
        msg = "Did not find any start offsets for length: {}".format(length)
        # use the module logger rather than the root logger, consistent with
        # the rest of this module
        logger.warning(msg)
        return None
    begin_index = begin_start_pos[0]

    stop_start_pos = np.where(start_positions == args.periodic_offset_end)[0]
    if len(stop_start_pos) == 0:
        msg = "Did not find any stop offsets for length: {}".format(length)
        logger.warning(msg)
        return None
    stop_index = stop_start_pos[0]

    # collect all of the results as a data frame
    ret = []
    for i in range(begin_index, stop_index+1):
        offset = start_positions[i]
        msg = "Length: {}, Offset: {}".format(length, offset)
        logger.debug(msg)

        # pull out the signal for this offset
        signal = start_counts[i:i+args.metagene_profile_length]

        (bft_periodic, bft_nonperiodic) = estimate_marginal_likelihoods(signal,
            periodic_models, nonperiodic_models,
            iterations=args.iterations, chains=args.chains, seed=args.seed)

        # extract the log posterior samples for each fit
        m_periodic_ex = [m.extract(pars=['lp__']) for m in bft_periodic]
        m_nonperiodic_ex = [m.extract(pars=['lp__']) for m in bft_nonperiodic]

        # choose the best model of each class, based on mean log posterior
        m_periodic_means = [np.mean(m_ex['lp__']) for m_ex in m_periodic_ex]
        m_nonperiodic_means = [np.mean(m_ex['lp__']) for m_ex in m_nonperiodic_ex]

        max_periodic_mean = np.argmax(m_periodic_means)
        max_nonperiodic_mean = np.argmax(m_nonperiodic_means)

        # select the best sampling results
        m_periodic_ex = m_periodic_ex[max_periodic_mean]
        m_nonperiodic_ex = m_nonperiodic_ex[max_nonperiodic_mean]

        profile_sum = np.sum(signal)
        profile_peak = signal[0]

        v = {
            "offset": offset,
            "p_periodic_mean": np.mean(m_periodic_ex['lp__']),
            "p_periodic_var": np.var(m_periodic_ex['lp__']),
            "p_nonperiodic_mean": np.mean(m_nonperiodic_ex['lp__']),
            "p_nonperiodic_var": np.var(m_nonperiodic_ex['lp__']),
            'profile_sum': profile_sum,
            'profile_peak': profile_peak
        }
        v['bayes_factor_mean'] = v['p_periodic_mean'] - v['p_nonperiodic_mean']
        # BUG FIX: the variance of a difference of independent estimates is
        # the SUM of both variances; the original added the periodic
        # variance twice and ignored the nonperiodic one.
        v['bayes_factor_var'] = v['p_periodic_var'] + v['p_nonperiodic_var']

        ret.append(pd.Series(v))

    ret = pd.DataFrame(ret)
    ret['length'] = length
    return ret
def main():
    """Estimate periodicity Bayes factors for every read length in the
    metagene profiles file and write the combined estimates to the output.

    Each read length is processed in its own parallel worker; per-length
    data frames are concatenated into a single (csv.gz) output file.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="This script estimates the Bayes factors for all metagene profiles in the "
        "given file. The script accepts as input multiple \"periodic\" and \"nonperiodic\" "
        "models. It uses the models of each type with the best mean to estimate the Bayes "
        "factor distributions.\n\nIt contains some hard-coded field names.")
    parser.add_argument('metagene_profiles', help="The (csv) file containing the metagene profiles")
    parser.add_argument('out', help="The output (csv.gz) file")
    parser.add_argument('--periodic-models', help="A list of pickled StanModel files which contain "
        "models that somehow represent periodic metagene profiles", nargs="+",
        default=default_periodic_models)
    parser.add_argument('--nonperiodic-models', help="A list of pickled StanModel files which contain "
        "models that somehow represent nonperiodic metagene profiles", nargs="+",
        default=default_nonperiodic_models)
    parser.add_argument('--periodic-offset-start', help="The position, relative to the translation "
        "initiation site, to begin calculating periodicity Bayes factors (inclusive)", type=int,
        default=default_periodic_offset_start)
    parser.add_argument('--periodic-offset-end', help="The position, relative to the translation "
        "initiation site, to stop calculating periodicity Bayes factors (inclusive)", type=int,
        default=default_periodic_offset_end)
    parser.add_argument('--metagene-profile-length', help="The length of the profile to use in the "
        "models. metagene_profile_length + periodic_offset_end must be consistent with the length "
        "of the extracted metagene profile. The length must be divisible by three.", type=int,
        default=default_metagene_profile_length)
    parser.add_argument('-s', '--seed', help="The random seeds to use for inference",
        type=int, default=default_seed)
    parser.add_argument('-c', '--chains', help="The number of MCMC chains to use", type=int,
        default=default_chains)
    parser.add_argument('-i', '--iterations', help="The number of MCMC iterations to use for "
        "each chain", type=int, default=default_iterations)
    parser.add_argument('-p', '--num-cpus', help="The number of CPUs to use. Each read "
        "length will be processed in its own thread (so that is the maximum number of CPUs "
        "that is useful).", type=int, default=default_num_cpus)
    parser.add_argument('--type-field', default=default_type_field)
    parser.add_argument('--count-field', default=default_count_field)
    parser.add_argument('--position-field', default=default_position_field)
    logging_utils.add_logging_options(parser)
    args = parser.parse_args()
    logging_utils.update_logging(args)
    # we will parallelize based on the lengths. So we need to know which lengths
    # are present in the metagene profiles file
    metagene_profiles = pd.read_csv(args.metagene_profiles)
    lengths = list(metagene_profiles['length'].unique())
    length_str = ','.join(str(int(l)) for l in lengths)
    msg = "Estimating Bayes factors for lengths: {}".format(length_str)
    logger.info(msg)
    length_groups = metagene_profiles.groupby('length')
    # suppress stdout/stderr around the whole parallel run (presumably to
    # hide the samplers' console output -- see suppress_stdout_stderr)
    with suppress_stdout_stderr():
        all_profile_estimates_df = parallel.apply_parallel_groups(
            length_groups,
            args.num_cpus,
            estimate_profile_bayes_factors,
            args,
            progress_bar=True
        )
    msg = "Combining estimates into one data frame"
    logger.info(msg)
    # lengths with no valid start/stop offsets yield None; drop those
    # entries before concatenating the per-length results
    all_profile_estimates_df = utils.remove_nones(all_profile_estimates_df)
    all_profile_estimates_df = pd.concat(all_profile_estimates_df)
    pandas_utils.write_df(all_profile_estimates_df, args.out, index=False)
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"pandas.DataFrame",
"misc.pandas_utils.write_df",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"logging.warning",
"misc.logging_utils.update_logging",
"misc.utils.remove_nones",
"misc.logging_utils.add_logging_options",
"misc.suppress_stdout_stderr.suppress_stdout_s... | [((350, 377), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (367, 377), False, 'import logging\n'), ((4920, 4937), 'pandas.DataFrame', 'pd.DataFrame', (['ret'], {}), '(ret)\n', (4932, 4937), True, 'import pandas as pd\n'), ((5007, 5416), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""This script estimates the Bayes factors for all metagene profiles in the given file. The script accepts as input multiple "periodic" and "nonperiodic" models. It uses the models of each type with the best mean to estimate the Bayes factor distributions.\n\nIt contains some hard-coded field names."""'}), '(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n """This script estimates the Bayes factors for all metagene profiles in the given file. The script accepts as input multiple "periodic" and "nonperiodic" models. 
It uses the models of each type with the best mean to estimate the Bayes factor distributions.\n\nIt contains some hard-coded field names."""\n )\n', (5030, 5416), False, 'import argparse\n'), ((7784, 7825), 'misc.logging_utils.add_logging_options', 'logging_utils.add_logging_options', (['parser'], {}), '(parser)\n', (7817, 7825), True, 'import misc.logging_utils as logging_utils\n'), ((7861, 7895), 'misc.logging_utils.update_logging', 'logging_utils.update_logging', (['args'], {}), '(args)\n', (7889, 7895), True, 'import misc.logging_utils as logging_utils\n'), ((8050, 8085), 'pandas.read_csv', 'pd.read_csv', (['args.metagene_profiles'], {}), '(args.metagene_profiles)\n', (8061, 8085), True, 'import pandas as pd\n'), ((8740, 8784), 'misc.utils.remove_nones', 'utils.remove_nones', (['all_profile_estimates_df'], {}), '(all_profile_estimates_df)\n', (8758, 8784), True, 'import misc.utils as utils\n'), ((8817, 8852), 'pandas.concat', 'pd.concat', (['all_profile_estimates_df'], {}), '(all_profile_estimates_df)\n', (8826, 8852), True, 'import pandas as pd\n'), ((8858, 8928), 'misc.pandas_utils.write_df', 'pandas_utils.write_df', (['all_profile_estimates_df', 'args.out'], {'index': '(False)'}), '(all_profile_estimates_df, args.out, index=False)\n', (8879, 8928), True, 'import misc.pandas_utils as pandas_utils\n'), ((2425, 2480), 'numpy.where', 'np.where', (['(start_positions == args.periodic_offset_start)'], {}), '(start_positions == args.periodic_offset_start)\n', (2433, 2480), True, 'import numpy as np\n'), ((2601, 2621), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (2616, 2621), False, 'import logging\n'), ((2701, 2754), 'numpy.where', 'np.where', (['(start_positions == args.periodic_offset_end)'], {}), '(start_positions == args.periodic_offset_end)\n', (2709, 2754), True, 'import numpy as np\n'), ((2873, 2893), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (2888, 2893), False, 'import logging\n'), ((3988, 4015), 'numpy.argmax', 
'np.argmax', (['m_periodic_means'], {}), '(m_periodic_means)\n', (3997, 4015), True, 'import numpy as np\n'), ((4047, 4077), 'numpy.argmax', 'np.argmax', (['m_nonperiodic_means'], {}), '(m_nonperiodic_means)\n', (4056, 4077), True, 'import numpy as np\n'), ((4268, 4282), 'numpy.sum', 'np.sum', (['signal'], {}), '(signal)\n', (4274, 4282), True, 'import numpy as np\n'), ((8360, 8384), 'misc.suppress_stdout_stderr.suppress_stdout_stderr', 'suppress_stdout_stderr', ([], {}), '()\n', (8382, 8384), False, 'from misc.suppress_stdout_stderr import suppress_stdout_stderr\n'), ((8422, 8543), 'misc.parallel.apply_parallel_groups', 'parallel.apply_parallel_groups', (['length_groups', 'args.num_cpus', 'estimate_profile_bayes_factors', 'args'], {'progress_bar': '(True)'}), '(length_groups, args.num_cpus,\n estimate_profile_bayes_factors, args, progress_bar=True)\n', (8452, 8543), True, 'import misc.parallel as parallel\n'), ((3827, 3848), 'numpy.mean', 'np.mean', (["m_ex['lp__']"], {}), "(m_ex['lp__'])\n", (3834, 3848), True, 'import numpy as np\n'), ((3907, 3928), 'numpy.mean', 'np.mean', (["m_ex['lp__']"], {}), "(m_ex['lp__'])\n", (3914, 3928), True, 'import numpy as np\n'), ((4392, 4422), 'numpy.mean', 'np.mean', (["m_periodic_ex['lp__']"], {}), "(m_periodic_ex['lp__'])\n", (4399, 4422), True, 'import numpy as np\n'), ((4454, 4483), 'numpy.var', 'np.var', (["m_periodic_ex['lp__']"], {}), "(m_periodic_ex['lp__'])\n", (4460, 4483), True, 'import numpy as np\n'), ((4519, 4552), 'numpy.mean', 'np.mean', (["m_nonperiodic_ex['lp__']"], {}), "(m_nonperiodic_ex['lp__'])\n", (4526, 4552), True, 'import numpy as np\n'), ((4587, 4619), 'numpy.var', 'np.var', (["m_nonperiodic_ex['lp__']"], {}), "(m_nonperiodic_ex['lp__'])\n", (4593, 4619), True, 'import numpy as np\n'), ((4887, 4899), 'pandas.Series', 'pd.Series', (['v'], {}), '(v)\n', (4896, 4899), True, 'import pandas as pd\n')] |
"""
A module that contains a metaclass mixin that provides Galois field class properties.
"""
import math
import numpy as np
from .._poly_conversion import integer_to_poly, poly_to_str
from ._dtypes import DTYPES
class PropertiesMeta(type):
    """
    A mixin metaclass that contains Galois field properties.
    """
    # pylint: disable=no-value-for-parameter
    def __init__(cls, name, bases, namespace, **kwargs):
        # Store the field parameters supplied by the class factory
        # (defaults of 0/None are used when a kwarg is absent).
        cls._characteristic = kwargs.get("characteristic", 0)
        cls._degree = kwargs.get("degree", 0)
        cls._order = kwargs.get("order", 0)
        cls._order_str = None
        cls._ufunc_mode = None
        cls._ufunc_target = None
        cls._dtypes = cls._determine_dtypes()
        super().__init__(name, bases, namespace, **kwargs)
        # The irreducible polynomial is only present for explicitly
        # constructed field classes; cache its integer form as well.
        if "irreducible_poly" in kwargs:
            cls._irreducible_poly = kwargs["irreducible_poly"]
            cls._irreducible_poly_int = cls._irreducible_poly.integer
        else:
            cls._irreducible_poly = None
            cls._irreducible_poly_int = 0
        cls._primitive_element = kwargs.get("primitive_element", None)
        cls._is_primitive_poly = kwargs.get("is_primitive_poly", None)
        cls._prime_subfield = None
        cls._display_mode = "int"
        # Human-readable order string, e.g. "order=31" or "order=2^8".
        if cls.degree == 1:
            cls._order_str = f"order={cls.order}"
        else:
            cls._order_str = f"order={cls.characteristic}^{cls.degree}"
    ###############################################################################
    # Helper methods
    ###############################################################################
    def _determine_dtypes(cls):
        """
        At a minimum, valid dtypes are ones that can hold x for x in [0, order).
        """
        dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= cls.order - 1]
        # No fixed-width integer dtype can hold order - 1; fall back to
        # arbitrary-precision Python objects.
        if len(dtypes) == 0:
            dtypes = [np.object_]
        return dtypes
    ###############################################################################
    # Class attributes
    ###############################################################################
    @property
    def name(cls):
        """
        str: The Galois field name.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).name
            galois.GF(2**8).name
            galois.GF(31).name
            galois.GF(7**5).name
        """
        if cls._degree == 1:
            return f"GF({cls._characteristic})"
        else:
            return f"GF({cls._characteristic}^{cls._degree})"
    @property
    def characteristic(cls):
        r"""
        int: The prime characteristic :math:`p` of the Galois field :math:`\mathrm{GF}(p^m)`. Adding
        :math:`p` copies of any element will always result in :math:`0`.
        Examples
        --------
        .. ipython:: python
            GF = galois.GF(2**8, display="poly")
            GF.characteristic
            a = GF.Random(low=1); a
            a * GF.characteristic
            @suppress
            GF.display();
        .. ipython:: python
            GF = galois.GF(31)
            GF.characteristic
            a = GF.Random(low=1); a
            a * GF.characteristic
        """
        return cls._characteristic
    @property
    def degree(cls):
        r"""
        int: The prime characteristic's degree :math:`m` of the Galois field :math:`\mathrm{GF}(p^m)`. The degree
        is a positive integer.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).degree
            galois.GF(2**8).degree
            galois.GF(31).degree
            galois.GF(7**5).degree
        """
        return cls._degree
    @property
    def order(cls):
        r"""
        int: The order :math:`p^m` of the Galois field :math:`\mathrm{GF}(p^m)`. The order of the field is also equal to
        the field's size.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).order
            galois.GF(2**8).order
            galois.GF(31).order
            galois.GF(7**5).order
        """
        return cls._order
    @property
    def irreducible_poly(cls):
        r"""
        galois.Poly: The irreducible polynomial :math:`f(x)` of the Galois field :math:`\mathrm{GF}(p^m)`. The irreducible
        polynomial is of degree :math:`m` over :math:`\mathrm{GF}(p)`.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).irreducible_poly
            galois.GF(2**8).irreducible_poly
            galois.GF(31).irreducible_poly
            galois.GF(7**5).irreducible_poly
        """
        # Ensure accesses of this property don't alter it
        return cls._irreducible_poly.copy()
        # return Poly(cls._irreducible_poly, field=cls.prime_subfield)
    @property
    def is_primitive_poly(cls):
        r"""
        bool: Indicates whether the :obj:`irreducible_poly` is a primitive polynomial. If so, :math:`x` is a primitive element
        of the Galois field.
        Examples
        --------
        .. ipython:: python
            GF = galois.GF(2**8, display="poly")
            GF.irreducible_poly
            GF.primitive_element
            # The irreducible polynomial is a primitive polynomial if the primitive element is a root
            GF.irreducible_poly(GF.primitive_element, field=GF)
            GF.is_primitive_poly
            @suppress
            GF.display();
        Here is an example using the :math:`\mathrm{GF}(2^8)` field from AES, which does not use a primitive polynomial.
        .. ipython:: python
            GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]), display="poly")
            GF.irreducible_poly
            GF.primitive_element
            # The irreducible polynomial is a primitive polynomial if the primitive element is a root
            GF.irreducible_poly(GF.primitive_element, field=GF)
            GF.is_primitive_poly
            @suppress
            GF.display();
        """
        return cls._is_primitive_poly
    @property
    def primitive_element(cls):
        r"""
        galois.FieldArray: A primitive element :math:`\alpha` of the Galois field :math:`\mathrm{GF}(p^m)`. A primitive element is a multiplicative
        generator of the field, such that :math:`\mathrm{GF}(p^m) = \{0, 1, \alpha, \alpha^2, \dots, \alpha^{p^m - 2}\}`.
        A primitive element is a root of the primitive polynomial :math:`f(x)`, such that :math:`f(\alpha) = 0` over
        :math:`\mathrm{GF}(p^m)`.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).primitive_element
            galois.GF(2**8).primitive_element
            galois.GF(31).primitive_element
            galois.GF(7**5).primitive_element
        """
        # Ensure accesses of this property doesn't alter it
        return cls(cls._primitive_element)  # pylint: disable=no-value-for-parameter
    @property
    def primitive_elements(cls):
        r"""
        galois.FieldArray: All primitive elements :math:`\alpha` of the Galois field :math:`\mathrm{GF}(p^m)`. A primitive element is a multiplicative
        generator of the field, such that :math:`\mathrm{GF}(p^m) = \{0, 1, \alpha, \alpha^2, \dots, \alpha^{p^m - 2}\}`.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).primitive_elements
            galois.GF(2**8).primitive_elements
            galois.GF(31).primitive_elements
            galois.GF(7**5).primitive_elements
        """
        # The primitive elements are alpha**t for all t coprime with
        # order - 1 (the totatives of the multiplicative group order).
        n = cls.order - 1
        totatives = [t for t in range(1, n + 1) if math.gcd(n, t) == 1]
        powers = np.array(totatives)
        return np.sort(cls.primitive_element ** powers)
    @property
    def is_prime_field(cls):
        """
        bool: Indicates if the field's order is prime.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).is_prime_field
            galois.GF(2**8).is_prime_field
            galois.GF(31).is_prime_field
            galois.GF(7**5).is_prime_field
        """
        return cls._degree == 1
    @property
    def is_extension_field(cls):
        """
        bool: Indicates if the field's order is a prime power.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).is_extension_field
            galois.GF(2**8).is_extension_field
            galois.GF(31).is_extension_field
            galois.GF(7**5).is_extension_field
        """
        return cls._degree > 1
    @property
    def prime_subfield(cls):
        r"""
        galois.FieldClass: The prime subfield :math:`\mathrm{GF}(p)` of the extension field :math:`\mathrm{GF}(p^m)`.
        Examples
        --------
        .. ipython:: python
            print(galois.GF(2).prime_subfield.properties)
            print(galois.GF(2**8).prime_subfield.properties)
            print(galois.GF(31).prime_subfield.properties)
            print(galois.GF(7**5).prime_subfield.properties)
        """
        return cls._prime_subfield
    @property
    def dtypes(cls):
        """
        list: List of valid integer :obj:`numpy.dtype` values that are compatible with this Galois field. Creating an array with an
        unsupported dtype will throw a `TypeError` exception.
        Examples
        --------
        .. ipython:: python
            GF = galois.GF(2); GF.dtypes
            GF = galois.GF(2**8); GF.dtypes
            GF = galois.GF(31); GF.dtypes
            GF = galois.GF(7**5); GF.dtypes
        For Galois fields that cannot be represented by :obj:`numpy.int64`, the only valid dtype is :obj:`numpy.object_`.
        .. ipython:: python
            GF = galois.GF(2**100); GF.dtypes
            GF = galois.GF(36893488147419103183); GF.dtypes
        """
        return cls._dtypes
    @property
    def display_mode(cls):
        r"""
        str: The representation of Galois field elements, either `"int"`, `"poly"`, or `"power"`. This can be
        changed with :func:`display`.
        Examples
        --------
        For the polynomial representation, when the primitive element is :math:`\alpha = x` in :math:`\mathrm{GF}(p)[x]` the polynomial
        indeterminate used is :math:`\alpha`.
        .. ipython:: python
            GF = galois.GF(2**8)
            print(GF.properties)
            a = GF.Random()
            print(GF.display_mode, a)
            with GF.display("poly"):
                print(GF.display_mode, a)
            with GF.display("power"):
                print(GF.display_mode, a)
            # The display mode is reset after exiting the context manager
            print(GF.display_mode, a)
        But when the primitive element is :math:`\alpha \ne x` in :math:`\mathrm{GF}(p)[x]`, the polynomial
        indeterminate used is :math:`x`.
        .. ipython:: python
            GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))
            print(GF.properties)
            a = GF.Random()
            print(GF.display_mode, a)
            with GF.display("poly"):
                print(GF.display_mode, a)
            with GF.display("power"):
                print(GF.display_mode, a)
            # The display mode is reset after exiting the context manager
            print(GF.display_mode, a)
        The power representation displays elements as powers of :math:`\alpha` the primitive element, see
        :obj:`FieldClass.primitive_element`.
        .. ipython:: python
            with GF.display("power"):
                print(GF.display_mode, a)
            # The display mode is reset after exiting the context manager
            print(GF.display_mode, a)
        """
        return cls._display_mode
    @property
    def ufunc_mode(cls):
        """
        str: The mode for ufunc compilation, either `"jit-lookup"`, `"jit-calculate"`, or `"python-calculate"`.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).ufunc_mode
            galois.GF(2**8).ufunc_mode
            galois.GF(31).ufunc_mode
            galois.GF(7**5).ufunc_mode
        """
        return cls._ufunc_mode
    @property
    def ufunc_modes(cls):
        """
        list: All supported ufunc modes for this Galois field array class.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).ufunc_modes
            galois.GF(2**8).ufunc_modes
            galois.GF(31).ufunc_modes
            galois.GF(2**100).ufunc_modes
        """
        # Object-dtype fields cannot be JIT compiled; pure Python only.
        if cls.dtypes == [np.object_]:
            return ["python-calculate"]
        else:
            return ["jit-lookup", "jit-calculate"]
    @property
    def default_ufunc_mode(cls):
        """
        str: The default ufunc arithmetic mode for this Galois field.
        Examples
        --------
        .. ipython:: python
            galois.GF(2).default_ufunc_mode
            galois.GF(2**8).default_ufunc_mode
            galois.GF(31).default_ufunc_mode
            galois.GF(2**100).default_ufunc_mode
        """
        if cls.dtypes == [np.object_]:
            return "python-calculate"
        # Small fields (order <= 2^20) default to lookup tables; larger
        # fields compute arithmetic on the fly.
        elif cls.order <= 2**20:
            return "jit-lookup"
        else:
            return "jit-calculate"
    @property
    def properties(cls):
        """
        str: A formatted string displaying relevant properties of the Galois field.
        Examples
        --------
        .. ipython:: python
            GF = galois.GF(2); print(GF.properties)
            GF = galois.GF(2**8); print(GF.properties)
            GF = galois.GF(31); print(GF.properties)
            GF = galois.GF(7**5); print(GF.properties)
        """
        string = f"{cls.name}:"
        string += f"\n characteristic: {cls.characteristic}"
        string += f"\n degree: {cls.degree}"
        string += f"\n order: {cls.order}"
        string += f"\n irreducible_poly: {cls.irreducible_poly.string}"
        string += f"\n is_primitive_poly: {cls.is_primitive_poly}"
        string += f"\n primitive_element: {poly_to_str(integer_to_poly(cls.primitive_element, cls.characteristic))}"
        return string
| [
"numpy.sort",
"numpy.array",
"numpy.iinfo",
"math.gcd"
] | [((7639, 7658), 'numpy.array', 'np.array', (['totatives'], {}), '(totatives)\n', (7647, 7658), True, 'import numpy as np\n'), ((7674, 7714), 'numpy.sort', 'np.sort', (['(cls.primitive_element ** powers)'], {}), '(cls.primitive_element ** powers)\n', (7681, 7714), True, 'import numpy as np\n'), ((7601, 7615), 'math.gcd', 'math.gcd', (['n', 't'], {}), '(n, t)\n', (7609, 7615), False, 'import math\n'), ((1802, 1817), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1810, 1817), True, 'import numpy as np\n')] |
from logging import getLogger
import numpy as np
from eit_ai.pytorch.dataset import PYTORCH_DATASET_HANDLERS
from eit_ai.pytorch.models import (PYTORCH_MODEL_HANDLERS, PYTORCH_MODELS,
StdPytorchModelHandler)
from eit_ai.raw_data.raw_samples import RawSamples
from eit_ai.train_utils.dataset import AiDatasetHandler
from eit_ai.train_utils.lists import (ListPytorchDatasetHandlers,
ListPytorchModelHandlers,
ListPytorchModels, ListWorkspaces,
get_from_dict)
from eit_ai.train_utils.metadata import MetaData
from eit_ai.train_utils.workspace import (AiWorkspace, WrongDatasetError,
WrongSingleXError, meas_duration)
# Module-level logger for this workspace implementation.
logger = getLogger(__name__)
################################################################################
# Pytorch Models
################################################################################
class PyTorchWorkspace(AiWorkspace):
    """ Generator class for pytorch models """

    def select_model_dataset(
        self,
        model_handler: ListPytorchModelHandlers = None,
        dataset_handler: ListPytorchDatasetHandlers = None,
        model: ListPytorchModels = None,
        metadata: MetaData = None) -> None:
        """Resolve and instantiate the model/dataset handler classes.

        When no explicit selection is passed, the choice recorded in
        ``metadata`` is used instead; the final selection is written
        back into ``metadata``.
        """
        nothing_selected = (
            model_handler is None
            and dataset_handler is None
            and model is None)
        if nothing_selected:
            model_handler = metadata.model_handler
            dataset_handler = metadata.dataset_handler
            model = metadata.model_type
        model_cls, model_handler_item = get_from_dict(
            model_handler, PYTORCH_MODEL_HANDLERS, ListPytorchModelHandlers, True)
        dataset_cls, dataset_handler_item = get_from_dict(
            dataset_handler, PYTORCH_DATASET_HANDLERS, ListPytorchDatasetHandlers, True)
        _, model_item = get_from_dict(
            model, PYTORCH_MODELS, ListPytorchModels, True)
        self.model_handler = model_cls()
        self.dataset_handler = dataset_cls()
        metadata.set_model_dataset_type(
            ListWorkspaces.PyTorch, model_handler_item, dataset_handler_item, model_item)

    def build_dataset(self, raw_samples: RawSamples, metadata: MetaData) -> None:
        """Build the internal dataset out of the passed raw samples."""
        self.dataset_handler.build(raw_samples, metadata)

    def build_model(self, metadata: MetaData) -> None:
        """Build the model described by ``metadata``."""
        self.model_handler.build(metadata=metadata)

    def run_training(self, metadata: MetaData = None) -> None:
        """Train the model and record the elapsed time in ``metadata``."""
        _, duration = self._run_training(metadata, return_duration=True)
        metadata.set_training_duration(duration)
        logger.info(f'### Training lasted: {duration} ###')

    @meas_duration
    def _run_training(self, metadata: MetaData = None, **kwargs) -> None:
        # Timing is provided by the @meas_duration decorator.
        self.model_handler.train(dataset=self.dataset_handler, metadata=metadata)

    def get_prediction(
        self,
        metadata: MetaData,
        dataset: AiDatasetHandler = None,
        single_X: np.ndarray = None,
        **kwargs) -> np.ndarray:
        """Run the model on test data (or a single sample) and log timing."""
        logger.info('### Prediction started: ... ###')
        prediction, duration = self._get_prediction(
            metadata,
            dataset=dataset,
            single_X=single_X,
            return_duration=True,
            **kwargs)
        logger.info(f'### Prediction lasted: {duration} ###')
        return prediction

    @meas_duration
    def _get_prediction(
        self,
        metadata: MetaData,
        dataset: AiDatasetHandler = None,
        single_X: np.ndarray = None,
        **kwargs) -> np.ndarray:
        """Predict on the internal test split, on an alternative dataset's
        test split, or on a single formatted sample."""
        X_pred = self.dataset_handler.get_X('test')
        if metadata.model_type == 'Conv1dNet':
            X_pred = np.reshape(X_pred,(-1, 1, 256))
        # another dataset can be here predicted (only test part)
        if dataset is not None:
            if not isinstance(dataset, type(self.dataset_handler)):
                raise WrongDatasetError(
                    f'{dataset= } and {self.dataset_handler} dont have same type...')
            X_pred = dataset.get_X('test')
        # Single passed X can be here predicted, after been formated
        if single_X is not None:
            if not isinstance(single_X, np.ndarray):
                raise WrongSingleXError(f'{single_X= } is not an np.ndarray ')
            X_pred = self.dataset_handler.format_single_X(single_X, metadata)
        return self.model_handler.predict(X_pred=X_pred, metadata=metadata, **kwargs)

    def save_model(self, metadata: MetaData) -> None:
        """Save the trained model and store the saving path in ``metadata``."""
        model_saving_path = self.model_handler.save(metadata=metadata)
        metadata.set_model_saving_path(model_saving_path)

    def load_model(self, metadata: MetaData) -> None:
        """select the model and dataset (need to be build after)"""
        self.select_model_dataset(metadata=metadata)
        self.model_handler.load(metadata=metadata)
if __name__ == "__main__":
import logging
from glob_utils.log.log import change_level_logging, main_log
main_log()
change_level_logging(logging.DEBUG)
# X = np.random.randn(100, 4)
# Y = np.random.randn(100)
# Y = Y[:, np.newaxis]
# rdn_dataset = PytorchDataset(X, Y)
# test = StdPytorchDataset()
# MetaData()
# new_model = StdPytorchModelManager()
# # for epoch in range(50):
# new_model.train(test, 50)
| [
"eit_ai.train_utils.workspace.WrongSingleXError",
"glob_utils.log.log.change_level_logging",
"eit_ai.train_utils.lists.get_from_dict",
"eit_ai.train_utils.workspace.WrongDatasetError",
"numpy.reshape",
"glob_utils.log.log.main_log",
"logging.getLogger"
] | [((817, 836), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (826, 836), False, 'from logging import getLogger\n'), ((4960, 4970), 'glob_utils.log.log.main_log', 'main_log', ([], {}), '()\n', (4968, 4970), False, 'from glob_utils.log.log import change_level_logging, main_log\n'), ((4975, 5010), 'glob_utils.log.log.change_level_logging', 'change_level_logging', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (4995, 5010), False, 'from glob_utils.log.log import change_level_logging, main_log\n'), ((1609, 1697), 'eit_ai.train_utils.lists.get_from_dict', 'get_from_dict', (['model_handler', 'PYTORCH_MODEL_HANDLERS', 'ListPytorchModelHandlers', '(True)'], {}), '(model_handler, PYTORCH_MODEL_HANDLERS,\n ListPytorchModelHandlers, True)\n', (1622, 1697), False, 'from eit_ai.train_utils.lists import ListPytorchDatasetHandlers, ListPytorchModelHandlers, ListPytorchModels, ListWorkspaces, get_from_dict\n'), ((1742, 1836), 'eit_ai.train_utils.lists.get_from_dict', 'get_from_dict', (['dataset_handler', 'PYTORCH_DATASET_HANDLERS', 'ListPytorchDatasetHandlers', '(True)'], {}), '(dataset_handler, PYTORCH_DATASET_HANDLERS,\n ListPytorchDatasetHandlers, True)\n', (1755, 1836), False, 'from eit_ai.train_utils.lists import ListPytorchDatasetHandlers, ListPytorchModelHandlers, ListPytorchModels, ListWorkspaces, get_from_dict\n'), ((1879, 1940), 'eit_ai.train_utils.lists.get_from_dict', 'get_from_dict', (['model', 'PYTORCH_MODELS', 'ListPytorchModels', '(True)'], {}), '(model, PYTORCH_MODELS, ListPytorchModels, True)\n', (1892, 1940), False, 'from eit_ai.train_utils.lists import ListPytorchDatasetHandlers, ListPytorchModelHandlers, ListPytorchModels, ListWorkspaces, get_from_dict\n'), ((3635, 3667), 'numpy.reshape', 'np.reshape', (['X_pred', '(-1, 1, 256)'], {}), '(X_pred, (-1, 1, 256))\n', (3645, 3667), True, 'import numpy as np\n'), ((3864, 3961), 'eit_ai.train_utils.workspace.WrongDatasetError', 'WrongDatasetError', (['f"""dataset= {dataset!r} and 
{self.dataset_handler} dont have same type..."""'], {}), "(\n f'dataset= {dataset!r} and {self.dataset_handler} dont have same type...')\n", (3881, 3961), False, 'from eit_ai.train_utils.workspace import AiWorkspace, WrongDatasetError, WrongSingleXError, meas_duration\n'), ((4187, 4253), 'eit_ai.train_utils.workspace.WrongSingleXError', 'WrongSingleXError', (['f"""single_X= {single_X!r} is not an np.ndarray """'], {}), "(f'single_X= {single_X!r} is not an np.ndarray ')\n", (4204, 4253), False, 'from eit_ai.train_utils.workspace import AiWorkspace, WrongDatasetError, WrongSingleXError, meas_duration\n')] |
import numpy as np
import matplotlib.pyplot as plt
import padding
def filtering(src, mask, pad_type='zero', return_uint8=True):
    """Correlate a 2D image with a 2D kernel (spatial filtering).

    Parameters
    ----------
    src : np.ndarray
        2D (grayscale) input image.
    mask : np.ndarray
        2D filter kernel of shape (mh, mw).
    pad_type : str
        Border handling forwarded to ``padding.padding`` (e.g. 'zero'
        or 'mirror').
    return_uint8 : bool
        If True, clip to [0, 255], round and cast to uint8; otherwise
        the raw float64 filter response is returned.

    Returns
    -------
    np.ndarray
        Filtered image with shape ``src.shape[:2]``.
    """
    print('filtering start...')
    h, w = src.shape[:2]
    mh, mw = mask.shape[:2]
    pad_img = padding.padding(src=src, pad_size=(mh//2, mw//2), pad_type=pad_type)
    # Vectorized correlation: build an (h, w, mh, mw) zero-copy view of all
    # sliding windows and contract each window with the mask. This replaces
    # the original O(h*w*mh*mw) Python double loop with C-level numpy work
    # while producing identical values. (Requires numpy >= 1.20.)
    windows = np.lib.stride_tricks.sliding_window_view(pad_img, (mh, mw))[:h, :w]
    dst = np.einsum('ijkl,kl->ij', windows, mask).astype(np.float64, copy=False)
    if return_uint8:
        # cliping 0 ~ 255
        dst = np.clip(dst, 0, 255)
        dst = np.round(dst)
        dst = dst.astype(np.uint8)
    print('filtering end...')
    return dst
def getFilter(ftype, fsize, sigma=1.0):
    """Create a filter kernel.

    Parameters
    ----------
    ftype : str
        One of 'average', 'sharpening', 'gaussian1D', 'gaussian2D', 'DoG'.
    fsize : tuple or int
        Kernel size: a 2D tuple for 'average'/'sharpening'/'gaussian2D',
        a 1-element tuple for 'gaussian1D', and a plain int for 'DoG'.
    sigma : float
        Standard deviation for the Gaussian-based kernels.

    Returns
    -------
    np.ndarray for the single-kernel types, or a ``(DoG_x, DoG_y)``
    tuple of arrays for 'DoG'.

    Raises
    ------
    ValueError
        If ``ftype`` is not one of the supported filter types.
        (Previously an unknown ``ftype`` crashed with an
        ``UnboundLocalError`` on ``mask``.)
    """
    if ftype == 'average':
        # box filter, normalized to sum to 1
        mask = np.ones(fsize)
        mask = mask/mask.sum()
    elif ftype == 'sharpening':
        # 2*identity - average: boosts the center, subtracts the mean
        base = np.zeros(fsize)
        base[fsize[0]//2,fsize[1]//2] = 2
        average = np.ones(fsize)
        average = average/average.sum()
        mask = base - average
    elif ftype == 'gaussian1D':
        assert len(fsize)==1, 'gaussian 1D mask size must be 1D Tuple( ex:(3,) )'
        fsize = fsize[0]
        x = np.arange(fsize) - fsize//2
        gaus1D = 1/(np.sqrt(2*np.pi)*sigma) * np.exp(-((x*x)/(2*sigma*sigma)))
        mask = gaus1D / np.sum(gaus1D)
        # return as a column vector (fsize, 1)
        mask = mask[:,np.newaxis]
    elif ftype == 'gaussian2D':
        (fh, fw) = fsize
        y, x = np.mgrid[-(fh//2):(fh//2)+1, -(fw//2):(fw//2)+1]
        gaus2D = 1/(2*np.pi*sigma*sigma) * np.exp(-((x*x + y*y)/(2*sigma*sigma)))
        mask = gaus2D / np.sum(gaus2D)
    elif ftype == 'DoG':
        # Derivative-of-Gaussian pair; fsize is a scalar kernel size here.
        y, x = np.mgrid[-(fsize//2):(fsize//2)+1, -(fsize//2):(fsize//2)+1]
        DoG_x = (-x / sigma**2) * np.exp(-((x**2 + y**2)/(2 * sigma**2)))
        DoG_y = (-y / sigma**2) * np.exp(-((x**2 + y**2)/(2 * sigma**2)))
        # remove the DC component so flat regions give zero response
        DoG_x = DoG_x - (DoG_x.sum()/fsize**2)
        DoG_y = DoG_y - (DoG_y.sum()/fsize**2)
        return DoG_x, DoG_y
    else:
        # explicit error instead of the former UnboundLocalError fall-through
        raise ValueError("unknown filter type: {}".format(ftype))
    return mask
def showFilteringTest(img):
    """Demo: show 5x5 average/sharpening filtering of ``img`` with both
    zero and mirror border padding in a 2x3 grid of subplots."""
    ksize = (5, 5)
    average_mask = getFilter(ftype='average', fsize=ksize)
    sharp_mask = getFilter(ftype='sharpening', fsize=ksize)
    # (image, caption) pairs in subplot order: zero-padding row first,
    # then mirror-padding row, with the original image in the middle.
    panels = [
        (filtering(src=img, mask=average_mask, pad_type='zero'),
         '5x5 average filter with zero padding'),
        (img.copy(), 'original image'),
        (filtering(src=img, mask=sharp_mask, pad_type='zero'),
         '5x5 sharpening filter with zero padding'),
        (filtering(src=img, mask=average_mask, pad_type='mirror'),
         '5x5 average filter with mirror padding'),
        (img.copy(), 'original image'),
        (filtering(src=img, mask=sharp_mask, pad_type='mirror'),
         '5x5 sharpening filter with mirror padding'),
    ]
    plt.figure(figsize=(12,8))
    for position, (panel, caption) in enumerate(panels, start=1):
        plt.subplot(2,3,position)
        plt.title(caption)
        plt.imshow(panel, cmap='gray', vmin=0, vmax=255)
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"padding.padding",
"numpy.clip",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"numpy.round",
"numpy.sqrt"
] | [((230, 302), 'padding.padding', 'padding.padding', ([], {'src': 'src', 'pad_size': '(mh // 2, mw // 2)', 'pad_type': 'pad_type'}), '(src=src, pad_size=(mh // 2, mw // 2), pad_type=pad_type)\n', (245, 302), False, 'import padding\n'), ((310, 326), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (318, 326), True, 'import numpy as np\n'), ((2605, 2632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (2615, 2632), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2656), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (2647, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2708), 'matplotlib.pyplot.title', 'plt.title', (['"""5x5 average filter with zero padding"""'], {}), "('5x5 average filter with zero padding')\n", (2668, 2708), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2776), 'matplotlib.pyplot.imshow', 'plt.imshow', (['average_img_zero_pad'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': '(255)'}), "(average_img_zero_pad, cmap='gray', vmin=0, vmax=255)\n", (2723, 2776), True, 'import matplotlib.pyplot as plt\n'), ((2782, 2802), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (2793, 2802), True, 'import matplotlib.pyplot as plt\n'), ((2805, 2832), 'matplotlib.pyplot.title', 'plt.title', (['"""original image"""'], {}), "('original image')\n", (2814, 2832), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (2907, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2919, 2971), 'matplotlib.pyplot.title', 'plt.title', (['"""5x5 sharpening filter with zero padding"""'], {}), "('5x5 sharpening filter with zero padding')\n", (2928, 2971), True, 'import matplotlib.pyplot as plt\n'), ((2976, 3037), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sharp_img_zero_pad'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': 
'(255)'}), "(sharp_img_zero_pad, cmap='gray', vmin=0, vmax=255)\n", (2986, 3037), True, 'import matplotlib.pyplot as plt\n'), ((3043, 3063), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (3054, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3066, 3117), 'matplotlib.pyplot.title', 'plt.title', (['"""5x5 average filter with mirror padding"""'], {}), "('5x5 average filter with mirror padding')\n", (3075, 3117), True, 'import matplotlib.pyplot as plt\n'), ((3122, 3187), 'matplotlib.pyplot.imshow', 'plt.imshow', (['average_img_mirror_pad'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': '(255)'}), "(average_img_mirror_pad, cmap='gray', vmin=0, vmax=255)\n", (3132, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3193, 3213), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (3204, 3213), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3243), 'matplotlib.pyplot.title', 'plt.title', (['"""original image"""'], {}), "('original image')\n", (3225, 3243), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3327), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (3318, 3327), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3384), 'matplotlib.pyplot.title', 'plt.title', (['"""5x5 sharpening filter with mirror padding"""'], {}), "('5x5 sharpening filter with mirror padding')\n", (3339, 3384), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3452), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sharp_img_mirror_pad'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': '(255)'}), "(sharp_img_mirror_pad, cmap='gray', vmin=0, vmax=255)\n", (3399, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3458, 3468), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3466, 3468), True, 'import matplotlib.pyplot as plt\n'), ((556, 576), 'numpy.clip', 'np.clip', (['dst', '(0)', '(255)'], {}), '(dst, 0, 255)\n', (563, 576), True, 'import numpy as np\n'), 
((591, 604), 'numpy.round', 'np.round', (['dst'], {}), '(dst)\n', (599, 604), True, 'import numpy as np\n'), ((773, 787), 'numpy.ones', 'np.ones', (['fsize'], {}), '(fsize)\n', (780, 787), True, 'import numpy as np\n'), ((415, 465), 'numpy.sum', 'np.sum', (['(pad_img[row:row + mh, col:col + mw] * mask)'], {}), '(pad_img[row:row + mh, col:col + mw] * mask)\n', (421, 465), True, 'import numpy as np\n'), ((871, 886), 'numpy.zeros', 'np.zeros', (['fsize'], {}), '(fsize)\n', (879, 886), True, 'import numpy as np\n'), ((948, 962), 'numpy.ones', 'np.ones', (['fsize'], {}), '(fsize)\n', (955, 962), True, 'import numpy as np\n'), ((1186, 1202), 'numpy.arange', 'np.arange', (['fsize'], {}), '(fsize)\n', (1195, 1202), True, 'import numpy as np\n'), ((1261, 1299), 'numpy.exp', 'np.exp', (['(-(x * x / (2 * sigma * sigma)))'], {}), '(-(x * x / (2 * sigma * sigma)))\n', (1267, 1299), True, 'import numpy as np\n'), ((1318, 1332), 'numpy.sum', 'np.sum', (['gaus1D'], {}), '(gaus1D)\n', (1324, 1332), True, 'import numpy as np\n'), ((1536, 1584), 'numpy.exp', 'np.exp', (['(-((x * x + y * y) / (2 * sigma * sigma)))'], {}), '(-((x * x + y * y) / (2 * sigma * sigma)))\n', (1542, 1584), True, 'import numpy as np\n'), ((1600, 1614), 'numpy.sum', 'np.sum', (['gaus2D'], {}), '(gaus2D)\n', (1606, 1614), True, 'import numpy as np\n'), ((1235, 1253), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1242, 1253), True, 'import numpy as np\n'), ((1752, 1799), 'numpy.exp', 'np.exp', (['(-((x ** 2 + y ** 2) / (2 * sigma ** 2)))'], {}), '(-((x ** 2 + y ** 2) / (2 * sigma ** 2)))\n', (1758, 1799), True, 'import numpy as np\n'), ((1826, 1873), 'numpy.exp', 'np.exp', (['(-((x ** 2 + y ** 2) / (2 * sigma ** 2)))'], {}), '(-((x ** 2 + y ** 2) / (2 * sigma ** 2)))\n', (1832, 1873), True, 'import numpy as np\n')] |
import os
import subprocess
import sys
import importlib
import inspect
import functools
import tensorflow as tf
import numpy as np
from baselines.common import tf_util as U
import re
def store_args(method):
    """Decorator: save the decorated method's call arguments on ``self``.

    Every named parameter (positional or keyword-only) that has a value at
    call time -- whether passed explicitly or filled in from a declared
    default -- is written into the instance ``__dict__`` before the method
    body runs.
    """
    spec = inspect.getfullargspec(method)
    # Collect declared defaults: positional tail first, then keyword-only.
    default_values = {}
    if spec.defaults is not None:
        tail_names = spec.args[-len(spec.defaults):]
        default_values.update(zip(tail_names, spec.defaults))
    if spec.kwonlydefaults is not None:
        default_values.update(spec.kwonlydefaults)
    positional_names = spec.args[1:]  # skip 'self'

    @functools.wraps(method)
    def wrapper(*call_args, **call_kwargs):
        instance = call_args[0]
        bound = dict(default_values)
        bound.update(zip(positional_names, call_args[1:]))
        bound.update(call_kwargs)
        instance.__dict__.update(bound)
        return method(*call_args, **call_kwargs)

    return wrapper
def get_subdir_by_params(path_params, ctr=0):
    """Build a short, human-readable subdirectory name from a parameter dict.

    Keys are abbreviated to the first three letters of each
    underscore-separated word; values are abbreviated recursively around
    ',', ':', '[' and ']' (underscored words within a value keep two
    letters each).  Entries whose value stringifies to '' are skipped.
    ``ctr`` is appended as the final '|'-separated field.
    """
    def _abbreviate(text, separators):
        sep = separators[0]
        pieces = []
        for part in text.split(sep):
            if len(separators) == 1:
                # Innermost level: apply the underscore shortening rule.
                if part.find("_") == -1:
                    pieces.append(str(part))
                else:
                    pieces.append("_".join(word[:2] for word in part.split("_")))
            else:
                pieces.append(_abbreviate(part, separators[1:]))
        return sep.join(pieces)

    fields = []
    for key, value in sorted(path_params.items()):
        if str(value) == '':
            continue
        short_key = "".join(word[:3] for word in key.split("_"))
        short_val = _abbreviate(str(value), [",", ":", "[", "]"])
        fields.append('{}:{}'.format(short_key, short_val))
    return "|".join(fields) + "|" + str(ctr)
def get_git_label():
    """Return the current git revision label (``git describe --always``).

    Falls back to the empty string when git or a repository is
    unavailable (best effort only).
    """
    try:
        # Decode the subprocess output properly instead of slicing the
        # bytes repr() -- the old ``str(b'...').strip()[2:-3]`` trick
        # depended on the exact repr format and silently corrupted labels.
        git_label = subprocess.check_output(
            ["git", 'describe', '--always']).decode('utf-8').strip()
    except Exception:
        # No git binary, no repository, or no permission.
        git_label = ''
    return git_label
def import_function(spec):
    """Resolve a string like ``"pkg.module:fn_name"`` to the named object.

    The part before the colon is imported as a module; the attribute named
    after the colon is looked up on it and returned.
    """
    module_path, attribute_name = spec.split(':')
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
def flatten_grads(var_list, grads):
    """Flatten matching variables and gradients into one 1-D tensor.

    Each gradient is reshaped to the element count of its variable and
    the pieces are concatenated along axis 0.  Returns ``[]`` when
    ``var_list`` is empty (preserving the original contract).
    """
    if len(var_list) == 0:
        return []
    # The old try/except caught an exception only to print it and re-run
    # the identical expression, which re-raised anyway -- dead weight.
    grad_list = [tf.reshape(grad, [U.numel(v)])
                 for (v, grad) in zip(var_list, grads)]
    return tf.concat(grad_list, 0)
def flatten_grads_compact(var_list, grads):
    """Flatten variables and their gradients (loop-built variant).

    Equivalent to :func:`flatten_grads` for non-empty input; returns
    ``[]`` when ``var_list`` is empty.
    """
    if len(var_list) == 0:
        return []
    reshaped = []
    for v, grad in zip(var_list, grads):
        reshaped.append(tf.reshape(grad, [U.numel(v)]))
    return tf.concat(reshaped, 0)
def nn(input, layers_sizes, reuse=None, flatten=False, name=""):
    """Stack fully-connected layers; ReLU after all but the final layer.

    Layer variables are named ``name + '_' + index``.  With
    ``flatten=True`` the (single-unit) output is reshaped to a vector.
    """
    last_idx = len(layers_sizes) - 1
    out = input
    for idx, width in enumerate(layers_sizes):
        out = tf.layers.dense(inputs=out,
                              units=width,
                              kernel_initializer=tf.contrib.layers.xavier_initializer(),
                              reuse=reuse,
                              name=name + '_' + str(idx))
        if idx < last_idx:
            out = tf.nn.relu(out)
    if flatten:
        assert layers_sizes[-1] == 1
        out = tf.reshape(out, [-1])
    return out
def critic_nn(input, layers_sizes, q_limit, reuse=None, flatten=False, name=""):
    """Critic head: squash an MLP output into ``(0, q_limit)``.

    A constant offset is chosen so a zero pre-activation maps to the
    initial Q value of -0.067.
    """
    raw = nn(input, layers_sizes, reuse=reuse, flatten=flatten, name=name)
    q_init = -0.067
    q_offset = -np.log(q_limit / q_init - 1)
    return tf.sigmoid(raw + q_offset) * q_limit
def install_mpi_excepthook():
    """Replace ``sys.excepthook`` so an uncaught error aborts all MPI ranks.

    The previously installed hook still runs (and stdout/stderr are
    flushed) before ``MPI.COMM_WORLD.Abort()`` tears the job down.
    """
    import sys
    from mpi4py import MPI
    previous_hook = sys.excepthook

    def aborting_hook(exc_type, exc_value, exc_traceback):
        previous_hook(exc_type, exc_value, exc_traceback)
        sys.stdout.flush()
        sys.stderr.flush()
        MPI.COMM_WORLD.Abort()

    sys.excepthook = aborting_hook
def mpi_fork(n, extra_mpi_args=None):
    """Re-launch the current script under ``mpirun`` with ``n`` workers.

    Returns "parent" in the original process (after the MPI children
    finish) and "child" in the re-launched workers, or immediately when
    no fork is needed (``n <= 1``).

    ``extra_mpi_args`` now defaults to ``None`` instead of a shared
    mutable ``[]`` (classic mutable-default pitfall); semantics are
    unchanged.
    """
    if extra_mpi_args is None:
        extra_mpi_args = []
    if n <= 1:
        return "child"
    if os.getenv("IN_MPI") is None:
        env = os.environ.copy()
        # Single-threaded math libraries; mark children via IN_MPI.
        env.update(
            MKL_NUM_THREADS="1",
            OMP_NUM_THREADS="1",
            IN_MPI="1"
        )
        # "-bind-to core" is crucial for good performance
        args = ["mpirun", "-np", str(n)] + \
            extra_mpi_args + \
            [sys.executable]
        args += sys.argv
        ret = subprocess.check_call(args, env=env)
        print(ret)
        return "parent"
    else:
        install_mpi_excepthook()
        return "child"
def convert_episode_to_batch_major(episode):
    """Swap the leading time axis with the batch axis for every key.

    Input arrays are time-major; the returned dict holds batch-major
    copies of each entry.
    """
    return {key: np.array(value).copy().swapaxes(0, 1)
            for key, value in episode.items()}
def transitions_in_episode_batch(episode_batch):
    """Total number of transitions (first two axes of 'u') in the batch."""
    first_dim, second_dim = episode_batch['u'].shape[:2]
    return first_dim * second_dim
def reshape_for_broadcasting(source, target):
    """Cast ``source`` to ``target``'s dtype and reshape it to
    ``[1, ..., 1, -1]`` so it broadcasts against ``target``.
    """
    n_dims = len(target.get_shape())
    broadcast_shape = [1] * (n_dims - 1) + [-1]
    return tf.reshape(tf.cast(source, target.dtype), broadcast_shape)
def prob_dist2discrete(prob_dist):
    """Collapse a probability distribution to the argmax index along the
    last axis.
    """
    discrete_idx = np.argmax(prob_dist, axis=-1)
    return discrete_idx
def physical_cpu_core_count():
    """Number of physical CPU cores as reported by ``/proc/cpuinfo``.

    Returns 0 when the file is missing (non-Linux) or cannot be parsed.
    Fixes in this revision: the file handle is now closed (it was leaked),
    a parse failure (``ValueError``) no longer escapes, and a
    non-positive count returns 0 instead of ``None`` implicitly.
    """
    try:
        with open('/proc/cpuinfo') as f:
            info = f.read()
        idx = info.find('cpu cores') + len("cpu cores")
        idx = info.find(": ", idx) + len(": ")
        nl_idx = info.find("\n", idx)
        cores = int(info[idx:nl_idx])
        if cores > 0:
            return cores
    except (IOError, ValueError):
        pass
    return 0
| [
"tensorflow.contrib.layers.xavier_initializer",
"numpy.argmax",
"os.environ.copy",
"tensorflow.reshape",
"sys.stdout.flush",
"subprocess.check_call",
"tensorflow.concat",
"tensorflow.cast",
"sys.stderr.flush",
"importlib.import_module",
"subprocess.check_output",
"baselines.common.tf_util.nume... | [((289, 319), 'inspect.getfullargspec', 'inspect.getfullargspec', (['method'], {}), '(method)\n', (311, 319), False, 'import inspect\n'), ((604, 627), 'functools.wraps', 'functools.wraps', (['method'], {}), '(method)\n', (619, 627), False, 'import functools\n'), ((2849, 2882), 'importlib.import_module', 'importlib.import_module', (['mod_name'], {}), '(mod_name)\n', (2872, 2882), False, 'import importlib\n'), ((3312, 3335), 'tensorflow.concat', 'tf.concat', (['grad_list', '(0)'], {}), '(grad_list, 0)\n', (3321, 3335), True, 'import tensorflow as tf\n'), ((6532, 6561), 'numpy.argmax', 'np.argmax', (['prob_dist'], {'axis': '(-1)'}), '(prob_dist, axis=-1)\n', (6541, 6561), True, 'import numpy as np\n'), ((4244, 4267), 'tensorflow.reshape', 'tf.reshape', (['input', '[-1]'], {}), '(input, [-1])\n', (4254, 4267), True, 'import tensorflow as tf\n'), ((4506, 4534), 'numpy.log', 'np.log', (['(q_limit / q_init - 1)'], {}), '(q_limit / q_init - 1)\n', (4512, 4534), True, 'import numpy as np\n'), ((4548, 4577), 'tensorflow.sigmoid', 'tf.sigmoid', (['(ret_nn + q_offset)'], {}), '(ret_nn + q_offset)\n', (4558, 4577), True, 'import tensorflow as tf\n'), ((4772, 4790), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4788, 4790), False, 'import sys\n'), ((4799, 4817), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (4815, 4817), False, 'import sys\n'), ((4826, 4848), 'mpi4py.MPI.COMM_WORLD.Abort', 'MPI.COMM_WORLD.Abort', ([], {}), '()\n', (4846, 4848), False, 'from mpi4py import MPI\n'), ((5088, 5107), 'os.getenv', 'os.getenv', (['"""IN_MPI"""'], {}), "('IN_MPI')\n", (5097, 5107), False, 'import os\n'), ((5131, 5148), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (5146, 5148), False, 'import os\n'), ((5471, 5507), 'subprocess.check_call', 'subprocess.check_call', (['args'], {'env': 'env'}), '(args, env=env)\n', (5492, 5507), False, 'import subprocess\n'), ((6443, 6472), 'tensorflow.cast', 'tf.cast', (['source', 
'target.dtype'], {}), '(source, target.dtype)\n', (6450, 6472), True, 'import tensorflow as tf\n'), ((3249, 3259), 'baselines.common.tf_util.numel', 'U.numel', (['v'], {}), '(v)\n', (3256, 3259), True, 'from baselines.common import tf_util as U\n'), ((3971, 4009), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4007, 4009), True, 'import tensorflow as tf\n'), ((5831, 5853), 'numpy.array', 'np.array', (['episode[key]'], {}), '(episode[key])\n', (5839, 5853), True, 'import numpy as np\n'), ((3118, 3128), 'baselines.common.tf_util.numel', 'U.numel', (['v'], {}), '(v)\n', (3125, 3128), True, 'from baselines.common import tf_util as U\n'), ((3523, 3533), 'baselines.common.tf_util.numel', 'U.numel', (['v'], {}), '(v)\n', (3530, 3533), True, 'from baselines.common import tf_util as U\n'), ((2557, 2613), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'describe', '--always']"], {}), "(['git', 'describe', '--always'])\n", (2580, 2613), False, 'import subprocess\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 11:19:34 2018
@author: jennywong
"""
###############################################################################
# GETPARAMETERS.PY #
###############################################################################
# Description: reference parameters to be passed into the BVP for the
# spherical steady state slurry
#def getparameters(layer_thickness,csb_heatflux,thermal_conductivity):
# icb_heatflux,ic_age):
import numpy as np
from scipy.constants import gas_constant
from scipy import integrate, interpolate
from slurpy.coreproperties import icb_radius,density_liquidO, \
density_solidFe,heat_capacity,latent_heat,aO,aFe,aSSi,deltaV_solidFe_liquidFe, \
gigayear, alphaT, alphaXi, bulk_modulus, cmb_radius, gruneisen
from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki
# CSB radius
def getcsbradius(layer_thickness):
    """Radius of the CSB: the ICB radius offset by the layer thickness."""
    return icb_radius + layer_thickness
# CSB temperature
def getcsbtemp(layer_thickness):
    """Liquidus temperature evaluated at the CSB radius."""
    return liquidus(getcsbradius(layer_thickness))
# CSB temperature gradient
def getcsbtempgrad(layer_thickness, csb_heatflux, thermal_conductivity):
    """Temperature gradient at the CSB (Fourier's law over the CSB area).

    ``csb_heatflux`` is in TW, hence the 1e12 factor; the sign is
    negative (temperature decreases outward).
    """
    r_csb = getcsbradius(layer_thickness)
    area = 4 * np.pi * r_csb**2
    return -(csb_heatflux * 1e12) / (thermal_conductivity * area)
def getcsbmassoxygen(mol_conc_oxygen_bulk, mol_conc_SSi=8):
    """Convert a molar oxygen concentration (in percent) to mass units.

    Returns ``(mass_conc_O, acore)``, where ``acore`` is the mean atomic
    weight of the Fe / O / S-Si core mixture.
    """
    x_O = mol_conc_oxygen_bulk * 1e-2
    x_SSi = mol_conc_SSi * 1e-2
    x_Fe = 1 - x_O - x_SSi
    # Mean atomic weight of the mixture.
    acore = x_Fe * aFe + x_SSi * aSSi + x_O * aO
    mass_conc_O = x_O * aO / acore
    return mass_conc_O, acore
# Change in specific volume upon melting
def getchangevolmelting(mol_conc_oxygen_bulk, density0, mol_conc_SSi=8):
    """Specific-volume change on melting of solid Fe into liquid Fe-O."""
    mass_conc_O = getcsbmassoxygen(mol_conc_oxygen_bulk, mol_conc_SSi)[0]
    return ((1 - mass_conc_O) / density0 +
            mass_conc_O / density_liquidO - 1 / density_solidFe)
# Change in specific volume of liquid and solid iron
def getchangevolmeltingFe(density0):
    """Specific-volume change between liquid (``density0``) and solid iron."""
    return 1 / density0 - 1 / density_solidFe
# Freezing speed
def getfreezingspeed(icb_heatflux):
    """Freezing speed at the ICB implied by the ICB heat flux (TW)."""
    latent_per_speed = 4 * np.pi * icb_radius**2 * density_solidFe * latent_heat
    return icb_heatflux * 1e12 / latent_per_speed
# ICB heat flux
def geticbheatflux(freezing_speed):
    """ICB heat flux (TW) for a given freezing speed; inverse of
    :func:`getfreezingspeed`.
    """
    return freezing_speed * (4 * np.pi * icb_radius**2 * density_solidFe * latent_heat) * 1e-12
# CSB heat flux
def getcsbheatflux(St, freezing_speed, density0, csb_radius):
    """CSB heat flux (TW) for a given Stefan number ``St``."""
    return St * density0 * freezing_speed * latent_heat * 4 * np.pi * csb_radius**2 * 1e-12
# Thermal conductivity
def getthermalconductivity(Le, density0, self_diffusion=0.98e-8):
    """Thermal conductivity implied by a given Lewis number ``Le``."""
    return Le * density0 * heat_capacity * self_diffusion
# Initial guess for snow speed
def getinitialsnowspeed(ic_age):
    """Initial guess for the snow speed, assuming volumetric (rather than
    linear) inner-core growth over ``ic_age``.
    """
    return icb_radius / (3 * ic_age * gigayear)
# Core cooling rate
def getcorecoolingrate(core_cooling_rate):
    """Rescale a per-gigayear cooling rate by the ``gigayear`` constant."""
    return core_cooling_rate / gigayear
# Snow speed given IC age
def getsnowspeed(ic_age):
    """Snow speed implied by the inner-core age (volumetric growth)."""
    return icb_radius / (3 * ic_age * gigayear)
# IC age given ICB speed
def geticage(icb_speed):
    """Inner-core age implied by an ICB growth speed; inverse of
    :func:`getsnowspeed`.
    """
    return icb_radius / (3 * icb_speed * gigayear)
# Freezing speed given Pe
def getfreezingspeedfromPe(Pe, csb_radius, self_diffusion):
    """Freezing speed recovered from a Peclet number ``Pe``."""
    return Pe * self_diffusion / csb_radius
#%%
# Kphi - prefactor in solid flux eq (2.9.1g)
def getKphi(sedimentation_constant, radius, mol_conc_oxygen_bulk, mol_conc_SSi=8):
    """Prefactor ``Kphi`` in the solid-flux equation (eq. 2.9.1g).

    Note: the oxygen-concentration arguments are accepted for interface
    compatibility but unused here -- the volume change is evaluated for
    pure iron at the outermost density sample.
    """
    gravity = premgravity(radius)
    density = premdensity(radius)
    deltaV = getchangevolmeltingFe(density[-1])
    return sedimentation_constant * gravity * density * deltaV
def getphi(Kphi, solidflux):
    """Solid fraction implied by a solid flux: ``(-j / Kphi)**(3/5)``."""
    return (-solidflux / Kphi)**(3 / 5)
#%% Scalings
# Temp
def get_tempScale(csb_heatflux, csb_radius, density0, freezing_speed):
    """Temperature scale used to non-dimensionalise the slurry equations.

    ``csb_heatflux`` is in TW (hence the 1e12 factor).
    """
    advective_power = (4 * np.pi * csb_radius**2 * density0 *
                       heat_capacity * freezing_speed)
    return csb_heatflux * 1e12 / advective_power
# Solid flux
def get_jScale(freezing_speed):
    """Solid-flux scale: freezing speed times solid-iron density."""
    return freezing_speed * density_solidFe
#%% Dimensionless parameters
# Lip
def getLip(csb_radius, model='prem'):
    """Dimensionless number Lip at the CSB.

    Returns ``(Lip, gravity, density)``; density is taken from PREM or
    the Ohtaki model depending on ``model``.
    """
    gravity = premgravity(csb_radius)
    if model == 'ohtaki':
        _, density = ohtaki(csb_radius)
    elif model == 'prem':
        density = premdensity(csb_radius)
    Lip = deltaV_solidFe_liquidFe * gravity * density * csb_radius / latent_heat
    return Lip, gravity, density
# Lix
def getLix(mass_conc_O):
    """Dimensionless number Lix from the CSB oxygen mass concentration."""
    return 1000 * gas_constant * mass_conc_O / (aO * heat_capacity)
# Stefan number
def getStefan(icb_heatflux, csb_heatflux, csb_radius):
    """Stefan number: CSB over ICB heat flux, corrected for surface area."""
    return csb_heatflux * icb_radius**2 / (icb_heatflux * csb_radius**2)
# Lewis number
def getLewis(thermal_conductivity, self_diffusion, density0):
    """Lewis number: thermal over compositional diffusivity."""
    return thermal_conductivity / (density0 * heat_capacity * self_diffusion)
# Peclet number
def getPeclet(freezing_speed, csb_radius, self_diffusion):
    """Peclet number: advective over diffusive transport at the CSB."""
    return freezing_speed * csb_radius / self_diffusion
#%% Dimensional parameters
def getdimensional(layer_thickness, Pe, St, Le,
                   mol_conc_oxygen_bulk=8., mol_conc_SSi=8.,
                   self_diffusion=0.98e-8):
    """Recover dimensional inputs from the dimensionless numbers Pe, St, Le.

    Returns ``(icb_heatflux, csb_heatflux, thermal_conductivity)``.
    """
    csb_radius = getcsbradius(layer_thickness)
    density0 = premdensity(csb_radius)
    # Computed to mirror the original code path (result is not used below).
    mass_conc_O, acore = getcsbmassoxygen(mol_conc_oxygen_bulk, mol_conc_SSi)
    freezing_speed = getfreezingspeedfromPe(Pe, csb_radius, self_diffusion)
    icb_heatflux = geticbheatflux(freezing_speed)
    csb_heatflux = getcsbheatflux(St, freezing_speed, density0, csb_radius)
    thermal_conductivity = getthermalconductivity(Le, density0)
    return (icb_heatflux, csb_heatflux, thermal_conductivity)
# %% Postprocessing
def slurrydensity(radius,temp,xi,solidflux,mol_conc_oxygen_bulk, \
                  sedimentation_constant,mol_conc_SSi=8,model='prem'):
    """Density profile across the slurry layer.

    Evaluated on the radial grid ``radius`` (the CSB is the last sample)
    from the temperature ``temp``, oxygen concentration ``xi`` and solid
    flux ``solidflux`` profiles.  Returns ``(density_slurry, phi,
    temp_denFluc, xi_denFluc, phi_denFluc, density_fluc)``: the total
    density, the solid fraction, and the individual density-fluctuation
    contributions.
    """
    # Density profile across slurry layer
    csb_radius=radius[-1]
    # Reference density at the CSB from the chosen seismic model.
    if model=='prem':
        csb_density=premdensity(csb_radius)
    elif model =='ohtaki':
        _,csb_density=ohtaki(csb_radius)
    # Solid fraction
    # NOTE(review): this expression duplicates getphi(); keep them in sync.
    Kphi=getKphi(sedimentation_constant,radius,mol_conc_oxygen_bulk)
    phi=(-solidflux/Kphi)**(3/5)
    # Density fluctuations, measured relative to the CSB (last grid point).
    deltaV_solid_liquid=getchangevolmeltingFe(csb_density)
    temp_denFluc=-csb_density*alphaT*(temp-temp[-1])
    xi_denFluc=-csb_density*alphaXi*(xi-xi[-1])
    phi_denFluc=csb_density*(csb_density*deltaV_solid_liquid+alphaXi*xi)*(phi-phi[-1])
    density_fluc= temp_denFluc+xi_denFluc+phi_denFluc
    # Hydrostatic density
    slurry_gravity=premgravity(radius)
    density_hydrostatic=1/(slurry_gravity*(radius-csb_radius)/bulk_modulus+ \
                           1/csb_density)
    # Total density
    density_slurry=density_hydrostatic+density_fluc
    return (density_slurry, phi, temp_denFluc, xi_denFluc, phi_denFluc,density_fluc)
# %%
def adiabat(oc_radius, csb_temp, n):
    """Adiabatic temperature profile through the outer core.

    Anchored at ``csb_temp`` at ``oc_radius[0]`` and integrated outward
    using the Gruneisen parameter and the PREM seismic parameter (vp**2).
    Returns an array of ``n`` temperatures.
    """
    oc_gravity = premgravity(oc_radius)
    seismic_parameter = premvp(oc_radius)**2
    integrand = oc_gravity * gruneisen / seismic_parameter
    temps = np.zeros(n)
    temps[0] = csb_temp  # boundary condition at the CSB
    # NOTE(review): integrate.simps is deprecated/removed in SciPy >= 1.14;
    # consider migrating to integrate.simpson file-wide.
    for k in range(1, n):
        exponent = integrate.simps(integrand[0:k + 1], oc_radius[0:k + 1])
        temps[k] = csb_temp * np.exp(-exponent)
    return temps
#%% Calculate difference in CMB temp after 1Ga by constructing adiabats
# anchored at present day and after 1Ga
def get_cooling(icb_speed,csb_temp, radius_oc, csb_radius, cmb_radius):
    """CMB cooling rate from two outer-core adiabats spaced 1 Ga apart.

    The first adiabat is anchored at today's CSB temperature; the second
    at the liquidus of the CSB radius grown by ``icb_speed`` over one
    gigayear.  Returns ``(cooling_rate, cmb_temp0, temp_ad0)``: the
    cooling rate, the present-day CMB temperature and the present-day
    adiabat profile.
    """
    delta_t = gigayear
    temp_ad0 = adiabat(radius_oc,csb_temp,50)
    cmb_temp0 = temp_ad0[-1]
    print('CMB temp present day is {:.0f}K'.format(cmb_temp0))
    # Advance the CSB outward by one gigayear of inner-core growth and
    # anchor the future adiabat at the liquidus there.
    csb_radius1 = csb_radius+icb_speed*delta_t
    radius_oc1 = np.linspace(csb_radius1,cmb_radius)
    temp_ad1 = adiabat(radius_oc1,liquidus(csb_radius1),50)
    cmb_temp1 = temp_ad1[-1]
    print('CMB temp after 1Ga is {:.0f}K'.format(cmb_temp1))
    cooling_rate = (cmb_temp1 - cmb_temp0)/delta_t
    cooling_rate_Ga = cooling_rate*gigayear
    print('Cooling rate is {:.2f}K/Ga'.format(cooling_rate_Ga))
    return cooling_rate, cmb_temp0, temp_ad0
#%% Heat flux across core-mantle boundary
def heatflux(radius,temp,xi,solidflux,phi,temp_grad,xi_grad,density_slurry, \
             icb_speed,icb_heatflux,layer_thickness,thermal_conductivity, \
                 csb_heatflux,n):
    """Energy budget of the core: gravitational, latent and secular terms.

    Combines slurry-layer profiles (``radius`` grid with the CSB as the
    last sample) with outer-core integrals to return
    ``(Q_cmb, Qs, Qs_slurry, Qs_oc, Ql, Qg, Qg_oc, Qg_slurry,
    cooling_rate, cmb_temp, temp_ad)``.  Intermediate terms are printed
    in TW as the calculation proceeds.

    NOTE(review): ``temp_grad``, ``xi_grad``, ``phi`` and ``n`` are
    accepted but not used in the visible computation -- confirm against
    callers before removing.
    """
    csb_radius = radius[-1]
    # GRAVITATIONAL POWER
    # Gravitational potential, referenced to zero at the CMB:
    # psi[-1] stays 0 from np.zeros because the loop stops one short.
    radius_psi = np.linspace(icb_radius,cmb_radius)
    gravity_psi = premgravity(radius_psi)
    psi = np.zeros(radius_psi.size)
    for i in range(radius_psi.size-1):
        psi[i] = -integrate.simps(gravity_psi[i:],radius_psi[i:])
    # Slurry: interpolate the potential onto the slurry grid.
    f = interpolate.interp1d(radius_psi,psi)
    psi_slurry = f(radius)
    oxygen_mass_slurry = 4*np.pi*density_solidFe*layer_thickness**2*icb_speed*xi[-1]
    mass_slurry = integrate.simps(density_slurry*4*np.pi*radius**2,radius)
    Qg_slurry = -alphaXi*oxygen_mass_slurry/mass_slurry* \
        integrate.simps(density_slurry*psi_slurry*4*np.pi*radius**2,radius)
    # print('Change in oxygen mass (slurry) is {}'.format(oxygen_mass_slurry))
    # print('Mass of slurry is {}'.format(mass_slurry))
    print('Qg slurry is {:.2f}TW'.format(Qg_slurry*1e-12))
    # Outer core
    radius_oc = np.linspace(csb_radius,cmb_radius)
    psi_oc = f(radius_oc)
    surf = -psi[0]*alphaXi*density_slurry[-1]*xi[-1]*icb_speed*4*np.pi*csb_radius**2
    # Mass of OC
    density_oc = premdensity(radius_oc)
    mass_oc = integrate.simps(density_oc*4*np.pi*radius_oc**2,radius_oc)
    oxygen_mass_oc = - density_slurry[-1]*icb_speed*4*np.pi*csb_radius**2*xi[-1]/mass_oc
    bulk = integrate.simps(alphaXi*psi_oc*oxygen_mass_oc*4*np.pi*radius_oc**2,radius_oc)
    # print('Change in oxygen mass (outer core) is {}'.format(oxygen_mass_oc))
    # print('Mass of outer core is {}'.format(mass_oc))
    print('Qg surface term in outer core is {:.2f}TW'.format(surf*1e-12))
    print('Qg bulk term in outer core is {:.5f}TW'.format(bulk*1e-12))
    # Total gravitational energy
    Qg_oc = surf+bulk
    Qg = Qg_slurry + Qg_oc
    print('Total Qg is {:.2f}TW'.format(Qg*1e-12))
    # LATENT HEAT released by freezing at the ICB.
    Ql=4*np.pi*icb_radius**2*density_solidFe*icb_speed*latent_heat
    print('Total Ql is {:.2f}TW'.format(Ql*1e-12))
    # SECULAR COOLING
    # Cooling rate
    csb_temp = temp[-1]
    cooling_rate,cmb_temp,temp_ad = get_cooling(icb_speed,csb_temp,radius_oc,
                                        csb_radius, cmb_radius)
    # Outer core
    Qs_oc = - heat_capacity*cooling_rate/cmb_temp \
        *integrate.simps(density_oc*temp_ad*4*np.pi*radius_oc**2,radius_oc)
    print('Qs in outer core is {:.2f}TW'.format(Qs_oc*1e-12))
    # Slurry: obtained by closing the slurry energy balance.
    Qs_slurry = csb_heatflux - Qg_slurry - Ql
    print('Qs in slurry {:.2f}TW'.format(Qs_slurry*1e-12))
    Qs = Qs_slurry+Qs_oc
    print('Total Qs is {:.2f}TW'.format(Qs*1e-12))
    # CMB heat flux
    # NOTE(review): the message below contains a duplicated 'is is'.
    Q_cmb = csb_heatflux+Qs_oc+Qg_oc
    print('Qcmb is is {:.2f}TW'.format((Q_cmb)*1e-12))
    return (Q_cmb, Qs, Qs_slurry, Qs_oc, Ql, Qg, Qg_oc, Qg_slurry, cooling_rate,
            cmb_temp, temp_ad)
"slurpy.lookup.liquidus",
"numpy.zeros",
"slurpy.lookup.ohtaki",
"slurpy.lookup.premgravity",
"numpy.linspace",
"scipy.interpolate.interp1d",
"slurpy.lookup.premvp",
"slurpy.lookup.premdensity",
"scipy.integrate.simps"
] | [((1203, 1223), 'slurpy.lookup.liquidus', 'liquidus', (['csb_radius'], {}), '(csb_radius)\n', (1211, 1223), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((4057, 4076), 'slurpy.lookup.premgravity', 'premgravity', (['radius'], {}), '(radius)\n', (4068, 4076), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((4089, 4108), 'slurpy.lookup.premdensity', 'premdensity', (['radius'], {}), '(radius)\n', (4100, 4108), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((4775, 4798), 'slurpy.lookup.premgravity', 'premgravity', (['csb_radius'], {}), '(csb_radius)\n', (4786, 4798), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((5856, 5879), 'slurpy.lookup.premdensity', 'premdensity', (['csb_radius'], {}), '(csb_radius)\n', (5867, 5879), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((7131, 7150), 'slurpy.lookup.premgravity', 'premgravity', (['radius'], {}), '(radius)\n', (7142, 7150), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((7567, 7589), 'slurpy.lookup.premgravity', 'premgravity', (['oc_radius'], {}), '(oc_radius)\n', (7578, 7589), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((7694, 7705), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (7702, 7705), True, 'import numpy as np\n'), ((8450, 8486), 'numpy.linspace', 'np.linspace', (['csb_radius1', 'cmb_radius'], {}), '(csb_radius1, cmb_radius)\n', (8461, 8486), True, 'import numpy as np\n'), ((9177, 9212), 'numpy.linspace', 'np.linspace', (['icb_radius', 'cmb_radius'], {}), '(icb_radius, cmb_radius)\n', (9188, 9212), True, 'import numpy as np\n'), ((9230, 9253), 'slurpy.lookup.premgravity', 'premgravity', (['radius_psi'], {}), '(radius_psi)\n', (9241, 9253), False, 'from slurpy.lookup import liquidus, premgravity, 
premdensity, premvp, ohtaki\n'), ((9264, 9289), 'numpy.zeros', 'np.zeros', (['radius_psi.size'], {}), '(radius_psi.size)\n', (9272, 9289), True, 'import numpy as np\n'), ((9419, 9456), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['radius_psi', 'psi'], {}), '(radius_psi, psi)\n', (9439, 9456), False, 'from scipy import integrate, interpolate\n'), ((9590, 9655), 'scipy.integrate.simps', 'integrate.simps', (['(density_slurry * 4 * np.pi * radius ** 2)', 'radius'], {}), '(density_slurry * 4 * np.pi * radius ** 2, radius)\n', (9605, 9655), False, 'from scipy import integrate, interpolate\n'), ((10008, 10043), 'numpy.linspace', 'np.linspace', (['csb_radius', 'cmb_radius'], {}), '(csb_radius, cmb_radius)\n', (10019, 10043), True, 'import numpy as np\n'), ((10189, 10211), 'slurpy.lookup.premdensity', 'premdensity', (['radius_oc'], {}), '(radius_oc)\n', (10200, 10211), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((10226, 10293), 'scipy.integrate.simps', 'integrate.simps', (['(density_oc * 4 * np.pi * radius_oc ** 2)', 'radius_oc'], {}), '(density_oc * 4 * np.pi * radius_oc ** 2, radius_oc)\n', (10241, 10293), False, 'from scipy import integrate, interpolate\n'), ((10385, 10479), 'scipy.integrate.simps', 'integrate.simps', (['(alphaXi * psi_oc * oxygen_mass_oc * 4 * np.pi * radius_oc ** 2)', 'radius_oc'], {}), '(alphaXi * psi_oc * oxygen_mass_oc * 4 * np.pi * radius_oc **\n 2, radius_oc)\n', (10400, 10479), False, 'from scipy import integrate, interpolate\n'), ((4839, 4862), 'slurpy.lookup.premdensity', 'premdensity', (['csb_radius'], {}), '(csb_radius)\n', (4850, 4862), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((6543, 6566), 'slurpy.lookup.premdensity', 'premdensity', (['csb_radius'], {}), '(csb_radius)\n', (6554, 6566), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((7636, 7653), 'slurpy.lookup.premvp', 'premvp', 
(['oc_radius'], {}), '(oc_radius)\n', (7642, 7653), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((8520, 8541), 'slurpy.lookup.liquidus', 'liquidus', (['csb_radius1'], {}), '(csb_radius1)\n', (8528, 8541), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((9714, 9792), 'scipy.integrate.simps', 'integrate.simps', (['(density_slurry * psi_slurry * 4 * np.pi * radius ** 2)', 'radius'], {}), '(density_slurry * psi_slurry * 4 * np.pi * radius ** 2, radius)\n', (9729, 9792), False, 'from scipy import integrate, interpolate\n'), ((11365, 11442), 'scipy.integrate.simps', 'integrate.simps', (['(density_oc * temp_ad * 4 * np.pi * radius_oc ** 2)', 'radius_oc'], {}), '(density_oc * temp_ad * 4 * np.pi * radius_oc ** 2, radius_oc)\n', (11380, 11442), False, 'from scipy import integrate, interpolate\n'), ((4911, 4929), 'slurpy.lookup.ohtaki', 'ohtaki', (['csb_radius'], {}), '(csb_radius)\n', (4917, 4929), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((6616, 6634), 'slurpy.lookup.ohtaki', 'ohtaki', (['csb_radius'], {}), '(csb_radius)\n', (6622, 6634), False, 'from slurpy.lookup import liquidus, premgravity, premdensity, premvp, ohtaki\n'), ((9347, 9395), 'scipy.integrate.simps', 'integrate.simps', (['gravity_psi[i:]', 'radius_psi[i:]'], {}), '(gravity_psi[i:], radius_psi[i:])\n', (9362, 9395), False, 'from scipy import integrate, interpolate\n'), ((7929, 7984), 'scipy.integrate.simps', 'integrate.simps', (['integrand[0:i + 1]', 'oc_radius[0:i + 1]'], {}), '(integrand[0:i + 1], oc_radius[0:i + 1])\n', (7944, 7984), False, 'from scipy import integrate, interpolate\n')] |
from moviepy.editor import *
from PIL import Image, ImageOps
import numpy as np
from glob import glob
def make_youtube_video(introfile, audiofile, cover_img, outputfn, firstchapter=False):
    """Render an audiobook chapter video: intro clip plus a still cover.

    :param introfile: intro video clip (defines the frame size and fps)
    :param audiofile: audiobook recording (wav, mp3, etc)
    :param cover_img: cover image shown during the audiobook playback
    :param outputfn: output filename of the youtube video
    :param firstchapter: when True, the chapter audio plays only over the
        cover segment; otherwise it is laid over the whole video.
    :return: None
    :side-effect: writes the generated video to ``outputfn``
    """
    intro = VideoFileClip(introfile)
    narration = AudioFileClip(audiofile)

    # Scale the cover to fit inside the intro's frame, then pad to size.
    cover = Image.open(cover_img)
    frame_size = intro.size
    scale = min(frame_size[0] / cover.size[0], frame_size[1] / cover.size[1])
    scaled = cover.resize([int(dim * scale) for dim in cover.size])
    cover_array = np.asarray(ImageOps.pad(scaled, frame_size))

    if firstchapter:
        still = ImageClip(cover_array)\
            .set_duration(narration.duration)\
            .set_audio(narration)\
            .set_fps(intro.fps)
        still = still.resize(intro.size)
        final = concatenate_videoclips([intro, still])
        final.write_videofile(outputfn)
    else:
        still_duration = narration.duration - intro.duration
        still = ImageClip(cover_array).set_duration(still_duration)
        final = concatenate_videoclips([intro, still]).set_audio(narration).set_fps(intro.fps)
        final.write_videofile(outputfn)
if __name__ == "__main__":
    # Batch-render audiobook chapters of "Frankenstein" to video.
    introfile = 'readitai_intro.mp4'
    audio_dir = 'outputs/Frankenstein/audiobook_gcwavenet'
    cover_img = 'inputs/Frankenstein/Frankenstein.jpg'
    # NOTE(review): ``os`` is not imported explicitly in this file;
    # presumably it arrives via ``from moviepy.editor import *`` -- confirm.
    output_dir = os.path.join(os.path.dirname(audio_dir), 'video')
    os.makedirs(output_dir, exist_ok=True)
    # (book_name is currently unused below.)
    book_name = os.path.basename(os.path.dirname(audio_dir))
    # Sort chapter recordings by filename for a stable chapter order.
    audiofiles = sorted(glob(os.path.join(audio_dir, '*.wav')), key=os.path.basename)
    # The slice [4:5] restricts this run to the fifth recording only, and
    # ``i = i+0`` is a leftover offset placeholder (a no-op as written), so
    # the rendered file is numbered 'Chapter 1' regardless of its position.
    for i, audiofile in enumerate(audiofiles[4:5]):
        i = i+0
        outputfn = os.path.join(output_dir, 'Chapter {}.mp4'.format(i + 1))
        if not i:
            # First iteration keeps the intro audio separate from narration.
            make_youtube_video(introfile, audiofile, cover_img, outputfn, firstchapter=True)
        else:
            make_youtube_video(introfile, audiofile, cover_img, outputfn)
| [
"PIL.ImageOps.pad",
"numpy.asarray",
"PIL.Image.open"
] | [((842, 863), 'PIL.Image.open', 'Image.open', (['cover_img'], {}), '(cover_img)\n', (852, 863), False, 'from PIL import Image, ImageOps\n'), ((1022, 1048), 'PIL.ImageOps.pad', 'ImageOps.pad', (['im_rsz', 'd_sz'], {}), '(im_rsz, d_sz)\n', (1034, 1048), False, 'from PIL import Image, ImageOps\n'), ((1067, 1089), 'numpy.asarray', 'np.asarray', (['im_rsz_pad'], {}), '(im_rsz_pad)\n', (1077, 1089), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
np.random.seed(42) #设置随机种子
x = np.random.randn(100) #产生100符合均值为0,方差为1的正太分布数据
#手动计算直方图
bins = np.linspace(-5, 5, 20) #直方图的区间
counts = np.zeros_like(bins) #产生一个形状(维度)和bins相同,值全为0的矩阵
#为x中的每个元素在bins中找出其所在的索引
i = np.searchsorted(bins, x)
#为每个区间加1
np.add.at(counts, i, 1)
plt.plot(bins, counts, linestyle='steps')
plt.show() | [
"numpy.zeros_like",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.searchsorted",
"numpy.linspace",
"numpy.add.at"
] | [((117, 135), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (131, 135), True, 'import numpy as np\n'), ((148, 168), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (163, 168), True, 'import numpy as np\n'), ((211, 233), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(20)'], {}), '(-5, 5, 20)\n', (222, 233), True, 'import numpy as np\n'), ((251, 270), 'numpy.zeros_like', 'np.zeros_like', (['bins'], {}), '(bins)\n', (264, 270), True, 'import numpy as np\n'), ((327, 351), 'numpy.searchsorted', 'np.searchsorted', (['bins', 'x'], {}), '(bins, x)\n', (342, 351), True, 'import numpy as np\n'), ((362, 385), 'numpy.add.at', 'np.add.at', (['counts', 'i', '(1)'], {}), '(counts, i, 1)\n', (371, 385), True, 'import numpy as np\n'), ((387, 428), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', 'counts'], {'linestyle': '"""steps"""'}), "(bins, counts, linestyle='steps')\n", (395, 428), True, 'import matplotlib.pyplot as plt\n'), ((429, 439), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (437, 439), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# Copyright 2019 <NAME>
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dask.base import tokenize
import numpy
import dask
import dask.delayed
import dask.optimization
import xarray
import pandas
import typing as T
import dask.array
import dask.dataframe
import itertools
"""Helper functions
These functions are low-level, and mainly intended for internal use
"""
def map_blocks_to_delayed(
da: xarray.DataArray,
func,
axis=None,
name="blocks-to-delayed",
args=[],
kwargs={},
) -> T.List[T.Tuple[T.List[int], T.Any]]:
"""
Run some function 'func' on each dask chunk of 'da'
The function is called like `func(da_block, offset=offset)` - `da_block` the chunk
to work on, with coordinates from `da`; and `offset`, the location of
`block` within `da`.
The function is wrapped as a :obj:`dask.delayed`, :func:`chunk_map` returns
a list of (offset, delayed result) for each chunk of `da`.
If you're wanting to convert the results back into an array, see
:func:`xarray.map_blocks` or :func:`dask.array.map_blocks`
>>> def func(da_chunk, offset):
... return da_chunk.mean().values
>>> da = xarray.DataArray(numpy.eye(10), dims=['x','y'])
>>> da = da.chunk({'x': 5})
>>> results = map_blocks_to_delayed(da, func)
>>> results #doctest: +ELLIPSIS
[([0, 0], Delayed(...)), ([5, 0], Delayed(...))]
>>> dask.compute(results)
([([0, 0], array(0.1)), ([5, 0], array(0.1))],)
Args:
da: Input DataArray
func: Function to run
args, kwargs: Passed to func
Returns:
List of tuples with the chunk offset in `da` and a delayed result of
running `func` on that chunk
"""
data = da.data
# data.dask = data.__dask_optimize__(data.__dask_graph__(), data.__dask_keys__())
offsets = []
block_id = []
for i in range(da.ndim):
chunks = data.chunks[i]
block_id.append(range(len(chunks)))
offsets.append(numpy.cumsum([0, *chunks[:-1]]))
results = []
for chunk in itertools.product(*block_id):
size = [data.chunks[d][chunk[d]] for d in range(da.ndim)]
offset = [offsets[d][chunk[d]] for d in range(da.ndim)]
block = data.blocks[chunk]
# block.dask, _ = dask.optimization.cull(block.__dask_graph__, block.__dask_layers__())
# mark = time.perf_counter()
# block.dask = block.__dask_optimize__(
# block.__dask_graph__(), block.__dask_keys__()
# )
# print("opt", time.perf_counter() - mark)
coords = {
da.dims[d]: da.coords[da.dims[d]][offset[d] : offset[d] + block.shape[d]]
for d in range(da.ndim)
}
da_block = xarray.DataArray(
block, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs
)
# da_block = da[
# tuple(slice(offset[d], offset[d] + size[d]) for d in range(da.ndim))
# ]
name = name + "-" + tokenize(block.name)
result = dask.delayed(func, name=name)(da_block, *args, offset=offset, **kwargs)
results.append((offset, result))
return results
def chunk_count(da: xarray.DataArray) -> numpy.number:
"""
Returns the number of chunks in the dataset
"""
if da.chunks is None:
raise Exception
return numpy.prod([len(c) for c in da.chunks]).astype("i")
def chunk_size(da: xarray.DataArray) -> float:
"""
Returns the size of the first dask chunk in the dataset
"""
return numpy.prod(da.data.chunksize) * da.data.itemsize
def graph_size(da: xarray.DataArray) -> int:
"""
Returns number of nodes in the dask graph
"""
return len(da.__dask_graph__())
def dask_report(da: xarray.DataArray) -> None:
"""
Print info about a dask array
"""
print("Chunk Count:", chunk_count(da))
print("Chunk Size:", dask.utils.format_bytes(chunk_size(da)))
print("Graph Size:", graph_size(da))
def optimized_dask_get(graph, keys, optimizer=None, sync=True):
"""
Compute a dask low-level graph with some optimization
"""
try:
client = dask.distributed.get_client()
except ValueError:
client = dask
if optimizer:
graph, _ = optimizer(graph, keys)
else:
graph, _ = dask.optimization.cull(graph, keys)
graph, _ = dask.optimization.fuse(graph, keys)
return client.get(graph, keys, sync=sync)
def throttle_futures(graph, key_list, optimizer=None, max_tasks=None):
"""
Run futures in parallel, with a maximum of 'max_tasks' at once
Args:
graph: Dask task graph
key_list: Iterable of keys from 'graph' to compute
max_tasks: Maximum number of tasks to run at once (if none use the
number of workers)
"""
try:
client = dask.distributed.get_client()
except ValueError:
# No cluster, run in serial
return [optimized_dask_get(graph, k) for k in key_list]
futures = []
keys = iter(key_list)
if max_tasks is None:
max_tasks = len(client.cluster.workers)
# Build up initial max_tasks future list
for i in range(min(max_tasks, len(key_list))):
futures.append(
optimized_dask_get(graph, next(keys), optimizer=optimizer, sync=False)
)
# Add new futures as the existing ones are completed
ac = dask.distributed.as_completed(futures, with_results=True)
results = []
for f, result in ac:
try:
ac.add(
optimized_dask_get(graph, next(keys), optimizer=optimizer, sync=False)
)
except StopIteration:
pass
results.append(result)
return results
def locate_block_in_dataarray(
block: dask.array.Array,
name: str,
dims: T.List[str],
coords: T.Dict[T.Hashable, T.Any],
block_info: T.Dict[str, T.Any],
):
"""
Locates the metadata of the current block
Args:
da (dask.array.Array): A block of xda
xda (xarray.DataArray): Whole DataArray being operated on
block_info: Block metadata
Returns:
xarray.DataArray with the block and its metadata
"""
if block_info is not None:
subset = {
d: slice(x0, x1) for d, (x0, x1) in zip(dims, block_info["array-location"])
}
out_coords = {}
for k, v in coords.items():
out_coords[k] = v.isel(
{kk: vv for kk, vv in subset.items() if kk in v.dims}
)
else:
out_coords = coords
return xarray.DataArray(block, name=name, dims=dims, coords=out_coords)
def map_blocks_array_to_dataframe(
func: T.Callable[..., pandas.DataFrame],
array: dask.array.Array,
*args,
meta: pandas.DataFrame,
**kwargs
) -> dask.dataframe.DataFrame:
"""
Apply a function `func` to each dask chunk, returning a dataframe
'func' will be set up to run on each dask chunk of 'array', returning a
dataframe. These dataframes are then collated into a single dask dataframe.
The returned dataframe is in an arbitrary order, it may be sorted with
:meth:`dask.dataframe.DataFrame.set_index`.
>>> da = dask.array.zeros((10, 10), chunks=(5, 5))
>>> def func(da):
... return pandas.DataFrame({"mean": da.mean()}, index=[1])
>>> meta = pandas.DataFrame({"mean": pandas.Series([], dtype=da.dtype)})
>>> map_blocks_array_to_dataframe(func, da, meta=meta) # doctest: +NORMALIZE_WHITESPACE
Dask DataFrame Structure:
mean
npartitions=4
float64
...
...
...
...
Dask Name: func, 8 tasks
The mapping function behaves the same as :func:`dask.array.map_blocks`.
If it has a keyword argument `block_info`, that argument will be filled
with information about the block location.
The block can be located within a :obj:`xarray.DataArray`, adding the
correct coordinate metadata, with :func:`locate_block_in_dataarray`:
>>> xda = xarray.DataArray(da, dims=['t','x'])
>>> def func(da, block_info=None):
... da = locate_block_in_dataarray(da, xda, block_info[0])
... return pandas.DataFrame({"mean": da.mean().values}, index=[1])
Args:
func ((:obj:`numpy.array`, **kwargs) -> :obj:`pandas.DataFrame`):
Function to run on each block
array (:obj:`dask.array.Array`): Dask array to operate on
meta (:obj:`pandas.DataFrame`): Sample dataframe with the correct
output columns
*args, **kwargs: Passed to 'func'
Returns:
:obj:`dask.dataframe.DataFrame`, with each block the result of applying
'func' to a block of 'array', in an arbitrary order
"""
if getattr(array, "npartitions", None) is None:
return func(array, *args, **kwargs)
# Use the array map blocks, with a dummy meta (as we won't be making an array)
mapped = dask.array.map_blocks(
func, array, *args, **kwargs, meta=numpy.array((), dtype="i")
)
return array_blocks_to_dataframe(mapped, meta)
def array_blocks_to_dataframe(
array: dask.array.Array, meta: pandas.DataFrame
) -> dask.dataframe.DataFrame:
"""
Convert the blocks from a dask array to a dask dataframe
"""
# Grab the Dask graph from the array map
graph = array.dask
name = array.name
# Flatten the results to 1d
# Keys in the graph layer are (name, chunk_coord)
# We'll replace chunk_coord with a scalar value
layer = {}
for i, v in enumerate(graph.layers[name].values()):
layer[(name, i)] = v
graph.layers[name] = dask.highlevelgraph.MaterializedLayer(layer)
# Low level dask dataframe constructor
df = dask.dataframe.core.new_dd_object(
graph, name, meta, [None] * (array.npartitions + 1)
)
return df
from itertools import zip_longest
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return zip_longest(*args)
# An array-like value for typing
ArrayVar = T.TypeVar("ArrayVar", xarray.DataArray, dask.array.Array, numpy.ndarray)
def throttled_compute(arr: ArrayVar, *, n: int, name: T.Hashable = None) -> ArrayVar:
"""
Compute a Dask object N chunks at a time
Args:
obj: Object to compute
n: Number of chunks to process at once
name: Dask layer name to compute (default obj.name)
Returns:
'obj', with each chunk computed
"""
# Copy the input in case it's a xarray object
obj = arr
if isinstance(arr, xarray.DataArray):
# Work on the data
obj = arr.data
if not hasattr(obj, "dask") or isinstance(arr, numpy.ndarray):
# Short-circuit non-dask arrays
return arr
# Current dask scheduler
schedule = dask.base.get_scheduler(collections=[obj])
# Get the layer to work on
if name is None:
name = obj.name
top_layer = obj.dask.layers[name]
result = {}
# Compute chunks N at a time
for x in grouper(top_layer, n):
x = [xx for xx in x if xx is not None]
values = schedule(obj.dask, list(x))
result.update(dict(zip(x, values)))
# Build a new dask graph
layer = dask.highlevelgraph.MaterializedLayer(result)
graph = dask.highlevelgraph.HighLevelGraph.from_collections(name, layer)
obj.dask = graph
if isinstance(arr, xarray.DataArray):
# Add back metadata
obj = xarray.DataArray(
obj, name=arr.name, dims=arr.dims, coords=arr.coords, attrs=arr.attrs
)
return obj
def visualize_block(arr: dask.array.Array):
"""
Visualise the graph of a single chunk from 'arr'
"""
name = arr.name
graph = arr.dask
layer = graph.layers[name]
block = next(layer.values())
culled = graph.cull(set(block))
graph = dask.dot.to_graphviz(culled)
return graph
| [
"dask.dataframe.core.new_dd_object",
"dask.dot.to_graphviz",
"dask.delayed",
"dask.highlevelgraph.HighLevelGraph.from_collections",
"dask.optimization.fuse",
"dask.base.tokenize",
"itertools.zip_longest",
"dask.optimization.cull",
"numpy.cumsum",
"numpy.array",
"xarray.DataArray",
"dask.highle... | [((10737, 10809), 'typing.TypeVar', 'T.TypeVar', (['"""ArrayVar"""', 'xarray.DataArray', 'dask.array.Array', 'numpy.ndarray'], {}), "('ArrayVar', xarray.DataArray, dask.array.Array, numpy.ndarray)\n", (10746, 10809), True, 'import typing as T\n'), ((2601, 2629), 'itertools.product', 'itertools.product', (['*block_id'], {}), '(*block_id)\n', (2618, 2629), False, 'import itertools\n'), ((5925, 5982), 'dask.distributed.as_completed', 'dask.distributed.as_completed', (['futures'], {'with_results': '(True)'}), '(futures, with_results=True)\n', (5954, 5982), False, 'import dask\n'), ((7106, 7170), 'xarray.DataArray', 'xarray.DataArray', (['block'], {'name': 'name', 'dims': 'dims', 'coords': 'out_coords'}), '(block, name=name, dims=dims, coords=out_coords)\n', (7122, 7170), False, 'import xarray\n'), ((10247, 10291), 'dask.highlevelgraph.MaterializedLayer', 'dask.highlevelgraph.MaterializedLayer', (['layer'], {}), '(layer)\n', (10284, 10291), False, 'import dask\n'), ((10345, 10436), 'dask.dataframe.core.new_dd_object', 'dask.dataframe.core.new_dd_object', (['graph', 'name', 'meta', '([None] * (array.npartitions + 1))'], {}), '(graph, name, meta, [None] * (array.\n npartitions + 1))\n', (10378, 10436), False, 'import dask\n'), ((10672, 10690), 'itertools.zip_longest', 'zip_longest', (['*args'], {}), '(*args)\n', (10683, 10690), False, 'from itertools import zip_longest\n'), ((11492, 11534), 'dask.base.get_scheduler', 'dask.base.get_scheduler', ([], {'collections': '[obj]'}), '(collections=[obj])\n', (11515, 11534), False, 'import dask\n'), ((11915, 11960), 'dask.highlevelgraph.MaterializedLayer', 'dask.highlevelgraph.MaterializedLayer', (['result'], {}), '(result)\n', (11952, 11960), False, 'import dask\n'), ((11973, 12037), 'dask.highlevelgraph.HighLevelGraph.from_collections', 'dask.highlevelgraph.HighLevelGraph.from_collections', (['name', 'layer'], {}), '(name, layer)\n', (12024, 12037), False, 'import dask\n'), ((12541, 12569), 
'dask.dot.to_graphviz', 'dask.dot.to_graphviz', (['culled'], {}), '(culled)\n', (12561, 12569), False, 'import dask\n'), ((3273, 3360), 'xarray.DataArray', 'xarray.DataArray', (['block'], {'dims': 'da.dims', 'coords': 'coords', 'name': 'da.name', 'attrs': 'da.attrs'}), '(block, dims=da.dims, coords=coords, name=da.name, attrs=da\n .attrs)\n', (3289, 3360), False, 'import xarray\n'), ((4071, 4100), 'numpy.prod', 'numpy.prod', (['da.data.chunksize'], {}), '(da.data.chunksize)\n', (4081, 4100), False, 'import numpy\n'), ((4680, 4709), 'dask.distributed.get_client', 'dask.distributed.get_client', ([], {}), '()\n', (4707, 4709), False, 'import dask\n'), ((4845, 4880), 'dask.optimization.cull', 'dask.optimization.cull', (['graph', 'keys'], {}), '(graph, keys)\n', (4867, 4880), False, 'import dask\n'), ((4900, 4935), 'dask.optimization.fuse', 'dask.optimization.fuse', (['graph', 'keys'], {}), '(graph, keys)\n', (4922, 4935), False, 'import dask\n'), ((5372, 5401), 'dask.distributed.get_client', 'dask.distributed.get_client', ([], {}), '()\n', (5399, 5401), False, 'import dask\n'), ((12145, 12236), 'xarray.DataArray', 'xarray.DataArray', (['obj'], {'name': 'arr.name', 'dims': 'arr.dims', 'coords': 'arr.coords', 'attrs': 'arr.attrs'}), '(obj, name=arr.name, dims=arr.dims, coords=arr.coords,\n attrs=arr.attrs)\n', (12161, 12236), False, 'import xarray\n'), ((2533, 2564), 'numpy.cumsum', 'numpy.cumsum', (['[0, *chunks[:-1]]'], {}), '([0, *chunks[:-1]])\n', (2545, 2564), False, 'import numpy\n'), ((3528, 3548), 'dask.base.tokenize', 'tokenize', (['block.name'], {}), '(block.name)\n', (3536, 3548), False, 'from dask.base import tokenize\n'), ((3567, 3596), 'dask.delayed', 'dask.delayed', (['func'], {'name': 'name'}), '(func, name=name)\n', (3579, 3596), False, 'import dask\n'), ((9614, 9640), 'numpy.array', 'numpy.array', (['()'], {'dtype': '"""i"""'}), "((), dtype='i')\n", (9625, 9640), False, 'import numpy\n')] |
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
path = os.path.join('output', 'test_1.csv')
df = pd.read_csv(path)
df = df.groupby(['Index', 'Asked for articles'])['Time in MS'].mean().reset_index()
data_np = df.values # converting to numpy
unique_classes = np.unique(data_np[:, 0])
data_dict = {}
# mysql Fulltext: Red
# mysql: Green
# elastic: Blue
colors = ['red', 'green', 'blue', 'red', 'green', 'blue', 'red', 'green', 'blue']
# 1000K: '-'
# 100K: '-.'
# 10K: ':'
linestyles = ['-', '-', '-', '-.', '-.', '-.', ':', ':', ':']
i = 0
for u_class in unique_classes:
class_data = data_np[data_np[:, 0] == u_class, :] # masks out rows from class
data_dict.update({u_class: {'x': class_data[:, 1], 'y': class_data[:, 2]}})
plt.plot(data_dict[u_class]['x'], data_dict[u_class]['y'], label=u_class, linestyle=linestyles[i], color=colors[i])
i = i+1
plt.ylabel('Time in MS')
plt.yscale('log')
plt.xlabel('Asked for articles')
plt.title('Simple query - English | Only Topics | Single user')
plt.legend(bbox_to_anchor=(1, 1))
save_path = os.path.join('output', 'topics')
plt.savefig(save_path, bbox_inches='tight')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"numpy.unique"
] | [((91, 127), 'os.path.join', 'os.path.join', (['"""output"""', '"""test_1.csv"""'], {}), "('output', 'test_1.csv')\n", (103, 127), False, 'import os\n'), ((133, 150), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (144, 150), True, 'import pandas as pd\n'), ((295, 319), 'numpy.unique', 'np.unique', (['data_np[:, 0]'], {}), '(data_np[:, 0])\n', (304, 319), True, 'import numpy as np\n'), ((903, 927), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time in MS"""'], {}), "('Time in MS')\n", (913, 927), True, 'import matplotlib.pyplot as plt\n'), ((928, 945), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (938, 945), True, 'import matplotlib.pyplot as plt\n'), ((946, 978), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Asked for articles"""'], {}), "('Asked for articles')\n", (956, 978), True, 'import matplotlib.pyplot as plt\n'), ((979, 1042), 'matplotlib.pyplot.title', 'plt.title', (['"""Simple query - English | Only Topics | Single user"""'], {}), "('Simple query - English | Only Topics | Single user')\n", (988, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1076), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1)'}), '(bbox_to_anchor=(1, 1))\n', (1053, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1121), 'os.path.join', 'os.path.join', (['"""output"""', '"""topics"""'], {}), "('output', 'topics')\n", (1101, 1121), False, 'import os\n'), ((1122, 1165), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'bbox_inches': '"""tight"""'}), "(save_path, bbox_inches='tight')\n", (1133, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1176), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1174, 1176), True, 'import matplotlib.pyplot as plt\n'), ((775, 894), 'matplotlib.pyplot.plot', 'plt.plot', (["data_dict[u_class]['x']", "data_dict[u_class]['y']"], {'label': 'u_class', 'linestyle': 'linestyles[i]', 'color': 'colors[i]'}), "(data_dict[u_class]['x'], 
data_dict[u_class]['y'], label=u_class,\n linestyle=linestyles[i], color=colors[i])\n", (783, 894), True, 'import matplotlib.pyplot as plt\n')] |
import copy
import numpy as np
import time
from .freezable import Freezable
class Timing(Freezable):
def __init__(self, node, method_name=None):
self.__name = type(node).__name__
self.__method_name = method_name
self.__start = 0
self.__first_start = 0
self.__last_stop = 0
self.__time = 0
self.freeze()
def start(self):
self.__start = time.time()
if self.__first_start == 0:
self.__first_start = self.__start
def stop(self):
if self.__start == 0:
return
t = time.time()
self.__time += (t - self.__start)
self.__start = 0
self.__last_stop = t
def elapsed(self):
'''Accumulated time elapsed between calls to start() and stop().'''
if self.__start == 0:
return self.__time
return self.__time + (time.time() - self.__start)
def span(self):
'''Timestamps of the first call to start() and last call to stop().'''
return self.__first_start, self.__last_stop
def get_node_name(self):
return self.__name
def get_method_name(self):
return self.__method_name
class TimingSummary(Freezable):
'''Holds repeated Timings of the same node/method to be queried for statistics.'''
def __init__(self):
self.timings = []
self.times = []
self.freeze()
def add(self, timing):
'''Add a Timing to this summary.'''
self.timings.append(timing)
self.times.append(timing.elapsed())
def merge(self, other):
'''Merge another summary into this one.'''
for timing in other.timings:
self.add(timing)
def counts(self):
return len(self.times)
def min(self):
return np.min(self.times)
def max(self):
return np.max(self.times)
def mean(self):
return np.mean(self.times)
def median(self):
return np.median(self.times)
class ProfilingStats(Freezable):
def __init__(self):
self.__summaries = {}
self.freeze()
def add(self, timing):
'''Add a Timing instance. Timings are grouped by their class and method names.'''
node_name = timing.get_node_name()
method_name = timing.get_method_name()
id = (node_name, method_name)
if id not in self.__summaries:
self.__summaries[id] = TimingSummary()
self.__summaries[id].add(copy.deepcopy(timing))
def merge_with(self, other):
'''Combine statitics of two ProfilingStats.'''
for id, summary in other.__summaries.items():
if id in self.__summaries:
self.__summaries[id].merge(copy.deepcopy(summary))
else:
self.__summaries[id] = copy.deepcopy(summary)
def get_timing_summaries(self):
'''Get a dictionary (node_name,method_name) -> TimingSummary.'''
return self.__summaries
def get_timing_summary(self, node_name, method_name=None):
'''Get a :class:`TimingSummary` for the given node and method name.'''
if (node_name, method_name) not in self.__summaries:
raise RuntimeError("No timing summary for node %s, method %s"%(node_name,method_name))
return self.__summaries[(node_name,method_name)]
def span(self):
'''Timestamps of the first call to start() and last call to stop() over
all Timings added.'''
spans = [t.span() for (_, summary) in self.__summaries.items() for t in summary.timings]
first_start = min([span[0] for span in spans])
last_stop = max([span[1] for span in spans])
return first_start, last_stop
def span_time(self):
'''Time between the first call to start() and last call to stop() over
any timing.'''
start, stop = self.span()
return stop - start
| [
"copy.deepcopy",
"numpy.median",
"time.time",
"numpy.min",
"numpy.mean",
"numpy.max"
] | [((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((587, 598), 'time.time', 'time.time', ([], {}), '()\n', (596, 598), False, 'import time\n'), ((1795, 1813), 'numpy.min', 'np.min', (['self.times'], {}), '(self.times)\n', (1801, 1813), True, 'import numpy as np\n'), ((1849, 1867), 'numpy.max', 'np.max', (['self.times'], {}), '(self.times)\n', (1855, 1867), True, 'import numpy as np\n'), ((1904, 1923), 'numpy.mean', 'np.mean', (['self.times'], {}), '(self.times)\n', (1911, 1923), True, 'import numpy as np\n'), ((1962, 1983), 'numpy.median', 'np.median', (['self.times'], {}), '(self.times)\n', (1971, 1983), True, 'import numpy as np\n'), ((2466, 2487), 'copy.deepcopy', 'copy.deepcopy', (['timing'], {}), '(timing)\n', (2479, 2487), False, 'import copy\n'), ((888, 899), 'time.time', 'time.time', ([], {}), '()\n', (897, 899), False, 'import time\n'), ((2796, 2818), 'copy.deepcopy', 'copy.deepcopy', (['summary'], {}), '(summary)\n', (2809, 2818), False, 'import copy\n'), ((2715, 2737), 'copy.deepcopy', 'copy.deepcopy', (['summary'], {}), '(summary)\n', (2728, 2737), False, 'import copy\n')] |
import unittest
import numpy as np
from mil.models import AttentionDeepPoolingMil
from mil.utils.padding import Padding
class TestAttentionDeepPoolingMil(unittest.TestCase):
def setUp(self):
self.training_bag = np.random.normal(0, 1, (30, 3, 28, 28, 1))
self.training_label = np.zeros(30)
self.training_label[15:] = 1
self.model = AttentionDeepPoolingMil()
def test_when_fitting_keras_model_all_correct_with_padding(self):
pipeline = [('padding', Padding())]
self.model.fit(self.training_bag, self.training_label, verbose=0, epochs=10)
y_pred = self.model.predict(self.training_bag)
self.assertEqual(len(y_pred), 30)
def test_when_fitting_with_2d_all_correct(self):
""" when modelling with [bags, instances, features] shape works well """
training_bag = np.random.normal(0, 1, (30, 10, 28))
self.model.fit(training_bag, self.training_label, verbose=0, epochs=10)
y_pred = self.model.predict(training_bag)
self.assertEqual(len(y_pred), 30)
def test_when_fitting_with_3d_all_correct(self):
""" when modelling with [bags, instances, features, features] shape works well """
training_bag = np.random.normal(0, 1, (30, 10, 28, 28))
self.model.fit(training_bag, self.training_label, verbose=0, epochs=10)
y_pred = self.model.predict(training_bag)
self.assertEqual(len(y_pred), 30)
def test_when_fitting_with_4d_all_correct(self):
""" when modelling with [bags, instances, features, features, features] shape works well """
training_bag = np.random.normal(0, 1, (30, 10, 28, 28, 1))
self.model.fit(training_bag, self.training_label, verbose=0, epochs=10)
y_pred = self.model.predict(training_bag)
self.assertEqual(len(y_pred), 30)
def test_when_fitting_with_5d_all_correct(self):
""" when modelling with [bags, instances, features, features, features, features] shape works well """
training_bag = np.random.normal(0, 1, (30, 10, 28, 28, 20, 1))
self.model.fit(training_bag, self.training_label, verbose=0, epochs=10)
y_pred = self.model.predict(training_bag)
self.assertEqual(len(y_pred), 30)
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"mil.models.AttentionDeepPoolingMil",
"numpy.zeros",
"numpy.random.normal",
"mil.utils.padding.Padding"
] | [((2370, 2385), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2383, 2385), False, 'import unittest\n'), ((241, 283), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(30, 3, 28, 28, 1)'], {}), '(0, 1, (30, 3, 28, 28, 1))\n', (257, 283), True, 'import numpy as np\n'), ((315, 327), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (323, 327), True, 'import numpy as np\n'), ((388, 413), 'mil.models.AttentionDeepPoolingMil', 'AttentionDeepPoolingMil', ([], {}), '()\n', (411, 413), False, 'from mil.models import AttentionDeepPoolingMil\n'), ((891, 927), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(30, 10, 28)'], {}), '(0, 1, (30, 10, 28))\n', (907, 927), True, 'import numpy as np\n'), ((1283, 1323), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(30, 10, 28, 28)'], {}), '(0, 1, (30, 10, 28, 28))\n', (1299, 1323), True, 'import numpy as np\n'), ((1689, 1732), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(30, 10, 28, 28, 1)'], {}), '(0, 1, (30, 10, 28, 28, 1))\n', (1705, 1732), True, 'import numpy as np\n'), ((2108, 2155), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(30, 10, 28, 28, 20, 1)'], {}), '(0, 1, (30, 10, 28, 28, 20, 1))\n', (2124, 2155), True, 'import numpy as np\n'), ((524, 533), 'mil.utils.padding.Padding', 'Padding', ([], {}), '()\n', (531, 533), False, 'from mil.utils.padding import Padding\n')] |
from tensorflow.keras.models import load_model
from tensorflow.python.keras.backend import set_session
import tensorflow as tf
from flask import Flask, request, render_template, jsonify, send_file, url_for
import os
from PIL import Image, ImageOps
import numpy as np
import math
import time
import base64
app = Flask(__name__)
app.static_folder = 'static'
dir_path = os.path.dirname(os.path.realpath(__file__))
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
MODEL_PATH = os.path.join(os.getcwd(), 'models', 'keras_model.h5')
sess = tf.Session()
graph = tf.compat.v1.get_default_graph()
set_session(sess)
model = load_model(MODEL_PATH)
def read_labels():
x = None
labels = list()
with open(os.path.join(os.getcwd(), 'models', 'labels.txt'), 'r') as f:
x = [line.rstrip('\n') for line in f]
for item in x:
print("Item : {}".format(item))
split_label = item.split(" ")
label_index = split_label[0]
merger = ""
for i in range(1, len(split_label)):
merger += split_label[i] + " "
print(("Merger Index : {} ".format(merger)))
labels.append({"id": label_index, "label": merger[0:len(merger)-1]})
return labels
@app.route('/detect', methods=['POST'])
def post_example():
global sess
global graph
with graph.as_default():
# perform the prediction
set_session(sess)
np.set_printoptions(suppress=True)
if not request.headers.get('Content-type') is None:
if(request.headers.get('Content-type').split(';')[0] == 'multipart/form-data'):
if 'image' in request.files.keys():
print("inside get image statement")
file = request.files['image']
img = Image.open(file.stream) # PIL image
uploaded_img_path = os.path.join(os.getcwd(), 'static', 'uploads', file.filename)
print("Upload Path : {}".format(uploaded_img_path))
img.save(uploaded_img_path)
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
print("Image path {}".format(uploaded_img_path))
image = Image.open(uploaded_img_path)
#resize the image to a 224x224 with the same strategy as in TM2:
#resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image = image.convert('RGB')
#turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
#load the labels
labels = read_labels()
print(labels[0])
# run the inference
prediction = model.predict(data)
print(prediction[0])
scores = list()
for i in range(0, len(prediction[0])):
print("Id : {}".format(i))
print("Score: {}".format(float(prediction[0][i])))
print("label: {}".format(labels[i]['label']))
scores.append({"id": i, "label": float(prediction[0][i]), "score": labels[i]['label']})
result = {
"inference": scores
}
return jsonify(result), 200
else:
return jsonify(get_status_code("Invalid body", "Please provide valid format for Image 2")), 415
elif(request.headers.get('Content-type') == 'application/json'):
if(request.data == b''):
return jsonify(get_status_code("Invalid body", "Please provide valid format for Image")), 415
else:
body = request.get_json()
if "image_string" in body.keys():
str_image = body['image_string']
# str_image = img_string.split(',')[1]
imgdata = base64.b64decode(str_image)
uploaded_img_path = os.path.join(os.getcwd(), 'static', 'uploads', str(time.time())+".jpg")
# img = "uploads\\" + str(int(round(time.time() * 1000))) + "image_file.jpg"
with open(uploaded_img_path, 'wb') as f:
f.write(imgdata)
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
print("Image path {}".format(uploaded_img_path))
image = Image.open(uploaded_img_path)
#resize the image to a 224x224 with the same strategy as in TM2:
#resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image = image.convert('RGB')
#turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
#load the labels
labels = read_labels()
print(labels[0])
# run the inference
prediction = model.predict(data)
print(prediction[0])
scores = list()
for i in range(0, len(prediction[0])):
scores.append({"id": i, "label": float(prediction[0][i]), "score": labels[i]['label']})
result = {
"inference": scores
}
return jsonify(result), 200
else:
return jsonify(get_status_code("Invalid header", "Please provide correct header with correct data")), 415
else:
return jsonify(get_status_code("Invalid Header", "Please provide valid header")), 401
def get_status_code(argument, message):
    """Build the JSON-serialisable error payload returned by the API.

    Args:
        argument: Short error code placed under ``error.code``.
        message: Human-readable description placed under ``error.message``.

    Returns:
        dict: ``{"error": {"code": argument, "message": message}}``.
    """
    return {"error": {"code": argument, "message": message}}
if __name__=="__main__":
    # Port comes from the environment (e.g. set by the hosting platform);
    # falls back to 5000 for local development.
    port = int(os.environ.get('PORT', 5000))
    # Bind on all interfaces so the app is reachable from outside the container/VM.
    app.run(host="0.0.0.0", port=port)
| [
"base64.b64decode",
"flask.jsonify",
"numpy.ndarray",
"flask.request.get_json",
"tensorflow.python.keras.backend.set_session",
"numpy.set_printoptions",
"flask.request.headers.get",
"flask.request.files.keys",
"tensorflow.keras.models.load_model",
"PIL.ImageOps.fit",
"tensorflow.compat.v1.get_de... | [((312, 327), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (317, 327), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((531, 543), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (541, 543), True, 'import tensorflow as tf\n'), ((553, 585), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (583, 585), True, 'import tensorflow as tf\n'), ((586, 603), 'tensorflow.python.keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (597, 603), False, 'from tensorflow.python.keras.backend import set_session\n'), ((612, 634), 'tensorflow.keras.models.load_model', 'load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (622, 634), False, 'from tensorflow.keras.models import load_model\n'), ((384, 410), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (400, 410), False, 'import os\n'), ((482, 493), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (491, 493), False, 'import os\n'), ((1378, 1395), 'tensorflow.python.keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (1389, 1395), False, 'from tensorflow.python.keras.backend import set_session\n'), ((1404, 1438), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (1423, 1438), True, 'import numpy as np\n'), ((6914, 6942), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(5000)'], {}), "('PORT', 5000)\n", (6928, 6942), False, 'import os\n'), ((715, 726), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (724, 726), False, 'import os\n'), ((1455, 1490), 'flask.request.headers.get', 'request.headers.get', (['"""Content-type"""'], {}), "('Content-type')\n", (1474, 1490), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((1622, 1642), 'flask.request.files.keys', 'request.files.keys', ([], {}), '()\n', (1640, 1642), False, 'from flask import Flask, 
request, render_template, jsonify, send_file, url_for\n'), ((1776, 1799), 'PIL.Image.open', 'Image.open', (['file.stream'], {}), '(file.stream)\n', (1786, 1799), False, 'from PIL import Image, ImageOps\n'), ((2064, 2116), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, 224, 224, 3)', 'dtype': 'np.float32'}), '(shape=(1, 224, 224, 3), dtype=np.float32)\n', (2074, 2116), True, 'import numpy as np\n'), ((2234, 2263), 'PIL.Image.open', 'Image.open', (['uploaded_img_path'], {}), '(uploaded_img_path)\n', (2244, 2263), False, 'from PIL import Image, ImageOps\n'), ((2513, 2555), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image', 'size', 'Image.ANTIALIAS'], {}), '(image, size, Image.ANTIALIAS)\n', (2525, 2555), False, 'from PIL import Image, ImageOps\n'), ((2695, 2712), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2705, 2712), True, 'import numpy as np\n'), ((3957, 3992), 'flask.request.headers.get', 'request.headers.get', (['"""Content-type"""'], {}), "('Content-type')\n", (3976, 3992), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((1866, 1877), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1875, 1877), False, 'import os\n'), ((3753, 3768), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (3760, 3768), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((4221, 4239), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4237, 4239), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((1515, 1550), 'flask.request.headers.get', 'request.headers.get', (['"""Content-type"""'], {}), "('Content-type')\n", (1534, 1550), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((4448, 4475), 'base64.b64decode', 'base64.b64decode', (['str_image'], {}), '(str_image)\n', (4464, 4475), False, 'import base64\n'), ((4836, 4888), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, 224, 224, 
3)', 'dtype': 'np.float32'}), '(shape=(1, 224, 224, 3), dtype=np.float32)\n', (4846, 4888), True, 'import numpy as np\n'), ((5014, 5043), 'PIL.Image.open', 'Image.open', (['uploaded_img_path'], {}), '(uploaded_img_path)\n', (5024, 5043), False, 'from PIL import Image, ImageOps\n'), ((5309, 5351), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image', 'size', 'Image.ANTIALIAS'], {}), '(image, size, Image.ANTIALIAS)\n', (5321, 5351), False, 'from PIL import Image, ImageOps\n'), ((5503, 5520), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (5513, 5520), True, 'import numpy as np\n'), ((4533, 4544), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4542, 4544), False, 'import os\n'), ((6433, 6448), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (6440, 6448), False, 'from flask import Flask, request, render_template, jsonify, send_file, url_for\n'), ((4571, 4582), 'time.time', 'time.time', ([], {}), '()\n', (4580, 4582), False, 'import time\n')] |
import numpy as np
class Control:
    """Unconstrained linear MPC controller.

    ``__init__`` condenses the model's discrete-time dynamics into the batch
    MPC matrices (H, F) once; ``control`` then solves the quadratic program
    analytically and applies only the first input of the optimal sequence
    (receding-horizon principle).
    """

    def __init__(self, model):
        # Bind model (must expose .name, .A_disc, .B_disc)
        self.model = model
        # Desired x_pos (regulation target; see set_desired_position)
        self.xd = 0.0
        # Control parameters
        self.N = 100 # Prediction and control horizon
        # Stage weight Q, input weight R and terminal weight P depend on the plant.
        if self.model.name == 'Pendulum':
            self.Q = np.mat([[100, 0, 0, 0], [0, 10, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
            self.R = np.mat(np.identity(1))
            self.P = np.mat([[1000, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
        else:
            self.Q = np.mat([[1.0, 0.0], [0.0, 1.0]])
            self.R = np.mat(np.identity(1))
            self.P = np.mat([[1.0, 0.0], [0.0, 1.0]])
        # Get dynamics (discrete-time state-space matrices)
        A = np.mat(self.model.A_disc)
        B = np.mat(self.model.B_disc)
        # Calculate matrix relating initial state to all successive states:
        # vertically stacked powers [A; A^2; ...; A^N].
        Ahat = np.eye(len(A))
        for i in range(self.N):
            An = np.linalg.matrix_power(A, i + 1)
            Ahat = np.r_[Ahat, An]
        Ahat = Ahat[len(A):, :]  # drop the identity block used to seed the stack
        # Calculate matrix relating control signals to successive states
        # (block lower-triangular matrix of A^k * B terms).
        Cbar = np.zeros((len(A), self.N))
        for i in range(self.N):
            tmp = None
            for ii in range(i + 1):
                tm = np.linalg.matrix_power(A, ii) * B
                if tmp is None:
                    tmp = tm
                else:
                    tmp = np.c_[tm, tmp]
            # Zero-pad columns for future inputs that cannot affect step i.
            for _ in np.arange(i, self.N - 1):
                tm = np.zeros(B.shape)
                if tmp is None:
                    tmp = tm
                else:
                    tmp = np.c_[tmp, tm]
            Cbar = np.r_[Cbar, tmp]
        Cbar = Cbar[len(A):, :]  # drop the zero block used to seed the stack
        # Calculate penalty matrices: Q on stages 0..N-2, terminal P on stage N-1.
        tm1 = np.eye(self.N)
        tm1[self.N - 1, self.N - 1] = 0
        tm2 = np.zeros((self.N, self.N))
        tm2[self.N - 1, self.N - 1] = 1
        Qbar = np.kron(tm1, self.Q) + np.kron(tm2, self.P)
        Rbar = np.kron(np.eye(self.N), self.R)
        # Calculate objective derivative solution: the cost is quadratic in U,
        # J = U'HU + 2 x0'F U, so the minimizer is U* = -H^{-1} F' x0.
        self.H = Cbar.T * Qbar * Cbar + Rbar
        self.F_trans = Ahat.T * Qbar * Cbar

    def set_desired_position(self, x):
        # NOTE(review): xd is stored but never read by control(); presumably
        # intended as the regulation setpoint -- confirm with callers.
        self.xd = x

    def control(self, state):
        """Return the first optimal control input for the given state."""
        # Initial state as a column vector
        x0 = np.reshape(np.mat(state), (len(state), 1))
        # Solve for optimal control sequence over the whole horizon
        uopt = -self.H.I * self.F_trans.T * x0
        # Take only first control signal
        u = uopt[0]
        return u
| [
"numpy.zeros",
"numpy.identity",
"numpy.linalg.matrix_power",
"numpy.arange",
"numpy.kron",
"numpy.eye",
"numpy.mat"
] | [((803, 828), 'numpy.mat', 'np.mat', (['self.model.A_disc'], {}), '(self.model.A_disc)\n', (809, 828), True, 'import numpy as np\n'), ((841, 866), 'numpy.mat', 'np.mat', (['self.model.B_disc'], {}), '(self.model.B_disc)\n', (847, 866), True, 'import numpy as np\n'), ((1838, 1852), 'numpy.eye', 'np.eye', (['self.N'], {}), '(self.N)\n', (1844, 1852), True, 'import numpy as np\n'), ((1907, 1933), 'numpy.zeros', 'np.zeros', (['(self.N, self.N)'], {}), '((self.N, self.N))\n', (1915, 1933), True, 'import numpy as np\n'), ((370, 437), 'numpy.mat', 'np.mat', (['[[100, 0, 0, 0], [0, 10, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[100, 0, 0, 0], [0, 10, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (376, 437), True, 'import numpy as np\n'), ((503, 604), 'numpy.mat', 'np.mat', (['[[1000, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[1000, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n', (509, 604), True, 'import numpy as np\n'), ((636, 668), 'numpy.mat', 'np.mat', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (642, 668), True, 'import numpy as np\n'), ((734, 766), 'numpy.mat', 'np.mat', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (740, 766), True, 'import numpy as np\n'), ((1022, 1054), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['A', '(i + 1)'], {}), '(A, i + 1)\n', (1044, 1054), True, 'import numpy as np\n'), ((1529, 1553), 'numpy.arange', 'np.arange', (['i', '(self.N - 1)'], {}), '(i, self.N - 1)\n', (1538, 1553), True, 'import numpy as np\n'), ((1989, 2009), 'numpy.kron', 'np.kron', (['tm1', 'self.Q'], {}), '(tm1, self.Q)\n', (1996, 2009), True, 'import numpy as np\n'), ((2012, 2032), 'numpy.kron', 'np.kron', (['tm2', 'self.P'], {}), '(tm2, self.P)\n', (2019, 2032), True, 'import numpy as np\n'), ((2056, 2070), 'numpy.eye', 'np.eye', (['self.N'], {}), '(self.N)\n', (2062, 2070), True, 'import numpy as np\n'), ((2368, 2381), 
'numpy.mat', 'np.mat', (['state'], {}), '(state)\n', (2374, 2381), True, 'import numpy as np\n'), ((466, 480), 'numpy.identity', 'np.identity', (['(1)'], {}), '(1)\n', (477, 480), True, 'import numpy as np\n'), ((697, 711), 'numpy.identity', 'np.identity', (['(1)'], {}), '(1)\n', (708, 711), True, 'import numpy as np\n'), ((1576, 1593), 'numpy.zeros', 'np.zeros', (['B.shape'], {}), '(B.shape)\n', (1584, 1593), True, 'import numpy as np\n'), ((1350, 1379), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['A', 'ii'], {}), '(A, ii)\n', (1372, 1379), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import numpy as np
# Plot the force-displacement response recorded under force control and,
# optionally, overlay the displacement-controlled response for comparison.
force_run = np.load('force_control_response.npz')
force_vals = force_run['force']
disp_vals = force_run['displacement']
plt.plot(disp_vals, force_vals, marker='o')
plt.xlabel('Displacement')
plt.ylabel('Force')

plotComparison = True
if plotComparison:
    disp_run = np.load('disp_control_response.npz')
    force_vals_dc = disp_run['force']
    disp_vals_dc = disp_run['displacement']
    # Second figure: both responses together, square vs round markers.
    plt.figure()
    plt.plot(disp_vals, force_vals, marker='s')
    plt.plot(disp_vals_dc, force_vals_dc, marker='o')
    plt.xlabel('Displacement')
    plt.ylabel('Force')
plt.show()
# plt.savefig('FD_arch.pdf')
| [
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((76, 113), 'numpy.load', 'np.load', (['"""force_control_response.npz"""'], {}), "('force_control_response.npz')\n", (83, 113), True, 'import numpy as np\n'), ((182, 208), 'matplotlib.pyplot.plot', 'plt.plot', (['U', 'F'], {'marker': '"""o"""'}), "(U, F, marker='o')\n", (190, 208), True, 'from matplotlib import pyplot as plt\n'), ((209, 235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Displacement"""'], {}), "('Displacement')\n", (219, 235), True, 'from matplotlib import pyplot as plt\n'), ((236, 255), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Force"""'], {}), "('Force')\n", (246, 255), True, 'from matplotlib import pyplot as plt\n'), ((569, 579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (577, 579), True, 'from matplotlib import pyplot as plt\n'), ((320, 356), 'numpy.load', 'np.load', (['"""disp_control_response.npz"""'], {}), "('disp_control_response.npz')\n", (327, 356), True, 'import numpy as np\n'), ((436, 448), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (446, 448), True, 'from matplotlib import pyplot as plt\n'), ((453, 479), 'matplotlib.pyplot.plot', 'plt.plot', (['U', 'F'], {'marker': '"""s"""'}), "(U, F, marker='s')\n", (461, 479), True, 'from matplotlib import pyplot as plt\n'), ((484, 512), 'matplotlib.pyplot.plot', 'plt.plot', (['U2', 'F2'], {'marker': '"""o"""'}), "(U2, F2, marker='o')\n", (492, 512), True, 'from matplotlib import pyplot as plt\n'), ((517, 543), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Displacement"""'], {}), "('Displacement')\n", (527, 543), True, 'from matplotlib import pyplot as plt\n'), ((548, 567), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Force"""'], {}), "('Force')\n", (558, 567), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib
from io_utilities import load_data
from visualizations import show_clusters_centroids
def distance(a,b):
    """
    Compute the Euclidean distance between two points.

    Input:
        a (list): an n-dimensional list or array
        b (list): an n-dimensional list or array
    Output:
        The Euclidean distance between vectors a and b
    """
    diff = np.array(b) - np.array(a)
    return np.sqrt(np.sum(np.square(diff)))
def get_clusters(points, centroids):
    """
    Returns a list of clusters given all the points in the dataset and
    the current centroids.

    Input:
        points (list of lists): A list with every point in the dataset
        centroids (list of lists): A list with the current centroids
    Output:
        clusters (list of lists of lists): A List of Clusters. Each cluster
        is also a list of points in the cluster. clusters[c] holds every
        point whose nearest centroid is centroids[c].
    """
    clusters = [[] for _ in centroids]
    for point in points:
        # Distance from this point to every centroid; np.argmin returns the
        # first occurrence on ties, matching the original behaviour.
        distances = [distance(point, centroid) for centroid in centroids]
        clusters[np.argmin(distances)].append(point)
    return clusters
def update_centroids(clusters):
    """
    Given a list of clusters (as prepared by get_clusters) get the new centroids.

    Input:
        clusters (list of lists of lists): A List of Clusters. Each cluster
        is also a list of points in the cluster.
    Output:
        A (list of lists): The new centroids (per-coordinate mean of each cluster).
    """
    return [np.mean(cluster, axis=0) for cluster in clusters]
def k_means(points, k, iterations=10):
    """
    K Means Unsupervised ML Algorithm Implementation with Forgy Initialization.

    Input:
        points (numpy array): a 2D Array with the data to cluster.
        k (int): The number of clusters to find.
        iterations (int): Number of assignment/update rounds to run.
    Output:
        (clusters, centroids): the final cluster membership lists and the
        corresponding centroid positions.
    """
    # Forgy initialization: pick k (possibly repeated) data rows as seeds.
    idx = np.random.randint(len(points), size=k)
    centroids = points[idx, :]
    clusters = get_clusters(points, centroids)
    for i in range(iterations):
        # Visualize every round; the original guard `if i % 1 == 0` was
        # always true, so the plot is shown unconditionally.
        title = "Initialization" if i == 0 else "Iteration {}".format(i + 1)
        show_clusters_centroids(
            clusters,
            centroids,
            title,
        )
        clusters = get_clusters(points, centroids)
        centroids = update_centroids(clusters)
    return clusters, centroids
if __name__ == "__main__":
    # Demo: cluster the Iris dataset. The last column of each row is the
    # class label and is excluded from the features used for clustering.
    data = load_data('./data/iris.data')
    k = 3
    X = np.array([f[:-1] for f in data])
    y = np.array([f[-1] for f in data])
    # NOTE(review): `k` and `y` are currently unused; k_means is called with
    # the literal 3 instead of k.
    clusters,centroids = k_means(X,3)
    # keep=True leaves the final figure on screen until closed.
    show_clusters_centroids(clusters,centroids,"Result", keep=True)
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.argmin",
"io_utilities.load_data",
"visualizations.show_clusters_centroids",
"numpy.mean",
"numpy.array"
] | [((2602, 2631), 'io_utilities.load_data', 'load_data', (['"""./data/iris.data"""'], {}), "('./data/iris.data')\n", (2611, 2631), False, 'from io_utilities import load_data\n'), ((2651, 2683), 'numpy.array', 'np.array', (['[f[:-1] for f in data]'], {}), '([f[:-1] for f in data])\n', (2659, 2683), True, 'import numpy as np\n'), ((2692, 2723), 'numpy.array', 'np.array', (['[f[-1] for f in data]'], {}), '([f[-1] for f in data])\n', (2700, 2723), True, 'import numpy as np\n'), ((2768, 2833), 'visualizations.show_clusters_centroids', 'show_clusters_centroids', (['clusters', 'centroids', '"""Result"""'], {'keep': '(True)'}), "(clusters, centroids, 'Result', keep=True)\n", (2791, 2833), False, 'from visualizations import show_clusters_centroids\n'), ((2836, 2846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2844, 2846), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1203), 'numpy.argmin', 'np.argmin', (['point_to_centroids'], {}), '(point_to_centroids)\n', (1183, 1203), True, 'import numpy as np\n'), ((1675, 1699), 'numpy.mean', 'np.mean', (['cluster'], {'axis': '(0)'}), '(cluster, axis=0)\n', (1682, 1699), True, 'import numpy as np\n'), ((2318, 2369), 'visualizations.show_clusters_centroids', 'show_clusters_centroids', (['clusters', 'centroids', 'title'], {}), '(clusters, centroids, title)\n', (2341, 2369), False, 'from visualizations import show_clusters_centroids\n'), ((459, 470), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (467, 470), True, 'import numpy as np\n'), ((471, 482), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (479, 482), True, 'import numpy as np\n')] |
import numpy as np
def simpson(f, a, b, nstrips):
    """Approximate the integral of ``f`` over ``[a, b]`` by composite
    Simpson's rule.

    Parameters
    ----------
    f : function
        The integrand (must accept numpy arrays elementwise)
    a : float
        The start of the domain
    b : float
        The end of the domain
    nstrips : int
        The number of strips

    Returns
    -------
    I : float
        The integral approximation
    """
    # 2*nstrips+1 equally spaced nodes including both endpoints.
    nodes, h = np.linspace(a, b, num=2 * nstrips + 1, endpoint=True, retstep=True)
    endpoint_sum = f(nodes[0]) + f(nodes[-1])
    odd_sum = np.sum(f(nodes[1:-1:2]))    # interior midpoints, weight 4
    even_sum = np.sum(f(nodes[2:-1:2]))   # interior strip boundaries, weight 2
    return h / 3 * (endpoint_sum + 4 * odd_sum + 2 * even_sum)
| [
"numpy.linspace"
] | [((405, 472), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': '(2 * nstrips + 1)', 'endpoint': '(True)', 'retstep': '(True)'}), '(a, b, num=2 * nstrips + 1, endpoint=True, retstep=True)\n', (416, 472), True, 'import numpy as np\n')] |
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt
# Minimal SVM demo: fit a support-vector classifier on two points and plot them.
X = np.array([[0, 0], [1, 1]])
Y = [0, 1]
# Bug fix: the classifier must be created BEFORE calling fit(); the original
# called clf.fit(X, Y) on an undefined name. The stray `svc(kernel=...)` and
# `clf.predict(gram)` lines referenced undefined names and are replaced by a
# prediction on the training points.
clf = svm.SVC()
clf.fit(X, Y)
predictions = clf.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=Y, s=50, cmap='spring')
plt.show()
| [
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"numpy.array",
"sklearn.svm.SVC"
] | [((82, 108), 'numpy.array', 'np.array', (['[[0, 0], [1, 1]]'], {}), '([[0, 0], [1, 1]])\n', (90, 108), True, 'import numpy as np\n'), ((137, 146), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (144, 146), False, 'from sklearn import svm\n'), ((194, 249), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'Y', 's': '(50)', 'cmap': '"""spring"""'}), "(X[:, 0], X[:, 1], c=Y, s=50, cmap='spring')\n", (205, 249), True, 'import matplotlib.pyplot as plt\n'), ((245, 255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (253, 255), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
import math
from decimal import *
import numpy as np
# CHANGEME:
# Load the joint probability table P(X, Y): rows index Y, columns index X.
# NOTE(review): .astype(Decimal) produces an object-dtype array whose
# elements remain Python floats, not true Decimal instances -- confirm
# this is intended before relying on Decimal precision.
probtab = np.loadtxt(open("data/probability_table.csv", "rb"), delimiter=",").astype(Decimal)
print("The marginal probabilities of all the values of 𝑋")
print(probtab)
print("- " * 10)
# Marginals: sum over rows gives P(X), sum over columns gives P(Y).
px = np.sum(probtab, axis=0)
py = np.sum(probtab, axis=1)
print("P(X):", px)
print("P(Y):", py)
# sum of all columns and rows must be one
# NOTE(review): exact float equality; a tolerance (math.isclose) would be
# safer, and assert is stripped under python -O.
assert sum(px) == 1
assert sum(py) == 1
print("Shannon’s entropy of the plaintext")
print("𝐻(𝑋)=−∑𝑃(𝑥)log2𝑃(𝑥)")
# H(X) = -sum_x P(x) log2 P(x)
hx = -sum([i * math.log2(i) for i in px])
print("-(", end="")
for i in px:
    print(f"{i:.3f}*log2({i:.3f})", end=" + ")
print(f") = {hx:.3f} bits")
print("- " * 10)
print("The conditional entropy 𝐻(𝑋|𝑌)")
print("𝐻(𝑋|𝑌)=−∑y∑x𝑃(𝑥,𝑦)log2𝑃(𝑥|𝑦)")
# H(X|Y) accumulated term by term: P(x,y) * log2(P(x,y)/P(y)) where
# P(x,y)/P(y) = P(x|y); k is P(y) for the current row, j is P(x,y).
hxy = 0
print("-(", end="")
for k, i in zip(py, probtab):
    for j in i:
        hxy += (j * math.log2(j / k))
        print(f"{j}*log2({j}/{k})", end=" + ")
hxy = -hxy
print(f") = {hxy:.3f} bits")
print("- " * 10)
print("The mutual information")
print("𝐼(𝑋,𝑌)=𝐻(𝑋)−𝐻(𝑋|𝑌)")
print(f"{hx:.3f} - {hxy:.3f} = {(hx - hxy):.3f} bits")
# Perfect secrecy <=> I(X, Y) = 0 (ciphertext reveals nothing about plaintext).
# NOTE(review): exact float comparison; rounding may misclassify a near-zero I.
if (hx - hxy) == 0:
    print("The cipher is perfectly secret")
else:
    print("The cipher is not perfectly secret because I(X,Y) != 0")
# The uncertainty about the plaintext is reduced after receiving the ciphertext.
| [
"math.log2",
"numpy.sum"
] | [((283, 306), 'numpy.sum', 'np.sum', (['probtab'], {'axis': '(0)'}), '(probtab, axis=0)\n', (289, 306), True, 'import numpy as np\n'), ((312, 335), 'numpy.sum', 'np.sum', (['probtab'], {'axis': '(1)'}), '(probtab, axis=1)\n', (318, 335), True, 'import numpy as np\n'), ((864, 880), 'math.log2', 'math.log2', (['(j / k)'], {}), '(j / k)\n', (873, 880), False, 'import math\n'), ((547, 559), 'math.log2', 'math.log2', (['i'], {}), '(i)\n', (556, 559), False, 'import math\n')] |
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
Author:<NAME>
"""
import os, glob
from xml.dom.minidom import parse
import xml.dom.minidom
import config
import numpy as np
from sklearn.cluster import KMeans
# Collect all annotated person bounding-box sizes (scaled to the network
# input resolution) and cluster them into 2 anchor-box shapes with KMeans.
datase_name = 'inria_person'
inria_person_info = {'img_dir': 'inria_person/PICTURES_LABELS_TRAIN/PICTURES/',
                     'anotation_dir': 'inria_person/PICTURES_LABELS_TRAIN/ANOTATION/'}
hazy_person_info = {'img_dir': 'hazy_person/PICTURES_LABELS_TRAIN/PICTURES/',
                    'anotation_dir': 'hazy_person/PICTURES_LABELS_TRAIN/ANOTATION/'}
dataset_info_map = {'inria_person': inria_person_info,
                    'hazy_person': hazy_person_info}
# All PASCAL-VOC-style XML annotation files for the selected dataset.
annotations_name = glob.glob(os.path.join(dataset_info_map[datase_name]['anotation_dir'], '*xml'))
person_h_w = []
for annotation_name in annotations_name:
    DOMTree = xml.dom.minidom.parse(annotation_name)
    collection = DOMTree.documentElement
    # Image dimensions from the <size> element.
    # NOTE(review): if several <size> elements exist, only the last one's
    # width/height are kept -- assumes exactly one per annotation.
    objs = collection.getElementsByTagName("size")
    for obj in objs:
        img_w = int(obj.getElementsByTagName('width')[0].childNodes[0].data)
        img_h = int(obj.getElementsByTagName('height')[0].childNodes[0].data)
    objs = collection.getElementsByTagName("object")
    for obj in objs:
        obj_type = obj.getElementsByTagName('name')[0].childNodes[0].data
        if obj_type == "person":
            bbox = obj.getElementsByTagName('bndbox')[0]
            ymin = int(bbox.getElementsByTagName('ymin')[0].childNodes[0].data)
            xmin = int(bbox.getElementsByTagName('xmin')[0].childNodes[0].data)
            ymax = int(bbox.getElementsByTagName('ymax')[0].childNodes[0].data)
            xmax = int(bbox.getElementsByTagName('xmax')[0].childNodes[0].data)
            # Rescale the box to the standard network input size
            # (config.img_size = (height, width)), truncating to int.
            std_p_h = int((ymax - ymin)/img_h * config.img_size[0])
            std_p_w = int((xmax - xmin) / img_w * config.img_size[1])
            person_h_w.append(np.array([std_p_h, std_p_w]))
            pass
# Two clusters -> two anchor (height, width) prototypes.
clf = KMeans(n_clusters=2)
clf.fit(np.array(person_h_w))
print(clf.cluster_centers_)
| [
"numpy.array",
"sklearn.cluster.KMeans",
"os.path.join"
] | [((2015, 2035), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (2021, 2035), False, 'from sklearn.cluster import KMeans\n'), ((790, 858), 'os.path.join', 'os.path.join', (["dataset_info_map[datase_name]['anotation_dir']", '"""*xml"""'], {}), "(dataset_info_map[datase_name]['anotation_dir'], '*xml')\n", (802, 858), False, 'import os, glob\n'), ((2044, 2064), 'numpy.array', 'np.array', (['person_h_w'], {}), '(person_h_w)\n', (2052, 2064), True, 'import numpy as np\n'), ((1968, 1996), 'numpy.array', 'np.array', (['[std_p_h, std_p_w]'], {}), '([std_p_h, std_p_w])\n', (1976, 1996), True, 'import numpy as np\n')] |
# Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
from tiledb.cf.creator import DataspaceRegistry
from tiledb.cf.netcdf_engine import (
NetCDF4CoordToDimConverter,
NetCDF4DimToDimConverter,
NetCDF4ScalarToDimConverter,
)
# Skip this entire test module when the optional netCDF4 package is absent.
netCDF4 = pytest.importorskip("netCDF4")
class TestNetCDFCoordToDimConverterUnlimCoord:
    """This class tests the NetCDF4CoordToDimConverter class for a simple NetCDF
    coordinate.

    This test uses an example NetCDF file with the following root group:

    Dimensions:
      value(unlim)

    Variables:
      float64 value(value)
    """

    def test_class_properties(self):
        # A coordinate converter keeps the variable's name/dtype but has no
        # fixed domain (the underlying dimension is unlimited).
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            var[:] = np.random.rand((8))
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            assert converter.name == var.name
            assert converter.domain is None
            assert converter.dtype == np.dtype(np.float64)
            assert isinstance(repr(converter), str)
            assert converter.input_var_name == "value"
            assert converter.input_dim_name == "value"
            assert converter.input_var_dtype == np.dtype(np.float64)

    @pytest.mark.parametrize("indexer", [slice(None), slice(1, 3), slice(2, 4)])
    def test_get_values(self, indexer):
        """Tests getting coordinate values from a NetCDF coordinate

        NetCDF:

        dimensions:
          value (unlim)

        variables:
          real64 value(value) = [array of 8 random values]
        """
        data = np.random.rand((8))
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            var[:] = data
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            result = converter.get_values(dataset, sparse=True, indexer=indexer)
        np.testing.assert_equal(result, data[indexer])

    def test_get_query_size(self):
        # The query size is the number of stored coordinate values.
        data = np.random.rand((8))
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            var[:] = data
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            query_size = converter.get_query_size(dataset)
        np.testing.assert_equal(query_size, 8)

    def test_get_values_no_data_error(self):
        # Reading from a coordinate with no stored values raises ValueError.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            with pytest.raises(ValueError):
                converter.get_values(dataset, sparse=True, indexer=slice(None))

    def test_get_values_dense_error(self):
        # Dense reads of an unlimited coordinate are not supported.
        data = np.random.rand((8))
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            var[:] = data
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            with pytest.raises(NotImplementedError):
                converter.get_values(dataset, sparse=False, indexer=slice(None))

    def test_get_values_bad_step_error(self):
        # Strided indexers (step != 1) are rejected.
        data = np.random.rand((8))
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            var[:] = data
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            with pytest.raises(ValueError):
                converter.get_values(dataset, sparse=False, indexer=slice(0, 8, 2))

    def test_get_value_no_variable_error(self):
        # The coordinate variable must exist in the group being read.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            group = dataset.createGroup("group1")
            with pytest.raises(KeyError):
                converter.get_values(group, sparse=True, indexer=slice(None))

    def test_get_value_wrong_ndim_error(self):
        # A same-named scalar variable in the target group is not a valid
        # 1-D coordinate and must be rejected.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dataset.createDimension("value")
            var = dataset.createVariable("value", np.float64, ("value",))
            registry = DataspaceRegistry()
            converter = NetCDF4CoordToDimConverter.from_netcdf(registry, var)
            group = dataset.createGroup("group1")
            group.createVariable("value", np.float64, tuple())
            with pytest.raises(ValueError):
                converter.get_values(group, sparse=True, indexer=slice(None))
class TestNetCDFDimToDimConverterSimpleDim:
    """This class tests the NetCDF4DimToDimConverter class for a simple NetCDF
    dimension.

    This test uses an example NetCDF file with a dimension row(8) in the root
    group.
    """

    def test_class_properties(self):
        # A fixed-size dimension maps to an integer domain [0, size - 1].
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", 8)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 1000, np.uint64
            )
            assert isinstance(repr(converter), str)
            assert converter.input_dim_name == dim.name
            assert converter.input_dim_size == dim.size
            assert not converter.is_unlimited
            assert converter.name == dim.name
            assert converter.domain == (0, dim.size - 1)
            assert converter.dtype == np.uint64

    @pytest.mark.parametrize(
        "sparse,values", [(True, np.arange(0, 8)), (False, slice(0, 8))]
    )
    def test_get_values(self, sparse, values):
        # Sparse reads return explicit indices; dense reads return a slice.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", 8)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 1000, np.uint64
            )
            result = converter.get_values(dataset, sparse=sparse, indexer=slice(None))
        np.testing.assert_equal(result, values)

    def test_get_query_size(self):
        # The query size equals the dimension size.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", 8)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 1000, np.uint64
            )
            query_size = converter.get_query_size(dataset)
        np.testing.assert_equal(query_size, 8)

    @pytest.mark.parametrize(
        "sparse,values", [(True, np.arange(0, 8)), (False, slice(0, 8))]
    )
    def test_get_values_from_subgroup(self, sparse, values):
        # The dimension is found on the parent when reading from a subgroup.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", 8)
            group = dataset.createGroup("group1")
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 1000, np.uint64
            )
            result = converter.get_values(group, sparse=sparse, indexer=slice(None))
        np.testing.assert_equal(result, values)

    def test_no_dim_error(self):
        # Reading from a dataset that lacks the dimension raises KeyError.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", 8)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 1000, np.uint64
            )
        with netCDF4.Dataset("no_dims.nc", mode="w", diskless=True) as dataset:
            group = dataset.createGroup("group")
            with pytest.raises(KeyError):
                converter.get_values(group, sparse=False, indexer=slice(None))

    def test_get_values_bad_step_error(self):
        # Strided indexers (step != 1) are rejected.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", 8)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 1000, np.uint64
            )
            with pytest.raises(ValueError):
                converter.get_values(dataset, sparse=False, indexer=slice(0, 8, 2))
class TestNetCDFDimToDimConverterUnlimitedDim:
    """This class tests the NetCDF4DimToDimConverter class for an unlimited
    NetCDF dimension."""

    def test_class_properties(self):
        # An unlimited dimension takes its domain from the user-supplied
        # maximum size, not from the (growing) NetCDF dimension itself.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", None)
            max_size = 100
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, max_size, np.uint64
            )
            assert isinstance(repr(converter), str)
            assert converter.input_dim_name == dim.name
            assert converter.input_dim_size == dim.size
            assert converter.is_unlimited
            assert converter.name == dim.name
            assert converter.domain == (0, max_size - 1)
            assert converter.dtype == np.uint64

    @pytest.mark.parametrize("sparse", [True, False])
    def test_get_values_no_data(self, sparse):
        # An unlimited dimension with no data yet has nothing to read.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", None)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 100, np.uint64
            )
            with pytest.raises(IndexError):
                converter.get_values(dataset, sparse=sparse, indexer=slice(None))

    @pytest.mark.parametrize(
        "sparse,indexer,expected_result",
        [
            (True, slice(None), np.arange(0, 10)),
            (True, slice(2, 5), np.arange(2, 5)),
            (False, slice(None), slice(0, 10)),
            (False, slice(2, 5), slice(2, 5)),
        ],
    )
    def test_get_values(self, sparse, indexer, expected_result):
        # The current size comes from the data variable defined on the
        # unlimited dimension (10 values written below).
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", None)
            var = dataset.createVariable("data", np.int32, ("row",))
            size = 10
            var[:] = np.arange(size)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 100, np.uint64
            )
            result = converter.get_values(dataset, sparse=sparse, indexer=indexer)
        np.testing.assert_equal(result, expected_result)

    def test_get_query_size(self):
        # Query size follows the current length of the unlimited dimension.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", None)
            var = dataset.createVariable("data", np.int32, ("row",))
            size = 10
            var[:] = np.arange(size)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 100, np.uint64
            )
            query_size = converter.get_query_size(dataset)
        np.testing.assert_equal(query_size, size)

    def test_data_too_large_error(self):
        # 11 stored values exceed the declared maximum of 10 -> IndexError.
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            dim = dataset.createDimension("row", None)
            var = dataset.createVariable("data", np.int32, ("row",))
            size = 11
            var[:] = np.arange(size)
            registry = DataspaceRegistry()
            converter = NetCDF4DimToDimConverter.from_netcdf(
                registry, dim, 10, np.uint64
            )
            with pytest.raises(IndexError):
                converter.get_values(dataset, sparse=True, indexer=slice(None))
class TestNetCDFScalarToDimConverter:
    """Tests for NetCDF4ScalarToDimConverter, which maps NetCDF scalar
    variables onto a synthetic "__scalars" dimension of size 1."""
    def test_class_properties(self):
        # The scalar converter exposes a fixed, single-element domain.
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        assert converter.name == "__scalars"
        assert converter.domain == (0, 0)
        assert converter.dtype == np.dtype(np.uint32)
    def test_repr(self):
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        # Bug fix: the original called isinstance(...) without asserting the
        # result, so this test could never fail.
        assert isinstance(repr(converter), str)
    @pytest.mark.parametrize(
        "sparse,indexer,expected_result",
        [
            (True, slice(None), np.arange(0, 1)),
            (True, slice(0, 1), np.arange(0, 1)),
            (False, slice(None), slice(0, 1)),
        ],
    )
    def test_get_values(self, sparse, indexer, expected_result):
        # Sparse reads yield a one-element coordinate array; dense reads a slice.
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            result = converter.get_values(dataset, sparse=sparse, indexer=indexer)
            np.testing.assert_equal(result, expected_result)
    def test_get_values_bad_step_error(self):
        # A negative step is not supported.
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            with pytest.raises(ValueError):
                converter.get_values(dataset, sparse=False, indexer=slice(0, 1, -1))
    def test_get_values_bad_start_error(self):
        # A negative start index is out of the scalar domain.
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            with pytest.raises(IndexError):
                converter.get_values(dataset, sparse=False, indexer=slice(-1, 1))
    def test_get_values_bad_stop_error(self):
        # A stop beyond the single scalar slot is out of bounds.
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            with pytest.raises(IndexError):
                converter.get_values(dataset, sparse=False, indexer=slice(0, 10))
    def test_get_query_size(self):
        # Scalars always contribute exactly one element to a query.
        registry = DataspaceRegistry()
        converter = NetCDF4ScalarToDimConverter.create(registry, "__scalars", np.uint32)
        with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
            result = converter.get_query_size(dataset)
            np.testing.assert_equal(result, 1)
| [
"pytest.importorskip",
"tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf",
"numpy.dtype",
"tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf",
"pytest.raises",
"numpy.arange",
"numpy.testing.assert_equal",
"numpy.random.rand",
"pytest.mark.parametrize",
"tiledb.cf.netcdf_e... | [((291, 321), 'pytest.importorskip', 'pytest.importorskip', (['"""netCDF4"""'], {}), "('netCDF4')\n", (310, 321), False, 'import pytest\n'), ((10119, 10167), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sparse"""', '[True, False]'], {}), "('sparse', [True, False])\n", (10142, 10167), False, 'import pytest\n'), ((1782, 1799), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (1796, 1799), True, 'import numpy as np\n'), ((2237, 2283), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'data[indexer]'], {}), '(result, data[indexer])\n', (2260, 2283), True, 'import numpy as np\n'), ((2335, 2352), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (2349, 2352), True, 'import numpy as np\n'), ((2768, 2806), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['query_size', '(8)'], {}), '(query_size, 8)\n', (2791, 2806), True, 'import numpy as np\n'), ((3356, 3373), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (3370, 3373), True, 'import numpy as np\n'), ((3918, 3935), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (3932, 3935), True, 'import numpy as np\n'), ((12840, 12859), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (12857, 12859), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((12880, 12948), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (12914, 12948), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((13135, 13154), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (13152, 13154), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((13175, 13243), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 
'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (13209, 13243), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((13616, 13635), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (13633, 13635), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((13656, 13724), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (13690, 13724), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((14015, 14034), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (14032, 14034), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((14055, 14123), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (14089, 14123), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((14400, 14419), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (14417, 14419), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((14440, 14508), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (14474, 14508), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((14781, 14800), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (14798, 14800), False, 'from 
tiledb.cf.creator import DataspaceRegistry\n'), ((14821, 14889), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (14855, 14889), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((15151, 15170), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (15168, 15170), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((15191, 15259), 'tiledb.cf.netcdf_engine.NetCDF4ScalarToDimConverter.create', 'NetCDF4ScalarToDimConverter.create', (['registry', '"""__scalars"""', 'np.uint32'], {}), "(registry, '__scalars', np.uint32)\n", (15225, 15259), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((885, 902), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (899, 902), True, 'import numpy as np\n'), ((928, 947), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (945, 947), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((972, 1025), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (1010, 1025), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((2050, 2069), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (2067, 2069), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((2094, 2147), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (2132, 2147), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, 
NetCDF4ScalarToDimConverter\n'), ((2603, 2622), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (2620, 2622), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((2647, 2700), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (2685, 2700), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((3075, 3094), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (3092, 3094), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((3119, 3172), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (3157, 3172), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((3624, 3643), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (3641, 3643), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((3668, 3721), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (3706, 3721), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((4186, 4205), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (4203, 4205), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((4230, 4283), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (4268, 4283), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((4683, 4702), 
'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (4700, 4702), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((4727, 4780), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (4765, 4780), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((5221, 5240), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (5238, 5240), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((5265, 5318), 'tiledb.cf.netcdf_engine.NetCDF4CoordToDimConverter.from_netcdf', 'NetCDF4CoordToDimConverter.from_netcdf', (['registry', 'var'], {}), '(registry, var)\n', (5303, 5318), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((5984, 6003), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (6001, 6003), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((6028, 6096), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(1000)', 'np.uint64'], {}), '(registry, dim, 1000, np.uint64)\n', (6064, 6096), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((6800, 6819), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (6817, 6819), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((6844, 6912), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(1000)', 'np.uint64'], {}), '(registry, dim, 1000, np.uint64)\n', (6880, 6912), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), 
((7042, 7081), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'values'], {}), '(result, values)\n', (7065, 7081), True, 'import numpy as np\n'), ((7273, 7292), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (7290, 7292), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((7317, 7385), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(1000)', 'np.uint64'], {}), '(registry, dim, 1000, np.uint64)\n', (7353, 7385), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((7487, 7525), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['query_size', '(8)'], {}), '(query_size, 8)\n', (7510, 7525), True, 'import numpy as np\n'), ((7902, 7921), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (7919, 7921), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((7946, 8014), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(1000)', 'np.uint64'], {}), '(registry, dim, 1000, np.uint64)\n', (7982, 8014), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((8142, 8181), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'values'], {}), '(result, values)\n', (8165, 8181), True, 'import numpy as np\n'), ((8371, 8390), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (8388, 8390), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((8415, 8483), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(1000)', 'np.uint64'], {}), '(registry, dim, 1000, np.uint64)\n', (8451, 8483), False, 'from tiledb.cf.netcdf_engine import 
NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((8966, 8985), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (8983, 8985), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((9010, 9078), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(1000)', 'np.uint64'], {}), '(registry, dim, 1000, np.uint64)\n', (9046, 9078), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((9609, 9628), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (9626, 9628), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((9653, 9725), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', 'max_size', 'np.uint64'], {}), '(registry, dim, max_size, np.uint64)\n', (9689, 9725), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((10373, 10392), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (10390, 10392), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((10417, 10484), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(100)', 'np.uint64'], {}), '(registry, dim, 100, np.uint64)\n', (10453, 10484), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((11249, 11264), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (11258, 11264), True, 'import numpy as np\n'), ((11288, 11307), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (11305, 11307), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((11332, 11399), 
'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(100)', 'np.uint64'], {}), '(registry, dim, 100, np.uint64)\n', (11368, 11399), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((11525, 11573), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (11548, 11573), True, 'import numpy as np\n'), ((11857, 11872), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (11866, 11872), True, 'import numpy as np\n'), ((11896, 11915), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (11913, 11915), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((11940, 12007), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(100)', 'np.uint64'], {}), '(registry, dim, 100, np.uint64)\n', (11976, 12007), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), ((12109, 12150), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['query_size', 'size'], {}), '(query_size, size)\n', (12132, 12150), True, 'import numpy as np\n'), ((12440, 12455), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (12449, 12455), True, 'import numpy as np\n'), ((12479, 12498), 'tiledb.cf.creator.DataspaceRegistry', 'DataspaceRegistry', ([], {}), '()\n', (12496, 12498), False, 'from tiledb.cf.creator import DataspaceRegistry\n'), ((12523, 12589), 'tiledb.cf.netcdf_engine.NetCDF4DimToDimConverter.from_netcdf', 'NetCDF4DimToDimConverter.from_netcdf', (['registry', 'dim', '(10)', 'np.uint64'], {}), '(registry, dim, 10, np.uint64)\n', (12559, 12589), False, 'from tiledb.cf.netcdf_engine import NetCDF4CoordToDimConverter, NetCDF4DimToDimConverter, NetCDF4ScalarToDimConverter\n'), 
((13070, 13089), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (13078, 13089), True, 'import numpy as np\n'), ((13900, 13948), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (13923, 13948), True, 'import numpy as np\n'), ((15407, 15441), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', '(1)'], {}), '(result, 1)\n', (15430, 15441), True, 'import numpy as np\n'), ((1154, 1174), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (1162, 1174), True, 'import numpy as np\n'), ((1385, 1405), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (1393, 1405), True, 'import numpy as np\n'), ((3190, 3215), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3203, 3215), False, 'import pytest\n'), ((3739, 3773), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (3752, 3773), False, 'import pytest\n'), ((4301, 4326), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4314, 4326), False, 'import pytest\n'), ((4848, 4871), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (4861, 4871), False, 'import pytest\n'), ((5449, 5474), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5462, 5474), False, 'import pytest\n'), ((6552, 6567), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (6561, 6567), True, 'import numpy as np\n'), ((7590, 7605), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (7599, 7605), True, 'import numpy as np\n'), ((8660, 8683), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (8673, 8683), False, 'import pytest\n'), ((9126, 9151), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9139, 9151), False, 'import pytest\n'), ((10532, 10557), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (10545, 
10557), False, 'import pytest\n'), ((10756, 10772), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (10765, 10772), True, 'import numpy as np\n'), ((10807, 10822), 'numpy.arange', 'np.arange', (['(2)', '(5)'], {}), '(2, 5)\n', (10816, 10822), True, 'import numpy as np\n'), ((12637, 12662), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (12650, 12662), False, 'import pytest\n'), ((13400, 13415), 'numpy.arange', 'np.arange', (['(0)', '(1)'], {}), '(0, 1)\n', (13409, 13415), True, 'import numpy as np\n'), ((13450, 13465), 'numpy.arange', 'np.arange', (['(0)', '(1)'], {}), '(0, 1)\n', (13459, 13465), True, 'import numpy as np\n'), ((14221, 14246), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14234, 14246), False, 'import pytest\n'), ((14606, 14631), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (14619, 14631), False, 'import pytest\n'), ((14987, 15012), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (15000, 15012), False, 'import pytest\n')] |
from __future__ import print_function, division
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import backend as K
from keras.layers import Lambda
from utils.glove_loader import GloveModel
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from utils.dataset_utils import load_dataset
from PIL import Image
import math
import pandas as pd
import sys
import time
# GPU setting
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Restrict TensorFlow to GPU device "2" and let GPU memory grow on demand
# instead of pre-allocating the whole device.
config = tf.ConfigProto(
    gpu_options = tf.GPUOptions(
        visible_device_list="2", # specify GPU number
        allow_growth=True)
    )
# Install the configured session as the Keras backend session.
set_session(tf.Session(config=config))
class DCGAN():
def __init__(self, img_path, txt_path, glove_path):
# Input shape
self.img_rows = 64
self.img_cols = 64
self.channels = 3
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
self.embedding_dim = 300
self.img_path = img_path
self.txt_path = txt_path
self.glove_path = glove_path
optimizer_g = Adam(0.0005, 0.5)
optimizer_d = Adam(0.00005, 0.5)
# Build the GloVe model
self.glove_model = GloveModel()
self.glove_model.load(data_dir_path=self.glove_path, embedding_dim=self.embedding_dim)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer_d,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
cond_input = Input(shape=(self.embedding_dim,))
img = self.generator([z, cond_input])
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
valid = self.discriminator([img, cond_input])
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model([z, cond_input], valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer_g)
def build_generator(self):
generator_input = Input(shape=(self.latent_dim, ), name="g_input")
cond_input = Input(shape=(self.embedding_dim, ), name="cond_g_input")
cond_output = Dense(100)(cond_input)
G = concatenate([generator_input, cond_output])
G = Dense(256 * 8 * 8, activation="relu")(G)
G = Reshape((8, 8, 256))(G)
G = UpSampling2D()(G)
G = Conv2D(256, kernel_size=3, padding="same")(G)
G = BatchNormalization(momentum=0.8)(G)
G = Activation("relu")(G)
G = UpSampling2D()(G)
G = Conv2D(128, kernel_size=3, padding="same")(G)
G = BatchNormalization(momentum=0.8)(G)
G = Activation("relu")(G)
G = UpSampling2D()(G)
G = Conv2D(64, kernel_size=3, padding="same")(G)
G = BatchNormalization(momentum=0.8)(G)
G = Activation("relu")(G)
G = Conv2D(self.channels, kernel_size=3, padding="same")(G)
generator_output = Activation("tanh")(G)
generator = Model([generator_input, cond_input], generator_output)
generator.summary()
return generator
def build_discriminator(self):
discriminator_input = Input(shape=self.img_shape, name="d_input")
cond_input = Input(shape=(self.embedding_dim, ), name="cond_d_input")
D = Conv2D(64, kernel_size=3, strides=2, padding="same")(discriminator_input)
D = LeakyReLU(alpha=0.2)(D)
D = Dropout(0.25)(D)
D = Conv2D(128, kernel_size=3, strides=2, padding="same")(D)
D = ZeroPadding2D(padding=((0,1),(0,1)))(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.2)(D)
D = Dropout(0.25)(D)
D = Conv2D(256, kernel_size=3, strides=1, padding="same")(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.2)(D)
D = Dropout(0.25)(D)
D = Conv2D(512, kernel_size=3, strides=2, padding="same")(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.2)(D)
cond_d_hidden = Dense(100)(cond_input)
cond_d_hidden = Reshape((1, 1, 100))(cond_d_hidden)
cond_d_output = Lambda(lambda x: K.tile(x, [1, 9, 9, 1]))(cond_d_hidden)
D = concatenate([D, cond_d_output], axis=-1)
D = Conv2D(512, kernel_size=3, strides=1, padding='same')(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.1)(D)
D = Dropout(0.25)(D)
D = Flatten()(D)
discriminator_output = Dense(1, activation='sigmoid')(D)
discriminator = Model([discriminator_input, cond_input], discriminator_output)
discriminator.summary()
return discriminator
def train(self, epochs, batch_size=26, save_interval=20):
# load dataset
X_train, Captions, X_test, Captions_test, Labels = load_dataset(self.img_path, self.txt_path, self.img_shape)
caption_list_train = []
caption_list_test = []
for caption in Captions:
caption_list_train.append([str(caption)])
for caption in Captions_test:
caption_list_test.append([str(caption)])
df = pd.DataFrame(caption_list_train, columns=['caption'])
df.to_csv('./saved_model/caption_train.csv')
df = pd.DataFrame(caption_list_test, columns=['caption'])
df.to_csv('./saved_model/caption_test.csv')
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
batch_count = int(X_train.shape[0] / batch_size)
history = []
history_test = []
for epoch in range(epochs):
for batch_index in range(batch_count):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half of images
# idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[batch_index * batch_size:(batch_index + 1) * batch_size]
texts_input = Captions[batch_index * batch_size:(batch_index + 1) * batch_size]
texts = self.glove_model.encode_docs(texts_input)
# Sample noise and generate a batch of new images
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_imgs = self.generator.predict([noise, texts])
# Train the discriminator (real classified as ones and generated as zeros)
start = time.time()
d_loss_real = self.discriminator.train_on_batch([imgs, texts], valid)
d_loss_fake = self.discriminator.train_on_batch([gen_imgs, texts], fake)
batch_time_d = time.time() - start
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
# Train the generator (wants discriminator to mistake images as real)
start = time.time()
g_loss = self.combined.train_on_batch([noise, texts], valid)
batch_time_g = time.time() - start
# Plot the progress
batch_time = batch_time_d + batch_time_g
print ("%d-%d [D loss: %f, acc.: %.2f%%] [G loss: %f] [Time: %f]" % (epoch, batch_index, d_loss[0], 100*d_loss[1], g_loss, batch_time))
history.append([epoch, batch_index, d_loss[0], 100*d_loss[1], g_loss, batch_time])
# Test the model
texts_test = self.glove_model.encode_docs(Captions_test)
noise_test = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_imgs_test = self.generator.predict([noise_test, texts_test])
start = time.time()
d_loss_real_test = self.discriminator.test_on_batch([X_test, texts_test], valid)
d_loss_fake_test = self.discriminator.test_on_batch([gen_imgs_test, texts_test], fake)
batch_time_d_test = time.time() - start
d_loss_test = 0.5 * np.add(d_loss_real_test, d_loss_fake_test)
start = time.time()
g_loss_test = self.combined.test_on_batch([noise_test, texts_test], valid)
batch_time_g_test = time.time() - start
# Plot the test progress
batch_time_test = batch_time_d_test + batch_time_g_test
print ("%d (test) [D loss: %f, acc.: %.2f%%] [G loss: %f] [Time: %f]" % (epoch, d_loss_test[0], 100*d_loss_test[1], g_loss_test, batch_time_test))
history_test.append([epoch, d_loss_test[0], 100*d_loss_test[1], g_loss_test, batch_time_test])
# If at save interval => save generated image samples & training weights
if epoch % save_interval == 0:
idx = np.random.randint(0, X_train.shape[0], batch_size)
texts_input = Captions[idx]
texts = self.glove_model.encode_docs(texts_input)
self.save_imgs(epoch, texts)
self.generator.save_weights(filepath='./saved_model/generator_weights_' + str(epoch) + '.h5')
self.discriminator.save_weights(filepath='./saved_model/discriminator_weights_' + str(epoch) + '.h5')
# save weights & history
df_train = pd.DataFrame(history, columns=['epoch', 'batch', 'd_loss', 'acc', 'g_loss', 'time[sec]'])
df_train.to_csv('./saved_model/history.csv')
df_test = pd.DataFrame(history_test, columns=['epoch', 'd_loss', 'acc', 'g_loss', 'time[sec]'])
df_test.to_csv('./saved_model/history_test.csv')
self.generator.save_weights(filepath='./saved_model/generator_weights.h5')
self.discriminator.save_weights(filepath='./saved_model/discriminator_weights.h5')
def save_imgs(self, epoch, texts, batch_size=26):
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
if batch_size == 260:
texts = self.glove_model.encode_docs(texts)
gen_imgs = self.generator.predict([noise, texts])
gen_img = combine_normalized_images(gen_imgs)
img_from_normalized_img(gen_img).save("images/snapshot/%d.png" % epoch)
def load_model(self, gen_path='./saved_model/generator_weights.h5', dis_path='./saved_model/discriminator_weights.h5'):
    """Restore pre-trained generator and discriminator weights.

    The default paths match the locations where ``train`` writes its
    final weight files; after this call the networks hold the saved
    parameters.
    """
    self.generator.load_weights(gen_path)
    self.discriminator.load_weights(dis_path)
def generate_image_from_text(self, text, flag=True):
    """Generate a single image conditioned on a text description.

    Parameters
    ----------
    text : str
        Caption to condition the generator on.
    flag : bool
        If ``True`` (default), rescale the generator output from the
        tanh range [-1, 1] to [0, 255] and return a PIL ``Image``;
        otherwise return the raw generator output array.
    """
    noise = np.zeros(shape=(1, self.latent_dim))
    encoded_text = np.zeros(shape=(1, self.embedding_dim))
    encoded_text[0, :] = self.glove_model.encode_doc(text)
    # NOTE(review): training appears to sample noise from a normal
    # distribution, but inference uses uniform noise here -- confirm
    # this mismatch is intentional.
    noise[0, :] = np.random.uniform(0, 1, self.latent_dim)
    generated_images = self.generator.predict([noise, encoded_text])
    generated_image = generated_images[0]
    if flag is True:
        # de-normalize from [-1, 1] to 8-bit pixel range
        generated_image = generated_image * 127.5 + 127.5
        return Image.fromarray(generated_image.astype(np.uint8))
    # FIX: the original `elif flag is not True` was always true when
    # reached (dead condition); a plain fall-through is equivalent.
    return generated_image
def combine_normalized_images(generated_images):
    """Tile a batch of images (N, H, W, C) into one near-square grid.

    The grid has ``int(sqrt(N))`` columns and as many rows as needed;
    tiles are placed row-major in batch order.
    """
    count = generated_images.shape[0]
    cols = int(math.sqrt(count))
    rows = int(math.ceil(float(count) / cols))
    tile_h, tile_w, channels = generated_images.shape[1:]
    canvas = np.zeros((rows * tile_h, cols * tile_w, channels),
                      dtype=generated_images.dtype)
    for idx, tile in enumerate(generated_images):
        r, c = divmod(idx, cols)
        canvas[r * tile_h:(r + 1) * tile_h, c * tile_w:(c + 1) * tile_w, :] = tile
    return canvas
def img_from_normalized_img(normalized_img):
    """Convert a [-1, 1]-normalized float array to an 8-bit PIL Image."""
    pixels = normalized_img * 127.5 + 127.5
    return Image.fromarray(pixels.astype(np.uint8))
def generate_mode():
    """Load a trained DCGAN and regenerate an image for every caption.

    Writes original/generated image pairs plus a caption CSV under
    ``./images``, then renders the full 260-emoji snapshot grid.
    """
    img_size = (64, 64, 3)
    img_path = './emoji/edited/emoji_64x64/'
    txt_path = './emoji/description/detailed'
    glove_path = './utils/glove.6B.300d.txt'
    dcgan = DCGAN(img_path, txt_path, glove_path)
    X_train, Captions, _, _, _ = load_dataset(img_path, txt_path, img_size, split_rate=0.0)
    print('Loading model...')
    dcgan.load_model()
    caption_list = []
    print('Generating images...')
    for iteration, (image, caption) in enumerate(zip(X_train, Captions)):
        # de-normalize the stored image back to 8-bit pixels for saving
        original = Image.fromarray((image * 127.5 + 127.5).astype(np.uint8))
        original.save('./images/original/' + str(iteration) + '.png')
        generated_image = dcgan.generate_image_from_text(caption)
        generated_image.save('./images/output/' + str(iteration) + '.png')
        caption_list.append([str(caption)])
    df = pd.DataFrame(caption_list, columns=['caption'])
    df.to_csv('./images/caption.csv')
    # plot all emojis
    dcgan.save_imgs(epoch=5000, texts=Captions, batch_size=260)
    print('Done!')
def train_mode():
    """Build a DCGAN for the emoji dataset and train it from scratch."""
    img_path = './emoji/edited/emoji_64x64/'
    txt_path = './emoji/description/detailed'
    glove_path = './utils/glove.6B.300d.txt'
    model = DCGAN(img_path, txt_path, glove_path)
    model.train(epochs=5000, batch_size=26, save_interval=50)
if __name__ == '__main__':
    # Expects exactly one CLI argument: '1' -> generate, '0' -> train.
    # Any other argument count exits silently (original behavior).
    if len(sys.argv) == 2:
        mode = sys.argv[1]
        if mode == '1':
            generate_mode()
        elif mode == '0':
            train_mode()
        else:
            print("Unexpected Input Value!")
| [
"utils.dataset_utils.load_dataset",
"numpy.ones",
"keras.models.Model",
"numpy.random.randint",
"numpy.random.normal",
"keras.layers.ZeroPadding2D",
"keras.layers.Input",
"keras.layers.Reshape",
"keras.layers.concatenate",
"tensorflow.GPUOptions",
"pandas.DataFrame",
"keras.layers.convolutiona... | [((521, 535), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (528, 535), True, 'import matplotlib as mpl\n'), ((990, 1015), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1000, 1015), True, 'import tensorflow as tf\n'), ((12406, 12498), 'numpy.zeros', 'np.zeros', (['(height * shape[0], width * shape[1], shape[2])'], {'dtype': 'generated_images.dtype'}), '((height * shape[0], width * shape[1], shape[2]), dtype=\n generated_images.dtype)\n', (12414, 12498), True, 'import numpy as np\n'), ((13138, 13196), 'utils.dataset_utils.load_dataset', 'load_dataset', (['img_path', 'txt_path', 'img_size'], {'split_rate': '(0.0)'}), '(img_path, txt_path, img_size, split_rate=0.0)\n', (13150, 13196), False, 'from utils.dataset_utils import load_dataset\n'), ((13782, 13829), 'pandas.DataFrame', 'pd.DataFrame', (['caption_list'], {'columns': "['caption']"}), "(caption_list, columns=['caption'])\n", (13794, 13829), True, 'import pandas as pd\n'), ((856, 913), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'visible_device_list': '"""2"""', 'allow_growth': '(True)'}), "(visible_device_list='2', allow_growth=True)\n", (869, 913), True, 'import tensorflow as tf\n'), ((1452, 1469), 'keras.optimizers.Adam', 'Adam', (['(0.0005)', '(0.5)'], {}), '(0.0005, 0.5)\n', (1456, 1469), False, 'from keras.optimizers import Adam\n'), ((1492, 1508), 'keras.optimizers.Adam', 'Adam', (['(5e-05)', '(0.5)'], {}), '(5e-05, 0.5)\n', (1496, 1508), False, 'from keras.optimizers import Adam\n'), ((1571, 1583), 'utils.glove_loader.GloveModel', 'GloveModel', ([], {}), '()\n', (1581, 1583), False, 'from utils.glove_loader import GloveModel\n'), ((2070, 2101), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (2075, 2101), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((2123, 2157), 'keras.layers.Input', 'Input', ([], {'shape': 
'(self.embedding_dim,)'}), '(shape=(self.embedding_dim,))\n', (2128, 2157), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((2605, 2634), 'keras.models.Model', 'Model', (['[z, cond_input]', 'valid'], {}), '([z, cond_input], valid)\n', (2610, 2634), False, 'from keras.models import Sequential, Model\n'), ((2774, 2821), 'keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)', 'name': '"""g_input"""'}), "(shape=(self.latent_dim,), name='g_input')\n", (2779, 2821), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((2844, 2899), 'keras.layers.Input', 'Input', ([], {'shape': '(self.embedding_dim,)', 'name': '"""cond_g_input"""'}), "(shape=(self.embedding_dim,), name='cond_g_input')\n", (2849, 2899), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((2959, 3002), 'keras.layers.concatenate', 'concatenate', (['[generator_input, cond_output]'], {}), '([generator_input, cond_output])\n', (2970, 3002), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((3739, 3793), 'keras.models.Model', 'Model', (['[generator_input, cond_input]', 'generator_output'], {}), '([generator_input, cond_input], generator_output)\n', (3744, 3793), False, 'from keras.models import Sequential, Model\n'), ((3914, 3957), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape', 'name': '"""d_input"""'}), "(shape=self.img_shape, name='d_input')\n", (3919, 3957), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((3979, 4034), 'keras.layers.Input', 'Input', ([], {'shape': '(self.embedding_dim,)', 'name': '"""cond_d_input"""'}), "(shape=(self.embedding_dim,), name='cond_d_input')\n", (3984, 4034), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((4958, 4998), 'keras.layers.concatenate', 'concatenate', (['[D, cond_d_output]'], {'axis': 
'(-1)'}), '([D, cond_d_output], axis=-1)\n', (4969, 4998), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((5296, 5358), 'keras.models.Model', 'Model', (['[discriminator_input, cond_input]', 'discriminator_output'], {}), '([discriminator_input, cond_input], discriminator_output)\n', (5301, 5358), False, 'from keras.models import Sequential, Model\n'), ((5566, 5624), 'utils.dataset_utils.load_dataset', 'load_dataset', (['self.img_path', 'self.txt_path', 'self.img_shape'], {}), '(self.img_path, self.txt_path, self.img_shape)\n', (5578, 5624), False, 'from utils.dataset_utils import load_dataset\n'), ((5879, 5932), 'pandas.DataFrame', 'pd.DataFrame', (['caption_list_train'], {'columns': "['caption']"}), "(caption_list_train, columns=['caption'])\n", (5891, 5932), True, 'import pandas as pd\n'), ((5999, 6051), 'pandas.DataFrame', 'pd.DataFrame', (['caption_list_test'], {'columns': "['caption']"}), "(caption_list_test, columns=['caption'])\n", (6011, 6051), True, 'import pandas as pd\n'), ((6157, 6181), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (6164, 6181), True, 'import numpy as np\n'), ((6197, 6222), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (6205, 6222), True, 'import numpy as np\n'), ((10059, 10152), 'pandas.DataFrame', 'pd.DataFrame', (['history'], {'columns': "['epoch', 'batch', 'd_loss', 'acc', 'g_loss', 'time[sec]']"}), "(history, columns=['epoch', 'batch', 'd_loss', 'acc', 'g_loss',\n 'time[sec]'])\n", (10071, 10152), True, 'import pandas as pd\n'), ((10220, 10309), 'pandas.DataFrame', 'pd.DataFrame', (['history_test'], {'columns': "['epoch', 'd_loss', 'acc', 'g_loss', 'time[sec]']"}), "(history_test, columns=['epoch', 'd_loss', 'acc', 'g_loss',\n 'time[sec]'])\n", (10232, 10309), True, 'import pandas as pd\n'), ((10608, 10661), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, self.latent_dim)'], {}), '(0, 1, (batch_size, 
self.latent_dim))\n', (10624, 10661), True, 'import numpy as np\n'), ((11534, 11570), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, self.latent_dim)'}), '(shape=(1, self.latent_dim))\n', (11542, 11570), True, 'import numpy as np\n'), ((11594, 11633), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, self.embedding_dim)'}), '(shape=(1, self.embedding_dim))\n', (11602, 11633), True, 'import numpy as np\n'), ((11763, 11803), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.latent_dim'], {}), '(0, 1, self.latent_dim)\n', (11780, 11803), True, 'import numpy as np\n'), ((12291, 12305), 'math.sqrt', 'math.sqrt', (['num'], {}), '(num)\n', (12300, 12305), False, 'import math\n'), ((2923, 2933), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (2928, 2933), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((3015, 3052), 'keras.layers.Dense', 'Dense', (['(256 * 8 * 8)'], {'activation': '"""relu"""'}), "(256 * 8 * 8, activation='relu')\n", (3020, 3052), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((3068, 3088), 'keras.layers.Reshape', 'Reshape', (['(8, 8, 256)'], {}), '((8, 8, 256))\n', (3075, 3088), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((3104, 3118), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {}), '()\n', (3116, 3118), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3134, 3176), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(3)', 'padding': '"""same"""'}), "(256, kernel_size=3, padding='same')\n", (3140, 3176), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3192, 3224), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3210, 3224), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((3240, 3258), 
'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3250, 3258), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((3274, 3288), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {}), '()\n', (3286, 3288), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3304, 3346), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3)', 'padding': '"""same"""'}), "(128, kernel_size=3, padding='same')\n", (3310, 3346), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3362, 3394), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3380, 3394), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((3410, 3428), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3420, 3428), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((3444, 3458), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {}), '()\n', (3456, 3458), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3474, 3515), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3)', 'padding': '"""same"""'}), "(64, kernel_size=3, padding='same')\n", (3480, 3515), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3531, 3563), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3549, 3563), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((3579, 3597), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3589, 3597), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((3613, 3665), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['self.channels'], {'kernel_size': '(3)', 'padding': '"""same"""'}), 
"(self.channels, kernel_size=3, padding='same')\n", (3619, 3665), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3696, 3714), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (3706, 3714), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((4048, 4100), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(64, kernel_size=3, strides=2, padding='same')\n", (4054, 4100), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((4134, 4154), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4143, 4154), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4170, 4183), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (4177, 4183), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((4199, 4252), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(128, kernel_size=3, strides=2, padding='same')\n", (4205, 4252), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((4268, 4307), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '((0, 1), (0, 1))'}), '(padding=((0, 1), (0, 1)))\n', (4281, 4307), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((4320, 4352), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (4338, 4352), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((4368, 4388), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4377, 4388), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4404, 4417), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), 
'(0.25)\n', (4411, 4417), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((4433, 4486), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(256, kernel_size=3, strides=1, padding='same')\n", (4439, 4486), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((4502, 4534), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (4520, 4534), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((4550, 4570), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4559, 4570), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4586, 4599), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (4593, 4599), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((4615, 4668), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(512, kernel_size=3, strides=2, padding='same')\n", (4621, 4668), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((4684, 4716), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (4702, 4716), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((4732, 4752), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4741, 4752), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4781, 4791), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (4786, 4791), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((4828, 4848), 'keras.layers.Reshape', 'Reshape', (['(1, 1, 100)'], {}), '((1, 1, 100))\n', (4835, 4848), False, 
'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((5011, 5064), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(512)'], {'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(512, kernel_size=3, strides=1, padding='same')\n", (5017, 5064), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((5080, 5112), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (5098, 5112), False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((5128, 5148), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (5137, 5148), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((5164, 5177), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5171, 5177), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((5193, 5202), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5200, 5202), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((5237, 5267), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5242, 5267), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate\n'), ((8387, 8440), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, self.latent_dim)'], {}), '(0, 1, (batch_size, self.latent_dim))\n', (8403, 8440), True, 'import numpy as np\n'), ((8538, 8549), 'time.time', 'time.time', ([], {}), '()\n', (8547, 8549), False, 'import time\n'), ((8889, 8900), 'time.time', 'time.time', ([], {}), '()\n', (8898, 8900), False, 'import time\n'), ((7001, 7054), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, self.latent_dim)'], {}), '(0, 1, (batch_size, self.latent_dim))\n', (7017, 7054), True, 'import numpy as np\n'), ((7237, 7248), 'time.time', 
'time.time', ([], {}), '()\n', (7246, 7248), False, 'import time\n'), ((7766, 7777), 'time.time', 'time.time', ([], {}), '()\n', (7775, 7777), False, 'import time\n'), ((8774, 8785), 'time.time', 'time.time', ([], {}), '()\n', (8783, 8785), False, 'import time\n'), ((8826, 8868), 'numpy.add', 'np.add', (['d_loss_real_test', 'd_loss_fake_test'], {}), '(d_loss_real_test, d_loss_fake_test)\n', (8832, 8868), True, 'import numpy as np\n'), ((9020, 9031), 'time.time', 'time.time', ([], {}), '()\n', (9029, 9031), False, 'import time\n'), ((9563, 9613), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]', 'batch_size'], {}), '(0, X_train.shape[0], batch_size)\n', (9580, 9613), True, 'import numpy as np\n'), ((4905, 4928), 'keras.backend.tile', 'K.tile', (['x', '[1, 9, 9, 1]'], {}), '(x, [1, 9, 9, 1])\n', (4911, 4928), True, 'from keras import backend as K\n'), ((7455, 7466), 'time.time', 'time.time', ([], {}), '()\n', (7464, 7466), False, 'import time\n'), ((7506, 7538), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (7512, 7538), True, 'import numpy as np\n'), ((7886, 7897), 'time.time', 'time.time', ([], {}), '()\n', (7895, 7897), False, 'import time\n')] |
import models
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from functools import reduce
import torch.optim as optim
import utils
from datetime import datetime
from PIL import Image
from scipy.spatial.distance import pdist, squareform
import torchvision.transforms as transforms
import argparse
import os
import glob
# Command-line configuration for triplet-embedding training.
# NOTE(review): the description string is left over from the PyTorch VAE
# example this script was adapted from; kept as-is to avoid changing output.
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--lr', type=float, default=0.001, metavar='N',
                    help='learning_rate')
# FIX: help text previously said 'learning_rate' (copy-paste error);
# this flag is the triplet-loss margin.
parser.add_argument('--margin', type=float, default=1, metavar='N',
                    help='triplet loss margin')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--train_dir', type=str, default='./data-train', metavar='N',
                    help='train-data')
parser.add_argument('--test_dir', type=str, default='./data-test', metavar='N',
                    help='test-data')
args = parser.parse_args()
# Use CUDA only when it is available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
class Flatten(nn.Module):
    """Module that collapses every dimension after the batch dimension."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # keep the batch axis, merge all remaining axes into one
        batch = x.size(0)
        return x.view(batch, -1)
# Deterministic preprocessing applied to every image: center-crop to
# 224x224, then convert to a float tensor.
base_transform = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor()])
# One randomly chosen photometric distortion per call; used to build the
# "noisy" positive example for each anchor image.
noisy_transform = transforms.RandomChoice([
    transforms.ColorJitter(brightness=0.5),
    transforms.ColorJitter(contrast=0.5),
    transforms.ColorJitter(saturation=0.2),
])
def random_choice_without_i(no_choices, i):
    """Draw a uniform random index in ``range(no_choices)`` excluding ``i``.

    FIX: the original implementation recursed on every rejected sample,
    which can (in pathological cases, e.g. ``no_choices == 1``) exhaust
    the recursion limit; a loop performs the same rejection sampling with
    an identical distribution over the remaining indices.
    """
    r = np.random.choice(no_choices)
    while r == i:
        r = np.random.choice(no_choices)
    return r
class NoisyImageDataset(object):
    """Dataset of triplets for metric learning on images in a directory.

    Each item is ``(anchor, positive, negative)`` where the positive is a
    photometrically distorted copy of the anchor and the negative is a
    different, randomly chosen image from the same directory.
    """

    def __init__(self, root_dir, transform=None, noisy_transform=None):
        print('load data from %s' % root_dir)
        self.image_paths = sorted(glob.glob(root_dir + '/*.jpeg'))
        # FIX: the original `assert self.image_paths != 0` compared a list
        # to an int and therefore always passed; assert non-emptiness.
        assert self.image_paths, "No images found in {}".format(root_dir)
        self.image_names = [os.path.basename(path) for path in self.image_paths]
        self.transform = transform
        # noisy pipeline: distortion first, then the base transform
        self.noisy_transform = transforms.Compose([noisy_transform, self.transform])
        self.total_data = len(self.image_paths)

    def __len__(self):
        return self.total_data

    def __getitem__(self, index):
        image, x = self._load_and_transform(index)
        pos_x = self.noisy_transform(image)
        # negative: any index other than the anchor's
        ridx = random_choice_without_i(self.total_data, index)
        _, neg_x = self._load_and_transform(ridx)
        return x, pos_x, neg_x

    def _load_and_transform(self, index):
        """Open the image at ``index`` as RGB and apply the base transform."""
        image_path = self.image_paths[index]
        # Returns image in RGB format; each pixel ranges between 0.0 and 1.0
        image = Image.open(image_path).convert('RGB')
        x = self.transform(image)
        return image, x

    def get(self, index):
        return self.__getitem__(index)
# Training set: shuffled anchor/positive/negative triplets.
noisy_dataset = NoisyImageDataset(args.train_dir, transform=base_transform, noisy_transform=noisy_transform)
trainloader = torch.utils.data.DataLoader(noisy_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
# Test set: fixed order so computed embeddings line up with image_paths.
test_noisy_dataset = NoisyImageDataset(args.test_dir, transform=base_transform, noisy_transform=noisy_transform)
testloader = torch.utils.data.DataLoader(test_noisy_dataset, batch_size=args.batch_size, shuffle=False, num_workers=1)
class Model(nn.Module):
    """Embedding network combining a VGG16 branch with two coarse
    convolutional "patch" branches.

    The VGG features (4096-d) and the concatenated patch features are
    each L2-normalized, fused by a linear encoder, and the final 4096-d
    descriptor is L2-normalized again.
    """

    def __init__(self, verbose=False):
        super(Model, self).__init__()
        self.vgg = models.VGG16()
        # fuses the 4096-d VGG features with the patch-branch features
        self.encoder = nn.Sequential(nn.Linear(3072 + 4096, 4096), nn.ReLU(True))
        self.path1 = nn.Sequential(
            nn.MaxPool2d(4, 4), nn.Conv2d(3, 96, 8, 4), nn.MaxPool2d(6, 2), Flatten())
        self.path2 = nn.Sequential(
            nn.MaxPool2d(8, 8), nn.Conv2d(3, 96, 8, 4), nn.MaxPool2d(3, 1), Flatten())
        self.dropout = nn.Dropout(p=0.2)

    def _normalize(self, x):
        # scale each row vector to unit Euclidean length
        return x.div(x.norm(p=2, dim=1, keepdim=True))

    def forward(self, x):
        vgg_feat = self._normalize(self.vgg.forward_pass(x))
        patch_feat = self._normalize(torch.cat((self.path1(x), self.path2(x)), dim=1))
        fused = self.dropout(torch.cat((vgg_feat, patch_feat), dim=1))
        return self._normalize(self.encoder(fused))

    def parameters(self):
        # Only the encoder and patch branches are exposed to the
        # optimizer; the VGG parameters are deliberately omitted
        # (presumably kept frozen -- confirm against training intent).
        trainable = []
        for module in (self.encoder, self.path1, self.path2):
            trainable.extend(module.parameters())
        return trainable
# Rolling list of batch losses; reset at every logging interval.
running_loss = []
model = Model().to(device)
triplet_loss = nn.TripletMarginLoss(margin=args.margin, p=2)
# Model.parameters() excludes the VGG branch, so Adam only updates the
# encoder and patch branches.
optimizer = optim.Adam(model.parameters(), lr=args.lr)
print('margin = %f' % args.margin)
print('learning rate = %f' % args.lr)
for epoch in range(args.epochs):
    for i, data in enumerate(trainloader, 0):
        optimizer.zero_grad()
        # data is the (anchor, positive, negative) triple from the
        # dataset; embed each tensor on the selected device
        z = list(map(lambda x: model(x.to(device)), data))
        loss = triplet_loss(*z)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss.append(loss.item())
        if i % args.log_interval == 0:
            # mean loss since the previous log line, then reset the window
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, np.mean(running_loss)))
            running_loss = []
print('Computing features for testing set')
# One 4096-d embedding row per test image, filled batch-by-batch below.
# (FIX: removed the unused `test_loss = []` local from the original.)
embeddings = np.zeros((len(testloader.dataset), 4096))
with torch.no_grad():
    for i, data in enumerate(testloader):
        x = data[0]  # anchor image only; positive/negative are unused here
        z = model(x.to(device))
        embeddings[i*args.batch_size:(i+1)*args.batch_size] = z.cpu()
# Square matrix of pairwise Euclidean distances between test embeddings.
dist_matrix = squareform(pdist(embeddings, metric='euclidean'))
print(dist_matrix.shape)
# Row-wise argsort of distances; column 0 is each image itself (distance 0).
nearest_neighbors = np.argsort(dist_matrix, axis=1)
for k in [1, 3, 5]:
    print('===== k=%d =====' % k)
    utils.get_stats(testloader.dataset.image_paths, nearest_neighbors, k=k)
    # todo: save to file
# compute feature
| [
"torch.nn.Dropout",
"models.VGG16",
"argparse.ArgumentParser",
"torch.cat",
"numpy.argsort",
"numpy.mean",
"scipy.spatial.distance.pdist",
"torch.device",
"glob.glob",
"torch.no_grad",
"torch.utils.data.DataLoader",
"utils.get_stats",
"torchvision.transforms.Compose",
"numpy.random.choice"... | [((402, 458), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""VAE MNIST Example"""'}), "(description='VAE MNIST Example')\n", (425, 458), False, 'import argparse\n'), ((1571, 1615), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (1583, 1615), False, 'import torch\n'), ((3551, 3654), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['noisy_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(noisy_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=1)\n', (3578, 3654), False, 'import torch\n'), ((3778, 3887), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_noisy_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(1)'}), '(test_noisy_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=1)\n', (3805, 3887), False, 'import torch\n'), ((5176, 5221), 'torch.nn.TripletMarginLoss', 'nn.TripletMarginLoss', ([], {'margin': 'args.margin', 'p': '(2)'}), '(margin=args.margin, p=2)\n', (5196, 5221), True, 'import torch.nn as nn\n'), ((6242, 6273), 'numpy.argsort', 'np.argsort', (['dist_matrix'], {'axis': '(1)'}), '(dist_matrix, axis=1)\n', (6252, 6273), True, 'import numpy as np\n'), ((1535, 1560), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1558, 1560), False, 'import torch\n'), ((2091, 2119), 'numpy.random.choice', 'np.random.choice', (['no_choices'], {}), '(no_choices)\n', (2107, 2119), True, 'import numpy as np\n'), ((5949, 5964), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5962, 5964), False, 'import torch\n'), ((6158, 6195), 'scipy.spatial.distance.pdist', 'pdist', (['embeddings'], {'metric': '"""euclidean"""'}), "(embeddings, metric='euclidean')\n", (6163, 6195), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((6334, 6405), 'utils.get_stats', 'utils.get_stats', 
(['testloader.dataset.image_paths', 'nearest_neighbors'], {'k': 'k'}), '(testloader.dataset.image_paths, nearest_neighbors, k=k)\n', (6349, 6405), False, 'import utils\n'), ((1809, 1835), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1830, 1835), True, 'import torchvision.transforms as transforms\n'), ((1837, 1858), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1856, 1858), True, 'import torchvision.transforms as transforms\n'), ((1909, 1947), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.5)'}), '(brightness=0.5)\n', (1931, 1947), True, 'import torchvision.transforms as transforms\n'), ((1953, 1989), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'contrast': '(0.5)'}), '(contrast=0.5)\n', (1975, 1989), True, 'import torchvision.transforms as transforms\n'), ((1995, 2033), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'saturation': '(0.2)'}), '(saturation=0.2)\n', (2017, 2033), True, 'import torchvision.transforms as transforms\n'), ((2649, 2702), 'torchvision.transforms.Compose', 'transforms.Compose', (['[noisy_transform, self.transform]'], {}), '([noisy_transform, self.transform])\n', (2667, 2702), True, 'import torchvision.transforms as transforms\n'), ((4006, 4020), 'models.VGG16', 'models.VGG16', ([], {}), '()\n', (4018, 4020), False, 'import models\n'), ((4499, 4516), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (4509, 4516), True, 'import torch.nn as nn\n'), ((4853, 4883), 'torch.cat', 'torch.cat', (['(vgg_x, p_x)'], {'dim': '(1)'}), '((vgg_x, p_x), dim=1)\n', (4862, 4883), False, 'import torch\n'), ((2388, 2419), 'glob.glob', 'glob.glob', (["(root_dir + '/*.jpeg')"], {}), "(root_dir + '/*.jpeg')\n", (2397, 2419), False, 'import glob\n'), ((2529, 2551), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2545, 2551), False, 'import os\n'), ((4071, 4099), 
'torch.nn.Linear', 'nn.Linear', (['(3072 + 4096)', '(4096)'], {}), '(3072 + 4096, 4096)\n', (4080, 4099), True, 'import torch.nn as nn\n'), ((4111, 4124), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (4118, 4124), True, 'import torch.nn as nn\n'), ((4184, 4202), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(4)', '(4)'], {}), '(4, 4)\n', (4196, 4202), True, 'import torch.nn as nn\n'), ((4216, 4238), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)', '(8)', '(4)'], {}), '(3, 96, 8, 4)\n', (4225, 4238), True, 'import torch.nn as nn\n'), ((4252, 4270), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(6)', '(2)'], {}), '(6, 2)\n', (4264, 4270), True, 'import torch.nn as nn\n'), ((4354, 4372), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(8)', '(8)'], {}), '(8, 8)\n', (4366, 4372), True, 'import torch.nn as nn\n'), ((4386, 4408), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)', '(8)', '(4)'], {}), '(3, 96, 8, 4)\n', (4395, 4408), True, 'import torch.nn as nn\n'), ((4422, 4440), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)', '(1)'], {}), '(3, 1)\n', (4434, 4440), True, 'import torch.nn as nn\n'), ((4802, 4838), 'torch.cat', 'torch.cat', (['(path1_x, path2_x)'], {'dim': '(1)'}), '((path1_x, path2_x), dim=1)\n', (4811, 4838), False, 'import torch\n'), ((3264, 3286), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (3274, 3286), False, 'from PIL import Image\n'), ((5774, 5795), 'numpy.mean', 'np.mean', (['running_loss'], {}), '(running_loss)\n', (5781, 5795), True, 'import numpy as np\n')] |
import torch
import logging
import sys
import sacred
import scipy.ndimage
import numpy as np
def place_tensor(tensor):
    """
    Moves `tensor` onto the GPU when PyTorch can see CUDA; otherwise the
    tensor is returned unchanged (still on CPU).
    """
    return tensor.cuda() if torch.cuda.is_available() else tensor
def sanitize_sacred_arguments(args):
    """
    Recursively converts Sacred's ReadOnlyList/ReadOnlyDict containers into
    native Python lists and dicts.

    `args` may be a single token, a list of items, or a dictionary of items;
    the result mirrors its structure with native container types. Dictionary
    keys are coerced to `str`.
    """
    if isinstance(args, dict):  # covers ReadOnlyDicts
        return {str(key): sanitize_sacred_arguments(value)
                for key, value in args.items()}
    if isinstance(args, list):  # covers ReadOnlyLists
        return [sanitize_sacred_arguments(element) for element in args]
    # A plain token passes through unchanged
    return args
def save_model(model, save_path):
    """
    Serializes `model` to `save_path`.

    Both the learned parameters (the state dict) and the keyword arguments
    originally used to construct the model (`model.creation_args`) are
    stored, so the model can later be rebuilt by `restore_model`.
    """
    torch.save(
        {
            "model_state": model.state_dict(),
            "model_creation_args": model.creation_args,
        },
        save_path,
    )
def restore_model(model_class, load_path):
    """
    Rebuilds a model previously written by `save_model`.

    A fresh instance of `model_class` is constructed from the stored
    creation arguments, then the stored learned parameters are loaded into
    it. `model_class` must be the class the saved model was created from.
    """
    checkpoint = torch.load(load_path)
    model = model_class(**checkpoint["model_creation_args"])
    model.load_state_dict(checkpoint["model_state"])
    return model
def convolution_size(
    given_size, num_layers, kernel_sizes, padding=0, strides=1, dilations=1,
    inverse=False
):
    """
    Computes the size of the convolutional output after applying several layers
    of convolution to an input of a given size. Alternatively, this can also
    compute the size of a convolutional input needed to create the given size
    for an output.
    Arguments:
        `given_size`: the size of an input sequence, or the size of a desired
            output sequence
        `num_layers`: number of convolutional layers to apply
        `kernel_sizes`: array of kernel sizes, to be applied in order; can also
            be an integer, which is the same kernel size for all layers
        `padding`: array of padding amounts, with each value being the amount of
            padding on each side of the input at each layer; can also be an
            integer, which is the same padding for all layers
        `strides`: array of stride values, with each value being the stride
            at each layer; can also be an integer, which is the same stride for
            all layers
        `dilations`: array of dilation values, with each value being the
            dilation at each layer; can also be an integer, which is the same
            dilation for all layers
        `inverse`: if True, computes the size of input needed to generate an
            output of size `given_size`
    Returns the size of the sequence after convolutional layers of these
    specifications are applied in order.
    """
    def _per_layer(value):
        # Broadcast a scalar to one value per layer; otherwise require a
        # per-layer sequence of the right length (DRYs up four copies of
        # the same normalization in the original).
        if type(value) is int:
            return [value] * num_layers
        assert len(value) == num_layers
        return value

    kernel_sizes = _per_layer(kernel_sizes)
    padding = _per_layer(padding)
    strides = _per_layer(strides)
    dilations = _per_layer(dilations)

    size = given_size
    if not inverse:
        # Standard 1D convolution output-size formula, applied per layer
        for i in range(num_layers):
            size = int(
                (size + (2 * padding[i]) - (dilations[i] * (kernel_sizes[i] - 1)) \
                    - 1) / strides[i]
            ) + 1
    else:
        # Invert the formula, walking the layers in reverse order
        for i in range(num_layers - 1, -1, -1):
            size = (strides[i] * (size - 1)) - (2 * padding[i]) + \
                (dilations[i] * (kernel_sizes[i] - 1)) + 1
    return size
def smooth_tensor_1d(input_tensor, smooth_sigma):
    """
    Smooths an input tensor along a dimension using a Gaussian filter.
    Arguments:
        `input_tensor`: a A x B tensor to smooth along the second dimension
        `smooth_sigma`: width of the Gaussian to use for smoothing; this is the
            standard deviation of the Gaussian to use, and the Gaussian will be
            truncated after 1 sigma (i.e. the smoothing window is
            1 + (2 * sigma); sigma of 0 means no smoothing
    Returns an array the same shape as the input tensor, with the dimension of
    `B` smoothed.
    """
    # Generate the kernel
    if smooth_sigma == 0:
        # Sigma 0 would be degenerate; use sigma 1 but truncate at 0 so the
        # window collapses to a single center sample (an identity filter)
        sigma, truncate = 1, 0
    else:
        # NOTE(review): this assumes `smooth_sigma` is a non-negative integer
        # (it sizes the kernel window below) — confirm at call sites
        sigma, truncate = smooth_sigma, 1
    base = np.zeros(1 + (2 * sigma))
    base[sigma] = 1  # Center of window is 1 everywhere else is 0
    # Filtering a unit impulse yields the (truncated) Gaussian kernel itself
    kernel = scipy.ndimage.gaussian_filter(base, sigma=sigma, truncate=truncate)
    kernel = place_tensor(torch.tensor(kernel))
    # Expand the input and kernel to 3D, with channels of 1
    # Also make the kernel float-type, as the input is going to be of type float
    input_tensor = torch.unsqueeze(input_tensor, dim=1)
    kernel = torch.unsqueeze(torch.unsqueeze(kernel, dim=0), dim=1).float()
    # padding=sigma keeps the smoothed output the same length B as the input
    smoothed = torch.nn.functional.conv1d(
        input_tensor, kernel, padding=sigma
    )
    return torch.squeeze(smoothed, dim=1)
| [
"torch.load",
"numpy.zeros",
"torch.save",
"torch.squeeze",
"torch.cuda.is_available",
"torch.nn.functional.conv1d",
"torch.unsqueeze",
"torch.tensor"
] | [((244, 269), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (267, 269), False, 'import torch\n'), ((1567, 1599), 'torch.save', 'torch.save', (['save_dict', 'save_path'], {}), '(save_dict, save_path)\n', (1577, 1599), False, 'import torch\n'), ((1942, 1963), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (1952, 1963), False, 'import torch\n'), ((5431, 5454), 'numpy.zeros', 'np.zeros', (['(1 + 2 * sigma)'], {}), '(1 + 2 * sigma)\n', (5439, 5454), True, 'import numpy as np\n'), ((5813, 5849), 'torch.unsqueeze', 'torch.unsqueeze', (['input_tensor'], {'dim': '(1)'}), '(input_tensor, dim=1)\n', (5828, 5849), False, 'import torch\n'), ((5942, 6005), 'torch.nn.functional.conv1d', 'torch.nn.functional.conv1d', (['input_tensor', 'kernel'], {'padding': 'sigma'}), '(input_tensor, kernel, padding=sigma)\n', (5968, 6005), False, 'import torch\n'), ((6032, 6062), 'torch.squeeze', 'torch.squeeze', (['smoothed'], {'dim': '(1)'}), '(smoothed, dim=1)\n', (6045, 6062), False, 'import torch\n'), ((5630, 5650), 'torch.tensor', 'torch.tensor', (['kernel'], {}), '(kernel)\n', (5642, 5650), False, 'import torch\n'), ((5879, 5909), 'torch.unsqueeze', 'torch.unsqueeze', (['kernel'], {'dim': '(0)'}), '(kernel, dim=0)\n', (5894, 5909), False, 'import torch\n')] |
#!/usr/local/anaconda/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 08 14:16:09 2015
@author: Derrick
"""
import glob
import os
import shutil
import sys
import numpy as np
import obspy
import pandas as pd
# get start and stop times
# stations = glob.glob(os.path.join(conDir,'*'))
countSoFar = 0 # This global variable used to accurately keep progress bar.
def divideIntoHours(utc1, utc2):
    """
    Generator yielding every whole hour between `utc1` and `utc2`, inclusive.

    Each endpoint is first floored to the top of its hour. Inputs can be in
    any format obspy.UTCDateTime understands.
    """
    start = obspy.UTCDateTime(utc1).timestamp
    stop = obspy.UTCDateTime(utc2).timestamp
    # Floor both endpoints to the start of their hour (epoch seconds)
    start -= start % 3600
    stop -= stop % 3600
    current = start
    while current <= stop:
        yield obspy.UTCDateTime(current)
        current += 3600  # advance one hour
def makePath(conDir, starow, utc):
    """
    Builds the glob pattern locating one station's continuous-data file for
    one specific hour:
    <conDir>/<NET.STA>/<YYYY>/<JJJ>/<NET.STA>.<YYYY>-<JJJ>T<HH>*
    """
    station_id = f"{starow.NETWORK}.{starow.STATION}"  # NET.STATION format
    year = f"{utc.year:04d}"
    julday = f"{utc.julday:03d}"
    hour = f"{utc.hour:02d}"
    filename = f"{station_id}.{year}-{julday}T{hour}*"
    return os.path.join(conDir, station_id, year, julday, filename)
def checkQuality(stPath, timestamp=None):
    """
    Load the waveform file at `stPath` with obspy and summarize its quality.

    Arguments:
        stPath: path to an obspy-readable waveform file
        timestamp: value recorded under the 'Timestamp' key of the result
            (typically the UTC hour being checked). The original code read an
            undefined global `utc` here, which raised a NameError at run
            time; the time is now passed in explicitly (default None keeps
            the signature backward compatible).

    Returns a dict with keys: Exists (always True here), HasGaps, Length
    (number of traces), Gaps (summed gap durations in seconds), Timestamp,
    and Duration (seconds between the earliest trace start and the latest
    trace end).
    """
    st = obspy.read(stPath)
    lengthStream = len(st)
    gaps = st.getGaps()
    # Each gap entry's second-to-last field is that gap's duration; sum them
    gapsum = np.sum([x[-2] for x in gaps])
    starttime = min(x.stats.starttime.timestamp for x in st)
    endtime = max(x.stats.endtime.timestamp for x in st)
    duration = endtime - starttime
    hasGaps = len(gaps) > 0
    return {'Exists': True, 'HasGaps': hasGaps, 'Length': lengthStream,
            'Gaps': gapsum, 'Timestamp': timestamp, 'Duration': duration}
def _move_files2trash(fil, newDir):
pathlist = fil.split(os.path.sep)
newpathlist = pathlist[:] # slice to make copy
newpathlist[0] = newDir
dirPath = os.path.join(*newpathlist[:-1])
if not os.path.exists(dirPath): # if the directory isn't there make it
os.makedirs(dirPath)
shutil.move(os.path.join(*pathlist), os.path.join(*newpathlist)) # move file
# This will create and update the progress bar
def _progress_bar(total, fileMoveCount):
global countSoFar
countSoFar += 1
width = 25
percent = float((float(countSoFar) / float(total)) * 100.0)
completed = int(percent)
totalLeft = 100
completedAmount = int(completed / (float(totalLeft) / float(width)))
spaceAmount = int((float(totalLeft) - float(completed)) /
(float(totalLeft) / float(width)))
for i in xrange(width):
sys.stdout.write("\r[" + "=" * completedAmount + " " * spaceAmount +
"]" + str(round(float(percent), 2)) + "%" + " " + str(countSoFar) +
"/" + str(total) + " Bad Files:" + str(fileMoveCount) + " ")
sys.stdout.flush()
# This will count the number of files in the folder structure
def _file_count(con_dir):
print("Counting files for progress bar..."),
sys.stdout.flush()
count = 0
for subdir, dirs, files in os.walk(con_dir):
for file in files:
count = count + 1
print("DONE\n")
return (count)
## Check continuity of Continuous Data Directory
# Required inputs
def check_data_quality(con_dir='ContinuousWaveForms',
                       eve_dir='EventsWaveForms',
                       stakey='StationKey.csv',
                       temkey='EventKey.csv',
                       move_files=False,
                       write_files=True,
                       bad_files_name='BadContinousWaveForms.txt',
                       bad_files_dir='BadContinuousWaveForms',
                       max_gap_duration=1,
                       minDuration=3570):
    """
    Checks the continuity/quality of every expected hour-long continuous
    waveform file for each station listed in the station key, returning a
    DataFrame with one row per station-hour.

    A file is "bad" when its summed gap duration exceeds `max_gap_duration`
    seconds or its total duration is under `minDuration` seconds; bad files
    are optionally moved to `bad_files_dir` and/or listed in
    `bad_files_name`. (`eve_dir` and `temkey` are accepted for interface
    compatibility; `temkey` is only validated, not otherwise used here.)

    Fixes in this revision:
      * bare Python 2 style ``print`` statements (a dangling ``print``
        followed by a string expression) printed blank lines and silently
        discarded the messages; they are now proper ``print(...)`` calls;
      * the "more than one file found" branch was dead code: it was an
        ``elif len(fil) > 1`` behind ``if len(fil) > 0`` and could never
        run; the branches are now ordered so it is reachable;
      * output files are opened with ``with`` so handles are always closed.
    """
    # Init dataframe
    columns = ['Exists', 'HasGaps', 'Length', 'Gaps', 'Timestamp', 'Duration']
    df = pd.DataFrame(columns=columns)
    file_move_count = 0  # running count of bad files found (and maybe moved)
    # Accept either a CSV path or an already-loaded DataFrame for both keys
    if isinstance(stakey, str):
        stakey = pd.read_csv(stakey)
    elif not isinstance(stakey, pd.DataFrame):
        raise Exception('stakey must be string or DataFrame')
    if isinstance(temkey, str):
        temkey = pd.read_csv(temkey)
    elif not isinstance(temkey, pd.DataFrame):
        raise Exception('temkey must be string or DataFrame')
    print("\nWrite to File = " + str(write_files))
    print("Move files = " + str(move_files) + "\n")
    counted_files = _file_count(con_dir)
    print("Beginning Data Quality check...")
    for stanum, starow in stakey.iterrows():  # iterate over station info
        utcGenerator = divideIntoHours(starow.STARTTIME, starow.ENDTIME)
        for utc in utcGenerator:
            utcpath = makePath(con_dir, starow, utc)
            fil = glob.glob(utcpath)
            _progress_bar(counted_files, file_move_count)
            if len(fil) == 1:
                qualDict = checkQuality(fil[0])
                df.loc[len(df)] = pd.Series(qualDict)
                if move_files or write_files:
                    gaps = qualDict['Gaps']
                    duration = qualDict['Duration']
                    if gaps > max_gap_duration or duration < minDuration:
                        file_move_count += 1
                        if move_files:
                            _move_files2trash(fil[0], bad_files_dir)
                        if write_files:
                            with open(bad_files_name, 'a') as f:
                                f.write(str(fil[0]) + '\n')
            elif len(fil) > 1:
                print('More than one file found for station hour pair')
                sys.exit(1)
            else:
                # No file for this station/hour: record an empty row.
                # NOTE(review): the 'Path' key has no matching DataFrame
                # column — preserved from the original; verify intent.
                qualDict = {'Exists': False, 'HasGaps': False, 'Length': 0,
                            'Duration': 0, 'Gaps': [], 'Path': fil,
                            'Timestamp': utc.timestamp}
                df.loc[len(df)] = pd.Series(qualDict)
    if write_files:
        # NOTE(review): mode 'w' truncates the per-file list appended above,
        # leaving only this summary line — preserved from the original.
        with open(bad_files_name, 'w') as f:
            f.write("Total Files Checked:" + str(counted_files) +
                    " Total number of bad files:" + str(file_move_count) + '\n')
    print("Quality check complete")
    return df
| [
"pandas.DataFrame",
"numpy.sum",
"os.makedirs",
"pandas.read_csv",
"os.walk",
"os.path.exists",
"sys.stdout.flush",
"obspy.UTCDateTime",
"pandas.Series",
"glob.glob",
"os.path.join",
"sys.exit",
"obspy.read"
] | [((625, 648), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['utc1'], {}), '(utc1)\n', (642, 648), False, 'import obspy\n'), ((661, 684), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['utc2'], {}), '(utc2)\n', (678, 684), False, 'import obspy\n'), ((1260, 1361), 'os.path.join', 'os.path.join', (['conDir', 'stanet', 'year', 'julday', "(stanet + '.' + year + '-' + julday + 'T' + hour + '*')"], {}), "(conDir, stanet, year, julday, stanet + '.' + year + '-' +\n julday + 'T' + hour + '*')\n", (1272, 1361), False, 'import os\n'), ((1512, 1530), 'obspy.read', 'obspy.read', (['stPath'], {}), '(stPath)\n', (1522, 1530), False, 'import obspy\n'), ((1598, 1627), 'numpy.sum', 'np.sum', (['[x[-2] for x in gaps]'], {}), '([x[-2] for x in gaps])\n', (1604, 1627), True, 'import numpy as np\n'), ((2237, 2268), 'os.path.join', 'os.path.join', (['*newpathlist[:-1]'], {}), '(*newpathlist[:-1])\n', (2249, 2268), False, 'import os\n'), ((3392, 3410), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3408, 3410), False, 'import sys\n'), ((3458, 3474), 'os.walk', 'os.walk', (['con_dir'], {}), '(con_dir)\n', (3465, 3474), False, 'import os\n'), ((4268, 4297), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (4280, 4297), True, 'import pandas as pd\n'), ((2281, 2304), 'os.path.exists', 'os.path.exists', (['dirPath'], {}), '(dirPath)\n', (2295, 2304), False, 'import os\n'), ((2355, 2375), 'os.makedirs', 'os.makedirs', (['dirPath'], {}), '(dirPath)\n', (2366, 2375), False, 'import os\n'), ((2393, 2416), 'os.path.join', 'os.path.join', (['*pathlist'], {}), '(*pathlist)\n', (2405, 2416), False, 'import os\n'), ((2418, 2444), 'os.path.join', 'os.path.join', (['*newpathlist'], {}), '(*newpathlist)\n', (2430, 2444), False, 'import os\n'), ((3224, 3242), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3240, 3242), False, 'import sys\n'), ((4496, 4515), 'pandas.read_csv', 'pd.read_csv', (['stakey'], {}), '(stakey)\n', (4507, 4515), True, 
'import pandas as pd\n'), ((4678, 4697), 'pandas.read_csv', 'pd.read_csv', (['temkey'], {}), '(temkey)\n', (4689, 4697), True, 'import pandas as pd\n'), ((879, 899), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['t'], {}), '(t)\n', (896, 899), False, 'import obspy\n'), ((5274, 5292), 'glob.glob', 'glob.glob', (['utcpath'], {}), '(utcpath)\n', (5283, 5292), False, 'import glob\n'), ((5466, 5485), 'pandas.Series', 'pd.Series', (['qualDict'], {}), '(qualDict)\n', (5475, 5485), True, 'import pandas as pd\n'), ((6215, 6226), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6223, 6226), False, 'import sys\n'), ((6484, 6503), 'pandas.Series', 'pd.Series', (['qualDict'], {}), '(qualDict)\n', (6493, 6503), True, 'import pandas as pd\n')] |
import requests
import torch
import numpy as np
from . import payloadutils
class Helper():
    """
    Client-side coordinator for privacy-preserving federated gradient
    computation.

    The helper masks individual training points, distributes them (together
    with a serialized model) across one or more helper services over HTTP,
    and merges the differentially-private gradients those services return so
    they can be applied to a local PyTorch model.

    Fixes in this revision:
      * the annotations ``torch.Module`` (no such attribute) and
        ``dict[str: np.array]`` (a slice, not a type) raised at class
        definition time and are corrected;
      * the input-validity check in ``send_post_request`` referenced an
        undefined name ``x`` and was short-circuited into dead code; it now
        actually validates the input shapes.
    """

    def __init__(self, datasource, task='binary_classifier'):
        # Privacy settings stay None until the corresponding setters run
        self.dp_settings = None
        self.agg_settings = None
        self.task = task
        self.datasource = datasource

    def set_diff_privacy(self, mechanism='laplacian', epsilon=1, norm_bound=1):
        """Configures the differential-privacy mechanism the helpers apply."""
        self.dp_settings = dict(
            mechanism=mechanism,
            epsilon=epsilon,
            gradient_bound=norm_bound
        )

    def set_aggregation_privacy(self, mechanism='standard', threshold=10):
        """Configures the aggregation mechanism and its K threshold."""
        self.agg_settings = dict(
            mechanism=mechanism,
            K=threshold
        )

    def get_privacy_settings(self):
        """Flattens the DP and aggregation settings into one request dict.

        Both `set_diff_privacy` and `set_aggregation_privacy` must have been
        called first; otherwise the settings are still None and this raises.
        """
        privacy_settings = dict(
            method=self.dp_settings['mechanism'],
            epsilon=self.dp_settings['epsilon'],
            norm_bound=self.dp_settings['gradient_bound'],
            K=self.agg_settings['K']
        )
        return privacy_settings

    def set_model_name(self, model_name):
        self.model_name = model_name

    def set_loss_fn(self, lossfn, loss_kwargs):
        self.loss_name = lossfn
        self.loss_kwargs = loss_kwargs

    def set_helper_names(self, helper_names):
        self.helper_names = helper_names

    def set_endpoints(self, endpoints):
        self.endpoints = endpoints

    def get_encryption_standard(self):
        # Only cleartext transport is currently supported
        return "cleartext"

    def get_model_settings(self):
        """Returns the (model_tag, loss_name, loss_kwargs) triple."""
        return self.model_name, self.loss_name, self.loss_kwargs

    def generate_masks(self, sum_to_one=True):
        """Draws a random additive mask pair (a0, a1).

        The pair sums to 1 when `sum_to_one` is True, and to 0 otherwise.
        """
        a0 = np.random.uniform()
        if sum_to_one:
            a1 = 1.0 - a0
        else:
            a1 = -a0
        return a0, a1

    def create_datapoint_payload(self, input_point, targets, mask, model_tag):
        """Wraps one (masked) data point in the helper request schema.

        Tensors are detached and converted to plain Python lists so the
        payload is JSON-serializable; other inputs pass through unchanged.
        """
        if torch.is_tensor(input_point):
            inputs = input_point.squeeze().detach().cpu().numpy().tolist()
        else:
            inputs = input_point
        if torch.is_tensor(targets):
            targets = targets.squeeze().detach().cpu().numpy().tolist()
        payload = dict(
            aggregation_service_payload=dict(
                encryption_standard=self.get_encryption_standard(),
                payload=dict(
                    model_features=inputs,
                    model_label=targets,
                    mask=mask,
                    model_tag=model_tag
                )
            )
        )
        return payload

    def create_data_payload(self, inputs, targets, masks, model_tag, helpers):
        """Builds per-helper lists of data-point payloads.

        `helpers[i]` names the helper that should receive point i; the
        result maps each helper name to its payload list, order preserved.
        """
        data_payloads = [self.create_datapoint_payload(input, target, mask, model_tag) for
                         input, target, mask in zip(inputs, targets, masks)]
        data_dict = {}
        for helper in helpers:
            data_dict[helper] = [x for helper_name, x in zip(helpers, data_payloads) if helper_name == helper]
        return data_dict

    def create_model_payload(self, model):
        """Wraps the serialized (bytes) model and loss settings for the request."""
        model_tag, lossfn, lossfn_kwargs = self.get_model_settings()
        payload = [
            dict(
                model_tag=model_tag,
                model=dict(
                    encoded_str=model.decode('latin1'),
                    encoding='latin1'
                ),
                model_loss_function=dict(
                    loss_type="singleton",
                    loss_name=lossfn,
                    loss_kwargs=lossfn_kwargs
                )
            )
        ]
        return payload

    def get_helper_names(self):
        # Callers must have declared the full helper name strings beforehand
        return self.helper_names

    def parse_return_json(self, json_data):
        """Decodes a helper response's noisy gradients into float32 tensors.

        Arrays arrive flattened, with dtype and shape metadata alongside.
        """
        gradients = []
        for gradient in json_data['aggregation_model_set']:
            grad = {}
            for key, val in gradient['model_noisy_gradients'].items():
                grad[key] = torch.tensor(
                    np.array(val['__ndarray__'], val['dtype']).reshape(val['shape']),
                    dtype=torch.float32)
            gradients.append(grad)
        return gradients

    def query_helpers(self, data_payload, model_payload, endpoints):
        """POSTs each helper its payloads and parses the returned gradients.

        NOTE(review): preserved from the original — only the first helper's
        gradients are returned even when several helpers are queried.
        """
        gradient_list = []
        helper_list = data_payload.keys()
        for ii, helper in enumerate(helper_list):
            needed_func = "gradient_computation"
            privacy_settings = self.get_privacy_settings()
            json_payload = dict(
                origin=helper,
                function=needed_func,
                privacy_settings=privacy_settings,
                aggregation_service_payload_set=data_payload[helper],
                aggregation_model_set=model_payload
            )
            return_val = requests.post(endpoints[ii], json=json_payload)
            gradients = self.parse_return_json(return_val.json())
            gradient_list.append(gradients)
        return gradient_list[0]

    def merge_gradients(self, gradient_list):
        """Sums the per-parameter gradients of every dict in `gradient_list`."""
        accumulated_grads = gradient_list[0]
        if len(gradient_list) > 1:
            for grad in gradient_list[1:]:
                for param in accumulated_grads:
                    accumulated_grads[param] += grad[param]
        return accumulated_grads

    def normalize_gradients(self, gradients, normalizer):
        """Divides every gradient in place by `normalizer` and returns the dict."""
        for param in gradients.keys():
            gradients[param] /= normalizer
        return gradients

    def create_private_pseudodata(self, inputs, targets, helper_list):
        """Expands each point into four masked pseudo-points (binary task).

        Every input is repeated four times with label pattern [0, 1, 0, 1];
        the masks are arranged so that summing the masked contributions for
        the true label reconstructs the real point, hiding individual labels
        from any single helper. When two helpers are available, the four
        copies are split evenly between them.
        """
        if self.task == 'binary_classifier':
            inputs = torch.repeat_interleave(inputs, 4, dim=0)
            masks = []
            helpers = []
            output_targets = [0.0, 1.0] * int(inputs.shape[0] / 2)
            for ii, target in enumerate(targets):
                a0, a1 = self.generate_masks(sum_to_one=False)
                b0, b1 = self.generate_masks(sum_to_one=True)
                if target == 1.0:
                    masks.extend([a0, b0, a1, b1])
                else:
                    masks.extend([b0, a0, b1, a1])
                if len(helper_list) == 2:
                    helpers.extend([helper_list[0], helper_list[0], helper_list[1], helper_list[1]])
                else:
                    helpers.extend([helper_list[0]] * 4)
        return inputs, output_targets, masks, helpers

    def send_post_request(self, inputs, targets, model, normalize_grads=True):
        """Masks the batch, queries the helpers, and returns merged gradients.

        `inputs` is a 2-D tensor of features (one row per point), `targets`
        the matching labels; `model` is serialized to ONNX before shipping.
        """
        model = payloadutils.pytorch_to_onnx(model, inputs)
        n_inputs = len(targets)
        helper_list = self.get_helper_names()
        # The original wrote `not a and b` (with an undefined name), which
        # never fired; this is the intended consistency check.
        if not (n_inputs == len(targets) and n_inputs == inputs.shape[0]):
            raise ValueError('We require n_targets==n_inputs')
        inputs, targets, masks, helpers = self.create_private_pseudodata(inputs, targets, helper_list)
        n_inputs = len(masks)
        inputs = [inputs[ii, :] for ii in range(n_inputs)]
        data_payload = self.create_data_payload(inputs, targets, masks, self.model_name, helpers)
        model_payload = self.create_model_payload(model)
        endpoints = self.endpoints
        gradient_list = self.query_helpers(data_payload, model_payload, endpoints)
        accumulated_gradients = self.merge_gradients(gradient_list)
        if normalize_grads:
            # Average over the expanded pseudo-point count
            accumulated_gradients = self.normalize_gradients(accumulated_gradients, n_inputs)
        return accumulated_gradients

    def set_gradients(self, network: torch.nn.Module, gradients: dict):
        """Writes the fetched gradients onto `network`'s parameters, in order.

        Gradient dict entries are paired positionally with
        `network.named_parameters()`, so key order must match parameter order.
        """
        for grad, (name, param) in zip(gradients.keys(), network.named_parameters()):
            param.grad = gradients[grad]

    def fetch_gradients(self, model: torch.nn.Module, normalize_grads=True):
        """Drains the datasource queue and fetches gradients for that batch.

        The gradients and the model are stashed on `self` for a subsequent
        `backward()` call.

        :param model: the PyTorch model to compute gradients for
        :param normalize_grads: average gradients over the pseudo-point count
        :return: None
        """
        inputs, target_list = map(list, zip(*self.datasource.data_queue))
        inputs = torch.tensor(np.asarray(inputs), dtype=torch.float32)
        targets = torch.tensor(np.asarray(target_list), dtype=torch.float32)
        self.datasource.clear_dataqueue()
        self.grads = self.send_post_request(inputs, targets, model, normalize_grads)
        self.model = model
        return None

    def backward(self):
        '''
        Modifies the state of self.model to have gradients contained in self.grads. Should only be called after
        self.fetch_gradients()
        :return: None
        '''
        self.set_gradients(self.model, self.grads)
        return None
"numpy.random.uniform",
"torch.repeat_interleave",
"numpy.asarray",
"numpy.array",
"requests.post",
"torch.is_tensor"
] | [((1582, 1601), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1599, 1601), True, 'import numpy as np\n'), ((1797, 1825), 'torch.is_tensor', 'torch.is_tensor', (['input_point'], {}), '(input_point)\n', (1812, 1825), False, 'import torch\n'), ((1983, 2007), 'torch.is_tensor', 'torch.is_tensor', (['targets'], {}), '(targets)\n', (1998, 2007), False, 'import torch\n'), ((4626, 4673), 'requests.post', 'requests.post', (['endpoints[ii]'], {'json': 'json_payload'}), '(endpoints[ii], json=json_payload)\n', (4639, 4673), False, 'import requests\n'), ((5429, 5470), 'torch.repeat_interleave', 'torch.repeat_interleave', (['inputs', '(4)'], {'dim': '(0)'}), '(inputs, 4, dim=0)\n', (5452, 5470), False, 'import torch\n'), ((7745, 7763), 'numpy.asarray', 'np.asarray', (['inputs'], {}), '(inputs)\n', (7755, 7763), True, 'import numpy as np\n'), ((7817, 7840), 'numpy.asarray', 'np.asarray', (['target_list'], {}), '(target_list)\n', (7827, 7840), True, 'import numpy as np\n'), ((3864, 3906), 'numpy.array', 'np.array', (["val['__ndarray__']", "val['dtype']"], {}), "(val['__ndarray__'], val['dtype'])\n", (3872, 3906), True, 'import numpy as np\n')] |
import numpy as np
import heapq
from dataclasses import dataclass, field
from typing import Tuple, DefaultDict, Dict, List, Set
from collections import defaultdict
def read_file(filename):
    """Reads `filename` and returns its lines with surrounding whitespace stripped."""
    with open(filename) as handle:
        return [row.strip() for row in handle]
def parse_input(data: List[str]) -> np.ndarray:
    """Converts lines of digit characters into a 2-D integer risk grid."""
    grid = [list(line) for line in data]
    return np.array(grid, dtype=int)
INFINITY = float('inf')
# Type aliases used throughout the solver
Coords = Tuple[int, int]
Path = List['Node']
@dataclass
class Node:
    """One grid cell of the risk map, carrying Dijkstra bookkeeping state."""
    # Grid position of this node
    coords: Coords
    # Cost (risk) of entering this cell
    cost: int
    # Memoized best known (total_cost, path) from a given source node;
    # entries default to (infinity, empty path) until Dijkstra relaxes them
    path_to: DefaultDict['Node', Path] = field(default_factory=lambda: defaultdict(lambda: (INFINITY, [])), repr=False)
    # Adjacent (4-connected) nodes; populated by make_nodes
    connections: List['Node'] = field(default_factory=list, repr=False)
    def __post_init__(self):
        # A node reaches itself at zero cost via the trivial one-node path
        self.path_to[self] = (0, [self])
    def __hash__(self) -> int:
        # Hash by coordinates only, so nodes can serve as dict/set keys
        # despite their mutable bookkeeping fields
        return hash(self.coords)
    def __str__(self) -> str:
        return f'{self.coords}: {self.cost}'
    def __repr__(self):
        return f'{self}'
    def __lt__(self, other):
        # Stable ordering by coordinates; breaks ties inside the heap when
        # two queue entries share the same cost
        return self.coords < other.coords
# NOTE(review): leftover debug instances — apparently unused by the solution
a = Node((0,0), 5)
b = Node((0,1), 2)
def make_nodes(mx: np.ndarray) -> Dict[Coords, Node]:
    """
    Builds a Node for every cell of `mx` and links each node to its
    4-connected neighbours (in both directions), returning a
    coords -> Node mapping.
    """
    rows, cols = mx.shape
    nodes: Dict[Coords, Node] = {
        (r, c): Node((r, c), mx[r, c])
        for r in range(rows) for c in range(cols)
    }
    def link(first, second):
        # Register an undirected edge between two nodes
        first.connections.append(second)
        second.connections.append(first)
    for r in range(rows - 1):
        for c in range(cols):
            link(nodes[(r, c)], nodes[(r + 1, c)])  # vertical neighbours
    for r in range(rows):
        for c in range(cols - 1):
            link(nodes[(r, c)], nodes[(r, c + 1)])  # horizontal neighbours
    return nodes
def dijkstra(a: Node, b: Node):
    """
    Dijkstra's shortest path from source `a` to destination `b`.

    Best-known (cost, path) pairs are memoized on each node's `path_to[a]`
    entry (mutated in place). Returns `b` once it is popped as the cheapest
    frontier node; falls through (returning None) if `b` is unreachable.
    """
    queue: List[Node] = []
    completed: Set[Node] = set()
    # Seed the frontier with the source at its own (zero) cost
    heapq.heappush(queue, (a.path_to[a][0], a))
    while queue:
        cost, node = heapq.heappop(queue)
        if node in completed: continue  # stale duplicate queue entry
        if node is b: return node  # destination finalized: done
        # Re-read the memoized best cost/path from the source
        cost, path = node.path_to[a]
        for child in node.connections:
            if child in completed: continue
            new_cost = cost + child.cost
            # Relax the edge: record the cheaper route and re-queue the child
            if new_cost < child.path_to[a][0]:
                child.path_to[a] = (new_cost, path + [child])
                heapq.heappush(queue, (new_cost, child))
        completed.add(node)
def part1(inputs: List[str]):
    """Shortest total risk from the top-left to the bottom-right cell."""
    grid = parse_input(inputs)
    nodes = make_nodes(grid)
    all_nodes = list(nodes.values())
    source, target = all_nodes[0], all_nodes[-1]
    destination = dijkstra(source, target)
    return destination.path_to[source][0]
def expand_map(mx: np.ndarray, n=5):
    """
    Tiles the risk map `n` times in each direction; each tile's values are
    incremented by its tile distance, with values above 9 wrapping to 1.

    Fixes in this revision: removed a dead `result = mx` assignment whose
    value was immediately overwritten, and replaced the name-bound lambda
    with a named inner function (PEP 8 E731).
    """
    def increment(grid, step):
        # Add `step` to every cell, wrapping 10, 11, ... back to 1, 2, ...
        return np.mod(grid + step - 1, 9) + 1

    expanded = np.concatenate([increment(mx, i) for i in range(n)], axis=1)
    return np.concatenate([increment(expanded, i) for i in range(n)], axis=0)
def part2(inputs: List[str]):
    """Same as part1, but on the risk map tiled 5x in each direction."""
    grid = expand_map(parse_input(inputs))
    nodes = make_nodes(grid)
    all_nodes = list(nodes.values())
    source, target = all_nodes[0], all_nodes[-1]
    destination = dijkstra(source, target)
    return destination.path_to[source][0]
# Sample grid from the puzzle statement, used to validate each part
test_input = [
    '1163751742',
    '1381373672',
    '2136511328',
    '3694931569',
    '7463417111',
    '1319128137',
    '1359912421',
    '3125421639',
    '1293138521',
    '2311944581',
]
DAY = 15
# Known answers for the sample input; a falsy value skips that validation
TEST_SOLUTION_1 = 40
TEST_SOLUTION_2 = 315
input_raw = read_file(f'2021/data/day{DAY:02d}/input.txt')
if TEST_SOLUTION_1:
    # Validate part 1 on the sample before solving the real input
    assert part1(test_input) == TEST_SOLUTION_1
    print(f"Solution 1:\n{part1(input_raw)}")
    if TEST_SOLUTION_2:
        # Validate part 2 on the sample before solving the real input
        assert part2(test_input) == TEST_SOLUTION_2
        print(f"Solution 2:\n{part2(input_raw)}")
    else:
        # Part 2 answer not yet known: only show the sample result
        print(f"Test 2:\n{part2(test_input)}")
else:
    # Part 1 answer not yet known: only show the sample result
    print(f"Test 1:\n{part1(test_input)}")
| [
"heapq.heappush",
"numpy.mod",
"heapq.heappop",
"dataclasses.field",
"collections.defaultdict",
"numpy.array"
] | [((336, 393), 'numpy.array', 'np.array', (['[[c for c in line] for line in data]'], {'dtype': 'int'}), '([[c for c in line] for line in data], dtype=int)\n', (344, 393), True, 'import numpy as np\n'), ((681, 720), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'repr': '(False)'}), '(default_factory=list, repr=False)\n', (686, 720), False, 'from dataclasses import dataclass, field\n'), ((1842, 1885), 'heapq.heappush', 'heapq.heappush', (['queue', '(a.path_to[a][0], a)'], {}), '(queue, (a.path_to[a][0], a))\n', (1856, 1885), False, 'import heapq\n'), ((1924, 1944), 'heapq.heappop', 'heapq.heappop', (['queue'], {}), '(queue)\n', (1937, 1944), False, 'import heapq\n'), ((2776, 2797), 'numpy.mod', 'np.mod', (['(mx + n - 1)', '(9)'], {}), '(mx + n - 1, 9)\n', (2782, 2797), True, 'import numpy as np\n'), ((599, 635), 'collections.defaultdict', 'defaultdict', (['(lambda : (INFINITY, []))'], {}), '(lambda : (INFINITY, []))\n', (610, 635), False, 'from collections import defaultdict\n'), ((2304, 2344), 'heapq.heappush', 'heapq.heappush', (['queue', '(new_cost, child)'], {}), '(queue, (new_cost, child))\n', (2318, 2344), False, 'import heapq\n')] |
"""
Base class for Flat File vocabulary importers
"""
import numpy as np
from collections import OrderedDict
from os import path
from vocabulary_importers.vocabulary_importer import VocabularyImporter
class FlatFileVocabularyImporter(VocabularyImporter):
    """Base class for vocabulary importers that read token/embedding pairs
    from a delimited flat file (one token per line, its embedding after it).
    """
    def __init__(self, vocabulary_name, tokens_and_embeddings_filename, delimiter):
        """Initialize the FlatFileVocabularyImporter.
        Args:
            vocabulary_name: See base class
            tokens_and_embeddings_filename: Name of the file containing the token/word list and embeddings.
                Format should be one line per word where the word is at the beginning of the line and the embedding vector follows
                separated by a delimiter.
            delimiter: Character that separates the word and the values of the embedding vector.
        """
        # Fixes: the docstring above originally appeared *after* the super()
        # call, making it a no-op string statement rather than documentation;
        # the Python 2 style super(Class, self) call is also modernized.
        super().__init__(vocabulary_name)
        self.tokens_and_embeddings_filename = tokens_and_embeddings_filename
        self.delimiter = delimiter
    def _read_vocabulary_and_embeddings(self, vocabulary_dir):
        """Read the raw vocabulary file(s) and return an OrderedDict mapping
        each (processed) token to its float32 embedding vector.
        Args:
            vocabulary_dir: See base class
        """
        tokens_and_embeddings_filepath = path.join(vocabulary_dir, self.tokens_and_embeddings_filename)
        tokens_with_embeddings = OrderedDict()
        with open(tokens_and_embeddings_filepath, encoding="utf-8") as file:
            # The enumerate index in the original was never used
            for line in file:
                values = line.split(self.delimiter)
                token = values[0].strip()
                if token != "":
                    token = self._process_token(token)
                    tokens_with_embeddings[token] = np.array(values[1:], dtype=np.float32)
        return tokens_with_embeddings
"collections.OrderedDict",
"os.path.join",
"numpy.array"
] | [((1387, 1449), 'os.path.join', 'path.join', (['vocabulary_dir', 'self.tokens_and_embeddings_filename'], {}), '(vocabulary_dir, self.tokens_and_embeddings_filename)\n', (1396, 1449), False, 'from os import path\n'), ((1483, 1496), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1494, 1496), False, 'from collections import OrderedDict\n'), ((1851, 1889), 'numpy.array', 'np.array', (['values[1:]'], {'dtype': 'np.float32'}), '(values[1:], dtype=np.float32)\n', (1859, 1889), True, 'import numpy as np\n')] |
# ===============================================================================================================
# Copyright (c) 2019, Cornell University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the above copyright otice, this list of conditions and
# the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# * Neither the name of Cornell University nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# Author: <NAME> (<EMAIL>)
#
# The research is based upon work supported by the Office of the Director of National Intelligence (ODNI),
# Intelligence Advanced Research Projects Activity (IARPA), via DOI/IBC Contract Number D17PC00287.
# The U.S. Government is authorized to reproduce and distribute copies of this work for Governmental purposes.
# ===============================================================================================================
import numpy as np
from scipy import linalg
import logging
def factorize(matrix):
    """
    Factorizes a 3x4 projection matrix into an intrinsic matrix (upper
    triangular, positive diagonal, normalized so K[2,2] == 1), a proper
    rotation (determinant +1), and a translation, returned as (K, R, t).
    """
    # RQ-decompose the left 3x3 block into upper-triangular * orthogonal
    intrinsic, rotation = linalg.rq(matrix[:, :3])
    # Translation solves K @ t = p4 (the last column), before sign fixing
    translation = linalg.lstsq(intrinsic, matrix[:, 3:4])[0]
    # Sign disambiguation: the intrinsic diagonal must end up all positive
    # and the rotation must have no reflection component.
    #logging.info('before fixing, diag of r: {}, {}, {}'.format(intrinsic[0, 0], intrinsic[1, 1], intrinsic[2, 2]))
    diag_negatives = sum(int(intrinsic[i, i] < 0) for i in range(3))
    if diag_negatives % 2 == 1:
        # An odd count of negative diagonal entries is removed by a global
        # sign flip of the intrinsic matrix
        intrinsic = -intrinsic
    diag_negatives = sum(int(intrinsic[i, i] < 0) for i in range(3))
    assert (diag_negatives == 0 or diag_negatives == 2)
    # Any remaining pair of negatives is flipped by a +-1 diagonal matrix
    # inserted between K and R (fix @ fix == I, so their product K R is
    # unchanged up to the global sign chosen above)
    fix = np.diag([-1 if intrinsic[i, i] < 0 else 1 for i in range(3)])
    intrinsic = np.dot(intrinsic, fix)
    rotation = np.dot(fix, rotation)
    translation = np.dot(fix, translation)
    assert (linalg.det(rotation) > 0)
    #logging.info('after fixing, diag of r: {}, {}, {}'.format(intrinsic[0, 0], intrinsic[1, 1], intrinsic[2, 2]))
    # check correctness
    # ratio = np.dot(intrinsic, np.hstack((rotation, translation))) / matrix
    # assert (np.all(ratio > 0) or np.all(ratio < 0))
    # Normalize so the intrinsic matrix has a unit bottom-right entry
    intrinsic /= intrinsic[2, 2]
    return intrinsic, rotation, translation
# colmap convention for pixel indices: (col, row)
def solve_perspective(xx, yy, zz, col, row, keep_mask=None):
    """
    Estimate a 3x4 perspective projection matrix from 3D-2D correspondences via
    the DLT, then factorize it into intrinsics, rotation and translation.

    :param xx: Column vector of scene x coordinates.
    :param yy: Column vector of scene y coordinates.
    :param zz: Column vector of scene z coordinates.
    :param col: Column vector of pixel column coordinates.
    :param row: Column vector of pixel row coordinates.
    :param keep_mask: Optional boolean mask selecting inlier correspondences.
    :return: (r, q, t) -- intrinsics, rotation and translation (see factorize).
    """
    size_deltas = np.array([yy.size - xx.size, zz.size - xx.size, col.size - xx.size, row.size - xx.size])
    assert (np.all(size_deltas == 0))
    if keep_mask is not None:
        # keep only the inlier correspondences
        xx = xx[keep_mask].reshape((-1, 1))
        yy = yy[keep_mask].reshape((-1, 1))
        zz = zz[keep_mask].reshape((-1, 1))
        row = row[keep_mask].reshape((-1, 1))
        col = col[keep_mask].reshape((-1, 1))
    n_points = xx.size
    ones = np.ones((n_points, 1))
    zeros = np.zeros((n_points, 4))
    # Each correspondence contributes two homogeneous linear equations in the
    # 12 entries of the projection matrix.
    rows_u = np.hstack((xx, yy, zz, ones,
                          zeros,
                          -col * xx, -col * yy, -col * zz, -col * ones))
    rows_v = np.hstack((zeros,
                          xx, yy, zz, ones,
                          -row * xx, -row * yy, -row * zz, -row * ones))
    system = np.vstack((rows_u, rows_v))
    # The least-squares solution is the right singular vector belonging to the
    # smallest singular value.
    _, s, vh = linalg.svd(system, full_matrices=False)
    P = np.real(vh[11, :]).reshape((3, 4))
    singular_values = ''.join(' {}'.format(np.real(s[i])) for i in range(11, -1, -1))
    logging.info('singular values: {}'.format(singular_values))
    # factorize into standard form
    r, q, t = factorize(P)
    return r, q, t
| [
"scipy.linalg.rq",
"numpy.zeros",
"numpy.all",
"numpy.ones",
"numpy.hstack",
"numpy.vstack",
"scipy.linalg.svd",
"numpy.array",
"scipy.linalg.det",
"scipy.linalg.lstsq",
"numpy.real",
"numpy.dot",
"numpy.diag"
] | [((2285, 2309), 'scipy.linalg.rq', 'linalg.rq', (['matrix[:, :3]'], {}), '(matrix[:, :3])\n', (2294, 2309), False, 'from scipy import linalg\n'), ((2959, 2977), 'numpy.diag', 'np.diag', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (2966, 2977), True, 'import numpy as np\n'), ((3203, 3217), 'numpy.dot', 'np.dot', (['r', 'fix'], {}), '(r, fix)\n', (3209, 3217), True, 'import numpy as np\n'), ((3226, 3240), 'numpy.dot', 'np.dot', (['fix', 'q'], {}), '(fix, q)\n', (3232, 3240), True, 'import numpy as np\n'), ((3249, 3263), 'numpy.dot', 'np.dot', (['fix', 't'], {}), '(fix, t)\n', (3255, 3263), True, 'import numpy as np\n'), ((3880, 3973), 'numpy.array', 'np.array', (['[yy.size - xx.size, zz.size - xx.size, col.size - xx.size, row.size - xx.size]'], {}), '([yy.size - xx.size, zz.size - xx.size, col.size - xx.size, row.\n size - xx.size])\n', (3888, 3973), True, 'import numpy as np\n'), ((3981, 4003), 'numpy.all', 'np.all', (['(diff_size == 0)'], {}), '(diff_size == 0)\n', (3987, 4003), True, 'import numpy as np\n'), ((4805, 4828), 'numpy.ones', 'np.ones', (['(point_cnt, 1)'], {}), '((point_cnt, 1))\n', (4812, 4828), True, 'import numpy as np\n'), ((4845, 4869), 'numpy.zeros', 'np.zeros', (['(point_cnt, 4)'], {}), '((point_cnt, 4))\n', (4853, 4869), True, 'import numpy as np\n'), ((4920, 5018), 'numpy.hstack', 'np.hstack', (['(xx, yy, zz, all_ones, all_zeros, -col * xx, -col * yy, -col * zz, -col *\n all_ones)'], {}), '((xx, yy, zz, all_ones, all_zeros, -col * xx, -col * yy, -col * zz,\n -col * all_ones))\n', (4929, 5018), True, 'import numpy as np\n'), ((5064, 5162), 'numpy.hstack', 'np.hstack', (['(all_zeros, xx, yy, zz, all_ones, -row * xx, -row * yy, -row * zz, -row *\n all_ones)'], {}), '((all_zeros, xx, yy, zz, all_ones, -row * xx, -row * yy, -row * zz,\n -row * all_ones))\n', (5073, 5162), True, 'import numpy as np\n'), ((5208, 5227), 'numpy.vstack', 'np.vstack', (['(A1, A2)'], {}), '((A1, A2))\n', (5217, 5227), True, 'import numpy as np\n'), ((5243, 5277), 
'scipy.linalg.svd', 'linalg.svd', (['A'], {'full_matrices': '(False)'}), '(A, full_matrices=False)\n', (5253, 5277), False, 'from scipy import linalg\n'), ((2348, 2379), 'scipy.linalg.lstsq', 'linalg.lstsq', (['r', 'matrix[:, 3:4]'], {}), '(r, matrix[:, 3:4])\n', (2360, 2379), False, 'from scipy import linalg\n'), ((3028, 3048), 'numpy.diag', 'np.diag', (['(-1, -1, 1)'], {}), '((-1, -1, 1))\n', (3035, 3048), True, 'import numpy as np\n'), ((3277, 3290), 'scipy.linalg.det', 'linalg.det', (['q'], {}), '(q)\n', (3287, 3290), False, 'from scipy import linalg\n'), ((3101, 3121), 'numpy.diag', 'np.diag', (['(-1, 1, -1)'], {}), '((-1, 1, -1))\n', (3108, 3121), True, 'import numpy as np\n'), ((5352, 5370), 'numpy.real', 'np.real', (['vh[11, :]'], {}), '(vh[11, :])\n', (5359, 5370), True, 'import numpy as np\n'), ((5485, 5498), 'numpy.real', 'np.real', (['s[i]'], {}), '(s[i])\n', (5492, 5498), True, 'import numpy as np\n'), ((3174, 3194), 'numpy.diag', 'np.diag', (['(1, -1, -1)'], {}), '((1, -1, -1))\n', (3181, 3194), True, 'import numpy as np\n')] |
from collections import OrderedDict, namedtuple
from functools import partial
from six import string_types
from artemis.config import get_artemis_config_value
from artemis.general.checkpoint_counter import Checkpoints
from artemis.plotting.matplotlib_backend import BarPlot, BoundingBoxPlot, ResamplingLineHistory
from matplotlib.axes import Axes
from matplotlib.gridspec import SubplotSpec
from contextlib import contextmanager
import numpy as np
from matplotlib import pyplot as plt
from artemis.plotting.drawing_plots import redraw_figure
from artemis.plotting.expanding_subplots import select_subplot
from artemis.plotting.matplotlib_backend import get_plot_from_data, TextPlot, MovingPointPlot, Moving2DPointPlot, \
MovingImagePlot, HistogramPlot, CumulativeLineHistogram
from artemis.plotting.matplotlib_backend import LinePlot, ImagePlot, is_server_plotting_on
if is_server_plotting_on():
from artemis.remote.plotting.plotting_client import deconstruct_plotting_server
__author__ = 'peter'
"""
dbplot just takes your data, and plots it. No fuss, no muss. No more thinking about what kind of plot to use, or how
to make updating plots of changing variables. Just dbplot it.
dbplot(data, 'my-data')
dbplot will look at your data, and figure out which type of plot is appropriate. If you don't like it, you can
customize it, using the plot_type argument.
dbplot makes online plotting easy. You want to plot updates to your variable? Just dbplot it.
dbplot(var, 'my-var')
dbplot(updated_var, 'my-var')
See demo_dbplot.py for some demos of what dbplot can do.
"""
def dbplot(data, name = None, plot_type = None, axis=None, plot_mode = 'live', draw_now = True, hang = False, title=None,
           fig = None, xlabel = None, ylabel = None, draw_every = None, layout=None, legend=None, grid=False,
           wait_for_display_sec=0, cornertext = None, reset_color_cycle = False):
    """
    Plot arbitrary data and continue execution. This program tries to figure out what type of plot to use.
    :param data: Any data.  Hopefully, we at dbplot will be able to figure out a plot for it.
    :param name: A name uniquely identifying this plot.
    :param Union[Callable[[],LinePlot],str,Tuple[Callable, Dict]] plot_type : A specialized constructor to be used the
        first time when plotting.  Several predefined constructors are defined in the DBPlotTypes class - you can pass
        those.  For back-compatibility you can also pass a string matching the name of one of the fields in the DBPlotTypes
        class.
            DBPlotTypes.LINE: Plots a line plot
            DBPlotTypes.IMG: An image plot
            DBPlotTypes.COLOUR: A colour image plot
            DBPlotTypes.PIC: A picture (no scale bars, axis labels, etc).
        You can also, pass a tuple of (constructor, keyword_args) where constructor is a plot-type name or a callable
        plot constructor and keyword_args is a dict of arguments to the plot constructor.
    :param axis: A string identifying which axis to plot on.  By default, it is the same as "name".  Only use this
        argument if you indend to make multiple dbplots share the same axis.
    :param plot_mode: Influences how the data should be used to choose the plot type:
        'live': Best for 'live' plots that you intend to update as new data arrives
        'static': Best for 'static' plots, that you do not intend to update
        'image': Try to represent the plot as an image
    :param draw_now: Draw the plot now (you may choose false if you're going to add another plot immediately after and
        don't want have to draw this one again.
    :param hang: Hang on the plot (wait for it to be closed before continuing)
    :param title: Title of the plot (will default to name if not included)
    :param fig: Name of the figure - use this when you want to create multiple figures.
    :param xlabel: Label for the x-axis (set only when the subplot is first created).
    :param ylabel: Label for the y-axis (set only when the subplot is first created).
    :param draw_every: If given, only actually redraw on every draw_every-th call for this plot.
    :param layout: Subplot layout passed to select_subplot (defaults to the module-wide default layout).
    :param legend: A list of legend entries for this plot.
    :param grid: Turn the grid on
    :param wait_for_display_sec: In server mode, you can choose to wait maximally wait_for_display_sec seconds before this
        call returns. In case plotting is finished earlier, the call returns earlier. Setting wait_for_display_sec to a negative number will cause the call to block until the plot has been displayed.
    :param cornertext: Text to display in the corner of the figure.
    :param reset_color_cycle: Reset the axis colour cycle before plotting this data.
    :return: The matplotlib Axes object that the data was plotted into.
    """
    if is_server_plotting_on():
        # Redirect the function call to the plotting server. The flag gets turned on in a configuration file. It is
        # turned off when this file is run ON the plotting server, from the first line in plotting_server.py
        # (wait_for_display_sec is consumed by the remote call, via locals()).
        arg_locals = locals().copy()
        from artemis.remote.plotting.plotting_client import dbplot_remotely
        dbplot_remotely(arg_locals=arg_locals)
        return
    if data.__class__.__module__ == 'torch' and data.__class__.__name__ == 'Tensor':
        data = data.detach().cpu().numpy()  # Convert torch tensors to numpy (without having to import torch)
    plot_object = _get_dbplot_plot_object(fig)  # type: _PlotWindow
    subplot_dict = plot_object.subplots
    if axis is None:
        axis=name
    if name not in subplot_dict:  # Initialize new axis
        if isinstance(plot_type, str):
            plot = DBPlotTypes.from_string(plot_type)()
        elif isinstance(plot_type, tuple):
            # Fix: the original assert required plot_type[0] to be a str, which made the callable-constructor
            # branch below unreachable even though it was documented and handled.  Allow str OR callable.
            assert len(plot_type)==2 and (isinstance(plot_type[0], str) or callable(plot_type[0])) and isinstance(plot_type[1], dict), 'If you specify a tuple for plot_type, we expect (name_or_constructor, arg_dict). Got: {}'.format(plot_type)
            plot_type_name, plot_type_args = plot_type
            if isinstance(plot_type_name, str):
                plot = DBPlotTypes.from_string(plot_type_name)(**plot_type_args)
            elif callable(plot_type_name):
                plot = plot_type_name(**plot_type_args)
            else:
                raise Exception('The first argument of the plot type tuple must be a plot type name or a callable plot type constructor.')
        elif plot_type is None:
            plot = get_plot_from_data(data, mode=plot_mode)
        else:
            assert hasattr(plot_type, "__call__")
            plot = plot_type()
        if isinstance(axis, SubplotSpec):
            axis = plt.subplot(axis)
        if isinstance(axis, Axes):
            ax = axis
            ax_name = str(axis)
        elif isinstance(axis, string_types) or axis is None:
            ax = select_subplot(axis, fig=plot_object.figure, layout=_default_layout if layout is None else layout)
            ax_name = axis
        else:
            raise Exception("Axis specifier must be a string, an Axis object, or a SubplotSpec object. Not {}".format(axis))
        if ax_name not in plot_object.axes:
            ax.set_title(name)
            plot_object.subplots[name] = _Subplot(axis=ax, plot_object=plot)
            plot_object.axes[ax_name] = ax
        plot_object.subplots[name] = _Subplot(axis=plot_object.axes[ax_name], plot_object=plot)
        plt.sca(plot_object.axes[ax_name])
        # Axis labels, draw throttling and grid are configured only when the subplot is first created.
        if xlabel is not None:
            plot_object.subplots[name].axis.set_xlabel(xlabel)
        if ylabel is not None:
            plot_object.subplots[name].axis.set_ylabel(ylabel)
        if draw_every is not None:
            _draw_counters[fig, name] = Checkpoints(draw_every)
        if grid:
            plt.grid()
    plot = plot_object.subplots[name].plot_object
    if reset_color_cycle:
        use_dbplot_axis(axis, fig=fig, clear=False).set_color_cycle(None)
    plot.update(data)
    # Update Labels...
    if cornertext is not None:
        if not hasattr(plot_object.figure, '__cornertext'):
            plot_object.figure.__cornertext = next(iter(plot_object.subplots.values())).axis.annotate(cornertext, xy=(0, 0), xytext=(0.01, 0.98), textcoords='figure fraction')
        else:
            plot_object.figure.__cornertext.set_text(cornertext)
    if title is not None:
        plot_object.subplots[name].axis.set_title(title)
    if legend is not None:
        plot_object.subplots[name].axis.legend(legend, loc='best', framealpha=0.5)
    # Only redraw when drawing is not held and the per-plot draw_every throttle (if any) fires.
    if draw_now and not _hold_plots and (draw_every is None or ((fig, name) not in _draw_counters) or _draw_counters[fig, name]()):
        plot.plot()
        display_figure(plot_object.figure, hang=hang)
    return plot_object.subplots[name].axis
_PlotWindow = namedtuple('PlotWindow', ['figure', 'subplots', 'axes'])  # One dbplot figure window: the Figure plus its subplot and axes registries
_Subplot = namedtuple('Subplot', ['axis', 'plot_object'])  # One subplot: its Axes and the plot object that draws into it
_DBPLOT_FIGURES = {}  # A dict<figure_name: _PlotWindow(figure, OrderedDict<subplot_name:_Subplot>)>
_DEFAULT_SIZE = get_artemis_config_value(section='plotting', option='default_fig_size', default_generator=lambda: (10, 8), write_default=True, read_method='eval')  # Default figure size in inches, configurable via the artemis config file
_draw_counters = {}  # (fig, name) -> Checkpoints object, used to throttle redraws when draw_every is given
_hold_plots = False  # While True (inside hold_dbplots), dbplot defers drawing
_hold_plot_counter = None  # Checkpoints object used by hold_dbplots' own draw_every throttling
_default_layout = 'grid'  # Default subplot layout passed to select_subplot
class DBPlotTypes:
    """
    Named plot-type constructors for dbplot's plot_type argument.

    Each attribute is either a plot class or a functools.partial pre-configuring one.
    Legacy string names (e.g. 'line', 'bbox-r', 'trajectory+') map onto these
    attributes via from_string.
    """
    # Line-style plots
    LINE= LinePlot
    THICK_LINE= partial(LinePlot, plot_kwargs={'linewidth': 3})
    POS_LINE= partial(LinePlot, y_bounds=(0, None), y_bound_extend=(0, 0.05))
    SCATTER= partial(LinePlot, plot_kwargs=dict(marker='.', markersize=7), linestyle='')
    # Bounding-box plots (optionally with a fixed colour)
    BBOX= partial(BoundingBoxPlot, linewidth=2, axes_update_mode='expand')
    BBOX_R= partial(BoundingBoxPlot, linewidth=2, color='r', axes_update_mode='expand')
    BBOX_B= partial(BoundingBoxPlot, linewidth=2, color='b', axes_update_mode='expand')
    BBOX_G= partial(BoundingBoxPlot, linewidth=2, color='g', axes_update_mode='expand')
    BAR= BarPlot
    # Image plots
    IMG= ImagePlot
    CIMG= partial(ImagePlot, channel_first=True)
    LINE_HISTORY= MovingPointPlot
    IMG_STABLE= partial(ImagePlot, only_grow_clims=True)
    COLOUR= partial(ImagePlot, is_colour_data=True)
    EQUAL_ASPECT= partial(ImagePlot, aspect='equal')
    IMAGE_HISTORY= MovingImagePlot
    # History / trajectory plots
    FIXED_LINE_HISTORY= partial(MovingPointPlot, buffer_len=100)
    LINE_HISTORY_RESAMPLED= partial(ResamplingLineHistory, buffer_len=400)
    PIC= partial(ImagePlot, show_clims=False, aspect='equal')
    NOTICE= partial(TextPlot, max_history=1, horizontal_alignment='center', vertical_alignment='center', size='x-large')
    COST= partial(MovingPointPlot, y_bounds=(0, None), y_bound_extend=(0, 0.05))
    PERCENT= partial(MovingPointPlot, y_bounds=(0, 100))
    TRAJECTORY= partial(Moving2DPointPlot, axes_update_mode='expand')
    TRAJECTORY_PLUS= partial(Moving2DPointPlot, axes_update_mode='expand', x_bounds=(0, None), y_bounds=(0, None))
    # Histogram plots
    HISTOGRAM= partial(HistogramPlot, edges = np.linspace(-5, 5, 20))
    CUMHIST= partial(CumulativeLineHistogram, edges = np.linspace(-5, 5, 20))
    @classmethod
    def from_string(cls, str):  # For back-compatibility
        """Map a legacy string name like 'line', 'bbox-r' or 'trajectory+' to the corresponding attribute."""
        return getattr(cls, str.upper().replace('-', '_').replace('+', '_PLUS'))
def reset_dbplot():
    """Close every dbplot figure and forget all the associated plot state."""
    if is_server_plotting_on():
        deconstruct_plotting_server()
        return
    for fig_name in list(_DBPLOT_FIGURES):
        plt.close(_DBPLOT_FIGURES[fig_name].figure)
        del _DBPLOT_FIGURES[fig_name]
def set_dbplot_figure_size(width, height):
    """Set the default (width, height), in inches, for figures created by dbplot."""
    global _DEFAULT_SIZE
    _DEFAULT_SIZE = width, height
def set_dbplot_default_layout(layout):
    """Set the module-wide default subplot layout used by dbplot (e.g. 'grid')."""
    global _default_layout
    _default_layout = layout
def get_dbplot_figure(name=None):
    """Return the matplotlib Figure registered under the given dbplot figure name."""
    window = _DBPLOT_FIGURES[name]
    return window.figure
def _get_dbplot_plot_object(fig):
    """
    Return the _PlotWindow for the given figure specifier, creating it if needed.

    :param fig: Either a matplotlib Figure (only allowed on the first dbplot call),
        a string figure name, or None for the default figure.
    :return: The _PlotWindow associated with fig.
    """
    if isinstance(fig, plt.Figure):
        assert None not in _DBPLOT_FIGURES, "If you pass a figure, you can only do it on the first call to dbplot (for now)"
        # Register the caller-supplied figure as the default (None) window.
        _DBPLOT_FIGURES[None] = _PlotWindow(figure=fig, subplots=OrderedDict(), axes={})
        fig = None
    elif fig not in _DBPLOT_FIGURES or not plt.fignum_exists(_DBPLOT_FIGURES[fig].figure.number):  # Second condition handles closed figures.
        _DBPLOT_FIGURES[fig] = _PlotWindow(figure = _make_dbplot_figure(), subplots=OrderedDict(), axes = {})
        if fig is not None:
            # NOTE(review): canvas.set_window_title was removed in matplotlib 3.6
            # (use canvas.manager.set_window_title) -- confirm the supported mpl version.
            _DBPLOT_FIGURES[fig].figure.canvas.set_window_title(fig)
    return _DBPLOT_FIGURES[fig]
def get_dbplot_subplot(name, fig_name=None):
    """Return the matplotlib Axes of the named subplot within the given dbplot figure."""
    window = _DBPLOT_FIGURES[fig_name]
    return window.subplots[name].axis
def _make_dbplot_figure():
    """Create a new matplotlib figure, honouring the module-wide default size if one is set."""
    if _DEFAULT_SIZE is None:
        return plt.figure()
    return plt.figure(figsize=_DEFAULT_SIZE)  # This is broken in matplotlib2 for some reason
def freeze_dbplot(name, fig = None):
    """Drop the named subplot from dbplot's records, so later dbplot calls won't update it."""
    _DBPLOT_FIGURES[fig].subplots.pop(name)
def freeze_all_dbplots(fig = None):
    """
    Freeze every subplot of the given dbplot figure (see freeze_dbplot).

    :param fig: Name of the dbplot figure whose subplots should all be frozen.
    """
    # Snapshot the keys first: freeze_dbplot deletes entries from this same dict,
    # and mutating a dict while iterating it raises a RuntimeError.
    for name in list(_DBPLOT_FIGURES[fig].subplots.keys()):
        freeze_dbplot(name, fig=fig)
def dbplot_redraw_all(fig = None, hang = False):
    """
    Re-plot every subplot of the given dbplot figure, then redraw the figure.

    :param fig: Name of the dbplot figure to redraw.
    :param hang: If True, block until the figure window is closed (see display_figure).
    """
    for subplot in _DBPLOT_FIGURES[fig].subplots.values():
        # Make this subplot's axes current before plotting into it.  plt.sca is the
        # supported call for this (and is what dbplot itself uses); passing an Axes
        # object to plt.subplot is not valid in modern matplotlib.
        plt.sca(subplot.axis)
        subplot.plot_object.plot()
    display_figure(_DBPLOT_FIGURES[fig].figure, hang)
def display_figure(fig, hang):
    """
    Show or redraw the given figure according to the hang policy.

    :param fig: A matplotlib Figure.
    :param hang: True to block until the window is closed; None/False to just redraw;
        a number to redraw and then pause for that many seconds.
    """
    if hang is True:
        plt.figure(fig.number)
        plt.show()
        return
    if hang in (None, False):  # note: 0 also lands here, since 0 == False
        redraw_figure(fig)
        return
    if isinstance(hang, (int, float)):
        redraw_figure(fig)
        plt.pause(hang)
        return
    raise TypeError("Can't interpret hang argument {}".format(hang))
@contextmanager
def hold_dbplots(fig = None, hang=False, draw_every = None):
    """
    Use this in a "with" statement to prevent plotting until the end.

    :param fig: Name of the dbplot figure to redraw when the block exits.
    :param hang: Passed on to dbplot_redraw_all for the deferred redraw.
    :param draw_every: If given, only actually redraw on every draw_every-th exit.
    """
    if is_server_plotting_on():
        # For now, this does nothing. Eventually, it should be made to send a "draw" command through the pipe
        yield
        return
    global _hold_plots
    _old_hold_state = _hold_plots
    _hold_plots = True
    try:
        yield
    finally:
        # Restore the previous hold state even if the with-block raised; otherwise an
        # exception inside the block would leave plotting held (deferred) forever.
        _hold_plots = _old_hold_state
    if _old_hold_state:
        plot_now = False  # An outer hold_dbplots is still active; let it do the drawing.
    elif draw_every is not None:
        global _hold_plot_counter
        if _hold_plot_counter is None:
            _hold_plot_counter = Checkpoints(draw_every)
        plot_now = _hold_plot_counter()
    else:
        plot_now = True
    if plot_now and fig in _DBPLOT_FIGURES:
        dbplot_redraw_all(fig, hang = hang)
def clear_dbplot(fig = None):
    """Clear the contents and the subplot records of the given dbplot figure, if it exists."""
    if fig not in _DBPLOT_FIGURES:
        return
    window = _DBPLOT_FIGURES[fig]
    plt.figure(window.figure.number)
    plt.clf()
    window.subplots.clear()
    window.axes.clear()
def use_dbplot_axis(name, fig=None, layout=None, clear = False, ):
    """Select (creating if necessary) the named dbplot axis, make it current, and optionally clear it."""
    chosen_layout = layout if layout is not None else _default_layout
    ax = select_subplot(name, fig=_get_dbplot_plot_object(fig).figure, layout=chosen_layout)
    if clear:
        ax.clear()
    return ax
def dbplot_hang(timeout=None):
    """Block on the open plots: forever if timeout is None, otherwise for timeout seconds."""
    if timeout is None:
        plt.show()
        return
    redraw_figure()
    plt.pause(timeout)
def dbplot_collection(collection, name, hang=False, axis = None, draw_every=None, **kwargs):
    """
    Plot a collection of items in one go.

    :param collection: A list or tuple of plottable data items.
    :param name: Base name; element i is plotted under the name 'name[i]'.
    :param kwargs: Forwarded to dbplot for each element.
    """
    with hold_dbplots(draw_every=draw_every, hang=hang):
        if not isinstance(collection, (list, tuple)):
            return
        for index, item in enumerate(collection):
            element_axis = '{}[{}]'.format(axis, index) if axis is not None else None
            dbplot(item, '{}[{}]'.format(name, index), axis=element_axis, **kwargs)
| [
"artemis.general.checkpoint_counter.Checkpoints",
"matplotlib.pyplot.clf",
"artemis.config.get_artemis_config_value",
"matplotlib.pyplot.figure",
"artemis.remote.plotting.plotting_client.dbplot_remotely",
"matplotlib.pyplot.close",
"artemis.plotting.drawing_plots.redraw_figure",
"artemis.plotting.expa... | [((878, 901), 'artemis.plotting.matplotlib_backend.is_server_plotting_on', 'is_server_plotting_on', ([], {}), '()\n', (899, 901), False, 'from artemis.plotting.matplotlib_backend import LinePlot, ImagePlot, is_server_plotting_on\n'), ((8117, 8173), 'collections.namedtuple', 'namedtuple', (['"""PlotWindow"""', "['figure', 'subplots', 'axes']"], {}), "('PlotWindow', ['figure', 'subplots', 'axes'])\n", (8127, 8173), False, 'from collections import OrderedDict, namedtuple\n'), ((8186, 8232), 'collections.namedtuple', 'namedtuple', (['"""Subplot"""', "['axis', 'plot_object']"], {}), "('Subplot', ['axis', 'plot_object'])\n", (8196, 8232), False, 'from collections import OrderedDict, namedtuple\n'), ((8353, 8504), 'artemis.config.get_artemis_config_value', 'get_artemis_config_value', ([], {'section': '"""plotting"""', 'option': '"""default_fig_size"""', 'default_generator': '(lambda : (10, 8))', 'write_default': '(True)', 'read_method': '"""eval"""'}), "(section='plotting', option='default_fig_size',\n default_generator=lambda : (10, 8), write_default=True, read_method='eval')\n", (8377, 8504), False, 'from artemis.config import get_artemis_config_value\n'), ((4174, 4197), 'artemis.plotting.matplotlib_backend.is_server_plotting_on', 'is_server_plotting_on', ([], {}), '()\n', (4195, 4197), False, 'from artemis.plotting.matplotlib_backend import LinePlot, ImagePlot, is_server_plotting_on\n'), ((8651, 8698), 'functools.partial', 'partial', (['LinePlot'], {'plot_kwargs': "{'linewidth': 3}"}), "(LinePlot, plot_kwargs={'linewidth': 3})\n", (8658, 8698), False, 'from functools import partial\n'), ((8713, 8776), 'functools.partial', 'partial', (['LinePlot'], {'y_bounds': '(0, None)', 'y_bound_extend': '(0, 0.05)'}), '(LinePlot, y_bounds=(0, None), y_bound_extend=(0, 0.05))\n', (8720, 8776), False, 'from functools import partial\n'), ((8876, 8940), 'functools.partial', 'partial', (['BoundingBoxPlot'], {'linewidth': '(2)', 'axes_update_mode': 
'"""expand"""'}), "(BoundingBoxPlot, linewidth=2, axes_update_mode='expand')\n", (8883, 8940), False, 'from functools import partial\n'), ((8953, 9028), 'functools.partial', 'partial', (['BoundingBoxPlot'], {'linewidth': '(2)', 'color': '"""r"""', 'axes_update_mode': '"""expand"""'}), "(BoundingBoxPlot, linewidth=2, color='r', axes_update_mode='expand')\n", (8960, 9028), False, 'from functools import partial\n'), ((9041, 9116), 'functools.partial', 'partial', (['BoundingBoxPlot'], {'linewidth': '(2)', 'color': '"""b"""', 'axes_update_mode': '"""expand"""'}), "(BoundingBoxPlot, linewidth=2, color='b', axes_update_mode='expand')\n", (9048, 9116), False, 'from functools import partial\n'), ((9129, 9204), 'functools.partial', 'partial', (['BoundingBoxPlot'], {'linewidth': '(2)', 'color': '"""g"""', 'axes_update_mode': '"""expand"""'}), "(BoundingBoxPlot, linewidth=2, color='g', axes_update_mode='expand')\n", (9136, 9204), False, 'from functools import partial\n'), ((9251, 9289), 'functools.partial', 'partial', (['ImagePlot'], {'channel_first': '(True)'}), '(ImagePlot, channel_first=True)\n', (9258, 9289), False, 'from functools import partial\n'), ((9340, 9380), 'functools.partial', 'partial', (['ImagePlot'], {'only_grow_clims': '(True)'}), '(ImagePlot, only_grow_clims=True)\n', (9347, 9380), False, 'from functools import partial\n'), ((9393, 9432), 'functools.partial', 'partial', (['ImagePlot'], {'is_colour_data': '(True)'}), '(ImagePlot, is_colour_data=True)\n', (9400, 9432), False, 'from functools import partial\n'), ((9451, 9485), 'functools.partial', 'partial', (['ImagePlot'], {'aspect': '"""equal"""'}), "(ImagePlot, aspect='equal')\n", (9458, 9485), False, 'from functools import partial\n'), ((9545, 9585), 'functools.partial', 'partial', (['MovingPointPlot'], {'buffer_len': '(100)'}), '(MovingPointPlot, buffer_len=100)\n', (9552, 9585), False, 'from functools import partial\n'), ((9614, 9660), 'functools.partial', 'partial', (['ResamplingLineHistory'], 
{'buffer_len': '(400)'}), '(ResamplingLineHistory, buffer_len=400)\n', (9621, 9660), False, 'from functools import partial\n'), ((9670, 9722), 'functools.partial', 'partial', (['ImagePlot'], {'show_clims': '(False)', 'aspect': '"""equal"""'}), "(ImagePlot, show_clims=False, aspect='equal')\n", (9677, 9722), False, 'from functools import partial\n'), ((9735, 9847), 'functools.partial', 'partial', (['TextPlot'], {'max_history': '(1)', 'horizontal_alignment': '"""center"""', 'vertical_alignment': '"""center"""', 'size': '"""x-large"""'}), "(TextPlot, max_history=1, horizontal_alignment='center',\n vertical_alignment='center', size='x-large')\n", (9742, 9847), False, 'from functools import partial\n'), ((9854, 9924), 'functools.partial', 'partial', (['MovingPointPlot'], {'y_bounds': '(0, None)', 'y_bound_extend': '(0, 0.05)'}), '(MovingPointPlot, y_bounds=(0, None), y_bound_extend=(0, 0.05))\n', (9861, 9924), False, 'from functools import partial\n'), ((9938, 9981), 'functools.partial', 'partial', (['MovingPointPlot'], {'y_bounds': '(0, 100)'}), '(MovingPointPlot, y_bounds=(0, 100))\n', (9945, 9981), False, 'from functools import partial\n'), ((9998, 10051), 'functools.partial', 'partial', (['Moving2DPointPlot'], {'axes_update_mode': '"""expand"""'}), "(Moving2DPointPlot, axes_update_mode='expand')\n", (10005, 10051), False, 'from functools import partial\n'), ((10073, 10170), 'functools.partial', 'partial', (['Moving2DPointPlot'], {'axes_update_mode': '"""expand"""', 'x_bounds': '(0, None)', 'y_bounds': '(0, None)'}), "(Moving2DPointPlot, axes_update_mode='expand', x_bounds=(0, None),\n y_bounds=(0, None))\n", (10080, 10170), False, 'from functools import partial\n'), ((10500, 10523), 'artemis.plotting.matplotlib_backend.is_server_plotting_on', 'is_server_plotting_on', ([], {}), '()\n', (10521, 10523), False, 'from artemis.plotting.matplotlib_backend import LinePlot, ImagePlot, is_server_plotting_on\n'), ((12988, 13011), 
'artemis.plotting.matplotlib_backend.is_server_plotting_on', 'is_server_plotting_on', ([], {}), '()\n', (13009, 13011), False, 'from artemis.plotting.matplotlib_backend import LinePlot, ImagePlot, is_server_plotting_on\n'), ((4547, 4585), 'artemis.remote.plotting.plotting_client.dbplot_remotely', 'dbplot_remotely', ([], {'arg_locals': 'arg_locals'}), '(arg_locals=arg_locals)\n', (4562, 4585), False, 'from artemis.remote.plotting.plotting_client import dbplot_remotely\n'), ((6750, 6784), 'matplotlib.pyplot.sca', 'plt.sca', (['plot_object.axes[ax_name]'], {}), '(plot_object.axes[ax_name])\n', (6757, 6784), True, 'from matplotlib import pyplot as plt\n'), ((10533, 10562), 'artemis.remote.plotting.plotting_client.deconstruct_plotting_server', 'deconstruct_plotting_server', ([], {}), '()\n', (10560, 10562), False, 'from artemis.remote.plotting.plotting_client import deconstruct_plotting_server\n'), ((11867, 11879), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11877, 11879), True, 'from matplotlib import pyplot as plt\n'), ((11903, 11936), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '_DEFAULT_SIZE'}), '(figsize=_DEFAULT_SIZE)\n', (11913, 11936), True, 'from matplotlib import pyplot as plt\n'), ((12333, 12358), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot.axis'], {}), '(subplot.axis)\n', (12344, 12358), True, 'from matplotlib import pyplot as plt\n'), ((12511, 12533), 'matplotlib.pyplot.figure', 'plt.figure', (['fig.number'], {}), '(fig.number)\n', (12521, 12533), True, 'from matplotlib import pyplot as plt\n'), ((12542, 12552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12550, 12552), True, 'from matplotlib import pyplot as plt\n'), ((13725, 13771), 'matplotlib.pyplot.figure', 'plt.figure', (['_DBPLOT_FIGURES[fig].figure.number'], {}), '(_DBPLOT_FIGURES[fig].figure.number)\n', (13735, 13771), True, 'from matplotlib import pyplot as plt\n'), ((13780, 13789), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', 
(13787, 13789), True, 'from matplotlib import pyplot as plt\n'), ((14184, 14194), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14192, 14194), True, 'from matplotlib import pyplot as plt\n'), ((14213, 14228), 'artemis.plotting.drawing_plots.redraw_figure', 'redraw_figure', ([], {}), '()\n', (14226, 14228), False, 'from artemis.plotting.drawing_plots import redraw_figure\n'), ((14237, 14255), 'matplotlib.pyplot.pause', 'plt.pause', (['timeout'], {}), '(timeout)\n', (14246, 14255), True, 'from matplotlib import pyplot as plt\n'), ((5965, 5982), 'matplotlib.pyplot.subplot', 'plt.subplot', (['axis'], {}), '(axis)\n', (5976, 5982), True, 'from matplotlib import pyplot as plt\n'), ((7048, 7071), 'artemis.general.checkpoint_counter.Checkpoints', 'Checkpoints', (['draw_every'], {}), '(draw_every)\n', (7059, 7071), False, 'from artemis.general.checkpoint_counter import Checkpoints\n'), ((7102, 7112), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7110, 7112), True, 'from matplotlib import pyplot as plt\n'), ((10213, 10235), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(20)'], {}), '(-5, 5, 20)\n', (10224, 10235), True, 'import numpy as np\n'), ((10291, 10313), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(20)'], {}), '(-5, 5, 20)\n', (10302, 10313), True, 'import numpy as np\n'), ((10653, 10682), 'matplotlib.pyplot.close', 'plt.close', (['plot_window.figure'], {}), '(plot_window.figure)\n', (10662, 10682), True, 'from matplotlib import pyplot as plt\n'), ((12593, 12611), 'artemis.plotting.drawing_plots.redraw_figure', 'redraw_figure', (['fig'], {}), '(fig)\n', (12606, 12611), False, 'from artemis.plotting.drawing_plots import redraw_figure\n'), ((6150, 6253), 'artemis.plotting.expanding_subplots.select_subplot', 'select_subplot', (['axis'], {'fig': 'plot_object.figure', 'layout': '(_default_layout if layout is None else layout)'}), '(axis, fig=plot_object.figure, layout=_default_layout if \n layout is None else layout)\n', (6164, 6253), 
False, 'from artemis.plotting.expanding_subplots import select_subplot\n'), ((11266, 11279), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11277, 11279), False, 'from collections import OrderedDict, namedtuple\n'), ((11352, 11405), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['_DBPLOT_FIGURES[fig].figure.number'], {}), '(_DBPLOT_FIGURES[fig].figure.number)\n', (11369, 11405), True, 'from matplotlib import pyplot as plt\n'), ((12661, 12679), 'artemis.plotting.drawing_plots.redraw_figure', 'redraw_figure', (['fig'], {}), '(fig)\n', (12674, 12679), False, 'from artemis.plotting.drawing_plots import redraw_figure\n'), ((12688, 12703), 'matplotlib.pyplot.pause', 'plt.pause', (['hang'], {}), '(hang)\n', (12697, 12703), True, 'from matplotlib import pyplot as plt\n'), ((13467, 13490), 'artemis.general.checkpoint_counter.Checkpoints', 'Checkpoints', (['draw_every'], {}), '(draw_every)\n', (13478, 13490), False, 'from artemis.general.checkpoint_counter import Checkpoints\n'), ((5767, 5807), 'artemis.plotting.matplotlib_backend.get_plot_from_data', 'get_plot_from_data', (['data'], {'mode': 'plot_mode'}), '(data, mode=plot_mode)\n', (5785, 5807), False, 'from artemis.plotting.matplotlib_backend import get_plot_from_data, TextPlot, MovingPointPlot, Moving2DPointPlot, MovingImagePlot, HistogramPlot, CumulativeLineHistogram\n'), ((11535, 11548), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11546, 11548), False, 'from collections import OrderedDict, namedtuple\n')] |
import numpy as np
import torch
import shadow.losses
def test_softmax_mse_loss():
    """Simple test for softmax mse loss."""
    student = torch.tensor([[0.5, 1.]])
    teacher = torch.tensor([[1., 1.]])
    # softmax(student) is (0.3775, 0.6225) and softmax(teacher) is (0.5, 0.5).
    # The loss returns the sum over all elements (not the mean), so we expect ~0.015.
    loss = shadow.losses.softmax_mse_loss(student, teacher)
    assert np.allclose(loss.data.cpu().numpy(), np.array(0.015), rtol=1e-3)
def test_softmax_kl_loss():
    """Simple test for softmax kl loss."""
    student = torch.tensor([[0.5, 1.]])
    teacher = torch.tensor([[1., 1.]])
    # softmax(student) is (0.3775, 0.6225) and softmax(teacher) is (0.5, 0.5).
    # TODO: taking its word that the kl loss is 0.0309. Determine independently?
    loss = shadow.losses.softmax_kl_loss(student, teacher)
    assert np.allclose(loss.data.cpu().numpy(), np.array(0.0309), rtol=1e-3)
| [
"numpy.array",
"torch.tensor"
] | [((148, 174), 'torch.tensor', 'torch.tensor', (['[[0.5, 1.0]]'], {}), '([[0.5, 1.0]])\n', (160, 174), False, 'import torch\n'), ((194, 220), 'torch.tensor', 'torch.tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (206, 220), False, 'import torch\n'), ((652, 678), 'torch.tensor', 'torch.tensor', (['[[0.5, 1.0]]'], {}), '([[0.5, 1.0]])\n', (664, 678), False, 'import torch\n'), ((698, 724), 'torch.tensor', 'torch.tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (710, 724), False, 'import torch\n'), ((531, 546), 'numpy.array', 'np.array', (['(0.015)'], {}), '(0.015)\n', (539, 546), True, 'import numpy as np\n'), ((1007, 1023), 'numpy.array', 'np.array', (['(0.0309)'], {}), '(0.0309)\n', (1015, 1023), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# extract Balvan data from tifs
import pandas as pd
import skimage.io as skio
import skimage.transform as skt
import skimage.util as sku
from tqdm import tqdm
from glob import glob
import os, random, math, cv2, re
import numpy as np
import matplotlib.pyplot as plt
# %%
# Convert the Zurich Summer tif images into 8-bit PNGs, splitting each
# multispectral tif into an RGB image and a separate IR channel image.
src_dir = './Datasets/Zurich_dataset_v1.0/images_tif'
tar_dir = './Datasets/Zurich'
dir_IR = f'{tar_dir}/IR'
dir_RGB = f'{tar_dir}/RGB'
# Create the output directories if they do not exist yet.
if not os.path.exists(dir_IR):
    os.makedirs(dir_IR)
if not os.path.exists(dir_RGB):
    os.makedirs(dir_RGB)
names_tif = [os.path.basename(p) for p in glob(f'{src_dir}/*.tif')]
#patt_tif = re.compile(r'zh(\d+).tif')
# find global max and min
# First pass: scan every tif to collect the global per-channel max/min
# (and the image shapes) so that all images share one normalisation range.
maxis = []
minis = []
shapes = []
for id_tif in range(1, len(names_tif)+1):
    name_tif = f'zh{id_tif}.tif'
    # m = patt_tif.match(name_tif)
    # if m:
    #     (id_tif, ) = m.groups()
    img = skio.imread(f'{src_dir}/{name_tif}')
    maxis.append(img.max(axis=(0,1)))
    minis.append(img.min(axis=(0,1)))
    shapes.append(img.shape)
ch_maxis = np.asarray(maxis).max(axis=0) # maximum value in each channel
ch_minis = np.asarray(minis).min(axis=0) # minimum value in each channel
shapes = np.asarray(shapes)
# Second pass: normalise each image with the global range, convert to
# uint8, and save the leading channels as RGB and the last channel as IR.
# NOTE(review): this assumes the IR band is the *last* channel of each
# tif -- confirm against the dataset documentation.
for id_tif in tqdm(range(1, len(names_tif)+1)):
    name_tif = f'zh{id_tif}.tif'
    img = skio.imread(f'{src_dir}/{name_tif}')
    img_norm = (img - ch_minis) / (ch_maxis - ch_minis + 1e-15) # channel-wise normalise to 0-1
    img_norm = sku.img_as_ubyte(img_norm) # convert to uint8
    # skio.imshow(img_norm[..., :0:-1])
    img_RGB = img_norm[..., :-1]
    img_IR = img_norm[..., -1]
    name_save = f'zh{id_tif}.png'
    skio.imsave(f'{dir_RGB}/{name_save}', img_RGB)
    skio.imsave(f'{dir_IR}/{name_save}', img_IR)
# # visualise
# p_low_RGB = np.percentile(img_RGB, 1, axis=None)
# p_high_RGB = np.percentile(img_RGB, 99, axis=None)
# p_low_IR = np.percentile(img_IR, 1, axis=None)
# p_high_IR = np.percentile(img_IR, 99, axis=None)
#
# img_en = np.empty(img.shape, dtype='uint8')
# img_en[..., :-1] = cv2.normalize(
#     src=np.clip(img_RGB, p_low_RGB, p_high_RGB),
#     dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
# img_en[..., -1] = img_IR = cv2.normalize(
#     src=np.clip(img_IR, p_low_IR, p_high_IR),
#     dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
#
# skio.imshow(img_en[..., :0:-1])
| [
"os.makedirs",
"os.path.basename",
"skimage.util.img_as_ubyte",
"numpy.asarray",
"os.path.exists",
"glob.glob",
"skimage.io.imsave",
"skimage.io.imread"
] | [((1242, 1260), 'numpy.asarray', 'np.asarray', (['shapes'], {}), '(shapes)\n', (1252, 1260), True, 'import numpy as np\n'), ((459, 481), 'os.path.exists', 'os.path.exists', (['dir_IR'], {}), '(dir_IR)\n', (473, 481), False, 'import os, random, math, cv2, re\n'), ((488, 507), 'os.makedirs', 'os.makedirs', (['dir_IR'], {}), '(dir_IR)\n', (499, 507), False, 'import os, random, math, cv2, re\n'), ((516, 539), 'os.path.exists', 'os.path.exists', (['dir_RGB'], {}), '(dir_RGB)\n', (530, 539), False, 'import os, random, math, cv2, re\n'), ((546, 566), 'os.makedirs', 'os.makedirs', (['dir_RGB'], {}), '(dir_RGB)\n', (557, 566), False, 'import os, random, math, cv2, re\n'), ((585, 604), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (601, 604), False, 'import os, random, math, cv2, re\n'), ((931, 967), 'skimage.io.imread', 'skio.imread', (['f"""{src_dir}/{name_tif}"""'], {}), "(f'{src_dir}/{name_tif}')\n", (942, 967), True, 'import skimage.io as skio\n'), ((1361, 1397), 'skimage.io.imread', 'skio.imread', (['f"""{src_dir}/{name_tif}"""'], {}), "(f'{src_dir}/{name_tif}')\n", (1372, 1397), True, 'import skimage.io as skio\n'), ((1513, 1539), 'skimage.util.img_as_ubyte', 'sku.img_as_ubyte', (['img_norm'], {}), '(img_norm)\n', (1529, 1539), True, 'import skimage.util as sku\n'), ((1707, 1753), 'skimage.io.imsave', 'skio.imsave', (['f"""{dir_RGB}/{name_save}"""', 'img_RGB'], {}), "(f'{dir_RGB}/{name_save}', img_RGB)\n", (1718, 1753), True, 'import skimage.io as skio\n'), ((1759, 1803), 'skimage.io.imsave', 'skio.imsave', (['f"""{dir_IR}/{name_save}"""', 'img_IR'], {}), "(f'{dir_IR}/{name_save}', img_IR)\n", (1770, 1803), True, 'import skimage.io as skio\n'), ((614, 638), 'glob.glob', 'glob', (['f"""{src_dir}/*.tif"""'], {}), "(f'{src_dir}/*.tif')\n", (618, 638), False, 'from glob import glob\n'), ((1090, 1107), 'numpy.asarray', 'np.asarray', (['maxis'], {}), '(maxis)\n', (1100, 1107), True, 'import numpy as np\n'), ((1167, 1184), 'numpy.asarray', 'np.asarray', 
(['minis'], {}), '(minis)\n', (1177, 1184), True, 'import numpy as np\n')] |
from __future__ import division
from builtins import next
from builtins import str
from builtins import range
from past.utils import old_div
import re
import numpy as np
def parse_file(self):
    """Dispatch log parsing to the handler matching ``self.prog``.

    Unknown program names are silently ignored, matching the original
    if/elif behaviour.
    """
    handlers = {"GAUSSIAN": parse_file_gaussian, "CQ": parse_file_cq}
    handler = handlers.get(self.prog)
    if handler is not None:
        handler(self)
def parse_file_cq(self):
    """Parse ChronusQ output files (applied-field and dipole CSVs).

    All ChronusQ quantities are in atomic units.  Fills ``self.time``,
    ``self.electricField`` (x/y/z), ``self.total_steps``,
    ``self.step_size`` (only when at least two steps exist),
    ``self.energy`` and ``self.electricDipole`` (x/y/z).
    """
    # Applied-field CSV: drop the header row, then columns are
    # time, Ex, Ey, Ez.
    FieldData = np.genfromtxt(self.fieldFile, delimiter=',')
    FieldData = np.delete(FieldData, 0, 0)
    self.time = np.asarray(FieldData[:, 0])
    self.electricField.x = np.asarray(FieldData[:, 1])
    self.electricField.y = np.asarray(FieldData[:, 2])
    self.electricField.z = np.asarray(FieldData[:, 3])
    self.total_steps = len(self.time)
    # self.time[1] only exists with at least two samples; the previous
    # `if self.total_steps:` raised IndexError for a single-step file.
    if self.total_steps > 1:
        self.step_size = self.time[1] - self.time[0]
    # Dipole CSV (also carries the energy): columns are
    # time, energy, mu_x, mu_y, mu_z.  Dipoles are scaled by the
    # Debye -> a.u. conversion factor 0.393456.
    DipoleData = np.genfromtxt(self.dipoleFile, delimiter=',')
    DipoleData = np.delete(DipoleData, 0, 0)
    self.energy = np.asarray(DipoleData[:, 1])
    self.electricDipole.x = np.asarray(DipoleData[:, 2]) * 0.393456
    self.electricDipole.y = np.asarray(DipoleData[:, 3]) * 0.393456
    self.electricDipole.z = np.asarray(DipoleData[:, 4]) * 0.393456
def gau2float(gstr):
    """Convert a Fortran/Gaussian 'D'-exponent literal to a float.

    Gaussian prints doubles such as ``1.23D+04``; Python's ``float`` only
    understands ``E`` exponents, so the ``D`` is mapped to ``E`` first.
    Plain literals without an exponent (e.g. ``"3.25"``) are also
    accepted, which the previous ``split('D')`` implementation rejected
    with an ``IndexError``.  The result is also correctly rounded, unlike
    the old ``mantissa * 10 ** exponent`` arithmetic.
    """
    return float(gstr.replace('D', 'E').replace('d', 'e'))
def parse_file_gaussian(self):
    """Extract important attributes from the Gaussian realtime logfile.

    Scans ``self.logfile`` line by line and fills, when present in the
    log: the time axis, energies, electric/magnetic dipoles, applied
    electric/magnetic fields, external-field envelope parameters, raw
    IOp settings, and MMPol dipole snapshots.  Electric dipoles printed
    in Debye are converted to atomic units with the factor 0.393456.
    """
    filename = self.logfile
    fin = open(filename)
    # Per-step accumulators, appended to as the log is scanned.
    muX = []
    muY = []
    muZ = []
    mX = []
    mY = []
    mZ = []
    eX = []
    eY = []
    eZ = []
    bX = []
    bY = []
    bZ = []
    t = []
    en = []
    #FIXME: FOR H2+ RABI ONLY
    HOMO= []
    LUMO= []
    #FIXME: FOR MMPOL ONLY
    NAtMM = 0
    NumPol = 0
    NumChg = 0
    MMDip = []
    MMChg = []
    MMCDip = []
    MMCChg = []
    # Single pass over the logfile; each marker string selects a parser
    # branch, several of which consume extra lines via next(fin).
    for line in fin:
        # Matches the overlay-5 route section carrying the l512 IOps.
        r = re.findall(r'5/.*/12',line)
        if 'External field Parameters' in line:
            self.envelope['Field'] = True
            # The field block spans the next 15 lines of the log.
            for j in range(15):
                line = next(fin)
                if 'Envelope' in line.split()[0]:
                    self.envelope['Envelope'] = line.split()[2] # string
                elif 'Gauge' in line.split()[0]:
                    self.envelope['Gauge'] = line.split()[2] # string
                elif 'Ex' in line.split()[0]:
                    self.envelope['Ex'] = float(line.split()[2]) # au
                elif 'Ey' in line.split()[0]:
                    self.envelope['Ey'] = float(line.split()[2]) # au
                elif 'Ez' in line.split()[0]:
                    self.envelope['Ez'] = float(line.split()[2]) # au
                elif 'Bx' in line.split()[0]:
                    self.envelope['Bx'] = float(line.split()[2]) # au
                elif 'By' in line.split()[0]:
                    self.envelope['By'] = float(line.split()[2]) # au
                elif 'Bz' in line.split()[0]:
                    self.envelope['Bz'] = float(line.split()[2]) # au
                elif 'Range' in line.split()[0]:
                    self.envelope['Sigma'] = float(line.split()[5]) # au
                elif 'Frequency' in line.split()[0]:
                    self.envelope['Frequency'] = float(line.split()[2]) # au
                elif 'Phase' in line.split()[0]:
                    self.envelope['Phase'] = float(line.split()[2]) # au
                elif 't(on)' in line.split()[0]:
                    self.envelope['TOn'] = float(line.split()[2]) # au
                elif 't(off)' in line.split()[0]:
                    # Exception to fix user setting Toff to obscenely large values
                    try:
                        self.envelope['TOff'] = float(line.split()[2]) # au
                    except ValueError:
                        self.envelope['TOff'] = 100000000.000 # au
                elif 'Terms' in line.split()[0]:
                    self.envelope['Terms'] = line.split()[3:] # multistring
                #break
        elif 'No external field applied.' in line:
            self.envelope['Field'] = False
        elif r:
            # Collect raw IOps from the route line, e.g. "5/33=1,138=2/12".
            iops = r[0].split('/')[1:-1][0].split(',')
            for iop in iops:
                key = iop.split('=')[0]
                val = iop.split('=')[1]
                self.iops[key] = [val]
        elif 'Number of steps =' in line:
            self.total_steps = int(line.split()[4])
        elif 'Step size =' in line:
            self.step_size = float(line.split()[3])
        elif 'Orthonormalization method =' in line:
            self.orthonorm = line.split()[3]
        elif 'Alpha orbital occupation numbers:' in line:
            #FIXME ONLY FOR H2+ RABI
            line = next(fin)
            HOMO.append(float(line.split()[0]))
            try:
                LUMO.append(float(line.split()[1]))
            except IndexError:
                LUMO.append(0.0)
        elif 'Time =' in line:
            time = line.split()
            t.append(float(time[2]))
        elif 'Dipole Moment (Debye)' in line:
            # Values are on the following line; convert Debye -> a.u.
            line = next(fin)
            dipole = line.split()
            muX.append(float(dipole[1])*0.393456)
            muY.append(float(dipole[3])*0.393456)
            muZ.append(float(dipole[5])*0.393456)
        elif 'Magnetic Dipole Moment (a.u.):' in line:
            line = next(fin)
            dipole = line.split()
            mX.append(float(dipole[1]))
            mY.append(float(dipole[3]))
            mZ.append(float(dipole[5]))
        elif 'Energy =' in line:
            energy = line.split()
            en.append(float(energy[2]))
        elif 'Current electric field (a.u.):' in line:
            line = next(fin)
            efield = line.split()
            eX.append(float(efield[1]))
            eY.append(float(efield[3]))
            eZ.append(float(efield[5]))
        elif 'Current magnetic field (a.u.):' in line:
            line = next(fin)
            bfield = line.split()
            bX.append(float(bfield[1]))
            bY.append(float(bfield[3]))
            bZ.append(float(bfield[5]))
        elif 'Current electromagnetic field (a.u.):' in line:
            # Electric line first, then the magnetic line.
            line = next(fin)
            efield = line.split()
            line = next(fin)
            bfield = line.split()
            eX.append(float(efield[1]))
            eY.append(float(efield[3]))
            eZ.append(float(efield[5]))
            bX.append(float(bfield[1]))
            bY.append(float(bfield[3]))
            bZ.append(float(bfield[5]))
        elif 'Restart MMUT every' in line:
            self.mmut_restart = line.split()[3]
        elif 'QM/MM Polarizable Model (MMPol)' in line:
            # Read the MMPol header section until its closing rule line.
            while '------------------------------' not in line:
                line = next(fin)
                if 'Total number of MM sites' in line:
                    NAtMM = int(line.split()[-1])
                    self.NAtMM = NAtMM
                elif 'Number of charges' in line:
                    NumChg = int(line.split()[-1])
                    self.NumChg = NumChg
                elif 'Number of induced dipoles' in line:
                    NumPol = int(line.split()[-1])
                    self.NumPol = NumPol
        elif 'MMPol Dipoles:' in line or 'Dipoles:' in line:
            # Dipoles are printed five sites per block: one index line,
            # then one line each for the x, y, z components.
            NDipLn = int(np.ceil(float(NumPol)/5.0))
            dips = np.zeros((NumPol,3))
            for lidx in range(NDipLn):
                line = next(fin)
                didx = [int(idx)-1 for idx in line.split()]
                for xyz in range(3):
                    line = next(fin)
                    dips[didx,xyz] = np.array([gau2float(num) for num in line.split()[1:]])
            MMDip.append(dips)
    # Save to object, if it exists (leave attributes unset for data the
    # log did not contain).
    if(muX and muY and muZ):
        self.electricDipole.x = np.asarray(muX)
        self.electricDipole.y = np.asarray(muY)
        self.electricDipole.z = np.asarray(muZ)
    if(mX and mY and mZ):
        self.magneticDipole.x = np.asarray(mX)
        self.magneticDipole.y = np.asarray(mY)
        self.magneticDipole.z = np.asarray(mZ)
    if(eX and eY and eZ):
        self.electricField.x = np.asarray(eX)
        self.electricField.y = np.asarray(eY)
        self.electricField.z = np.asarray(eZ)
    if(bX and bY and bZ):
        self.magneticField.x = np.asarray(bX)
        self.magneticField.y = np.asarray(bY)
        self.magneticField.z = np.asarray(bZ)
    if MMDip:
        self.MMDip = np.array(MMDip)
    if(t):
        self.time = np.asarray(t)
    if(en):
        self.energy = np.asarray(en)
    #FIXME FOR H2+ RABI ONLY
    if(HOMO):
        self.HOMO = np.asarray(HOMO)
    if(LUMO):
        self.LUMO = np.asarray(LUMO)
def clean_data(self):
    """Make all the data arrays the same length, in case the log file
    did not finish a full time step (e.g. you killed the job early or are
    monitoring a job in progress).  Optionally delete redundant time steps
    recorded when MMUT restarts (currently disabled via ``doMMUT = False``)."""
    def get_length(data):
        """Get length of array. If array is 'None', make it seem impossibly
        large"""
        # An empty array must never win the min() below, so report a huge
        # sentinel length for it instead of 0.
        if data.size:
            return len(data)
        else:
            return 1e100
    # if doMMUT == True, we will delete duplicate data from MMUT restart
    doMMUT = False
    lengths = []
    for x in self.propertyarrays:
        try:
            # If it is an array, remove MMUT steps, and grab its length
            #FIXME Not sure if MMUT steps are actually double printed in latest
            if (doMMUT):
                self.__dict__[x] = np.delete(self.__dict__[x],
                    list(range(int(self.mmut_restart)-1,
                        self.__dict__[x].shape[0],
                        int(self.mmut_restart))),
                    axis=0)
            lengths.append(get_length(self.__dict__[x]))
        except AttributeError:
            try:
                # Dipoles, fields, etc., are objects and we want their x/y/z
                for q in ['_x','_y','_z']:
                    #FIXME Again, not sure about MMUT duplicates
                    if (doMMUT):
                        self.__dict__[x].__dict__[q] = \
                            np.delete(self.__dict__[x].__dict__[q],
                                list(range(int(self.mmut_restart)-1,
                                    self.__dict__[x].__dict__[q].shape[0],
                                    int(self.mmut_restart))),
                                axis=0)
                    lengths.append(get_length(self.__dict__[x].__dict__[q]))
            except:
                #print "Unknown data type: "+str(x)+str(q)
                pass
    self.min_length = min(lengths)
    # truncate all the arrays so they are the same length
    truncate(self,self.min_length)
def truncate(self,length):
    """Trim every property array on *self* to at most *length* entries.

    Sliceable members are cut directly; vector-like members (dipoles,
    fields, ...) have their ``_x``/``_y``/``_z`` components cut instead.
    Members of any other shape are left untouched.
    """
    for name in self.propertyarrays:
        attrs = self.__dict__
        try:
            # Sliceable member: truncate it in place.
            attrs[name] = attrs[name][:length]
        except TypeError:
            try:
                # Vector member: truncate each Cartesian component.
                for comp in ('_x', '_y', '_z'):
                    member = attrs[name].__dict__
                    member[comp] = member[comp][:length]
            except:
                # Neither shape we understand -- leave the member alone.
                pass
def decode_iops(self):
    """Translate raw IOp key/value pairs into human-readable descriptions.

    For each IOp collected from the logfile, appends one or more
    explanatory strings to ``self.iops[iop]`` describing what that option
    value means for the realtime calculation.  Integer divisions that
    previously went through ``past.utils.old_div`` use ``//`` directly
    (identical for the int operands involved).
    """
    for iop in self.iops:
        # OLD
        if iop == '132':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('Ehrenfest: do 10 Microiterations')
            elif key < 0:
                self.iops[iop].append('Ehrenfest: Frozen Nuclei')
            else:
                self.iops[iop].append(str(key)+' Fock updates per nuclear step')
        elif iop == '134':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('0.05 au step size')
            else:
                self.iops[iop].append(str(key*0.00001)+' au step size')
        elif iop == '133':
            key = int(self.iops[iop][0])
            if (key % 10) == 0:
                self.iops[iop].append('First call to l512')
            elif (key % 10) == 1:
                self.iops[iop].append('First call to l512')
            elif (key % 10) == 2:
                self.iops[iop].append('Not first call to l512')
        elif iop == '177':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('Propagation for 50 steps')
            else:
                self.iops[iop].append('Propagation for '+str(abs(key))+' steps')
        elif iop == '136':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('Lowdin')
            elif key == 1:
                self.iops[iop].append('Lowdin')
            elif key == 2:
                self.iops[iop].append('Cholesky')
        elif iop == '137':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('')
            else:
                self.iops[iop].append('')
        elif iop == '138':
            key = int(self.iops[iop][0])
            # Digits of the (mod 1000) value select the multipole terms;
            # the thousands digit selects the gauge.
            if key == 0:
                self.iops[iop].append('No external field')
            if (key % 1000 % 10) == 1:
                self.iops[iop].append('Electric Dipole')
            if (key % 1000 % 100) // 10 == 1:
                self.iops[iop].append('Electric Quadrupole')
            if (key % 1000 % 1000) // 100 == 1:
                self.iops[iop].append('Magnetic Dipole')
            if (key // 1000) == 1:
                self.iops[iop].append('Velocity Gauge')
            else:
                self.iops[iop].append('Length Gauge')
        elif iop == '139':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('')
            else:
                self.iops[iop].append('')
        elif iop == '140':
            key = int(self.iops[iop][0])
            if key == -1:
                self.iops[iop].append('Overlay 6 Pop at very end')
            elif key == 0:
                self.iops[iop].append('Overlay 6 Pop every 50 steps')
            else:
                self.iops[iop].append('Overlay 6 Pop every '+str(key)+' steps')
        elif iop == '141':
            key = int(self.iops[iop][0])
            if key == -1:
                self.iops[iop].append('No additional print')
            elif (key % 10) == 1:
                self.iops[iop].append('Print orbital occu. num')
            elif (key % 10) == 2:
                self.iops[iop].append('Print orbital energy + orbital occu. num')
            elif (key % 100) // 10 == 1:
                self.iops[iop].append('Print electron density difference')
            # NOTE(review): (key % 100) // 100 is always 0, so this branch
            # can never fire; the original likely intended (key % 1000) //
            # 100.  Kept as-is pending confirmation of the IOp encoding.
            elif (key % 100) // 100 == 1:
                self.iops[iop].append('Debug print')
        elif iop == '142':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('Print every step')
            else:
                self.iops[iop].append('Print every '+str(key)+' steps')
        elif iop == '143':
            key = int(self.iops[iop][0])
            # Was `if key <= 0`, which made the `key == 0` default branch
            # unreachable: negative disables restarts, zero means the
            # default of every 50 steps.
            if key < 0:
                self.iops[iop].append('Do not restart MMUT')
            elif key == 0:
                self.iops[iop].append('Restart MMUT every 50 steps')
            else:
                self.iops[iop].append('Restart MMUT every '+str(key)+' steps')
        elif iop == '144':
            key = int(self.iops[iop][0])
            if key == 0:
                self.iops[iop].append('Print HOMO-6 to LUMO+10')
            elif key == -1:
                self.iops[iop].append('Print all orbitals')
            else:
                self.iops[iop].append('Print HOMO-6*N to LUMO+6*N+4')
| [
"past.utils.old_div",
"numpy.asarray",
"numpy.zeros",
"numpy.genfromtxt",
"builtins.next",
"re.findall",
"numpy.array",
"builtins.str",
"builtins.range",
"numpy.delete"
] | [((400, 444), 'numpy.genfromtxt', 'np.genfromtxt', (['self.fieldFile'], {'delimiter': '""","""'}), "(self.fieldFile, delimiter=',')\n", (413, 444), True, 'import numpy as np\n'), ((460, 486), 'numpy.delete', 'np.delete', (['FieldData', '(0)', '(0)'], {}), '(FieldData, 0, 0)\n', (469, 486), True, 'import numpy as np\n'), ((511, 538), 'numpy.asarray', 'np.asarray', (['FieldData[:, 0]'], {}), '(FieldData[:, 0])\n', (521, 538), True, 'import numpy as np\n'), ((563, 590), 'numpy.asarray', 'np.asarray', (['FieldData[:, 1]'], {}), '(FieldData[:, 1])\n', (573, 590), True, 'import numpy as np\n'), ((615, 642), 'numpy.asarray', 'np.asarray', (['FieldData[:, 2]'], {}), '(FieldData[:, 2])\n', (625, 642), True, 'import numpy as np\n'), ((667, 694), 'numpy.asarray', 'np.asarray', (['FieldData[:, 3]'], {}), '(FieldData[:, 3])\n', (677, 694), True, 'import numpy as np\n'), ((854, 899), 'numpy.genfromtxt', 'np.genfromtxt', (['self.dipoleFile'], {'delimiter': '""","""'}), "(self.dipoleFile, delimiter=',')\n", (867, 899), True, 'import numpy as np\n'), ((916, 943), 'numpy.delete', 'np.delete', (['DipoleData', '(0)', '(0)'], {}), '(DipoleData, 0, 0)\n', (925, 943), True, 'import numpy as np\n'), ((969, 997), 'numpy.asarray', 'np.asarray', (['DipoleData[:, 1]'], {}), '(DipoleData[:, 1])\n', (979, 997), True, 'import numpy as np\n'), ((1023, 1051), 'numpy.asarray', 'np.asarray', (['DipoleData[:, 2]'], {}), '(DipoleData[:, 2])\n', (1033, 1051), True, 'import numpy as np\n'), ((1086, 1114), 'numpy.asarray', 'np.asarray', (['DipoleData[:, 3]'], {}), '(DipoleData[:, 3])\n', (1096, 1114), True, 'import numpy as np\n'), ((1149, 1177), 'numpy.asarray', 'np.asarray', (['DipoleData[:, 4]'], {}), '(DipoleData[:, 4])\n', (1159, 1177), True, 'import numpy as np\n'), ((1850, 1877), 're.findall', 're.findall', (['"""5/.*/12"""', 'line'], {}), "('5/.*/12', line)\n", (1860, 1877), False, 'import re\n'), ((7836, 7851), 'numpy.asarray', 'np.asarray', (['muX'], {}), '(muX)\n', (7846, 7851), True, 
'import numpy as np\n'), ((7884, 7899), 'numpy.asarray', 'np.asarray', (['muY'], {}), '(muY)\n', (7894, 7899), True, 'import numpy as np\n'), ((7932, 7947), 'numpy.asarray', 'np.asarray', (['muZ'], {}), '(muZ)\n', (7942, 7947), True, 'import numpy as np\n'), ((8006, 8020), 'numpy.asarray', 'np.asarray', (['mX'], {}), '(mX)\n', (8016, 8020), True, 'import numpy as np\n'), ((8053, 8067), 'numpy.asarray', 'np.asarray', (['mY'], {}), '(mY)\n', (8063, 8067), True, 'import numpy as np\n'), ((8100, 8114), 'numpy.asarray', 'np.asarray', (['mZ'], {}), '(mZ)\n', (8110, 8114), True, 'import numpy as np\n'), ((8173, 8187), 'numpy.asarray', 'np.asarray', (['eX'], {}), '(eX)\n', (8183, 8187), True, 'import numpy as np\n'), ((8220, 8234), 'numpy.asarray', 'np.asarray', (['eY'], {}), '(eY)\n', (8230, 8234), True, 'import numpy as np\n'), ((8267, 8281), 'numpy.asarray', 'np.asarray', (['eZ'], {}), '(eZ)\n', (8277, 8281), True, 'import numpy as np\n'), ((8340, 8354), 'numpy.asarray', 'np.asarray', (['bX'], {}), '(bX)\n', (8350, 8354), True, 'import numpy as np\n'), ((8387, 8401), 'numpy.asarray', 'np.asarray', (['bY'], {}), '(bY)\n', (8397, 8401), True, 'import numpy as np\n'), ((8434, 8448), 'numpy.asarray', 'np.asarray', (['bZ'], {}), '(bZ)\n', (8444, 8448), True, 'import numpy as np\n'), ((8484, 8499), 'numpy.array', 'np.array', (['MMDip'], {}), '(MMDip)\n', (8492, 8499), True, 'import numpy as np\n'), ((8543, 8556), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (8553, 8556), True, 'import numpy as np\n'), ((8601, 8615), 'numpy.asarray', 'np.asarray', (['en'], {}), '(en)\n', (8611, 8615), True, 'import numpy as np\n'), ((8691, 8707), 'numpy.asarray', 'np.asarray', (['HOMO'], {}), '(HOMO)\n', (8701, 8707), True, 'import numpy as np\n'), ((8754, 8770), 'numpy.asarray', 'np.asarray', (['LUMO'], {}), '(LUMO)\n', (8764, 8770), True, 'import numpy as np\n'), ((1993, 2002), 'builtins.range', 'range', (['(15)'], {}), '(15)\n', (1998, 2002), False, 'from builtins import range\n'), 
((2027, 2036), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (2031, 2036), False, 'from builtins import next\n'), ((11801, 11809), 'builtins.str', 'str', (['key'], {}), '(key)\n', (11804, 11809), False, 'from builtins import str\n'), ((12050, 12066), 'builtins.str', 'str', (['(key * 1e-05)'], {}), '(key * 1e-05)\n', (12053, 12066), False, 'from builtins import str\n'), ((4720, 4729), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (4724, 4729), False, 'from builtins import next\n'), ((13433, 13462), 'past.utils.old_div', 'old_div', (['(key % 1000 % 100)', '(10)'], {}), '(key % 1000 % 100, 10)\n', (13440, 13462), False, 'from past.utils import old_div\n'), ((13545, 13576), 'past.utils.old_div', 'old_div', (['(key % 1000 % 1000)', '(100)'], {}), '(key % 1000 % 1000, 100)\n', (13552, 13576), False, 'from past.utils import old_div\n'), ((5080, 5089), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (5084, 5089), False, 'from builtins import next\n'), ((5352, 5361), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (5356, 5361), False, 'from builtins import next\n'), ((5700, 5709), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (5704, 5709), False, 'from builtins import next\n'), ((5941, 5950), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (5945, 5950), False, 'from builtins import next\n'), ((6189, 6198), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (6193, 6198), False, 'from builtins import next\n'), ((6252, 6261), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (6256, 6261), False, 'from builtins import next\n'), ((14331, 14339), 'builtins.str', 'str', (['key'], {}), '(key)\n', (14334, 14339), False, 'from builtins import str\n'), ((14734, 14756), 'past.utils.old_div', 'old_div', (['(key % 100)', '(10)'], {}), '(key % 100, 10)\n', (14741, 14756), False, 'from past.utils import old_div\n'), ((14855, 14878), 'past.utils.old_div', 'old_div', (['(key % 100)', '(100)'], {}), '(key % 100, 100)\n', (14862, 14878), False, 'from past.utils import 
old_div\n'), ((15158, 15166), 'builtins.str', 'str', (['key'], {}), '(key)\n', (15161, 15166), False, 'from builtins import str\n'), ((6776, 6785), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (6780, 6785), False, 'from builtins import next\n'), ((7355, 7376), 'numpy.zeros', 'np.zeros', (['(NumPol, 3)'], {}), '((NumPol, 3))\n', (7363, 7376), True, 'import numpy as np\n'), ((7400, 7413), 'builtins.range', 'range', (['NDipLn'], {}), '(NDipLn)\n', (7405, 7413), False, 'from builtins import range\n'), ((15502, 15510), 'builtins.str', 'str', (['key'], {}), '(key)\n', (15505, 15510), False, 'from builtins import str\n'), ((7438, 7447), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (7442, 7447), False, 'from builtins import next\n'), ((7535, 7543), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (7540, 7543), False, 'from builtins import range\n'), ((7572, 7581), 'builtins.next', 'next', (['fin'], {}), '(fin)\n', (7576, 7581), False, 'from builtins import next\n')] |
# Authors: <NAME> <<EMAIL>>
#
# License: BSD-3 (3-clause)
import pytest
import numpy as np
import torch
from braindecode.training.losses import mixup_criterion
def test_mixup_criterion():
    """Check mixup_criterion for both mixed and plain integer targets."""
    n_samples, n_classes = 5, 2
    rng = np.random.RandomState(42)
    preds = torch.Tensor(rng.randn(n_samples, n_classes))
    y_a = torch.zeros(n_samples, dtype=torch.int64)
    y_b = torch.ones(n_samples, dtype=torch.int64)
    lam = torch.arange(.1, 1, 1 / n_samples)
    # Mixed target: loss is the lambda-weighted blend of both class terms.
    loss = mixup_criterion(preds, (y_a, y_b, lam))
    expected = -(lam * preds[:, 0] + (1 - lam) * preds[:, 1]).mean()
    assert loss == pytest.approx(expected)
    # Plain integer target: reduces to the mean loss on class 0.
    loss = mixup_criterion(preds, y_a)
    expected = -preds[:, 0].mean()
    assert loss == pytest.approx(expected)
| [
"torch.ones",
"braindecode.training.losses.mixup_criterion",
"numpy.random.RandomState",
"torch.arange",
"torch.zeros",
"pytest.approx"
] | [((237, 278), 'torch.zeros', 'torch.zeros', (['n_samples'], {'dtype': 'torch.int64'}), '(n_samples, dtype=torch.int64)\n', (248, 278), False, 'import torch\n'), ((289, 329), 'torch.ones', 'torch.ones', (['n_samples'], {'dtype': 'torch.int64'}), '(n_samples, dtype=torch.int64)\n', (299, 329), False, 'import torch\n'), ((340, 375), 'torch.arange', 'torch.arange', (['(0.1)', '(1)', '(1 / n_samples)'], {}), '(0.1, 1, 1 / n_samples)\n', (352, 375), False, 'import torch\n'), ((511, 541), 'braindecode.training.losses.mixup_criterion', 'mixup_criterion', (['preds', 'target'], {}), '(preds, target)\n', (526, 541), False, 'from braindecode.training.losses import mixup_criterion\n'), ((684, 714), 'braindecode.training.losses.mixup_criterion', 'mixup_criterion', (['preds', 'target'], {}), '(preds, target)\n', (699, 714), False, 'from braindecode.training.losses import mixup_criterion\n'), ((631, 654), 'pytest.approx', 'pytest.approx', (['expected'], {}), '(expected)\n', (644, 654), False, 'import pytest\n'), ((770, 793), 'pytest.approx', 'pytest.approx', (['expected'], {}), '(expected)\n', (783, 793), False, 'import pytest\n'), ((410, 435), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (431, 435), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestLodResetOp(AutoScanTest):
    """Auto-scan (hypothesis-driven) fuzz test for the ``lod_reset`` op."""
    def __init__(self, *args, **kwargs):
        AutoScanTest.__init__(self, *args, **kwargs)
        # Test on the host CPU in FP32/NCHW, with 1 and 2 worker threads.
        self.enable_testing_on_place(
            TargetType.Host,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2])
    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        """Every sampled program is considered valid for this operator."""
        return True
    def sample_program_configs(self, draw):
        """Draw one random ``lod_reset`` program (hypothesis strategy).

        Two cases are sampled: case 0 passes the target LoD through the
        ``target_lod`` attribute (empty ``Y`` input); case 1 supplies it
        via the ``Y`` input tensor instead.
        """
        # Random 4-D input shape with each dimension in [10, 20].
        in_shape = draw(
            st.lists(
                st.integers(
                    min_value=10, max_value=20),
                min_size=4,
                max_size=4))
        # Candidate LoD partitions; each starts at 0 and ends at dim 0.
        lod_data = draw(
            st.sampled_from([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]],
                             [0, 4, in_shape[0]], [0, 7, in_shape[0]]]))
        lod_data1 = draw(
            st.sampled_from([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]],
                             [0, 4, in_shape[0]], [0, 7, in_shape[0]]]))
        lod_data2 = draw(
            st.sampled_from([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]],
                             [0, 4, in_shape[0]], [0, 7, in_shape[0]]]))
        case_num = draw(st.sampled_from([0, 1]))
        def generate_input_x(*args, **kwargs):
            # Random float32 payload for input X.
            return np.random.random(in_shape).astype(np.float32)
        def generate_input_y(*args, **kwargs):
            # Target LoD delivered as an int32 tensor (case 1 only).
            return np.array(lod_data1).astype(np.int32)
        if case_num == 0:
            build_ops = OpConfig(
                type="lod_reset",
                inputs={"X": ["input_data_x"],
                        "Y": []},
                outputs={"Out": ["output_data"], },
                attrs={"target_lod": lod_data,
                       'append': True})
            program_config = ProgramConfig(
                ops=[build_ops],
                weights={},
                inputs={
                    "input_data_x": TensorConfig(data_gen=partial(
                        generate_input_x, lod=list(lod_data2))),
                },
                outputs=["output_data"])
        elif case_num == 1:
            build_ops = OpConfig(
                type="lod_reset",
                inputs={"X": ["input_data_x"],
                        "Y": ["input_data_y"]},
                outputs={"Out": ["output_data"], },
                attrs={"target_lod": [],
                       'append': True})
            program_config = ProgramConfig(
                ops=[build_ops],
                weights={},
                inputs={
                    "input_data_x": TensorConfig(data_gen=partial(
                        generate_input_x, lod=list(lod_data2))),
                    "input_data_y":
                    TensorConfig(data_gen=partial(generate_input_y)),
                },
                outputs=["output_data"])
        return program_config
    def sample_predictor_configs(self):
        """Predictor configs plus the op name and (atol, rtol) tolerances."""
        return self.get_predictor_configs(), ["lod_reset"], (1e-5, 1e-5)
    def add_ignore_pass_case(self):
        """No known failing cases need to be ignored for this op."""
        pass
    def test(self, *args, **kwargs):
        """Entry point: fuzz up to 25 sampled programs."""
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
    # Empty argv keeps unittest from parsing the real command-line flags.
    unittest.main(argv=[''])
| [
"sys.path.append",
"unittest.main",
"functools.partial",
"program_config.OpConfig",
"auto_scan_test.AutoScanTest.__init__",
"hypothesis.strategies.sampled_from",
"numpy.random.random",
"numpy.array",
"hypothesis.strategies.integers"
] | [((622, 644), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (637, 644), False, 'import sys\n'), ((4218, 4242), 'unittest.main', 'unittest.main', ([], {'argv': "['']"}), "(argv=[''])\n", (4231, 4242), False, 'import unittest\n'), ((1111, 1155), 'auto_scan_test.AutoScanTest.__init__', 'AutoScanTest.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1132, 1155), False, 'from auto_scan_test import AutoScanTest, IgnoreReasons\n'), ((1750, 1861), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]], [0, 4, in_shape[0]], [0, 7,\n in_shape[0]]]'], {}), '([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]], [0, 4,\n in_shape[0]], [0, 7, in_shape[0]]])\n', (1765, 1861), True, 'import hypothesis.strategies as st\n'), ((1926, 2037), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]], [0, 4, in_shape[0]], [0, 7,\n in_shape[0]]]'], {}), '([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]], [0, 4,\n in_shape[0]], [0, 7, in_shape[0]]])\n', (1941, 2037), True, 'import hypothesis.strategies as st\n'), ((2102, 2213), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]], [0, 4, in_shape[0]], [0, 7,\n in_shape[0]]]'], {}), '([[0, 3, 5, in_shape[0]], [0, 4, 7, in_shape[0]], [0, 4,\n in_shape[0]], [0, 7, in_shape[0]]])\n', (2117, 2213), True, 'import hypothesis.strategies as st\n'), ((2264, 2287), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0, 1]'], {}), '([0, 1])\n', (2279, 2287), True, 'import hypothesis.strategies as st\n'), ((2557, 2711), 'program_config.OpConfig', 'OpConfig', ([], {'type': '"""lod_reset"""', 'inputs': "{'X': ['input_data_x'], 'Y': []}", 'outputs': "{'Out': ['output_data']}", 'attrs': "{'target_lod': lod_data, 'append': True}"}), "(type='lod_reset', inputs={'X': ['input_data_x'], 'Y': []}, outputs\n ={'Out': ['output_data']}, attrs={'target_lod': 
lod_data, 'append': True})\n", (2565, 2711), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\n'), ((1594, 1633), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(10)', 'max_value': '(20)'}), '(min_value=10, max_value=20)\n', (1605, 1633), True, 'import hypothesis.strategies as st\n'), ((3195, 3361), 'program_config.OpConfig', 'OpConfig', ([], {'type': '"""lod_reset"""', 'inputs': "{'X': ['input_data_x'], 'Y': ['input_data_y']}", 'outputs': "{'Out': ['output_data']}", 'attrs': "{'target_lod': [], 'append': True}"}), "(type='lod_reset', inputs={'X': ['input_data_x'], 'Y': [\n 'input_data_y']}, outputs={'Out': ['output_data']}, attrs={'target_lod':\n [], 'append': True})\n", (3203, 3361), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\n'), ((2356, 2382), 'numpy.random.random', 'np.random.random', (['in_shape'], {}), '(in_shape)\n', (2372, 2382), True, 'import numpy as np\n'), ((2469, 2488), 'numpy.array', 'np.array', (['lod_data1'], {}), '(lod_data1)\n', (2477, 2488), True, 'import numpy as np\n'), ((3807, 3832), 'functools.partial', 'partial', (['generate_input_y'], {}), '(generate_input_y)\n', (3814, 3832), False, 'from functools import partial\n')] |
import scipy.io
import torch
import numpy as np
#import time
import os
#######################################################################
# Evaluate
def evaluate(qf, ql, qc, gf, gl, gc):
    """Rank the gallery against a single query and score the ranking.

    Args:
        qf: query feature vector (torch.Tensor).
        ql: query person label (numpy.int32).
        qc: query camera ID (numpy.int32).
        gf: gallery feature matrix, one row per image (torch.Tensor).
        gl: gallery labels (numpy.ndarray).
        gc: gallery camera IDs (numpy.ndarray).

    Returns:
        The (ap, cmc) pair produced by compute_mAP for this query.
    """
    # Similarity of every gallery row against the query column vector.
    similarity = torch.mm(gf, qf.view(-1, 1)).squeeze(1).cpu().numpy()
    # Gallery indices ordered from most to least similar.
    ranking = np.argsort(similarity)[::-1]
    same_label = np.argwhere(gl == ql)    # gallery entries with the query's label
    same_camera = np.argwhere(gc == qc)   # gallery entries from the query's camera
    # True matches: same person captured by a *different* camera.
    good_index = np.setdiff1d(same_label, same_camera, assume_unique=True)
    # Distractors to ignore: same person under the same camera, plus
    # unlabeled detections (label == -1).
    junk_index = np.append(np.intersect1d(same_label, same_camera),
                           np.argwhere(gl == -1))
    return compute_mAP(ranking, good_index, junk_index)
def compute_mAP(index, good_index, junk_index):
    """Compute average precision and the CMC curve for one ranked query.

    Args:
        index: gallery indices ranked by descending similarity.
        good_index: indices of the true matches.
        junk_index: indices that must be ignored when scoring.

    Returns:
        (ap, cmc): average precision and a torch.IntTensor CMC curve of
        length len(index); cmc[0] == -1 flags "no valid match exists".
    """
    cmc = torch.IntTensor(len(index)).zero_()
    if good_index.size == 0:
        # Nothing retrievable for this query: zero AP, -1 sentinel in CMC.
        cmc[0] = -1
        return 0, cmc
    # Drop junk entries from the ranking before scoring.
    kept = index[np.in1d(index, junk_index, invert=True)]
    # Ranks (0-based) at which each true match appears.
    hit_rows = np.argwhere(np.in1d(kept, good_index)).flatten()
    # CMC is 1 from the first correct rank onward.
    cmc[hit_rows[0]:] = 1
    ngood = len(good_index)
    recall_step = 1.0 / ngood
    ap = 0
    for i in range(ngood):
        precision = (i + 1) * 1.0 / (hit_rows[i] + 1)
        old_precision = i * 1.0 / hit_rows[i] if hit_rows[i] != 0 else 1.0
        # Trapezoidal accumulation of the precision-recall curve.
        ap = ap + recall_step * (old_precision + precision) / 2
    return ap, cmc
######################################################################
result = scipy.io.loadmat('pytorch_result.mat')    # load the gallery and query feature matrices
query_feature = torch.FloatTensor(result['query_f'])    # feature set of the query images
query_cam = result['query_cam'][0]      # camera IDs of the query images
query_label = result['query_label'][0]      # labels of the query images
gallery_feature = torch.FloatTensor(result['gallery_f'])      # gallery image features
gallery_cam = result['gallery_cam'][0]      # camera IDs of the gallery images
gallery_label = result['gallery_label'][0]      # labels of the gallery images
multi = os.path.isfile('multi_query.mat')       # file existence toggles multi-query evaluation (False if missing)
if multi:
    m_result = scipy.io.loadmat('multi_query.mat')
    mquery_feature = torch.FloatTensor(m_result['mquery_f'])
    mquery_cam = m_result['mquery_cam'][0]
    mquery_label = m_result['mquery_label'][0]
    mquery_feature = mquery_feature.cuda()
query_feature = query_feature.cuda()        # query image features (moved to GPU)
gallery_feature = gallery_feature.cuda()    # gallery image features (moved to GPU)
print(query_feature.shape)
print("len(gallery_label):", len(gallery_label))
CMC = torch.IntTensor(len(gallery_label)).zero_()     # 1-D Int tensor of length len(gallery_label), initialized to zero
ap = 0.0
for i in range(len(query_label)):
    ap_tmp, CMC_tmp = evaluate(query_feature[i], query_label[i], query_cam[i], gallery_feature, gallery_label, gallery_cam)  # score each query image the same way
    if CMC_tmp[0]==-1:      # -1 means no valid match exists for this query
        continue
    CMC = CMC + CMC_tmp     # accumulate per-query CMC_tmp and ap_tmp
    ap += ap_tmp
    # print(i, CMC_tmp[0])
CMC = CMC.float()
CMC = CMC/len(query_label) #average CMC
print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0], CMC[4], CMC[9], ap/len(query_label)))
print(type(CMC), CMC.shape, CMC)
# multiple-query
CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
if multi:
    for i in range(len(query_label)):
        # Multi-query: pool all stored queries sharing this label and camera.
        mquery_index1 = np.argwhere(mquery_label==query_label[i])
        mquery_index2 = np.argwhere(mquery_cam==query_cam[i])
        mquery_index = np.intersect1d(mquery_index1, mquery_index2)
        # Average the pooled query features into a single query vector.
        mq = torch.mean(mquery_feature[mquery_index,:], dim=0)
        ap_tmp, CMC_tmp = evaluate(mq,query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)
        if CMC_tmp[0]==-1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
        #print(i, CMC_tmp[0])
    CMC = CMC.float()
    CMC = CMC/len(query_label) #average CMC
    print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
| [
"torch.mean",
"torch.FloatTensor",
"torch.mm",
"numpy.setdiff1d",
"numpy.argsort",
"numpy.append",
"os.path.isfile",
"numpy.argwhere",
"numpy.intersect1d",
"numpy.in1d"
] | [((3248, 3284), 'torch.FloatTensor', 'torch.FloatTensor', (["result['query_f']"], {}), "(result['query_f'])\n", (3265, 3284), False, 'import torch\n'), ((3454, 3492), 'torch.FloatTensor', 'torch.FloatTensor', (["result['gallery_f']"], {}), "(result['gallery_f'])\n", (3471, 3492), False, 'import torch\n'), ((3651, 3684), 'os.path.isfile', 'os.path.isfile', (['"""multi_query.mat"""'], {}), "('multi_query.mat')\n", (3665, 3684), False, 'import os\n'), ((593, 612), 'torch.mm', 'torch.mm', (['gf', 'query'], {}), '(gf, query)\n', (601, 612), False, 'import torch\n'), ((900, 917), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (910, 917), True, 'import numpy as np\n'), ((1163, 1184), 'numpy.argwhere', 'np.argwhere', (['(gl == ql)'], {}), '(gl == ql)\n', (1174, 1184), True, 'import numpy as np\n'), ((1232, 1253), 'numpy.argwhere', 'np.argwhere', (['(gc == qc)'], {}), '(gc == qc)\n', (1243, 1253), True, 'import numpy as np\n'), ((1495, 1554), 'numpy.setdiff1d', 'np.setdiff1d', (['query_index', 'camera_index'], {'assume_unique': '(True)'}), '(query_index, camera_index, assume_unique=True)\n', (1507, 1554), True, 'import numpy as np\n'), ((1622, 1643), 'numpy.argwhere', 'np.argwhere', (['(gl == -1)'], {}), '(gl == -1)\n', (1633, 1643), True, 'import numpy as np\n'), ((1676, 1717), 'numpy.intersect1d', 'np.intersect1d', (['query_index', 'camera_index'], {}), '(query_index, camera_index)\n', (1690, 1717), True, 'import numpy as np\n'), ((1753, 1788), 'numpy.append', 'np.append', (['junk_index2', 'junk_index1'], {}), '(junk_index2, junk_index1)\n', (1762, 1788), True, 'import numpy as np\n'), ((2218, 2257), 'numpy.in1d', 'np.in1d', (['index', 'junk_index'], {'invert': '(True)'}), '(index, junk_index, invert=True)\n', (2225, 2257), True, 'import numpy as np\n'), ((2350, 2376), 'numpy.in1d', 'np.in1d', (['index', 'good_index'], {}), '(index, good_index)\n', (2357, 2376), True, 'import numpy as np\n'), ((2444, 2469), 'numpy.argwhere', 'np.argwhere', (['(mask == 
True)'], {}), '(mask == True)\n', (2455, 2469), True, 'import numpy as np\n'), ((3813, 3852), 'torch.FloatTensor', 'torch.FloatTensor', (["m_result['mquery_f']"], {}), "(m_result['mquery_f'])\n", (3830, 3852), False, 'import torch\n'), ((4950, 4993), 'numpy.argwhere', 'np.argwhere', (['(mquery_label == query_label[i])'], {}), '(mquery_label == query_label[i])\n', (4961, 4993), True, 'import numpy as np\n'), ((5016, 5055), 'numpy.argwhere', 'np.argwhere', (['(mquery_cam == query_cam[i])'], {}), '(mquery_cam == query_cam[i])\n', (5027, 5055), True, 'import numpy as np\n'), ((5078, 5122), 'numpy.intersect1d', 'np.intersect1d', (['mquery_index1', 'mquery_index2'], {}), '(mquery_index1, mquery_index2)\n', (5092, 5122), True, 'import numpy as np\n'), ((5136, 5186), 'torch.mean', 'torch.mean', (['mquery_feature[mquery_index, :]'], {'dim': '(0)'}), '(mquery_feature[mquery_index, :], dim=0)\n', (5146, 5186), False, 'import torch\n')] |
import sys
import os
# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.insightface.deploy import face_model
from imutils import paths
import numpy as np
import pickle
import cv2
import os
def genFaceEmbedings(dataset='datasets\\train',
                     output='src/embeddings/embeddings.pickle'):
    """Extract an InsightFace embedding for every training image and pickle them.

    Args:
        dataset: root directory scanned recursively for face images; the
            parent directory name of each image is used as the person name.
        output: path of the pickle file that receives
            {"embeddings": [...], "names": [...]}.
    """
    print('---->>> Creating data embeddings <<<---')
    embedding_model = face_model.FaceModel('112,112', "src/insightface/models/model-y1-test2/model,0", 1.24, 0)
    # Extracted facial embeddings and the corresponding people names.
    knownEmbeddings = []
    knownNames = []
    # Total number of faces processed.
    total = 0
    # Loop over the imagePaths
    imagePaths = list(paths.list_images(dataset))
    for (i, imagePath) in enumerate(imagePaths):
        print("---->>> processing image {}/{} <<<---".format(i + 1, len(imagePaths)))
        # The person name is the directory containing the image.
        name = imagePath.split(os.path.sep)[-2]
        # load the image
        image = cv2.imread(imagePath)
        if image is None:
            # Unreadable/corrupt file: skip it instead of crashing mid-run.
            print("---->>> skipping unreadable image: {} <<<---".format(imagePath))
            continue
        # Model expects RGB, channels-first (C, H, W); OpenCV loads BGR HWC.
        nimg = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        nimg = np.transpose(nimg, (2, 0, 1))
        # Get the face embedding vector
        face_embedding = embedding_model.get_feature(nimg)
        knownNames.append(name)
        knownEmbeddings.append(face_embedding)
        total += 1
    print("---->>> {} faces embedded <<<---".format(total))
    data = {"embeddings": knownEmbeddings, "names": knownNames}
    with open(output, 'wb') as handle:
        pickle.dump(data, handle)
# genFaceEmbedings()
"imutils.paths.list_images",
"pickle.dump",
"cv2.cvtColor",
"numpy.transpose",
"cv2.imread",
"src.insightface.deploy.face_model.FaceModel"
] | [((326, 419), 'src.insightface.deploy.face_model.FaceModel', 'face_model.FaceModel', (['"""112,112"""', '"""src/insightface/models/model-y1-test2/model,0"""', '(1.24)', '(0)'], {}), "('112,112',\n 'src/insightface/models/model-y1-test2/model,0', 1.24, 0)\n", (346, 419), False, 'from src.insightface.deploy import face_model\n'), ((670, 706), 'imutils.paths.list_images', 'paths.list_images', (['"""datasets\\\\train"""'], {}), "('datasets\\\\train')\n", (687, 706), False, 'from imutils import paths\n'), ((999, 1020), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (1009, 1020), False, 'import cv2\n'), ((1072, 1110), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1084, 1110), False, 'import cv2\n'), ((1126, 1155), 'numpy.transpose', 'np.transpose', (['nimg', '(2, 0, 1)'], {}), '(nimg, (2, 0, 1))\n', (1138, 1155), True, 'import numpy as np\n'), ((1595, 1620), 'pickle.dump', 'pickle.dump', (['data', 'handle'], {}), '(data, handle)\n', (1606, 1620), False, 'import pickle\n')] |
import unittest
import numpy as np
import tensorflow.keras.backend as K
import lib.losses as losses
import lib.spatial_geometry as spatial_geometry
from lib.cameras import PinholeCamera
# Losses assume the input:
# y = [ quaternion, translation, shape3D ]
# y = [ q0,q1,q2,q3, t0,t1,t2, sx0,sy0,sz0,...,sxN,syN,szN ]
# y.shape = [ 1, 4 + 3 + 3*Points3D]
#
# multiview_reprojection_loss: Require random poses concatenated at the end
# y_mrl = [ y, quaternion_random_view_0, translation_random_view_0, ..., quaternion_random_view_N, translation_random_view_N ]
# y_mrl = [ y, qr00,qr10,qr20,qr30, t00,t10,t20, ..., qr0N,qr1N,qr2N,qr3N, t0N,t1N,t2N ]
# y_mrl.shape = [ 1, 4 + 3 + 3*Points3D + 7*RandomViewa ]
def create_y_true_y_pred():
    """Build one fixed ground-truth / prediction pair in the packed layout
    [quaternion(4), translation(3), 2 shape points(3 each), 2 virtual
    views(quaternion+translation, 7 each)]; both arrays have shape (1, 27).
    """
    identity_pose = [1., 0., 0., 0.]  # unit quaternion: no rotation
    y_true = np.array([identity_pose + [0., 0., -30.] +
                       [-0.98854052, 2.12746976, -3.8310884,
                        -0.41849216, 2.69751813, -3.2610400] +
                       identity_pose + [0., 0., -40.] +
                       identity_pose + [0., 0., -20.]])
    # Prediction: same pose, slightly perturbed shape, zeroed virtual views.
    y_pred = np.array([identity_pose + [0., 0., -30.] +
                       [-0.9889736, 2.11025506, -3.81418583,
                        -0.42152029, 2.67770837, -3.24673252] +
                       [0.] * 14])
    return y_true, y_pred
# Tests
class TestLosses(unittest.TestCase):
    """Checks each TensorFlow loss in ``lib.losses`` against an independent
    NumPy re-implementation evaluated on the fixed sample pair from
    ``create_y_true_y_pred`` (2 shape points, 2 virtual views)."""
    def test_coarse(self):
        """coarse = shape MSE + beta * pose (quaternion+translation) MSE."""
        # Create test samples
        y_true, y_pred = create_y_true_y_pred()
        loss = losses.coarse(
            shape_points=2, beta=1e-2)(K.variable(y_true),
                                       K.variable(y_pred))
        loss_tf = K.eval(loss)
        mse = lambda x, y: ((x - y)**2).mean(axis=-1)
        # Columns 7:13 hold the 2 shape points; columns 0:7 the pose.
        loss_np = 1. * mse(y_pred[:, 7:7 + 2 * 3], y_true[:, 7:7 + 2 * 3]) + \
            1e-2 * mse(y_pred[:, :7], y_true[:, :7])
        self.assertAlmostEqual(loss_tf[0], loss_np[0], places=4)
    def test_xqt(self):
        """xqt = shape MSE + beta * quaternion MSE + gamma * translation MSE."""
        # Create test samples
        y_true, y_pred = create_y_true_y_pred()
        loss = losses.xqt(
            shape_points=2, beta=1e-2, gamma=1e-3)(K.variable(y_true),
                                                   K.variable(y_pred))
        loss_tf = K.eval(loss)
        mse = lambda x, y: ((x - y)**2).mean(axis=-1)
        loss_np = (1. * mse(y_pred[:, 7:7 + 2 * 3], y_true[:, 7:7 + 2 * 3]) +
                   1e-2 * mse(y_pred[:, :4], y_true[:, :4]) +
                   1e-3 * mse(y_pred[:, 4:7], y_true[:, 4:7]))
        self.assertAlmostEqual(loss_tf[0], loss_np[0], places=4)
    def test_geometric_alignment(self):
        """Mean L1 distance between the posed predicted and GT 3D points."""
        # Create test samples
        y_true, y_pred = create_y_true_y_pred()
        loss = losses.geometric_alignment(shape_points=2)(K.variable(y_true), K.variable(y_pred))
        loss_tf = K.eval(loss)
        # Shape points reshaped to 3xN (columns are points).
        x = np.reshape(y_pred[0, 7:7+3*2], (2, 3)).T
        x_gt = np.reshape(y_true[0, 7:7+3*2], (2, 3)).T
        t = spatial_geometry.quaternion_translation_to_pose(y_pred[0, :4], y_pred[0, 4:7])
        t_gt = spatial_geometry.quaternion_translation_to_pose(y_true[0, :4], y_true[0, 4:7])
        # Apply each pose: rotate then translate the point columns.
        xt = np.dot(t[:3, :3], x) + np.expand_dims(t[:3, 3], axis=1)
        xt_gt = np.dot(t_gt[:3, :3], x_gt) + np.expand_dims(t_gt[:3, 3], axis=1)
        loss_np = np.mean(np.mean(np.abs(xt_gt - xt), axis=0), axis=-1)
        self.assertAlmostEqual(loss_tf[0], loss_np, places=4)
    def test_reprojection(self):
        """Mean squared 2D error of projected predicted vs GT shape points."""
        # Create calibration matrix
        calibration = np.eye(3)
        # Create test samples
        y_true, y_pred = create_y_true_y_pred()
        loss = losses.reprojection(
            shape_points=2,
            calibration=calibration)(K.variable(y_true), K.variable(y_pred))
        loss_tf = K.eval(loss)
        x = np.reshape(y_pred[0, 7:7+3*2], (2, 3)).T
        x_gt = np.reshape(y_true[0, 7:7+3*2], (2, 3)).T
        # Single camera at the *predicted* pose projects both shapes.
        camera = PinholeCamera(
            calibration=calibration,
            pose=spatial_geometry.quaternion_translation_to_pose(y_pred[0, :4], y_pred[0, 4:7]))
        x_2d = camera.project(x)
        x_2d_gt = camera.project(x_gt)
        diff = np.square(x_2d_gt - x_2d)
        loss_np = np.mean(np.mean(diff, axis=-1),axis=-1)
        self.assertAlmostEqual(loss_tf[0], loss_np, places=4)
    def test_multiview_reprojection(self):
        """Reprojection error accumulated over the two GT virtual views."""
        # Create calibration matrix
        calibration = np.eye(3)
        # Create test samples
        y_true, y_pred = create_y_true_y_pred()
        loss = losses.multiview_reprojection(
            shape_points=2,
            calibration=calibration,
            virtual_views=2)(K.variable(y_true), K.variable(y_pred))
        loss_tf = K.eval(loss)
        # Loss numpy
        loss_np = 0.
        x = np.reshape(y_pred[0, 7:7+2*3], (2, 3)).T
        x_gt = np.reshape(y_true[0, 7:7+2*3], (2, 3)).T
        pose_pred = spatial_geometry.quaternion_translation_to_pose(y_pred[0, :4], y_pred[0, 4:7])
        pose_pred_inv = np.linalg.inv(pose_pred)
        pose_gt = spatial_geometry.quaternion_translation_to_pose(y_true[0, :4], y_true[0, 4:7])
        # Virtual-view GT poses are packed after the shape points.
        pose_gt_1 = spatial_geometry.quaternion_translation_to_pose(y_true[0, 13:17], y_true[0, 17:20])
        pose_gt_2 = spatial_geometry.quaternion_translation_to_pose(y_true[0, 20:24], y_true[0, 24:27])
        # Projection using view 1
        # 1. Projective cameras for view 1 non-distorted and distorted
        camera_i = PinholeCamera(calibration=calibration, pose=pose_gt_1)
        camera_d = PinholeCamera(calibration=calibration, pose=np.matmul(pose_gt_1, np.matmul(pose_gt, pose_pred_inv)))
        # 2. Projected 3D points using non-distorted and distorted cameras
        x_2d_gt = camera_i.project(x_gt)
        x_2d_d = camera_d.project(x)
        # 3. Reprojection loss using view 1 (shape error)
        diff = np.square(x_2d_gt - x_2d_d)
        loss_np += np.mean(diff)
        # Projection using view 2
        # 1. Projective cameras for view 2 non-distorted and distorted
        camera_i = PinholeCamera(calibration=calibration, pose=pose_gt_2)
        camera_d = PinholeCamera(calibration=calibration, pose=np.matmul(pose_gt_2, np.matmul(pose_gt, pose_pred_inv)))
        # 2. Projected 3D points using non-distorted and distorted cameras
        x_2d_gt = camera_i.project(x_gt)
        # NOTE(review): view 1 projects the *predicted* shape `x` through the
        # distorted camera, but view 2 projects `x_gt` here. If the loss under
        # test distorts the predicted shape in every view, this should likely
        # be `x` -- confirm against lib.losses.multiview_reprojection before
        # changing, since the test may currently pass within places=4.
        x_2d_d = camera_d.project(x_gt)
        # 3. Reprojection loss using view 2 (shape error)
        diff = np.square(x_2d_gt - x_2d_d)
        loss_np += np.mean(diff)
        self.assertAlmostEqual(loss_tf[0], loss_np, places=4)
if __name__ == '__main__':
    # Run the full suite when this file is executed directly.
    unittest.main()
| [
"numpy.abs",
"lib.losses.reprojection",
"lib.losses.multiview_reprojection",
"lib.losses.xqt",
"numpy.mean",
"unittest.main",
"numpy.eye",
"lib.losses.geometric_alignment",
"numpy.reshape",
"tensorflow.keras.backend.variable",
"lib.spatial_geometry.quaternion_translation_to_pose",
"numpy.squar... | [((766, 971), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -30.0, -0.98854052, 2.12746976, -3.8310884,\n -0.41849216, 2.69751813, -3.26104, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -40.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -20.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -30.0, -0.98854052, 2.12746976, -\n 3.8310884, -0.41849216, 2.69751813, -3.26104, 1.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, -40.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -20.0]])\n', (774, 971), True, 'import numpy as np\n'), ((1053, 1257), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -30.0, -0.9889736, 2.11025506, -3.81418583,\n -0.42152029, 2.67770837, -3.24673252, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -30.0, -0.9889736, 2.11025506, -\n 3.81418583, -0.42152029, 2.67770837, -3.24673252, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (1061, 1257), True, 'import numpy as np\n'), ((6562, 6577), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6575, 6577), False, 'import unittest\n'), ((1670, 1682), 'tensorflow.keras.backend.eval', 'K.eval', (['loss'], {}), '(loss)\n', (1676, 1682), True, 'import tensorflow.keras.backend as K\n'), ((2236, 2248), 'tensorflow.keras.backend.eval', 'K.eval', (['loss'], {}), '(loss)\n', (2242, 2248), True, 'import tensorflow.keras.backend as K\n'), ((2809, 2821), 'tensorflow.keras.backend.eval', 'K.eval', (['loss'], {}), '(loss)\n', (2815, 2821), True, 'import tensorflow.keras.backend as K\n'), ((2945, 3023), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_pred[0, :4]', 'y_pred[0, 4:7]'], {}), '(y_pred[0, :4], y_pred[0, 4:7])\n', (2992, 3023), True, 'import lib.spatial_geometry as spatial_geometry\n'), ((3039, 3117), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_true[0, :4]', 'y_true[0, 4:7]'], {}), '(y_true[0, :4], 
y_true[0, 4:7])\n', (3086, 3117), True, 'import lib.spatial_geometry as spatial_geometry\n'), ((3498, 3507), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3504, 3507), True, 'import numpy as np\n'), ((3743, 3755), 'tensorflow.keras.backend.eval', 'K.eval', (['loss'], {}), '(loss)\n', (3749, 3755), True, 'import tensorflow.keras.backend as K\n'), ((4122, 4147), 'numpy.square', 'np.square', (['(x_2d_gt - x_2d)'], {}), '(x_2d_gt - x_2d)\n', (4131, 4147), True, 'import numpy as np\n'), ((4373, 4382), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4379, 4382), True, 'import numpy as np\n'), ((4662, 4674), 'tensorflow.keras.backend.eval', 'K.eval', (['loss'], {}), '(loss)\n', (4668, 4674), True, 'import tensorflow.keras.backend as K\n'), ((4848, 4926), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_pred[0, :4]', 'y_pred[0, 4:7]'], {}), '(y_pred[0, :4], y_pred[0, 4:7])\n', (4895, 4926), True, 'import lib.spatial_geometry as spatial_geometry\n'), ((4951, 4975), 'numpy.linalg.inv', 'np.linalg.inv', (['pose_pred'], {}), '(pose_pred)\n', (4964, 4975), True, 'import numpy as np\n'), ((4994, 5072), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_true[0, :4]', 'y_true[0, 4:7]'], {}), '(y_true[0, :4], y_true[0, 4:7])\n', (5041, 5072), True, 'import lib.spatial_geometry as spatial_geometry\n'), ((5093, 5180), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_true[0, 13:17]', 'y_true[0, 17:20]'], {}), '(y_true[0, 13:17], y_true[0,\n 17:20])\n', (5140, 5180), True, 'import lib.spatial_geometry as spatial_geometry\n'), ((5197, 5284), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_true[0, 20:24]', 'y_true[0, 24:27]'], {}), '(y_true[0, 20:24], y_true[0,\n 24:27])\n', (5244, 5284), True, 'import lib.spatial_geometry as 
spatial_geometry\n'), ((5407, 5461), 'lib.cameras.PinholeCamera', 'PinholeCamera', ([], {'calibration': 'calibration', 'pose': 'pose_gt_1'}), '(calibration=calibration, pose=pose_gt_1)\n', (5420, 5461), False, 'from lib.cameras import PinholeCamera\n'), ((5812, 5839), 'numpy.square', 'np.square', (['(x_2d_gt - x_2d_d)'], {}), '(x_2d_gt - x_2d_d)\n', (5821, 5839), True, 'import numpy as np\n'), ((5859, 5872), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (5866, 5872), True, 'import numpy as np\n'), ((5999, 6053), 'lib.cameras.PinholeCamera', 'PinholeCamera', ([], {'calibration': 'calibration', 'pose': 'pose_gt_2'}), '(calibration=calibration, pose=pose_gt_2)\n', (6012, 6053), False, 'from lib.cameras import PinholeCamera\n'), ((6405, 6432), 'numpy.square', 'np.square', (['(x_2d_gt - x_2d_d)'], {}), '(x_2d_gt - x_2d_d)\n', (6414, 6432), True, 'import numpy as np\n'), ((6452, 6465), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (6459, 6465), True, 'import numpy as np\n'), ((1518, 1558), 'lib.losses.coarse', 'losses.coarse', ([], {'shape_points': '(2)', 'beta': '(0.01)'}), '(shape_points=2, beta=0.01)\n', (1531, 1558), True, 'import lib.losses as losses\n'), ((1572, 1590), 'tensorflow.keras.backend.variable', 'K.variable', (['y_true'], {}), '(y_true)\n', (1582, 1590), True, 'import tensorflow.keras.backend as K\n'), ((1631, 1649), 'tensorflow.keras.backend.variable', 'K.variable', (['y_pred'], {}), '(y_pred)\n', (1641, 1649), True, 'import tensorflow.keras.backend as K\n'), ((2063, 2113), 'lib.losses.xqt', 'losses.xqt', ([], {'shape_points': '(2)', 'beta': '(0.01)', 'gamma': '(0.001)'}), '(shape_points=2, beta=0.01, gamma=0.001)\n', (2073, 2113), True, 'import lib.losses as losses\n'), ((2126, 2144), 'tensorflow.keras.backend.variable', 'K.variable', (['y_true'], {}), '(y_true)\n', (2136, 2144), True, 'import tensorflow.keras.backend as K\n'), ((2197, 2215), 'tensorflow.keras.backend.variable', 'K.variable', (['y_pred'], {}), '(y_pred)\n', (2207, 2215), 
True, 'import tensorflow.keras.backend as K\n'), ((2707, 2749), 'lib.losses.geometric_alignment', 'losses.geometric_alignment', ([], {'shape_points': '(2)'}), '(shape_points=2)\n', (2733, 2749), True, 'import lib.losses as losses\n'), ((2750, 2768), 'tensorflow.keras.backend.variable', 'K.variable', (['y_true'], {}), '(y_true)\n', (2760, 2768), True, 'import tensorflow.keras.backend as K\n'), ((2770, 2788), 'tensorflow.keras.backend.variable', 'K.variable', (['y_pred'], {}), '(y_pred)\n', (2780, 2788), True, 'import tensorflow.keras.backend as K\n'), ((2835, 2877), 'numpy.reshape', 'np.reshape', (['y_pred[0, 7:7 + 3 * 2]', '(2, 3)'], {}), '(y_pred[0, 7:7 + 3 * 2], (2, 3))\n', (2845, 2877), True, 'import numpy as np\n'), ((2891, 2933), 'numpy.reshape', 'np.reshape', (['y_true[0, 7:7 + 3 * 2]', '(2, 3)'], {}), '(y_true[0, 7:7 + 3 * 2], (2, 3))\n', (2901, 2933), True, 'import numpy as np\n'), ((3132, 3152), 'numpy.dot', 'np.dot', (['t[:3, :3]', 'x'], {}), '(t[:3, :3], x)\n', (3138, 3152), True, 'import numpy as np\n'), ((3155, 3187), 'numpy.expand_dims', 'np.expand_dims', (['t[:3, 3]'], {'axis': '(1)'}), '(t[:3, 3], axis=1)\n', (3169, 3187), True, 'import numpy as np\n'), ((3204, 3230), 'numpy.dot', 'np.dot', (['t_gt[:3, :3]', 'x_gt'], {}), '(t_gt[:3, :3], x_gt)\n', (3210, 3230), True, 'import numpy as np\n'), ((3233, 3268), 'numpy.expand_dims', 'np.expand_dims', (['t_gt[:3, 3]'], {'axis': '(1)'}), '(t_gt[:3, 3], axis=1)\n', (3247, 3268), True, 'import numpy as np\n'), ((3604, 3664), 'lib.losses.reprojection', 'losses.reprojection', ([], {'shape_points': '(2)', 'calibration': 'calibration'}), '(shape_points=2, calibration=calibration)\n', (3623, 3664), True, 'import lib.losses as losses\n'), ((3684, 3702), 'tensorflow.keras.backend.variable', 'K.variable', (['y_true'], {}), '(y_true)\n', (3694, 3702), True, 'import tensorflow.keras.backend as K\n'), ((3704, 3722), 'tensorflow.keras.backend.variable', 'K.variable', (['y_pred'], {}), '(y_pred)\n', (3714, 3722), True, 
'import tensorflow.keras.backend as K\n'), ((3769, 3811), 'numpy.reshape', 'np.reshape', (['y_pred[0, 7:7 + 3 * 2]', '(2, 3)'], {}), '(y_pred[0, 7:7 + 3 * 2], (2, 3))\n', (3779, 3811), True, 'import numpy as np\n'), ((3825, 3867), 'numpy.reshape', 'np.reshape', (['y_true[0, 7:7 + 3 * 2]', '(2, 3)'], {}), '(y_true[0, 7:7 + 3 * 2], (2, 3))\n', (3835, 3867), True, 'import numpy as np\n'), ((4175, 4197), 'numpy.mean', 'np.mean', (['diff'], {'axis': '(-1)'}), '(diff, axis=-1)\n', (4182, 4197), True, 'import numpy as np\n'), ((4478, 4569), 'lib.losses.multiview_reprojection', 'losses.multiview_reprojection', ([], {'shape_points': '(2)', 'calibration': 'calibration', 'virtual_views': '(2)'}), '(shape_points=2, calibration=calibration,\n virtual_views=2)\n', (4507, 4569), True, 'import lib.losses as losses\n'), ((4603, 4621), 'tensorflow.keras.backend.variable', 'K.variable', (['y_true'], {}), '(y_true)\n', (4613, 4621), True, 'import tensorflow.keras.backend as K\n'), ((4623, 4641), 'tensorflow.keras.backend.variable', 'K.variable', (['y_pred'], {}), '(y_pred)\n', (4633, 4641), True, 'import tensorflow.keras.backend as K\n'), ((4730, 4772), 'numpy.reshape', 'np.reshape', (['y_pred[0, 7:7 + 2 * 3]', '(2, 3)'], {}), '(y_pred[0, 7:7 + 2 * 3], (2, 3))\n', (4740, 4772), True, 'import numpy as np\n'), ((4786, 4828), 'numpy.reshape', 'np.reshape', (['y_true[0, 7:7 + 2 * 3]', '(2, 3)'], {}), '(y_true[0, 7:7 + 2 * 3], (2, 3))\n', (4796, 4828), True, 'import numpy as np\n'), ((3304, 3322), 'numpy.abs', 'np.abs', (['(xt_gt - xt)'], {}), '(xt_gt - xt)\n', (3310, 3322), True, 'import numpy as np\n'), ((3953, 4031), 'lib.spatial_geometry.quaternion_translation_to_pose', 'spatial_geometry.quaternion_translation_to_pose', (['y_pred[0, :4]', 'y_pred[0, 4:7]'], {}), '(y_pred[0, :4], y_pred[0, 4:7])\n', (4000, 4031), True, 'import lib.spatial_geometry as spatial_geometry\n'), ((5546, 5579), 'numpy.matmul', 'np.matmul', (['pose_gt', 'pose_pred_inv'], {}), '(pose_gt, pose_pred_inv)\n', (5555, 
5579), True, 'import numpy as np\n'), ((6138, 6171), 'numpy.matmul', 'np.matmul', (['pose_gt', 'pose_pred_inv'], {}), '(pose_gt, pose_pred_inv)\n', (6147, 6171), True, 'import numpy as np\n')] |
import numpy as np
from ...core.likelihood import Likelihood
class BasicGravitationalWaveTransient(Likelihood):

    def __init__(self, interferometers, waveform_generator):
        """Simplest frequency-domain gravitational-wave transient likelihood.

        Computes the likelihood of the data given model parameters; no
        distance or phase marginalization is performed.

        Parameters
        ==========
        interferometers: list
            `bilby.gw.detector.Interferometer` instances carrying the strain
            data and power spectral densities.
        waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
            Produces the frequency-domain strain for a given parameter set.
        """
        super(BasicGravitationalWaveTransient, self).__init__(dict())
        self.interferometers = interferometers
        self.waveform_generator = waveform_generator

    def __repr__(self):
        template = '(interferometers={},\n\twaveform_generator={})'
        return self.__class__.__name__ + template.format(
            self.interferometers, self.waveform_generator)

    def noise_log_likelihood(self):
        """Real part of the log likelihood under the noise-only hypothesis.

        Returns
        =======
        float: The real part of the noise log likelihood
        """
        scale = 2. / self.waveform_generator.duration
        log_l = 0
        for interferometer in self.interferometers:
            # Whitened power of the data alone: sum over f of |d(f)|^2 / S_n(f).
            residual_power = np.sum(
                abs(interferometer.frequency_domain_strain) ** 2 /
                interferometer.power_spectral_density_array)
            log_l = log_l - scale * residual_power
        return log_l.real

    def log_likelihood(self):
        """Real part of the log likelihood of the data given the parameters.

        Returns
        =======
        float: The real part of the log likelihood
        """
        waveform_polarizations = \
            self.waveform_generator.frequency_domain_strain(
                self.parameters.copy())
        # The generator signals "no waveform for these parameters" with None;
        # treat that as (effectively) zero likelihood.
        if waveform_polarizations is None:
            return np.nan_to_num(-np.inf)
        log_l = 0
        for interferometer in self.interferometers:
            log_l = log_l + self.log_likelihood_interferometer(
                waveform_polarizations, interferometer)
        return log_l.real

    def log_likelihood_interferometer(self, waveform_polarizations,
                                      interferometer):
        """Log-likelihood contribution from a single detector.

        Parameters
        ==========
        waveform_polarizations: dict
            The desired waveform polarization modes and the related strain.
        interferometer: bilby.gw.detector.Interferometer
            The detector whose data is compared against the model.

        Returns
        =======
        float: The real part of the log-likelihood for this interferometer
        """
        signal_ifo = interferometer.get_detector_response(
            waveform_polarizations, self.parameters)
        residual = interferometer.frequency_domain_strain - signal_ifo
        # Whitened inner product <r, r/S_n>; np.vdot conjugates its first arg.
        log_l = - 2. / self.waveform_generator.duration * np.vdot(
            residual, residual / interferometer.power_spectral_density_array)
        return log_l.real
| [
"numpy.vdot",
"numpy.nan_to_num"
] | [((2103, 2125), 'numpy.nan_to_num', 'np.nan_to_num', (['(-np.inf)'], {}), '(-np.inf)\n', (2116, 2125), True, 'import numpy as np\n'), ((3052, 3223), 'numpy.vdot', 'np.vdot', (['(interferometer.frequency_domain_strain - signal_ifo)', '((interferometer.frequency_domain_strain - signal_ifo) / interferometer.\n power_spectral_density_array)'], {}), '(interferometer.frequency_domain_strain - signal_ifo, (\n interferometer.frequency_domain_strain - signal_ifo) / interferometer.\n power_spectral_density_array)\n', (3059, 3223), True, 'import numpy as np\n')] |
import numpy as np
def l21shrink(epsilon, x):
    """Column-wise l2,1 shrinkage (proximal operator of the l2,1 norm).

    Each column of ``x`` whose l2 norm exceeds ``epsilon`` is pulled towards
    zero along its own direction; columns with norm <= ``epsilon`` are zeroed
    out entirely.

    Args:
        epsilon: the shrinkage parameter (threshold on the column l2 norms)
        x: 2-D matrix to shrink on

    Ref:
        wiki Regularization: {https://en.wikipedia.org/wiki/Regularization_(mathematics)}

    Returns:
        The shrunk matrix (a new array; ``x`` is left unmodified)
    """
    output = x.copy()
    norm = np.linalg.norm(x, ord=2, axis=0)
    for i in range(x.shape[1]):
        if norm[i] > epsilon:
            # Vectorized over the whole column instead of a per-element
            # Python loop; norm[i] > epsilon >= 0 guards the division.
            output[:, i] = x[:, i] - epsilon * x[:, i] / norm[i]
        else:
            output[:, i] = 0.
    return output
"numpy.linalg.norm"
] | [((320, 352), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'ord': '(2)', 'axis': '(0)'}), '(x, ord=2, axis=0)\n', (334, 352), True, 'import numpy as np\n')] |
from styx_msgs.msg import TrafficLight
import rospy
import numpy as np
import os
import tensorflow as tf
from utilities import label_map_util
from utilities import visualization_utils as vis_util
import cv2
class TLClassifier(object):
    """Traffic-light color classifier backed by a frozen TensorFlow (1.x) SSD
    object-detection graph, with separate models for the simulator and the
    real site."""
    def __init__(self, is_site):
        """Load the frozen inference graph and cache its input/output tensors.

        Args:
            is_site (bool): True loads the real-world model, False the
                simulator model.
        """
        # set default value for no detection
        self.current_light = TrafficLight.UNKNOWN
        # Model files are located relative to this source file, not the CWD.
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        model_dir = curr_dir + '/../../../../image_classification_model/'
        if is_site:
            model = model_dir + '/protobuf/frozen_inference_graph_ssd_real.pb'
        else:
            model = model_dir + '/protobuf/frozen_inference_graph_ssd_sim.pb'
        num_classes = 4
        labels_file = model_dir + '/label_map.pbtxt'
        # Map numeric detection classes to display names (e.g. Red/Green/Yellow).
        label_map = label_map_util.load_labelmap(labels_file)
        categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes, use_display_name=True)
        self.category_index = label_map_util.create_category_index(categories)
        # Grow GPU memory on demand instead of reserving it all up front.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(model, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            self.sess = tf.Session(graph=self.detection_graph, config=config)
        # Input tensor
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the confidence level for each object
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # Last annotated frame, exposed for debugging/visualization by callers.
        self.image_classified = None
        print("Frozen graph loaded! model: {}".format(model))
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light; assumed to be
                in the channel order the model was trained on -- TODO confirm
                against the caller.
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        image_expanded = np.expand_dims(image, axis=0)
        with self.detection_graph.as_default():
            (boxes, scores, classes, num) = self.sess.run(
                [self.detection_boxes, self.detection_scores,
                 self.detection_classes, self.num_detections],
                feed_dict={self.image_tensor: image_expanded})
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes).astype(np.int32)
        min_score_thresh = 0.20
        # Sum detection scores per class: several weak detections of the same
        # color can outvote one stronger detection of another color.
        merged_scores = {}
        for i in range(len(classes)):
            class_id = classes[i]
            score = scores[i]
            if score > min_score_thresh:
                if class_id in merged_scores:
                    merged_scores[class_id] += score
                else:
                    merged_scores[class_id] = score
        # Find the class id of the max score
        max_score = 0.0
        selected_class_id = -1
        min_merged_score_thresh = .50
        for class_id, score in merged_scores.items():
            if score > max_score and score > min_merged_score_thresh:
                max_score = score
                selected_class_id = class_id
        class_name = 'UNKNOWN'
        self.current_light = TrafficLight.UNKNOWN
        if selected_class_id != -1:
            class_name = self.category_index[selected_class_id]['name']
            if class_name == 'Red':
                self.current_light = TrafficLight.RED
            elif class_name == 'Green':
                self.current_light = TrafficLight.GREEN
            elif class_name == 'Yellow':
                self.current_light = TrafficLight.YELLOW
        print('TL_CLassifier:: class_name: {}, max score: {}'.format(class_name, max_score))
        # Draw the detections onto the frame for downstream debugging.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image, boxes, classes, scores, self.category_index,
            use_normalized_coordinates=True,
            line_thickness=8)
        self.image_classified = image
        return self.current_light
| [
"utilities.label_map_util.load_labelmap",
"utilities.label_map_util.convert_label_map_to_categories",
"utilities.visualization_utils.visualize_boxes_and_labels_on_image_array",
"os.path.realpath",
"tensorflow.Session",
"numpy.expand_dims",
"tensorflow.ConfigProto",
"tensorflow.gfile.GFile",
"tensorf... | [((790, 831), 'utilities.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['labels_file'], {}), '(labels_file)\n', (818, 831), False, 'from utilities import label_map_util\n'), ((853, 967), 'utilities.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'num_classes', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n num_classes, use_display_name=True)\n', (899, 967), False, 'from utilities import label_map_util\n'), ((993, 1041), 'utilities.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1029, 1041), False, 'from utilities import label_map_util\n'), ((1060, 1076), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1074, 1076), True, 'import tensorflow as tf\n'), ((1156, 1166), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1164, 1166), True, 'import tensorflow as tf\n'), ((2622, 2651), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2636, 2651), True, 'import numpy as np\n'), ((2964, 2981), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (2974, 2981), True, 'import numpy as np\n'), ((2999, 3017), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3009, 3017), True, 'import numpy as np\n'), ((4374, 4535), 'utilities.visualization_utils.visualize_boxes_and_labels_on_image_array', 'vis_util.visualize_boxes_and_labels_on_image_array', (['image', 'boxes', 'classes', 'scores', 'self.category_index'], {'use_normalized_coordinates': '(True)', 'line_thickness': '(8)'}), '(image, boxes, classes,\n scores, self.category_index, use_normalized_coordinates=True,\n line_thickness=8)\n', (4424, 4535), True, 'from utilities import visualization_utils as vis_util\n'), ((399, 425), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (415, 425), False, 'import os\n'), ((1242, 1255), 
'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1253, 1255), True, 'import tensorflow as tf\n'), ((1502, 1555), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph', 'config': 'config'}), '(graph=self.detection_graph, config=config)\n', (1512, 1555), True, 'import tensorflow as tf\n'), ((1273, 1300), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model', '"""rb"""'], {}), "(model, 'rb')\n", (1287, 1300), True, 'import tensorflow as tf\n'), ((1434, 1476), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1453, 1476), True, 'import tensorflow as tf\n'), ((3036, 3055), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3046, 3055), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from scipy import integrate
import numpy as np
# Global matplotlib setup: style sheet plus the two colormaps used below.
# NOTE(review): 'paper' is not a built-in matplotlib style -- assumes a local
# style sheet is installed.
plt.style.use('paper')
reds = plt.get_cmap('Reds')
blues = plt.get_cmap('Blues')
class Constants:
    """Physical constants and unit-conversion factors.

    Suffix convention: ``_au`` = atomic units (Hartree-based), ``_SI`` = SI;
    other suffixes name the unit explicitly. Conversion factors are named
    ``x_to_y`` (multiply a value in x to obtain y).
    """
    hbar_au = 1.0
    h_au = hbar_au * 2.0 * np.pi
    kb_au = 3.1668114E-6 # hartrees K-1
    h_SI = 6.62607004E-34 # J s
    na = 6.02214086E23 # molecules mol-1
    kb_SI = 1.38064852E-23 # J K-1
    kb_kcalKmol = kb_SI / (4.184 * 1000) # kcal K-1 mol-1
    # NOTE(review): `n_a` duplicates `na` above with a slightly different
    # value; same for `h` vs `h_SI`. Consider consolidating.
    n_a = 6.022140857E23 # molecules mol-1
    h = 6.62607004E-34 # J s
    atm_to_pa = 101325 # Pa
    dm_to_m = 0.1 # m
    amu_to_kg = 1.660539040E-27 # Kg
    c = 299792458 # m s-1
    c_in_cm = c * 100 # cm s-1
    ang_to_m = 1E-10 # m
    ang_to_au = 1.88973 # au Å-1
    m_to_ang = 1E10 # Å
    m_to_bohr = 1.89e+10 # au m-1
    amu_to_au = 1822.888486 # m_e amu-1
    kj_mol_to_au = 0.00038087980 # Ha (kJ mol-1)-1
    kcal_mol_to_au = 0.001593601 # Ha (kcal mol-1)-1
    inverse_ang_inverse_au = 1.0 / 1.88973 # au-1 Å
    k_b = kb_kcalKmol * kcal_mol_to_au # kcal mol-1 K-1 -> Ha K-1
    r = k_b * n_a # Ha K-1
class Molecule:
    """Thermochemistry of one species: optimisation-level energies combined
    with a higher-level single-point electronic energy.

    Naming (presumably): ``rrho`` = rigid-rotor/harmonic-oscillator,
    ``qrrho`` = quasi-RRHO corrected. All energies in Hartree.
    """

    def __init__(self, e, h_qrrho, g_qrrho, e_sp, h_rrho, g_rrho):
        self._e = e            # electronic energy at the optimisation level
        self._h_qrrho = h_qrrho
        self._g_qrrho = g_qrrho
        self._e_sp = e_sp      # single-point electronic energy
        self._h_rrho = h_rrho
        self._g_rrho = g_rrho

    def _with_sp_correction(self, thermo):
        # Replace the optimisation-level electronic energy by the single-point one.
        return (thermo - self._e) + self._e_sp

    @property
    def g_igm(self):
        """Free energy (RRHO) shifted to the single-point level."""
        return self._with_sp_correction(self._g_rrho)

    @property
    def g_qrrho_ew(self):
        raise NotImplementedError

    @property
    def g_qrrho(self):
        """Free energy (quasi-RRHO) shifted to the single-point level."""
        return self._with_sp_correction(self._g_qrrho)

    @property
    def h_igm(self):
        """Enthalpy (RRHO) shifted to the single-point level."""
        return self._with_sp_correction(self._h_rrho)

    @property
    def h_qrrho(self):
        """Enthalpy (quasi-RRHO) shifted to the single-point level."""
        return self._with_sp_correction(self._h_qrrho)

    @property
    def de_opt_sp(self):
        """Single-point minus optimisation electronic energy."""
        return self._e_sp - self._e
class Reaction:
    """A 10-species set: monomer, octamer, then eight septamer conformers."""

    def __init__(self, *args):
        """
        Store the species in order:
            monomer, octamer, septamer1 ... septamer8
        :param args: objects exposing ``g_<method>`` / ``h_<method>`` in Hartree.
        """
        self.items = args

    def delta_energies(self, method='igm', ss='1M'):
        """Return (mean ∆G, std err, mean -T∆S, std err) in kcal mol-1,
        averaged over the eight septamer conformers, with a standard-state
        correction set by ``ss`` ('1M' or 'liq')."""
        hartree_to_kcal = 627.509

        def conformer_deltas(prefix):
            # ∆X_i = X(septamer_i) + X(monomer) - X(octamer), in kcal mol-1
            attr = f'{prefix}_{method}'
            mono = getattr(self.items[0], attr)
            octa = getattr(self.items[1], attr)
            return np.array([hartree_to_kcal * (getattr(self.items[i], attr)
                                          + mono - octa)
                      for i in range(2, 10)])

        dgs = conformer_deltas('g')
        dhs = conformer_deltas('h')
        dsts = dgs - dhs  # -T∆S / kcal mol-1
        rt = (8.3145 * 298.15) / (1000 * 4.184)  # kcal mol-1
        if ss == '1M':
            correction_kcal = rt * np.log(40605 / 1661)
        elif ss == 'liq':
            def eff_vol(molarity):
                """Effective volume in molecules Å-3"""
                return molarity * 0.001 * 1E30 / 6.0221409E23
            correction_kcal = rt * (np.log(40605 / eff_vol(molarity=55.5 / 7))
                                    + np.log(40605 / eff_vol(molarity=55.5))
                                    - np.log(40605 / eff_vol(molarity=55.5 / 8)))
        else:
            raise NotImplementedError
        dgs = dgs + correction_kcal
        dsts = dsts + correction_kcal
        # Standard error: population std divided by sqrt(N - 1).
        sem_divisor = np.sqrt(len(dgs) - 1)
        return (np.mean(dgs),
                np.std(dgs) / sem_divisor,
                np.mean(dsts),
                np.std(dsts) / sem_divisor)
def plot_dG():
    """Bar chart of mean ∆G (with error bars) on the module-level axes ``ax``,
    one pair of blue/red bars per entry of ``delta_es``."""
    bar_positions = [0, 0.5, 1.5, 2.0, 3.0, 3.5]
    bar_colors = [blues(0.4), reds(0.4),
                  blues(0.5), reds(0.5),
                  blues(0.6), reds(0.6)]
    heights = [entry[0] for entry in delta_es]
    errors = [entry[1] for entry in delta_es]
    ax.bar(x=bar_positions,
           width=0.5,
           height=heights,
           yerr=errors,
           linewidth=0.5,
           color=bar_colors,
           edgecolor='darkgray',
           capsize=2)
    ax.set_ylabel('∆$G$ / kcal mol$^{-1}$')
def plot_dS():
    """Hatched bar chart of mean -T∆S (with error bars) on the module-level
    axes ``ax``, labelled on the right-hand side."""
    bar_positions = [0, 0.5, 1.5, 2.0, 3.0, 3.5]
    bar_colors = [blues(0.4), reds(0.4),
                  blues(0.5), reds(0.5),
                  blues(0.6), reds(0.6)]
    heights = [entry[2] for entry in delta_es]
    errors = [entry[3] for entry in delta_es]
    ax.bar(x=bar_positions,
           width=0.5,
           height=heights,
           yerr=errors,
           linewidth=0.5,
           color=bar_colors,
           hatch='/',
           capsize=2)
    # Entropy axis goes on the right so it can share a figure with plot_dG.
    ax.yaxis.set_ticks_position("right")
    ax.yaxis.set_label_position("right")
    ax.set_ylabel('-T∆$S$ / kcal mol$^{-1}$')
if __name__ == '__main__':
    # Each Molecule is (E_opt, H_qrrho, G_qrrho, E_sp, H_rrho, G_rrho), all in
    # Hartree; order is monomer, octamer, then the eight septamer conformers.
    smd = Reaction(Molecule(-76.288703443144, -76.26360045460233, -76.28514535940438, -76.350985605823 , -76.26360045460233, -76.28514535919648),
                   Molecule(-610.433983658315, -610.2140982219828, -610.277280121547, -610.840350617938, -610.2140982219828, -610.281451708829),
                   Molecule(-534.12940950742, -533.9371068609033, -533.9942137795163, -534.483816573074, -533.9371068609033, -533.996445983519),
                   Molecule(-534.12912670771, -533.9377740081061, -533.9935062669732, -534.484124253745, -533.9377740081061, -533.9953996468886),
                   Molecule(-534.126645015954, -533.935632927275, -533.9921377225431, -534.484446846104, -533.935632927275, -533.9948616348487),
                   Molecule(-534.123952514425, -533.9321026652082, -533.9909755630321, -534.483679411155, -533.9321026652082, -533.9952296133653),
                   Molecule(-534.122528497788, -533.9317335339696, -533.9900214595227, -534.482666928205, -533.9317335339696, -533.9941873317987),
                   Molecule(-534.129192138055, -533.9381699483047, -533.9939597762663, -534.485678939561, -533.9381699483047, -533.9964300553619),
                   Molecule(-534.121709571807, -533.9317638621948, -533.9872949274167, -534.479553024392, -533.9317638621948, -533.9893161318665),
                   Molecule(-534.128584701367, -533.9374692734386, -533.9935968301443, -534.484199274648, -533.9374692734386, -533.9962259260279))
    for i in range(8):
        print('∆E_opt->sp =',
              smd.items[i+2].de_opt_sp + smd.items[0].de_opt_sp - smd.items[1].de_opt_sp)
    # NOTE(review): the exit() below makes everything after it dead code --
    # this script is run in stages by commenting the exits in/out.
    exit()
    # ∆G ±
    print(smd.delta_energies(method='qrrho', ss='liq')[:2])
    exit()
    # -------------------------- Plot --------------------------------
    fig, ax = plt.subplots()
    # NOTE(review): ss='1atm' is not handled by delta_energies ('1M'/'liq'
    # only) and would raise NotImplementedError if this block were reached.
    delta_es = [smd.delta_energies(method='igm', ss='1atm'),
                smd.delta_energies(method='igm', ss='liq'),
                smd.delta_energies(method='qrrho', ss='liq')]
    # plot_dG()
    # plot_dS()
    ax.plot([-0.5, 4], [0, 0], c='k', lw=1.5)
    ax.set_xticks([])
    plt.xlim(-0.5, 4)
    plt.ylim(-12, 10)
    plt.tight_layout()
    plt.savefig('water_in_water_TdS.pdf')
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.get_cmap",
"numpy.log",
"matplotlib.pyplot.ylim",
"numpy.std",
"matplotlib.pyplot.style.use",
"numpy.mean",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((79, 101), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""paper"""'], {}), "('paper')\n", (92, 101), True, 'import matplotlib.pyplot as plt\n'), ((109, 129), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Reds"""'], {}), "('Reds')\n", (121, 129), True, 'import matplotlib.pyplot as plt\n'), ((138, 159), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (150, 159), True, 'import matplotlib.pyplot as plt\n'), ((6800, 6814), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6812, 6814), True, 'import matplotlib.pyplot as plt\n'), ((7106, 7123), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', '(4)'], {}), '(-0.5, 4)\n', (7114, 7123), True, 'import matplotlib.pyplot as plt\n'), ((7128, 7145), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-12)', '(10)'], {}), '(-12, 10)\n', (7136, 7145), True, 'import matplotlib.pyplot as plt\n'), ((7151, 7169), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7167, 7169), True, 'import matplotlib.pyplot as plt\n'), ((7174, 7211), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""water_in_water_TdS.pdf"""'], {}), "('water_in_water_TdS.pdf')\n", (7185, 7211), True, 'import matplotlib.pyplot as plt\n'), ((3598, 3610), 'numpy.mean', 'np.mean', (['dgs'], {}), '(dgs)\n', (3605, 3610), True, 'import numpy as np\n'), ((3682, 3695), 'numpy.mean', 'np.mean', (['dsts'], {}), '(dsts)\n', (3689, 3695), True, 'import numpy as np\n'), ((3030, 3050), 'numpy.log', 'np.log', (['(40605 / 1661)'], {}), '(40605 / 1661)\n', (3036, 3050), True, 'import numpy as np\n'), ((3628, 3639), 'numpy.std', 'np.std', (['dgs'], {}), '(dgs)\n', (3634, 3639), True, 'import numpy as np\n'), ((3713, 3725), 'numpy.std', 'np.std', (['dsts'], {}), '(dsts)\n', (3719, 3725), True, 'import numpy as np\n')] |
import numpy as np
import random
import matplotlib.pyplot as plt
import time
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
# from p5 import Vector, stroke, circle
import warnings
# Escalate every warning (e.g. numpy runtime warnings) to an exception so
# numerical problems in the simulation fail loudly instead of passing silently.
warnings.simplefilter('error')
class Boid(object):
    """2-D boid (flocking) simulator with repulsion / orientation / attraction
    zones and a hard per-step limit on the turning angle.

    Positions and velocities are 2xN arrays (one column per boid); the arena
    is a square of half-width ``width`` with elastically reflecting walls.
    """
    def __init__(self, n_boids, width, delta_T, r_orientation= 10, noise_var=0.0):
        # self.max_force = 0.3
        self.speed = 1
        self.r_repulsion = 0.5 # 2
        # self.r_repulsion2 = 5
        self.r_orientation = r_orientation # 5/10
        self.r_attraction = 7.5 # 10 (all) default:15
        self.n_boids = n_boids
        self._delta_T = delta_T
        self.beta = 0.8727*0.6*self._delta_T # Maximum turning angle # 3*np.pi#
        self.width = width
        self.noise_var = noise_var
        # self.length = 1 # length of boids 0.5
    # Dead code kept for reference: an earlier constructor variant.
    '''def __init__(
        self,
        box_size=5.0,
        loc_std=0.5,
        vel_norm=0.5,
        noise_var=0.0,
    ):
        self.n_boids = n_boids
        self.box_size = box_size
        self.loc_std = loc_std
        self.vel_norm = vel_norm
        self.noise_var = noise_var
        self._boid_types = np.array([0.0, 0.5, 1.0])
        # self._max_F = 0.1 / self._delta_T'''
    def _clamp(self, loc, vel):
        """
        Reflect boids off the square arena walls (in place).

        :param loc: 2xN location at one time stamp
        :param vel: 2xN velocity at one time stamp
        :return: location and velocity after hitting walls and returning after
        elastically colliding with walls
        """
        assert np.all(loc < self.width * 3)
        assert np.all(loc > -self.width * 3)
        over = loc > self.width
        loc[over] = 2 * self.width - loc[over]
        assert np.all(loc <= self.width)
        # assert(np.all(vel[over]>0))
        vel[over] = -np.abs(vel[over])
        under = loc < -self.width
        loc[under] = -2 * self.width - loc[under]
        # assert (np.all(vel[under] < 0))
        assert np.all(loc >= -self.width)
        vel[under] = np.abs(vel[under])
        return loc, vel
    def get_edges(self,args):
        """Sample an NxN interaction matrix with a zero diagonal.

        Values: {-1, 0, 1} when ``args.partial and args.avoid``; {0, 1} when
        only ``args.partial``; all ones otherwise.
        """
        if args.partial and args.avoid:
            edges = np.random.choice(
                np.array([-1., 0., 1.]), size=(self.n_boids, self.n_boids), p=np.array([1/5, 2/5, 2/5])
            )
        elif args.partial and not args.avoid:
            ratio = 3/4 ; value_neg = 0
            edges = np.random.choice(
                np.array([value_neg, 1.0]), size=(self.n_boids, self.n_boids), p=np.array([1-ratio, ratio])
            )
        else:
            edges = np.ones((self.n_boids, self.n_boids))
        np.fill_diagonal(edges, 0)
        return edges
    def sample_trajectory(
        self,
        args, i_sim,
        T=10000,
        sample_freq=1,
        edges=None,
    ):
        """Simulate trajectories, one rollout per candidate intervention time
        ``tt`` (the intervention widens the orientation radius to ``r_o2``).

        :param args: options namespace; fields read here and in helpers:
            ver, train, bat, partial, avoid, n_boids.
        :param i_sim: simulation index (unused in this method).
        :param T: number of raw integration steps per rollout.
        :param sample_freq: keep every ``sample_freq``-th step.
        :param edges: optional NxN interaction matrix; regenerated when None
            or when ``args.bat`` is set.
        :return: (loc, vel, phis, edges, treatment) where loc/vel have shape
            (Tc_range+T_zero, T_save, 2, n), phis (..., 1, n), treatment
            (..., T_save, 1) marks post-intervention steps with 1.
        """
        n = self.n_boids
        assert T % sample_freq == 0
        # T_save = int(T / sample_freq )
        T_save = int(T / sample_freq - 1)
        # change r_o
        self.r_o2 = 4
        burn_in = 20 if args.ver == 1 else 10
        burn_in_ = burn_in
        Tc_range = 5
        Tc_range_ = Tc_range
        T_zero = 1
        if args.train==1: #
            # Training: pick a single random intervention time; burn_in == 999
            # encodes "no intervention in this sample".
            burn_in = np.random.choice(range(5,burn_in_+Tc_range+1))
            #burn_in = np.random.choice(range(burn_in,burn_in+Tc_range+2))
            if burn_in >= burn_in_+Tc_range:
                burn_in = 999
                Tc_range = 1; T_zero = 0
        if edges is None or args.bat:
            edges = self.get_edges(args)
        sd = 0.2
        # Per-boid jitter on the interaction radii.
        # NOTE(review): `speed` is computed but never used below (the
        # velocity initialisation uses self.speed directly).
        speed = self.speed * sd*np.random.rand(n)
        r_r = self.r_repulsion + sd*np.random.rand(n)
        # r_r2 = self.r_repulsion2 + sd*np.random.rand(n)
        r_r2 = None
        if self.r_repulsion == self.r_orientation:
            r_o1 = r_r
        else:
            r_o1 = self.r_orientation + sd*np.random.rand(n)/10
        r_o2 = r_o1 + (self.r_o2 - self.r_orientation) # + sd*np.random.rand(n)/10
        r_a = self.r_attraction + sd*np.random.rand(n)
        beta = self.beta
        # Initialize location and velocity
        loc = np.zeros((Tc_range+T_zero,T_save, 2, n))
        vel = np.zeros((Tc_range+T_zero,T_save, 2, n))
        phis = np.zeros((Tc_range+T_zero,T_save, 1, n))
        edges_res = np.zeros((Tc_range+T_zero,T_save, n, n))
        edges_all = np.zeros((Tc_range+T_zero,T, n, n))
        treatment = np.zeros((Tc_range+T_zero,T_save, 1))
        if args.train==1:
            if burn_in < 999:
                treatment[0,burn_in:] = 1
        else:
            for t in range(Tc_range):
                if t <= Tc_range-1:
                    treatment[t,burn_in+t:] = 1
        # treatment[:,:,0].T
        # Initial state: boids on a ring of radius 3-6, heading tangentially.
        rand_p = np.random.randn(1, n) * 2* np.pi
        loc_next_ = (3 + 3 * np.random.rand(2, n)) * np.array([np.cos(rand_p),np.sin(rand_p)]).squeeze() # 6
        # NOTE(review): bare except + pdb.set_trace() (here and below) will
        # hang non-interactive runs; this is debugging scaffolding.
        try: vel_next_ = np.array([-np.sin(rand_p),np.cos(rand_p)]).squeeze() * self.speed # speed
        except: import pdb; pdb.set_trace()
        loc_next_ += vel_next_
        phi_next = rand_p
        # loc[0, :, :], vel[0, :, :] = self._clamp(loc_next, vel_next)
        res_prev = np.zeros((n,n))
        # run
        ttt = 0; angvel_prev = 0
        #if args.train==1:
        #    import pdb; pdb.set_trace()
        #else:
        start_tt = burn_in
        end_tt = burn_in+Tc_range+T_zero
        # One rollout per candidate intervention time tt, each restarting
        # from the same initial state.
        for tt in range(start_tt,end_tt):#+1
            loc_next = loc_next_.copy()
            vel_next = vel_next_.copy()
            counter = 0
            counter2 = 0
            r_o = r_o1
            for t in range(1, T):
                # From step tt on, switch to the wider orientation radius.
                if t >= tt and tt < burn_in_+Tc_range_:
                    r_o = r_o2
                res = np.zeros((n,n))
                loc_next, vel_next = self._clamp(loc_next, vel_next)
                if t % sample_freq == 0:
                    loc[ttt,counter, :, :], vel[ttt,counter, :, :] = loc_next, vel_next
                    phis[ttt,counter, :, :] = phi_next
                    counter += 1
                # apply_behavior
                di = np.zeros((2,n))
                vel_ = vel_next.copy()
                for i in range(n):
                    # Repulsion wins outright; otherwise blend orientation
                    # and attraction steering.
                    di[:,i], repul_flag, res = self.repulsion(loc_next, vel_next, i, edges, r_r, r_r2, res, args)
                    if not repul_flag:
                        di[:,i], oa_flag, res = self.orient_attract(loc_next, vel_next, i, edges, r_r, r_o, r_a, res, res_prev, args)
                    # turn angle limitation
                    signum = np.sign(vel_next[0,i]*di[1,i]-di[0,i]*vel_next[1,i]) # Compute direction of the angle
                    dotprod = np.dot(di[:,i],vel_next[:,i]) # Dotproduct (needed for angle)
                    if np.linalg.norm(di[:,i]) > 1e-10:
                        cos_theta = dotprod/np.linalg.norm(di[:,i])/np.linalg.norm(vel_next[:,i])
                        if np.abs(cos_theta) <= 1:
                            try: phi = np.real(signum*np.arccos(cos_theta)) # Compute angle
                            except: import pdb; pdb.set_trace()
                        else:
                            phi = 0.0
                    else:
                        phi = 0.0
                    # phi += 0.001*np.random.rand()
                    # Rotate the velocity by phi, clipped to +/- beta.
                    try:
                        if abs(phi) <= beta:
                            vel_[:,i] = np.matmul(np.array([[np.cos(phi),-np.sin(phi)],[np.sin(phi),np.cos(phi)]]),vel_next[:,i])
                            phi_next = phi
                        elif phi < beta:
                            vel_[:,i] = np.matmul(np.array([[np.cos(-beta),-np.sin(-beta)],[np.sin(-beta),np.cos(-beta)]]),vel_next[:,i])
                            phi_next = -beta
                        else:
                            vel_[:,i] = np.matmul(np.array([[np.cos(beta),-np.sin(beta)],[np.sin(beta),np.cos(beta)]]),vel_next[:,i])
                            phi_next = beta
                    except: import pdb; pdb.set_trace()
                # update
                loc_next += vel_next*self._delta_T
                vel_next = vel_
                try: edges_res[ttt,counter2] += res/sample_freq
                except: import pdb; pdb.set_trace()
                edges_all[ttt,t] = res
                res_prev = res.copy()
                if t % sample_freq == sample_freq-1 and t > sample_freq:
                    counter2 += 1
            # Rotation diagnostic: mean of cross(r - centroid, v) over the
            # post-intervention window (named "angular velocity" here).
            center = np.mean(loc[ttt],2)
            vec_ic = loc[ttt] - center[:,:,np.newaxis].repeat(args.n_boids,2)
            angvel = np.cross(vec_ic,vel[ttt],axis=1)
            try: angvel = np.mean(angvel[burn_in_+Tc_range_:])
            except: import pdb; pdb.set_trace()
            if tt < burn_in_+Tc_range_ and burn_in < 999:
                print("intervention: {}, Ang vel: {:.3f}".format(tt, angvel))
                # print('intervention: '+str(tt))
            else:
                print("no intervention: {}, Ang vel: {:.3f}".format(tt, angvel))
            #if tt >=21: #and args.train <= 0:
            #    import pdb; pdb.set_trace()
            ttt += 1
            angvel_prev = angvel
        # NOTE(review): `edges_result` and `angvel_prev` are assigned but
        # never used.
        edges_result = edges
        #if args.train == 0:
        #    import pdb; pdb.set_trace()
        return loc, vel, phis, edges, treatment
    def repulsion(self, loc, vel, i, edges, r, r2, res, args):
        """Steer boid ``i`` away from connected neighbours closer than its
        repulsion radius ``r[i]`` (``r2`` applies to edges != 1 when
        ``args.avoid`` is set).

        :return: (steering vector, True if any neighbour repelled, updated res)
        """
        total = 0
        avg_vector = np.zeros(2)
        for j in range(self.n_boids):
            if args.avoid:
                flag = (edges[i,j] != 0)
                r_ = r if edges[i,j] == 1 else r2
            else:
                flag = (edges[i,j] == 1) # (i != j) if args.avoid_all else
                r_ = r
            if flag:
                distance = np.linalg.norm(loc[:,j] - loc[:,i])
                if distance < r_[i]:
                    diff = loc[:,j] - loc[:,i]
                    diff /= distance
                    avg_vector += diff
                    total += 1
                    res[i,j] += -1
        if total > 0:
            steering = -avg_vector/total
            repul_flag = True
        else:
            steering = avg_vector
            repul_flag = False
        return steering, repul_flag, res
    def orient_attract(self, loc, vel, i, edges, r_r, r_o, r_a, res, res_prev, args):
        """Orientation/attraction steering for boid ``i``: align with
        neighbours in the shell [r_r, r_o) and steer towards the centre of
        mass of those in [r_o, r_a). ``res`` encodes the interaction type per
        pair; ``res_prev`` tags transitions into the orientation zone.

        :return: (steering vector, number of contributing neighbours, updated res)
        """
        total_o = 0
        avg_vector = np.zeros(2)
        total_a = 0
        center_of_mass = np.zeros(2)
        for j in range(self.n_boids):
            if edges[i,j] == 1:
                try: dist = np.linalg.norm(loc[:,j] - loc[:,i])
                except: import pdb; pdb.set_trace()
                if dist >= r_r[i] and dist < r_o[i]: # orientation
                    avg_vector += vel[:,j]
                    total_o += 1
                    if res_prev[i,j]==1: # attraction -> orientation
                        res[i,j] += -0.5
                    elif res_prev[i,j]==-1: # repulsion -> orientation
                        res[i,j] += 0.5
                    else: # initial or continuing or unknown
                        res[i,j] += 0
                elif dist >= r_o[i] and dist < r_a[i]: # attraction
                    center_of_mass += loc[:,j]
                    total_a += 1
                    res[i,j] += 1
        if total_o > 0:
            steering_o = avg_vector/total_o
        else:
            steering_o = avg_vector
        if total_a > 0:
            center_of_mass /= total_a
            vec_to_com = center_of_mass - loc[:,i]
            if np.linalg.norm(vec_to_com) > 0:
                vec_to_com = (vec_to_com / np.linalg.norm(vec_to_com))
            steering_a = vec_to_com
        else:
            steering_a = np.zeros(2)
        if total_o > 0 and total_a > 0:
            steering = (steering_o + steering_a)/2
        else:
            steering = steering_o + steering_a
        return steering, (total_o + total_a), res
| [
"numpy.fill_diagonal",
"numpy.abs",
"warnings.simplefilter",
"numpy.random.randn",
"numpy.zeros",
"numpy.cross",
"numpy.ones",
"numpy.arccos",
"numpy.mean",
"numpy.array",
"pdb.set_trace",
"numpy.linalg.norm",
"numpy.sign",
"numpy.random.rand",
"numpy.dot",
"numpy.cos",
"numpy.sin",
... | [((221, 251), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (242, 251), False, 'import warnings\n'), ((1557, 1585), 'numpy.all', 'np.all', (['(loc < self.width * 3)'], {}), '(loc < self.width * 3)\n', (1563, 1585), True, 'import numpy as np\n'), ((1601, 1630), 'numpy.all', 'np.all', (['(loc > -self.width * 3)'], {}), '(loc > -self.width * 3)\n', (1607, 1630), True, 'import numpy as np\n'), ((1726, 1751), 'numpy.all', 'np.all', (['(loc <= self.width)'], {}), '(loc <= self.width)\n', (1732, 1751), True, 'import numpy as np\n'), ((1972, 1998), 'numpy.all', 'np.all', (['(loc >= -self.width)'], {}), '(loc >= -self.width)\n', (1978, 1998), True, 'import numpy as np\n'), ((2020, 2038), 'numpy.abs', 'np.abs', (['vel[under]'], {}), '(vel[under])\n', (2026, 2038), True, 'import numpy as np\n'), ((2620, 2646), 'numpy.fill_diagonal', 'np.fill_diagonal', (['edges', '(0)'], {}), '(edges, 0)\n', (2636, 2646), True, 'import numpy as np\n'), ((4071, 4114), 'numpy.zeros', 'np.zeros', (['(Tc_range + T_zero, T_save, 2, n)'], {}), '((Tc_range + T_zero, T_save, 2, n))\n', (4079, 4114), True, 'import numpy as np\n'), ((4126, 4169), 'numpy.zeros', 'np.zeros', (['(Tc_range + T_zero, T_save, 2, n)'], {}), '((Tc_range + T_zero, T_save, 2, n))\n', (4134, 4169), True, 'import numpy as np\n'), ((4182, 4225), 'numpy.zeros', 'np.zeros', (['(Tc_range + T_zero, T_save, 1, n)'], {}), '((Tc_range + T_zero, T_save, 1, n))\n', (4190, 4225), True, 'import numpy as np\n'), ((4243, 4286), 'numpy.zeros', 'np.zeros', (['(Tc_range + T_zero, T_save, n, n)'], {}), '((Tc_range + T_zero, T_save, n, n))\n', (4251, 4286), True, 'import numpy as np\n'), ((4304, 4342), 'numpy.zeros', 'np.zeros', (['(Tc_range + T_zero, T, n, n)'], {}), '((Tc_range + T_zero, T, n, n))\n', (4312, 4342), True, 'import numpy as np\n'), ((4360, 4400), 'numpy.zeros', 'np.zeros', (['(Tc_range + T_zero, T_save, 1)'], {}), '((Tc_range + T_zero, T_save, 1))\n', (4368, 4400), True, 'import numpy 
as np\n'), ((5116, 5132), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (5124, 5132), True, 'import numpy as np\n'), ((9696, 9707), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (9704, 9707), True, 'import numpy as np\n'), ((10645, 10656), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (10653, 10656), True, 'import numpy as np\n'), ((10702, 10713), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (10710, 10713), True, 'import numpy as np\n'), ((1812, 1829), 'numpy.abs', 'np.abs', (['vel[over]'], {}), '(vel[over])\n', (1818, 1829), True, 'import numpy as np\n'), ((3543, 3560), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (3557, 3560), True, 'import numpy as np\n'), ((8722, 8742), 'numpy.mean', 'np.mean', (['loc[ttt]', '(2)'], {}), '(loc[ttt], 2)\n', (8729, 8742), True, 'import numpy as np\n'), ((8841, 8875), 'numpy.cross', 'np.cross', (['vec_ic', 'vel[ttt]'], {'axis': '(1)'}), '(vec_ic, vel[ttt], axis=1)\n', (8849, 8875), True, 'import numpy as np\n'), ((11975, 11986), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (11983, 11986), True, 'import numpy as np\n'), ((2190, 2216), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (2198, 2216), True, 'import numpy as np\n'), ((2573, 2610), 'numpy.ones', 'np.ones', (['(self.n_boids, self.n_boids)'], {}), '((self.n_boids, self.n_boids))\n', (2580, 2610), True, 'import numpy as np\n'), ((3598, 3615), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (3612, 3615), True, 'import numpy as np\n'), ((3969, 3986), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (3983, 3986), True, 'import numpy as np\n'), ((4683, 4704), 'numpy.random.randn', 'np.random.randn', (['(1)', 'n'], {}), '(1, n)\n', (4698, 4704), True, 'import numpy as np\n'), ((4953, 4968), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4966, 4968), False, 'import pdb\n'), ((5676, 5692), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (5684, 5692), True, 
'import numpy as np\n'), ((6073, 6089), 'numpy.zeros', 'np.zeros', (['(2, n)'], {}), '((2, n))\n', (6081, 6089), True, 'import numpy as np\n'), ((8900, 8938), 'numpy.mean', 'np.mean', (['angvel[burn_in_ + Tc_range_:]'], {}), '(angvel[burn_in_ + Tc_range_:])\n', (8907, 8938), True, 'import numpy as np\n'), ((10029, 10066), 'numpy.linalg.norm', 'np.linalg.norm', (['(loc[:, j] - loc[:, i])'], {}), '(loc[:, j] - loc[:, i])\n', (10043, 10066), True, 'import numpy as np\n'), ((11796, 11822), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_to_com'], {}), '(vec_to_com)\n', (11810, 11822), True, 'import numpy as np\n'), ((2252, 2283), 'numpy.array', 'np.array', (['[1 / 5, 2 / 5, 2 / 5]'], {}), '([1 / 5, 2 / 5, 2 / 5])\n', (2260, 2283), True, 'import numpy as np\n'), ((2433, 2459), 'numpy.array', 'np.array', (['[value_neg, 1.0]'], {}), '([value_neg, 1.0])\n', (2441, 2459), True, 'import numpy as np\n'), ((4745, 4765), 'numpy.random.rand', 'np.random.rand', (['(2)', 'n'], {}), '(2, n)\n', (4759, 4765), True, 'import numpy as np\n'), ((6545, 6607), 'numpy.sign', 'np.sign', (['(vel_next[0, i] * di[1, i] - di[0, i] * vel_next[1, i])'], {}), '(vel_next[0, i] * di[1, i] - di[0, i] * vel_next[1, i])\n', (6552, 6607), True, 'import numpy as np\n'), ((6661, 6693), 'numpy.dot', 'np.dot', (['di[:, i]', 'vel_next[:, i]'], {}), '(di[:, i], vel_next[:, i])\n', (6667, 6693), True, 'import numpy as np\n'), ((8969, 8984), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8982, 8984), False, 'import pdb\n'), ((10814, 10851), 'numpy.linalg.norm', 'np.linalg.norm', (['(loc[:, j] - loc[:, i])'], {}), '(loc[:, j] - loc[:, i])\n', (10828, 10851), True, 'import numpy as np\n'), ((11871, 11897), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_to_com'], {}), '(vec_to_com)\n', (11885, 11897), True, 'import numpy as np\n'), ((2498, 2526), 'numpy.array', 'np.array', (['[1 - ratio, ratio]'], {}), '([1 - ratio, ratio])\n', (2506, 2526), True, 'import numpy as np\n'), ((3827, 3844), 'numpy.random.rand', 
'np.random.rand', (['n'], {}), '(n)\n', (3841, 3844), True, 'import numpy as np\n'), ((6747, 6771), 'numpy.linalg.norm', 'np.linalg.norm', (['di[:, i]'], {}), '(di[:, i])\n', (6761, 6771), True, 'import numpy as np\n'), ((8498, 8513), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8511, 8513), False, 'import pdb\n'), ((10886, 10901), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10899, 10901), False, 'import pdb\n'), ((4779, 4793), 'numpy.cos', 'np.cos', (['rand_p'], {}), '(rand_p)\n', (4785, 4793), True, 'import numpy as np\n'), ((4794, 4808), 'numpy.sin', 'np.sin', (['rand_p'], {}), '(rand_p)\n', (4800, 4808), True, 'import numpy as np\n'), ((6848, 6878), 'numpy.linalg.norm', 'np.linalg.norm', (['vel_next[:, i]'], {}), '(vel_next[:, i])\n', (6862, 6878), True, 'import numpy as np\n'), ((6905, 6922), 'numpy.abs', 'np.abs', (['cos_theta'], {}), '(cos_theta)\n', (6911, 6922), True, 'import numpy as np\n'), ((8253, 8268), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8266, 8268), False, 'import pdb\n'), ((4876, 4890), 'numpy.cos', 'np.cos', (['rand_p'], {}), '(rand_p)\n', (4882, 4890), True, 'import numpy as np\n'), ((6824, 6848), 'numpy.linalg.norm', 'np.linalg.norm', (['di[:, i]'], {}), '(di[:, i])\n', (6838, 6848), True, 'import numpy as np\n'), ((4861, 4875), 'numpy.sin', 'np.sin', (['rand_p'], {}), '(rand_p)\n', (4867, 4875), True, 'import numpy as np\n'), ((7070, 7085), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7083, 7085), False, 'import pdb\n'), ((6984, 7004), 'numpy.arccos', 'np.arccos', (['cos_theta'], {}), '(cos_theta)\n', (6993, 7004), True, 'import numpy as np\n'), ((7496, 7507), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7502, 7507), True, 'import numpy as np\n'), ((7523, 7534), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (7529, 7534), True, 'import numpy as np\n'), ((7535, 7546), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7541, 7546), True, 'import numpy as np\n'), ((7509, 7520), 'numpy.sin', 'np.sin', 
(['phi'], {}), '(phi)\n', (7515, 7520), True, 'import numpy as np\n'), ((7787, 7800), 'numpy.cos', 'np.cos', (['(-beta)'], {}), '(-beta)\n', (7793, 7800), True, 'import numpy as np\n'), ((7818, 7831), 'numpy.sin', 'np.sin', (['(-beta)'], {}), '(-beta)\n', (7824, 7831), True, 'import numpy as np\n'), ((7832, 7845), 'numpy.cos', 'np.cos', (['(-beta)'], {}), '(-beta)\n', (7838, 7845), True, 'import numpy as np\n'), ((8096, 8108), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (8102, 8108), True, 'import numpy as np\n'), ((8125, 8137), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (8131, 8137), True, 'import numpy as np\n'), ((8138, 8150), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (8144, 8150), True, 'import numpy as np\n'), ((7802, 7815), 'numpy.sin', 'np.sin', (['(-beta)'], {}), '(-beta)\n', (7808, 7815), True, 'import numpy as np\n'), ((8110, 8122), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (8116, 8122), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Plot fine-tuned accuracy against kept-parameter ratio for several pruning
# runs, plus the "dynamic width" target curve.
results = []
for line in open("results_target.txt","r"):
    # SECURITY NOTE(review): eval() executes arbitrary code from the file;
    # presumably each line is an expression like "results.append({...})".
    # Only safe for trusted, locally generated files.
    eval(line)
results_target = results
x_targets = []
y_targets = []
for line in results_target:
    # Final fine-tuned accuracy for each target width.
    x_targets.append(line["target"])
    y_targets.append(line["perfs_ft"][-1])
# Sort the target curve by x so the line plot is monotone in x.
indices = np.argsort(x_targets)
x_targets = np.take_along_axis(np.asarray(x_targets), indices, axis=0)
y_targets = np.take_along_axis(np.asarray(y_targets), indices, axis=0)
results = []
for line in open("results.txt","r"):
    eval(line)
# Prune fractions; plotted as kept ratio (1 - prune), ascending.
prunes = [0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999]
prunes.reverse()
prunes = 1 - np.asarray(prunes)
plt.plot(x_targets, y_targets, label = "Dynamic width")
for i in range(len(results)):
    # Reverse to match the ascending kept-ratio axis.
    y = (results[i]["perfs_ft"])
    y.reverse()
    if results[i]["a"] == -1:
        label = "wd = " + str(results[i]["wd"]) + " baseline"
    else:
        label = "wd = " + str(results[i]["wd"]) + ", a = " + str(results[i]["a"]) + ", width = " + str(results[i]["width"])
    plt.plot(prunes, y, label = label)
plt.xlabel("Kept ratio")
plt.ylabel("Accuracy")
plt.xscale("log")
plt.legend(bbox_to_anchor=(1.05, 1))
#plt.tight_layout()
#plt.legend()
plt.show()
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.legend",
"numpy.argsort",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((298, 319), 'numpy.argsort', 'np.argsort', (['x_targets'], {}), '(x_targets)\n', (308, 319), True, 'import numpy as np\n'), ((643, 696), 'matplotlib.pyplot.plot', 'plt.plot', (['x_targets', 'y_targets'], {'label': '"""Dynamic width"""'}), "(x_targets, y_targets, label='Dynamic width')\n", (651, 696), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1157), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)'}), '(bbox_to_anchor=(1.05, 1))\n', (1131, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1207), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1205, 1207), True, 'import matplotlib.pyplot as plt\n'), ((351, 372), 'numpy.asarray', 'np.asarray', (['x_targets'], {}), '(x_targets)\n', (361, 372), True, 'import numpy as np\n'), ((422, 443), 'numpy.asarray', 'np.asarray', (['y_targets'], {}), '(y_targets)\n', (432, 443), True, 'import numpy as np\n'), ((623, 641), 'numpy.asarray', 'np.asarray', (['prunes'], {}), '(prunes)\n', (633, 641), True, 'import numpy as np\n'), ((1008, 1040), 'matplotlib.pyplot.plot', 'plt.plot', (['prunes', 'y'], {'label': 'label'}), '(prunes, y, label=label)\n', (1016, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1071), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Kept ratio"""'], {}), "('Kept ratio')\n", (1057, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1098), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1086, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1120), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1113, 1120), True, 'import matplotlib.pyplot as plt\n')] |
import os
import torch
import numpy as np
import cv2
from torch.utils.data import Dataset
from torch.nn.functional import interpolate
import matplotlib.pyplot as plt
class LOLDataset(Dataset):
"""LOL Sony dataset."""
def __init__(self, list_file ,root_dir, ps,transform=None):
self.ps = ps
self.list_file = open(list_file, "r")
self.list_file_lines = self.list_file.readlines()
self.root_dir = root_dir
self.transform = transform
self.gt_images = [None] * 2001
self.input_images = [None] * 2001
self.input_gray_images = [None] * 2001
self.input_edge_images = [None] * 2001
def __len__(self):
return len(self.list_file_lines)
def __getitem__(self, idx):
img_names = self.list_file_lines[idx].split(' ')
input_img_name = img_names[0]
gt_img_name = img_names[1]
gt_img_name = gt_img_name.split('\n')[0]
ratio = [1, 2, 3]
ind = input_img_name.split('/')[-1]
ind = ind.split('.')[0]
ind = int(ind)
if self.input_images[ind] is None:
input_img_path = os.path.join(self.root_dir, input_img_name)
input_img = cv2.imread(input_img_path)
input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
self.input_images[ind] = np.expand_dims(np.float32(input_img / 255.0), axis=0) * ratio[0]
gt_img_path = os.path.join(self.root_dir, gt_img_name)
im = cv2.imread(gt_img_path)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
self.gt_images[ind] = np.expand_dims(np.float32(im / 255.0), axis=0)
gray_path = os.path.join(self.root_dir, 'LOL/train/gray/%d.png' % (ind))
input_gray = cv2.imread(gray_path, cv2.IMREAD_GRAYSCALE)
self.input_gray_images[ind] = np.expand_dims(np.expand_dims(np.float32(input_gray / 255.0), axis=2), axis=0)
edge_path = os.path.join(self.root_dir, 'LOL/train/edge/%d.png' % (ind))
input_edge = cv2.imread(edge_path, cv2.IMREAD_GRAYSCALE)
self.input_edge_images[ind] = np.expand_dims(np.expand_dims(np.float32(input_edge / 255.0), axis=2), axis=0)
# crop
H = self.input_images[ind].shape[1]
W = self.input_images[ind].shape[2]
xx = np.random.randint(0, W - self.ps)
yy = np.random.randint(0, H - self.ps)
input_patch = self.input_images[ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
gt_patch = self.gt_images[ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
input_gray_patch = self.input_gray_images[ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
input_edge_patch = self.input_edge_images[ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
if np.random.randint(2, size=1)[0] == 1: # random flip
input_patch = np.flip(input_patch, axis=1)
gt_patch = np.flip(gt_patch, axis=1)
input_gray_patch = np.flip(input_gray_patch, axis=1)
input_edge_patch = np.flip(input_edge_patch, axis=1)
if np.random.randint(2, size=1)[0] == 1:
input_patch = np.flip(input_patch, axis=2)
gt_patch = np.flip(gt_patch, axis=2)
input_gray_patch = np.flip(input_gray_patch, axis=2)
input_edge_patch = np.flip(input_edge_patch, axis=2)
if np.random.randint(2, size=1)[0] == 1: # random transpose
input_patch = np.transpose(input_patch, (0, 2, 1, 3))
gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
input_gray_patch = np.transpose(input_gray_patch, (0, 2, 1, 3))
input_edge_patch = np.transpose(input_edge_patch, (0, 2, 1, 3))
input_patch = np.minimum(input_patch, 1.0)
gt_patch = np.maximum(gt_patch, 0.0)
input_gray_patch = np.maximum(input_gray_patch, 0.0)
input_edge_patch = np.maximum(input_edge_patch, 0.0)
# input_patch_2 = np.minimum(input_patch * ratio[1], 1.0)
# input_patch_3 = np.minimum(input_patch * ratio[2], 1.0)
in_img = torch.from_numpy(input_patch).permute(0,3,1,2)
gt_img = torch.from_numpy(gt_patch).permute(0,3,1,2)
in_gray_img = torch.from_numpy(input_gray_patch).permute(0,3,1,2)
in_edge_img = torch.from_numpy(input_edge_patch).permute(0,3,1,2)
# in_img_2 = torch.from_numpy(input_patch_2).permute(0,3,1,2)
# in_img_3 = torch.from_numpy(input_patch_3).permute(0,3,1,2)
# r,g,b = in_img[0,0,:,:]+1, in_img[0,1,:,:]+1, in_img[0,2,:,:]+1
# in_gray_img = (1.0 - (0.299*r+0.587*g+0.114*b)/2.0).unsqueeze(0).unsqueeze(0)
# sample = {'in_img': in_img.squeeze(0), 'gt_img': gt_img.squeeze(0), 'ind': ind, 'ratio': ratio}
# sample = {'in_img': in_img.squeeze(0), 'gt_img': gt_img.squeeze(0), 'in_gray_img': in_gray_img.squeeze(0), 'ind': ind, 'ratio': ratio}
# sample = {'in_img': in_img.squeeze(0), 'gt_img': gt_img.squeeze(0), 'in_edge_img': in_edge_img.squeeze(0), 'ind': ind, 'ratio': ratio}
sample = {'in_img': in_img.squeeze(0), 'gt_img': gt_img.squeeze(0), 'in_gray_img': in_gray_img.squeeze(0), 'in_edge_img': in_edge_img.squeeze(0), 'ind': ind, 'ratio': ratio}
# sample = {'in_img': in_img.squeeze(0), 'in_img_2': in_img_2.squeeze(0), 'in_img_3': in_img_3.squeeze(0), 'gt_img': gt_img.squeeze(0), 'in_gray_img': in_gray_img.squeeze(0), 'in_edge_img': in_edge_img.squeeze(0), 'ind': ind, 'ratio': ratio}
return sample
| [
"numpy.minimum",
"numpy.maximum",
"numpy.flip",
"cv2.cvtColor",
"numpy.float32",
"numpy.transpose",
"cv2.imread",
"numpy.random.randint",
"os.path.join",
"torch.from_numpy"
] | [((2369, 2402), 'numpy.random.randint', 'np.random.randint', (['(0)', '(W - self.ps)'], {}), '(0, W - self.ps)\n', (2386, 2402), True, 'import numpy as np\n'), ((2417, 2450), 'numpy.random.randint', 'np.random.randint', (['(0)', '(H - self.ps)'], {}), '(0, H - self.ps)\n', (2434, 2450), True, 'import numpy as np\n'), ((3779, 3807), 'numpy.minimum', 'np.minimum', (['input_patch', '(1.0)'], {}), '(input_patch, 1.0)\n', (3789, 3807), True, 'import numpy as np\n'), ((3828, 3853), 'numpy.maximum', 'np.maximum', (['gt_patch', '(0.0)'], {}), '(gt_patch, 0.0)\n', (3838, 3853), True, 'import numpy as np\n'), ((3882, 3915), 'numpy.maximum', 'np.maximum', (['input_gray_patch', '(0.0)'], {}), '(input_gray_patch, 0.0)\n', (3892, 3915), True, 'import numpy as np\n'), ((3944, 3977), 'numpy.maximum', 'np.maximum', (['input_edge_patch', '(0.0)'], {}), '(input_edge_patch, 0.0)\n', (3954, 3977), True, 'import numpy as np\n'), ((1170, 1213), 'os.path.join', 'os.path.join', (['self.root_dir', 'input_img_name'], {}), '(self.root_dir, input_img_name)\n', (1182, 1213), False, 'import os\n'), ((1239, 1265), 'cv2.imread', 'cv2.imread', (['input_img_path'], {}), '(input_img_path)\n', (1249, 1265), False, 'import cv2\n'), ((1291, 1333), 'cv2.cvtColor', 'cv2.cvtColor', (['input_img', 'cv2.COLOR_BGR2RGB'], {}), '(input_img, cv2.COLOR_BGR2RGB)\n', (1303, 1333), False, 'import cv2\n'), ((1466, 1506), 'os.path.join', 'os.path.join', (['self.root_dir', 'gt_img_name'], {}), '(self.root_dir, gt_img_name)\n', (1478, 1506), False, 'import os\n'), ((1525, 1548), 'cv2.imread', 'cv2.imread', (['gt_img_path'], {}), '(gt_img_path)\n', (1535, 1548), False, 'import cv2\n'), ((1567, 1602), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (1579, 1602), False, 'import cv2\n'), ((1712, 1770), 'os.path.join', 'os.path.join', (['self.root_dir', "('LOL/train/gray/%d.png' % ind)"], {}), "(self.root_dir, 'LOL/train/gray/%d.png' % ind)\n", (1724, 1770), False, 'import 
os\n'), ((1799, 1842), 'cv2.imread', 'cv2.imread', (['gray_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(gray_path, cv2.IMREAD_GRAYSCALE)\n', (1809, 1842), False, 'import cv2\n'), ((1992, 2050), 'os.path.join', 'os.path.join', (['self.root_dir', "('LOL/train/edge/%d.png' % ind)"], {}), "(self.root_dir, 'LOL/train/edge/%d.png' % ind)\n", (2004, 2050), False, 'import os\n'), ((2079, 2122), 'cv2.imread', 'cv2.imread', (['edge_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(edge_path, cv2.IMREAD_GRAYSCALE)\n', (2089, 2122), False, 'import cv2\n'), ((2903, 2931), 'numpy.flip', 'np.flip', (['input_patch'], {'axis': '(1)'}), '(input_patch, axis=1)\n', (2910, 2931), True, 'import numpy as np\n'), ((2956, 2981), 'numpy.flip', 'np.flip', (['gt_patch'], {'axis': '(1)'}), '(gt_patch, axis=1)\n', (2963, 2981), True, 'import numpy as np\n'), ((3014, 3047), 'numpy.flip', 'np.flip', (['input_gray_patch'], {'axis': '(1)'}), '(input_gray_patch, axis=1)\n', (3021, 3047), True, 'import numpy as np\n'), ((3080, 3113), 'numpy.flip', 'np.flip', (['input_edge_patch'], {'axis': '(1)'}), '(input_edge_patch, axis=1)\n', (3087, 3113), True, 'import numpy as np\n'), ((3191, 3219), 'numpy.flip', 'np.flip', (['input_patch'], {'axis': '(2)'}), '(input_patch, axis=2)\n', (3198, 3219), True, 'import numpy as np\n'), ((3244, 3269), 'numpy.flip', 'np.flip', (['gt_patch'], {'axis': '(2)'}), '(gt_patch, axis=2)\n', (3251, 3269), True, 'import numpy as np\n'), ((3302, 3335), 'numpy.flip', 'np.flip', (['input_gray_patch'], {'axis': '(2)'}), '(input_gray_patch, axis=2)\n', (3309, 3335), True, 'import numpy as np\n'), ((3368, 3401), 'numpy.flip', 'np.flip', (['input_edge_patch'], {'axis': '(2)'}), '(input_edge_patch, axis=2)\n', (3375, 3401), True, 'import numpy as np\n'), ((3499, 3538), 'numpy.transpose', 'np.transpose', (['input_patch', '(0, 2, 1, 3)'], {}), '(input_patch, (0, 2, 1, 3))\n', (3511, 3538), True, 'import numpy as np\n'), ((3563, 3599), 'numpy.transpose', 'np.transpose', (['gt_patch', '(0, 2, 1, 3)'], {}), 
'(gt_patch, (0, 2, 1, 3))\n', (3575, 3599), True, 'import numpy as np\n'), ((3632, 3676), 'numpy.transpose', 'np.transpose', (['input_gray_patch', '(0, 2, 1, 3)'], {}), '(input_gray_patch, (0, 2, 1, 3))\n', (3644, 3676), True, 'import numpy as np\n'), ((3709, 3753), 'numpy.transpose', 'np.transpose', (['input_edge_patch', '(0, 2, 1, 3)'], {}), '(input_edge_patch, (0, 2, 1, 3))\n', (3721, 3753), True, 'import numpy as np\n'), ((1653, 1675), 'numpy.float32', 'np.float32', (['(im / 255.0)'], {}), '(im / 255.0)\n', (1663, 1675), True, 'import numpy as np\n'), ((2823, 2851), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (2840, 2851), True, 'import numpy as np\n'), ((3126, 3154), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (3143, 3154), True, 'import numpy as np\n'), ((3414, 3442), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (3431, 3442), True, 'import numpy as np\n'), ((4140, 4169), 'torch.from_numpy', 'torch.from_numpy', (['input_patch'], {}), '(input_patch)\n', (4156, 4169), False, 'import torch\n'), ((4205, 4231), 'torch.from_numpy', 'torch.from_numpy', (['gt_patch'], {}), '(gt_patch)\n', (4221, 4231), False, 'import torch\n'), ((4272, 4306), 'torch.from_numpy', 'torch.from_numpy', (['input_gray_patch'], {}), '(input_gray_patch)\n', (4288, 4306), False, 'import torch\n'), ((4347, 4381), 'torch.from_numpy', 'torch.from_numpy', (['input_edge_patch'], {}), '(input_edge_patch)\n', (4363, 4381), False, 'import torch\n'), ((1387, 1416), 'numpy.float32', 'np.float32', (['(input_img / 255.0)'], {}), '(input_img / 255.0)\n', (1397, 1416), True, 'import numpy as np\n'), ((1916, 1946), 'numpy.float32', 'np.float32', (['(input_gray / 255.0)'], {}), '(input_gray / 255.0)\n', (1926, 1946), True, 'import numpy as np\n'), ((2196, 2226), 'numpy.float32', 'np.float32', (['(input_edge / 255.0)'], {}), '(input_edge / 255.0)\n', (2206, 2226), True, 
'import numpy as np\n')] |
import os
import sys
from typing import Any
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from torchvision.datasets.utils import extract_archive
from torchvision.datasets.vision import VisionDataset
from src.datasets.specs import Input2dSpec
# From DATASET_ROOT/chexpert/CheXpert-v1.0-small/valid.csv
CHEXPERT_LABELS = {
'No Finding': 0,
'Enlarged Cardiomediastinum': 1,
'Cardiomegaly': 2,
'Lung Opacity': 3,
'Lung Lesion': 4,
'Edema': 5,
'Consolidation': 6,
'Pneumonia': 7,
'Atelectasis': 8,
'Pneumothorax': 9,
'Pleural Effusion': 10,
'Pleural Other': 11,
'Fracture': 12,
'Support Devices': 13,
}
def any_exist(files):
return any(map(os.path.exists, files))
class CheXpert(VisionDataset):
'''A dataset class for the CheXpert dataset (https://stanfordmlgroup.github.io/competitions/chexpert/).
Note that you must register and manually download the data to use this dataset.
'''
# Dataset information.
TRANSFORMS = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
])
LABELS_COL = 5
# From https://arxiv.org/abs/1901.07031 (Irvin et al. 2019)
CHEXPERT_LABELS_IDX = np.array(
[
CHEXPERT_LABELS['Atelectasis'],
CHEXPERT_LABELS['Cardiomegaly'],
CHEXPERT_LABELS['Consolidation'],
CHEXPERT_LABELS['Edema'],
CHEXPERT_LABELS['Pleural Effusion'],
],
dtype=np.int32
)
NUM_CLASSES = 5 # 14 total, but we select 5: len(self.CHEXPERT_LABELS_IDX)
INPUT_SIZE = (224, 224)
PATCH_SIZE = (16, 16)
IN_CHANNELS = 3
def __init__(self, base_root: str, download: bool = False, train: bool = True) -> None:
self.root = os.path.join(base_root, 'medical_images', 'chexpert')
super().__init__(self.root)
self.index_location = self.find_data()
self.split = 'train' if train else 'valid'
self.build_index()
def find_data(self):
os.makedirs(self.root, exist_ok=True)
components = list(map(lambda x: os.path.join(self.root, 'CheXpert-v1.0' + x), ['', '-small', '.zip', '-small.zip']))
# if no data is present, prompt the user to download it
if not any_exist(components):
raise RuntimeError(
"""
'Visit https://stanfordmlgroup.github.io/competitions/chexpert/ to download the data'
'Once you receive the download links, place the zip file in {}'.format(self.root)
'To maintain compatibility with the paper baselines, download the sampled version (CheXpert-v1.0-small).'
"""
)
# if the data has not been extracted, extract the data, prioritizing the full-res dataset
if not any_exist(components[:2]):
for i in (2, 3):
if os.path.exists(components[i]):
print('Extracting data...')
extract_archive(components[i])
print('Done')
break
# return the data folder, prioritizing the full-res dataset
for i in (0, 1):
if os.path.exists(components[i]):
return components[i]
raise FileNotFoundError('CheXpert data not found')
def build_index(self):
print('Building index...')
index_file = os.path.join(self.index_location, self.split + '.csv')
self.fnames = np.loadtxt(index_file, dtype=np.str, delimiter=',', skiprows=1, usecols=0)
end_col = self.LABELS_COL + len(CHEXPERT_LABELS)
# missing values occur when no comment is made on a particular diagnosis. we treat this as a negative diagnosis
self.labels = np.genfromtxt(
index_file,
dtype=np.float,
delimiter=',',
skip_header=1,
usecols=range(self.LABELS_COL, end_col),
missing_values='',
filling_values=0,
)
self.labels = np.maximum(self.labels, 0) # convert -1 (unknown) to 0
print('Done')
def __len__(self) -> int:
return self.fnames.shape[0]
def __getitem__(self, index: int) -> Any:
fname = self.fnames[index]
image = Image.open(os.path.join(self.root, fname)).convert('RGB')
image = self.TRANSFORMS(image)
label = torch.tensor(self.labels[index][self.CHEXPERT_LABELS_IDX]).long()
return index, image.float(), label
@staticmethod
def num_classes():
return CheXpert.NUM_CLASSES
@staticmethod
def spec():
return [
Input2dSpec(input_size=CheXpert.INPUT_SIZE, patch_size=CheXpert.PATCH_SIZE, in_channels=CheXpert.IN_CHANNELS),
]
def open_folder(path: str):
'''Opens a folder in the file explorer. Attempts to be platform-independent
Args:
path (str): The folder path to be opened in a file explorer
'''
try:
if hasattr(os, 'startfile'):
os.startfile(path)
elif sys.platform == 'darwin':
os.system('open ' + path)
else:
os.system('xdg-open ' + path)
except: # noqa E722
pass
| [
"numpy.maximum",
"os.makedirs",
"os.path.exists",
"os.system",
"torchvision.transforms.ToTensor",
"numpy.array",
"numpy.loadtxt",
"src.datasets.specs.Input2dSpec",
"torch.tensor",
"torchvision.datasets.utils.extract_archive",
"os.path.join",
"os.startfile",
"torchvision.transforms.Resize"
] | [((1252, 1448), 'numpy.array', 'np.array', (["[CHEXPERT_LABELS['Atelectasis'], CHEXPERT_LABELS['Cardiomegaly'],\n CHEXPERT_LABELS['Consolidation'], CHEXPERT_LABELS['Edema'],\n CHEXPERT_LABELS['Pleural Effusion']]"], {'dtype': 'np.int32'}), "([CHEXPERT_LABELS['Atelectasis'], CHEXPERT_LABELS['Cardiomegaly'],\n CHEXPERT_LABELS['Consolidation'], CHEXPERT_LABELS['Edema'],\n CHEXPERT_LABELS['Pleural Effusion']], dtype=np.int32)\n", (1260, 1448), True, 'import numpy as np\n'), ((1802, 1855), 'os.path.join', 'os.path.join', (['base_root', '"""medical_images"""', '"""chexpert"""'], {}), "(base_root, 'medical_images', 'chexpert')\n", (1814, 1855), False, 'import os\n'), ((2051, 2088), 'os.makedirs', 'os.makedirs', (['self.root'], {'exist_ok': '(True)'}), '(self.root, exist_ok=True)\n', (2062, 2088), False, 'import os\n'), ((3423, 3477), 'os.path.join', 'os.path.join', (['self.index_location', "(self.split + '.csv')"], {}), "(self.index_location, self.split + '.csv')\n", (3435, 3477), False, 'import os\n'), ((3500, 3574), 'numpy.loadtxt', 'np.loadtxt', (['index_file'], {'dtype': 'np.str', 'delimiter': '""","""', 'skiprows': '(1)', 'usecols': '(0)'}), "(index_file, dtype=np.str, delimiter=',', skiprows=1, usecols=0)\n", (3510, 3574), True, 'import numpy as np\n'), ((4042, 4068), 'numpy.maximum', 'np.maximum', (['self.labels', '(0)'], {}), '(self.labels, 0)\n', (4052, 4068), True, 'import numpy as np\n'), ((1073, 1102), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1090, 1102), False, 'from torchvision import transforms\n'), ((1112, 1133), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1131, 1133), False, 'from torchvision import transforms\n'), ((3212, 3241), 'os.path.exists', 'os.path.exists', (['components[i]'], {}), '(components[i])\n', (3226, 3241), False, 'import os\n'), ((4649, 4762), 'src.datasets.specs.Input2dSpec', 'Input2dSpec', ([], {'input_size': 'CheXpert.INPUT_SIZE', 'patch_size': 
'CheXpert.PATCH_SIZE', 'in_channels': 'CheXpert.IN_CHANNELS'}), '(input_size=CheXpert.INPUT_SIZE, patch_size=CheXpert.PATCH_SIZE,\n in_channels=CheXpert.IN_CHANNELS)\n', (4660, 4762), False, 'from src.datasets.specs import Input2dSpec\n'), ((5025, 5043), 'os.startfile', 'os.startfile', (['path'], {}), '(path)\n', (5037, 5043), False, 'import os\n'), ((2913, 2942), 'os.path.exists', 'os.path.exists', (['components[i]'], {}), '(components[i])\n', (2927, 2942), False, 'import os\n'), ((4398, 4456), 'torch.tensor', 'torch.tensor', (['self.labels[index][self.CHEXPERT_LABELS_IDX]'], {}), '(self.labels[index][self.CHEXPERT_LABELS_IDX])\n', (4410, 4456), False, 'import torch\n'), ((5095, 5120), 'os.system', 'os.system', (["('open ' + path)"], {}), "('open ' + path)\n", (5104, 5120), False, 'import os\n'), ((5147, 5176), 'os.system', 'os.system', (["('xdg-open ' + path)"], {}), "('xdg-open ' + path)\n", (5156, 5176), False, 'import os\n'), ((2129, 2173), 'os.path.join', 'os.path.join', (['self.root', "('CheXpert-v1.0' + x)"], {}), "(self.root, 'CheXpert-v1.0' + x)\n", (2141, 2173), False, 'import os\n'), ((3012, 3042), 'torchvision.datasets.utils.extract_archive', 'extract_archive', (['components[i]'], {}), '(components[i])\n', (3027, 3042), False, 'from torchvision.datasets.utils import extract_archive\n'), ((4296, 4326), 'os.path.join', 'os.path.join', (['self.root', 'fname'], {}), '(self.root, fname)\n', (4308, 4326), False, 'import os\n')] |
import argparse
import sys
from tensorflow.python.framework import dtypes
import tensorflow as tf
import numpy as np
from collections import namedtuple
import json
from os import makedirs
from os import path
FLAGS = None
Datasets = namedtuple('Datasets', ['train', 'validation', 'test'])
def export_def_graph(outdir="log"):
if not path.exists(outdir):
makedirs(outdir)
writer = tf.summary.FileWriter(outdir, tf.get_default_graph())
writer.close()
print("+ Graph exported")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
class DataSet(object):
def __init__(self,
images,
labels,
one_hot=False,
dtype=dtypes.float32):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32, dtypes.float64):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(np.float32)
images = np.multiply(images, 1.0 / 255.0)
elif dtype == dtypes.float64:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(np.float64)
images = np.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_coil_data_sets(dtype=dtypes.float32):
with open("coil_dataset_report.json", "r") as report_file:
report = json.load(report_file)
with open("coil_train_data.bin", "rb") as train_data_file:
train_images = np.frombuffer(
train_data_file.read(), dtype=np.float32)
train_images = train_images.reshape(*report['train_data_shape'])
with open("coil_train_labels.bin", "rb") as train_labels_file:
train_labels = np.frombuffer(
train_labels_file.read(), dtype=np.uint8)
train_labels = train_labels.reshape(*report['train_labels_shape'])
with open("coil_test_data.bin", "rb") as test_data_file:
test_images = np.frombuffer(
test_data_file.read(), dtype=np.float32)
test_images = test_images.reshape(*report['test_data_shape'])
with open("coil_test_labels.bin", "rb") as test_labels_file:
test_labels = np.frombuffer(
test_labels_file.read(), dtype=np.uint8)
test_labels = test_labels.reshape(*report['test_labels_shape'])
VALIDATION_SIZE = 200
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
train = DataSet(train_images, train_labels, dtype=dtype)
validation = DataSet(validation_images, validation_labels, dtype=dtype)
test = DataSet(test_images, test_labels, dtype=dtype)
return Datasets(train=train, validation=validation, test=test)
class MyModel(object):
def __init__(self):
attributes = [
"x",
"y_",
"W_conv1",
"b_conv1",
"x_image",
"h_conv1",
"h_pool1",
"W_conv2",
"b_conv2",
"h_conv2",
"h_pool2",
"W_fc1",
"b_fc1",
"h_pool2_flat",
"h_fc1",
"keep_prob",
"h_fc1_drop",
"W_fc2",
"b_fc2",
"y_conv",
"cross_entropy",
"train_step",
"correct_prediction",
"accuracy",
"variables"
]
for attr in attributes:
setattr(self, attr, None)
def gen_model():
model = MyModel()
print("+ Create model")
with tf.name_scope('Conv_1'):
# Create the model
model.x = tf.placeholder(tf.float32, [None, 16384])
# # Define loss and optimizer
model.y_ = tf.placeholder(tf.float32, [None, 20])
model.W_conv1 = weight_variable([5, 5, 1, 32])
model.b_conv1 = bias_variable([32])
# 128x128 images
model.x_image = tf.reshape(model.x, [-1, 128, 128, 1])
model.h_conv1 = tf.nn.relu(
conv2d(model.x_image, model.W_conv1) + model.b_conv1)
model.h_pool1 = max_pool_2x2(model.h_conv1)
with tf.name_scope('Conv_2'):
model.W_conv2 = weight_variable([5, 5, 32, 64])
model.b_conv2 = bias_variable([64])
model.h_conv2 = tf.nn.relu(
conv2d(model.h_pool1, model.W_conv2) + model.b_conv2)
model.h_pool2 = max_pool_2x2(model.h_conv2)
with tf.name_scope('Full_Connected_1'):
model.W_fc1 = weight_variable([32 * 32 * 64, 1024])
model.b_fc1 = bias_variable([1024])
# 3276800 / 32 / 32 / 64 = 50
model.h_pool2_flat = tf.reshape(model.h_pool2, [-1, 32*32*64])
model.h_fc1 = tf.nn.relu(
tf.matmul(model.h_pool2_flat, model.W_fc1) + model.b_fc1)
model.keep_prob = tf.placeholder(tf.float32)
model.h_fc1_drop = tf.nn.dropout(model.h_fc1, model.keep_prob)
with tf.name_scope('Full_Connected_2'):
model.W_fc2 = weight_variable([1024, 20])
model.b_fc2 = bias_variable([20])
model.y_conv = tf.matmul(model.h_fc1_drop, model.W_fc2) + model.b_fc2
model.cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=model.y_conv, labels=model.y_))
model.train_step = tf.train.AdamOptimizer(
1e-4).minimize(model.cross_entropy)
model.correct_prediction = tf.equal(
tf.argmax(model.y_conv, 1), tf.argmax(model.y_, 1))
model.accuracy = tf.reduce_mean(
tf.cast(model.correct_prediction, tf.float32))
model.variables = [
model.W_conv1,
model.b_conv1,
model.W_conv2,
model.b_conv2,
model.W_fc1,
model.b_fc1,
model.W_fc2,
model.b_fc2
]
return model
def main(unparsed_args):
# Import data
print("+ Load data")
coil = read_coil_data_sets()
model = gen_model()
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
export_def_graph()
saver = tf.train.Saver(model.variables)
if FLAGS.session is None:
print("+ Train model")
for i in range(2000):
batch = coil.train.next_batch(50)
if i % 100 == 0:
train_accuracy = model.accuracy.eval(feed_dict={
model.x: batch[0],
model.y_: batch[1],
model.keep_prob: 1.0})
print("+ Step %d, training accuracy %g" % (i, train_accuracy))
print("+ Step %d" % i, end="\r")
model.train_step.run(
feed_dict={model.x: batch[0],
model.y_: batch[1],
model.keep_prob: 0.5})
print("+ Save model")
if not path.exists("models"):
makedirs("models")
saver.save(sess, "models/coil.chk")
else:
print("+ Load model")
saver.restore(sess, FLAGS.session)
print("+ Test accuracy %g" % model.accuracy.eval(feed_dict={
model.x: coil.test.images,
model.y_: coil.test.labels,
model.keep_prob: 1.0}))
if __name__ == '__main__':
# python coil_conv.py --session models\coil.chk
parser = argparse.ArgumentParser()
parser.add_argument('--session', type=str, help='Previously saved session')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"argparse.ArgumentParser",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"tensorflow.get_default_graph",
"tensorflow.truncated_normal",
"numpy.multiply",
"tensorflow.nn.softmax_cross_entropy_with_logits"... | [((234, 289), 'collections.namedtuple', 'namedtuple', (['"""Datasets"""', "['train', 'validation', 'test']"], {}), "('Datasets', ['train', 'validation', 'test'])\n", (244, 289), False, 'from collections import namedtuple\n'), ((545, 583), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (564, 583), True, 'import tensorflow as tf\n'), ((595, 615), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (606, 615), True, 'import tensorflow as tf\n'), ((658, 687), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (669, 687), True, 'import tensorflow as tf\n'), ((699, 719), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (710, 719), True, 'import tensorflow as tf\n'), ((751, 807), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (763, 807), True, 'import tensorflow as tf\n'), ((842, 917), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (856, 917), True, 'import tensorflow as tf\n'), ((8035, 8058), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (8056, 8058), True, 'import tensorflow as tf\n'), ((8139, 8170), 'tensorflow.train.Saver', 'tf.train.Saver', (['model.variables'], {}), '(model.variables)\n', (8153, 8170), True, 'import tensorflow as tf\n'), ((9300, 9325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9323, 9325), False, 'import argparse\n'), ((9458, 9510), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main', 'argv': '([sys.argv[0]] + unparsed)'}), '(main=main, argv=[sys.argv[0]] + unparsed)\n', (9468, 9510), True, 'import tensorflow as tf\n'), 
((339, 358), 'os.path.exists', 'path.exists', (['outdir'], {}), '(outdir)\n', (350, 358), False, 'from os import path\n'), ((368, 384), 'os.makedirs', 'makedirs', (['outdir'], {}), '(outdir)\n', (376, 384), False, 'from os import makedirs\n'), ((428, 450), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (448, 450), True, 'import tensorflow as tf\n'), ((3453, 3475), 'json.load', 'json.load', (['report_file'], {}), '(report_file)\n', (3462, 3475), False, 'import json\n'), ((5710, 5733), 'tensorflow.name_scope', 'tf.name_scope', (['"""Conv_1"""'], {}), "('Conv_1')\n", (5723, 5733), True, 'import tensorflow as tf\n'), ((5780, 5821), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 16384]'], {}), '(tf.float32, [None, 16384])\n', (5794, 5821), True, 'import tensorflow as tf\n'), ((5880, 5918), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 20]'], {}), '(tf.float32, [None, 20])\n', (5894, 5918), True, 'import tensorflow as tf\n'), ((6069, 6107), 'tensorflow.reshape', 'tf.reshape', (['model.x', '[-1, 128, 128, 1]'], {}), '(model.x, [-1, 128, 128, 1])\n', (6079, 6107), True, 'import tensorflow as tf\n'), ((6273, 6296), 'tensorflow.name_scope', 'tf.name_scope', (['"""Conv_2"""'], {}), "('Conv_2')\n", (6286, 6296), True, 'import tensorflow as tf\n'), ((6563, 6596), 'tensorflow.name_scope', 'tf.name_scope', (['"""Full_Connected_1"""'], {}), "('Full_Connected_1')\n", (6576, 6596), True, 'import tensorflow as tf\n'), ((6770, 6815), 'tensorflow.reshape', 'tf.reshape', (['model.h_pool2', '[-1, 32 * 32 * 64]'], {}), '(model.h_pool2, [-1, 32 * 32 * 64])\n', (6780, 6815), True, 'import tensorflow as tf\n'), ((6943, 6969), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (6957, 6969), True, 'import tensorflow as tf\n'), ((6997, 7040), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['model.h_fc1', 'model.keep_prob'], {}), '(model.h_fc1, model.keep_prob)\n', (7010, 7040), True, 'import 
tensorflow as tf\n'), ((7051, 7084), 'tensorflow.name_scope', 'tf.name_scope', (['"""Full_Connected_2"""'], {}), "('Full_Connected_2')\n", (7064, 7084), True, 'import tensorflow as tf\n'), ((7308, 7385), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'model.y_conv', 'labels': 'model.y_'}), '(logits=model.y_conv, labels=model.y_)\n', (7347, 7385), True, 'import tensorflow as tf\n'), ((7527, 7553), 'tensorflow.argmax', 'tf.argmax', (['model.y_conv', '(1)'], {}), '(model.y_conv, 1)\n', (7536, 7553), True, 'import tensorflow as tf\n'), ((7555, 7577), 'tensorflow.argmax', 'tf.argmax', (['model.y_', '(1)'], {}), '(model.y_, 1)\n', (7564, 7577), True, 'import tensorflow as tf\n'), ((7624, 7669), 'tensorflow.cast', 'tf.cast', (['model.correct_prediction', 'tf.float32'], {}), '(model.correct_prediction, tf.float32)\n', (7631, 7669), True, 'import tensorflow as tf\n'), ((1350, 1372), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['dtype'], {}), '(dtype)\n', (1365, 1372), False, 'from tensorflow.python.framework import dtypes\n'), ((1907, 1939), 'numpy.multiply', 'np.multiply', (['images', '(1.0 / 255.0)'], {}), '(images, 1.0 / 255.0)\n', (1918, 1939), True, 'import numpy as np\n'), ((2915, 2944), 'numpy.arange', 'np.arange', (['self._num_examples'], {}), '(self._num_examples)\n', (2924, 2944), True, 'import numpy as np\n'), ((2957, 2980), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (2974, 2980), True, 'import numpy as np\n'), ((7202, 7242), 'tensorflow.matmul', 'tf.matmul', (['model.h_fc1_drop', 'model.W_fc2'], {}), '(model.h_fc1_drop, model.W_fc2)\n', (7211, 7242), True, 'import tensorflow as tf\n'), ((7410, 7440), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (7432, 7440), True, 'import tensorflow as tf\n'), ((8063, 8096), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), 
'()\n', (8094, 8096), True, 'import tensorflow as tf\n'), ((8854, 8875), 'os.path.exists', 'path.exists', (['"""models"""'], {}), "('models')\n", (8865, 8875), False, 'from os import path\n'), ((8889, 8907), 'os.makedirs', 'makedirs', (['"""models"""'], {}), "('models')\n", (8897, 8907), False, 'from os import makedirs\n'), ((2097, 2129), 'numpy.multiply', 'np.multiply', (['images', '(1.0 / 255.0)'], {}), '(images, 1.0 / 255.0)\n', (2108, 2129), True, 'import numpy as np\n'), ((6858, 6900), 'tensorflow.matmul', 'tf.matmul', (['model.h_pool2_flat', 'model.W_fc1'], {}), '(model.h_pool2_flat, model.W_fc1)\n', (6867, 6900), True, 'import tensorflow as tf\n')] |
from fypy.model.FourierModel import FourierModel
import numpy as np
from scipy.fft import ifft
from fypy.pricing.StrikesPricer import StrikesPricer
from scipy.interpolate import interp1d
from scipy.integrate import quad
class LewisEuropeanPricer(StrikesPricer):
    """European option pricer built on the Fourier representation of Lewis (2001)."""

    def __init__(self,
                 model: FourierModel,
                 N: int = 2 ** 12,
                 interp: str = 'cubic'):
        """
        Price European options with the Fourier method of Lewis (2001).

        :param model: Fourier model supplying the characteristic function and curves
        :param N: int (power of 2), number of quadrature points in the integral
        :param interp: str, 'cubic' or 'linear' interpolation across the strike grid
        """
        self._model = model
        self._limit = 200  # upper bound on number of quad subintervals
        self._N = N
        self._interp = interp

    def price_strikes(self,
                      T: float,
                      K: np.ndarray,
                      is_calls: np.ndarray) -> np.ndarray:
        """
        Price a whole slice of strikes (one maturity) in a single FFT pass,
        interpolating the transform between the FFT log-strike grid points.
        (One-by-one pricing via ``price`` can be more accurate but far slower.)

        :param T: float, time to maturity of options
        :param K: np.array, strikes of options
        :param is_calls: np.array[bool], True for calls, False for puts
        :return: np.array, prices of strikes
        """
        num_pts = self._N
        dx = self._limit / num_pts

        # quadrature nodes in the transform variable (upper limit excluded)
        nodes = np.arange(num_pts) * dx

        # composite Simpson weights: 1, 4, 2, 4, ..., 2, 4, 1
        simpson = np.arange(num_pts)
        simpson = 3 + (-1) ** (simpson + 1)
        simpson[0] = 1
        simpson[num_pts - 1] = 1

        # log-strike grid implied by the FFT step
        dk = 2 * np.pi / self._limit
        b = num_pts * dk / 2
        log_strikes = -b + dk * np.arange(num_pts)

        S0 = self._model.spot()
        chf = lambda x: self._model.chf(T=T, xi=x)
        integrand = np.exp(- 1j * b * np.arange(num_pts) * dx) * chf(nodes - 0.5j) * 1 / (nodes ** 2 + 0.25) * simpson * dx / 3
        transform_vals = np.real(ifft(integrand) * num_pts)

        disc = self._model.discountCurve(T)
        sf = self._model.forwardCurve(T) * disc

        if self._interp not in ("linear", "cubic"):
            raise NotImplementedError("Only linear and cubic interpolation supported")
        spline = interp1d(log_strikes, transform_vals, kind=self._interp)

        prices = sf - np.sqrt(S0 * K) * disc / np.pi * spline(np.log(S0 / K))
        # put-call parity adjustment for the put entries
        prices[~is_calls] -= sf - K[~is_calls] * disc
        return prices

    def price(self, T: float, K: float, is_call: bool):
        """
        Price a single European option by direct quadrature of the Lewis integral.

        :param T: float, time to maturity
        :param K: float, strike of option
        :param is_call: bool, True for call, False for put
        :return: float, price of option
        """
        S0 = self._model.spot()
        cf = lambda x: self._model.chf(T=T, xi=x)
        disc = self._model.discountCurve(T)
        k = np.log(S0 / K)

        def integrand(u):
            return np.real(np.exp(u * k * 1j) * cf(u - 0.5j)) * 1 / (u ** 2 + 0.25)

        int_value = quad(integrand, 0, self._N, limit=self._limit)[0]
        sf = self._model.forwardCurve(T) * disc
        price = sf - np.sqrt(S0 * K) * disc / np.pi * int_value
        if not is_call:
            # put-call parity
            price -= sf - K * disc
        return price
| [
"numpy.log",
"scipy.integrate.quad",
"numpy.arange",
"numpy.exp",
"scipy.interpolate.interp1d",
"scipy.fft.ifft",
"numpy.sqrt"
] | [((1590, 1602), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1599, 1602), True, 'import numpy as np\n'), ((3134, 3148), 'numpy.log', 'np.log', (['(S0 / K)'], {}), '(S0 / K)\n', (3140, 3148), True, 'import numpy as np\n'), ((1517, 1529), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1526, 1529), True, 'import numpy as np\n'), ((2213, 2256), 'scipy.interpolate.interp1d', 'interp1d', (['ks', 'integral_value'], {'kind': '"""linear"""'}), "(ks, integral_value, kind='linear')\n", (2221, 2256), False, 'from scipy.interpolate import interp1d\n'), ((3264, 3310), 'scipy.integrate.quad', 'quad', (['integrand', '(0)', 'self._N'], {'limit': 'self._limit'}), '(integrand, 0, self._N, limit=self._limit)\n', (3268, 3310), False, 'from scipy.integrate import quad\n'), ((1796, 1808), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1805, 1808), True, 'import numpy as np\n'), ((2040, 2055), 'scipy.fft.ifft', 'ifft', (['integrand'], {}), '(integrand)\n', (2044, 2055), False, 'from scipy.fft import ifft\n'), ((2316, 2358), 'scipy.interpolate.interp1d', 'interp1d', (['ks', 'integral_value'], {'kind': '"""cubic"""'}), "(ks, integral_value, kind='cubic')\n", (2324, 2358), False, 'from scipy.interpolate import interp1d\n'), ((2523, 2537), 'numpy.log', 'np.log', (['(S0 / K)'], {}), '(S0 / K)\n', (2529, 2537), True, 'import numpy as np\n'), ((2483, 2498), 'numpy.sqrt', 'np.sqrt', (['(S0 * K)'], {}), '(S0 * K)\n', (2490, 2498), True, 'import numpy as np\n'), ((3383, 3398), 'numpy.sqrt', 'np.sqrt', (['(S0 * K)'], {}), '(S0 * K)\n', (3390, 3398), True, 'import numpy as np\n'), ((3187, 3207), 'numpy.exp', 'np.exp', (['(u * k * 1.0j)'], {}), '(u * k * 1.0j)\n', (3193, 3207), True, 'import numpy as np\n'), ((1932, 1944), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1941, 1944), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2017 Johns Hopkins University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import copy
import json
import logging
import math
import os
import re
# chainer related
import chainer
from chainer.datasets import TransformDataset
from chainer import reporter as reporter_module
from chainer import training
from chainer.training import extensions
# torch related
import torch
# espnet related
from espnet.asr.asr_utils import adadelta_eps_decay
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import CompareValueTrigger
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import load_inputs_and_targets
from espnet.asr.asr_utils import make_batchset
from espnet.asr.asr_utils import PlotAttentionReport
from espnet.asr.asr_utils import restore_snapshot
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_save
from espnet.asr.asr_utils import torch_snapshot
from espnet.nets.e2e_asr_th import E2E
from espnet.nets.e2e_asr_th import Loss
from espnet.nets.e2e_asr_th import pad_list
# for kaldi io
import kaldi_io_py
# rnnlm
import espnet.lm.extlm_pytorch as extlm_pytorch
import espnet.lm.lm_pytorch as lm_pytorch
# matplotlib related
import matplotlib
import numpy as np
# Force the non-interactive Agg backend so attention plots can be rendered on
# headless training machines; must run before any pyplot import elsewhere.
matplotlib.use('Agg')

# Reporting/logging period for the chainer trainer extensions, in iterations.
REPORT_INTERVAL = 100
def get_alpha_number(s):
    """Split a schedule token such as 'spk3' into its ('spk', 3) parts.

    :param s: token string, an optional lowercase alpha prefix followed by an
        optional trailing integer
    :return: tuple (prefix or None, trailing int or None)
    """
    alpha_match = re.search(r'^[a-z]+', s)
    digits_match = re.search(r'\d+$', s)
    prefix = None if alpha_match is None else alpha_match.group()
    number = None if digits_match is None else int(digits_match.group())
    return (prefix, number)
def get_advsched(advstr, nepochs):
    """Expand a schedule string like 'asr3,spk2' into a per-epoch mode dict.

    :param advstr: comma-separated tokens of mode name + duration in epochs
    :param nepochs: total number of training epochs; durations must sum to it
    :return: dict mapping epoch index -> mode string, with one extra trailing
        entry so a lookup at epoch == nepochs does not raise KeyError
    """
    segments = [get_alpha_number(token) for token in advstr.split(',')]
    assert sum([seg[1] for seg in segments]) == nepochs, "Sum of schedule segment != nepochs"
    schedule = {}
    epoch = 0
    for mode, duration in segments:
        for _ in range(duration):
            schedule[epoch] = mode
            epoch += 1
    # Hack to prevent KeyError in last epoch: repeat the final mode once more.
    schedule[epoch] = segments[-1][0]
    return schedule
def get_grlalpha(max_grlalpha, ep_num, total_epochs):
    """Gradient-reversal alpha ramped with training progress.

    Follows the DANN-style sigmoid ramp 2/(1+exp(-10*p)) - 1, which is 0 at
    the start of training and approaches max_grlalpha at the end.

    :param max_grlalpha: float, asymptotic alpha value
    :param ep_num: current (possibly fractional) epoch
    :param total_epochs: total number of epochs
    :return: float, current gradient-reversal scaling
    """
    progress = ep_num / float(total_epochs)
    alpha = float(max_grlalpha * (2.0 / (1.0 + np.exp(-10 * progress)) - 1.0))
    logging.info(" ------------- CGA = %f ---------", alpha)
    return alpha
class CustomEvaluator(extensions.Evaluator):
    """Chainer evaluator extension that drives a pytorch model."""

    def __init__(self, model, iterator, target, converter, device):
        super(CustomEvaluator, self).__init__(iterator, target)
        self.model = model
        self.converter = converter
        self.device = device

    # Overrides the core evaluation routine of extensions.Evaluator.
    def evaluate(self):
        """Run one full pass over the validation set and return mean statistics."""
        main_iter = self._iterators['main']

        if self.eval_hook:
            self.eval_hook(self)

        # Restartable iterators are rewound in place; one-shot iterators are
        # shallow-copied so the original can still be consumed elsewhere.
        if hasattr(main_iter, 'reset'):
            main_iter.reset()
            epoch_iter = main_iter
        else:
            epoch_iter = copy.copy(main_iter)

        summary = reporter_module.DictSummary()
        self.model.eval()
        with torch.no_grad():
            for minibatch in epoch_iter:
                obs = {}
                with reporter_module.report_scope(obs):
                    # minibatch holds json entries with loaded features; the
                    # converter pads them and moves them onto the device
                    inputs = self.converter(minibatch, self.device)
                    self.model(*inputs)
                summary.add(obs)
        self.model.train()

        return summary.compute_mean()
class CustomUpdater(training.StandardUpdater):
    '''Custom updater for pytorch.

    Performs one forward/backward/step per update and alternates which loss is
    optimized (ASR, adversarial speaker branch, or their sum) according to
    ``adv_schedule``, a dict mapping epoch index -> mode string produced by
    get_advsched().
    '''

    def __init__(self, model, grad_clip_threshold, train_iter,
                 optimizer, converter, device, ngpu, adv_schedule=None,
                 max_grlalpha=None):
        # model: Loss-wrapped E2E network (DataParallel-wrapped when ngpu > 1)
        # grad_clip_threshold: max gradient norm before clipping
        super(CustomUpdater, self).__init__(train_iter, optimizer)
        self.model = model
        self.grad_clip_threshold = grad_clip_threshold
        self.converter = converter
        self.device = device
        self.ngpu = ngpu
        # dict: epoch index -> adversarial mode ('spk', 'asr', or joint)
        self.adv_schedule = adv_schedule
        # mode used on the previous update; lets us detect transitions to 'spk'
        self.last_adv_mode = None
        # gradient-reversal-layer scaling forwarded to the model's forward pass
        self.max_grlalpha = max_grlalpha

    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator('main')
        optimizer = self.get_optimizer('main')

        # Get the next batch ( a list of json files)
        batch = train_iter.next()
        x = self.converter(batch, self.device)
        curr_epoch = int(self.epoch_detail)
        adv_mode = self.adv_schedule[curr_epoch]
        logging.info("Epoch detail = %f, Adv mode = %s", self.epoch_detail,
                     adv_mode)
        # If transitioning to speaker branch training - RESET!
        # The adversarial classifier restarts from fresh weights so it must
        # re-learn speaker identity from the current encoder representation.
        if curr_epoch > 0:
            if self.last_adv_mode != 'spk' and adv_mode == 'spk':
                logging.info(" ----- Resetting the adversarial branch weights... -----")
                if self.ngpu > 1:
                    self.model.module.predictor.adv.init_like_chainer()
                else:
                    self.model.predictor.adv.init_like_chainer()
                #logging.info("Some weights after resetting ---")
                #for p in self.model.predictor.adv.advnet.parameters():
                #    logging.info(p)
                #    break
        # UNCOMMENT NEXT LINE TO ALLOW exponentially growing alpha
        #curr_grlalpha = get_grlalpha(self.max_grlalpha, self.epoch_detail,
        #                             len(self.adv_schedule))
        curr_grlalpha = self.max_grlalpha
        loss_asr, loss_adv = self.model(*x, grlalpha=curr_grlalpha)
        # Pick which loss drives this step and freeze/unfreeze the encoder so
        # only the intended branch receives meaningful gradients.
        if adv_mode == 'spk':
            # speaker-branch-only training: encoder frozen, adversarial loss
            if self.ngpu > 1:
                self.model.module.predictor.freeze_encoder()
            else:
                self.model.predictor.freeze_encoder()
            loss = loss_adv
        elif adv_mode == 'asr':
            # NOTE(review): the encoder is frozen here as well, so 'asr' mode
            # trains only decoder/attention on the ASR loss -- confirm intended.
            if self.ngpu > 1:
                self.model.module.predictor.freeze_encoder()
            else:
                self.model.predictor.freeze_encoder()
            loss = loss_asr
        else:
            # joint mode: train everything on the summed objective
            if self.ngpu > 1:
                self.model.module.predictor.unfreeze_encoder()
            else:
                self.model.predictor.unfreeze_encoder()
            loss = loss_asr + loss_adv
        # Compute the loss at this time step and accumulate it
        optimizer.zero_grad()  # Clear the parameter gradients
        if self.ngpu > 1:
            # DataParallel returns one loss per GPU: rescale, backprop all
            loss = 1. / self.ngpu * loss
            loss.backward(loss.new_ones(self.ngpu))  # Backprop
        else:
            loss.backward()  # Backprop
        loss.detach_()  # Truncate the graph
        loss_asr.detach_()
        loss_adv.detach_()

        # compute the gradient norm to check if it is normal or not
        grad_norm = torch.nn.utils.clip_grad_norm_(
            self.model.parameters(), self.grad_clip_threshold)
        logging.info('grad norm={}'.format(grad_norm))
        if math.isnan(grad_norm):
            logging.warning('grad norm is nan. Do not update model.')
        else:
            optimizer.step()
        # Update last adv mode
        self.last_adv_mode = adv_mode
class CustomConverter(object):
    """Converts one pre-batched example into padded torch tensors on a device.

    NOTE: the public attribute keeps the original (misspelled) name
    ``subsamping_factor`` so existing callers/readers of this object keep
    working.
    """

    def __init__(self, subsamping_factor=1):
        # factor by which input frames are decimated before padding
        self.subsamping_factor = subsamping_factor
        # label padding value that the loss is told to ignore
        self.ignore_id = -1

    def transform(self, item):
        """Load features and targets for one batch description (worker-side)."""
        return load_inputs_and_targets(item)

    def __call__(self, batch, device):
        """Pad one pre-batched example and move it onto *device*.

        :param batch: one-element list holding (xs, ys, y_adv)
        :param device: torch device the padded tensors are sent to
        :return: tuple (xs_pad, ilens, ys_pad, y_adv_pad)
        """
        # batch should be located in list
        assert len(batch) == 1
        xs, ys, y_adv = batch[0]

        # perform subsampling
        if self.subsamping_factor > 1:
            # BUGFIX: the original read ``self.subsampling_factor`` (correct
            # spelling), which was never set in __init__ and therefore raised
            # AttributeError whenever subsampling was actually requested.
            xs = [x[::self.subsamping_factor, :] for x in xs]

        # get batch of lengths of input sequences
        ilens = np.array([x.shape[0] for x in xs])

        # perform padding and convert to tensor
        xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(device)
        ilens = torch.from_numpy(ilens).to(device)
        ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], self.ignore_id).to(device)
        y_adv_pad = pad_list([torch.from_numpy(y).long() for y in y_adv], 0).to(device)

        return xs_pad, ilens, ys_pad, y_adv_pad
def train(args):
    '''Run end-to-end ASR training (optionally with an adversarial speaker branch).

    Builds the E2E model from the dimensions found in the validation json,
    wires up the optimizer, data iterators and chainer trainer extensions,
    then runs the training loop for args.epochs epochs.

    :param args: parsed command-line namespace (model/optimizer/data options)
    '''
    # seed setting
    torch.manual_seed(args.seed)

    # debug mode setting
    # 0 would be fastest, but 1 seems to be reasonable
    # by considering reproducibility
    # remove type check
    if args.debugmode < 2:
        chainer.config.type_check = False
        logging.info('torch type check is disabled')
    # use deterministic computation or not
    if args.debugmode < 1:
        torch.backends.cudnn.deterministic = False
        logging.info('torch cudnn deterministic is disabled')
    else:
        torch.backends.cudnn.deterministic = True

    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning('cuda is not available')

    # get input and output dimension info from the validation set
    with open(args.valid_json, 'rb') as f:
        valid_json = json.load(f)['utts']
    utts = list(valid_json.keys())
    idim = int(valid_json[utts[0]]['input'][0]['shape'][1])
    odim = int(valid_json[utts[0]]['output'][0]['shape'][1])
    logging.info('#input dims : ' + str(idim))
    logging.info('#output dims: ' + str(odim))
    odim_adv = None
    if args.adv:
        # second output stream holds the adversarial (speaker) targets
        odim_adv = int(valid_json[utts[0]]['output'][1]['shape'][1])
        logging.info('#output dims adversarial: ' + str(odim_adv))

    # specify attention, CTC, hybrid mode
    if args.mtlalpha == 1.0:
        mtl_mode = 'ctc'
        logging.info('Pure CTC mode')
    elif args.mtlalpha == 0.0:
        mtl_mode = 'att'
        logging.info('Pure attention mode')
    else:
        mtl_mode = 'mtl'
        logging.info('Multitask learning mode')

    # specify model architecture
    e2e = E2E(idim, odim, args, odim_adv=odim_adv)
    model = Loss(e2e, args.mtlalpha)

    if args.rnnlm is not None:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.rnnlm, rnnlm)
        e2e.rnnlm = rnnlm

    # write model config
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + '/model.json'
    with open(model_conf, 'wb') as f:
        logging.info('writing a model config file to ' + model_conf)
        f.write(json.dumps((idim, odim, odim_adv, vars(args)), indent=4, sort_keys=True).encode('utf_8'))
    for key in sorted(vars(args).keys()):
        logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))

    reporter = model.reporter

    # check the use of multi-gpu
    if args.ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
        logging.info('batch size is automatically increased (%d -> %d)' % (
            args.batch_size, args.batch_size * args.ngpu))
        args.batch_size *= args.ngpu

    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)

    # Setup an optimizer.
    # Encoder/decoder use the ASR learning rate, the adversarial branch its own.
    if args.ngpu > 1:
        param_grp = [
            {'params': model.module.predictor.enc.parameters(), 'lr': args.asr_lr},
            {'params': model.module.predictor.dec.parameters(), 'lr': args.asr_lr},
            {'params': model.module.predictor.adv.parameters(), 'lr': args.adv_lr}
        ]
    else:
        param_grp = [
            {'params': model.predictor.enc.parameters(), 'lr': args.asr_lr},
            {'params': model.predictor.dec.parameters(), 'lr': args.asr_lr},
            {'params': model.predictor.adv.parameters(), 'lr': args.adv_lr}
        ]
    if args.opt == 'adadelta':
        optimizer = torch.optim.Adadelta(param_grp, rho=0.95, eps=args.eps)
    elif args.opt == 'adam':
        optimizer = torch.optim.Adam(param_grp)
    else:
        # BUGFIX: an unknown optimizer name previously fell through and caused
        # a NameError on the first use of ``optimizer`` below.
        raise ValueError('unsupported optimizer: ' + args.opt)

    # FIXME: TOO DIRTY HACK
    setattr(optimizer, "target", reporter)
    setattr(optimizer, "serialize", lambda s: reporter.serialize(s))

    # Setup a converter
    converter = CustomConverter(e2e.subsample[0])

    # read json data
    with open(args.train_json, 'rb') as f:
        train_json = json.load(f)['utts']
    with open(args.valid_json, 'rb') as f:
        valid_json = json.load(f)['utts']

    # make minibatch list (variable length)
    train = make_batchset(train_json, args.batch_size,
                          args.maxlen_in, args.maxlen_out, args.minibatches,
                          min_batch_size=args.ngpu if args.ngpu > 1 else 1)
    valid = make_batchset(valid_json, args.batch_size,
                          args.maxlen_in, args.maxlen_out, args.minibatches,
                          min_batch_size=args.ngpu if args.ngpu > 1 else 1)
    # hack to make batchsize argument as 1
    # actual batchsize is included in a list
    if args.n_iter_processes > 0:
        train_iter = chainer.iterators.MultiprocessIterator(
            TransformDataset(train, converter.transform),
            batch_size=1, n_processes=args.n_iter_processes, n_prefetch=8, maxtasksperchild=20)
        valid_iter = chainer.iterators.MultiprocessIterator(
            TransformDataset(valid, converter.transform),
            batch_size=1, repeat=False, shuffle=False,
            n_processes=args.n_iter_processes, n_prefetch=8, maxtasksperchild=20)
    else:
        train_iter = chainer.iterators.SerialIterator(
            TransformDataset(train, converter.transform),
            batch_size=1)
        valid_iter = chainer.iterators.SerialIterator(
            TransformDataset(valid, converter.transform),
            batch_size=1, repeat=False, shuffle=False)

    # Prepare adversarial training schedule dictionary (epoch -> mode)
    adv_schedule = get_advsched(args.adv, args.epochs)

    # Set up a trainer
    updater = CustomUpdater(
        model, args.grad_clip, train_iter, optimizer, converter, device,
        args.ngpu, adv_schedule=adv_schedule, max_grlalpha=args.grlalpha)
    trainer = training.Trainer(
        updater, (args.epochs, 'epoch'), out=args.outdir)

    # Resume from a snapshot
    if args.resume:
        logging.info('resumed from %s' % args.resume)
        #torch_resume(args.resume, trainer, weight_sharing=args.weight_sharing)
        torch_resume(args.resume, trainer, weight_sharing=args.weight_sharing,
                     reinit_adv=args.reinit_adv)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(CustomEvaluator(model, valid_iter, reporter, converter, device))

    # Save attention weight each epoch
    if args.num_save_attention > 0 and args.mtlalpha != 1.0:
        data = sorted(list(valid_json.items())[:args.num_save_attention],
                      key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
        if hasattr(model, "module"):
            att_vis_fn = model.module.predictor.calculate_all_attentions
        else:
            att_vis_fn = model.predictor.calculate_all_attentions
        trainer.extend(PlotAttentionReport(
            att_vis_fn, data, args.outdir + "/att_ws",
            converter=converter, device=device), trigger=(1, 'epoch'))

    # Make a plot for training and validation values
    trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss',
                                            'main/loss_ctc', 'validation/main/loss_ctc',
                                            'main/loss_att',
                                            'validation/main/loss_att',
                                            'main/loss_adv',
                                            'validation/main/loss_adv'],
                                           'epoch', file_name='loss.png'))
    trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc',
                                            'main/acc_adv',
                                            'validation/main/acc_adv'],
                                           'epoch', file_name='acc.png'))

    # Save best models
    trainer.extend(extensions.snapshot_object(model, 'model.loss.best', savefun=torch_save),
                   trigger=training.triggers.MinValueTrigger('validation/main/loss'))
    # BUGFIX: was ``mtl_mode is not 'ctc'`` -- identity comparison with a string
    # literal is implementation-defined (SyntaxWarning on Python >= 3.8).
    if mtl_mode != 'ctc':
        trainer.extend(extensions.snapshot_object(model, 'model.acc.best', savefun=torch_save),
                       trigger=training.triggers.MaxValueTrigger('validation/main/acc'))

    # save snapshot which contains model and optimizer states
    trainer.extend(torch_snapshot(), trigger=(1, 'epoch'))

    # epsilon decay in the optimizer
    if args.opt == 'adadelta':
        if args.criterion == 'acc' and mtl_mode != 'ctc':
            trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load),
                           trigger=CompareValueTrigger(
                               'validation/main/acc',
                               lambda best_value, current_value: best_value > current_value))
            trainer.extend(adadelta_eps_decay(args.eps_decay),
                           trigger=CompareValueTrigger(
                               'validation/main/acc',
                               lambda best_value, current_value: best_value > current_value))
        elif args.criterion == 'loss':
            trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load),
                           trigger=CompareValueTrigger(
                               'validation/main/loss',
                               lambda best_value, current_value: best_value < current_value))
            trainer.extend(adadelta_eps_decay(args.eps_decay),
                           trigger=CompareValueTrigger(
                               'validation/main/loss',
                               lambda best_value, current_value: best_value < current_value))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=(REPORT_INTERVAL, 'iteration')))
    report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att',
                   'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att',
                   'main/acc', 'validation/main/acc', 'elapsed_time']
    if args.opt == 'adadelta':
        trainer.extend(extensions.observe_value(
            'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0]["eps"]),
            trigger=(REPORT_INTERVAL, 'iteration'))
        report_keys.append('eps')
    if args.report_cer:
        report_keys.append('validation/main/cer')
    if args.report_wer:
        report_keys.append('validation/main/wer')
    if args.adv:
        report_keys.extend(['main/loss_adv', 'main/acc_adv',
                            'validation/main/loss_adv',
                            'validation/main/acc_adv'])
    trainer.extend(extensions.PrintReport(
        report_keys), trigger=(REPORT_INTERVAL, 'iteration'))

    trainer.extend(extensions.ProgressBar(update_interval=REPORT_INTERVAL))

    # Run the training
    trainer.run()
def recog(args):
    '''Run recognition: decode each utterance in args.recog_json with the
    trained model (optionally fused with a character- and/or word-level RNNLM)
    and write the n-best results to args.result_label as json.

    :param args: parsed command-line namespace (model, rnnlm, batchsize, ...)
    '''
    # seed setting
    torch.manual_seed(args.seed)

    # read training config
    idim, odim, odim_adv, train_args = get_model_conf(args.model, args.model_conf)

    # read rnnlm (character-level)
    if args.rnnlm:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.rnnlm, rnnlm)
        rnnlm.eval()
    else:
        rnnlm = None

    # optional word-level rnnlm, fused with (or replacing) the character LM
    if args.word_rnnlm:
        rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
        word_dict = rnnlm_args.char_list_dict
        char_dict = {x: i for i, x in enumerate(train_args.char_list)}
        word_rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
            len(word_dict), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.word_rnnlm, word_rnnlm)
        word_rnnlm.eval()

        if rnnlm is not None:
            # combine word and character LMs into a multi-level LM
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.MultiLevelLM(word_rnnlm.predictor,
                                           rnnlm.predictor, word_dict, char_dict))
        else:
            # word LM only: look-ahead word LM over the character vocabulary
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor,
                                              word_dict, char_dict))

    # load trained model parameters
    logging.info('reading model parameters from ' + args.model)
    e2e = E2E(idim, odim, train_args, odim_adv=odim_adv)
    model = Loss(e2e, train_args.mtlalpha)

    if train_args.rnnlm is not None:
        # set rnnlm. external rnnlm is used for recognition.
        model.predictor.rnnlm = rnnlm

    torch_load(args.model, model)
    e2e.recog_args = args

    # gpu
    if args.ngpu == 1:
        gpu_id = range(args.ngpu)
        logging.info('gpu id: ' + str(gpu_id))
        model.cuda()
        if rnnlm:
            rnnlm.cuda()

    # read json data
    with open(args.recog_json, 'rb') as f:
        js = json.load(f)['utts']

    new_js = {}
    if args.batchsize == 0:
        # utterance-by-utterance decoding
        with torch.no_grad():
            for idx, name in enumerate(js.keys(), 1):
                logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
                feat = kaldi_io_py.read_mat(js[name]['input'][0]['feat'])
                nbest_hyps = e2e.recognize(feat, args, train_args.char_list, rnnlm)
                new_js[name] = add_results_to_json(js[name], nbest_hyps, train_args.char_list)
    else:
        try:
            from itertools import zip_longest as zip_longest
        except Exception:
            # python2 fallback
            from itertools import izip_longest as zip_longest

        def grouper(n, iterable, fillvalue=None):
            # chunk *iterable* into tuples of length n, padded with fillvalue
            kargs = [iter(iterable)] * n
            return zip_longest(*kargs, fillvalue=fillvalue)

        # sort data longest-first so each batch holds similar-length inputs
        keys = list(js.keys())
        feat_lens = [js[key]['input'][0]['shape'][0] for key in keys]
        sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
        keys = [keys[i] for i in sorted_index]

        with torch.no_grad():
            for names in grouper(args.batchsize, keys, None):
                # drop the fill values padding the last group
                names = [name for name in names if name]
                feats = [kaldi_io_py.read_mat(js[name]['input'][0]['feat'])
                         for name in names]
                nbest_hyps = e2e.recognize_batch(feats, args, train_args.char_list, rnnlm=rnnlm)
                for i, nbest_hyp in enumerate(nbest_hyps):
                    name = names[i]
                    new_js[name] = add_results_to_json(js[name], nbest_hyp, train_args.char_list)

    # TODO(watanabe) fix character coding problems when saving it
    with open(args.result_label, 'wb') as f:
        f.write(json.dumps({'utts': new_js}, indent=4, sort_keys=True).encode('utf_8'))
def encode(args):
    '''Dump ASR encoder representations (e.g. for x-vector style training).

    Reads a kaldi-style ``<name> <featpath>`` list from args.feats_in, runs
    each utterance through the trained encoder and writes the representations
    to ``args.feats_out``.{ark,scp} through a copy-feats pipe.

    :param args: parsed command-line namespace (model, model_conf, feats_in,
        feats_out, batchsize, ngpu, seed, ...)
    '''
    # seed setting
    torch.manual_seed(args.seed)

    # read training config
    idim, odim, odim_adv, train_args = get_model_conf(args.model, args.model_conf)

    # load trained model parameters
    logging.info('reading model parameters from ' + args.model)
    e2e = E2E(idim, odim, train_args, odim_adv=odim_adv)
    model = Loss(e2e, train_args.mtlalpha)
    # BUGFIX: this function used to copy recog()'s
    # ``model.predictor.rnnlm = rnnlm`` branch, but ``rnnlm`` is never defined
    # in encode(), so it raised a NameError for any model trained with an
    # RNNLM. No language model is needed to dump encoder representations, so
    # the branch was removed.
    torch_load(args.model, model)
    e2e.recog_args = args

    # gpu
    if args.ngpu == 1:
        gpu_id = range(args.ngpu)
        logging.info('gpu id: ' + str(gpu_id))
        model.cuda()

    # write through a copy-feats pipe so both .ark and .scp are produced
    arkscp = 'ark:| copy-feats --print-args=false ark:- ark,scp:%s.ark,%s.scp' % (args.feats_out, args.feats_out)

    if args.batchsize == 0:
        # utterance-by-utterance encoding
        with torch.no_grad():
            with kaldi_io_py.open_or_fd(arkscp, 'wb') as f, open(args.feats_in, 'rb') as f2:
                lines = f2.read().splitlines()
                for idx, line in enumerate(lines, 1):
                    line = line.strip().split()
                    name = line[0]
                    logging.info('(%d/%d) decoding ' + name, idx, len(lines))
                    feat = kaldi_io_py.read_mat(line[1])
                    rep = e2e.erep(feat)
                    logging.info('Rep shape: %s', rep.shape)
                    kaldi_io_py.write_mat(f, rep, name)
    else:
        try:
            from itertools import zip_longest as zip_longest
        except Exception:
            # python2 fallback
            from itertools import izip_longest as zip_longest

        def grouper(n, iterable, fillvalue=None):
            # chunk *iterable* into tuples of length n, padded with fillvalue
            kargs = [iter(iterable)] * n
            return zip_longest(*kargs, fillvalue=fillvalue)

        # Create json object for batch processing
        logging.info("Creating json for batch processing...")
        js = {}
        with open(args.feats_in, 'rb') as f:
            lines = f.read().splitlines()
            for line in lines:
                line = line.strip().split()
                name = line[0]
                featpath = line[1]
                feat_shape = kaldi_io_py.read_mat(featpath).shape
                js[name] = {'feat': featpath, 'shape': feat_shape}
        # sort utterances longest-first so each batch holds similar lengths
        logging.info("Sorting data for batch processing...")
        keys = list(js.keys())
        feat_lens = [js[key]['shape'][0] for key in keys]
        sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
        keys = [keys[i] for i in sorted_index]
        with torch.no_grad():
            with kaldi_io_py.open_or_fd(arkscp, 'wb') as f:
                for names in grouper(args.batchsize, keys, None):
                    # drop the fill values padding the last group
                    names = [name for name in names if name]
                    feats = [kaldi_io_py.read_mat(js[name]['feat'])
                             for name in names]
                    reps, replens = e2e.erep_batch(feats)
                    # use logging rather than a bare print, matching the rest
                    # of this module
                    logging.info('rep batch shape: %s, lens: %s', reps.shape, replens)
                    for i, rep in enumerate(reps):
                        name = names[i]
                        kaldi_io_py.write_mat(f, rep, name)
| [
"espnet.asr.asr_utils.adadelta_eps_decay",
"espnet.nets.e2e_asr_th.E2E",
"torch.optim.Adadelta",
"espnet.asr.asr_utils.restore_snapshot",
"json.dumps",
"espnet.asr.asr_utils.CompareValueTrigger",
"numpy.exp",
"torch.device",
"torch.no_grad",
"chainer.training.extensions.LogReport",
"espnet.lm.ex... | [((1364, 1385), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1378, 1385), False, 'import matplotlib\n'), ((1444, 1465), 're.search', 're.search', (['"""\\\\d+$"""', 's'], {}), "('\\\\d+$', s)\n", (1453, 1465), False, 'import re\n'), ((1513, 1536), 're.search', 're.search', (['"""^[a-z]+"""', 's'], {}), "('^[a-z]+', s)\n", (1522, 1536), False, 'import re\n'), ((2183, 2237), 'logging.info', 'logging.info', (['""" ------------- CGA = %f ---------"""', 'cga'], {}), "(' ------------- CGA = %f ---------', cga)\n", (2195, 2237), False, 'import logging\n'), ((8410, 8438), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8427, 8438), False, 'import torch\n'), ((9978, 10018), 'espnet.nets.e2e_asr_th.E2E', 'E2E', (['idim', 'odim', 'args'], {'odim_adv': 'odim_adv'}), '(idim, odim, args, odim_adv=odim_adv)\n', (9981, 10018), False, 'from espnet.nets.e2e_asr_th import E2E\n'), ((10031, 10055), 'espnet.nets.e2e_asr_th.Loss', 'Loss', (['e2e', 'args.mtlalpha'], {}), '(e2e, args.mtlalpha)\n', (10035, 10055), False, 'from espnet.nets.e2e_asr_th import Loss\n'), ((11210, 11258), 'torch.device', 'torch.device', (["('cuda' if args.ngpu > 0 else 'cpu')"], {}), "('cuda' if args.ngpu > 0 else 'cpu')\n", (11222, 11258), False, 'import torch\n'), ((12587, 12734), 'espnet.asr.asr_utils.make_batchset', 'make_batchset', (['train_json', 'args.batch_size', 'args.maxlen_in', 'args.maxlen_out', 'args.minibatches'], {'min_batch_size': '(args.ngpu if args.ngpu > 1 else 1)'}), '(train_json, args.batch_size, args.maxlen_in, args.maxlen_out,\n args.minibatches, min_batch_size=args.ngpu if args.ngpu > 1 else 1)\n', (12600, 12734), False, 'from espnet.asr.asr_utils import make_batchset\n'), ((12795, 12942), 'espnet.asr.asr_utils.make_batchset', 'make_batchset', (['valid_json', 'args.batch_size', 'args.maxlen_in', 'args.maxlen_out', 'args.minibatches'], {'min_batch_size': '(args.ngpu if args.ngpu > 1 else 1)'}), '(valid_json, 
args.batch_size, args.maxlen_in, args.maxlen_out,\n args.minibatches, min_batch_size=args.ngpu if args.ngpu > 1 else 1)\n', (12808, 12942), False, 'from espnet.asr.asr_utils import make_batchset\n'), ((14225, 14291), 'chainer.training.Trainer', 'training.Trainer', (['updater', "(args.epochs, 'epoch')"], {'out': 'args.outdir'}), "(updater, (args.epochs, 'epoch'), out=args.outdir)\n", (14241, 14291), False, 'from chainer import training\n'), ((19364, 19392), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (19381, 19392), False, 'import torch\n'), ((19460, 19503), 'espnet.asr.asr_utils.get_model_conf', 'get_model_conf', (['args.model', 'args.model_conf'], {}), '(args.model, args.model_conf)\n', (19474, 19503), False, 'from espnet.asr.asr_utils import get_model_conf\n'), ((20753, 20812), 'logging.info', 'logging.info', (["('reading model parameters from ' + args.model)"], {}), "('reading model parameters from ' + args.model)\n", (20765, 20812), False, 'import logging\n'), ((20823, 20869), 'espnet.nets.e2e_asr_th.E2E', 'E2E', (['idim', 'odim', 'train_args'], {'odim_adv': 'odim_adv'}), '(idim, odim, train_args, odim_adv=odim_adv)\n', (20826, 20869), False, 'from espnet.nets.e2e_asr_th import E2E\n'), ((20882, 20912), 'espnet.nets.e2e_asr_th.Loss', 'Loss', (['e2e', 'train_args.mtlalpha'], {}), '(e2e, train_args.mtlalpha)\n', (20886, 20912), False, 'from espnet.nets.e2e_asr_th import Loss\n'), ((21053, 21082), 'espnet.asr.asr_utils.torch_load', 'torch_load', (['args.model', 'model'], {}), '(args.model, model)\n', (21063, 21082), False, 'from espnet.asr.asr_utils import torch_load\n'), ((23291, 23319), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (23308, 23319), False, 'import torch\n'), ((23387, 23430), 'espnet.asr.asr_utils.get_model_conf', 'get_model_conf', (['args.model', 'args.model_conf'], {}), '(args.model, args.model_conf)\n', (23401, 23430), False, 'from espnet.asr.asr_utils import 
get_model_conf\n'), ((23472, 23531), 'logging.info', 'logging.info', (["('reading model parameters from ' + args.model)"], {}), "('reading model parameters from ' + args.model)\n", (23484, 23531), False, 'import logging\n'), ((23542, 23588), 'espnet.nets.e2e_asr_th.E2E', 'E2E', (['idim', 'odim', 'train_args'], {'odim_adv': 'odim_adv'}), '(idim, odim, train_args, odim_adv=odim_adv)\n', (23545, 23588), False, 'from espnet.nets.e2e_asr_th import E2E\n'), ((23601, 23631), 'espnet.nets.e2e_asr_th.Loss', 'Loss', (['e2e', 'train_args.mtlalpha'], {}), '(e2e, train_args.mtlalpha)\n', (23605, 23631), False, 'from espnet.nets.e2e_asr_th import Loss\n'), ((23772, 23801), 'espnet.asr.asr_utils.torch_load', 'torch_load', (['args.model', 'model'], {}), '(args.model, model)\n', (23782, 23801), False, 'from espnet.asr.asr_utils import torch_load\n'), ((2930, 2959), 'chainer.reporter.DictSummary', 'reporter_module.DictSummary', ([], {}), '()\n', (2957, 2959), True, 'from chainer import reporter as reporter_module\n'), ((4683, 4760), 'logging.info', 'logging.info', (['"""Epoch detail = %f, Adv mode = %s"""', 'self.epoch_detail', 'adv_mode'], {}), "('Epoch detail = %f, Adv mode = %s', self.epoch_detail, adv_mode)\n", (4695, 4760), False, 'import logging\n'), ((7082, 7103), 'math.isnan', 'math.isnan', (['grad_norm'], {}), '(grad_norm)\n', (7092, 7103), False, 'import math\n'), ((7520, 7549), 'espnet.asr.asr_utils.load_inputs_and_targets', 'load_inputs_and_targets', (['item'], {}), '(item)\n', (7543, 7549), False, 'from espnet.asr.asr_utils import load_inputs_and_targets\n'), ((7895, 7929), 'numpy.array', 'np.array', (['[x.shape[0] for x in xs]'], {}), '([x.shape[0] for x in xs])\n', (7903, 7929), True, 'import numpy as np\n'), ((8658, 8702), 'logging.info', 'logging.info', (['"""torch type check is disabled"""'], {}), "('torch type check is disabled')\n", (8670, 8702), False, 'import logging\n'), ((8833, 8886), 'logging.info', 'logging.info', (['"""torch cudnn deterministic is 
disabled"""'], {}), "('torch cudnn deterministic is disabled')\n", (8845, 8886), False, 'import logging\n'), ((8989, 9014), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9012, 9014), False, 'import torch\n'), ((9024, 9064), 'logging.warning', 'logging.warning', (['"""cuda is not available"""'], {}), "('cuda is not available')\n", (9039, 9064), False, 'import logging\n'), ((9721, 9750), 'logging.info', 'logging.info', (['"""Pure CTC mode"""'], {}), "('Pure CTC mode')\n", (9733, 9750), False, 'import logging\n'), ((10109, 10152), 'espnet.asr.asr_utils.get_model_conf', 'get_model_conf', (['args.rnnlm', 'args.rnnlm_conf'], {}), '(args.rnnlm, args.rnnlm_conf)\n', (10123, 10152), False, 'from espnet.asr.asr_utils import get_model_conf\n'), ((10312, 10341), 'espnet.asr.asr_utils.torch_load', 'torch_load', (['args.rnnlm', 'rnnlm'], {}), '(args.rnnlm, rnnlm)\n', (10322, 10341), False, 'from espnet.asr.asr_utils import torch_load\n'), ((10405, 10432), 'os.path.exists', 'os.path.exists', (['args.outdir'], {}), '(args.outdir)\n', (10419, 10432), False, 'import os\n'), ((10442, 10466), 'os.makedirs', 'os.makedirs', (['args.outdir'], {}), '(args.outdir)\n', (10453, 10466), False, 'import os\n'), ((10558, 10618), 'logging.info', 'logging.info', (["('writing a model config file to ' + model_conf)"], {}), "('writing a model config file to ' + model_conf)\n", (10570, 10618), False, 'import logging\n'), ((11009, 11127), 'logging.info', 'logging.info', (["('batch size is automatically increased (%d -> %d)' % (args.batch_size, \n args.batch_size * args.ngpu))"], {}), "('batch size is automatically increased (%d -> %d)' % (args.\n batch_size, args.batch_size * args.ngpu))\n", (11021, 11127), False, 'import logging\n'), ((11989, 12044), 'torch.optim.Adadelta', 'torch.optim.Adadelta', (['param_grp'], {'rho': '(0.95)', 'eps': 'args.eps'}), '(param_grp, rho=0.95, eps=args.eps)\n', (12009, 12044), False, 'import torch\n'), ((14359, 14404), 'logging.info', 
'logging.info', (["('resumed from %s' % args.resume)"], {}), "('resumed from %s' % args.resume)\n", (14371, 14404), False, 'import logging\n'), ((14493, 14595), 'espnet.asr.asr_utils.torch_resume', 'torch_resume', (['args.resume', 'trainer'], {'weight_sharing': 'args.weight_sharing', 'reinit_adv': 'args.reinit_adv'}), '(args.resume, trainer, weight_sharing=args.weight_sharing,\n reinit_adv=args.reinit_adv)\n', (14505, 14595), False, 'from espnet.asr.asr_utils import torch_resume\n'), ((15452, 15691), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/loss', 'validation/main/loss', 'main/loss_ctc',\n 'validation/main/loss_ctc', 'main/loss_att', 'validation/main/loss_att',\n 'main/loss_adv', 'validation/main/loss_adv']", '"""epoch"""'], {'file_name': '"""loss.png"""'}), "(['main/loss', 'validation/main/loss', 'main/loss_ctc',\n 'validation/main/loss_ctc', 'main/loss_att', 'validation/main/loss_att',\n 'main/loss_adv', 'validation/main/loss_adv'], 'epoch', file_name='loss.png'\n )\n", (15473, 15691), False, 'from chainer.training import extensions\n'), ((15950, 16085), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/acc', 'validation/main/acc', 'main/acc_adv', 'validation/main/acc_adv']", '"""epoch"""'], {'file_name': '"""acc.png"""'}), "(['main/acc', 'validation/main/acc', 'main/acc_adv',\n 'validation/main/acc_adv'], 'epoch', file_name='acc.png')\n", (15971, 16085), False, 'from chainer.training import extensions\n'), ((16251, 16323), 'chainer.training.extensions.snapshot_object', 'extensions.snapshot_object', (['model', '"""model.loss.best"""'], {'savefun': 'torch_save'}), "(model, 'model.loss.best', savefun=torch_save)\n", (16277, 16323), False, 'from chainer.training import extensions\n'), ((16708, 16724), 'espnet.asr.asr_utils.torch_snapshot', 'torch_snapshot', ([], {}), '()\n', (16722, 16724), False, 'from espnet.asr.asr_utils import torch_snapshot\n'), ((18151, 18211), 
'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {'trigger': "(REPORT_INTERVAL, 'iteration')"}), "(trigger=(REPORT_INTERVAL, 'iteration'))\n", (18171, 18211), False, 'from chainer.training import extensions\n'), ((19091, 19126), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (['report_keys'], {}), '(report_keys)\n', (19113, 19126), False, 'from chainer.training import extensions\n'), ((19197, 19252), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {'update_interval': 'REPORT_INTERVAL'}), '(update_interval=REPORT_INTERVAL)\n', (19219, 19252), False, 'from chainer.training import extensions\n'), ((19562, 19605), 'espnet.asr.asr_utils.get_model_conf', 'get_model_conf', (['args.rnnlm', 'args.rnnlm_conf'], {}), '(args.rnnlm, args.rnnlm_conf)\n', (19576, 19605), False, 'from espnet.asr.asr_utils import get_model_conf\n'), ((19771, 19800), 'espnet.asr.asr_utils.torch_load', 'torch_load', (['args.rnnlm', 'rnnlm'], {}), '(args.rnnlm, rnnlm)\n', (19781, 19800), False, 'from espnet.asr.asr_utils import torch_load\n'), ((19899, 19952), 'espnet.asr.asr_utils.get_model_conf', 'get_model_conf', (['args.word_rnnlm', 'args.word_rnnlm_conf'], {}), '(args.word_rnnlm, args.word_rnnlm_conf)\n', (19913, 19952), False, 'from espnet.asr.asr_utils import get_model_conf\n'), ((20212, 20251), 'espnet.asr.asr_utils.torch_load', 'torch_load', (['args.word_rnnlm', 'word_rnnlm'], {}), '(args.word_rnnlm, word_rnnlm)\n', (20222, 20251), False, 'from espnet.asr.asr_utils import torch_load\n'), ((25091, 25144), 'logging.info', 'logging.info', (['"""Creating json for batch processing..."""'], {}), "('Creating json for batch processing...')\n", (25103, 25144), False, 'import logging\n'), ((25553, 25605), 'logging.info', 'logging.info', (['"""Sorting data for batch processing..."""'], {}), "('Sorting data for batch processing...')\n", (25565, 25605), False, 'import logging\n'), ((2891, 2910), 'copy.copy', 'copy.copy', (['iterator'], 
{}), '(iterator)\n', (2900, 2910), False, 'import copy\n'), ((3000, 3015), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3013, 3015), False, 'import torch\n'), ((7117, 7174), 'logging.warning', 'logging.warning', (['"""grad norm is nan. Do not update model."""'], {}), "('grad norm is nan. Do not update model.')\n", (7132, 7174), False, 'import logging\n'), ((9172, 9184), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9181, 9184), False, 'import json\n'), ((9815, 9850), 'logging.info', 'logging.info', (['"""Pure attention mode"""'], {}), "('Pure attention mode')\n", (9827, 9850), False, 'import logging\n'), ((9894, 9933), 'logging.info', 'logging.info', (['"""Multitask learning mode"""'], {}), "('Multitask learning mode')\n", (9906, 9933), False, 'import logging\n'), ((12094, 12121), 'torch.optim.Adam', 'torch.optim.Adam', (['param_grp'], {}), '(param_grp)\n', (12110, 12121), False, 'import torch\n'), ((12424, 12436), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12433, 12436), False, 'import json\n'), ((12509, 12521), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12518, 12521), False, 'import json\n'), ((13184, 13228), 'chainer.datasets.TransformDataset', 'TransformDataset', (['train', 'converter.transform'], {}), '(train, converter.transform)\n', (13200, 13228), False, 'from chainer.datasets import TransformDataset\n'), ((13399, 13443), 'chainer.datasets.TransformDataset', 'TransformDataset', (['valid', 'converter.transform'], {}), '(valid, converter.transform)\n', (13415, 13443), False, 'from chainer.datasets import TransformDataset\n'), ((13659, 13703), 'chainer.datasets.TransformDataset', 'TransformDataset', (['train', 'converter.transform'], {}), '(train, converter.transform)\n', (13675, 13703), False, 'from chainer.datasets import TransformDataset\n'), ((13798, 13842), 'chainer.datasets.TransformDataset', 'TransformDataset', (['valid', 'converter.transform'], {}), '(valid, converter.transform)\n', (13814, 13842), False, 'from chainer.datasets 
import TransformDataset\n'), ((15232, 15335), 'espnet.asr.asr_utils.PlotAttentionReport', 'PlotAttentionReport', (['att_vis_fn', 'data', "(args.outdir + '/att_ws')"], {'converter': 'converter', 'device': 'device'}), "(att_vis_fn, data, args.outdir + '/att_ws', converter=\n converter, device=device)\n", (15251, 15335), False, 'from espnet.asr.asr_utils import PlotAttentionReport\n'), ((16352, 16409), 'chainer.training.triggers.MinValueTrigger', 'training.triggers.MinValueTrigger', (['"""validation/main/loss"""'], {}), "('validation/main/loss')\n", (16385, 16409), False, 'from chainer import training\n'), ((16464, 16535), 'chainer.training.extensions.snapshot_object', 'extensions.snapshot_object', (['model', '"""model.acc.best"""'], {'savefun': 'torch_save'}), "(model, 'model.acc.best', savefun=torch_save)\n", (16490, 16535), False, 'from chainer.training import extensions\n'), ((21366, 21378), 'json.load', 'json.load', (['f'], {}), '(f)\n', (21375, 21378), False, 'import json\n'), ((21445, 21460), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21458, 21460), False, 'import torch\n'), ((22130, 22170), 'itertools.izip_longest', 'zip_longest', (['*kargs'], {'fillvalue': 'fillvalue'}), '(*kargs, fillvalue=fillvalue)\n', (22141, 22170), True, 'from itertools import izip_longest as zip_longest\n'), ((22436, 22451), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22449, 22451), False, 'import torch\n'), ((24121, 24136), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24134, 24136), False, 'import torch\n'), ((24991, 25031), 'itertools.izip_longest', 'zip_longest', (['*kargs'], {'fillvalue': 'fillvalue'}), '(*kargs, fillvalue=fillvalue)\n', (25002, 25031), True, 'from itertools import izip_longest as zip_longest\n'), ((25838, 25853), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25851, 25853), False, 'import torch\n'), ((4954, 5026), 'logging.info', 'logging.info', (['""" ----- Resetting the adversarial branch weights... 
-----"""'], {}), "(' ----- Resetting the adversarial branch weights... -----')\n", (4966, 5026), False, 'import logging\n'), ((8078, 8101), 'torch.from_numpy', 'torch.from_numpy', (['ilens'], {}), '(ilens)\n', (8094, 8101), False, 'import torch\n'), ((16568, 16624), 'chainer.training.triggers.MaxValueTrigger', 'training.triggers.MaxValueTrigger', (['"""validation/main/acc"""'], {}), "('validation/main/acc')\n", (16601, 16624), False, 'from chainer import training\n'), ((16906, 16982), 'espnet.asr.asr_utils.restore_snapshot', 'restore_snapshot', (['model', "(args.outdir + '/model.acc.best')"], {'load_fn': 'torch_load'}), "(model, args.outdir + '/model.acc.best', load_fn=torch_load)\n", (16922, 16982), False, 'from espnet.asr.asr_utils import restore_snapshot\n'), ((17215, 17249), 'espnet.asr.asr_utils.adadelta_eps_decay', 'adadelta_eps_decay', (['args.eps_decay'], {}), '(args.eps_decay)\n', (17233, 17249), False, 'from espnet.asr.asr_utils import adadelta_eps_decay\n'), ((20377, 20468), 'espnet.lm.extlm_pytorch.MultiLevelLM', 'extlm_pytorch.MultiLevelLM', (['word_rnnlm.predictor', 'rnnlm.predictor', 'word_dict', 'char_dict'], {}), '(word_rnnlm.predictor, rnnlm.predictor, word_dict,\n char_dict)\n', (20403, 20468), True, 'import espnet.lm.extlm_pytorch as extlm_pytorch\n'), ((20591, 20664), 'espnet.lm.extlm_pytorch.LookAheadWordLM', 'extlm_pytorch.LookAheadWordLM', (['word_rnnlm.predictor', 'word_dict', 'char_dict'], {}), '(word_rnnlm.predictor, word_dict, char_dict)\n', (20620, 20664), True, 'import espnet.lm.extlm_pytorch as extlm_pytorch\n'), ((21617, 21667), 'kaldi_io_py.read_mat', 'kaldi_io_py.read_mat', (["js[name]['input'][0]['feat']"], {}), "(js[name]['input'][0]['feat'])\n", (21637, 21667), False, 'import kaldi_io_py\n'), ((21783, 21846), 'espnet.asr.asr_utils.add_results_to_json', 'add_results_to_json', (['js[name]', 'nbest_hyps', 'train_args.char_list'], {}), '(js[name], nbest_hyps, train_args.char_list)\n', (21802, 21846), False, 'from 
espnet.asr.asr_utils import add_results_to_json\n'), ((24155, 24191), 'kaldi_io_py.open_or_fd', 'kaldi_io_py.open_or_fd', (['arkscp', '"""wb"""'], {}), "(arkscp, 'wb')\n", (24177, 24191), False, 'import kaldi_io_py\n'), ((25872, 25908), 'kaldi_io_py.open_or_fd', 'kaldi_io_py.open_or_fd', (['arkscp', '"""wb"""'], {}), "(arkscp, 'wb')\n", (25894, 25908), False, 'import kaldi_io_py\n'), ((3100, 3141), 'chainer.reporter.report_scope', 'reporter_module.report_scope', (['observation'], {}), '(observation)\n', (3128, 3141), True, 'from chainer import reporter as reporter_module\n'), ((17019, 17127), 'espnet.asr.asr_utils.CompareValueTrigger', 'CompareValueTrigger', (['"""validation/main/acc"""', '(lambda best_value, current_value: best_value > current_value)'], {}), "('validation/main/acc', lambda best_value, current_value:\n best_value > current_value)\n", (17038, 17127), False, 'from espnet.asr.asr_utils import CompareValueTrigger\n'), ((17286, 17394), 'espnet.asr.asr_utils.CompareValueTrigger', 'CompareValueTrigger', (['"""validation/main/acc"""', '(lambda best_value, current_value: best_value > current_value)'], {}), "('validation/main/acc', lambda best_value, current_value:\n best_value > current_value)\n", (17305, 17394), False, 'from espnet.asr.asr_utils import CompareValueTrigger\n'), ((17521, 17598), 'espnet.asr.asr_utils.restore_snapshot', 'restore_snapshot', (['model', "(args.outdir + '/model.loss.best')"], {'load_fn': 'torch_load'}), "(model, args.outdir + '/model.loss.best', load_fn=torch_load)\n", (17537, 17598), False, 'from espnet.asr.asr_utils import restore_snapshot\n'), ((17832, 17866), 'espnet.asr.asr_utils.adadelta_eps_decay', 'adadelta_eps_decay', (['args.eps_decay'], {}), '(args.eps_decay)\n', (17850, 17866), False, 'from espnet.asr.asr_utils import adadelta_eps_decay\n'), ((22597, 22647), 'kaldi_io_py.read_mat', 'kaldi_io_py.read_mat', (["js[name]['input'][0]['feat']"], {}), "(js[name]['input'][0]['feat'])\n", (22617, 22647), False, 'import 
kaldi_io_py\n'), ((22919, 22981), 'espnet.asr.asr_utils.add_results_to_json', 'add_results_to_json', (['js[name]', 'nbest_hyp', 'train_args.char_list'], {}), '(js[name], nbest_hyp, train_args.char_list)\n', (22938, 22981), False, 'from espnet.asr.asr_utils import add_results_to_json\n'), ((23110, 23164), 'json.dumps', 'json.dumps', (["{'utts': new_js}"], {'indent': '(4)', 'sort_keys': '(True)'}), "({'utts': new_js}, indent=4, sort_keys=True)\n", (23120, 23164), False, 'import json\n'), ((24520, 24549), 'kaldi_io_py.read_mat', 'kaldi_io_py.read_mat', (['line[1]'], {}), '(line[1])\n', (24540, 24549), False, 'import kaldi_io_py\n'), ((24611, 24651), 'logging.info', 'logging.info', (['"""Rep shape: %s"""', 'rep.shape'], {}), "('Rep shape: %s', rep.shape)\n", (24623, 24651), False, 'import logging\n'), ((24672, 24707), 'kaldi_io_py.write_mat', 'kaldi_io_py.write_mat', (['f', 'rep', 'name'], {}), '(f, rep, name)\n', (24693, 24707), False, 'import kaldi_io_py\n'), ((25418, 25448), 'kaldi_io_py.read_mat', 'kaldi_io_py.read_mat', (['featpath'], {}), '(featpath)\n', (25438, 25448), False, 'import kaldi_io_py\n'), ((2152, 2169), 'numpy.exp', 'np.exp', (['(-10 * p_i)'], {}), '(-10 * p_i)\n', (2158, 2169), True, 'import numpy as np\n'), ((17635, 17744), 'espnet.asr.asr_utils.CompareValueTrigger', 'CompareValueTrigger', (['"""validation/main/loss"""', '(lambda best_value, current_value: best_value < current_value)'], {}), "('validation/main/loss', lambda best_value,\n current_value: best_value < current_value)\n", (17654, 17744), False, 'from espnet.asr.asr_utils import CompareValueTrigger\n'), ((17903, 18012), 'espnet.asr.asr_utils.CompareValueTrigger', 'CompareValueTrigger', (['"""validation/main/loss"""', '(lambda best_value, current_value: best_value < current_value)'], {}), "('validation/main/loss', lambda best_value,\n current_value: best_value < current_value)\n", (17922, 18012), False, 'from espnet.asr.asr_utils import CompareValueTrigger\n'), ((26071, 26109), 
'kaldi_io_py.read_mat', 'kaldi_io_py.read_mat', (["js[name]['feat']"], {}), "(js[name]['feat'])\n", (26091, 26109), False, 'import kaldi_io_py\n'), ((26378, 26413), 'kaldi_io_py.write_mat', 'kaldi_io_py.write_mat', (['f', 'rep', 'name'], {}), '(f, rep, name)\n', (26399, 26413), False, 'import kaldi_io_py\n'), ((8006, 8025), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (8022, 8025), False, 'import torch\n'), ((8140, 8159), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (8156, 8159), False, 'import torch\n'), ((8238, 8257), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (8254, 8257), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
2019-10-07-adjustables.py: Experiment runner file for generating data for
many combinations of numbers of high- vs. low-fidelity samples, specifically
for the adjustable benchmark functions.
A specific adjustable parameter can be given as commandline argument.
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import sys
from itertools import product
import numpy as np
from pyprojroot import here
import mf2
from experiments import Instance, create_model_error_grid
save_dir = here('files/2019-10-07-adjustables/')
save_dir.mkdir(parents=True, exist_ok=True)
funcs = [
mf2.adjustable.branin,
mf2.adjustable.paciorek,
mf2.adjustable.hartmann3,
mf2.adjustable.trid,
]
if len(sys.argv) > 1:
params = [float(x) for x in sys.argv[1:]]
else:
params = np.round(np.linspace(0, 1.0, 21), 2)
cases = [
f(param)
for f in funcs
for param in params
]
kernels = ['Matern']
scaling_options = [
'off',
# 'on',
# 'inverted',
# 'regularized'
]
min_high, max_high = 2, 50
min_low, max_low = 3, 125
step = 1
num_reps = 50
instances = [Instance(h, l, r)
for h, l, r in product(range(min_high, max_high + 1, step),
range(min_low, max_low + 1, step),
range(num_reps))
if h < l]
extra_attributes = {'mf2_version': mf2.__version__}
for case, kernel, scale in product(cases, kernels, scaling_options):
if 'paciorek' in case.name.lower() and float(case.name[-3:]) == 0.0:
continue # In this case the functions are equal, leading to badly defined diff models
mfbo_options = {'kernel': kernel, 'scaling': scale}
create_model_error_grid(case, instances, mfbo_options, save_dir=save_dir,
extra_attributes=extra_attributes)
| [
"experiments.create_model_error_grid",
"numpy.linspace",
"itertools.product",
"experiments.Instance",
"pyprojroot.here"
] | [((512, 549), 'pyprojroot.here', 'here', (['"""files/2019-10-07-adjustables/"""'], {}), "('files/2019-10-07-adjustables/')\n", (516, 549), False, 'from pyprojroot import here\n'), ((1427, 1467), 'itertools.product', 'product', (['cases', 'kernels', 'scaling_options'], {}), '(cases, kernels, scaling_options)\n', (1434, 1467), False, 'from itertools import product\n'), ((1108, 1125), 'experiments.Instance', 'Instance', (['h', 'l', 'r'], {}), '(h, l, r)\n', (1116, 1125), False, 'from experiments import Instance, create_model_error_grid\n'), ((1698, 1810), 'experiments.create_model_error_grid', 'create_model_error_grid', (['case', 'instances', 'mfbo_options'], {'save_dir': 'save_dir', 'extra_attributes': 'extra_attributes'}), '(case, instances, mfbo_options, save_dir=save_dir,\n extra_attributes=extra_attributes)\n', (1721, 1810), False, 'from experiments import Instance, create_model_error_grid\n'), ((815, 838), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (826, 838), True, 'import numpy as np\n')] |
import numpy as np
from cost_functions import trajectory_cost_fn
import time
import logging
def dd(s):
logging.getLogger("hw4").debug(s)
def di(s):
logging.getLogger("hw4").info(s)
class Controller():
def __init__(self):
pass
# Get the appropriate action(s) for this state(s)
def get_action(self, state):
pass
class RandomController(Controller):
def __init__(self, env):
""" YOUR CODE HERE """
self.env = env
def get_action(self, state):
""" YOUR CODE HERE """
""" Your code should randomly sample an action uniformly from the action space """
return self.env.action_space.sample() # pick a random action
class MPCcontroller(Controller):
""" Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
def __init__(self,
env,
dyn_model,
horizon=5,
cost_fn=None,
num_simulated_paths=10,
):
self.env = env
self.dyn_model = dyn_model
self.horizon = horizon
self.cost_fn = cost_fn
self.num_simulated_paths = num_simulated_paths
def get_action(self, state):
""" YOUR CODE HERE """
""" Note: be careful to batch your simulations through the model for speed """
# state is the initial state
# we need to generate self.num_simulated_paths trajectories at length self.horizon steps
curr_state = np.tile(state, (
self.num_simulated_paths, 1)) # create a batch of start state: [num_simulated_paths,obs_dim]
states = []
actions = []
next_states = []
for i in range(self.horizon):
# sample an action per each path
curr_action = []
for _ in range(self.num_simulated_paths):
curr_action.append(self.env.action_space.sample()) # curr action per each path
curr_action = np.concatenate([curr_action]) # shape : [num_simulated_paths,act_dim]
next_state = self.dyn_model.predict(curr_state, curr_action) # shape: [num_simulated_paths,obs_dim]
# append it to the path data structure
states.append(curr_state)
actions.append(curr_action)
next_states.append(next_state)
# progress one step
curr_state = next_state
# at this point we have the following lists:
# states = a list of numpy arrays, each is a set of states for a time step t, for num_simulated_paths
# so states[t] is numpy array of size [num_simulated_paths,obs_dim]
# actions = list of numpy array of actions for all paths. actions[t] is of shape [num_simulated_paths,act_dim]
# next_states = like states but its the state of time t+1. np array shape [num_simulated_paths,obs_dim]
# we now need to find the cost of each path
paths_costs = trajectory_cost_fn(self.cost_fn, states, actions, next_states)
# now we have array of num_simulated_paths cost values. we need to find the argmin and take the corresponding action
return actions[0][np.argmin(paths_costs)]
| [
"cost_functions.trajectory_cost_fn",
"logging.getLogger",
"numpy.argmin",
"numpy.tile",
"numpy.concatenate"
] | [((1504, 1549), 'numpy.tile', 'np.tile', (['state', '(self.num_simulated_paths, 1)'], {}), '(state, (self.num_simulated_paths, 1))\n', (1511, 1549), True, 'import numpy as np\n'), ((2957, 3019), 'cost_functions.trajectory_cost_fn', 'trajectory_cost_fn', (['self.cost_fn', 'states', 'actions', 'next_states'], {}), '(self.cost_fn, states, actions, next_states)\n', (2975, 3019), False, 'from cost_functions import trajectory_cost_fn\n'), ((108, 132), 'logging.getLogger', 'logging.getLogger', (['"""hw4"""'], {}), "('hw4')\n", (125, 132), False, 'import logging\n'), ((158, 182), 'logging.getLogger', 'logging.getLogger', (['"""hw4"""'], {}), "('hw4')\n", (175, 182), False, 'import logging\n'), ((1977, 2006), 'numpy.concatenate', 'np.concatenate', (['[curr_action]'], {}), '([curr_action])\n', (1991, 2006), True, 'import numpy as np\n'), ((3171, 3193), 'numpy.argmin', 'np.argmin', (['paths_costs'], {}), '(paths_costs)\n', (3180, 3193), True, 'import numpy as np\n')] |
"""
Movie recommender.
Copyright (C) <NAME> 2019
See the LICENSE file for more information.
Example usage:
$ python rmovie.py "lord of the rings"
$ python rmovie.py "shawshank redemption"
"""
import argparse
import numpy as np
import pandas as pd
if __name__ != '__main__':
m = '"rmovie.py" can\'t be imported'
raise Exception(m)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('movie_name', help='the title of the movie you like, from the list "data/movies.csv"')
args = arg_parser.parse_args()
del arg_parser
# Loading the needed data
df = pd.read_csv('data/movies.csv').merge(pd.read_csv('data/ratings.csv'), on='movieId')
# Check for the movie name in the database
movie_in_db = df['title'] == args.movie_name
if not np.any(movie_in_db):
# m = 'movie with title "{}", can\'t be found in the "data/movies.csv" file'
# raise Exception(m.format(args.movie_name))
similar_names_with_points = []
added_titles = []
movie_name_words = [w.strip() for w in args.movie_name.lower().split(' ')]
# Exclude "the"
if 'the' in movie_in_db:
del movie_in_db[movie_in_db.index('the')]
# Find the most similars by name
for title in df['title'].values:
points = 0
for word in title.lower().split(' '):
if word in movie_name_words:
points += 1
# Do not add movie to the list if it has already been added
if title not in added_titles:
similar_names_with_points.append((title, points,))
added_titles.append(title)
# Choose the biggest pointed five
del added_titles, movie_name_words
similar_names_with_points = sorted(similar_names_with_points, key=lambda x: x[1], reverse=True)[:5]
# Ask user for input
print()
print('Movie with title "{}", can\'t be found in the "data/movies.csv" file.'.format(args.movie_name))
print('These are the most similar movie titles:')
print()
print('-' * 30)
print('Index\tMovie title')
for i, title in enumerate(similar_names_with_points):
print('[' + str(i + 1) + ']:\t' + title[0])
print('-' * 30)
print()
print('Please choose one by providing the index for the movie that is most likely what you search for. (Type 0 and enter for exit)')
movie_index = input('Movie index: ')
try:
movie_index = int(movie_index)
except ValueError:
print('Wrong index, process terminated.')
exit(1)
if movie_index == 0:
exit()
else:
movie_in_db = df['title'] == similar_names_with_points[movie_index - 1][0]
recommended_movies = []
# Find the movie in the database, and sort it by rating
movie_db = df[movie_in_db].sort_values(by='rating', ascending=False)
# Get the first 5 users who liked this movie
for user in movie_db.iloc[:5]['userId'].values:
# Get the rated movies for this user
rated_movies = df[df['userId'] == user]
# Get the five biggest rated movie by this user
rated_movies = rated_movies[rated_movies['title'] != args.movie_name]\
.sort_values(by='rating', ascending=False)\
.iloc[:5]
# Add these to the recommendations
recommended_movies.extend(list(rated_movies['title'].values))
recommended_movies = np.unique(recommended_movies)
# Weighting each movie
given_movie_genres = df[movie_in_db].iloc[0]['genres'].split('|') # Genres of the given movie
scores = {} # {title: score ...}
for movie in recommended_movies:
movie_d = df[df['title'] == movie].iloc[0]
movie_genres = movie_d['genres'].split('|')
score = 0
# Scoring on how many given_movie_genres can be found in movie_genres
for given_movie_genre in given_movie_genres:
if given_movie_genre in movie_genres:
score += 1
scores[movie] = score
# Sort them on score and reverse it, because the bigger the score the better
recommended_movies = sorted(scores, key=lambda x: scores[x])[::-1]
print()
print('-' * 30)
print('Movie recommendations:')
print()
for i, movie in enumerate(recommended_movies):
print('[' + str(i + 1) + ']:\t' + movie)
print('-' * 30)
| [
"pandas.read_csv",
"numpy.any",
"argparse.ArgumentParser",
"numpy.unique"
] | [((358, 383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (381, 383), False, 'import argparse\n'), ((3288, 3317), 'numpy.unique', 'np.unique', (['recommended_movies'], {}), '(recommended_movies)\n', (3297, 3317), True, 'import numpy as np\n'), ((610, 641), 'pandas.read_csv', 'pd.read_csv', (['"""data/ratings.csv"""'], {}), "('data/ratings.csv')\n", (621, 641), True, 'import pandas as pd\n'), ((754, 773), 'numpy.any', 'np.any', (['movie_in_db'], {}), '(movie_in_db)\n', (760, 773), True, 'import numpy as np\n'), ((573, 603), 'pandas.read_csv', 'pd.read_csv', (['"""data/movies.csv"""'], {}), "('data/movies.csv')\n", (584, 603), True, 'import pandas as pd\n')] |
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # need to disable eager in TF2.x
input_size = 3 # the length of input feature vectors to simple RNN cells
output_size = 5 # the length of output feature vectors produced by simple RNN cells
"""
* When thinking in low level, the operations performed by neural networks are actually nothing but simple mathematical
computations. Hence, deep learning libraries such as tensorflow and pytorch utilize special data structure called
"computation graph" for the implementation of these networks.
* Computation graph defined for basic RNN layer reads input sequence via placeholders in this python script.
* In this point, we assume that input sequences would be composed of exactly 3 pieces like a sentence of just three
words, so RNN layer is implemented to have 3 cells. Each cell is supposed to read feature vector representing a piece
of input sequence; hence, for each of these cells, one placeholder in 2D (input-size, batch-size) is defined.
Since fixed batch-size is not necessary except for some special conditions like "stateful concept", we left it with
None.
"""
x0 = tf.compat.v1.placeholder(tf.float32, [input_size, None])
x1 = tf.compat.v1.placeholder(tf.float32, [input_size, None])
x2 = tf.compat.v1.placeholder(tf.float32, [input_size, None])
"""
* Network parameters are regularly modified and updated under the supervision of calculated loss value. TF recommends
these parameters to be maintained by Variables; thus, bias term and parameter matrices are defined by TF Variable
class.
* To figure out the shape of parameters clearly, you can check the figures in "README.pdf"
"""
Wx = tf.compat.v1.Variable(tf.random.normal(shape=[output_size, input_size], dtype=tf.float32))
Wa = tf.compat.v1.Variable(tf.random.normal(shape=[output_size, output_size], dtype=tf.float32))
b = tf.compat.v1.Variable(tf.random.normal(shape=[output_size, 1], dtype=tf.float32))
"""
* When we instantiate an RNN layer by using high-level libraries such as Keras, its corresponding computation graph is
automatically created. In this point, because we implement entire architecture manually, we have to specify the
operations of computation graph explicitly, as being done in the code below.
"""
a0 = tf.tanh(tf.matmul(Wx, x0) + b) # first RNN cell
a1 = tf.tanh(tf.matmul(Wx, x1) + tf.matmul(Wa, a0) + b) # second RNN cell
a2 = tf.tanh(tf.matmul(Wx, x2) + tf.matmul(Wa, a1) + b) # third RNN cell
"""
* All variables defined to instantiate model parameters are stored in a list.
* To set these variables to pre-determined initial values in definition section, global variable initializer is used.
* We do this initialization process inside the session.
"""
init = tf.compat.v1.global_variables_initializer()
"""
* Each batch is dedicated to fill one placeholder of computation graph.
* While columns refer to the vectors of input sequence, the number of columns specifies how many vectors
for one placeholder exist in the batch. In other words, There are 4 vectors for each placeholder through
the batches, and each input vector has 3 components.
* The diagram below provides more understandable intuition.
--------- --------- ---------
| | | | | |
Three | 1 | | 2 | | 3 |
Cells | | | | | |
--------- --------- ---------
^ ^ ^
| | |
Batch 1 --> [0, 1, 2] [9, 8, 7] [5, 4, 3]
Batch 2 --> [3, 4, 5] [0, 0, 0] [9, 8, 7]
Batch 3 --> [6, 7, 8] [6, 5, 4] [1, 2, 0]
Batch 4 --> [9, 0, 1] [3, 2, 1] [6, 6, 6]
"""
x0_batch = np.array([[0, 3, 6, 9],
[1, 4, 7, 0],
[2, 5, 8, 1]])
x1_batch = np.array([[9, 0, 6, 3],
[8, 0, 5, 2],
[7, 0, 4, 1]])
x2_batch = np.array([[5, 9, 1, 6],
[4, 8, 2, 6],
[3, 7, 0, 6]])
"""
* First argument of session.run() is named as "fetches". It can be a single operation associated with a tensor or a list
of them. It is generally interpreted as one branch of a computation graph.
* A simple recurrent network consists of consecutively ordered recurrent cells in theoretical level. Each cell takes one
input vector of features. Each input vector is processed by its corresponding cell; hence, the argument "fetches"
refers to the list of input vectors. On the other hand, the argument "feed_dict" is a dictionary, mapping graph
elements to the values.
* fetches: [timesteps, batch_size, features]
"""
with tf.compat.v1.Session() as session:
init.run() # initializing the variables
a0_result, a1_result, a2_result = session.run([a0, a1, a2], feed_dict={x0: x0_batch, x1: x1_batch, x2: x2_batch})
print(a0_result)
| [
"tensorflow.random.normal",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.Session",
"tensorflow.matmul",
"numpy.array",
"tensorflow.compat.v1.global_variables_initializer"
] | [((44, 82), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (80, 82), True, 'import tensorflow as tf\n'), ((1188, 1244), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[input_size, None]'], {}), '(tf.float32, [input_size, None])\n', (1212, 1244), True, 'import tensorflow as tf\n'), ((1250, 1306), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[input_size, None]'], {}), '(tf.float32, [input_size, None])\n', (1274, 1306), True, 'import tensorflow as tf\n'), ((1312, 1368), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[input_size, None]'], {}), '(tf.float32, [input_size, None])\n', (1336, 1368), True, 'import tensorflow as tf\n'), ((2797, 2840), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2838, 2840), True, 'import tensorflow as tf\n'), ((3957, 4009), 'numpy.array', 'np.array', (['[[0, 3, 6, 9], [1, 4, 7, 0], [2, 5, 8, 1]]'], {}), '([[0, 3, 6, 9], [1, 4, 7, 0], [2, 5, 8, 1]])\n', (3965, 4009), True, 'import numpy as np\n'), ((4064, 4116), 'numpy.array', 'np.array', (['[[9, 0, 6, 3], [8, 0, 5, 2], [7, 0, 4, 1]]'], {}), '([[9, 0, 6, 3], [8, 0, 5, 2], [7, 0, 4, 1]])\n', (4072, 4116), True, 'import numpy as np\n'), ((4171, 4223), 'numpy.array', 'np.array', (['[[5, 9, 1, 6], [4, 8, 2, 6], [3, 7, 0, 6]]'], {}), '([[5, 9, 1, 6], [4, 8, 2, 6], [3, 7, 0, 6]])\n', (4179, 4223), True, 'import numpy as np\n'), ((1745, 1812), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[output_size, input_size]', 'dtype': 'tf.float32'}), '(shape=[output_size, input_size], dtype=tf.float32)\n', (1761, 1812), True, 'import tensorflow as tf\n'), ((1841, 1909), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[output_size, output_size]', 'dtype': 'tf.float32'}), '(shape=[output_size, output_size], dtype=tf.float32)\n', (1857, 1909), 
True, 'import tensorflow as tf\n'), ((1937, 1995), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[output_size, 1]', 'dtype': 'tf.float32'}), '(shape=[output_size, 1], dtype=tf.float32)\n', (1953, 1995), True, 'import tensorflow as tf\n'), ((4910, 4932), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (4930, 4932), True, 'import tensorflow as tf\n'), ((2335, 2352), 'tensorflow.matmul', 'tf.matmul', (['Wx', 'x0'], {}), '(Wx, x0)\n', (2344, 2352), True, 'import tensorflow as tf\n'), ((2389, 2406), 'tensorflow.matmul', 'tf.matmul', (['Wx', 'x1'], {}), '(Wx, x1)\n', (2398, 2406), True, 'import tensorflow as tf\n'), ((2409, 2426), 'tensorflow.matmul', 'tf.matmul', (['Wa', 'a0'], {}), '(Wa, a0)\n', (2418, 2426), True, 'import tensorflow as tf\n'), ((2464, 2481), 'tensorflow.matmul', 'tf.matmul', (['Wx', 'x2'], {}), '(Wx, x2)\n', (2473, 2481), True, 'import tensorflow as tf\n'), ((2484, 2501), 'tensorflow.matmul', 'tf.matmul', (['Wa', 'a1'], {}), '(Wa, a1)\n', (2493, 2501), True, 'import tensorflow as tf\n')] |
import pickle
import fire
import matplotlib.pyplot as plt
import numpy as np
def create_graph(time_trace_path, out, roi_select="0 1 2 3 4 5 6 7 8 9 10 11"):
    """Plot stacked, scaled time traces for selected ROIs, one PNG per key.

    Args:
        time_trace_path: Path to a pickle file mapping trace names to per-ROI
            fluorescence traces (each a 1-D array, or a list of arrays that is
            hstack-ed first).
        out: Output path template; its last four characters (the extension)
            are replaced by "<key>.png" for each trace key.
        roi_select: Space-separated ROI indices to plot; "" keeps all ROIs.
    """
    # SECURITY(review): pickle.load executes arbitrary code -- only open
    # trusted trace files.
    with open(time_trace_path, "rb") as f:
        time_traces = pickle.load(f)
    std_past = None
    for num_1, key in enumerate(time_traces.keys()):
        mean_flor_den = time_traces[key]
        try:
            if roi_select != "":
                mean_flor_den = [mean_flor_den[int(x)] for x in roi_select.split(" ")]
        except IndexError:
            # Best effort: report the bad selection and plot every ROI.
            print("Invalid rois numbers")
        if isinstance(mean_flor_den[0], list):
            mean_flor_den = [np.hstack(x) for x in mean_flor_den]
        time_stack = np.vstack(mean_flor_den)
        # Scale every key by 6*std of the *first* key so all figures share
        # the same vertical scale and stay visually comparable.
        if std_past is None:
            std = 6 * np.std(time_stack, axis=1)
            std_past = std
        else:
            std = std_past
        time_stack_scaled = time_stack / std.reshape([std.shape[0], 1])
        time_traces_scaled = list(time_stack_scaled)
        x_var = np.arange(1, time_stack_scaled.shape[1] + 1)
        plt.figure(0)
        fig = plt.gcf()
        fig.set_size_inches(10, 6)
        for num, x in enumerate(time_traces_scaled):
            # Offset each ROI trace by its index; the first key is drawn red.
            plt.plot(x_var, x - x.mean() + num, color="black" if num_1 != 0 else "red",
                     linewidth=1)
        plt.savefig(out[:-4] + key.replace(" ", "_") + ".png")
# Expose create_graph as a command-line interface via python-fire.
if __name__ == '__main__':
    fire.Fire(create_graph)
| [
"fire.Fire",
"numpy.std",
"numpy.hstack",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"matplotlib.pyplot.gcf",
"numpy.vstack"
] | [((1802, 1825), 'fire.Fire', 'fire.Fire', (['create_graph'], {}), '(create_graph)\n', (1811, 1825), False, 'import fire\n'), ((225, 239), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (236, 239), False, 'import pickle\n'), ((686, 710), 'numpy.vstack', 'np.vstack', (['mean_flor_den'], {}), '(mean_flor_den)\n', (695, 710), True, 'import numpy as np\n'), ((1339, 1383), 'numpy.arange', 'np.arange', (['(1)', '(time_stack_scaled.shape[1] + 1)'], {}), '(1, time_stack_scaled.shape[1] + 1)\n', (1348, 1383), True, 'import numpy as np\n'), ((1392, 1405), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1402, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1429), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1427, 1429), True, 'import matplotlib.pyplot as plt\n'), ((628, 640), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (637, 640), True, 'import numpy as np\n'), ((1103, 1129), 'numpy.std', 'np.std', (['time_stack'], {'axis': '(1)'}), '(time_stack, axis=1)\n', (1109, 1129), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import yaml
import numpy as np
from easydict import EasyDict as edict
# Global mutable configuration (EasyDict: attribute-style access).
# Defaults below are merged/overridden by update_config() / update_dir().
config = edict()
config.OUTPUT_DIR = ''
config.LOG_DIR = ''
config.DATA_DIR = ''
config.GPUS = '0'
config.WORKERS = 4
config.PRINT_FREQ = 20
# Cudnn related params
config.CUDNN = edict()
config.CUDNN.BENCHMARK = True
config.CUDNN.DETERMINISTIC = False
config.CUDNN.ENABLED = True
# pose_resnet related params
POSE_RESNET = edict()
POSE_RESNET.NUM_LAYERS = 50
POSE_RESNET.DECONV_WITH_BIAS = False
POSE_RESNET.NUM_DECONV_LAYERS = 3
POSE_RESNET.NUM_DECONV_FILTERS = [256, 256, 256]
POSE_RESNET.NUM_DECONV_KERNELS = [4, 4, 4]
POSE_RESNET.FINAL_CONV_KERNEL = 1
POSE_RESNET.TARGET_TYPE = 'gaussian'
POSE_RESNET.HEATMAP_SIZE = [64, 64]  # width * height, ex: 24 * 32
POSE_RESNET.SIGMA = 2
# Registry of per-architecture extra settings, keyed by MODEL.NAME.
MODEL_EXTRAS = {
    'pose_resnet': POSE_RESNET,
}
# common params for NETWORK
config.MODEL = edict()
config.MODEL.NAME = 'pose_resnet'
config.MODEL.INIT_WEIGHTS = True
config.MODEL.PRETRAINED = ''
config.MODEL.NUM_JOINTS = 16
config.MODEL.IMAGE_SIZE = [256, 256]  # width * height, ex: 192 * 256
config.MODEL.EXTRA = MODEL_EXTRAS[config.MODEL.NAME]
config.LOSS = edict()
config.LOSS.USE_TARGET_WEIGHT = True
# DATASET related params
config.DATASET = edict()
config.DATASET.ROOT = ''
config.DATASET.DATASET = 'mpii'
config.DATASET.TRAIN_SET = 'train'
config.DATASET.TEST_SET = 'valid'
config.DATASET.DATA_FORMAT = 'jpg'
config.DATASET.HYBRID_JOINTS_TYPE = ''
config.DATASET.SELECT_DATA = False
# training data augmentation
config.DATASET.FLIP = True
config.DATASET.SCALE_FACTOR = 0.25
config.DATASET.ROT_FACTOR = 30
# train
config.TRAIN = edict()
config.TRAIN.LR_FACTOR = 0.1
config.TRAIN.LR_STEP = [90, 110]
config.TRAIN.LR = 0.001
config.TRAIN.OPTIMIZER = 'adam'
config.TRAIN.MOMENTUM = 0.9
config.TRAIN.WD = 0.0001
config.TRAIN.NESTEROV = False
config.TRAIN.GAMMA1 = 0.99
config.TRAIN.GAMMA2 = 0.0
config.TRAIN.BEGIN_EPOCH = 0
config.TRAIN.END_EPOCH = 140
config.TRAIN.RESUME = False
config.TRAIN.CHECKPOINT = ''
config.TRAIN.BATCH_SIZE = 32
config.TRAIN.SHUFFLE = True
# testing
config.TEST = edict()
# size of images for each device
config.TEST.BATCH_SIZE = 32
# Test Model Epoch
config.TEST.FLIP_TEST = False
config.TEST.POST_PROCESS = True
config.TEST.SHIFT_HEATMAP = True
config.TEST.USE_GT_BBOX = False
# nms
config.TEST.OKS_THRE = 0.5
config.TEST.IN_VIS_THRE = 0.0
config.TEST.COCO_BBOX_FILE = ''
config.TEST.BBOX_THRE = 1.0
config.TEST.MODEL_FILE = ''
####aoyagi
config.TEST.IMAGE_THRE = 0.0
config.TEST.NMS_THRE = 1.0
####
# debug
config.DEBUG = edict()
config.DEBUG.DEBUG = False
config.DEBUG.SAVE_BATCH_IMAGES_GT = False
config.DEBUG.SAVE_BATCH_IMAGES_PRED = False
config.DEBUG.SAVE_HEATMAPS_GT = False
config.DEBUG.SAVE_HEATMAPS_PRED = False
def _update_dict(k, v):
    """Merge section dict *v* from a YAML experiment file into config[k].

    Normalizes known fields first: DATASET.MEAN/STD become numpy arrays,
    MODEL image/heatmap sizes become arrays (a scalar is promoted to a
    square (size, size) pair).

    Raises:
        ValueError: if *v* contains a key unknown to config[k].
    """
    if k == 'DATASET':
        if 'MEAN' in v and v['MEAN']:
            # SECURITY(review): eval() on config-file strings executes
            # arbitrary code -- only load trusted experiment files.
            v['MEAN'] = np.array([eval(x) if isinstance(x, str) else x
                                for x in v['MEAN']])
        if 'STD' in v and v['STD']:
            v['STD'] = np.array([eval(x) if isinstance(x, str) else x
                               for x in v['STD']])
    if k == 'MODEL':
        if 'EXTRA' in v and 'HEATMAP_SIZE' in v['EXTRA']:
            if isinstance(v['EXTRA']['HEATMAP_SIZE'], int):
                # A single int means a square heatmap.
                v['EXTRA']['HEATMAP_SIZE'] = np.array(
                    [v['EXTRA']['HEATMAP_SIZE'], v['EXTRA']['HEATMAP_SIZE']])
            else:
                v['EXTRA']['HEATMAP_SIZE'] = np.array(
                    v['EXTRA']['HEATMAP_SIZE'])
        if 'IMAGE_SIZE' in v:
            if isinstance(v['IMAGE_SIZE'], int):
                v['IMAGE_SIZE'] = np.array([v['IMAGE_SIZE'], v['IMAGE_SIZE']])
            else:
                v['IMAGE_SIZE'] = np.array(v['IMAGE_SIZE'])
    for vk, vv in v.items():
        if vk in config[k]:
            config[k][vk] = vv
        else:
            raise ValueError("{}.{} not exist in config.py".format(k, vk))
def update_config(config_file):
    """Load a YAML experiment file and merge it into the global config.

    Dict-valued sections are merged key-by-key via _update_dict; the special
    'SCALES' list is stored as a tuple in its first slot.

    Raises:
        ValueError: if the file contains a top-level key unknown to config.
    """
    with open(config_file) as f:
        # safe_load avoids arbitrary object construction and works on modern
        # PyYAML, where yaml.load() without an explicit Loader is rejected.
        exp_config = edict(yaml.safe_load(f))
    for k, v in exp_config.items():
        if k not in config:
            raise ValueError("{} not exist in config.py".format(k))
        if isinstance(v, dict):
            _update_dict(k, v)
        elif k == 'SCALES':
            config[k][0] = tuple(v)
        else:
            config[k] = v
def gen_config(config_file):
    """Serialize the current global config to *config_file* as YAML."""
    # Shallow-convert edict sections to plain dicts so yaml.dump emits
    # ordinary mappings instead of EasyDict objects.
    plain = dict(config)
    for section, value in plain.items():
        if isinstance(value, edict):
            plain[section] = dict(value)
    with open(config_file, 'w') as out_file:
        yaml.dump(dict(plain), out_file, default_flow_style=False)
def update_dir(model_dir, log_dir, data_dir):
    """Override output/log/data directories on the global config.

    Any empty/falsy argument leaves the corresponding directory unchanged.
    The DATASET.ROOT, TEST.COCO_BBOX_FILE and MODEL.PRETRAINED paths are
    always re-rooted under DATA_DIR (so calling this twice re-joins them --
    intended to run once at startup).
    """
    if model_dir:
        config.OUTPUT_DIR = model_dir

    if log_dir:
        config.LOG_DIR = log_dir

    if data_dir:
        config.DATA_DIR = data_dir

    config.DATASET.ROOT = os.path.join(
            config.DATA_DIR, config.DATASET.ROOT)

    config.TEST.COCO_BBOX_FILE = os.path.join(
            config.DATA_DIR, config.TEST.COCO_BBOX_FILE)

    config.MODEL.PRETRAINED = os.path.join(
            config.DATA_DIR, config.MODEL.PRETRAINED)
def get_model_name(cfg):
    """Derive identifier strings for a model configuration.

    Args:
        cfg: Config with MODEL.NAME, MODEL.IMAGE_SIZE ([width, height]) and
            MODEL.EXTRA (architecture-specific settings).

    Returns:
        tuple[str, str]: (name, full_name). For pose_resnet the name carries
        the layer count and full_name also encodes the deconv filter widths;
        every other architecture gets the plain "<H>x<W>_<name>" form.
    """
    name = cfg.MODEL.NAME
    extra = cfg.MODEL.EXTRA
    if name in ['pose_resnet']:
        name = '{model}_{num_layers}'.format(
            model=name,
            num_layers=extra.NUM_LAYERS)
        deconv_suffix = ''.join(
            'd{}'.format(num_filters)
            for num_filters in extra.NUM_DECONV_FILTERS)
        full_name = '{height}x{width}_{name}_{deconv_suffix}'.format(
            height=cfg.MODEL.IMAGE_SIZE[1],
            width=cfg.MODEL.IMAGE_SIZE[0],
            name=name,
            deconv_suffix=deconv_suffix)
    else:
        # The original duplicated this branch verbatim for MnasNet_,
        # MobileNet16_, MobileNet162_ and the fallback; all bodies were
        # identical (and their deconv_suffix was computed but unused).
        full_name = '{height}x{width}_{name}'.format(
            height=cfg.MODEL.IMAGE_SIZE[1],
            width=cfg.MODEL.IMAGE_SIZE[0],
            name=name)

    return name, full_name
# Script mode: write the current default config to the YAML path in argv[1].
if __name__ == '__main__':
    import sys
    gen_config(sys.argv[1])
| [
"numpy.array",
"yaml.load",
"os.path.join",
"easydict.EasyDict"
] | [((472, 479), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (477, 479), True, 'from easydict import EasyDict as edict\n'), ((654, 661), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (659, 661), True, 'from easydict import EasyDict as edict\n'), ((805, 812), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (810, 812), True, 'from easydict import EasyDict as edict\n'), ((1276, 1283), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1281, 1283), True, 'from easydict import EasyDict as edict\n'), ((1555, 1562), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1560, 1562), True, 'from easydict import EasyDict as edict\n'), ((1647, 1654), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1652, 1654), True, 'from easydict import EasyDict as edict\n'), ((2052, 2059), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (2057, 2059), True, 'from easydict import EasyDict as edict\n'), ((2539, 2546), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (2544, 2546), True, 'from easydict import EasyDict as edict\n'), ((3025, 3032), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (3030, 3032), True, 'from easydict import EasyDict as edict\n'), ((5492, 5542), 'os.path.join', 'os.path.join', (['config.DATA_DIR', 'config.DATASET.ROOT'], {}), '(config.DATA_DIR, config.DATASET.ROOT)\n', (5504, 5542), False, 'import os\n'), ((5593, 5650), 'os.path.join', 'os.path.join', (['config.DATA_DIR', 'config.TEST.COCO_BBOX_FILE'], {}), '(config.DATA_DIR, config.TEST.COCO_BBOX_FILE)\n', (5605, 5650), False, 'import os\n'), ((5698, 5752), 'os.path.join', 'os.path.join', (['config.DATA_DIR', 'config.MODEL.PRETRAINED'], {}), '(config.DATA_DIR, config.MODEL.PRETRAINED)\n', (5710, 5752), False, 'import os\n'), ((4557, 4569), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (4566, 4569), False, 'import yaml\n'), ((3799, 3865), 'numpy.array', 'np.array', (["[v['EXTRA']['HEATMAP_SIZE'], v['EXTRA']['HEATMAP_SIZE']]"], {}), "([v['EXTRA']['HEATMAP_SIZE'], v['EXTRA']['HEATMAP_SIZE']])\n", (3807, 3865), True, 
'import numpy as np\n'), ((3953, 3989), 'numpy.array', 'np.array', (["v['EXTRA']['HEATMAP_SIZE']"], {}), "(v['EXTRA']['HEATMAP_SIZE'])\n", (3961, 3989), True, 'import numpy as np\n'), ((4128, 4172), 'numpy.array', 'np.array', (["[v['IMAGE_SIZE'], v['IMAGE_SIZE']]"], {}), "([v['IMAGE_SIZE'], v['IMAGE_SIZE']])\n", (4136, 4172), True, 'import numpy as np\n'), ((4227, 4252), 'numpy.array', 'np.array', (["v['IMAGE_SIZE']"], {}), "(v['IMAGE_SIZE'])\n", (4235, 4252), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
import cv2
def getPathList(path, suffix='png'):
    """Recursively collect files under *path* whose names end with *suffix*.

    Args:
        path: Root directory to walk.
        suffix: Filename ending to match (default 'png').

    Returns:
        list[str]: Full paths of every matching file, in os.walk order.
    """
    # os.path.join handles separators itself, so the original's manual
    # trailing-'/' fixup (using bitwise '&' on booleans) is unnecessary.
    pathlist = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith(suffix):
                pathlist.append(os.path.join(dirpath, filename))
    return pathlist
def mkdir(path):
    """Create *path* and any missing parents; no-op if it already exists.

    Replaces the original hand-rolled recursion (which recursed forever on a
    missing single-segment relative path, because os.path.split('') yields
    ('', '')) with os.makedirs, which is equivalent and race-free.
    """
    os.makedirs(path, exist_ok=True)
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert a normalized batch tensor into a list of uint8 HWC images.

    Undoes per-channel normalization (img * std + mean), scales to [0, 255],
    and returns one contiguous array per batch element.

    Args:
        tensor: Batch tensor of shape (N, C, H, W); values are assumed to be
            normalized as (raw - mean) / std.  # assumes CHW layout -- confirm
        mean, std: Per-channel normalization constants to invert.
        to_rgb: If True, reverse the channel order (RGB <-> BGR swap).

    Returns:
        list[np.ndarray]: N contiguous uint8 arrays of shape (H, W, C).
    """
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(tensor.size(0)):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        # BUG FIX: the original called mmcv.imdenormalize without ever
        # importing mmcv (NameError at runtime); the equivalent
        # denormalization is done inline with numpy instead.
        img = img * std + mean
        if to_rgb:
            img = img[..., ::-1]
        img = np.clip(img * 255, a_min=0, a_max=255).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs
def path_join(root, name):
    """Join *name* under *root*; one leading '/' on *name* is ignored."""
    if root == '':
        return name
    # Drop a single leading separator so the join is relative to root.
    relative = name[1:] if name[0] == '/' else name
    return os.path.join(root, relative)
class Timer:
    """Context manager that prints *msg* %-formatted with elapsed seconds."""

    def __init__(self, msg):
        # msg must contain one %-style placeholder for the float duration.
        self.msg = msg
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()

    def __exit__(self, exc_type, exc_value, exc_tb):
        elapsed = time.time() - self.start_time
        print(self.msg % elapsed)
"os.mkdir",
"os.path.isdir",
"os.walk",
"numpy.ascontiguousarray",
"numpy.clip",
"time.time",
"numpy.array",
"os.path.split",
"os.path.join"
] | [((194, 207), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (201, 207), False, 'import os\n'), ((671, 703), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (679, 703), True, 'import numpy as np\n'), ((714, 745), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (722, 745), True, 'import numpy as np\n'), ((421, 440), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (434, 440), False, 'import os\n'), ((516, 535), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (529, 535), False, 'import os\n'), ((545, 559), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (553, 559), False, 'import os\n'), ((1181, 1209), 'os.path.join', 'os.path.join', (['root', 'name[1:]'], {}), '(root, name[1:])\n', (1193, 1209), False, 'import os\n'), ((1235, 1259), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (1247, 1259), False, 'import os\n'), ((1411, 1422), 'time.time', 'time.time', ([], {}), '()\n', (1420, 1422), False, 'import time\n'), ((1033, 1058), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1053, 1058), True, 'import numpy as np\n'), ((456, 475), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (469, 475), False, 'import os\n'), ((956, 994), 'numpy.clip', 'np.clip', (['(img * 255)'], {'a_min': '(0)', 'a_max': '(255)'}), '(img * 255, a_min=0, a_max=255)\n', (963, 994), True, 'import numpy as np\n'), ((345, 370), 'os.path.join', 'os.path.join', (['p', 'filename'], {}), '(p, filename)\n', (357, 370), False, 'import os\n'), ((1503, 1514), 'time.time', 'time.time', ([], {}), '()\n', (1512, 1514), False, 'import time\n')] |
import os, sys
from pdb import set_trace as st
import numpy as np
from functools import partial
import copy
from model.config import LENET
import nninst_mode as mode
from dataset import mnist
from dataset.config import MNIST_TRAIN
from dataset.mnist_transforms import *
from trace.lenet_mnist_class_trace_v2 import (
data_config,
)
from trace.common import (
class_trace,
)
from tf_utils import new_session_config
from nninst_statistics import calc_trace_side_overlap
from nninst_trace import TraceKey
from nninst_utils.numpy import arg_approx
from nninst_utils.ray import ray_init
from nninst_utils.fs import ensure_dir
from nninst_op import *
from nninst_trace import calc_padding
# Configs
# Model config
model_label = "augmentation"
model_dir = f"result/lenet/model_{model_label}"
# Trace config
trace_dir = f"{model_dir}/traces"
trace_name = "9translation"
# Thred
threshold = 0.5
# Trace attribute analyzed throughout this module (edges of the NN trace).
key = TraceKey.EDGE
# Report file appended to by compute_reduced_vector().
filter_zero_ratio_path = f"{model_dir}/filter_zero_ratio.txt"
lenet_mnist_class_trace = class_trace(
    trace_name,
    model_config=LENET,
    data_config=data_config,
)
# Loads the per-class trace for `class_id` at the configured threshold.
class_trace_fn=lambda class_id: lenet_mnist_class_trace(
    class_id,
    threshold,
    label=model_label,
    trace_dir = trace_dir,
)
graph = LENET.network_class.graph().load()
def to_bitmap(shape):
    """Pack a dense 0/1 mask of size prod(shape) into a uint8 bitmap."""
    mask = np.zeros(np.prod(shape), dtype=np.int8)
    # BUG(review): `attr` is not defined in this scope (it is neither a
    # parameter nor a module global), so this line raises NameError when
    # called -- it presumably should be a second parameter. TODO confirm.
    mask[TraceKey.to_array(attr)] = 1
    return np.packbits(mask)
def reconstruct_edge(
    trace,
    graph,
    key,
    node_name,
):
    """Rebuild the dense edge mask for *node_name* from its sparse trace.

    Returns a 0/1 int8 array shaped like the op's edge tensor, or None when
    *key* is absent from the node's trace attributes. Only Add/Dense and
    MaxPool/Conv2d/AvgPool ops are handled; other op types fall through and
    implicitly return None.
    """
    attrs = trace.nodes[node_name]
    op = graph.op(graph.id(node_name))
    if key not in attrs:
        return None
    else:
        attr = attrs[key]
        edge = TraceKey.to_array(attr)
        if isinstance(op, (AddOp, DenseOp)):
            # Dense/add: scatter the flat edge indices into the stored shape.
            shape = attrs[key+"_shape"]
            mask = np.zeros(np.prod(shape), dtype=np.int8)
            mask[edge] = 1
            mask = np.reshape(mask, shape)
            return mask
        elif isinstance(op, (MaxPoolOp, Conv2dOp, AvgPoolOp)):
            input_shape = trace.tensors[op.input_nodes[0].name][
                TraceKey.POINT_SHAPE
            ]
            output_shape = trace.tensors[op.output_nodes[0].name][
                TraceKey.POINT_SHAPE
            ]
            # Normalize both shapes to channel-first (C, H, W) ordering.
            if op.data_format == "NHWC":
                input_shape = (
                    input_shape[2],
                    input_shape[0],
                    input_shape[1],
                )
                output_shape = (
                    output_shape[2],
                    output_shape[0],
                    output_shape[1],
                )
            # Each flat edge index encodes an (input point, output point) pair.
            in_channel, in_height, in_width, out_channel, out_height, out_width = np.unravel_index(
                edge, input_shape + output_shape
            )
            stride = np.array(op.strides)
            kernel_size = (
                np.array(attrs[TraceKey.WEIGHT_SHAPE])[2:]
                if isinstance(op, Conv2dOp)
                else np.array(op.filter_shape)
            )
            padding = calc_padding(
                np.array(input_shape)[1:],
                np.array(output_shape)[1:],
                stride,
                kernel_size,
            )
            # Kernel-relative offsets of the input point within its window.
            kernel_height = (
                in_height + padding[1][0] - (out_height * stride[0])
            )
            kernel_width = (
                in_width + padding[2][0] - (out_width * stride[1])
            )
            edge_shape = attrs[TraceKey.EDGE_SHAPE]
            if isinstance(op, Conv2dOp):
                new_edge_index = np.ravel_multi_index(
                    (
                        in_channel,
                        kernel_height,
                        kernel_width,
                        out_channel,
                        out_height,
                        out_width,
                    ),
                    edge_shape,
                )
            else:
                # Pooling acts per-channel, so the input channel is dropped.
                new_edge_index = np.ravel_multi_index(
                    (
                        kernel_height,
                        kernel_width,
                        out_channel,
                        out_height,
                        out_width,
                    ),
                    edge_shape,
                )
            mask = np.zeros(np.prod(edge_shape), dtype=np.int8)
            mask[new_edge_index] = 1
            mask = np.reshape(mask, edge_shape)
            return mask
def compute_reduced_vector():
    """Compute reduced edge vectors that omit feature-map spatial info.

    For every class trace, conv edge masks are summed over the two trailing
    (spatial) axes and binarized; dense/add masks are kept as-is. The
    per-node zero ratio of the reduced vector is written to
    filter_zero_ratio_path.

    Returns:
        dict: class_id -> {node_name: reduced 0/1 mask}. (The original
        computed this dict but discarded it; returning it is backward
        compatible and lets callers reuse the result.)
    """
    np.set_printoptions(precision=2, linewidth=200)
    class_edge_info = {i: {} for i in range(10)}
    # `with` guarantees the report is flushed and the handle closed
    # (the original leaked the open file).
    with open(filter_zero_ratio_path, "w") as f:
        for class_id in range(10):
            print(f"Class {class_id}", file=f)
            trace = class_trace_fn(class_id).load()
            reconstruct_edge_fn = partial(
                reconstruct_edge,
                trace,
                graph,
                key,
            )
            op_to_mask = {}
            for node_name in sorted(trace.nodes):
                if key in trace.nodes[node_name]:
                    op_to_mask[node_name] = reconstruct_edge_fn(node_name)
            for node_name in op_to_mask:
                edge = op_to_mask[node_name]
                if "conv2d" in node_name:
                    # Sum out the output spatial axes, then binarize.
                    edge_sum = op_to_mask[node_name].sum(-1).sum(-1)
                    edge_sum[edge_sum > 0] = 1
                else:
                    edge_sum = edge
                edge_sum_num = edge_sum.size
                edge_sum_zero = edge_sum_num - np.count_nonzero(edge_sum)
                ratio = edge_sum_zero / edge_sum_num
                print(f"{node_name}:\t{ratio:.2f}\t{edge_sum_zero}/{edge_sum_num}", file=f)
                class_edge_info[class_id][node_name] = edge_sum
            print(file=f)
    return class_edge_info
def compute_zero_correlation():
    """Tabulate, per node, how much each class's zero set covers another's."""
    # Compute the correlation between zeros of reduced vectors
    correlation_path = f"{model_dir}/correlation.txt"
    # NOTE(review): the file handle is never closed; prefer a `with` block.
    f = open(correlation_path, "w")
    # BUG(review): `class_edge_info` is a local of compute_reduced_vector and
    # is undefined here, so calling this function raises NameError. It looks
    # like this function was once nested inside compute_reduced_vector and
    # later de-indented; it needs the reduced vectors passed in. TODO confirm.
    node_names = class_edge_info[0].keys()
    correlation = {node_name: np.zeros((10,10)) for node_name in node_names}
    for node_name in node_names:
        for src_id in range(10):
            for dst_id in range(10):
                src_edge = class_edge_info[src_id][node_name]
                src_mask = 1-src_edge
                dst_edge = class_edge_info[dst_id][node_name]
                dst_mask = 1-dst_edge
                # Fraction of src's zero positions also zero in dst.
                intersect = src_mask * dst_mask
                ratio = intersect.sum() / src_mask.sum()
                correlation[node_name][src_id][dst_id] = ratio
    for node_name in node_names:
        print(node_name, file=f)
        print(correlation[node_name], file=f)
def compute_effective_node_number():
    """Count effective (nonzero) edges per node, per class and across classes.

    For each class and node, writes the raw and spatially-reduced nonzero
    edge counts, then the union over all 10 classes, to
    {model_dir}/node_number.txt.
    """
    np.set_printoptions(precision=2, linewidth=200)
    effective_node_number_path = f"{model_dir}/node_number.txt"
    class_edge_info = {i: {} for i in range(10)}
    edge_all_class = {}
    edge_sum_all_class = {}
    # `with` closes the report file (the original leaked the handle).
    with open(effective_node_number_path, "w") as f:
        for class_id in range(10):
            print(f"Class {class_id}", file=f)
            trace = class_trace_fn(class_id).load()
            reconstruct_edge_fn = partial(
                reconstruct_edge,
                trace,
                graph,
                key,
            )
            op_to_mask = {}
            for node_name in sorted(trace.nodes):
                if key in trace.nodes[node_name]:
                    op_to_mask[node_name] = reconstruct_edge_fn(node_name)
            for node_name in op_to_mask:
                edge = op_to_mask[node_name]
                if "conv2d" in node_name:
                    # Sum out the output spatial axes, then binarize.
                    edge_sum = op_to_mask[node_name].sum(-1).sum(-1)
                    edge_sum[edge_sum > 0] = 1
                else:
                    edge_sum = edge
                node_number = np.count_nonzero(edge)
                sum_node_number = np.count_nonzero(edge_sum)
                print(f"{node_name}:\t{node_number}\t{sum_node_number}", file=f)
                class_edge_info[class_id][node_name] = edge_sum
            # BUG FIXES: the original tested `class_id is 0` (identity on an
            # int literal, implementation-dependent) and OR-ed the whole
            # class_edge_info dict instead of the per-node mask.
            if class_id == 0:
                edge_all_class = copy.deepcopy(op_to_mask)
                edge_sum_all_class = copy.deepcopy(class_edge_info[class_id])
            else:
                for node_name in op_to_mask:
                    edge_all_class[node_name] = np.logical_or(
                        edge_all_class[node_name],
                        op_to_mask[node_name])
                    edge_sum_all_class[node_name] = np.logical_or(
                        edge_sum_all_class[node_name],
                        class_edge_info[class_id][node_name])
            print(file=f)
        print(f"All", file=f)
        for node_name in edge_all_class:
            node_number = np.count_nonzero(edge_all_class[node_name])
            sum_node_number = np.count_nonzero(edge_sum_all_class[node_name])
            print(f"{node_name}:\t{node_number}\t{sum_node_number}", file=f)
# compute_effective_node_number()
| [
"functools.partial",
"model.config.LENET.network_class.graph",
"numpy.set_printoptions",
"numpy.count_nonzero",
"copy.deepcopy",
"numpy.packbits",
"numpy.zeros",
"numpy.unravel_index",
"numpy.array",
"numpy.reshape",
"numpy.logical_or",
"numpy.ravel_multi_index",
"trace.common.class_trace",
... | [((1007, 1075), 'trace.common.class_trace', 'class_trace', (['trace_name'], {'model_config': 'LENET', 'data_config': 'data_config'}), '(trace_name, model_config=LENET, data_config=data_config)\n', (1018, 1075), False, 'from trace.common import class_trace\n'), ((1493, 1510), 'numpy.packbits', 'np.packbits', (['mask'], {}), '(mask)\n', (1504, 1510), True, 'import numpy as np\n'), ((4619, 4666), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'linewidth': '(200)'}), '(precision=2, linewidth=200)\n', (4638, 4666), True, 'import numpy as np\n'), ((6948, 6995), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'linewidth': '(200)'}), '(precision=2, linewidth=200)\n', (6967, 6995), True, 'import numpy as np\n'), ((1335, 1362), 'model.config.LENET.network_class.graph', 'LENET.network_class.graph', ([], {}), '()\n', (1360, 1362), False, 'from model.config import LENET\n'), ((1413, 1427), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1420, 1427), True, 'import numpy as np\n'), ((1453, 1476), 'nninst_trace.TraceKey.to_array', 'TraceKey.to_array', (['attr'], {}), '(attr)\n', (1470, 1476), False, 'from nninst_trace import TraceKey\n'), ((1753, 1776), 'nninst_trace.TraceKey.to_array', 'TraceKey.to_array', (['attr'], {}), '(attr)\n', (1770, 1776), False, 'from nninst_trace import TraceKey\n'), ((4910, 4954), 'functools.partial', 'partial', (['reconstruct_edge', 'trace', 'graph', 'key'], {}), '(reconstruct_edge, trace, graph, key)\n', (4917, 4954), False, 'from functools import partial\n'), ((6168, 6186), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (6176, 6186), True, 'import numpy as np\n'), ((7307, 7351), 'functools.partial', 'partial', (['reconstruct_edge', 'trace', 'graph', 'key'], {}), '(reconstruct_edge, trace, graph, key)\n', (7314, 7351), False, 'from functools import partial\n'), ((9129, 9172), 'numpy.count_nonzero', 'np.count_nonzero', (['edge_all_class[node_name]'], {}), 
'(edge_all_class[node_name])\n', (9145, 9172), True, 'import numpy as np\n'), ((9199, 9246), 'numpy.count_nonzero', 'np.count_nonzero', (['edge_sum_all_class[node_name]'], {}), '(edge_sum_all_class[node_name])\n', (9215, 9246), True, 'import numpy as np\n'), ((1967, 1990), 'numpy.reshape', 'np.reshape', (['mask', 'shape'], {}), '(mask, shape)\n', (1977, 1990), True, 'import numpy as np\n'), ((8161, 8183), 'numpy.count_nonzero', 'np.count_nonzero', (['edge'], {}), '(edge)\n', (8177, 8183), True, 'import numpy as np\n'), ((8214, 8240), 'numpy.count_nonzero', 'np.count_nonzero', (['edge_sum'], {}), '(edge_sum)\n', (8230, 8240), True, 'import numpy as np\n'), ((8434, 8459), 'copy.deepcopy', 'copy.deepcopy', (['op_to_mask'], {}), '(op_to_mask)\n', (8447, 8459), False, 'import copy\n'), ((8493, 8533), 'copy.deepcopy', 'copy.deepcopy', (['class_edge_info[class_id]'], {}), '(class_edge_info[class_id])\n', (8506, 8533), False, 'import copy\n'), ((1890, 1904), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1897, 1904), True, 'import numpy as np\n'), ((2757, 2807), 'numpy.unravel_index', 'np.unravel_index', (['edge', '(input_shape + output_shape)'], {}), '(edge, input_shape + output_shape)\n', (2773, 2807), True, 'import numpy as np\n'), ((2859, 2879), 'numpy.array', 'np.array', (['op.strides'], {}), '(op.strides)\n', (2867, 2879), True, 'import numpy as np\n'), ((4424, 4452), 'numpy.reshape', 'np.reshape', (['mask', 'edge_shape'], {}), '(mask, edge_shape)\n', (4434, 4452), True, 'import numpy as np\n'), ((5662, 5688), 'numpy.count_nonzero', 'np.count_nonzero', (['edge_sum'], {}), '(edge_sum)\n', (5678, 5688), True, 'import numpy as np\n'), ((8059, 8085), 'numpy.count_nonzero', 'np.count_nonzero', (['edge_sum'], {}), '(edge_sum)\n', (8075, 8085), True, 'import numpy as np\n'), ((8633, 8696), 'numpy.logical_or', 'np.logical_or', (['edge_all_class[node_name]', 'op_to_mask[node_name]'], {}), '(edge_all_class[node_name], op_to_mask[node_name])\n', (8646, 8696), True, 
'import numpy as np\n'), ((8834, 8905), 'numpy.logical_or', 'np.logical_or', (['edge_sum_all_class[node_name]', 'class_edge_info[class_id]'], {}), '(edge_sum_all_class[node_name], class_edge_info[class_id])\n', (8847, 8905), True, 'import numpy as np\n'), ((3032, 3057), 'numpy.array', 'np.array', (['op.filter_shape'], {}), '(op.filter_shape)\n', (3040, 3057), True, 'import numpy as np\n'), ((3612, 3727), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['(in_channel, kernel_height, kernel_width, out_channel, out_height, out_width)', 'edge_shape'], {}), '((in_channel, kernel_height, kernel_width, out_channel,\n out_height, out_width), edge_shape)\n', (3632, 3727), True, 'import numpy as np\n'), ((4002, 4105), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['(kernel_height, kernel_width, out_channel, out_height, out_width)', 'edge_shape'], {}), '((kernel_height, kernel_width, out_channel, out_height,\n out_width), edge_shape)\n', (4022, 4105), True, 'import numpy as np\n'), ((4332, 4351), 'numpy.prod', 'np.prod', (['edge_shape'], {}), '(edge_shape)\n', (4339, 4351), True, 'import numpy as np\n'), ((2924, 2962), 'numpy.array', 'np.array', (['attrs[TraceKey.WEIGHT_SHAPE]'], {}), '(attrs[TraceKey.WEIGHT_SHAPE])\n', (2932, 2962), True, 'import numpy as np\n'), ((3124, 3145), 'numpy.array', 'np.array', (['input_shape'], {}), '(input_shape)\n', (3132, 3145), True, 'import numpy as np\n'), ((3167, 3189), 'numpy.array', 'np.array', (['output_shape'], {}), '(output_shape)\n', (3175, 3189), True, 'import numpy as np\n')] |
import numpy as np
from layers import (
FullyConnectedLayer, ReLULayer,
ConvolutionalLayer, MaxPoolingLayer, Flattener,
softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax
)
class ConvNet:
    """
    Implements a very simple conv net

    Input -> Conv[3x3] -> Relu -> Maxpool[4x4] ->
    Conv[3x3] -> Relu -> MaxPool[4x4] ->
    Flatten -> FC -> Softmax
    """

    def __init__(self, input_shape, n_output_classes, conv1_channels, conv2_channels):
        """
        Initializes the neural network

        Arguments:
        input_shape, tuple of 3 ints - image_width, image_height, n_channels
                                         Will be equal to (32, 32, 3)
        n_output_classes, int - number of classes to predict
        conv1_channels, int - number of filters in the 1st conv layer
        conv2_channels, int - number of filters in the 2nd conv layer
        """
        image_width, image_height, n_channels = input_shape
        self.conv1 = ConvolutionalLayer(n_channels, conv1_channels, 3, 1)
        self.relu1 = ReLULayer()
        self.maxp1 = MaxPoolingLayer(4, 4)
        self.conv2 = ConvolutionalLayer(conv1_channels, conv2_channels, 3, 1)
        self.relu2 = ReLULayer()
        self.maxp2 = MaxPoolingLayer(4, 4)
        self.flatn = Flattener()
        # Two 4x4/stride-4 poolings shrink each spatial dim by 4*4, so the
        # flattened area is W*H / 4**4.
        fc_input = int(image_width * image_height * conv2_channels / pow(4, 4))
        self.fc = FullyConnectedLayer(fc_input, n_output_classes)

    def _forward(self, X):
        """Run the full forward chain and return raw class scores."""
        out = self.conv1.forward(X)
        out = self.relu1.forward(out)
        out = self.maxp1.forward(out)
        out = self.conv2.forward(out)
        out = self.relu2.forward(out)
        out = self.maxp2.forward(out)
        out = self.flatn.forward(out)
        return self.fc.forward(out)

    def compute_loss_and_gradients(self, X, y):
        """
        Computes total loss and updates parameter gradients
        on a batch of training examples

        Arguments:
        X, np array (batch_size, height, width, input_features) - input data
        y, np array of int (batch_size) - classes

        Returns:
        loss, float - softmax cross-entropy loss on the batch
        """
        # Clear parameter gradients aggregated from the previous pass.
        for param in self.params().values():
            param.grad = np.zeros_like(param.grad)

        preds = self._forward(X)
        loss, d_preds = softmax_with_cross_entropy(preds, y)

        # Backward pass mirrors the forward chain in reverse order.
        grad = self.fc.backward(d_preds)
        grad = self.flatn.backward(grad)
        grad = self.maxp2.backward(grad)
        grad = self.relu2.backward(grad)
        grad = self.conv2.backward(grad)
        grad = self.maxp1.backward(grad)
        grad = self.relu1.backward(grad)
        self.conv1.backward(grad)
        return loss

    def predict(self, X):
        """Return the predicted class index for every sample in X."""
        prob = softmax(self._forward(X))
        return np.argmax(prob, axis=1)

    def params(self):
        """Map of all trainable parameters keyed by a unique name."""
        return {
            'Wc1': self.conv1.W, 'Bc1': self.conv1.B,
            'Wc2': self.conv2.W, 'Bc2': self.conv2.B,
            'Wfc': self.fc.W, 'Bfc': self.fc.B,
        }
| [
"layers.Flattener",
"numpy.zeros_like",
"layers.ConvolutionalLayer",
"numpy.argmax",
"layers.MaxPoolingLayer",
"layers.softmax_with_cross_entropy",
"layers.FullyConnectedLayer",
"layers.softmax",
"layers.ReLULayer"
] | [((1115, 1167), 'layers.ConvolutionalLayer', 'ConvolutionalLayer', (['n_channels', 'conv1_channels', '(3)', '(1)'], {}), '(n_channels, conv1_channels, 3, 1)\n', (1133, 1167), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1189, 1200), 'layers.ReLULayer', 'ReLULayer', ([], {}), '()\n', (1198, 1200), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1222, 1243), 'layers.MaxPoolingLayer', 'MaxPoolingLayer', (['(4)', '(4)'], {}), '(4, 4)\n', (1237, 1243), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1265, 1321), 'layers.ConvolutionalLayer', 'ConvolutionalLayer', (['conv1_channels', 'conv2_channels', '(3)', '(1)'], {}), '(conv1_channels, conv2_channels, 3, 1)\n', (1283, 1321), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1343, 1354), 'layers.ReLULayer', 'ReLULayer', ([], {}), '()\n', (1352, 1354), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1376, 1397), 'layers.MaxPoolingLayer', 'MaxPoolingLayer', (['(4)', '(4)'], {}), '(4, 4)\n', (1391, 1397), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1419, 1430), 'layers.Flattener', 'Flattener', ([], {}), '()\n', (1428, 1430), False, 'from 
layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((1538, 1585), 'layers.FullyConnectedLayer', 'FullyConnectedLayer', (['fc_input', 'n_output_classes'], {}), '(fc_input, n_output_classes)\n', (1557, 1585), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((2092, 2124), 'numpy.zeros_like', 'np.zeros_like', (['self.conv1.W.grad'], {}), '(self.conv1.W.grad)\n', (2105, 2124), True, 'import numpy as np\n'), ((2153, 2185), 'numpy.zeros_like', 'np.zeros_like', (['self.conv1.B.grad'], {}), '(self.conv1.B.grad)\n', (2166, 2185), True, 'import numpy as np\n'), ((2223, 2255), 'numpy.zeros_like', 'np.zeros_like', (['self.conv2.W.grad'], {}), '(self.conv2.W.grad)\n', (2236, 2255), True, 'import numpy as np\n'), ((2284, 2316), 'numpy.zeros_like', 'np.zeros_like', (['self.conv2.B.grad'], {}), '(self.conv2.B.grad)\n', (2297, 2316), True, 'import numpy as np\n'), ((2351, 2380), 'numpy.zeros_like', 'np.zeros_like', (['self.fc.W.grad'], {}), '(self.fc.W.grad)\n', (2364, 2380), True, 'import numpy as np\n'), ((2406, 2435), 'numpy.zeros_like', 'np.zeros_like', (['self.fc.B.grad'], {}), '(self.fc.B.grad)\n', (2419, 2435), True, 'import numpy as np\n'), ((3062, 3096), 'layers.softmax_with_cross_entropy', 'softmax_with_cross_entropy', (['ffc', 'y'], {}), '(ffc, y)\n', (3088, 3096), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax_with_cross_entropy, softmax\n'), ((4089, 4101), 'layers.softmax', 'softmax', (['ffc'], {}), '(ffc)\n', (4096, 4101), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, 
softmax_with_cross_entropy, softmax\n'), ((4126, 4149), 'numpy.argmax', 'np.argmax', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (4135, 4149), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import copy
import re
# Helper functions
def getRowsAndCols(line):
    """Parse a header line of the form "<rows> <cols>" into two ints.

    Args:
        line: whitespace-separated string expected to contain exactly two
            integer fields (row count, then column count).

    Returns:
        Tuple ``(rows, cols)`` of ints.

    Raises:
        Exception: if the line does not contain exactly two fields, or a
            field cannot be converted to int (same error style as before).
            BUGFIX: the original returned unbound names (NameError) when
            fewer than two fields were present; now it raises a clear error.
    """
    words = line.split()
    if len(words) != 2:
        raise Exception("Only expected two fields for rows and columns")
    values = []
    for position, word in enumerate(words):
        try:
            values.append(int(word))
        except ValueError:
            ordinal = "first" if position == 0 else "second"
            raise Exception(
                "Unable to convert {} field to int, field is {}, line is {}".format(
                    ordinal, word, line
                )
            )
    return values[0], values[1]
class DescriptorFile:
    """Holds a 2-D matrix of descriptors parsed from a text file.

    The expected file layout is: one ignored header line, one ignored
    orientation line, a "<rows> <cols>" line, then ``rows`` lines of
    whitespace-separated float values.
    """

    def __init__(self):
        # Start empty; read() resizes this array in place.
        self.__descriptors = np.zeros((0, 0))

    @property
    def descriptors(self):
        """The raw descriptor matrix (number_descriptors x dimensions)."""
        return self.__descriptors

    @property
    def number_descriptors(self):
        """Number of descriptors (rows) currently stored."""
        return self.__descriptors.shape[0]

    @property
    def dimensions(self):
        """Dimensionality (columns) of each descriptor."""
        return self.__descriptors.shape[1]

    def read(self, file_name):
        """Populate the descriptor matrix from *file_name*.

        Raises an Exception naming the offending field if any value
        cannot be converted to float.
        """
        with open(file_name, "r") as handle:
            handle.readline()  # header line, ignored
            handle.readline()  # orientation line, ignored
            header = handle.readline()
            n_rows, n_cols = getRowsAndCols(header)
            self.__descriptors.resize((n_rows, n_cols))
            for row in range(n_rows):
                data_line = handle.readline()
                for col, token in enumerate(data_line.split()):
                    try:
                        self.__descriptors[row, col] = float(token)
                    except:
                        raise Exception(
                            "Unable to convert field {} to float, at line {}"
                            ", row {} and col {}".format(token, data_line, row, col)
                        )
| [
"numpy.zeros"
] | [((1062, 1078), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (1070, 1078), True, 'import numpy as np\n')] |
import sys
import os
import cv2 as cv
import numpy as np
import piexif
import piexif.helper
class Photo:
    """Lightweight wrapper around a single photo's file path."""

    def __init__(self, image):
        # Path (or file name) of the image on disk.
        self.image = image

    def imageCV(self, image):
        """Load *image* from disk and return it as an OpenCV array."""
        return cv.imread(image)

    def exifData(self):
        """Return the exposure time in seconds taken from EXIF metadata.

        The EXIF ExposureTime value is a (numerator, denominator) rational.
        """
        exif = piexif.load(self.image)
        ratio = exif["Exif"][piexif.ExifIFD.ExposureTime]
        return ratio[0] / ratio[1]
class PhotoSet:
    """Collects a directory of photos and merges them into an HDR image."""

    def __init__(self):
        self.image_paths = []      # full paths of all discovered images
        self.im_list = []          # matching cv2 pixel arrays
        self.exposure_times = []   # EXIF exposure times, filled by createHDR

    def readImages(self, folder):
        """Recursively walk *folder*, recording each image path and its
        cv2 pixel data."""
        for root, dirs, files in os.walk(folder):
            for name in files:
                photo = Photo(name)
                path = os.path.join(root, photo.image)
                self.image_paths.append(path)
                self.im_list.append(photo.imageCV(path))

    def createHDR(self):
        """Align the loaded exposures, recover the camera response curve,
        merge to HDR, and write both "hdr.hdr" and a tone-mapped
        "hdr_preview.jpg"."""
        # Numpy array of exposure times from exif data
        for path in self.image_paths:
            self.exposure_times.append(Photo(path).exifData())
        times_np = np.asarray(self.exposure_times, dtype=np.float32)
        # Align images (in place).
        cv.createAlignMTB().process(self.im_list, self.im_list)
        # Recover the camera response curve.
        responseDebevec = cv.createCalibrateDebevec().process(
            self.im_list, times_np)
        # Merge the exposures into one HDR radiance map.
        hdrDebevec = cv.createMergeDebevec().process(
            self.im_list, times_np, responseDebevec)
        # Write the HDR image plus an LDR tone-mapped preview.
        cv.imwrite("hdr.hdr", hdrDebevec)
        ldrReinhard = cv.createTonemapReinhard(1.5, 0.0).process(hdrDebevec)
        cv.imwrite("hdr_preview.jpg", ldrReinhard * 255)
# --- Script entry: build an HDR image from every photo under ./images ---
folder = "images"
# NOTE(review): 'photo' is assigned but never used below — possibly leftover.
photo = "images/R0010041.JPG"
images = PhotoSet()
images.readImages(folder)
images.createHDR()
# Show the tone-mapped preview written by createHDR until a key is pressed.
hdr = cv.imread('hdr_preview.jpg')
cv.imshow('HDR', hdr)
cv.waitKey(0)
cv.destroyAllWindows()
"os.path.join",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.asarray",
"os.walk",
"cv2.imwrite",
"cv2.createAlignMTB",
"cv2.imread",
"piexif.load",
"cv2.createMergeDebevec",
"cv2.createCalibrateDebevec",
"cv2.imshow",
"cv2.createTonemapReinhard"
] | [((2352, 2380), 'cv2.imread', 'cv.imread', (['"""hdr_preview.jpg"""'], {}), "('hdr_preview.jpg')\n", (2361, 2380), True, 'import cv2 as cv\n'), ((2381, 2402), 'cv2.imshow', 'cv.imshow', (['"""HDR"""', 'hdr'], {}), "('HDR', hdr)\n", (2390, 2402), True, 'import cv2 as cv\n'), ((2403, 2416), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (2413, 2416), True, 'import cv2 as cv\n'), ((2417, 2439), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2437, 2439), True, 'import cv2 as cv\n'), ((312, 328), 'cv2.imread', 'cv.imread', (['image'], {}), '(image)\n', (321, 328), True, 'import cv2 as cv\n'), ((429, 452), 'piexif.load', 'piexif.load', (['self.image'], {}), '(self.image)\n', (440, 452), False, 'import piexif\n'), ((953, 968), 'os.walk', 'os.walk', (['folder'], {}), '(folder)\n', (960, 968), False, 'import os\n'), ((1425, 1474), 'numpy.asarray', 'np.asarray', (['self.exposure_times'], {'dtype': 'np.float32'}), '(self.exposure_times, dtype=np.float32)\n', (1435, 1474), True, 'import numpy as np\n'), ((1518, 1537), 'cv2.createAlignMTB', 'cv.createAlignMTB', ([], {}), '()\n', (1535, 1537), True, 'import cv2 as cv\n'), ((1656, 1683), 'cv2.createCalibrateDebevec', 'cv.createCalibrateDebevec', ([], {}), '()\n', (1681, 1683), True, 'import cv2 as cv\n'), ((1806, 1829), 'cv2.createMergeDebevec', 'cv.createMergeDebevec', ([], {}), '()\n', (1827, 1829), True, 'import cv2 as cv\n'), ((2020, 2053), 'cv2.imwrite', 'cv.imwrite', (['"""hdr.hdr"""', 'hdrDebevec'], {}), "('hdr.hdr', hdrDebevec)\n", (2030, 2053), True, 'import cv2 as cv\n'), ((2080, 2114), 'cv2.createTonemapReinhard', 'cv.createTonemapReinhard', (['(1.5)', '(0.0)'], {}), '(1.5, 0.0)\n', (2104, 2114), True, 'import cv2 as cv\n'), ((2181, 2229), 'cv2.imwrite', 'cv.imwrite', (['"""hdr_preview.jpg"""', '(ldrReinhard * 255)'], {}), "('hdr_preview.jpg', ldrReinhard * 255)\n", (2191, 2229), True, 'import cv2 as cv\n'), ((1061, 1088), 'os.path.join', 'os.path.join', (['root', 'p.image'], {}), '(root, 
p.image)\n', (1073, 1088), False, 'import os\n')] |
from matplotlib import pyplot as plt
import numpy as np
file_name='../result_split.txt'
def draw_best():
    """Plot the frequency group with the highest mean accuracy.

    Reads the global ``file_name`` results file, groups consecutive lines
    by frequency (third whitespace field, minus its trailing character),
    keeps the group whose mean accuracy (text after the last ':') is
    highest, and saves the plot to ./validation_best.png.
    """
    def _mean(values):
        # np.float was removed in NumPy 1.24 — use the builtin float dtype.
        return np.array(values, dtype=float).mean()

    def _better(group, best):
        # Return whichever of the two accuracy lists has the higher mean.
        if not group:
            return best
        if not best or _mean(group) > _mean(best):
            return group
        return best

    with open(file_name, 'r') as f:
        plt.figure()
        old_freq = 0
        accuracy = []
        best = []
        for line in f.readlines():
            freq = line.strip().split()[2][:-1]
            accu = line.strip().split(':')[-1]
            if str(old_freq) == freq:
                accuracy.append(accu)
            else:
                best = _better(accuracy, best)
                old_freq = freq
                # BUGFIX: keep the first sample of the new group (the
                # original reset to [] and silently dropped it).
                accuracy = [accu]
        # BUGFIX: the last group was never considered by the original loop.
        best = _better(accuracy, best)
        x = [i for i in range(len(best))]
        best = [float(i) for i in best]
        plt.plot(x, best)
        plt.savefig('./validation_best.png')
def get_one_freq(freq):
    """Collect all accuracy values recorded for one frequency.

    Scans the global ``file_name`` results file; a line belongs to *freq*
    when its third whitespace field (minus the trailing character) parses
    to that integer. The accuracy is the text after the last ':'.

    Returns:
        List of floats, in file order.
    """
    collected = []
    with open(file_name, 'r') as results:
        for entry in results.readlines():
            parsed_freq = entry.strip().split()[2][:-1]
            if int(parsed_freq) == freq:
                value = entry.strip().split(':')[-1]
                collected.append(float(value))
    return collected
# Frequencies to compare side by side.
target_freq=[0,6,10]
# One accuracy series per target frequency, read from the results file.
accuracy=[get_one_freq(target_freq[i]) for i in range(len(target_freq))]
def plot_multi_figure(*accuracy):
    """Plot every accuracy series on one figure and save it.

    Each positional argument is one series; they must all have the same
    length. Legend labels come from the module-level ``target_freq`` list
    (one entry per series). The figure is saved to multi_accu.png.

    Raises:
        AssertionError: if the series lengths differ.
    """
    shape = [len(i) for i in accuracy]
    # BUGFIX: the original asserted a list comprehension, which is truthy
    # for any non-empty list (and falsy — always failing — for a single
    # series), so it never actually compared the lengths.
    assert all(shape[i] == shape[i + 1] for i in range(len(shape) - 1))
    plt.figure()
    x = [i for i in range(shape[0])]
    for i in range(len(accuracy)):
        plt.plot(x, accuracy[i], label='freq{}'.format(target_freq[i]))
    plt.legend()
    plt.savefig('multi_accu.png')
plot_multi_figure(*accuracy) | [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.savefig"
] | [((1537, 1549), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1547, 1549), True, 'from matplotlib import pyplot as plt\n'), ((1694, 1706), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1704, 1706), True, 'from matplotlib import pyplot as plt\n'), ((1711, 1740), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""multi_accu.png"""'], {}), "('multi_accu.png')\n", (1722, 1740), True, 'from matplotlib import pyplot as plt\n'), ((148, 160), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (158, 160), True, 'from matplotlib import pyplot as plt\n'), ((916, 933), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'best'], {}), '(x, best)\n', (924, 933), True, 'from matplotlib import pyplot as plt\n'), ((942, 978), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./validation_best.png"""'], {}), "('./validation_best.png')\n", (953, 978), True, 'from matplotlib import pyplot as plt\n'), ((583, 613), 'numpy.array', 'np.array', (['best'], {'dtype': 'np.float'}), '(best, dtype=np.float)\n', (591, 613), True, 'import numpy as np\n'), ((644, 678), 'numpy.array', 'np.array', (['accuracy'], {'dtype': 'np.float'}), '(accuracy, dtype=np.float)\n', (652, 678), True, 'import numpy as np\n')] |
import subprocess
import pandas
import io
import requests
import time
import json
from matplotlib import pyplot as plt
import numpy as np
from merlin.core import analysistask
class SlurmReport(analysistask.AnalysisTask):
    """
    An analysis task that generates reports on previously completed analysis
    tasks using Slurm.
    This analysis task only works when Merlin is run through Slurm
    with every analysis task fragment run as a separate job.
    """

    def __init__(self, dataSet, parameters=None, analysisName=None):
        super().__init__(dataSet, parameters, analysisName)
        # Default to the first codebook when none is configured.
        if 'codebook_index' not in self.parameters:
            self.parameters['codebook_index'] = 0

    def get_estimated_memory(self):
        # Memory estimate in MB for scheduling this task.
        return 2048

    def get_estimated_time(self):
        # Time estimate in minutes for scheduling this task.
        return 5

    def get_dependencies(self):
        return [self.parameters['run_after_task']]

    def _generate_slurm_report(self, task: analysistask.AnalysisTask):
        """Query sacct for the Slurm job(s) of *task* and return the
        accounting data as a cleaned pandas DataFrame."""
        # Parallel tasks ran one Slurm job per fragment; collect them all.
        if isinstance(task, analysistask.ParallelAnalysisTask):
            idList = [
                self.dataSet.get_analysis_environment(task, i)['SLURM_JOB_ID']
                for i in range(task.fragment_count())]
        else:
            idList = [
                self.dataSet.get_analysis_environment(task)['SLURM_JOB_ID']]
        # '-P' makes sacct emit pipe-delimited output that pandas can parse.
        queryResult = subprocess.run(
            ['sacct', '--format=AssocID,Account,Cluster,User,JobID,JobName,'
             + 'NodeList,AveCPU,AveCPUFreq,MaxPages,MaxDiskRead,MaxDiskWrite,'
             + 'MaxRSS,ReqMem,CPUTime,Elapsed,Submit,Start,End,Timelimit',
             '--units=M', '-P', '-j', ','.join(idList)], stdout=subprocess.PIPE)
        slurmJobDF = pandas.read_csv(
            io.StringIO(queryResult.stdout.decode('utf-8')), sep='|')
        return self._clean_slurm_dataframe(slurmJobDF)

    @staticmethod
    def _clean_slurm_dataframe(inputDataFrame):
        """Collapse sacct's per-step rows into one row per job and convert
        the time columns to timedeltas (adds a 'Queued' column)."""
        # Drop the '.extern' bookkeeping steps sacct reports for each job.
        outputDF = inputDataFrame[
            ~inputDataFrame['JobID'].str.contains('.extern')]
        # Strip step suffixes ('1234.batch' -> '1234') so steps group together.
        outputDF = outputDF.assign(
            JobID=outputDF['JobID'].str.partition('.')[0])

        def get_not_nan(listIn):
            # First non-NaN value among a job's step rows.
            return listIn.dropna().iloc[0]
        outputDF = outputDF.groupby('JobID').aggregate(get_not_nan)

        def reformat_timedelta(elapsedIn):
            # Rewrite sacct's 'D-HH:MM:SS' into a form pandas can parse.
            splitElapsed = elapsedIn.split('-')
            if len(splitElapsed) > 1:
                return splitElapsed[0] + ' days ' + splitElapsed[1]
            else:
                return splitElapsed[0]
        outputDF = outputDF.assign(Elapsed=pandas.to_timedelta(
            outputDF['Elapsed'].apply(reformat_timedelta), unit='s'))
        outputDF = outputDF.assign(Timelimit=pandas.to_timedelta(
            outputDF['Timelimit'].apply(reformat_timedelta), unit='s'))
        # Queue wait is the gap between submission and start.
        outputDF = outputDF.assign(Queued=pandas.to_timedelta(
            pandas.to_datetime(outputDF['Start']) -
            pandas.to_datetime(outputDF['Submit']), unit='s'))
        return outputDF.reindex()

    def _plot_slurm_report(self, slurmDF, analysisName):
        """Save a 4-panel boxplot figure (RAM, run time, disk read, disk
        write) for one analysis task's Slurm jobs."""
        fig = plt.figure(figsize=(15, 4))
        plt.subplot(1, 4, 1)
        # MaxRSS ends in a unit letter, ReqMem in two — strip before casting.
        plt.boxplot([slurmDF['MaxRSS'].str[:-1].astype(float),
                     slurmDF['ReqMem'].str[:-2].astype(int)], widths=0.5)
        plt.xticks([1, 2], ['Max used', 'Requested'])
        plt.ylabel('Memory (mb)')
        plt.title('RAM')
        plt.subplot(1, 4, 2)
        # Dividing a timedelta by np.timedelta64(1, 'm') yields minutes.
        plt.boxplot([slurmDF['Queued'] / np.timedelta64(1, 'm'),
                     slurmDF['Elapsed'] / np.timedelta64(1, 'm'),
                     slurmDF['Timelimit'] / np.timedelta64(1, 'm')],
                    widths=0.5)
        plt.xticks([1, 2, 3], ['Queued', 'Elapsed', 'Requested'])
        plt.ylabel('Time (min)')
        plt.title('Run time')
        plt.subplot(1, 4, 3)
        plt.boxplot([slurmDF['MaxDiskRead'].str[:-1].astype(float)],
                    widths=0.25)
        plt.xticks([1], ['MaxDiskRead'])
        plt.ylabel('Number of mb read')
        plt.title('Disk usage')
        plt.subplot(1, 4, 4)
        plt.boxplot([slurmDF['MaxDiskWrite'].str[:-1].astype(float)],
                    widths=0.25)
        plt.xticks([1], ['MaxDiskWrite'])
        plt.ylabel('Number of mb written')
        plt.suptitle(analysisName)
        plt.tight_layout(pad=1)
        self.dataSet.save_figure(self, fig, analysisName)

    def _plot_slurm_summary(self, reportDict):
        """Save two summary figures comparing all tasks in *reportDict*
        (task name -> cleaned sacct DataFrame): one for memory, one for
        time."""
        def setBoxColors(bPlot, c):
            # Color every artist of a boxplot with one color.
            for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians',
                            'caps']:
                plt.setp(bPlot[element], color=c)

        # Plot memory requested and used for each task
        # (requested/used boxes are offset +-0.15 around each tick).
        fig = plt.figure(figsize=(15, 12))
        bp = plt.boxplot([d['MaxRSS'].str[:-1].astype(float)
                          for d in reportDict.values()],
                         positions=np.arange(len(reportDict))-0.15,
                         widths=0.25)
        setBoxColors(bp, 'r')
        bp = plt.boxplot([d['ReqMem'].str[:-2].astype(float)
                          for d in reportDict.values()],
                         positions=np.arange(len(reportDict))+0.15,
                         widths=0.25)
        setBoxColors(bp, 'b')
        plt.xticks(np.arange(len(reportDict)), list(reportDict.keys()),
                   rotation='vertical')
        plt.yscale('log')
        # Invisible proxy lines give the legend its color swatches.
        hB, = plt.plot([1, 1], 'b-')
        hR, = plt.plot([1, 1], 'r-')
        plt.legend((hB, hR), ('Requested', 'Max used'))
        hB.set_visible(False)
        hR.set_visible(False)
        plt.ylabel('Memory per job (mb)')
        plt.title('Memory summary')
        plt.ylim([100, plt.ylim()[1]])
        plt.xlim([-0.5, len(reportDict)-0.5])
        # Dashed separators between tasks.
        plt.vlines(np.arange(0.5, len(reportDict)), ymin=plt.ylim()[0],
                   ymax=plt.ylim()[1], linestyles='dashed')
        plt.tight_layout(pad=1)
        self.dataSet.save_figure(self, fig, 'memory_summary')

        # Plot time requested, queued and used for each task
        fig = plt.figure(figsize=(15, 12))
        bp = plt.boxplot([d['Elapsed'] / np.timedelta64(1, 'm')
                     for d in reportDict.values()],
                    positions=np.arange(len(reportDict))-0.15,
                    widths=0.25)
        setBoxColors(bp, 'r')
        bp = plt.boxplot([d['Timelimit'] / np.timedelta64(1, 'm')
                     for d in reportDict.values()],
                    positions=np.arange(len(reportDict))+0.15,
                    widths=0.25)
        setBoxColors(bp, 'b')
        # NOTE(review): 'Queued' reuses the +0.15 offset, so it overlaps the
        # 'Timelimit' boxes — possibly unintended; confirm desired layout.
        bp = plt.boxplot([d['Queued'] / np.timedelta64(1, 'm')
                     for d in reportDict.values()],
                    positions=np.arange(len(reportDict))+0.15,
                    widths=0.25)
        setBoxColors(bp, 'g')
        plt.xticks(np.arange(len(reportDict)), list(reportDict.keys()),
                   rotation='vertical')
        plt.yscale('log')
        hB, = plt.plot([1, 1], 'b-')
        hR, = plt.plot([1, 1], 'r-')
        hG, = plt.plot([1, 1], 'g-')
        plt.legend((hB, hR, hG), ('Requested', 'Used', 'Queued'))
        hB.set_visible(False)
        hR.set_visible(False)
        hG.set_visible(False)
        plt.ylabel('Time per job (min)')
        plt.title('Time summary')
        plt.xlim([-0.5, len(reportDict)+0.5])
        plt.vlines(np.arange(0.5, len(reportDict)), ymin=plt.ylim()[0],
                   ymax=plt.ylim()[1], linestyles='dashed')
        plt.tight_layout(pad=1)
        self.dataSet.save_figure(self, fig, 'time_summary')

    def _run_analysis(self):
        """Build per-task Slurm reports plus summary figures, and
        best-effort-upload the CSVs and dataset metadata to a remote
        collection endpoint (failures are silently ignored)."""
        taskList = self.dataSet.get_analysis_tasks()
        reportTime = int(time.time())
        reportDict = {}
        analysisParameters = {}
        for t in taskList:
            currentTask = self.dataSet.load_analysis_task(t)
            try:
                if currentTask.is_complete():
                    slurmDF = self._generate_slurm_report(currentTask)
                    self.dataSet.save_dataframe_to_csv(slurmDF, t, self,
                                                       'reports')
                    dfStream = io.StringIO()
                    slurmDF.to_csv(dfStream, sep='|')
                    self._plot_slurm_report(slurmDF, t)
                    reportDict[t] = slurmDF
                    analysisParameters[t] = currentTask.get_parameters()
                    # Best-effort telemetry upload; never fail the task on
                    # network errors.
                    try:
                        requests.post('http://merlin.georgeemanuel.com/post',
                                      files={'file': (
                                          '.'.join([t, self.dataSet.dataSetName,
                                                    str(reportTime)]) + '.csv',
                                          dfStream.getvalue())},
                                      timeout=10)
                    except requests.exceptions.RequestException:
                        pass
            except Exception:
                # Deliberate best-effort: a failing task report must not
                # abort reporting for the remaining tasks.
                pass
        self._plot_slurm_summary(reportDict)
        # Dataset-level metadata accompanying the uploaded reports.
        datasetMeta = {
            'image_width': self.dataSet.get_image_dimensions()[0],
            'image_height': self.dataSet.get_image_dimensions()[1],
            'barcode_length': self.dataSet.get_codebook(
                self.parameters['codebook_index']).get_bit_count(),
            'barcode_count': self.dataSet.get_codebook(
                self.parameters['codebook_index']).get_barcode_count(),
            'fov_count': len(self.dataSet.get_fovs()),
            'z_count': len(self.dataSet.get_z_positions()),
            'sequential_count': len(self.dataSet.get_data_organization()
                                    .get_sequential_rounds()),
            'dataset_name': self.dataSet.dataSetName,
            'report_time': reportTime,
            'analysis_parameters': analysisParameters
        }
        try:
            requests.post('http://merlin.georgeemanuel.com/post',
                          files={'file': ('.'.join(
                              [self.dataSet.dataSetName, str(reportTime)])
                                          + '.json', json.dumps(datasetMeta))},
                          timeout=10)
        except requests.exceptions.RequestException:
            pass
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.yscale",
"io.StringIO",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.setp",
"json.dumps",
"time.time",
"matplotlib.pyplot.figure",
"n... | [((3037, 3064), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (3047, 3064), True, 'from matplotlib import pyplot as plt\n'), ((3074, 3094), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (3085, 3094), True, 'from matplotlib import pyplot as plt\n'), ((3240, 3285), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1, 2]', "['Max used', 'Requested']"], {}), "([1, 2], ['Max used', 'Requested'])\n", (3250, 3285), True, 'from matplotlib import pyplot as plt\n'), ((3294, 3319), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Memory (mb)"""'], {}), "('Memory (mb)')\n", (3304, 3319), True, 'from matplotlib import pyplot as plt\n'), ((3328, 3344), 'matplotlib.pyplot.title', 'plt.title', (['"""RAM"""'], {}), "('RAM')\n", (3337, 3344), True, 'from matplotlib import pyplot as plt\n'), ((3353, 3373), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(2)'], {}), '(1, 4, 2)\n', (3364, 3373), True, 'from matplotlib import pyplot as plt\n'), ((3614, 3671), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1, 2, 3]', "['Queued', 'Elapsed', 'Requested']"], {}), "([1, 2, 3], ['Queued', 'Elapsed', 'Requested'])\n", (3624, 3671), True, 'from matplotlib import pyplot as plt\n'), ((3680, 3704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time (min)"""'], {}), "('Time (min)')\n", (3690, 3704), True, 'from matplotlib import pyplot as plt\n'), ((3713, 3734), 'matplotlib.pyplot.title', 'plt.title', (['"""Run time"""'], {}), "('Run time')\n", (3722, 3734), True, 'from matplotlib import pyplot as plt\n'), ((3743, 3763), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(3)'], {}), '(1, 4, 3)\n', (3754, 3763), True, 'from matplotlib import pyplot as plt\n'), ((3874, 3906), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['MaxDiskRead']"], {}), "([1], ['MaxDiskRead'])\n", (3884, 3906), True, 'from matplotlib import pyplot as plt\n'), ((3915, 3946), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Number of mb read"""'], {}), "('Number of mb read')\n", (3925, 3946), True, 'from matplotlib import pyplot as plt\n'), ((3955, 3978), 'matplotlib.pyplot.title', 'plt.title', (['"""Disk usage"""'], {}), "('Disk usage')\n", (3964, 3978), True, 'from matplotlib import pyplot as plt\n'), ((3987, 4007), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(4)'], {}), '(1, 4, 4)\n', (3998, 4007), True, 'from matplotlib import pyplot as plt\n'), ((4119, 4152), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['MaxDiskWrite']"], {}), "([1], ['MaxDiskWrite'])\n", (4129, 4152), True, 'from matplotlib import pyplot as plt\n'), ((4161, 4195), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of mb written"""'], {}), "('Number of mb written')\n", (4171, 4195), True, 'from matplotlib import pyplot as plt\n'), ((4204, 4230), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['analysisName'], {}), '(analysisName)\n', (4216, 4230), True, 'from matplotlib import pyplot as plt\n'), ((4239, 4262), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1)'}), '(pad=1)\n', (4255, 4262), True, 'from matplotlib import pyplot as plt\n'), ((4642, 4670), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (4652, 4670), True, 'from matplotlib import pyplot as plt\n'), ((5298, 5315), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5308, 5315), True, 'from matplotlib import pyplot as plt\n'), ((5330, 5352), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 1]', '"""b-"""'], {}), "([1, 1], 'b-')\n", (5338, 5352), True, 'from matplotlib import pyplot as plt\n'), ((5367, 5389), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 1]', '"""r-"""'], {}), "([1, 1], 'r-')\n", (5375, 5389), True, 'from matplotlib import pyplot as plt\n'), ((5398, 5445), 'matplotlib.pyplot.legend', 'plt.legend', (['(hB, hR)', "('Requested', 'Max used')"], {}), "((hB, hR), ('Requested', 'Max used'))\n", (5408, 
5445), True, 'from matplotlib import pyplot as plt\n'), ((5514, 5547), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Memory per job (mb)"""'], {}), "('Memory per job (mb)')\n", (5524, 5547), True, 'from matplotlib import pyplot as plt\n'), ((5556, 5583), 'matplotlib.pyplot.title', 'plt.title', (['"""Memory summary"""'], {}), "('Memory summary')\n", (5565, 5583), True, 'from matplotlib import pyplot as plt\n'), ((5809, 5832), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1)'}), '(pad=1)\n', (5825, 5832), True, 'from matplotlib import pyplot as plt\n'), ((5971, 5999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (5981, 5999), True, 'from matplotlib import pyplot as plt\n'), ((6889, 6906), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6899, 6906), True, 'from matplotlib import pyplot as plt\n'), ((6921, 6943), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 1]', '"""b-"""'], {}), "([1, 1], 'b-')\n", (6929, 6943), True, 'from matplotlib import pyplot as plt\n'), ((6958, 6980), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 1]', '"""r-"""'], {}), "([1, 1], 'r-')\n", (6966, 6980), True, 'from matplotlib import pyplot as plt\n'), ((6995, 7017), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 1]', '"""g-"""'], {}), "([1, 1], 'g-')\n", (7003, 7017), True, 'from matplotlib import pyplot as plt\n'), ((7026, 7083), 'matplotlib.pyplot.legend', 'plt.legend', (['(hB, hR, hG)', "('Requested', 'Used', 'Queued')"], {}), "((hB, hR, hG), ('Requested', 'Used', 'Queued'))\n", (7036, 7083), True, 'from matplotlib import pyplot as plt\n'), ((7182, 7214), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time per job (min)"""'], {}), "('Time per job (min)')\n", (7192, 7214), True, 'from matplotlib import pyplot as plt\n'), ((7223, 7248), 'matplotlib.pyplot.title', 'plt.title', (['"""Time summary"""'], {}), "('Time summary')\n", (7232, 7248), True, 'from matplotlib import pyplot as plt\n'), 
((7435, 7458), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1)'}), '(pad=1)\n', (7451, 7458), True, 'from matplotlib import pyplot as plt\n'), ((7628, 7639), 'time.time', 'time.time', ([], {}), '()\n', (7637, 7639), False, 'import time\n'), ((4538, 4571), 'matplotlib.pyplot.setp', 'plt.setp', (['bPlot[element]'], {'color': 'c'}), '(bPlot[element], color=c)\n', (4546, 4571), True, 'from matplotlib import pyplot as plt\n'), ((3415, 3437), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (3429, 3437), True, 'import numpy as np\n'), ((3481, 3503), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (3495, 3503), True, 'import numpy as np\n'), ((3549, 3571), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (3563, 3571), True, 'import numpy as np\n'), ((5607, 5617), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (5615, 5617), True, 'from matplotlib import pyplot as plt\n'), ((5726, 5736), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (5734, 5736), True, 'from matplotlib import pyplot as plt\n'), ((5765, 5775), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (5773, 5775), True, 'from matplotlib import pyplot as plt\n'), ((6041, 6063), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (6055, 6063), True, 'import numpy as np\n'), ((6299, 6321), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (6313, 6321), True, 'import numpy as np\n'), ((6554, 6576), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (6568, 6576), True, 'import numpy as np\n'), ((7352, 7362), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (7360, 7362), True, 'from matplotlib import pyplot as plt\n'), ((7391, 7401), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (7399, 7401), True, 'from matplotlib import pyplot as plt\n'), ((8089, 8102), 'io.StringIO', 'io.StringIO', 
([], {}), '()\n', (8100, 8102), False, 'import io\n'), ((2827, 2864), 'pandas.to_datetime', 'pandas.to_datetime', (["outputDF['Start']"], {}), "(outputDF['Start'])\n", (2845, 2864), False, 'import pandas\n'), ((2879, 2917), 'pandas.to_datetime', 'pandas.to_datetime', (["outputDF['Submit']"], {}), "(outputDF['Submit'])\n", (2897, 2917), False, 'import pandas\n'), ((10035, 10058), 'json.dumps', 'json.dumps', (['datasetMeta'], {}), '(datasetMeta)\n', (10045, 10058), False, 'import json\n')] |
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
import matplotlib.pyplot as plt
from skimage import data, util
from skimage.draw import ellipse
from skimage.measure import label, regionprops
from skimage.transform import rotate
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats import kurtosis
import math
# Hide the Tk root window and ask the user to pick an image file.
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
#img = cv.imread(root.filename,0)
root.destroy()
# import the necessary packages
import cv2
import argparse
# Mouse-selection state shared with the shape_selection callback:
# ref_point holds the press/release corners; 'crop' is set but never updated.
ref_point = []
crop = False
def shape_selection(event, x, y, flags, param):
    """Mouse callback: record a drag rectangle and draw its inscribed ellipse.

    On left-button press the start corner is stored in the module-level
    ``ref_point`` list; on release the end corner is appended, an ellipse
    inscribed in the dragged rectangle is drawn onto the module-level
    ``image``, and the window is refreshed.
    """
    # grab references to the global variables
    global ref_point, crop

    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: remember the first corner.
        ref_point = [(x, y)]
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag finished: remember the second corner and draw the ellipse.
        ref_point.append((x, y))
        start, end = ref_point[0], ref_point[1]
        half_w = (end[0] - start[0]) // 2
        half_h = (end[1] - start[1]) // 2
        center = (start[0] + half_w, start[1] + half_h)
        cv2.ellipse(image, center, (half_w, half_h), 0, 0, 360,
                    (100, 7, 55), 2)
        cv2.imshow("image", image)
# load the image, clone it, and setup the mouse callback function
image = cv2.imread(root.filename)
clone = image.copy()  # pristine copy used for reset and cropping
cv2.namedWindow("image")
cv2.setMouseCallback("image", shape_selection)
# keep looping until the 'c' key is pressed
while True:
    # display the image and wait for a keypress
    cv2.imshow("image", image)
    key = cv2.waitKey(1) & 0xFF
    # press 'r' to reset the window
    if key == ord("r"):
        image = clone.copy()
    # if the 'c' key is pressed, break from the loop
    elif key == ord("c"):
        break
# Rectangular crop spanned by the two selected corners (y1:y2, x1:x2).
if len(ref_point) == 2:
    crop_img2 = clone[ref_point[0][1]:ref_point[1][1], ref_point[0][0]:
    ref_point[1][0]]
# NOTE(review): everything below assumes a selection was made; crop_img2 is
# undefined if ref_point does not hold exactly two points.
height = image.shape[0]
width = image.shape[1]
Punto_antes = ref_point[0]
Punto_actual = ref_point[1]
# Centre of the selected rectangle (and of the inscribed ellipse).
center = (Punto_antes[0]+(Punto_actual[0]-Punto_antes[0])//2,Punto_antes[1]+(Punto_actual[1]-Punto_antes[1])//2)
# create a mask image of the same shape as input image, filled with 0s (black color)
mask = np.zeros_like(clone)
rows, cols,_ = mask.shape
# create a white filled ellipse
mask = cv2.ellipse(mask, center, ((Punto_actual[0]-Punto_antes[0])//2,(Punto_actual[1]-Punto_antes[1])//2), 0, 0, 360,(255, 255, 255), -1)
# Bitwise AND operation to black out regions outside the mask
crop_img = np.bitwise_and(clone,mask)
(h, w) = crop_img2.shape[:2]  # size of the rectangular crop
sumCols = []
sumFils=[]
plt.figure(u'Histograma horizontal y vertical')
# Global intensity statistics over the flattened rectangular crop.
newarray=np.array(crop_img2.flatten())
meanArr=np.mean(newarray)
varArr=np.var(newarray)
skeArr=skew(newarray)
kurArr=kurtosis(newarray)
# Vertical histogram: column-wise intensity sums (rows 10..h of each column).
for j in range(w):
    col = crop_img2[10:h, j:j+1] # y1:y2, x1:x2
    sumCols.append(np.sum(col))
plt.subplot(211)
plt.plot(sumCols)
plt.title(u'Histogramas vertical y horizontal')
plt.xlabel(u'Número de columnas')
plt.ylabel(u'Nivel de intensidad')
# Horizontal histogram: row-wise intensity sums (cols 10..w of each row).
for j in range(h):
    cole = crop_img2[j:j+1, 10:w ] # y1:y2, x1:x2
    sumFils.append(np.sum(cole))
plt.subplot(212)
plt.plot(sumFils)
#plt.title(u'Histograma horizontal')
plt.xlabel(u'Número de filas')
plt.ylabel(u'Nivel de intensidad')
# First moments (mean/variance/std/skew/kurtosis) of each projection.
meanH=np.mean(sumFils)
varH= np.var(sumFils)
stdH = np.std(sumFils)
skeH=skew(sumFils)
kurH=kurtosis(sumFils)
meanV=np.mean(sumCols)
varV= np.var(sumCols)
stdV = np.std(sumCols)
skeV=skew(sumCols)
kurV=kurtosis(sumCols)
# Structuring elements for morphology; only 'kernel' is actually used below.
kernelCross = cv2.getStructuringElement(cv2.MORPH_CROSS,(9,9))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
#kernel9 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(,5))
kerneldiamond = np.array([[0, 0, 1, 0, 0],
                           [0, 1, 1, 1, 0],
                           [1, 1, 1, 1, 1],
                           [0, 1, 1, 1, 0],
                           [0, 0, 1, 0, 0]], np.uint8)
kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9))
# Grayscale, inverted threshold and erosion, then outer contours of both the
# thresholded image and its complement.
crop_img =cv2.cvtColor(crop_img,cv2.COLOR_RGB2GRAY)
ret,thresh = cv.threshold(crop_img,130,255,cv.THRESH_BINARY_INV)
thresh = cv2.erode(thresh, kernel, iterations=6)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contours2, hierarchy = cv2.findContours(255-thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cnt = contours[0]
cnt2=contours2[0]
# Centroid of the complement contour from its image moments.
M = cv.moments(cnt2)
# calculate x,y coordinate of center
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# put text and highlight the center
cv.circle(crop_img, (cX, cY), 3, (0, 0, 155), -1)
cv.putText(crop_img, "C", (cX - 1, cY - 10),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 155), 2)
#MAx, MAy = int(0.5 * ellipseMajorAxisx*math.sin(ellipseAngle)), int(0.5 * ellipseMajorAxisy*math.cos(ellipseAngle))
altura=crop_img2.shape[0]
ancho=crop_img2.shape[1]
#area = altura*ancho
#area = cv.contourArea(cnt)
# Fit ellipses to both contours; the complement's semi-axes give the area.
(x,y),(MA,ma),angle = cv.fitEllipse(cnt)
(x2,y2),(MA2,ma2),angle2 = cv.fitEllipse(cnt2)
area=(int(MA2)//2)*(int(ma2)//2)*(math.pi)
# End points of the minor and major axes through the centroid.
# NOTE(review): fitEllipse returns 'angle' in degrees, but np.sin/np.cos
# expect radians — the drawn axes may be rotated incorrectly; confirm.
xMinor=cX + int((MA/2)*np.cos(angle))
x2Minor=cX - int((MA/2)*np.cos(angle))
yMinor=cY + int((MA/2)*np.sin(angle))
y2Minor=cY - int((MA/2)*np.sin(angle))
xMajor=cX + int((ma/2)*np.sin(angle))
x2Major=cX - int((ma/2)*np.sin(angle))
yMajor=cY -int((ma/2)*np.cos(angle))
y2Major=cY +int((ma/2)*np.cos(angle))
cv.line(crop_img, (xMinor, yMinor),(x2Minor,y2Minor), (0, 0, 155), 1)
cv.line(crop_img, (xMajor, yMajor),(x2Major,y2Major), (0, 0, 155), 1)
#cv.line(im, (xMinor, yMinor),(cX, cY), (0, 255, 0), 1)
#cv.line(im, (xMajor, yMajor),(cX, cY), (0, 255,0), 1)
#cv.putText(crop_img, "A: "+str(int(area)), (cX - int(0.42*height), cY + int(0.42*width)),cv.FONT_HERSHEY_SIMPLEX, 0.42, (255, 55, 0),1)
# Overlay the major/minor ellipse diameters on the crop.
cv.putText(crop_img, "D.Ma: "+str(int(ma2)), (int(0.1*height), cY-int(0.4*width)),cv.FONT_HERSHEY_SIMPLEX, 0.42, (255, 55, 0),1)
cv.putText(crop_img, "D.Me: "+str(int(MA2)), (int(0.1*height),cY - int(0.5*width)),cv.FONT_HERSHEY_SIMPLEX, 0.42, (255, 55, 0),1 )
# Console report of the computed statistics.
print("LOS VALORES OBTENIDOS SON :")
print("El área es "+str(area))
print("La media es: " + str(meanArr) +" Varianza " + str(varArr) +" Oblicuidad " + str(skeArr) +" Kurtosis "+ str(kurArr))
print("MEDIA HORIZONTAL " +str(meanH) + " Vrianza horizonral "+ str(varH) + " Desviación horizontal " + str(stdH))
print("MEDIA VERTICAL " + str(meanV) +" Vrianza VERTICAL " + str(varV) +" Desviación VERTICAL " + str(stdV))
print("Oblicuidad HORIZONTAL " +str(skeH) + " Oblicuidad VERTICAL "+ str(skeV) )
print("kurtosis HORIZONTAL " +str(kurH) + " kurtosis VERTICAL "+ str(kurV) )
cv.drawContours(crop_img,contours,-1,255,2)
#cv.drawContours(im,[cnt],0,(255,0,0),-1)
cv2.imshow('Fitting an Ellipse ',crop_img)
cv2.imshow('Fitting an ',255-thresh)
#cv2.imshow("cropeada",crop_img2)
plt.show()
cv2.waitKey(0)
# close all open windows
cv2.destroyAllWindows()
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.figure",
"cv2.ellipse",
"numpy.mean",
"numpy.sin",
"cv2.erode",
"cv2.imshow",
"cv2.line",
"numpy.zeros_like",
"numpy.std",
"cv2.cvtColor",
"tkinter.filedialog.askopenfilename",
"cv2.setMouseCallback",
"cv2.fitEllipse",
"cv2.dra... | [((474, 598), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select file"""', 'filetypes': "(('all files', '.*'), ('jpg files', '.jpg'))"}), "(initialdir='/', title='Select file', filetypes=(\n ('all files', '.*'), ('jpg files', '.jpg')))\n", (500, 598), False, 'from tkinter import filedialog\n'), ((2297, 2322), 'cv2.imread', 'cv2.imread', (['root.filename'], {}), '(root.filename)\n', (2307, 2322), False, 'import cv2\n'), ((2348, 2372), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (2363, 2372), False, 'import cv2\n'), ((2375, 2421), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'shape_selection'], {}), "('image', shape_selection)\n", (2395, 2421), False, 'import cv2\n'), ((8802, 8825), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8823, 8825), False, 'import cv2\n'), ((2546, 2572), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (2556, 2572), False, 'import cv2\n'), ((3335, 3355), 'numpy.zeros_like', 'np.zeros_like', (['clone'], {}), '(clone)\n', (3348, 3355), True, 'import numpy as np\n'), ((3436, 3582), 'cv2.ellipse', 'cv2.ellipse', (['mask', 'center', '((Punto_actual[0] - Punto_antes[0]) // 2, (Punto_actual[1] - Punto_antes[1]\n ) // 2)', '(0)', '(0)', '(360)', '(255, 255, 255)', '(-1)'], {}), '(mask, center, ((Punto_actual[0] - Punto_antes[0]) // 2, (\n Punto_actual[1] - Punto_antes[1]) // 2), 0, 0, 360, (255, 255, 255), -1)\n', (3447, 3582), False, 'import cv2\n'), ((3657, 3684), 'numpy.bitwise_and', 'np.bitwise_and', (['clone', 'mask'], {}), '(clone, mask)\n', (3671, 3684), True, 'import numpy as np\n'), ((3951, 3998), 'matplotlib.pyplot.figure', 'plt.figure', (['u"""Histograma horizontal y vertical"""'], {}), "(u'Histograma horizontal y vertical')\n", (3961, 3998), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4075), 'numpy.mean', 'np.mean', (['newarray'], {}), 
'(newarray)\n', (4065, 4075), True, 'import numpy as np\n'), ((4088, 4104), 'numpy.var', 'np.var', (['newarray'], {}), '(newarray)\n', (4094, 4104), True, 'import numpy as np\n'), ((4117, 4131), 'scipy.stats.skew', 'skew', (['newarray'], {}), '(newarray)\n', (4121, 4131), False, 'from scipy.stats import skew\n'), ((4144, 4162), 'scipy.stats.kurtosis', 'kurtosis', (['newarray'], {}), '(newarray)\n', (4152, 4162), False, 'from scipy.stats import kurtosis\n'), ((4905, 4921), 'numpy.mean', 'np.mean', (['sumFils'], {}), '(sumFils)\n', (4912, 4921), True, 'import numpy as np\n'), ((4933, 4948), 'numpy.var', 'np.var', (['sumFils'], {}), '(sumFils)\n', (4939, 4948), True, 'import numpy as np\n'), ((4961, 4976), 'numpy.std', 'np.std', (['sumFils'], {}), '(sumFils)\n', (4967, 4976), True, 'import numpy as np\n'), ((4987, 5000), 'scipy.stats.skew', 'skew', (['sumFils'], {}), '(sumFils)\n', (4991, 5000), False, 'from scipy.stats import skew\n'), ((5011, 5028), 'scipy.stats.kurtosis', 'kurtosis', (['sumFils'], {}), '(sumFils)\n', (5019, 5028), False, 'from scipy.stats import kurtosis\n'), ((5042, 5058), 'numpy.mean', 'np.mean', (['sumCols'], {}), '(sumCols)\n', (5049, 5058), True, 'import numpy as np\n'), ((5070, 5085), 'numpy.var', 'np.var', (['sumCols'], {}), '(sumCols)\n', (5076, 5085), True, 'import numpy as np\n'), ((5098, 5113), 'numpy.std', 'np.std', (['sumCols'], {}), '(sumCols)\n', (5104, 5113), True, 'import numpy as np\n'), ((5124, 5137), 'scipy.stats.skew', 'skew', (['sumCols'], {}), '(sumCols)\n', (5128, 5137), False, 'from scipy.stats import skew\n'), ((5148, 5165), 'scipy.stats.kurtosis', 'kurtosis', (['sumCols'], {}), '(sumCols)\n', (5156, 5165), False, 'from scipy.stats import kurtosis\n'), ((5187, 5237), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS', '(9, 9)'], {}), '(cv2.MORPH_CROSS, (9, 9))\n', (5212, 5237), False, 'import cv2\n'), ((5250, 5302), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 
'(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (5275, 5302), False, 'import cv2\n'), ((5388, 5498), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0,\n 1, 0, 0]]', 'np.uint8'], {}), '([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0\n ], [0, 0, 1, 0, 0]], np.uint8)\n', (5396, 5498), True, 'import numpy as np\n'), ((5627, 5679), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(9, 9)'], {}), '(cv2.MORPH_ELLIPSE, (9, 9))\n', (5652, 5679), False, 'import cv2\n'), ((5725, 5767), 'cv2.cvtColor', 'cv2.cvtColor', (['crop_img', 'cv2.COLOR_RGB2GRAY'], {}), '(crop_img, cv2.COLOR_RGB2GRAY)\n', (5737, 5767), False, 'import cv2\n'), ((5786, 5840), 'cv2.threshold', 'cv.threshold', (['crop_img', '(130)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(crop_img, 130, 255, cv.THRESH_BINARY_INV)\n', (5798, 5840), True, 'import cv2 as cv\n'), ((5852, 5891), 'cv2.erode', 'cv2.erode', (['thresh', 'kernel'], {'iterations': '(6)'}), '(thresh, kernel, iterations=6)\n', (5861, 5891), False, 'import cv2\n'), ((5919, 5985), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (5935, 5985), False, 'import cv2\n'), ((6012, 6084), 'cv2.findContours', 'cv2.findContours', (['(255 - thresh)', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(255 - thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (6028, 6084), False, 'import cv2\n'), ((6136, 6152), 'cv2.moments', 'cv.moments', (['cnt2'], {}), '(cnt2)\n', (6146, 6152), True, 'import cv2 as cv\n'), ((6323, 6372), 'cv2.circle', 'cv.circle', (['crop_img', '(cX, cY)', '(3)', '(0, 0, 155)', '(-1)'], {}), '(crop_img, (cX, cY), 3, (0, 0, 155), -1)\n', (6332, 6372), True, 'import cv2 as cv\n'), ((6378, 6472), 'cv2.putText', 'cv.putText', (['crop_img', '"""C"""', '(cX - 1, cY - 10)', 'cv.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 155)', '(2)'], 
{}), "(crop_img, 'C', (cX - 1, cY - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5,\n (0, 0, 155), 2)\n", (6388, 6472), True, 'import cv2 as cv\n'), ((6737, 6755), 'cv2.fitEllipse', 'cv.fitEllipse', (['cnt'], {}), '(cnt)\n', (6750, 6755), True, 'import cv2 as cv\n'), ((6788, 6807), 'cv2.fitEllipse', 'cv.fitEllipse', (['cnt2'], {}), '(cnt2)\n', (6801, 6807), True, 'import cv2 as cv\n'), ((7211, 7282), 'cv2.line', 'cv.line', (['crop_img', '(xMinor, yMinor)', '(x2Minor, y2Minor)', '(0, 0, 155)', '(1)'], {}), '(crop_img, (xMinor, yMinor), (x2Minor, y2Minor), (0, 0, 155), 1)\n', (7218, 7282), True, 'import cv2 as cv\n'), ((7286, 7357), 'cv2.line', 'cv.line', (['crop_img', '(xMajor, yMajor)', '(x2Major, y2Major)', '(0, 0, 155)', '(1)'], {}), '(crop_img, (xMajor, yMajor), (x2Major, y2Major), (0, 0, 155), 1)\n', (7293, 7357), True, 'import cv2 as cv\n'), ((8509, 8556), 'cv2.drawContours', 'cv.drawContours', (['crop_img', 'contours', '(-1)', '(255)', '(2)'], {}), '(crop_img, contours, -1, 255, 2)\n', (8524, 8556), True, 'import cv2 as cv\n'), ((8605, 8649), 'cv2.imshow', 'cv2.imshow', (['"""Fitting an Ellipse """', 'crop_img'], {}), "('Fitting an Ellipse ', crop_img)\n", (8615, 8649), False, 'import cv2\n'), ((8654, 8695), 'cv2.imshow', 'cv2.imshow', (['"""Fitting an """', '(255 - thresh)'], {}), "('Fitting an ', 255 - thresh)\n", (8664, 8695), False, 'import cv2\n'), ((8737, 8747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8745, 8747), True, 'import matplotlib.pyplot as plt\n'), ((8753, 8767), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8764, 8767), False, 'import cv2\n'), ((2585, 2599), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2596, 2599), False, 'import cv2\n'), ((4326, 4342), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (4337, 4342), True, 'import matplotlib.pyplot as plt\n'), ((4356, 4373), 'matplotlib.pyplot.plot', 'plt.plot', (['sumCols'], {}), '(sumCols)\n', (4364, 4373), True, 'import matplotlib.pyplot as 
plt\n'), ((4387, 4434), 'matplotlib.pyplot.title', 'plt.title', (['u"""Histogramas vertical y horizontal"""'], {}), "(u'Histogramas vertical y horizontal')\n", (4396, 4434), True, 'import matplotlib.pyplot as plt\n'), ((4448, 4481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""Número de columnas"""'], {}), "(u'Número de columnas')\n", (4458, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4496, 4530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['u"""Nivel de intensidad"""'], {}), "(u'Nivel de intensidad')\n", (4506, 4530), True, 'import matplotlib.pyplot as plt\n'), ((4701, 4717), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (4712, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4731, 4748), 'matplotlib.pyplot.plot', 'plt.plot', (['sumFils'], {}), '(sumFils)\n', (4739, 4748), True, 'import matplotlib.pyplot as plt\n'), ((4812, 4842), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""Número de filas"""'], {}), "(u'Número de filas')\n", (4822, 4842), True, 'import matplotlib.pyplot as plt\n'), ((4857, 4891), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['u"""Nivel de intensidad"""'], {}), "(u'Nivel de intensidad')\n", (4867, 4891), True, 'import matplotlib.pyplot as plt\n'), ((1911, 2054), 'cv2.ellipse', 'cv2.ellipse', (['image', 'center', '((Punto_actual[0] - Punto_antes[0]) // 2, (Punto_actual[1] - Punto_antes[1]\n ) // 2)', '(0)', '(0)', '(360)', '(100, 7, 55)', '(2)'], {}), '(image, center, ((Punto_actual[0] - Punto_antes[0]) // 2, (\n Punto_actual[1] - Punto_antes[1]) // 2), 0, 0, 360, (100, 7, 55), 2)\n', (1922, 2054), False, 'import cv2\n'), ((2184, 2210), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (2194, 2210), False, 'import cv2\n'), ((4300, 4311), 'numpy.sum', 'np.sum', (['col'], {}), '(col)\n', (4306, 4311), True, 'import numpy as np\n'), ((4674, 4686), 'numpy.sum', 'np.sum', (['cole'], {}), '(cole)\n', (4680, 4686), True, 'import numpy as np\n'), ((6886, 6899), 'numpy.cos', 
'np.cos', (['angle'], {}), '(angle)\n', (6892, 6899), True, 'import numpy as np\n'), ((6930, 6943), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (6936, 6943), True, 'import numpy as np\n'), ((6973, 6986), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (6979, 6986), True, 'import numpy as np\n'), ((7017, 7030), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7023, 7030), True, 'import numpy as np\n'), ((7062, 7075), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7068, 7075), True, 'import numpy as np\n'), ((7106, 7119), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7112, 7119), True, 'import numpy as np\n'), ((7148, 7161), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7154, 7161), True, 'import numpy as np\n'), ((7191, 7204), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7197, 7204), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
IMAGE = cv.imread('D:\@Semester 06\Digital Image Processing\Lab\Manuals\Figures\lab7\_img1.png', 0)
cv.imshow('Original Image', IMAGE)
cv.waitKey()
cv.destroyAllWindows()
print(IMAGE.shape)
def globalAdaptiveThreshold(_img):
size = np.shape(_img) # Find img size
rows = size[0]
cols = size[1]
newImg = np.zeros([rows, cols], dtype=np.uint8) # New img
group1 = group2 = [] # Lists to store groups
prevThreshold = int(np.mean(_img)) # Initial threshold
epsilon = 8
for r in range(0, rows): # Initial thresholding
for c in range(0, cols):
if _img[r][c] < prevThreshold:
group1.append(_img[r][c])
else:
group2.append(_img[r][c])
m1 = sum(group1)/len(group1) # Find mean of each group
m2 = sum(group2)/len(group2)
nextThreshold = (m1 + m2)/2 # Find new threshold
while abs(nextThreshold-prevThreshold) >= epsilon: # Keep going if condition is not satisfied.
group1 = group2 = [] # Clear groups
for r in range(0, rows): # Again threshold
for c in range(0, cols):
if _img[r][c] < nextThreshold:
group1.append(_img[r][c])
else:
group2.append(_img[r][c])
prevThreshold = nextThreshold
m1 = sum(group1) / len(group1) # Find mean of each group
m2 = sum(group2) / len(group2)
nextThreshold = (m1 + m2) / 2
for r in range(0, rows): # Thresholding on the basics of final threshold after condition is met.
for c in range(0, cols):
newImg[r][c] = 0 if _img[r][c] < nextThreshold else 255
print(newImg.shape)
cv.imshow('Global Adaptive Thresholding', newImg) # Show image
cv.waitKey(0)
cv.destroyAllWindows()
globalAdaptiveThreshold(IMAGE)
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"numpy.shape",
"cv2.imread",
"numpy.mean",
"cv2.imshow"
] | [((45, 153), 'cv2.imread', 'cv.imread', (['"""D:\\\\@Semester 06\\\\Digital Image Processing\\\\Lab\\\\Manuals\\\\Figures\\\\lab7\\\\_img1.png"""', '(0)'], {}), "(\n 'D:\\\\@Semester 06\\\\Digital Image Processing\\\\Lab\\\\Manuals\\\\Figures\\\\lab7\\\\_img1.png'\n , 0)\n", (54, 153), True, 'import cv2 as cv\n'), ((137, 171), 'cv2.imshow', 'cv.imshow', (['"""Original Image"""', 'IMAGE'], {}), "('Original Image', IMAGE)\n", (146, 171), True, 'import cv2 as cv\n'), ((172, 184), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (182, 184), True, 'import cv2 as cv\n'), ((185, 207), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (205, 207), True, 'import cv2 as cv\n'), ((275, 289), 'numpy.shape', 'np.shape', (['_img'], {}), '(_img)\n', (283, 289), True, 'import numpy as np\n'), ((360, 398), 'numpy.zeros', 'np.zeros', (['[rows, cols]'], {'dtype': 'np.uint8'}), '([rows, cols], dtype=np.uint8)\n', (368, 398), True, 'import numpy as np\n'), ((1793, 1842), 'cv2.imshow', 'cv.imshow', (['"""Global Adaptive Thresholding"""', 'newImg'], {}), "('Global Adaptive Thresholding', newImg)\n", (1802, 1842), True, 'import cv2 as cv\n'), ((1863, 1876), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1873, 1876), True, 'import cv2 as cv\n'), ((1881, 1903), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1901, 1903), True, 'import cv2 as cv\n'), ((516, 529), 'numpy.mean', 'np.mean', (['_img'], {}), '(_img)\n', (523, 529), True, 'import numpy as np\n')] |
'''
ModelNet dataset. Support ModelNet40, ModelNet10, XYZ and normal channels. Up to 10000 points.
'''
import os
import os.path
import numpy as np
import sys
from glob import glob
from collections import Counter
import tensorflow as tf
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
pc = pc / m
return pc
class PFRDataset:
def __init__(
self,
root,
batch_size=32,
npoints=1024,
split='train',
normalize=True,
normal_channel=True,
cache_size=15000,
shuffle=None,
shuffle_points=False,
scale_low=0.7,
scale_high=1.3,
shift_range=0.3,
jitter_sigma=0.005,
add_n_c_info=True,
omit_parameters_ranges=[],
to_categorical_indexes=[],
to_categorical_sizes=[]
):
self.root = root
self.batch_size = batch_size
self.npoints = npoints
self.normalize = normalize
self.add_n_c_info = add_n_c_info
if add_n_c_info:
self.n_c = np.expand_dims(np.array(np.arange(npoints) / npoints), axis=1)
self.classes_names = ['.'.join(j) for j in sorted([i.split('.') for i in os.listdir(self.root)], key=lambda x: (x[0], int(x[1])))]
self.classes = dict(zip(self.classes_names, range(len(self.classes_names))))
self.normal_channel = normal_channel
self.shuffle_points = shuffle_points
self.scale_low = scale_low
self.scale_high = scale_high
self.shift_range = shift_range
self.jitter_sigma = jitter_sigma
self.omit_parameters_ranges = omit_parameters_ranges
self.to_categorical_indexes = to_categorical_indexes
self.to_categorical_sizes = to_categorical_sizes
assert split == 'train' or split == 'test'
# list of (shape_name, shape_txt_file_path) tuple
self.datapath = sorted(
[(i.split('/')[2], i) for i in sorted(glob(self.root + f'/*/{split}/*.npy'))],
key=lambda x: (x[0][0], int(x[0][2:]))
)
self.cache_size = cache_size # how many data points to cache in memory
self.cache = {} # from index to (point_set, cls) tuple
self.get_classes_weights()
if shuffle is None:
if split == 'train':
self.shuffle = True
else:
self.shuffle = False
else:
self.shuffle = shuffle
self.reset()
def _augment_batch_data(self, batch_data):
if self.normal_channel:
rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
else:
rotated_data = provider.rotate_point_cloud(batch_data)
rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3], scale_low=self.scale_low, scale_high=self.scale_high)
jittered_data = provider.shift_point_cloud(jittered_data, shift_range=self.shift_range)
jittered_data = provider.jitter_point_cloud(jittered_data, sigma=self.jitter_sigma, clip=0.1)
rotated_data[:, :, 0:3] = jittered_data
if self.shuffle_points:
return provider.shuffle_points(rotated_data)
else:
return rotated_data
def _get_item(self, index):
if index in self.cache:
point_set, cls = self.cache[index]
else:
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
cls = np.array([cls]).astype(np.int32)
if self.normal_channel:
point_set = np.load(fn[1])[:, :]
for i in range(len(self.omit_parameters_ranges) - 1, -1, -2):
point_set = np.concatenate(
[point_set[:, :self.omit_parameters_ranges[i - 1]], point_set[:, self.omit_parameters_ranges[i]:]], axis=1
)
else:
point_set = np.load(fn[1])[:, :3]
if len(self.cache) < self.cache_size:
self.cache[index] = (point_set, cls)
# Take exactly n npoints
ind = np.arange(point_set.shape[0])
if len(ind) > self.npoints:
ind = np.sort(np.random.choice(ind, self.npoints, replace=False))
else:
ind = np.sort(np.random.choice(ind, self.npoints, replace=True))
point_set = point_set[ind, :]
for cat_ind, cat_size in zip(self.to_categorical_indexes, self.to_categorical_sizes):
cat = tf.keras.utils.to_categorical(point_set[:, cat_ind], num_classes=cat_size)
point_set = np.concatenate([point_set[:, :cat_ind], cat, point_set[:, cat_ind+1:]], axis=1)
if self.add_n_c_info:
point_set = np.concatenate([point_set, self.n_c], axis=1)
if self.normalize:
point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
if not self.normal_channel:
point_set = point_set[:, 0:3]
return point_set, cls
def __getitem__(self, index):
return self._get_item(index)
def __len__(self):
return len(self.datapath)
def num_channel(self):
return self._get_item(0)[0].shape[1]
def reset(self):
self.idxs = np.arange(0, len(self.datapath))
if self.shuffle:
np.random.shuffle(self.idxs)
self.num_batches = (len(self.datapath) + self.batch_size - 1) // self.batch_size
self.batch_idx = 0
def has_next_batch(self):
return self.batch_idx < self.num_batches
def next_batch(self, augment=False):
''' returned dimension may be smaller than self.batch_size '''
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx + 1) * self.batch_size, len(self.datapath))
bsize = end_idx - start_idx
batch_data = np.zeros((bsize, self.npoints, self.num_channel()))
batch_label = np.zeros((bsize), dtype=np.int32)
batch_cls_weights = np.zeros((bsize), dtype=np.float32)
for i in range(bsize):
ps, cls = self._get_item(self.idxs[i + start_idx])
batch_data[i] = ps
batch_label[i] = cls
batch_cls_weights[i] = self.weights[cls[0]]
self.batch_idx += 1
if augment:
batch_data = self._augment_batch_data(batch_data)
return batch_data, batch_label, batch_cls_weights
def get_classes_weights(self):
classes = [j[0] for j in self.datapath]
weights = {k: 1/v for k,v in Counter(classes).items()}
mean = np.mean(list(weights.values()))
weights = {k: v / mean for k, v in weights.items()}
sorted_weights = sorted(weights.items(), key=lambda x: (x[0][0], x[0].split('.')[2:]))
# return [i[1] for i in sorted_weights]
self.weights = [i[1] for i in sorted_weights]
| [
"provider.shuffle_points",
"numpy.load",
"numpy.sum",
"numpy.mean",
"numpy.arange",
"provider.rotate_perturbation_point_cloud_with_normal",
"glob.glob",
"os.path.join",
"provider.rotate_perturbation_point_cloud",
"os.path.abspath",
"numpy.random.choice",
"collections.Counter",
"provider.rota... | [((269, 294), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (284, 294), False, 'import os\n'), ((332, 363), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""utils"""'], {}), "(ROOT_DIR, 'utils')\n", (344, 363), False, 'import os\n'), ((439, 458), 'numpy.mean', 'np.mean', (['pc'], {'axis': '(0)'}), '(pc, axis=0)\n', (446, 458), True, 'import numpy as np\n'), ((3126, 3243), 'provider.random_scale_point_cloud', 'provider.random_scale_point_cloud', (['rotated_data[:, :, 0:3]'], {'scale_low': 'self.scale_low', 'scale_high': 'self.scale_high'}), '(rotated_data[:, :, 0:3], scale_low=self.\n scale_low, scale_high=self.scale_high)\n', (3159, 3243), False, 'import provider\n'), ((3263, 3334), 'provider.shift_point_cloud', 'provider.shift_point_cloud', (['jittered_data'], {'shift_range': 'self.shift_range'}), '(jittered_data, shift_range=self.shift_range)\n', (3289, 3334), False, 'import provider\n'), ((3359, 3436), 'provider.jitter_point_cloud', 'provider.jitter_point_cloud', (['jittered_data'], {'sigma': 'self.jitter_sigma', 'clip': '(0.1)'}), '(jittered_data, sigma=self.jitter_sigma, clip=0.1)\n', (3386, 3436), False, 'import provider\n'), ((4474, 4503), 'numpy.arange', 'np.arange', (['point_set.shape[0]'], {}), '(point_set.shape[0])\n', (4483, 4503), True, 'import numpy as np\n'), ((6262, 6293), 'numpy.zeros', 'np.zeros', (['bsize'], {'dtype': 'np.int32'}), '(bsize, dtype=np.int32)\n', (6270, 6293), True, 'import numpy as np\n'), ((6324, 6357), 'numpy.zeros', 'np.zeros', (['bsize'], {'dtype': 'np.float32'}), '(bsize, dtype=np.float32)\n', (6332, 6357), True, 'import numpy as np\n'), ((505, 528), 'numpy.sum', 'np.sum', (['(pc ** 2)'], {'axis': '(1)'}), '(pc ** 2, axis=1)\n', (511, 528), True, 'import numpy as np\n'), ((2792, 2843), 'provider.rotate_point_cloud_with_normal', 'provider.rotate_point_cloud_with_normal', (['batch_data'], {}), '(batch_data)\n', (2831, 2843), False, 'import provider\n'), ((2871, 2937), 
'provider.rotate_perturbation_point_cloud_with_normal', 'provider.rotate_perturbation_point_cloud_with_normal', (['rotated_data'], {}), '(rotated_data)\n', (2923, 2937), False, 'import provider\n'), ((2979, 3018), 'provider.rotate_point_cloud', 'provider.rotate_point_cloud', (['batch_data'], {}), '(batch_data)\n', (3006, 3018), False, 'import provider\n'), ((3046, 3100), 'provider.rotate_perturbation_point_cloud', 'provider.rotate_perturbation_point_cloud', (['rotated_data'], {}), '(rotated_data)\n', (3086, 3100), False, 'import provider\n'), ((3536, 3573), 'provider.shuffle_points', 'provider.shuffle_points', (['rotated_data'], {}), '(rotated_data)\n', (3559, 3573), False, 'import provider\n'), ((4861, 4935), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['point_set[:, cat_ind]'], {'num_classes': 'cat_size'}), '(point_set[:, cat_ind], num_classes=cat_size)\n', (4890, 4935), True, 'import tensorflow as tf\n'), ((4960, 5045), 'numpy.concatenate', 'np.concatenate', (['[point_set[:, :cat_ind], cat, point_set[:, cat_ind + 1:]]'], {'axis': '(1)'}), '([point_set[:, :cat_ind], cat, point_set[:, cat_ind + 1:]],\n axis=1)\n', (4974, 5045), True, 'import numpy as np\n'), ((5095, 5140), 'numpy.concatenate', 'np.concatenate', (['[point_set, self.n_c]'], {'axis': '(1)'}), '([point_set, self.n_c], axis=1)\n', (5109, 5140), True, 'import numpy as np\n'), ((5658, 5686), 'numpy.random.shuffle', 'np.random.shuffle', (['self.idxs'], {}), '(self.idxs)\n', (5675, 5686), True, 'import numpy as np\n'), ((4566, 4616), 'numpy.random.choice', 'np.random.choice', (['ind', 'self.npoints'], {'replace': '(False)'}), '(ind, self.npoints, replace=False)\n', (4582, 4616), True, 'import numpy as np\n'), ((4658, 4707), 'numpy.random.choice', 'np.random.choice', (['ind', 'self.npoints'], {'replace': '(True)'}), '(ind, self.npoints, replace=True)\n', (4674, 4707), True, 'import numpy as np\n'), ((3858, 3873), 'numpy.array', 'np.array', (['[cls]'], {}), '([cls])\n', (3866, 
3873), True, 'import numpy as np\n'), ((3955, 3969), 'numpy.load', 'np.load', (['fn[1]'], {}), '(fn[1])\n', (3962, 3969), True, 'import numpy as np\n'), ((4086, 4212), 'numpy.concatenate', 'np.concatenate', (['[point_set[:, :self.omit_parameters_ranges[i - 1]], point_set[:, self.\n omit_parameters_ranges[i]:]]'], {'axis': '(1)'}), '([point_set[:, :self.omit_parameters_ranges[i - 1]],\n point_set[:, self.omit_parameters_ranges[i]:]], axis=1)\n', (4100, 4212), True, 'import numpy as np\n'), ((4301, 4315), 'numpy.load', 'np.load', (['fn[1]'], {}), '(fn[1])\n', (4308, 4315), True, 'import numpy as np\n'), ((1303, 1321), 'numpy.arange', 'np.arange', (['npoints'], {}), '(npoints)\n', (1312, 1321), True, 'import numpy as np\n'), ((2180, 2217), 'glob.glob', 'glob', (["(self.root + f'/*/{split}/*.npy')"], {}), "(self.root + f'/*/{split}/*.npy')\n", (2184, 2217), False, 'from glob import glob\n'), ((6867, 6883), 'collections.Counter', 'Counter', (['classes'], {}), '(classes)\n', (6874, 6883), False, 'from collections import Counter\n'), ((1423, 1444), 'os.listdir', 'os.listdir', (['self.root'], {}), '(self.root)\n', (1433, 1444), False, 'import os\n')] |
import setting.constant as const
import numpy as np
import cv2
def overlay(image, layer):
if (len(layer.shape) == 2):
layer = cv2.cvtColor(layer, cv2.COLOR_GRAY2BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
layer = cv2.cvtColor(layer, cv2.COLOR_BGR2BGRA)
layer[np.where((layer == [0,0,0,255]).all(axis=2))] = const.BACKGROUND_COLOR + [255]
layer[np.where((layer == [255,255,255,255]).all(axis=2))] = const.SEGMENTATION_COLOR + [255]
layer = cv2.addWeighted(image, 0.6, layer, 0.4, 0)
return layer
def light(image, bright, contrast):
bright = bright * 1.2
contrast = contrast * 2
image = image * ((contrast/127)+1) - contrast + bright
image = np.clip(image, 0, 255)
return np.uint8(image)
def threshold(image, min_limit=None, max_limit=255, clip=0):
if min_limit is None:
min_limit = int(np.mean(image) + clip)
_, image = cv2.threshold(image, min_limit, max_limit, cv2.THRESH_BINARY)
return np.uint8(image)
def gauss_filter(image, kernel=(3,3), iterations=1):
for _ in range(iterations):
image = cv2.GaussianBlur(image, kernel, 0)
return np.uint8(image)
def median_filter(image, kernel=3, iterations=1):
for _ in range(iterations):
image = cv2.medianBlur(image, kernel, 0)
return np.uint8(image)
def equalize_light(image, limit=3, grid=(7,7), gray=False):
if (len(image.shape) == 2):
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
gray = True
clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=grid)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
if gray:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return np.uint8(image)
def back_in_black(image):
image = light(image.copy(), bright=120, contrast=60)
black_level = 0
for x in range(6):
bi = threshold(image, clip=-x)
if (bi==0).sum() > (bi==255).sum():
black_level += 1
return (black_level > 3)
######### Unused #########
def edges(image, threshold1=250, threshold2=350, kernel=3):
image = cv2.Canny(image, threshold1, threshold2, kernel)
image = cv2.bitwise_not(image)
return np.uint8(image)
def equalize_hist(image):
image = cv2.equalizeHist(image)
return np.uint8(image)
def otsu(img):
hist = np.zeros(256, dtype=int)
for y in range(len(img)):
for x in range(len(img[0])):
hist[int(img[y,x])] += 1
total = (len(img) * len(img[0]))
current_max, threshold = 0, 0
sumT, sumF, sumB = 0, 0, 0
weightB, weightF = 0, 0
varBetween, meanB, meanF = 0, 0, 0
for i in range(0,256):
sumT += (i * hist[i])
for i in range(0,256):
weightB += hist[i]
weightF = total - weightB
if (weightF <= 0):
break
if (weightB <= 0):
weightB = 1
sumB += (i * hist[i])
sumF = sumT - sumB
meanB = sumB/weightB
meanF = sumF/weightF
varBetween = (weightB * weightF)
varBetween *= (meanB-meanF) * (meanB-meanF)
if (varBetween > current_max):
current_max = varBetween
threshold = i
img[img <= threshold] = 0
img[img > threshold] = 255
return np.array(img, dtype=np.uint8) | [
"cv2.equalizeHist",
"cv2.Canny",
"numpy.uint8",
"cv2.bitwise_not",
"cv2.GaussianBlur",
"cv2.medianBlur",
"cv2.cvtColor",
"cv2.threshold",
"numpy.zeros",
"numpy.clip",
"cv2.addWeighted",
"cv2.split",
"numpy.array",
"numpy.mean",
"cv2.createCLAHE",
"cv2.merge"
] | [((196, 235), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2BGRA'], {}), '(image, cv2.COLOR_BGR2BGRA)\n', (208, 235), False, 'import cv2\n'), ((248, 287), 'cv2.cvtColor', 'cv2.cvtColor', (['layer', 'cv2.COLOR_BGR2BGRA'], {}), '(layer, cv2.COLOR_BGR2BGRA)\n', (260, 287), False, 'import cv2\n'), ((487, 529), 'cv2.addWeighted', 'cv2.addWeighted', (['image', '(0.6)', 'layer', '(0.4)', '(0)'], {}), '(image, 0.6, layer, 0.4, 0)\n', (502, 529), False, 'import cv2\n'), ((709, 731), 'numpy.clip', 'np.clip', (['image', '(0)', '(255)'], {}), '(image, 0, 255)\n', (716, 731), True, 'import numpy as np\n'), ((743, 758), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (751, 758), True, 'import numpy as np\n'), ((910, 971), 'cv2.threshold', 'cv2.threshold', (['image', 'min_limit', 'max_limit', 'cv2.THRESH_BINARY'], {}), '(image, min_limit, max_limit, cv2.THRESH_BINARY)\n', (923, 971), False, 'import cv2\n'), ((983, 998), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (991, 998), True, 'import numpy as np\n'), ((1147, 1162), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (1155, 1162), True, 'import numpy as np\n'), ((1306, 1321), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (1314, 1321), True, 'import numpy as np\n'), ((1508, 1559), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': 'limit', 'tileGridSize': 'grid'}), '(clipLimit=limit, tileGridSize=grid)\n', (1523, 1559), False, 'import cv2\n'), ((1570, 1608), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2LAB'], {}), '(image, cv2.COLOR_BGR2LAB)\n', (1582, 1608), False, 'import cv2\n'), ((1623, 1637), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (1632, 1637), False, 'import cv2\n'), ((1674, 1695), 'cv2.merge', 'cv2.merge', (['(cl, a, b)'], {}), '((cl, a, b))\n', (1683, 1695), False, 'import cv2\n'), ((1707, 1744), 'cv2.cvtColor', 'cv2.cvtColor', (['limg', 'cv2.COLOR_LAB2BGR'], {}), '(limg, cv2.COLOR_LAB2BGR)\n', (1719, 1744), False, 'import cv2\n'), 
((1827, 1842), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (1835, 1842), True, 'import numpy as np\n'), ((2216, 2264), 'cv2.Canny', 'cv2.Canny', (['image', 'threshold1', 'threshold2', 'kernel'], {}), '(image, threshold1, threshold2, kernel)\n', (2225, 2264), False, 'import cv2\n'), ((2277, 2299), 'cv2.bitwise_not', 'cv2.bitwise_not', (['image'], {}), '(image)\n', (2292, 2299), False, 'import cv2\n'), ((2311, 2326), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (2319, 2326), True, 'import numpy as np\n'), ((2366, 2389), 'cv2.equalizeHist', 'cv2.equalizeHist', (['image'], {}), '(image)\n', (2382, 2389), False, 'import cv2\n'), ((2401, 2416), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (2409, 2416), True, 'import numpy as np\n'), ((2444, 2468), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'int'}), '(256, dtype=int)\n', (2452, 2468), True, 'import numpy as np\n'), ((3374, 3403), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (3382, 3403), True, 'import numpy as np\n'), ((139, 178), 'cv2.cvtColor', 'cv2.cvtColor', (['layer', 'cv2.COLOR_GRAY2BGR'], {}), '(layer, cv2.COLOR_GRAY2BGR)\n', (151, 178), False, 'import cv2\n'), ((1101, 1135), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', 'kernel', '(0)'], {}), '(image, kernel, 0)\n', (1117, 1135), False, 'import cv2\n'), ((1262, 1294), 'cv2.medianBlur', 'cv2.medianBlur', (['image', 'kernel', '(0)'], {}), '(image, kernel, 0)\n', (1276, 1294), False, 'import cv2\n'), ((1431, 1470), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2BGR'], {}), '(image, cv2.COLOR_GRAY2BGR)\n', (1443, 1470), False, 'import cv2\n'), ((1775, 1814), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1787, 1814), False, 'import cv2\n'), ((871, 885), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (878, 885), True, 'import numpy as np\n')] |
import sys
sys.path.append("..")
sys.path.append("../..")
import numpy as np
import os
import argparse
from sklearn.metrics import accuracy_score
import tensorflow as tf
from data.synthetic_dataset_wt_conf_za_link import SyntheticDataset, confounder_monotonicities_1,get_subportion_confounders
import pandas as pd
from models_synthetic import Twin_Net_with_Z_A, dice_loss,Twin_Net,class_loss
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
import copy
from utils import pickle_object, read_pickle_object
def get_test_confs(dataset, args, N=None, mode='test'):
    """Build the list of confounder arrays fed to the twin network.

    Modes:
      'test'           -- use the test split as-is; one array per column when
                          args.multiple_confounders, else a single 2-D array.
      'sample'         -- draw N fresh confounder samples from the dataset.
      'dataset_median' -- repeat each confounder's test-split median N times.

    Any other mode leaves the result unbound and raises NameError,
    matching the original behavior.
    """
    if mode == 'test':
        confs = ([dataset.test[col].values.astype(np.float32) for col in dataset.test.columns]
                 if args.multiple_confounders
                 else [dataset.test.values.astype(np.float32)])
    elif mode == 'sample':
        confs = [dataset.get_z_samples(N, args.p_2)]
    elif mode == 'dataset_median':
        # 'all' expands to every training column (mutates args.confounders).
        if 'all' in args.confounders:
            args.confounders = dataset.train.columns
        confs = [np.tile(dataset.test[col].median(), N) for col in args.confounders]
        if not args.multiple_confounders:
            confs = np.array(confs).T
    return confs
def prob_nec(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
             outcomes_counter, args):
    """Estimate the probability of necessity from the twin network.

    Both arms are predicted in one call; each prediction head is min-max
    rescaled, thresholded at 0.5 into hard labels, and the estimate is the
    fraction of samples matching the factual outcome whose counterfactual
    prediction flips to `outcomes_counter`.

    Raises ZeroDivisionError when no sample matches the factual outcome.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input, conf_to_input],
                          args.batch_size, 1)
    factual_scores, counter_scores = preds[0], preds[1]
    # Rescale each head to [0, 1] and binarise at 0.5.
    scaler = MinMaxScaler()
    factual_bin = np.where(scaler.fit_transform(factual_scores) >= 0.5, 1.0, 0.0)
    counter_bin = np.where(scaler.fit_transform(counter_scores) >= 0.5, 1.0, 0.0)
    given = np.where(factual_bin == outcomes_factual)[0]
    query = np.where(counter_bin == outcomes_counter)[0]
    flipped = set(given) & set(query)
    return len(flipped) / len(given)
def prob_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
             outcomes_counter, args):
    """Estimate the probability of sufficiency from the twin network.

    Predictions from both arms are min-max rescaled and thresholded at 0.5;
    the estimate is the fraction of samples matching the factual outcome
    whose counterfactual prediction switches to `outcomes_counter`.

    Raises ZeroDivisionError when no sample matches the factual outcome.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input, conf_to_input],
                          args.batch_size, 1)
    factual_scores, counter_scores = preds[0], preds[1]
    # Rescale each head to [0, 1] and binarise at 0.5.
    scaler = MinMaxScaler()
    factual_bin = np.where(scaler.fit_transform(factual_scores) >= 0.5, 1.0, 0.0)
    counter_bin = np.where(scaler.fit_transform(counter_scores) >= 0.5, 1.0, 0.0)
    given = np.where(factual_bin == outcomes_factual)[0]
    query = np.where(counter_bin == outcomes_counter)[0]
    switched = set(given).intersection(query)
    return len(switched) / len(given)
def prob_nec_and_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                     outcomes_counter, args):
    """Estimate the probability of necessity AND sufficiency.

    Same binarisation as prob_nec/prob_suf, but the joint event count
    (factual matches `outcomes_factual` AND counterfactual matches
    `outcomes_counter`) is normalised by the total number of samples
    rather than by the conditioning set.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input, conf_to_input],
                          args.batch_size, 1)
    factual_scores, counter_scores = preds[0], preds[1]
    # Rescale each head to [0, 1] and binarise at 0.5.
    scaler = MinMaxScaler()
    factual_bin = np.where(scaler.fit_transform(factual_scores) >= 0.5, 1.0, 0.0)
    counter_bin = np.where(scaler.fit_transform(counter_scores) >= 0.5, 1.0, 0.0)
    given = np.where(factual_bin == outcomes_factual)[0]
    query = np.where(counter_bin == outcomes_counter)[0]
    joint = set(given).intersection(query)
    # Normalised over ALL samples, not only the conditioning set.
    return len(joint) / len(factual_bin)
def ate(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
        outcomes_counter, args):
    """Estimate the average treatment effect as the difference between the
    counterfactual and factual positive-prediction rates.

    `outcomes_factual`/`outcomes_counter` are accepted for signature
    symmetry with the other estimators but are not used here.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input, conf_to_input],
                          args.batch_size, 1)
    factual_scores, counter_scores = preds[0], preds[1]
    # Rescale each head to [0, 1] and binarise at 0.5.
    scaler = MinMaxScaler()
    factual_bin = np.where(scaler.fit_transform(factual_scores) >= 0.5, 1.0, 0.0)
    counter_bin = np.where(scaler.fit_transform(counter_scores) >= 0.5, 1.0, 0.0)
    rate_factual = np.sum(factual_bin == 1) / len(factual_bin)
    rate_counter = np.sum(counter_bin == 1) / len(counter_bin)
    return rate_counter - rate_factual
def calc_ate(dataset):
    """Compute a naive ATE directly from a DataFrame with columns
    Y, T, Y_prime, T_prime.

    Positive outcomes under treatment T==1 ('heavier') and T==0 ('lighter')
    are counted across both the factual and counterfactual columns; the ATE
    is the difference of their rates over the full table.
    """
    def _positive_rows(outcome_col, treat_col, t_val):
        # Index values where the outcome is 1 under the given treatment arm.
        mask = (dataset[outcome_col] == 1) & (dataset[treat_col] == t_val)
        return dataset.index[mask].values

    lighter = np.hstack((_positive_rows('Y', 'T', 0),
                          _positive_rows('Y_prime', 'T_prime', 0)))
    heavier = np.hstack((_positive_rows('Y', 'T', 1),
                          _positive_rows('Y_prime', 'T_prime', 1)))
    prob_lighter = len(lighter) / len(dataset)
    prob_heavier = len(heavier) / len(dataset)
    return prob_heavier - prob_lighter
def run_inference(args):
    """Load a trained twin network, evaluate it on the synthetic test split
    and report accuracy/F1 plus counterfactual probability estimates.

    Returns the 5-tuple from calc_probs() (PN, PS, PNS, ATE, gt) when
    args.train is falsy; otherwise runs calc_probs() for its prints only.
    """
    # Build the dataset and convert the raw dict splits into DataFrames.
    dataset = SyntheticDataset(**vars(args))
    dataset.train = pd.DataFrame.from_dict(dataset.train)
    dataset.test = pd.DataFrame.from_dict(dataset.test)
    # Drop propensity-matching columns when present; the bare except keeps
    # datasets that were never propensity-matched working.
    try:
        dataset.train.drop(['propensity_score', 'matched_element', 'propensity_score_logit'], axis=1, inplace=True)
        dataset.test.drop(['propensity_score', 'matched_element', 'propensity_score_logit'], axis=1, inplace=True)
    except:
        print('Not propensity')
    # Split outcome / treatment / exogenous-noise columns out of the frames.
    target = dataset.train.pop('Y')
    target_prime = dataset.train.pop('Y_prime')
    treatment = dataset.train.pop('X')
    uy = dataset.train.pop('Uy')
    dataset.test = dataset.test.reset_index().drop(['index'], axis=1)
    target_test = dataset.test.pop('Y')
    target_prime_test = dataset.test.pop('Y_prime')
    treatment_test = dataset.test.pop('X')
    treatment_prime_test = dataset.test.pop('X_prime')
    uy_test = dataset.test.pop('Uy')
    # Get confounders
    dataset.train = get_subportion_confounders(dataset.train, args.confounders)
    dataset.test = get_subportion_confounders(dataset.test, args.confounders)
    args.len_conf = len(dataset.train.columns)
    # Configure per-confounder monotonicity constraints and lattice sizes
    # (mutates args so the model constructor below can read them).
    if args.multiple_confounders:
        args.z_monotonicity = []
        for i, col in enumerate(dataset.train.columns):
            args.z_monotonicity.append(args.z_monotonicity_base[col])
            args.lattice_sizes.append(args.z_calib_units)
        input_len = args.len_conf + 2
    else:
        input_len = 3
        args.z_monotonicity = [args.z_monotonicity]
        args.lattice_sizes.append(2)
    # Architecture is selected from the run-directory name.
    if 'za' in args.runPath:
        model = Twin_Net_with_Z_A(treatment, uy, dataset.train, args)
    else:
        model = Twin_Net(treatment, uy, dataset.train, args)
    # Set up loss
    if 'mse' in args.loss:
        loss_func = tf.keras.losses.mean_squared_error
    elif 'mae' in args.loss:
        loss_func = tf.keras.losses.mean_absolute_error
    elif 'bce' in args.loss:
        loss_func = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    elif 'dice' in args.loss:
        loss_func = dice_loss
    # NOTE(review): 'weighted_loss' is a non-empty string literal, so this
    # condition is always true and the branch acts as a catch-all `else`;
    # likely intended `'weighted' in args.loss`. Left unchanged here.
    elif 'weighted_loss':
        # Inverse-frequency class weights computed over both twin outputs.
        negs = np.sum(target == 0)
        negs += np.sum(target_prime == 0)
        pos = np.sum(target == 1)
        pos += np.sum(target_prime == 1)
        weight_for_0 = (1 / negs) * (len(dataset.train) * 2) / 2.0
        weight_for_1 = (1 / pos) * (len(dataset.train) * 2) / 2.0
        loss_func = class_loss(np.array([weight_for_0, weight_for_1]))
    if args.weighted_loss:
        model.compile(
            loss=[loss_func, loss_func],
            loss_weights=[args.weight_1, args.weight_2],
        )
    else:
        model.compile(
            loss=loss_func)
    print('-------------------------Experiment: {} ---------------------'.format(args.inference_name))
    # Build the graph and restore the best checkpoint for this run.
    model.build((1, input_len))
    model.load_weights(args.runPath + '/best')
    conf_to_input = [dataset.test.values.astype(np.float32)]
    if args.multiple_confounders:
        conf_to_input = [dataset.test[i].values.astype(np.float32) for i in dataset.test.columns]
    test_loss = model.evaluate(
        [treatment_test.values.astype(np.float32), treatment_prime_test.values.astype(np.float32),
         uy_test.values.astype(np.float32),
         conf_to_input],
        [target_test[..., np.newaxis], target_prime_test[..., np.newaxis]])
    print('Test Loss : {}'.format(test_loss))
    preds = model.predict([treatment_test.values.astype(np.float32), treatment_prime_test.values.astype(np.float32),
                           uy_test.values.astype(np.float32), conf_to_input],
                          args.batch_size, 1)
    title = ['Factual', 'Counterfactual']
    # Only the first two heads (factual / counterfactual outcome) are scored.
    preds = preds[0:2]
    for i, pred in enumerate(preds):
        # Min-max rescale, then threshold at 0.5 into hard labels.
        # NOTE(review): both heads are scored against target_test (the factual
        # label); the counterfactual head arguably belongs against
        # target_prime_test -- confirm intent before changing.
        scaler = MinMaxScaler()
        pred = scaler.fit_transform(pred)
        pred[pred >= 0.5] = 1
        pred[pred < 0.5] = 0
        ac = accuracy_score(pred, target_test)
        print('{} Acc: {}'.format(title[i], ac))
        f1 = f1_score(target_test.values, pred)
        print('{} F1 : {}'.format(title[i], f1))
    if not args.train:
        prob_necessities,prob_sufficiencies, prob_both , ate_, gt = calc_probs(args, treatment_test, dataset, model)
        return prob_necessities,prob_sufficiencies, prob_both, ate_, gt
    else:
        calc_probs(args, treatment_test, dataset, model)
def calc_probs(args, treatment_test, dataset, model):
    """Query the twin network for probability of necessity, sufficiency,
    necessity-and-sufficiency, and ATE on the test confounders.

    Returns (PN, PS, PNS, ATE, gt) when args.train is falsy; `gt` is the
    ground-truth value returned by the sampler in 'p_test' mode and None
    otherwise. When args.train is truthy the function only prints and
    returns None implicitly.

    Note: statement order matters -- each dataset.get_uy_samples() call
    consumes the random stream, so the sampling sequence is part of the
    observable behavior.
    """
    # ############ Prob of Necessity: factual arm T=1/Y=1 ############
    N = len(treatment_test)
    treatment_factual = np.ones(N)
    treatment_counter = np.zeros(N)
    outcomes_factual = 1
    outcomes_counter = 0
    gt = None
    if args.u_distribution == 'p_test':
        # In p_test mode the sampler also returns the ground-truth values.
        conf_to_input = get_test_confs(dataset, args, mode='test')
        uy_samples, gt = dataset.get_uy_samples(N,args.p,conf_to_input[0])
    else:
        uy_samples = dataset.get_uy_samples(N)
    uy_to_input = uy_samples
    conf_to_input = get_test_confs(dataset, args, N ,mode='test')
    prob_necessity_1 = prob_nec(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input,
                                outcomes_factual, outcomes_counter, args)
    print('Test Probability of Necessity : {}'.format(prob_necessity_1))
    # ##################### Prob of Sufficiency #####################
    N = len(treatment_test)
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    outcomes_factual = 0
    outcomes_counter = 1
    conf_to_input = get_test_confs(dataset, args, mode='test')
    prob_suf_1 = prob_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                          outcomes_counter, args)
    print('Test Probability of Sufficiency : {}'.format(prob_suf_1))
    # ############ PROB of Nec and Suf ##################
    N = len(treatment_test)
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    outcomes_factual = 0
    outcomes_counter = 1
    conf_to_input = get_test_confs(dataset, args, mode='test')
    prob_nec_suf_1 = prob_nec_and_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                          outcomes_counter, args)
    print('Test Probability of Necessity and Sufficiency : {}'.format(prob_nec_suf_1))
    # ##################### ATE: resample noise sized to the confounders #####################
    conf_to_input = get_test_confs(dataset, args, mode='test')
    N = len(conf_to_input[0])
    if args.u_distribution == 'p_test':
        conf_to_input = get_test_confs(dataset, args, mode='test')
        uy_samples, _ = dataset.get_uy_samples(N, args.p, conf_to_input[0])
    else:
        uy_samples = dataset.get_uy_samples(N)
    uy_to_input = uy_samples
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    outcomes_factual = 0
    outcomes_counter = 1
    ate_ = ate(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input,
               outcomes_factual,
               outcomes_counter, args)
    print('Test ATE : {}'.format(ate_))
    if not args.train:
        return (prob_necessity_1), (prob_suf_1), (prob_nec_suf_1), ate_, gt
if __name__ == '__main__':
    # NOTE(review): argparse `type=bool` treats ANY non-empty string as True
    # (e.g. `--train False` -> True); kept as-is for CLI compatibility.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=bool, default=False)
    parser.add_argument('--inference_name',
                        default='twin_net_arch_za_lattice_none_uy_monotonicity_none_z_monoton_opt_1_z_layer_multiple_calib_units_3_3_z_3_lr_0_001_loss_mse_Synthetics_with_2_normal_uniform_bernouli_0_05_confounders_1_confs')
    parser.add_argument('--prob_type', default='paper')
    # Logging
    parser.add_argument('--restore', type=bool, default=False)
    parser.add_argument('--log_root', type=str, default='./experiments/Synthetic_wt_conf/')
    # Dataset Hparams
    parser.add_argument('--save_path', default='./data/Datasets/')
    parser.add_argument('--save_name', default='kenyan_water_proc.pkl')
    parser.add_argument('--save_dataset', default=False)
    parser.add_argument('--load_dataset', default=True)
    parser.add_argument('--dataset_mode', type=str, default='synthetic')
    parser.add_argument('--path_to_data',
                        default = '../data/Datasets/synthetic_dataset_200000_samples_X_{}_Uy_{}_Z_{}_with_counterfactual_and_z_a_link_2_final.pkl')
    parser.add_argument('--confounders',default=['Z'])
    parser.add_argument('--u_distribution', default='normal')
    parser.add_argument('--z_distribution', default='uniform')
    parser.add_argument('--x_distribution', default='bernouli')
    parser.add_argument('--p_1', type=float, default=0.05)
    parser.add_argument('--p_2', type=float, default=0.7)
    parser.add_argument('--p_3', type=float, default=0.2)
    # Model Hparams
    parser.add_argument('--lattice_sizes', default=[3, 3])
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--lattice_units', type=int, default=1)  # 1 or 2
    parser.add_argument('--hidden_dims', type=int, default=3)
    parser.add_argument('--calib_units', type=int, default=3)
    parser.add_argument('--z_calib_units', type=int, default=3)
    parser.add_argument('--layer', default='lattice')
    parser.add_argument('--uy_layer', default='none')
    parser.add_argument('--z_layer', default='none')
    parser.add_argument('--uy_monotonicity', default='none')
    parser.add_argument('--z_monotonicity', default='none')
    parser.add_argument('--z_monot_opt', default=1)
    parser.add_argument('--concats', type=bool, default=False)
    parser.add_argument('--end_activation', default='none')
    parser.add_argument('--loss', default='mse')
    parser.add_argument('--weighted_loss',default=False)
    parser.add_argument('--weight_1',type=float,default=1)
    parser.add_argument('--weight_2',type=float,default=1)
    parser.add_argument('--multiple_confounders', default=True, help='split confounders')
    # General
    parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--workers', type=int, default=0)
    args = parser.parse_args()
    # GPU setup
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Set Randomness (seed 0 means "draw a fresh random seed").
    if args.seed == 0: args.seed = int(np.random.randint(0, 2 ** 32 - 1, (1,)))
    print('seed', args.seed)
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    # Set logdirs: encode the distribution parameters into the dataset path.
    z_dist = '{}_{}'.format(args.z_distribution, args.p_2,
                            args.p_2) if args.z_distribution == 'bernouli' else '{}'.format(
        args.z_distribution)
    x_dist = '{}_{}'.format(args.x_distribution, args.p_1,
                            args.p_1) if args.x_distribution == 'bernouli' else '{}'.format(
        args.x_distribution)
    args.path_to_data = args.load_path = args.path_to_data.format(x_dist, args.u_distribution, z_dist)
    args.runPath = os.path.join(args.log_root, args.inference_name)
    if args.multiple_confounders:
        z_monotonicity = 'opt_{}'.format(args.z_monot_opt)
        # eval() resolves the imported confounder_monotonicities_<opt> table;
        # the argument is an internal option value, not user-supplied code.
        args.z_monotonicity_base = eval('confounder_monotonicities_{}'.format(args.z_monot_opt))
    else:
        z_monotonicity = args.z_monotonicity
        args.z_calib_units = len(args.confounders)+1 if 'all' not in args.confounders else 2
    if not args.train:
        p_test = False
        if not p_test:
            # Repeat inference 19 times and aggregate the probability estimates.
            nec_test_prob = []
            nec_dataset_prob = []
            suf_test_prob = []
            suf_dataset_prob = []
            nec_and_suf_test_prob = []
            nec_and_suf_dataset_prob = []
            ates = []
            for i in range(1, 20):
                # FIX: run_inference returns 5 values (PN, PS, PNS, ATE, gt);
                # the original unpacked only 4 and raised ValueError here.
                prob_necessities, prob_sufficiencies, prob_both, ate_, _gt = run_inference(copy.deepcopy(args))
                nec_test_prob.append(prob_necessities)
                suf_test_prob.append(prob_sufficiencies)
                nec_and_suf_test_prob.append(prob_both)
                ates.append(ate_)
            print('\n \nTest average Prob of Necessity {}, std: {}'.format(np.array(nec_test_prob).mean(), np.array(nec_test_prob).std()))
            print('Test average Prob of Sufficiency {}, std: {}'.format(np.array(suf_test_prob).mean(),
                                                                         np.array(suf_test_prob).std()))
            print('Test average Prob of Necessity and Sufficiency {}, std: {}'.format(np.array(nec_and_suf_test_prob).mean(),
                                                                                       np.array(nec_and_suf_test_prob).std()))
            print('Test ATE mean {}, std: {} \n \n'.format(
                np.array(ates).mean(),
                np.array(ates).std()))
        else:
            # Sweep the noise parameter p and record model estimates next to
            # the sampler's ground truth.
            args.u_distribution = 'p_test'
            probs = np.zeros((20, 10, 3))
            gts = np.zeros((20, 10, 3))
            for i in range(0, 20):
                for j, p in enumerate(np.linspace(0.05, 0.95, 10)):
                    args.p = p
                    n, s, ns, ate_, gt = run_inference(copy.deepcopy(args))
                    probs[i, j, 0] = n
                    probs[i, j, 1] = s
                    probs[i, j, 2] = ns
                    gts[i, j, 0] = gt[0]
                    gts[i, j, 1] = gt[1]
                    gts[i, j, 2] = gt[2]
            pn = np.array(probs[:, :, 0])
            ps = np.array(probs[:, :, 1])
            pns = np.array(probs[:, :, 2])
            print('\n Prob of Necessity {} ; std: {}'.format(pn.mean(), pn.std()))
            print('Prob of Sufficiency {} ; std: {}'.format(ps.mean(), ps.std()))
            print('Prob of Necessity & Sufficiency {} ; std: {}'.format(pns.mean(), pns.std()))
            to_save = {
                'pn': pn,
                'ps': ps,
                'pns': pns,
                'gt_n': gts[:, :, 0],
                # FIX: the ground-truth PS/PNS slices previously all pointed
                # at index 0 (copy-paste error); use slices 1 and 2.
                'gt_s': gts[:, :, 1],
                'gt_ns': gts[:, :, 2]
            }
            pickle_object(to_save, './experiments/p_test_conf.npz')
    else:
        run_inference(args)
| [
"tensorflow.random.set_seed",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.MinMaxScaler",
"numpy.ones",
"data.synthetic_dataset_wt_conf_za_link.get_subportion_confounders",
"sklearn.metrics.f1_score",
"numpy.random.randint",
... | [((12, 33), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (27, 33), False, 'import sys\n'), ((34, 58), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (49, 58), False, 'import sys\n'), ((1653, 1667), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1665, 1667), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2536, 2550), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2548, 2550), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3419, 3433), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3431, 3433), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4291, 4305), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4303, 4305), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4857, 4875), 'numpy.hstack', 'np.hstack', (['(a, ab)'], {}), '((a, ab))\n', (4866, 4875), True, 'import numpy as np\n'), ((5048, 5066), 'numpy.hstack', 'np.hstack', (['(a, ab)'], {}), '((a, ab))\n', (5057, 5066), True, 'import numpy as np\n'), ((5305, 5342), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dataset.train'], {}), '(dataset.train)\n', (5327, 5342), True, 'import pandas as pd\n'), ((5362, 5398), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dataset.test'], {}), '(dataset.test)\n', (5384, 5398), True, 'import pandas as pd\n'), ((6183, 6242), 'data.synthetic_dataset_wt_conf_za_link.get_subportion_confounders', 'get_subportion_confounders', (['dataset.train', 'args.confounders'], {}), '(dataset.train, args.confounders)\n', (6209, 6242), False, 'from data.synthetic_dataset_wt_conf_za_link import SyntheticDataset, confounder_monotonicities_1, get_subportion_confounders\n'), ((6262, 6320), 'data.synthetic_dataset_wt_conf_za_link.get_subportion_confounders', 'get_subportion_confounders', (['dataset.test', 'args.confounders'], {}), '(dataset.test, 
args.confounders)\n', (6288, 6320), False, 'from data.synthetic_dataset_wt_conf_za_link import SyntheticDataset, confounder_monotonicities_1, get_subportion_confounders\n'), ((9668, 9678), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (9675, 9678), True, 'import numpy as np\n'), ((9703, 9714), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (9711, 9714), True, 'import numpy as np\n'), ((10499, 10510), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (10507, 10510), True, 'import numpy as np\n'), ((10535, 10545), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (10542, 10545), True, 'import numpy as np\n'), ((10999, 11010), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (11007, 11010), True, 'import numpy as np\n'), ((11035, 11045), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11042, 11045), True, 'import numpy as np\n'), ((11820, 11831), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (11828, 11831), True, 'import numpy as np\n'), ((11856, 11866), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11863, 11866), True, 'import numpy as np\n'), ((12309, 12334), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12332, 12334), False, 'import argparse\n'), ((15579, 15604), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15593, 15604), True, 'import numpy as np\n'), ((15609, 15638), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['args.seed'], {}), '(args.seed)\n', (15627, 15638), True, 'import tensorflow as tf\n'), ((16339, 16387), 'os.path.join', 'os.path.join', (['args.log_root', 'args.inference_name'], {}), '(args.log_root, args.inference_name)\n', (16351, 16387), False, 'import os\n'), ((1885, 1916), 'numpy.where', 'np.where', (['(y == outcomes_factual)'], {}), '(y == outcomes_factual)\n', (1893, 1916), True, 'import numpy as np\n'), ((1940, 1977), 'numpy.where', 'np.where', (['(y_prime == outcomes_counter)'], {}), '(y_prime == outcomes_counter)\n', (1948, 1977), True, 'import numpy as np\n'), ((2768, 
2799), 'numpy.where', 'np.where', (['(y == outcomes_factual)'], {}), '(y == outcomes_factual)\n', (2776, 2799), True, 'import numpy as np\n'), ((2823, 2860), 'numpy.where', 'np.where', (['(y_prime == outcomes_counter)'], {}), '(y_prime == outcomes_counter)\n', (2831, 2860), True, 'import numpy as np\n'), ((3651, 3682), 'numpy.where', 'np.where', (['(y == outcomes_factual)'], {}), '(y == outcomes_factual)\n', (3659, 3682), True, 'import numpy as np\n'), ((3706, 3743), 'numpy.where', 'np.where', (['(y_prime == outcomes_counter)'], {}), '(y_prime == outcomes_counter)\n', (3714, 3743), True, 'import numpy as np\n'), ((4523, 4537), 'numpy.sum', 'np.sum', (['(y == 1)'], {}), '(y == 1)\n', (4529, 4537), True, 'import numpy as np\n'), ((4566, 4586), 'numpy.sum', 'np.sum', (['(y_prime == 1)'], {}), '(y_prime == 1)\n', (4572, 4586), True, 'import numpy as np\n'), ((6827, 6880), 'models_synthetic.Twin_Net_with_Z_A', 'Twin_Net_with_Z_A', (['treatment', 'uy', 'dataset.train', 'args'], {}), '(treatment, uy, dataset.train, args)\n', (6844, 6880), False, 'from models_synthetic import Twin_Net_with_Z_A, dice_loss, Twin_Net, class_loss\n'), ((6907, 6951), 'models_synthetic.Twin_Net', 'Twin_Net', (['treatment', 'uy', 'dataset.train', 'args'], {}), '(treatment, uy, dataset.train, args)\n', (6915, 6951), False, 'from models_synthetic import Twin_Net_with_Z_A, dice_loss, Twin_Net, class_loss\n'), ((8968, 8982), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (8980, 8982), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((9098, 9131), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'target_test'], {}), '(pred, target_test)\n', (9112, 9131), False, 'from sklearn.metrics import accuracy_score\n'), ((9194, 9228), 'sklearn.metrics.f1_score', 'f1_score', (['target_test.values', 'pred'], {}), '(target_test.values, pred)\n', (9202, 9228), False, 'from sklearn.metrics import f1_score\n'), ((15505, 15544), 'numpy.random.randint', 
'np.random.randint', (['(0)', '(2 ** 32 - 1)', '(1,)'], {}), '(0, 2 ** 32 - 1, (1,))\n', (15522, 15544), True, 'import numpy as np\n'), ((18264, 18285), 'numpy.zeros', 'np.zeros', (['(20, 10, 3)'], {}), '((20, 10, 3))\n', (18272, 18285), True, 'import numpy as np\n'), ((18305, 18326), 'numpy.zeros', 'np.zeros', (['(20, 10, 3)'], {}), '((20, 10, 3))\n', (18313, 18326), True, 'import numpy as np\n'), ((18795, 18819), 'numpy.array', 'np.array', (['probs[:, :, 0]'], {}), '(probs[:, :, 0])\n', (18803, 18819), True, 'import numpy as np\n'), ((18837, 18861), 'numpy.array', 'np.array', (['probs[:, :, 1]'], {}), '(probs[:, :, 1])\n', (18845, 18861), True, 'import numpy as np\n'), ((18880, 18904), 'numpy.array', 'np.array', (['probs[:, :, 2]'], {}), '(probs[:, :, 2])\n', (18888, 18904), True, 'import numpy as np\n'), ((19412, 19467), 'utils.pickle_object', 'pickle_object', (['to_save', '"""./experiments/p_test_conf.npz"""'], {}), "(to_save, './experiments/p_test_conf.npz')\n", (19425, 19467), False, 'from utils import pickle_object, read_pickle_object\n'), ((7187, 7239), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (7221, 7239), True, 'import tensorflow as tf\n'), ((17235, 17254), 'copy.deepcopy', 'copy.deepcopy', (['args'], {}), '(args)\n', (17248, 17254), False, 'import copy\n'), ((18400, 18427), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', '(10)'], {}), '(0.05, 0.95, 10)\n', (18411, 18427), True, 'import numpy as np\n'), ((1220, 1243), 'numpy.array', 'np.array', (['conf_to_input'], {}), '(conf_to_input)\n', (1228, 1243), True, 'import numpy as np\n'), ((7341, 7360), 'numpy.sum', 'np.sum', (['(target == 0)'], {}), '(target == 0)\n', (7347, 7360), True, 'import numpy as np\n'), ((7377, 7402), 'numpy.sum', 'np.sum', (['(target_prime == 0)'], {}), '(target_prime == 0)\n', (7383, 7402), True, 'import numpy as np\n'), ((7417, 7436), 'numpy.sum', 'np.sum', (['(target == 
1)'], {}), '(target == 1)\n', (7423, 7436), True, 'import numpy as np\n'), ((7452, 7477), 'numpy.sum', 'np.sum', (['(target_prime == 1)'], {}), '(target_prime == 1)\n', (7458, 7477), True, 'import numpy as np\n'), ((18515, 18534), 'copy.deepcopy', 'copy.deepcopy', (['args'], {}), '(args)\n', (18528, 18534), False, 'import copy\n'), ((7643, 7681), 'numpy.array', 'np.array', (['[weight_for_0, weight_for_1]'], {}), '([weight_for_0, weight_for_1])\n', (7651, 7681), True, 'import numpy as np\n'), ((17537, 17560), 'numpy.array', 'np.array', (['nec_test_prob'], {}), '(nec_test_prob)\n', (17545, 17560), True, 'import numpy as np\n'), ((17569, 17592), 'numpy.array', 'np.array', (['nec_test_prob'], {}), '(nec_test_prob)\n', (17577, 17592), True, 'import numpy as np\n'), ((17674, 17697), 'numpy.array', 'np.array', (['suf_test_prob'], {}), '(suf_test_prob)\n', (17682, 17697), True, 'import numpy as np\n'), ((17776, 17799), 'numpy.array', 'np.array', (['suf_test_prob'], {}), '(suf_test_prob)\n', (17784, 17799), True, 'import numpy as np\n'), ((17895, 17926), 'numpy.array', 'np.array', (['nec_and_suf_test_prob'], {}), '(nec_and_suf_test_prob)\n', (17903, 17926), True, 'import numpy as np\n'), ((18007, 18038), 'numpy.array', 'np.array', (['nec_and_suf_test_prob'], {}), '(nec_and_suf_test_prob)\n', (18015, 18038), True, 'import numpy as np\n'), ((18124, 18138), 'numpy.array', 'np.array', (['ates'], {}), '(ates)\n', (18132, 18138), True, 'import numpy as np\n'), ((18163, 18177), 'numpy.array', 'np.array', (['ates'], {}), '(ates)\n', (18171, 18177), True, 'import numpy as np\n')] |
from kuruve.envs.GymEnv import KuruveGymEnv
from kuruve.KurveGame import *
from gym import spaces
import pygame
import numpy as np
import math
class SimpleAiEnv(KuruveGymEnv):
    """
    Gym environment for the Kurve game with one learning agent playing
    against `ai_count` scripted AI opponents.

    Observations are (H, W, 2) uint8 arrays: channel 0 is the grayscale
    game frame, channel 1 is a white-rectangle mask marking the agent's
    position. The action space is Discrete(3) -- presumably
    left / straight / right; confirm against KuruveGymEnv.
    """
    def __init__(self, headless=False, observation_size=(64, 64), fps_cap=0, frameskip=0, enable_powerups=False,
                 verbose=0, ai_count=1):
        # The base env is created with a single (agent) player; the AI
        # opponents are registered on top of it below.
        super().__init__(headless, observation_size, fps_cap, frameskip, enable_powerups, verbose, 1)
        self.ai_count = ai_count
        for i in range(1, self.ai_count+1):
            Game.add_player("Kurve_"+str(i+1), GameConfig.default_colors[i], GameConfig.default_controls[i], is_ai=True)
            self.player_count += 1
        # Off-screen surface used to paint the agent-position channel.
        self.screen_player_pos_1 = pygame.Surface(self.screen_size)
        self.screen_player_pos_1 = self.screen_player_pos_1.convert(32, 0)
        self.action_space = spaces.Discrete(3)
        # Two stacked single-channel planes: grayscale frame + position mask.
        self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8,
                                            shape=(self.screen_size[1], self.screen_size[0], 2))
        Game.reset_game()

    def reset(self):
        # Reset the underlying game and post-process the raw RGB frame.
        obs = super().reset()
        return self._process_observation(obs)

    def step(self, action):
        # Only slot 0 is driven by the agent; the second entry stays 0.
        # NOTE(review): the list length is fixed at 2 regardless of ai_count;
        # confirm KuruveGymEnv.step expects exactly this shape.
        actions = [0, 0]
        actions[0] = action
        obs, reward, done, info = super().step(actions)
        obs = self._process_observation(obs)
        # Keep only the learning agent's reward and drop the base info dict.
        reward = reward[0]
        info = {}
        return obs, reward, done, info

    def render(self, mode="human"):
        # Rendering is handled by the game itself; not exposed through gym.
        raise NotImplementedError

    def close(self):
        super().close()
        print("Game closed")

    def seed(self, seed=None):
        # Seeding is not supported by the underlying game.
        raise NotImplementedError

    def _process_observation(self, obs):
        """Turn rgb image into grayscale and add players positions as white squares"""
        # Scale factors from game-world coordinates to observation pixels.
        scale_x = GameConfig.screen_x / self.screen_size[0]
        scale_y = GameConfig.screen_y / self.screen_size[1]
        # TODO: Aspect ratio
        wh = math.ceil(Player.players[0].radius / scale_x)
        rect = (wh*2, wh*2)
        # Position for the white rectangle. wh is needed to center it.
        pos_1 = (math.ceil(Player.players[0].position[0] / scale_x)-wh, math.ceil(Player.players[0].position[1] / scale_y)-wh)
        self.screen_player_pos_1.fill((0, 0, 0))
        pygame.draw.rect(self.screen_player_pos_1, (255, 255, 255), (pos_1, rect))
        # Create observation
        # 0.2989/0.5870/0.1140 are the standard BT.601 luma weights.
        obs = np.dot(obs[..., :3], [0.2989, 0.5870, 0.1140]).astype(np.uint8)
        # pixels3d is (width, height, 3); swap to (height, width, 3) and keep
        # one channel of the black/white mask.
        pos_arr_1 = pygame.surfarray.pixels3d(self.screen_player_pos_1).swapaxes(0, 1)
        pos_arr_1 = pos_arr_1[..., 1].astype(np.uint8)
        obs = np.dstack((obs, pos_arr_1))
        return obs
return obs
| [
"numpy.dstack",
"pygame.Surface",
"math.ceil",
"pygame.draw.rect",
"gym.spaces.Discrete",
"gym.spaces.Box",
"pygame.surfarray.pixels3d",
"numpy.dot"
] | [((758, 790), 'pygame.Surface', 'pygame.Surface', (['self.screen_size'], {}), '(self.screen_size)\n', (772, 790), False, 'import pygame\n'), ((895, 913), 'gym.spaces.Discrete', 'spaces.Discrete', (['(3)'], {}), '(3)\n', (910, 913), False, 'from gym import spaces\n'), ((947, 1047), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'dtype': 'np.uint8', 'shape': '(self.screen_size[1], self.screen_size[0], 2)'}), '(low=0, high=255, dtype=np.uint8, shape=(self.screen_size[1],\n self.screen_size[0], 2))\n', (957, 1047), False, 'from gym import spaces\n'), ((1985, 2030), 'math.ceil', 'math.ceil', (['(Player.players[0].radius / scale_x)'], {}), '(Player.players[0].radius / scale_x)\n', (1994, 2030), False, 'import math\n'), ((2316, 2390), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen_player_pos_1', '(255, 255, 255)', '(pos_1, rect)'], {}), '(self.screen_player_pos_1, (255, 255, 255), (pos_1, rect))\n', (2332, 2390), False, 'import pygame\n'), ((2656, 2683), 'numpy.dstack', 'np.dstack', (['(obs, pos_arr_1)'], {}), '((obs, pos_arr_1))\n', (2665, 2683), True, 'import numpy as np\n'), ((2148, 2198), 'math.ceil', 'math.ceil', (['(Player.players[0].position[0] / scale_x)'], {}), '(Player.players[0].position[0] / scale_x)\n', (2157, 2198), False, 'import math\n'), ((2203, 2253), 'math.ceil', 'math.ceil', (['(Player.players[0].position[1] / scale_y)'], {}), '(Player.players[0].position[1] / scale_y)\n', (2212, 2253), False, 'import math\n'), ((2435, 2479), 'numpy.dot', 'np.dot', (['obs[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(obs[..., :3], [0.2989, 0.587, 0.114])\n', (2441, 2479), True, 'import numpy as np\n'), ((2519, 2570), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['self.screen_player_pos_1'], {}), '(self.screen_player_pos_1)\n', (2544, 2570), False, 'import pygame\n')] |
import os
import sys
import argparse
import cv2
import numpy as np
from tensorflow import keras
sys.path.insert(1, './src/file_management')
import file_manager
###############################################################################################################
def pre_process(image):
    """Normalize a 0-255 grayscale image to [0, 1] and invert its intensity
    (to compensate the conversion into image with imwrite).

    Args:
        image: array-like of pixel values in [0, 255] (e.g. uint8 from
            cv2.imread).

    Returns:
        Float array of the same shape with values in [0, 1], where 0 maps
        to 1.0 and 255 maps to 0.0.
    """
    # FIX: the original `1 - image * 255` multiplied the 0-255 pixel values
    # (wrapping modulo 256 for uint8 input) instead of dividing, so it could
    # never produce the inverted [0, 1] range the docstring describes.
    return 1 - image / 255.0
###############################################################################################################
#
def run_discriminator(folder, model_path):
    """Score every image in `folder` with the GAN discriminator stored at
    `model_path` and print summary statistics of the predictions.

    Prints the model summary, the mean/min/max score and the percentage of
    images scored above 0.5, then returns the list of per-image scores.
    """
    image_names = file_manager.get_content_from_folder(folder)
    discriminator = keras.models.load_model(model_path)
    print(discriminator.summary())

    def _score(file_name):
        # Read as grayscale, normalize, add a batch axis, then run the model.
        grayscale = cv2.imread(os.path.join(folder, file_name), cv2.IMREAD_GRAYSCALE)
        batch = np.expand_dims(pre_process(grayscale), axis=0)
        return float(discriminator.predict(batch))

    predictions = [_score(name) for name in image_names]
    # Summary statistics over all scored images.
    print('Results over', len(image_names), 'images.')
    print('Average score:', np.round(np.mean(predictions), 3), '- Min:', np.round(min(predictions), 3), '- Max:', np.round(max(predictions), 3))
    above_half = sum(1 for p in predictions if p > 0.5)
    print("Percentage of images above 50%: {0}%".format(above_half / len(predictions) * 100))
    return predictions
###############################################################################################################
def main():
    """Parse command-line options and evaluate the discriminator on a folder of images."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', '--folder', type=str, default='./out', help="Folder to read image data from. Default is './out'.")
    arg_parser.add_argument('-m', '--model', type=str, default='./keras_h5/keras_discriminator_model.h5', help="Path to the Keras GAN's discriminator model. Default is './keras_h5/keras_discriminator_model.h5'.")
    options = arg_parser.parse_args()
    # Echo the selected options for traceability
    print('\n------------------------------------')
    print('Command line options:')
    print(' --folder:', options.folder)
    print(' --model:', options.model)
    print('------------------------------------\n')
    # Run the discriminator over the images to evaluate their degree of realism
    run_discriminator(options.folder, options.model)
###############################################################################################################
# Run the evaluation only when this file is executed as a script
if __name__ == '__main__':
    main()
| [
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser",
"file_manager.get_content_from_folder",
"numpy.expand_dims",
"sys.path.insert",
"numpy.mean",
"os.path.join"
] | [((106, 149), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""./src/file_management"""'], {}), "(1, './src/file_management')\n", (121, 149), False, 'import sys\n'), ((846, 890), 'file_manager.get_content_from_folder', 'file_manager.get_content_from_folder', (['folder'], {}), '(folder)\n', (882, 890), False, 'import file_manager\n'), ((930, 965), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['model_path'], {}), '(model_path)\n', (953, 965), False, 'from tensorflow import keras\n'), ((2218, 2243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2241, 2243), False, 'import argparse\n'), ((1356, 1383), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1370, 1383), True, 'import numpy as np\n'), ((1182, 1208), 'os.path.join', 'os.path.join', (['folder', 'name'], {}), '(folder, name)\n', (1194, 1208), False, 'import os\n'), ((1675, 1695), 'numpy.mean', 'np.mean', (['predictions'], {}), '(predictions)\n', (1682, 1695), True, 'import numpy as np\n')] |
"""
This class will be removed before the first stable version of ZairaChem.
Computing time is simply unaffordable for the stacking methodology, unfortunately.
"""
import os
import numpy as np
import pandas as pd
import json
import h5py
import joblib
from .. import ZairaBase
from ..estimators.base import BaseEstimator, BaseOutcomeAssembler
from ..automl.autogluon import AutoGluonEstimator
from ..estimators import RESULTS_MAPPED_FILENAME, RESULTS_UNMAPPED_FILENAME
from ..setup import COMPOUND_IDENTIFIER_COLUMN, SMILES_COLUMN
from ..vars import (
DATA_FILENAME,
DATA_SUBFOLDER,
DESCRIPTORS_SUBFOLDER,
ESTIMATORS_SUBFOLDER,
POOL_SUBFOLDER,
)
AUTOGLUON_SAVE_SUBFOLDER = "autogluon"
class XGetter(ZairaBase):
    """Assemble the pooled feature matrix from the artifacts written by earlier pipeline steps."""

    def __init__(self, path):
        ZairaBase.__init__(self)
        self.path = path
        # Arrays collected so far; horizontally stacked in get()
        self.X = []
        # Column names aligned with the stacked arrays
        self.columns = []

    def _get_folds(self):
        # Auxiliary fold assignment (only present in some data files)
        df = pd.read_csv(os.path.join(self.path, DATA_SUBFOLDER, DATA_FILENAME))
        if "fld_aux" in list(df.columns):
            self.columns += ["fld_aux"]
            self.X += [np.array(df[["fld_aux"]])]

    def _get_manifolds(self):
        # PCA and UMAP projections computed at the descriptor stage
        for file_name, prefix in [("pca.h5", "pca"), ("umap.h5", "umap")]:
            with h5py.File(
                os.path.join(self.path, DESCRIPTORS_SUBFOLDER, file_name), "r"
            ) as f:
                values = f["Values"][:]
            self.X += [values]
            self.columns += [
                "{0}-{1}".format(prefix, i) for i in range(values.shape[1])
            ]

    def _get_reference_descriptor(self):
        # Reference descriptor with named features
        with h5py.File(
            os.path.join(self.path, DESCRIPTORS_SUBFOLDER, "reference.h5"), "r"
        ) as f:
            values = f["Values"][:]
            feature_names = f["Features"][:]
        self.X += [values]
        self.columns += [
            "ref-{0}".format(name.decode("utf-8")) for name in feature_names
        ]

    def _get_out_of_sample_predictions(self):
        # Per-model out-of-sample predictions from the estimators stage
        with open(
            os.path.join(self.path, DESCRIPTORS_SUBFOLDER, "done_eos.json"), "r"
        ) as f:
            model_ids = list(json.load(f))
        for model_id in model_ids:
            results_file = os.path.join(
                self.path, ESTIMATORS_SUBFOLDER, model_id, RESULTS_UNMAPPED_FILENAME
            )
            df = pd.read_csv(results_file)
            # Drop identifier columns; keep only prediction columns
            kept = [
                c
                for c in list(df.columns)
                if c not in [SMILES_COLUMN, COMPOUND_IDENTIFIER_COLUMN]
            ]
            df = df[kept]
            self.X += [np.array(df)]
            self.columns += ["{0}-{1}".format(c, model_id) for c in list(df.columns)]

    def get(self):
        """Build the pooled data frame, persist it under the pool subfolder and return it."""
        if not self.is_predict():
            # Fold information is only meaningful at training time
            self._get_folds()
        self._get_manifolds()
        self._get_out_of_sample_predictions()
        self._get_reference_descriptor()
        df = pd.DataFrame(np.hstack(self.X), columns=self.columns)
        df.to_csv(os.path.join(self.path, POOL_SUBFOLDER, DATA_FILENAME), index=False)
        return df
class Fitter(BaseEstimator):
    """Fit an AutoGluon model on the pooled feature matrix against all tasks."""

    def __init__(self, path):
        BaseEstimator.__init__(self, path=path)
        # AutoGluon persists its artifacts under the pool subfolder
        self.trained_path = os.path.join(
            self.get_output_dir(), POOL_SUBFOLDER, AUTOGLUON_SAVE_SUBFOLDER
        )

    def _get_X(self):
        # Pooled descriptors, manifolds and out-of-sample predictions
        df = XGetter(path=self.path).get()
        return df

    def _get_y(self, task):
        # Single task column from the main data file
        df = pd.read_csv(os.path.join(self.path, DATA_SUBFOLDER, DATA_FILENAME))
        return np.array(df[task])

    def _get_Y(self):
        # Collect every regression and classification target into one frame
        Y = []
        columns = []
        for t in self._get_reg_tasks():
            Y += [self._get_y(t)]
            columns += [t]
        for t in self._get_clf_tasks():
            Y += [self._get_y(t)]
            columns += [t]
        Y = np.array(Y).T
        return pd.DataFrame(Y, columns=columns)

    def run(self, time_budget_sec=None):
        """Fit the pooled AutoGluon estimator and return its results.

        Parameters
        ----------
        time_budget_sec : int or None
            Time budget for AutoGluon; estimated from the data when None.
        """
        self.reset_time()
        if time_budget_sec is None:
            time_budget_sec = self._estimate_time_budget()
        df_X = self._get_X()
        df_Y = self._get_Y()
        df = pd.concat([df_X, df_Y], axis=1)
        labels = list(df_Y.columns)
        self.logger.debug("Starting AutoGluon estimation")
        estimator = AutoGluonEstimator(
            save_path=self.trained_path, time_budget=time_budget_sec
        )
        # Use the auxiliary fold column for grouped splits when available
        if "fld_aux" in list(df_X.columns):
            groups = "fld_aux"
        else:
            groups = None
        self.logger.debug("Fitting")
        results = estimator.fit(data=df, labels=labels, groups=groups)
        self.update_elapsed_time()
        return results
class Predictor(BaseEstimator):
    """Run a previously fitted pooled AutoGluon model on new data."""

    def __init__(self, path):
        BaseEstimator.__init__(self, path=path)
        # Location where the fitted AutoGluon artifacts were saved
        self.trained_path = os.path.join(
            self.get_trained_dir(), POOL_SUBFOLDER, AUTOGLUON_SAVE_SUBFOLDER
        )

    def run(self):
        """Predict on the pooled features and return the results frame."""
        self.reset_time()
        features = XGetter(path=self.path).get()
        estimator = AutoGluonEstimator(save_path=self.trained_path).load()
        predictions = estimator.run(features)
        self.update_elapsed_time()
        return predictions
class PoolAssembler(BaseOutcomeAssembler):
    """Map pooled predictions back to raw units and to the original compound order."""

    def __init__(self, path=None):
        BaseOutcomeAssembler.__init__(self, path=path)

    def _back_to_raw(self, df):
        # Undo the transformation applied to regression targets at setup time.
        # NOTE(review): if several "reg_" columns exist, "reg_raw" is
        # overwritten on each iteration and only the last survives —
        # presumably a single regression task is expected; confirm.
        for c in list(df.columns):
            if "reg_" in c:
                # Transformer was persisted as "<task>_transformer.joblib";
                # assumes columns are named "reg_<task>..." — TODO confirm
                transformer = joblib.load(
                    os.path.join(
                        self.trained_path,
                        DATA_SUBFOLDER,
                        "{0}_transformer.joblib".format(c.split("_")[1]),
                    )
                )
                # inverse_transform expects a 2D column vector
                trn = np.array(df[c]).reshape(-1, 1)
                raw = transformer.inverse_transform(trn)[:, 0]
                df["reg_raw"] = raw
        return df

    def run(self, df):
        """Write the unmapped and mapped result files for the pooled predictions."""
        df = self._back_to_raw(df)
        # Prepend compound identifiers/SMILES to the prediction columns
        df_c = self._get_compounds()
        df_y = df
        df = pd.concat([df_c, df_y], axis=1)
        df.to_csv(
            os.path.join(self.path, POOL_SUBFOLDER, RESULTS_UNMAPPED_FILENAME),
            index=False,
        )
        # Remap rows back to the original input ordering and save again
        mappings = self._get_mappings()
        df = self._remap(df, mappings)
        df.to_csv(
            os.path.join(self.path, POOL_SUBFOLDER, RESULTS_MAPPED_FILENAME),
            index=False,
        )
class Pooler(ZairaBase):
    """Entry point for the pooling step: fits at training time, predicts otherwise."""

    def __init__(self, path=None):
        ZairaBase.__init__(self)
        # Default to the pipeline's output directory when no path is given
        self.path = self.get_output_dir() if path is None else path
        if self.is_predict():
            self.logger.debug("Starting pooled predictor")
            self.estimator = Predictor(path=self.path)
        else:
            self.logger.debug("Starting pooled fitter")
            self.estimator = Fitter(path=self.path)

    def run(self, time_budget_sec=None):
        """Execute the pooling step and assemble the outcome files."""
        self.time_budget_sec = (
            None if time_budget_sec is None else int(time_budget_sec)
        )
        if self.is_predict():
            self.logger.debug("Mode: predict")
            results = self.estimator.run()
        else:
            self.logger.debug("Mode: fit")
            results = self.estimator.run(time_budget_sec=self.time_budget_sec)
        pa = PoolAssembler(path=self.path)
        pa.run(results)
| [
"pandas.DataFrame",
"json.load",
"pandas.read_csv",
"numpy.hstack",
"numpy.array",
"os.path.join",
"pandas.concat"
] | [((2990, 3007), 'numpy.hstack', 'np.hstack', (['self.X'], {}), '(self.X)\n', (2999, 3007), True, 'import numpy as np\n'), ((3021, 3058), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'self.columns'}), '(X, columns=self.columns)\n', (3033, 3058), True, 'import pandas as pd\n'), ((3610, 3628), 'numpy.array', 'np.array', (['df[task]'], {}), '(df[task])\n', (3618, 3628), True, 'import numpy as np\n'), ((3965, 3997), 'pandas.DataFrame', 'pd.DataFrame', (['Y'], {'columns': 'columns'}), '(Y, columns=columns)\n', (3977, 3997), True, 'import pandas as pd\n'), ((4310, 4341), 'pandas.concat', 'pd.concat', (['[df_X, df_Y]'], {'axis': '(1)'}), '([df_X, df_Y], axis=1)\n', (4319, 4341), True, 'import pandas as pd\n'), ((6129, 6160), 'pandas.concat', 'pd.concat', (['[df_c, df_y]'], {'axis': '(1)'}), '([df_c, df_y], axis=1)\n', (6138, 6160), True, 'import pandas as pd\n'), ((921, 975), 'os.path.join', 'os.path.join', (['self.path', 'DATA_SUBFOLDER', 'DATA_FILENAME'], {}), '(self.path, DATA_SUBFOLDER, DATA_FILENAME)\n', (933, 975), False, 'import os\n'), ((2286, 2372), 'os.path.join', 'os.path.join', (['self.path', 'ESTIMATORS_SUBFOLDER', 'model_id', 'RESULTS_UNMAPPED_FILENAME'], {}), '(self.path, ESTIMATORS_SUBFOLDER, model_id,\n RESULTS_UNMAPPED_FILENAME)\n', (2298, 2372), False, 'import os\n'), ((2416, 2438), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {}), '(file_name)\n', (2427, 2438), True, 'import pandas as pd\n'), ((3077, 3131), 'os.path.join', 'os.path.join', (['self.path', 'POOL_SUBFOLDER', 'DATA_FILENAME'], {}), '(self.path, POOL_SUBFOLDER, DATA_FILENAME)\n', (3089, 3131), False, 'import os\n'), ((3539, 3593), 'os.path.join', 'os.path.join', (['self.path', 'DATA_SUBFOLDER', 'DATA_FILENAME'], {}), '(self.path, DATA_SUBFOLDER, DATA_FILENAME)\n', (3551, 3593), False, 'import os\n'), ((3938, 3949), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3946, 3949), True, 'import numpy as np\n'), ((6192, 6258), 'os.path.join', 'os.path.join', (['self.path', 
'POOL_SUBFOLDER', 'RESULTS_UNMAPPED_FILENAME'], {}), '(self.path, POOL_SUBFOLDER, RESULTS_UNMAPPED_FILENAME)\n', (6204, 6258), False, 'import os\n'), ((6405, 6469), 'os.path.join', 'os.path.join', (['self.path', 'POOL_SUBFOLDER', 'RESULTS_MAPPED_FILENAME'], {}), '(self.path, POOL_SUBFOLDER, RESULTS_MAPPED_FILENAME)\n', (6417, 6469), False, 'import os\n'), ((1082, 1107), 'numpy.array', 'np.array', (["df[['fld_aux']]"], {}), "(df[['fld_aux']])\n", (1090, 1107), True, 'import numpy as np\n'), ((1176, 1232), 'os.path.join', 'os.path.join', (['self.path', 'DESCRIPTORS_SUBFOLDER', '"""pca.h5"""'], {}), "(self.path, DESCRIPTORS_SUBFOLDER, 'pca.h5')\n", (1188, 1232), False, 'import os\n'), ((1444, 1501), 'os.path.join', 'os.path.join', (['self.path', 'DESCRIPTORS_SUBFOLDER', '"""umap.h5"""'], {}), "(self.path, DESCRIPTORS_SUBFOLDER, 'umap.h5')\n", (1456, 1501), False, 'import os\n'), ((1756, 1818), 'os.path.join', 'os.path.join', (['self.path', 'DESCRIPTORS_SUBFOLDER', '"""reference.h5"""'], {}), "(self.path, DESCRIPTORS_SUBFOLDER, 'reference.h5')\n", (1768, 1818), False, 'import os\n'), ((2099, 2162), 'os.path.join', 'os.path.join', (['self.path', 'DESCRIPTORS_SUBFOLDER', '"""done_eos.json"""'], {}), "(self.path, DESCRIPTORS_SUBFOLDER, 'done_eos.json')\n", (2111, 2162), False, 'import os\n'), ((2213, 2225), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2222, 2225), False, 'import json\n'), ((2677, 2689), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (2685, 2689), True, 'import numpy as np\n'), ((5854, 5869), 'numpy.array', 'np.array', (['df[c]'], {}), '(df[c])\n', (5862, 5869), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.