code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 02:57:27 2020
@author: philippe
"""
from argparse import ArgumentParser
import numpy as np
import tensorflow as tf
from sys import argv
from config import Config
from model import Model
# preprocessed android distribution
# --- Hard-coded run configuration (injected below as fake CLI arguments) ---
# Model/dataset identifiers and absolute paths on the author's machine.
args_type="java-small-model"
args_dataset_name="java-small"
args_data_dir="C:/Users/philippe/python-projects/Code2Seq/data/baselines/preprocessed/java-small/"
args_data= args_data_dir + args_dataset_name
args_test_data= args_data_dir + args_dataset_name + ".val.c2s"
args_model_dir= "C:/Users/philippe/python-projects/Code2Seq/data/model/" + args_type
args_model = args_model_dir + "/model"
# The script appends its own options to sys.argv so that the ArgumentParser
# below picks them up without any real command line being supplied.
# train data
argv.append("--data")
argv.append( args_data )
# test data
argv.append("--test")
argv.append( args_test_data )
# model location
argv.append("--save_prefix")
argv.append( args_model )

if __name__ == '__main__':
    # Standard code2seq-style CLI; all options optional because defaults are
    # injected via argv above.
    parser = ArgumentParser()
    parser.add_argument("-d", "--data", dest="data_path",
                        help="path to preprocessed dataset", required=False)
    parser.add_argument("-te", "--test", dest="test_path",
                        help="path to test file", metavar="FILE", required=False)
    parser.add_argument("-s", "--save_prefix", dest="save_path_prefix",
                        help="path to save file", metavar="FILE", required=False)
    parser.add_argument("-l", "--load", dest="load_path",
                        help="path to saved file", metavar="FILE", required=False)
    parser.add_argument('--release', action='store_true',
                        help='if specified and loading a trained model, release the loaded model for a smaller model '
                             'size.')
    parser.add_argument('--predict', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--seed', type=int, default=239)
    args = parser.parse_args()
    # Seed both numpy and TF for reproducibility.
    np.random.seed(args.seed)
    # NOTE(review): tf.set_random_seed is a TensorFlow 1.x API — this script
    # presumably requires TF1 (tf.compat.v1 under TF2); confirm environment.
    tf.set_random_seed(args.seed)
    if args.debug:
        conf = Config.get_debug_config(args)
    else:
        conf = Config.get_default_config(args)
    print( conf )
    # Build the model, run training, then release the TF session.
    model = Model(conf)
    print('Created model')
    model.train()
    model.close_session()
model.close_session() | [
"numpy.random.seed",
"argparse.ArgumentParser",
"config.Config.get_default_config",
"sys.argv.append",
"model.Model",
"tensorflow.set_random_seed",
"config.Config.get_debug_config"
] | [((706, 727), 'sys.argv.append', 'argv.append', (['"""--data"""'], {}), "('--data')\n", (717, 727), False, 'from sys import argv\n'), ((729, 751), 'sys.argv.append', 'argv.append', (['args_data'], {}), '(args_data)\n', (740, 751), False, 'from sys import argv\n'), ((770, 791), 'sys.argv.append', 'argv.append', (['"""--test"""'], {}), "('--test')\n", (781, 791), False, 'from sys import argv\n'), ((793, 820), 'sys.argv.append', 'argv.append', (['args_test_data'], {}), '(args_test_data)\n', (804, 820), False, 'from sys import argv\n'), ((844, 872), 'sys.argv.append', 'argv.append', (['"""--save_prefix"""'], {}), "('--save_prefix')\n", (855, 872), False, 'from sys import argv\n'), ((874, 897), 'sys.argv.append', 'argv.append', (['args_model'], {}), '(args_model)\n', (885, 897), False, 'from sys import argv\n'), ((944, 960), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (958, 960), False, 'from argparse import ArgumentParser\n'), ((1973, 1998), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1987, 1998), True, 'import numpy as np\n'), ((2004, 2033), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (2022, 2033), True, 'import tensorflow as tf\n'), ((2193, 2204), 'model.Model', 'Model', (['conf'], {}), '(conf)\n', (2198, 2204), False, 'from model import Model\n'), ((2072, 2101), 'config.Config.get_debug_config', 'Config.get_debug_config', (['args'], {}), '(args)\n', (2095, 2101), False, 'from config import Config\n'), ((2129, 2160), 'config.Config.get_default_config', 'Config.get_default_config', (['args'], {}), '(args)\n', (2154, 2160), False, 'from config import Config\n')] |
import numpy as np
import sklearn.mixture
import multiprocessing
class GMM():
    """Gaussian Mixture Model distribution wrapper (diagonal covariances only),
    backed by a manually-parameterized ``sklearn.mixture.GaussianMixture``,
    supporting sampling and per-sample log-likelihood evaluation.
    """

    def __init__(self, means, covariances, weights):
        """
        Gaussian Mixture Model Distribution class for calculation of log likelihood and sampling.

        Parameters
        ----------
        means : 2-D array_like of shape (n_mixtures, n_features)
            Means for each component of the GMM
        covariances : 2-D array_like of shape (n_mixtures, n_features)
            Diagonal entries of the covariance matrices of the GMM. Only
            diagonal matrices are supported at this time.
        weights : 1-D array_like of shape (n_mixtures,)
            Weights for each of the GMM components

        Raises
        ------
        NotImplementedError
            If ``covariances`` is not 2-D (i.e. not diagonal form).
        ValueError
            If ``means`` is not a 2-D array.
        """
        if len(covariances.shape) == 2:
            self.covariance_type = 'diag'
        else:
            raise NotImplementedError('Only diagonal covariance matrices supported')
        # Build a "fitted" GaussianMixture by assigning the learned
        # parameters directly instead of calling .fit().
        self.gmm = sklearn.mixture.GaussianMixture(n_components=len(weights), covariance_type='diag')
        self.gmm.weights_ = weights
        self.gmm.covariances_ = covariances
        self.gmm.means_ = means
        # For 'diag' covariances, the Cholesky factor of each precision
        # matrix is simply 1/sqrt(variance) per dimension.
        self.gmm.precisions_cholesky_ = np.sqrt(1./covariances)
        self.n_mixtures = len(weights)
        try:
            self.n_features = means.shape[1]
        except (AttributeError, IndexError):
            # No .shape attribute (plain sequence) or a 1-D array: reject
            # with a clear message instead of a bare except.
            raise ValueError("Means array must be 2 dimensional")

    @property
    def means(self):
        # Component means, shape (n_mixtures, n_features).
        return self.gmm.means_

    @property
    def covars(self):
        # Bug fix: sklearn's GaussianMixture stores its covariance parameters
        # in `covariances_`; the legacy `covars_` attribute does not exist on
        # GaussianMixture, so this property previously raised AttributeError.
        return self.gmm.covariances_

    @property
    def weights(self):
        # Component mixing weights, shape (n_mixtures,).
        return self.gmm.weights_

    def sample(self, n_samples):
        """
        Sample from the GMM.

        Parameters
        ----------
        n_samples : int
            Number of samples to draw.

        Returns
        -------
        : 2-D array_like of shape (n_samples, n_features)
            Samples drawn from the GMM distribution
        """
        # Discard the component labels returned by sklearn.
        X, y = self.gmm.sample(n_samples)
        return X

    def log_likelihood(self, X, n_jobs=1):
        """
        Calculate the log likelihood of each sample given the GMM parameters.

        Parameters
        ----------
        X : 2-D array_like of shape (n_samples, n_features)
            Data to be used.
        n_jobs : int
            Number of CPU cores to use in the calculation.
            NOTE: currently unused; the computation is delegated entirely to
            sklearn's ``score_samples``.

        Returns
        -------
        : 1-D array_like of shape (n_samples,)
            Log likelihood of each sample given the GMM parameters
            (``score_samples`` returns per-sample values, not an average).
        """
        return self.gmm.score_samples(X)
| [
"numpy.sqrt"
] | [((1115, 1141), 'numpy.sqrt', 'np.sqrt', (['(1.0 / covariances)'], {}), '(1.0 / covariances)\n', (1122, 1141), True, 'import numpy as np\n')] |
__author__ = 'mason'
from domain_orderFulfillment import *
from timer import DURATION
from state import state
import numpy as np
'''
This is a randomly generated problem
'''
def GetCostOfMove(id, r, loc1, loc2, dist):
    """Duration of moving robot `r` from loc1 to loc2: one base time unit plus the travel distance."""
    return dist + 1
def GetCostOfLookup(id, item):
    """Duration of a database lookup for `item`: a Beta(2, 2) draw, floored at 1.

    NOTE(review): Beta(2, 2) samples lie in (0, 1), so the floor means this
    effectively always returns 1 — possibly intended for this generated
    problem, but worth confirming.
    """
    draw = np.random.beta(2, 2)
    return draw if draw > 1 else 1
def GetCostOfWrap(id, orderName, m, item):
    """Duration of wrapping `item` for order `orderName` on machine `m`: Gaussian(5, 0.5) draw, floored at 1."""
    draw = np.random.normal(5, .5)
    return draw if draw > 1 else 1
def GetCostOfPickup(id, r, item):
    """Duration of robot `r` picking up `item`: Gaussian(4, 1) draw, floored at 1."""
    draw = np.random.normal(4, 1)
    return draw if draw > 1 else 1
def GetCostOfPutdown(id, r, item):
    """Duration of robot `r` putting down `item`: Gaussian(4, 1) draw, floored at 1."""
    draw = np.random.normal(4, 1)
    return draw if draw > 1 else 1
def GetCostOfLoad(id, orderName, r, m, item):
    """Duration of robot `r` loading `item` into machine `m` for order `orderName`: Gaussian(3, 0.5) draw, floored at 1."""
    draw = np.random.normal(3, .5)
    return draw if draw > 1 else 1
# Action-duration model for the simulator: each action name maps either to a
# sampling function (see GetCostOf* above) or to a constant duration.
DURATION.TIME = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Counter model: identical mapping to DURATION.TIME.
DURATION.COUNTER = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Factory layout: location ids, the single factory, shipping dock, and the
# ground graph (adjacency lists plus symmetric edge weights / distances).
rv.LOCATIONS = [0, 1, 2, 3, 4, 5, 6, 7, 200]
rv.FACTORY1 = frozenset({0, 1, 2, 3, 4, 5, 6, 7, 200})
rv.FACTORY_UNION = rv.FACTORY1
rv.SHIPPING_DOC = {rv.FACTORY1: 1}
rv.GROUND_EDGES = {0: [5, 6, 4], 1: [2, 5, 7, 200], 2: [3, 5, 1, 7], 3: [4, 2, 7, 200], 4: [0, 3, 5], 5: [2, 4, 0, 1, 6], 6: [5, 0], 7: [2, 3, 1], 200: [3, 1]}
rv.GROUND_WEIGHTS = {(0, 5): 1, (0, 6): 12.780347394811297, (0, 4): 14.039879205640858, (1, 2): 1.807361018701961, (1, 5): 8.574150446152094, (1, 7): 5.684730330679724, (1, 200): 13.519908916855425, (2, 3): 5.31665350867007, (2, 5): 10.779604670982208, (2, 7): 5.644816461746832, (3, 4): 5.940740605637421, (3, 7): 10.032826310246485, (3, 200): 15.88063090549323, (4, 5): 1, (5, 6): 8.363619665708075}
# Robots (all assigned to FACTORY1), their carrying capacities, the single
# machine, and the available pallets.
rv.ROBOTS = { 'r0': rv.FACTORY1, 'r1': rv.FACTORY1, 'r2': rv.FACTORY1, 'r3': rv.FACTORY1, 'r4': rv.FACTORY1, 'r5': rv.FACTORY1, 'r6': rv.FACTORY1, }
rv.ROBOT_CAPACITY = {'r0': 6.174548780209071, 'r1': 7.888950393237086, 'r2': 8.94839227413827, 'r3': 6.109450732637805, 'r4': 9.243688988204152, 'r5': 7.653146729308341, 'r6': 7.00850756111181}
rv.MACHINES = { 'm0': rv.FACTORY1, }
rv.PALLETS = { 'p0', }
def ResetState():
    """Reset the global `state` module to this problem's initial configuration."""
    # All objects start available, with fixed weights and a single class.
    state.OBJECTS = { 'o0': True, 'o1': True, 'o2': True, 'o3': True, 'o4': True, 'o5': True, }
    state.OBJ_WEIGHT = {'o0': 7.480383025618398, 'o1': 6.5298717875512144, 'o2': 7.016331420516707, 'o3': 9.243688988204152, 'o4': 2.365805663120769, 'o5': 8.404532434838684}
    state.OBJ_CLASS = {'type0': ['o0', 'o1', 'o2', 'o3', 'o4', 'o5']}
    # Initial locations of robots, machine, pallet and objects.
    state.loc = { 'r0': 1, 'r1': 6, 'r2': 7, 'r3': 6, 'r4': 2, 'r5': 3, 'r6': 1, 'm0': 6, 'p0': 7, 'o0': 3, 'o1': 3, 'o2': 6, 'o3': 6, 'o4': 4, 'o5': 4,}
    # Robots start empty (NIL comes from the domain's star import) and idle.
    state.load = { 'r0': NIL, 'r1': NIL, 'r2': NIL, 'r3': NIL, 'r4': NIL, 'r5': NIL, 'r6': NIL,}
    state.busy = {'r0': False, 'r1': False, 'r2': False, 'r3': False, 'r4': False, 'r5': False, 'r6': False, 'm0': False}
    # Machine m0 may be used 11 times before wearing out.
    state.numUses = {'m0': 11}
    state.var1 = {'temp': 'r0', 'temp1': 'r0', 'temp2': 1, 'redoId': 0}
    state.shouldRedo = {}
# Goal tasks keyed by arrival time: a single 'orderStart' for class 'type0'.
tasks = {
    1: [['orderStart', ['type0']]],
}
eventsEnv = {
} | [
"numpy.random.beta",
"numpy.random.normal"
] | [((291, 311), 'numpy.random.beta', 'np.random.beta', (['(2)', '(2)'], {}), '(2, 2)\n', (305, 311), True, 'import numpy as np\n'), ((375, 399), 'numpy.random.normal', 'np.random.normal', (['(5)', '(0.5)'], {}), '(5, 0.5)\n', (391, 399), True, 'import numpy as np\n'), ((453, 475), 'numpy.random.normal', 'np.random.normal', (['(4)', '(1)'], {}), '(4, 1)\n', (469, 475), True, 'import numpy as np\n'), ((531, 553), 'numpy.random.normal', 'np.random.normal', (['(4)', '(1)'], {}), '(4, 1)\n', (547, 553), True, 'import numpy as np\n'), ((620, 644), 'numpy.random.normal', 'np.random.normal', (['(3)', '(0.5)'], {}), '(3, 0.5)\n', (636, 644), True, 'import numpy as np\n')] |
import logging
from copy import deepcopy
from time import time
from typing import Optional, Union
import numpy as np
from mne.epochs import BaseEpochs
from sklearn.base import clone
from sklearn.metrics import get_scorer
from sklearn.model_selection import (
LeaveOneGroupOut,
StratifiedKFold,
StratifiedShuffleSplit,
cross_val_score,
)
from sklearn.model_selection._validation import _fit_and_score, _score
from sklearn.preprocessing import LabelEncoder
from moabb.evaluations.base import BaseEvaluation
# Module-level logger for evaluation progress and warnings.
log = logging.getLogger(__name__)
# Numpy ArrayLike is only available starting from Numpy 1.20 and Python 3.8
Vector = Union[list, tuple, np.ndarray]
class WithinSessionEvaluation(BaseEvaluation):
    """Within Session evaluation.

    Trains and tests pipelines inside each recording session of each
    subject. Optionally computes a learning curve by evaluating on nested
    subsets of the training data (see ``data_size``/``n_perms``).
    """

    # Accepted strategies for choosing learning-curve training-set sizes.
    VALID_POLICIES = ["per_class", "ratio"]

    def __init__(
        self,
        n_perms: Optional[Union[int, Vector]] = None,
        data_size: Optional[dict] = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        n_perms :
            Number of permutations to perform. If an array
            is passed it has to be equal in size to the data_size array.
            Values in this array must be monotonically decreasing (performing
            more permutations for more data is not useful to reduce standard
            error of the mean).
            Default: None
        data_size :
            If None is passed, it performs conventional WithinSession evaluation.
            Contains the policy to pick the datasizes to
            evaluate, as well as the actual values. The dict has the
            key 'policy' with either 'ratio' or 'per_class', and the key
            'value' with the actual values as an numpy array. This array should be
            sorted, such that values in data_size are strictly monotonically increasing.
            Default: None
        """
        self.data_size = data_size
        self.n_perms = n_perms
        # Learning-curve mode is enabled simply by passing data_size.
        self.calculate_learning_curve = self.data_size is not None
        if self.calculate_learning_curve:
            # Check correct n_perms parameter
            if self.n_perms is None:
                raise ValueError(
                    "When passing data_size, please also indicate number of permutations"
                )
            if type(n_perms) is int:
                # Scalar n_perms: replicate it for every data-size step.
                self.n_perms = np.full_like(self.data_size["value"], n_perms, dtype=int)
            elif len(self.n_perms) != len(self.data_size["value"]):
                raise ValueError(
                    "Number of elements in n_perms must be equal "
                    "to number of elements in data_size['value']"
                )
            elif not np.all(np.diff(n_perms) <= 0):
                raise ValueError(
                    "If n_perms is passed as an array, it has to be monotonically decreasing"
                )
            # Check correct data size parameter
            if not np.all(np.diff(self.data_size["value"]) > 0):
                raise ValueError(
                    "data_size['value'] must be sorted in strictly monotonically increasing order."
                )
            if data_size["policy"] not in WithinSessionEvaluation.VALID_POLICIES:
                raise ValueError(
                    f"{data_size['policy']} is not valid. Please use one of"
                    f"{WithinSessionEvaluation.VALID_POLICIES}"
                )
            self.test_size = 0.2  # Roughly similar to 5-fold CV
            add_cols = ["data_size", "permutation"]
            super().__init__(additional_columns=add_cols, **kwargs)
        else:
            # Perform default within session evaluation
            super().__init__(**kwargs)

    def _evaluate(self, dataset, pipelines):
        # Conventional within-session evaluation: 5-fold stratified CV per
        # (subject, session, pipeline); yields one result dict per combination.
        for subject in dataset.subject_list:
            # check if we already have result for this subject/pipeline
            # we might need a better granularity, if we query the DB
            run_pipes = self.results.not_yet_computed(pipelines, dataset, subject)
            if len(run_pipes) == 0:
                continue
            # get the data
            X, y, metadata = self.paradigm.get_data(
                dataset, [subject], self.return_epochs
            )
            # iterate over sessions
            for session in np.unique(metadata.session):
                ix = metadata.session == session
                for name, clf in run_pipes.items():
                    t_start = time()
                    cv = StratifiedKFold(5, shuffle=True, random_state=self.random_state)
                    le = LabelEncoder()
                    y_cv = le.fit_transform(y[ix])
                    if isinstance(X, BaseEpochs):
                        # mne Epochs cannot go through cross_val_score;
                        # run the CV loop manually.
                        scorer = get_scorer(self.paradigm.scoring)
                        acc = list()
                        X_ = X[ix]
                        y_ = y[ix] if self.mne_labels else y_cv
                        for train, test in cv.split(X_, y_):
                            cvclf = clone(clf)
                            cvclf.fit(X_[train], y_[train])
                            acc.append(scorer(cvclf, X_[test], y_[test]))
                        acc = np.array(acc)
                    else:
                        acc = cross_val_score(
                            clf,
                            X[ix],
                            y_cv,
                            cv=cv,
                            scoring=self.paradigm.scoring,
                            n_jobs=self.n_jobs,
                            error_score=self.error_score,
                        )
                    score = acc.mean()
                    duration = time() - t_start
                    nchan = X.info["nchan"] if isinstance(X, BaseEpochs) else X.shape[1]
                    res = {
                        "time": duration / 5.0,  # 5 fold CV
                        "dataset": dataset,
                        "subject": subject,
                        "session": session,
                        "score": score,
                        "n_samples": len(y_cv),  # not training sample
                        "n_channels": nchan,
                        "pipeline": name,
                    }
                    yield res

    def get_data_size_subsets(self, y):
        # Translate the data_size policy into a list of index arrays, one per
        # data-size step, each selecting a training subset from y.
        if self.data_size is None:
            raise ValueError(
                "Cannot create data subsets without valid policy for data_size."
            )
        if self.data_size["policy"] == "ratio":
            # Fractions of the full training set; ceil to sample counts.
            vals = np.array(self.data_size["value"])
            if np.any(vals < 0) or np.any(vals > 1):
                raise ValueError("Data subset ratios must be in range [0, 1]")
            upto = np.ceil(vals * len(y)).astype(int)
            indices = [np.array(range(i)) for i in upto]
        elif self.data_size["policy"] == "per_class":
            # Fixed number of samples per class; limited by the rarest class.
            classwise_indices = dict()
            n_smallest_class = np.inf
            for cl in np.unique(y):
                cl_i = np.where(cl == y)[0]
                classwise_indices[cl] = cl_i
                n_smallest_class = (
                    len(cl_i) if len(cl_i) < n_smallest_class else n_smallest_class
                )
            indices = []
            for ds in self.data_size["value"]:
                if ds > n_smallest_class:
                    raise ValueError(
                        f"Smallest class has {n_smallest_class} samples. "
                        f"Desired samples per class {ds} is too large."
                    )
                indices.append(
                    np.concatenate(
                        [classwise_indices[cl][:ds] for cl in classwise_indices]
                    )
                )
        else:
            raise ValueError(f"Unknown policy {self.data_size['policy']}")
        return indices

    def score_explicit(self, clf, X_train, y_train, X_test, y_test):
        # Fit clf on an explicit train split and score on an explicit test
        # split; returns (score, fit+score duration in seconds).
        if not self.mne_labels:
            # convert labels if array, keep them if epochs and mne_labels is set
            le = LabelEncoder()
            y_train = le.fit_transform(y_train)
            y_test = le.transform(y_test)
        scorer = get_scorer(self.paradigm.scoring)
        t_start = time()
        try:
            model = clf.fit(X_train, y_train)
            score = _score(model, X_test, y_test, scorer)
        except ValueError as e:
            # Mirror sklearn's error_score semantics: re-raise or substitute.
            if self.error_score == "raise":
                raise e
            score = self.error_score
        duration = time() - t_start
        return score, duration

    def _evaluate_learning_curve(self, dataset, pipelines):
        # Learning-curve evaluation: for each session, draw shuffled
        # train/test splits and score each pipeline on growing training
        # subsets; yields one result dict per (permutation, size, pipeline).
        for subject in dataset.subject_list:
            # check if we already have result for this subject/pipeline
            # we might need a better granularity, if we query the DB
            run_pipes = self.results.not_yet_computed(pipelines, dataset, subject)
            if len(run_pipes) == 0:
                continue
            # get the data
            X_all, y_all, metadata_all = self.paradigm.get_data(
                dataset, [subject], self.return_epochs
            )
            # shuffle_data = True if self.n_perms > 1 else False
            for session in np.unique(metadata_all.session):
                sess_idx = metadata_all.session == session
                X_sess = X_all[sess_idx]
                y_sess = y_all[sess_idx]
                # metadata_sess = metadata_all[sess_idx]
                sss = StratifiedShuffleSplit(
                    n_splits=self.n_perms[0], test_size=self.test_size
                )
                for perm_i, (train_idx, test_idx) in enumerate(sss.split(X_sess, y_sess)):
                    X_train_all = X_sess[train_idx]
                    y_train_all = y_sess[train_idx]
                    X_test = X_sess[test_idx]
                    y_test = y_sess[test_idx]
                    data_size_steps = self.get_data_size_subsets(y_train_all)
                    for di, subset_indices in enumerate(data_size_steps):
                        # n_perms is monotonically decreasing: later (larger)
                        # sizes may use fewer permutations.
                        if perm_i >= self.n_perms[di]:
                            continue
                        not_enough_data = False
                        log.info(
                            f"Permutation: {perm_i+1},"
                            f" Training samples: {len(subset_indices)}"
                        )
                        if self.return_epochs:
                            X_train = X_train_all[subset_indices]
                        else:
                            X_train = X_train_all[subset_indices, :]
                        y_train = y_train_all[subset_indices]
                        # metadata = metadata_perm[:subset_indices]
                        if len(np.unique(y_train)) < 2:
                            # Degenerate subset: record NaN instead of failing.
                            log.warning(
                                "For current data size, only one class" "would remain."
                            )
                            not_enough_data = True
                        nchan = (
                            X_train.info["nchan"]
                            if isinstance(X_train, BaseEpochs)
                            else X_train.shape[1]
                        )
                        for name, clf in run_pipes.items():
                            res = {
                                "dataset": dataset,
                                "subject": subject,
                                "session": session,
                                "n_samples": len(y_train),
                                "n_channels": nchan,
                                "pipeline": name,
                                # Additional columns
                                "data_size": len(subset_indices),
                                "permutation": perm_i + 1,
                            }
                            if not_enough_data:
                                res["time"] = 0
                                res["score"] = np.nan
                            else:
                                res["score"], res["time"] = self.score_explicit(
                                    deepcopy(clf), X_train, y_train, X_test, y_test
                                )
                            yield res

    def evaluate(self, dataset, pipelines):
        # Dispatch to learning-curve or conventional evaluation.
        if self.calculate_learning_curve:
            yield from self._evaluate_learning_curve(dataset, pipelines)
        else:
            yield from self._evaluate(dataset, pipelines)

    def is_valid(self, dataset):
        # Within-session evaluation applies to any dataset.
        return True
class CrossSessionEvaluation(BaseEvaluation):
    """Cross session Context.

    Evaluate performance of the pipeline across sessions but for a single
    subject. Verifies that sufficient sessions are there for this to be
    reasonable
    """

    def evaluate(self, dataset, pipelines):
        # Generator: leave-one-session-out CV per subject; yields one result
        # dict per (subject, pipeline, held-out session).
        if not self.is_valid(dataset):
            raise AssertionError("Dataset is not appropriate for evaluation")
        for subject in dataset.subject_list:
            # check if we already have result for this subject/pipeline
            # we might need a better granularity, if we query the DB
            run_pipes = self.results.not_yet_computed(pipelines, dataset, subject)
            if len(run_pipes) == 0:
                continue
            # get the data
            X, y, metadata = self.paradigm.get_data(
                dataset, [subject], self.return_epochs
            )
            le = LabelEncoder()
            y = y if self.mne_labels else le.fit_transform(y)
            # Sessions act as CV groups.
            groups = metadata.session.values
            scorer = get_scorer(self.paradigm.scoring)
            for name, clf in run_pipes.items():
                # we want to store a results per session
                cv = LeaveOneGroupOut()
                for train, test in cv.split(X, y, groups):
                    t_start = time()
                    if isinstance(X, BaseEpochs):
                        # mne Epochs: fit/score manually on the split.
                        cvclf = clone(clf)
                        cvclf.fit(X[train], y[train])
                        score = scorer(cvclf, X[test], y[test])
                    else:
                        # NOTE(review): _fit_and_score is a private sklearn
                        # API; its signature/return type varies across
                        # sklearn versions — pin the sklearn version.
                        score = _fit_and_score(
                            clone(clf),
                            X,
                            y,
                            scorer,
                            train,
                            test,
                            verbose=False,
                            parameters=None,
                            fit_params=None,
                            error_score=self.error_score,
                        )[0]
                    duration = time() - t_start
                    nchan = X.info["nchan"] if isinstance(X, BaseEpochs) else X.shape[1]
                    res = {
                        "time": duration,
                        "dataset": dataset,
                        "subject": subject,
                        "session": groups[test][0],
                        "score": score,
                        "n_samples": len(train),
                        "n_channels": nchan,
                        "pipeline": name,
                    }
                    yield res

    def is_valid(self, dataset):
        # Cross-session evaluation needs more than one session.
        return dataset.n_sessions > 1
class CrossSubjectEvaluation(BaseEvaluation):
    """Cross Subject evaluation Context.

    Evaluate performance of the pipeline trained on all subjects but one,
    concatenating sessions.
    """

    def evaluate(self, dataset, pipelines):
        # Generator: leave-one-subject-out CV; the held-out subject is scored
        # separately on each of its sessions.
        if not self.is_valid(dataset):
            raise AssertionError("Dataset is not appropriate for evaluation")
        # this is a bit akward, but we need to check if at least one pipe
        # have to be run before loading the data. If at least one pipeline
        # need to be run, we have to load all the data.
        # we might need a better granularity, if we query the DB
        run_pipes = {}
        for subject in dataset.subject_list:
            run_pipes.update(self.results.not_yet_computed(pipelines, dataset, subject))
        if len(run_pipes) != 0:
            # get the data
            X, y, metadata = self.paradigm.get_data(
                dataset, return_epochs=self.return_epochs
            )
            # encode labels
            le = LabelEncoder()
            y = y if self.mne_labels else le.fit_transform(y)
            # extract metadata
            groups = metadata.subject.values
            sessions = metadata.session.values
            scorer = get_scorer(self.paradigm.scoring)
            # perform leave one subject out CV
            cv = LeaveOneGroupOut()
            for train, test in cv.split(X, y, groups):
                subject = groups[test[0]]
                # now we can check if this subject has results
                run_pipes = self.results.not_yet_computed(pipelines, dataset, subject)
                # iterate over pipelines
                for name, clf in run_pipes.items():
                    t_start = time()
                    # deepcopy so the template pipeline is never mutated.
                    model = deepcopy(clf).fit(X[train], y[train])
                    duration = time() - t_start
                    # we eval on each session
                    for session in np.unique(sessions[test]):
                        ix = sessions[test] == session
                        score = _score(model, X[test[ix]], y[test[ix]], scorer)
                        nchan = (
                            X.info["nchan"] if isinstance(X, BaseEpochs) else X.shape[1]
                        )
                        res = {
                            "time": duration,
                            "dataset": dataset,
                            "subject": subject,
                            "session": session,
                            "score": score,
                            "n_samples": len(train),
                            "n_channels": nchan,
                            "pipeline": name,
                        }
                        yield res

    def is_valid(self, dataset):
        # Requires at least two subjects to leave one out.
        return len(dataset.subject_list) > 1
| [
"sklearn.model_selection._validation._score",
"numpy.full_like",
"copy.deepcopy",
"numpy.concatenate",
"sklearn.model_selection.cross_val_score",
"logging.getLogger",
"time.time",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.get_scorer",
"numpy.any",
"sklearn.model_selection.Stratified... | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((8108, 8141), 'sklearn.metrics.get_scorer', 'get_scorer', (['self.paradigm.scoring'], {}), '(self.paradigm.scoring)\n', (8118, 8141), False, 'from sklearn.metrics import get_scorer\n'), ((8160, 8166), 'time.time', 'time', ([], {}), '()\n', (8164, 8166), False, 'from time import time\n'), ((4286, 4313), 'numpy.unique', 'np.unique', (['metadata.session'], {}), '(metadata.session)\n', (4295, 4313), True, 'import numpy as np\n'), ((6492, 6525), 'numpy.array', 'np.array', (["self.data_size['value']"], {}), "(self.data_size['value'])\n", (6500, 6525), True, 'import numpy as np\n'), ((7986, 8000), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (7998, 8000), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((8246, 8283), 'sklearn.model_selection._validation._score', '_score', (['model', 'X_test', 'y_test', 'scorer'], {}), '(model, X_test, y_test, scorer)\n', (8252, 8283), False, 'from sklearn.model_selection._validation import _fit_and_score, _score\n'), ((8440, 8446), 'time.time', 'time', ([], {}), '()\n', (8444, 8446), False, 'from time import time\n'), ((9133, 9164), 'numpy.unique', 'np.unique', (['metadata_all.session'], {}), '(metadata_all.session)\n', (9142, 9164), True, 'import numpy as np\n'), ((13336, 13350), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (13348, 13350), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((13479, 13512), 'sklearn.metrics.get_scorer', 'get_scorer', (['self.paradigm.scoring'], {}), '(self.paradigm.scoring)\n', (13489, 13512), False, 'from sklearn.metrics import get_scorer\n'), ((16135, 16149), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (16147, 16149), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((16358, 16391), 'sklearn.metrics.get_scorer', 
'get_scorer', (['self.paradigm.scoring'], {}), '(self.paradigm.scoring)\n', (16368, 16391), False, 'from sklearn.metrics import get_scorer\n'), ((16457, 16475), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (16473, 16475), False, 'from sklearn.model_selection import LeaveOneGroupOut, StratifiedKFold, StratifiedShuffleSplit, cross_val_score\n'), ((2353, 2410), 'numpy.full_like', 'np.full_like', (["self.data_size['value']", 'n_perms'], {'dtype': 'int'}), "(self.data_size['value'], n_perms, dtype=int)\n", (2365, 2410), True, 'import numpy as np\n'), ((6541, 6557), 'numpy.any', 'np.any', (['(vals < 0)'], {}), '(vals < 0)\n', (6547, 6557), True, 'import numpy as np\n'), ((6561, 6577), 'numpy.any', 'np.any', (['(vals > 1)'], {}), '(vals > 1)\n', (6567, 6577), True, 'import numpy as np\n'), ((6922, 6934), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6931, 6934), True, 'import numpy as np\n'), ((9386, 9460), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'self.n_perms[0]', 'test_size': 'self.test_size'}), '(n_splits=self.n_perms[0], test_size=self.test_size)\n', (9408, 9460), False, 'from sklearn.model_selection import LeaveOneGroupOut, StratifiedKFold, StratifiedShuffleSplit, cross_val_score\n'), ((13640, 13658), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (13656, 13658), False, 'from sklearn.model_selection import LeaveOneGroupOut, StratifiedKFold, StratifiedShuffleSplit, cross_val_score\n'), ((4447, 4453), 'time.time', 'time', ([], {}), '()\n', (4451, 4453), False, 'from time import time\n'), ((4479, 4543), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {'shuffle': '(True)', 'random_state': 'self.random_state'}), '(5, shuffle=True, random_state=self.random_state)\n', (4494, 4543), False, 'from sklearn.model_selection import LeaveOneGroupOut, StratifiedKFold, StratifiedShuffleSplit, cross_val_score\n'), ((4570, 4584), 
'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4582, 4584), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((13748, 13754), 'time.time', 'time', ([], {}), '()\n', (13752, 13754), False, 'from time import time\n'), ((16848, 16854), 'time.time', 'time', ([], {}), '()\n', (16852, 16854), False, 'from time import time\n'), ((17051, 17076), 'numpy.unique', 'np.unique', (['sessions[test]'], {}), '(sessions[test])\n', (17060, 17076), True, 'import numpy as np\n'), ((2936, 2968), 'numpy.diff', 'np.diff', (["self.data_size['value']"], {}), "(self.data_size['value'])\n", (2943, 2968), True, 'import numpy as np\n'), ((4719, 4752), 'sklearn.metrics.get_scorer', 'get_scorer', (['self.paradigm.scoring'], {}), '(self.paradigm.scoring)\n', (4729, 4752), False, 'from sklearn.metrics import get_scorer\n'), ((5161, 5174), 'numpy.array', 'np.array', (['acc'], {}), '(acc)\n', (5169, 5174), True, 'import numpy as np\n'), ((5231, 5356), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'X[ix]', 'y_cv'], {'cv': 'cv', 'scoring': 'self.paradigm.scoring', 'n_jobs': 'self.n_jobs', 'error_score': 'self.error_score'}), '(clf, X[ix], y_cv, cv=cv, scoring=self.paradigm.scoring,\n n_jobs=self.n_jobs, error_score=self.error_score)\n', (5246, 5356), False, 'from sklearn.model_selection import LeaveOneGroupOut, StratifiedKFold, StratifiedShuffleSplit, cross_val_score\n'), ((5646, 5652), 'time.time', 'time', ([], {}), '()\n', (5650, 5652), False, 'from time import time\n'), ((6959, 6976), 'numpy.where', 'np.where', (['(cl == y)'], {}), '(cl == y)\n', (6967, 6976), True, 'import numpy as np\n'), ((7537, 7609), 'numpy.concatenate', 'np.concatenate', (['[classwise_indices[cl][:ds] for cl in classwise_indices]'], {}), '([classwise_indices[cl][:ds] for cl in classwise_indices])\n', (7551, 7609), True, 'import numpy as np\n'), ((13837, 13847), 'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (13842, 13847), False, 'from sklearn.base import 
clone\n'), ((14498, 14504), 'time.time', 'time', ([], {}), '()\n', (14502, 14504), False, 'from time import time\n'), ((16952, 16958), 'time.time', 'time', ([], {}), '()\n', (16956, 16958), False, 'from time import time\n'), ((17165, 17212), 'sklearn.model_selection._validation._score', '_score', (['model', 'X[test[ix]]', 'y[test[ix]]', 'scorer'], {}), '(model, X[test[ix]], y[test[ix]], scorer)\n', (17171, 17212), False, 'from sklearn.model_selection._validation import _fit_and_score, _score\n'), ((4986, 4996), 'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (4991, 4996), False, 'from sklearn.base import clone\n'), ((16883, 16896), 'copy.deepcopy', 'deepcopy', (['clf'], {}), '(clf)\n', (16891, 16896), False, 'from copy import deepcopy\n'), ((2692, 2708), 'numpy.diff', 'np.diff', (['n_perms'], {}), '(n_perms)\n', (2699, 2708), True, 'import numpy as np\n'), ((10641, 10659), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (10650, 10659), True, 'import numpy as np\n'), ((14068, 14078), 'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (14073, 14078), False, 'from sklearn.base import clone\n'), ((12022, 12035), 'copy.deepcopy', 'deepcopy', (['clf'], {}), '(clf)\n', (12030, 12035), False, 'from copy import deepcopy\n')] |
import numpy as np
def z_score(roa: float, capital_ratio: float, past_roas: np.ndarray) -> float:
    r"""Bank Z-score, a measure of insolvency risk.

    Defined as

    $$
    \text{Z-score} = \frac{\text{ROA}+\text{CAR}}{\sigma_{\text{ROA}}}
    $$

    where $\text{ROA}$ is the bank's ROA, $\text{CAR}$ its capital ratio, and
    $\sigma_{\text{ROA}}$ the standard deviation of past ROAs. A bank becomes
    insolvent when its loss exceeds equity, i.e. $P(-\pi>E)=P(-ROA>CAR)$;
    assuming normally distributed profits, scaling $(\text{ROA}+\text{CAR})$
    by $\sigma_{\text{ROA}}$ estimates the distance to insolvency, so a higher
    Z-score means larger shocks are needed to wipe out equity.

    Args:
        roa (float): the current bank ROA.
        capital_ratio (float): the current bank equity to asset ratio.
        past_roas (np.ndarray): (n_periods,) array of past bank ROAs used to
            calculate the standard deviation.

    Returns:
        float: The bank's Z-score

    Examples:
        >>> import numpy as np
        >>> z_score(roa=0.2, capital_ratio=0.5, past_roas=np.array([0.1, 0.2, 0.15, 0.18, 0.2]))
        18.549962900111296

    References:
        * Laeven and Levine (2009), Bank governance, regulation and risk
          taking, *Journal of Financial Economics*, 93, 2, 259-275.
        * Houston, Lin, Lin and Ma (2010), Creditor rights, information
          sharing, and bank risk taking, *Journal of Financial Economics*,
          96, 3, 485-512.
    """
    # Population standard deviation (np.std default, ddof=0) of past ROAs.
    roa_volatility = np.std(past_roas)
    return (roa + capital_ratio) / roa_volatility
| [
"numpy.std"
] | [((2518, 2535), 'numpy.std', 'np.std', (['past_roas'], {}), '(past_roas)\n', (2524, 2535), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
from functools import reduce
import torch
from sklearn.metrics import accuracy_score
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from lib.evaluation.sg_eval_all_rel_cates import BasicSceneGraphEvaluator as BasicSceneGraphEvaluator_rel
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE, DATA_PATH
import dill as pkl
import os
from collections import defaultdict
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from lib.pytorch_misc import load_reslayer4
size_index = np.load('/home/guoyuyu/guoyuyu/code/code_by_myself/scene_graph/dataset_analysis/size_index.npy')
AE_results = pkl.load(open('/home/guoyuyu/guoyuyu/code/code_by_other/neural-motifs_graphcnn/AE_loss_sgcls','rb'))
conf = ModelConfig()
if conf.model == 'motifnet':
from lib.models.rel_model_bert import RelModel
#from lib.rel_model_linknet import RelModel
#from lib.rel_model_complex_emb import RelModel
#from lib.rel_model_adj1 import RelModel
#from lib.rel_model_topgcn import RelModel
#from lib.ablation_study.rel_model_notop2 import RelModel
#from lib.ablation_study.rel_model_edgelstm_regfeat import RelModel
#from lib.rel_model_AE import RelModel
elif conf.model == 'stanford':
from lib.rel_model_stanford import RelModelStanford as RelModel
else:
raise ValueError()
train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
use_proposals=conf.use_proposals,
filter_non_overlap=conf.mode == 'sgdet',
keep_pred=conf.keep_pred)
if conf.test:
val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj,
nh_obj=conf.nh_edge, nh_edge=conf.nh_edge,
nl_adj=conf.nl_adj,
hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pass_in_obj_feats_to_gcn=conf.pass_in_obj_feats_to_gcn,
pass_embed_togcn=conf.pass_embed_togcn,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
limit_vision=conf.limit_vision,
attention_dim=conf.attention_dim,
adj_embed_dim = conf.adj_embed_dim,
with_adj_mat=conf.with_adj_mat,
bg_num_graph=conf.bg_num_graph,
bg_num_rel=conf.bg_num_rel,
neg_time=conf.neg_time,
adj_embed=conf.adj_embed,
mean_union_feat=conf.mean_union_feat,
ch_res=conf.ch_res,
with_att=conf.with_att,
with_gcn=conf.with_gcn,
fb_thr=conf.fb_thr,
with_biliner_score=conf.with_biliner_score,
gcn_adj_type=conf.gcn_adj_type,
where_gcn=conf.where_gcn,
with_gt_adj_mat=conf.gt_adj_mat,
type_gcn=conf.type_gcn,
edge_ctx_type=conf.edge_ctx_type,
nms_union=conf.nms_union,
cosine_dis=conf.cosine_dis,
test_alpha=conf.test_alpha,
)
detector.cuda()
ckpt = torch.load(conf.ckpt)
if conf.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
print("Loading EVERYTHING")
start_epoch = ckpt['epoch']
if not optimistic_restore(detector, ckpt['state_dict']):
start_epoch = -1
# optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict'])
else:
start_epoch = -1
optimistic_restore(detector.detector, ckpt['state_dict'])
#print('detector: ',detector.detector)
# for i in ckpt['state_dict'].keys():
# if 'roi_fmap' in i:
# print('ckpt state_dict: ',i)
if conf.mode != 'detclass':
if not conf.use_resnet:
detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
else :
load_reslayer4(detector, ckpt, 3)
"""
if conf.use_resnet:
detector.compress[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight'])
detector.compress[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias'])
detector.compress[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight'])
detector.compress[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias'])
detector.union_boxes.compress_union[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight'])
detector.union_boxes.compress_union[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias'])
detector.union_boxes.compress_union[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight'])
detector.union_boxes.compress_union[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias'])
"""
#optimistic_restore(detector, ckpt['state_dict'])
# if conf.mode == 'sgdet':
# det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict']
# detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight'])
# detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias'])
# detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight'])
# detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias'])
all_pred_entries = []
all_TP_label_num = np.zeros([detector.num_classes])
all_label_num = np.zeros([detector.num_classes])
all_TP_size_num = np.zeros([size_index.shape[0]])
all_size_num = np.zeros([size_index.shape[0]])
all_pred_size = []
all_TP_rel_rel_num = np.zeros([detector.num_rels])
all_TP_rel_obj_num = np.zeros([detector.num_rels])
all_rel_num = np.zeros([detector.num_rels])
all_TP_pred_num = np.zeros([3, detector.num_rels])
all_pred_num = np.zeros([detector.num_rels])
all_pred_recall = []
def count_num(label_i):
for i in range(label_i.shape[0]):
all_label_num[label_i[i]] = all_label_num[label_i[i]] + 1
def TP_count_num(label_i, pred_i):
TP_labe_ind = ((label_i - pred_i) == 0)
TP_labe = TP_labe_ind * pred_i
for i in range(TP_labe.shape[0]):
if TP_labe_ind[i]:
all_TP_label_num[label_i[i]] = all_TP_label_num[label_i[i]] + 1
def count_size_num(boxes_i, image_size):
size_i = abs(boxes_i[:, 2] - boxes_i[:, 0]) * abs(boxes_i[:, 3] - boxes_i[:, 1])
size_i = size_i / (1.0 * image_size[0] * image_size[1])
for i in range(size_i.shape[0]):
ind = int((size_i[i] - size_index[0])/ (size_index[1]-size_index[0]))
all_size_num[ind] = all_size_num[ind] + 1
def TP_count_size_num(label_i, pred_i, boxes_i, image_size):
TP_labe_ind = ((label_i - pred_i) == 0)
size_i = abs(boxes_i[:, 2] - boxes_i[:, 0]) * abs(boxes_i[:, 3] - boxes_i[:, 1])
size_i = size_i / (1.0 * image_size[0] * image_size[1])
for i in range(TP_labe_ind.shape[0]):
if TP_labe_ind[i]:
ind = int((size_i[i] - size_index[0])/ (size_index[1]-size_index[0]))
all_TP_size_num[ind] = all_TP_size_num[ind] + 1
def TP_pred_recall_num(gt_rel_k, pred_to_gt_k):
i_TP_pred_num = np.zeros([3, detector.num_rels])
i_pred_num = np.zeros([detector.num_rels])
for gt_rels_i in gt_rel_k:
i_pred_num[gt_rels_i[2]] = i_pred_num[gt_rels_i[2]] + 1
all_pred_num[gt_rels_i[2]] = all_pred_num[gt_rels_i[2]] + 1
for k in evaluator[conf.mode].result_dict[conf.mode + '_recall']:
match = reduce(np.union1d, pred_to_gt_k[:k])
for j in match:
j = int(j)
if k==20:
thr_k=0
if k==50:
thr_k=1
if k==100:
thr_k=2
i_TP_pred_num[thr_k][gt_rel_k[j,2]] = i_TP_pred_num[thr_k][gt_rel_k[j,2]] + 1
all_TP_pred_num[thr_k][gt_rel_k[j,2]] = all_TP_pred_num[thr_k][gt_rel_k[j,2]] + 1
return i_TP_pred_num/(i_pred_num[None,:]+0.00001)
def list_rm_duplication(tri_list):
old_size = tri_list.shape[0]
all_rel_sets = defaultdict(list)
for (o0, o1, r) in tri_list:
all_rel_sets[(o0, o1)].append(r)
gt_rels = [(k[0], k[1], np.random.choice(v)) for k, v in all_rel_sets.items()]
gt_rels = np.array(gt_rels)
return gt_rels
def val_batch(batch_num, b, evaluator, evaluator_rel,thrs=(20, 50, 100)):
det_res = detector[b]
num_correct = 0
num_sample = 0
num_correct_adj_mat_rel = 0
num_correct_adj_mat_obj = 0
num_sample_adj_mat = 0
TP_sample_obj = 0
TP_sample_rel = 0
True_sample = 0
# the image size after resizing to IMAGE_SCALE (1024)
if conf.num_gpus == 1:
det_res = [det_res]
if conf.mode != 'detclass':
for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i, \
pred_adj_mat_rel_i, pred_adj_mat_obj_i, gt_adj_mat_i) in enumerate(det_res):
gt_entry = {
'gt_classes': val.gt_classes[batch_num + i].copy(),
'gt_relations': val.relationships[batch_num + i].copy(),
'gt_boxes': val.gt_boxes[batch_num + i].copy(),
'gt_adj_mat': gt_adj_mat_i.copy(),
}
assert np.all(objs_i[rels_i[:,0]] > 0) and np.all(objs_i[rels_i[:,1]] > 0)
# assert np.all(rels_i[:,2] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i,
'pred_adj_mat_rel': pred_adj_mat_rel_i,
'pred_adj_mat_obj': pred_adj_mat_obj_i,
}
all_pred_entries.append(pred_entry)
num_sample = num_sample + objs_i.shape[0]
num_sample_adj_mat = num_sample_adj_mat + gt_adj_mat_i.shape[0]
True_sample = True_sample + (gt_adj_mat_i==1).sum()
res_i = evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
res_i_rel = evaluator_rel[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
gt_rel_k = val.relationships[batch_num + i].copy()
pred_to_gt_k = res_i[0]
all_pred_recall.append(TP_pred_recall_num(gt_rel_k, pred_to_gt_k))
else :
for i, (boxes_i, objs_i, obj_scores_i) in enumerate(det_res):
img_size = b.im_sizes[0][0]
num_sample = num_sample + objs_i.shape[0]
num_correct = num_correct + np.sum((val.gt_classes[batch_num + i].copy() - objs_i)==0)
count_num(val.gt_classes[batch_num + i].copy())
TP_count_num(val.gt_classes[batch_num + i].copy(), objs_i)
count_size_num(val.gt_boxes[batch_num + i].copy() * (IM_SCALE / BOX_SCALE), img_size)
TP_count_size_num(val.gt_classes[batch_num + i].copy(), objs_i,
val.gt_boxes[batch_num + i].copy() * (IM_SCALE / BOX_SCALE),
img_size)
return num_correct, num_sample, num_correct_adj_mat_obj, num_correct_adj_mat_rel, num_sample_adj_mat, \
True_sample, TP_sample_obj, TP_sample_rel
evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred)
evaluator_rel = BasicSceneGraphEvaluator_rel.all_modes(multiple_preds=conf.multi_pred)
if conf.cache is not None and os.path.exists(conf.cache):
print("Found {}! Loading from it".format(conf.cache))
with open(conf.cache,'rb') as f:
all_pred_entries = pkl.load(f)
conf_mat = np.zeros([detector.num_rels,detector.num_rels])
for i, pred_entry in enumerate(tqdm(all_pred_entries)):
gt_entry = {
'gt_classes': val.gt_classes[i].copy(),
'gt_relations': val.relationships[i].copy(),
'gt_boxes': val.gt_boxes[i].copy(),
}
res_i = evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
res_i_rel = evaluator_rel[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
pred_prd_scores = pred_entry['rel_scores'][:, 1:]
pred_prd_labels = pred_prd_scores.argmax(-1) + 1
gt_labels_prd = val.relationships[i][:,2]
det_boxes_sbj = pred_entry['pred_boxes'][pred_entry['pred_rel_inds'][:,0]]
det_boxes_obj = pred_entry['pred_boxes'][pred_entry['pred_rel_inds'][:,1]]
gt_boxes_sbj = val.gt_boxes[i][val.relationships[i][:,0]]
gt_boxes_obj = val.gt_boxes[i][val.relationships[i][:,1]]
for i in range(len(pred_prd_labels)):
pred_prd_label = pred_prd_labels[i]
det_boxes_sbj_i = det_boxes_sbj[i]
det_boxes_sbj_i = det_boxes_sbj_i.astype(dtype=np.float32, copy=False)
det_boxes_obj_i = det_boxes_obj[i]
det_boxes_obj_i = det_boxes_obj_i.astype(dtype=np.float32, copy=False)
gt_boxes_sbj_t = gt_boxes_sbj.astype(dtype=np.float32, copy=False)
gt_boxes_obj_t = gt_boxes_obj.astype(dtype=np.float32, copy=False)
sub_iou = bbox_overlaps(det_boxes_sbj_i[None, :4], gt_boxes_sbj[:,:4])[0]
obj_iou = bbox_overlaps(det_boxes_obj_i[None, :4], gt_boxes_obj[:,:4])[0]
inds = (sub_iou >= 0.5) & (obj_iou >= 0.5)
max_iou = 0
max_id = -1
for j in range(len(inds)):
if inds[j]:
if sub_iou[j] >= 0.5 and obj_iou[j] >= 0.5:
if sub_iou[j] * obj_iou[j] >= max_iou:
max_iou = sub_iou[j] * obj_iou[j]
max_id = j
if max_id != -1:
gt_prd_label = gt_labels_prd[max_id]
else:
gt_prd_label = 0
conf_mat[gt_prd_label, pred_prd_label] = conf_mat[gt_prd_label, pred_prd_label] + 1
gt_rel_k = val.relationships[i].copy()
pred_to_gt_k = res_i[0]
#all_pred_recall.append(TP_pred_recall_num(gt_rel_k, pred_to_gt_k))
np.save('conf_mat.npy', conf_mat)
evaluator[conf.mode].print_stats()
evaluator_rel[conf.mode].print_stats()
save_path = conf.ckpt.split('vgre')[0]
file_name = conf.cache.split('/')[-1]
np.save(save_path +'/'+ file_name + 'all_pred_recall.npy', np.array(all_pred_recall).mean(0))
np.save(save_path +'/'+ file_name + 'all_pred_num.npy', all_pred_num)
np.save(save_path +'/'+ file_name + 'all_TP_pred_num.npy', all_TP_pred_num)
else:
detector.eval()
num_correct = 0
num_sample = 0
num_correct_adj_rel = 0
num_correct_adj_obj = 0
num_sample_adj = 0
True_sample = 0
TP_sample_obj = 0
TP_sample_rel = 0
for val_b, batch in enumerate(tqdm(val_loader)):
num_correct_i, num_sample_i, num_correct_adj_obj_i, num_correct_adj_rel_i, num_sample_adj_i, \
True_sample_i, TP_sample_obj_i, TP_sample_rel_i = val_batch(conf.num_gpus*val_b, batch,
evaluator,evaluator_rel)
num_correct = num_correct + num_correct_i
num_sample = num_sample + num_sample_i
num_correct_adj_rel = num_correct_adj_rel + num_correct_adj_rel_i
num_correct_adj_obj = num_correct_adj_obj + num_correct_adj_obj_i
num_sample_adj = num_sample_adj + num_sample_adj_i
True_sample = True_sample + True_sample_i
TP_sample_obj = TP_sample_obj + TP_sample_obj_i
TP_sample_rel = TP_sample_rel + TP_sample_rel_i
print('num_correct ',num_correct)
print('num_sample',num_sample)
print('obj acc:', (num_correct*1.0)/(num_sample*1.0))
print('adj rel sum:', (num_correct_adj_rel * 1.0))
print('adj obj sum:', (num_correct_adj_obj * 1.0))
print('adj rel acc:', (num_correct_adj_rel * 1.0) / (num_sample_adj * 1.0))
print('adj obj acc:', (num_correct_adj_obj * 1.0) / (num_sample_adj * 1.0))
print('TP adj rel recall:', (TP_sample_rel * 1.0) / (True_sample * 1.0))
print('TP adj obj recall:', (TP_sample_obj * 1.0) / (True_sample * 1.0))
evaluator[conf.mode].print_stats()
evaluator_rel[conf.mode].print_stats()
if conf.cache is not None:
with open(conf.cache,'wb') as f:
pkl.dump(all_pred_entries, f)
save_path = conf.ckpt.split('vgre')[0]
file_name = conf.cache.split('/')[-1]
np.save(save_path +'/'+ file_name + 'all_pred_recall.npy', np.array(all_pred_recall).mean(0))
np.save(save_path +'/'+ file_name + 'all_pred_num.npy', all_pred_num)
np.save(save_path +'/'+ file_name + 'all_TP_pred_num.npy', all_TP_pred_num)
np.save(save_path + '/all_rel_num.npy', all_rel_num)
np.save(save_path + '/all_TP_rel_rel_num.npy', all_TP_rel_rel_num)
np.save(save_path + '/all_TP_rel_obj_num.npy', all_TP_rel_obj_num)
np.save(save_path + '/label_recall.npy',all_TP_label_num / (1.0 * all_label_num))
np.save(save_path + '/size_recall.npy', all_TP_size_num / (1.0 * all_size_num))
np.save(save_path + '/all_TP_label_num.npy',all_TP_label_num)
np.save(save_path + '/all_label_num.npy', all_label_num)
np.save(save_path + '/all_TP_size_num.npy', all_TP_size_num)
np.save(save_path + '/all_size_num.npy', all_size_num)
| [
"numpy.load",
"dataloaders.visual_genome.VG.splits",
"lib.pytorch_misc.load_reslayer4",
"collections.defaultdict",
"lib.fpn.box_intersections_cpu.bbox.bbox_overlaps",
"torch.load",
"os.path.exists",
"dill.load",
"lib.pytorch_misc.optimistic_restore",
"numpy.random.choice",
"lib.evaluation.sg_eva... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((699, 805), 'numpy.load', 'np.load', (['"""/home/guoyuyu/guoyuyu/code/code_by_myself/scene_graph/dataset_analysis/size_index.npy"""'], {}), "(\n '/home/guoyuyu/guoyuyu/code/code_by_myself/scene_graph/dataset_analysis/size_index.npy'\n )\n", (706, 805), True, 'import numpy as np\n'), ((917, 930), 'config.ModelConfig', 'ModelConfig', ([], {}), '()\n', (928, 930), False, 'from config import ModelConfig\n'), ((1528, 1700), 'dataloaders.visual_genome.VG.splits', 'VG.splits', ([], {'num_val_im': 'conf.val_size', 'filter_duplicate_rels': '(True)', 'use_proposals': 'conf.use_proposals', 'filter_non_overlap': "(conf.mode == 'sgdet')", 'keep_pred': 'conf.keep_pred'}), "(num_val_im=conf.val_size, filter_duplicate_rels=True,\n use_proposals=conf.use_proposals, filter_non_overlap=conf.mode ==\n 'sgdet', keep_pred=conf.keep_pred)\n", (1537, 1700), False, 'from dataloaders.visual_genome import VGDataLoader, VG\n'), ((1827, 1956), 'dataloaders.visual_genome.VGDataLoader.splits', 'VGDataLoader.splits', (['train', 'val'], {'mode': '"""rel"""', 'batch_size': 'conf.batch_size', 'num_workers': 'conf.num_workers', 'num_gpus': 'conf.num_gpus'}), "(train, val, mode='rel', batch_size=conf.batch_size,\n num_workers=conf.num_workers, num_gpus=conf.num_gpus)\n", (1846, 1956), False, 'from dataloaders.visual_genome import VGDataLoader, VG\n'), ((2106, 3514), 'lib.rel_model_stanford.RelModelStanford', 'RelModel', ([], {'classes': 'train.ind_to_classes', 'rel_classes': 'train.ind_to_predicates', 'num_gpus': 'conf.num_gpus', 'mode': 'conf.mode', 'require_overlap_det': '(True)', 'use_resnet': 'conf.use_resnet', 'order': 'conf.order', 'nl_edge': 'conf.nl_edge', 'nl_obj': 'conf.nl_obj', 'nh_obj': 'conf.nh_edge', 'nh_edge': 'conf.nh_edge', 'nl_adj': 'conf.nl_adj', 'hidden_dim': 'conf.hidden_dim', 'use_proposals': 'conf.use_proposals', 
'pass_in_obj_feats_to_decoder': 'conf.pass_in_obj_feats_to_decoder', 'pass_in_obj_feats_to_edge': 'conf.pass_in_obj_feats_to_edge', 'pass_in_obj_feats_to_gcn': 'conf.pass_in_obj_feats_to_gcn', 'pass_embed_togcn': 'conf.pass_embed_togcn', 'pooling_dim': 'conf.pooling_dim', 'rec_dropout': 'conf.rec_dropout', 'use_bias': 'conf.use_bias', 'use_tanh': 'conf.use_tanh', 'limit_vision': 'conf.limit_vision', 'attention_dim': 'conf.attention_dim', 'adj_embed_dim': 'conf.adj_embed_dim', 'with_adj_mat': 'conf.with_adj_mat', 'bg_num_graph': 'conf.bg_num_graph', 'bg_num_rel': 'conf.bg_num_rel', 'neg_time': 'conf.neg_time', 'adj_embed': 'conf.adj_embed', 'mean_union_feat': 'conf.mean_union_feat', 'ch_res': 'conf.ch_res', 'with_att': 'conf.with_att', 'with_gcn': 'conf.with_gcn', 'fb_thr': 'conf.fb_thr', 'with_biliner_score': 'conf.with_biliner_score', 'gcn_adj_type': 'conf.gcn_adj_type', 'where_gcn': 'conf.where_gcn', 'with_gt_adj_mat': 'conf.gt_adj_mat', 'type_gcn': 'conf.type_gcn', 'edge_ctx_type': 'conf.edge_ctx_type', 'nms_union': 'conf.nms_union', 'cosine_dis': 'conf.cosine_dis', 'test_alpha': 'conf.test_alpha'}), '(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,\n num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,\n use_resnet=conf.use_resnet, order=conf.order, nl_edge=conf.nl_edge,\n nl_obj=conf.nl_obj, nh_obj=conf.nh_edge, nh_edge=conf.nh_edge, nl_adj=\n conf.nl_adj, hidden_dim=conf.hidden_dim, use_proposals=conf.\n use_proposals, pass_in_obj_feats_to_decoder=conf.\n pass_in_obj_feats_to_decoder, pass_in_obj_feats_to_edge=conf.\n pass_in_obj_feats_to_edge, pass_in_obj_feats_to_gcn=conf.\n pass_in_obj_feats_to_gcn, pass_embed_togcn=conf.pass_embed_togcn,\n pooling_dim=conf.pooling_dim, rec_dropout=conf.rec_dropout, use_bias=\n conf.use_bias, use_tanh=conf.use_tanh, limit_vision=conf.limit_vision,\n attention_dim=conf.attention_dim, adj_embed_dim=conf.adj_embed_dim,\n with_adj_mat=conf.with_adj_mat, bg_num_graph=conf.bg_num_graph,\n 
bg_num_rel=conf.bg_num_rel, neg_time=conf.neg_time, adj_embed=conf.\n adj_embed, mean_union_feat=conf.mean_union_feat, ch_res=conf.ch_res,\n with_att=conf.with_att, with_gcn=conf.with_gcn, fb_thr=conf.fb_thr,\n with_biliner_score=conf.with_biliner_score, gcn_adj_type=conf.\n gcn_adj_type, where_gcn=conf.where_gcn, with_gt_adj_mat=conf.gt_adj_mat,\n type_gcn=conf.type_gcn, edge_ctx_type=conf.edge_ctx_type, nms_union=\n conf.nms_union, cosine_dis=conf.cosine_dis, test_alpha=conf.test_alpha)\n', (2114, 3514), True, 'from lib.rel_model_stanford import RelModelStanford as RelModel\n'), ((4219, 4240), 'torch.load', 'torch.load', (['conf.ckpt'], {}), '(conf.ckpt)\n', (4229, 4240), False, 'import torch\n'), ((7022, 7054), 'numpy.zeros', 'np.zeros', (['[detector.num_classes]'], {}), '([detector.num_classes])\n', (7030, 7054), True, 'import numpy as np\n'), ((7071, 7103), 'numpy.zeros', 'np.zeros', (['[detector.num_classes]'], {}), '([detector.num_classes])\n', (7079, 7103), True, 'import numpy as np\n'), ((7122, 7153), 'numpy.zeros', 'np.zeros', (['[size_index.shape[0]]'], {}), '([size_index.shape[0]])\n', (7130, 7153), True, 'import numpy as np\n'), ((7169, 7200), 'numpy.zeros', 'np.zeros', (['[size_index.shape[0]]'], {}), '([size_index.shape[0]])\n', (7177, 7200), True, 'import numpy as np\n'), ((7242, 7271), 'numpy.zeros', 'np.zeros', (['[detector.num_rels]'], {}), '([detector.num_rels])\n', (7250, 7271), True, 'import numpy as np\n'), ((7293, 7322), 'numpy.zeros', 'np.zeros', (['[detector.num_rels]'], {}), '([detector.num_rels])\n', (7301, 7322), True, 'import numpy as np\n'), ((7338, 7367), 'numpy.zeros', 'np.zeros', (['[detector.num_rels]'], {}), '([detector.num_rels])\n', (7346, 7367), True, 'import numpy as np\n'), ((7388, 7420), 'numpy.zeros', 'np.zeros', (['[3, detector.num_rels]'], {}), '([3, detector.num_rels])\n', (7396, 7420), True, 'import numpy as np\n'), ((7436, 7465), 'numpy.zeros', 'np.zeros', (['[detector.num_rels]'], {}), '([detector.num_rels])\n', 
(7444, 7465), True, 'import numpy as np\n'), ((12890, 12956), 'lib.evaluation.sg_eval.BasicSceneGraphEvaluator.all_modes', 'BasicSceneGraphEvaluator.all_modes', ([], {'multiple_preds': 'conf.multi_pred'}), '(multiple_preds=conf.multi_pred)\n', (12924, 12956), False, 'from lib.evaluation.sg_eval import BasicSceneGraphEvaluator\n'), ((12973, 13043), 'lib.evaluation.sg_eval_all_rel_cates.BasicSceneGraphEvaluator.all_modes', 'BasicSceneGraphEvaluator_rel.all_modes', ([], {'multiple_preds': 'conf.multi_pred'}), '(multiple_preds=conf.multi_pred)\n', (13011, 13043), True, 'from lib.evaluation.sg_eval_all_rel_cates import BasicSceneGraphEvaluator as BasicSceneGraphEvaluator_rel\n'), ((4584, 4641), 'lib.pytorch_misc.optimistic_restore', 'optimistic_restore', (['detector.detector', "ckpt['state_dict']"], {}), "(detector.detector, ckpt['state_dict'])\n", (4602, 4641), False, 'from lib.pytorch_misc import optimistic_restore\n'), ((8754, 8786), 'numpy.zeros', 'np.zeros', (['[3, detector.num_rels]'], {}), '([3, detector.num_rels])\n', (8762, 8786), True, 'import numpy as np\n'), ((8804, 8833), 'numpy.zeros', 'np.zeros', (['[detector.num_rels]'], {}), '([detector.num_rels])\n', (8812, 8833), True, 'import numpy as np\n'), ((9634, 9651), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9645, 9651), False, 'from collections import defaultdict\n'), ((9823, 9840), 'numpy.array', 'np.array', (['gt_rels'], {}), '(gt_rels)\n', (9831, 9840), True, 'import numpy as np\n'), ((13074, 13100), 'os.path.exists', 'os.path.exists', (['conf.cache'], {}), '(conf.cache)\n', (13088, 13100), False, 'import os\n'), ((13251, 13299), 'numpy.zeros', 'np.zeros', (['[detector.num_rels, detector.num_rels]'], {}), '([detector.num_rels, detector.num_rels])\n', (13259, 13299), True, 'import numpy as np\n'), ((15730, 15763), 'numpy.save', 'np.save', (['"""conf_mat.npy"""', 'conf_mat'], {}), "('conf_mat.npy', conf_mat)\n", (15737, 15763), True, 'import numpy as np\n'), ((16033, 16104), 
'numpy.save', 'np.save', (["(save_path + '/' + file_name + 'all_pred_num.npy')", 'all_pred_num'], {}), "(save_path + '/' + file_name + 'all_pred_num.npy', all_pred_num)\n", (16040, 16104), True, 'import numpy as np\n'), ((16107, 16184), 'numpy.save', 'np.save', (["(save_path + '/' + file_name + 'all_TP_pred_num.npy')", 'all_TP_pred_num'], {}), "(save_path + '/' + file_name + 'all_TP_pred_num.npy', all_TP_pred_num)\n", (16114, 16184), True, 'import numpy as np\n'), ((18143, 18214), 'numpy.save', 'np.save', (["(save_path + '/' + file_name + 'all_pred_num.npy')", 'all_pred_num'], {}), "(save_path + '/' + file_name + 'all_pred_num.npy', all_pred_num)\n", (18150, 18214), True, 'import numpy as np\n'), ((18217, 18294), 'numpy.save', 'np.save', (["(save_path + '/' + file_name + 'all_TP_pred_num.npy')", 'all_TP_pred_num'], {}), "(save_path + '/' + file_name + 'all_TP_pred_num.npy', all_TP_pred_num)\n", (18224, 18294), True, 'import numpy as np\n'), ((18297, 18349), 'numpy.save', 'np.save', (["(save_path + '/all_rel_num.npy')", 'all_rel_num'], {}), "(save_path + '/all_rel_num.npy', all_rel_num)\n", (18304, 18349), True, 'import numpy as np\n'), ((18354, 18420), 'numpy.save', 'np.save', (["(save_path + '/all_TP_rel_rel_num.npy')", 'all_TP_rel_rel_num'], {}), "(save_path + '/all_TP_rel_rel_num.npy', all_TP_rel_rel_num)\n", (18361, 18420), True, 'import numpy as np\n'), ((18425, 18491), 'numpy.save', 'np.save', (["(save_path + '/all_TP_rel_obj_num.npy')", 'all_TP_rel_obj_num'], {}), "(save_path + '/all_TP_rel_obj_num.npy', all_TP_rel_obj_num)\n", (18432, 18491), True, 'import numpy as np\n'), ((18496, 18582), 'numpy.save', 'np.save', (["(save_path + '/label_recall.npy')", '(all_TP_label_num / (1.0 * all_label_num))'], {}), "(save_path + '/label_recall.npy', all_TP_label_num / (1.0 *\n all_label_num))\n", (18503, 18582), True, 'import numpy as np\n'), ((18582, 18661), 'numpy.save', 'np.save', (["(save_path + '/size_recall.npy')", '(all_TP_size_num / (1.0 * all_size_num))'], 
{}), "(save_path + '/size_recall.npy', all_TP_size_num / (1.0 * all_size_num))\n", (18589, 18661), True, 'import numpy as np\n'), ((18666, 18728), 'numpy.save', 'np.save', (["(save_path + '/all_TP_label_num.npy')", 'all_TP_label_num'], {}), "(save_path + '/all_TP_label_num.npy', all_TP_label_num)\n", (18673, 18728), True, 'import numpy as np\n'), ((18732, 18788), 'numpy.save', 'np.save', (["(save_path + '/all_label_num.npy')", 'all_label_num'], {}), "(save_path + '/all_label_num.npy', all_label_num)\n", (18739, 18788), True, 'import numpy as np\n'), ((18793, 18853), 'numpy.save', 'np.save', (["(save_path + '/all_TP_size_num.npy')", 'all_TP_size_num'], {}), "(save_path + '/all_TP_size_num.npy', all_TP_size_num)\n", (18800, 18853), True, 'import numpy as np\n'), ((18858, 18912), 'numpy.save', 'np.save', (["(save_path + '/all_size_num.npy')", 'all_size_num'], {}), "(save_path + '/all_size_num.npy', all_size_num)\n", (18865, 18912), True, 'import numpy as np\n'), ((4373, 4421), 'lib.pytorch_misc.optimistic_restore', 'optimistic_restore', (['detector', "ckpt['state_dict']"], {}), "(detector, ckpt['state_dict'])\n", (4391, 4421), False, 'from lib.pytorch_misc import optimistic_restore\n'), ((9085, 9121), 'functools.reduce', 'reduce', (['np.union1d', 'pred_to_gt_k[:k]'], {}), '(np.union1d, pred_to_gt_k[:k])\n', (9091, 9121), False, 'from functools import reduce\n'), ((13224, 13235), 'dill.load', 'pkl.load', (['f'], {}), '(f)\n', (13232, 13235), True, 'import dill as pkl\n'), ((13334, 13356), 'tqdm.tqdm', 'tqdm', (['all_pred_entries'], {}), '(all_pred_entries)\n', (13338, 13356), False, 'from tqdm import tqdm\n'), ((16425, 16441), 'tqdm.tqdm', 'tqdm', (['val_loader'], {}), '(val_loader)\n', (16429, 16441), False, 'from tqdm import tqdm\n'), ((5639, 5672), 'lib.pytorch_misc.load_reslayer4', 'load_reslayer4', (['detector', 'ckpt', '(3)'], {}), '(detector, ckpt, 3)\n', (5653, 5672), False, 'from lib.pytorch_misc import load_reslayer4\n'), ((9754, 9773), 'numpy.random.choice', 
'np.random.choice', (['v'], {}), '(v)\n', (9770, 9773), True, 'import numpy as np\n'), ((17925, 17954), 'dill.dump', 'pkl.dump', (['all_pred_entries', 'f'], {}), '(all_pred_entries, f)\n', (17933, 17954), True, 'import dill as pkl\n'), ((10781, 10813), 'numpy.all', 'np.all', (['(objs_i[rels_i[:, 0]] > 0)'], {}), '(objs_i[rels_i[:, 0]] > 0)\n', (10787, 10813), True, 'import numpy as np\n'), ((10817, 10849), 'numpy.all', 'np.all', (['(objs_i[rels_i[:, 1]] > 0)'], {}), '(objs_i[rels_i[:, 1]] > 0)\n', (10823, 10849), True, 'import numpy as np\n'), ((14794, 14855), 'lib.fpn.box_intersections_cpu.bbox.bbox_overlaps', 'bbox_overlaps', (['det_boxes_sbj_i[None, :4]', 'gt_boxes_sbj[:, :4]'], {}), '(det_boxes_sbj_i[None, :4], gt_boxes_sbj[:, :4])\n', (14807, 14855), False, 'from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps\n'), ((14880, 14941), 'lib.fpn.box_intersections_cpu.bbox.bbox_overlaps', 'bbox_overlaps', (['det_boxes_obj_i[None, :4]', 'gt_boxes_obj[:, :4]'], {}), '(det_boxes_obj_i[None, :4], gt_boxes_obj[:, :4])\n', (14893, 14941), False, 'from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps\n'), ((15994, 16019), 'numpy.array', 'np.array', (['all_pred_recall'], {}), '(all_pred_recall)\n', (16002, 16019), True, 'import numpy as np\n'), ((18104, 18129), 'numpy.array', 'np.array', (['all_pred_recall'], {}), '(all_pred_recall)\n', (18112, 18129), True, 'import numpy as np\n')] |
"""Return a scalar type which is common to the input arrays."""
import numpy
import numpoly
from .common import implements
@implements(numpy.common_type)
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
The return type will always be an inexact (i.e. floating point) scalar
type, even if all the arrays are integer arrays. If one of the inputs is an
integer array, the minimum precision type that is returned is a 64-bit
floating point dtype.
All input arrays except int64 and uint64 can be safely cast to the
returned dtype without loss of information.
Args:
arrays (numpoly.ndpoly):
Input arrays.
Return:
out (numpy.generic):
Data type code.
Examples:
>>> numpoly.common_type(
... numpy.array(2, dtype=numpy.float32)).__name__
'float32'
>>> numpoly.common_type(
... numpoly.symbols("x")).__name__
'float64'
>>> numpoly.common_type(
... numpy.arange(3), 1j*numpoly.symbols("x"), 45).__name__
'complex128'
"""
arrays = [numpoly.aspolynomial(array) for array in arrays]
arrays = [array[array.keys[0]] for array in arrays]
return numpy.common_type(*arrays)
| [
"numpoly.aspolynomial",
"numpy.common_type"
] | [((1260, 1286), 'numpy.common_type', 'numpy.common_type', (['*arrays'], {}), '(*arrays)\n', (1277, 1286), False, 'import numpy\n'), ((1144, 1171), 'numpoly.aspolynomial', 'numpoly.aspolynomial', (['array'], {}), '(array)\n', (1164, 1171), False, 'import numpoly\n')] |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import cv2
import numpy as np
import tensorflow as tf
def preprocess(image):
image = image.astype('float32')
R_MEAN = 123.68
G_MEAN = 116.78
B_MEAN = 103.94
mean = np.array([B_MEAN, G_MEAN, R_MEAN], dtype=np.float32)
std = np.array([1.0, 1.0, 1.0], dtype=np.float32)
image = (image - mean) / std
return image
def get_config(key=None, default_value=None):
if not key:
raise ValueError("Please assign a key.")
if not default_value:
raise ValueEror("Please assign a default_value")
config = os.environ
if key in config:
value = config[key]
print("Get {} from env: {}".format(key, value))
return value
else:
print("Fail to get {} from env, use default value {}".format(
key, default_value))
return default_value
calib_image_dir = get_config(
key="CALIB_IMAGE_DIR",
default_value="../../data/EDD/images/")
calib_image_list = get_config(
key="CALIB_IMAGE_LIST",
default_value="../../data/EDD/val_image_list.txt")
calib_batch_size = int(get_config(key="CALIB_BATCH_SIZE", default_value=50))
input_height = int(get_config(key="INPUT_HEIGHT", default_value=320))
input_width = int(get_config(key="INPUT_WIDTH", default_value=320))
def calib_input(iter):
images = []
line = open(calib_image_list).readlines()
with tf.Graph().as_default():
for index in range(0, calib_batch_size):
curline = line[iter * calib_batch_size + index]
calib_image_name = curline.strip()
image_path = os.path.join(calib_image_dir, calib_image_name + ".jpg")
image = cv2.imread(image_path)
image = np.array(cv2.resize(image, (input_height, input_width)))
image = preprocess(image)
images.append(image)
return {"image": images}
| [
"cv2.imread",
"numpy.array",
"tensorflow.Graph",
"os.path.join",
"cv2.resize"
] | [((773, 825), 'numpy.array', 'np.array', (['[B_MEAN, G_MEAN, R_MEAN]'], {'dtype': 'np.float32'}), '([B_MEAN, G_MEAN, R_MEAN], dtype=np.float32)\n', (781, 825), True, 'import numpy as np\n'), ((834, 877), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([1.0, 1.0, 1.0], dtype=np.float32)\n', (842, 877), True, 'import numpy as np\n'), ((2078, 2134), 'os.path.join', 'os.path.join', (['calib_image_dir', "(calib_image_name + '.jpg')"], {}), "(calib_image_dir, calib_image_name + '.jpg')\n", (2090, 2134), False, 'import os\n'), ((2149, 2171), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2159, 2171), False, 'import cv2\n'), ((1894, 1904), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1902, 1904), True, 'import tensorflow as tf\n'), ((2195, 2241), 'cv2.resize', 'cv2.resize', (['image', '(input_height, input_width)'], {}), '(image, (input_height, input_width))\n', (2205, 2241), False, 'import cv2\n')] |
#!/usr/bin/env python
"order triplets by the sum of their two elements"
import numpy as np
from keras.layers import LSTM, Input
from keras.models import Model
from keras.utils.np_utils import to_categorical
from PointerLSTM import PointerLSTM
#
x_file = 'data/x_sums.csv'
y_file = 'data/y_sums.csv'
split_at = 9000
batch_size = 100
hidden_size = 100
weights_file = 'model_weights/model_weights_sums_{}.hdf5'.format(hidden_size)
n_steps = 3
n_features = 2
#
x = np.loadtxt(x_file, delimiter=',', dtype=int)
y = np.loadtxt(y_file, delimiter=',', dtype=int)
x = x.reshape(x.shape[0], n_steps, -1)
assert (x.shape[-1] == n_features)
YY = []
for y_ in y:
YY.append(to_categorical(y_))
YY = np.asarray(YY)
x_train = x[:split_at]
x_test = x[split_at:]
y_train = y[:split_at]
y_test = y[split_at:]
YY_train = YY[:split_at]
YY_test = YY[split_at:]
#
print("building model...")
main_input = Input(shape=(x.shape[1], x.shape[2]), name='main_input')
encoder = LSTM(output_dim=hidden_size, return_sequences=True, name="encoder")(main_input)
decoder = PointerLSTM(hidden_size, output_dim=hidden_size, name="decoder")(encoder)
model = Model(input=main_input, output=decoder)
print(("loading weights from {}...".format(weights_file)))
try:
model.load_weights(weights_file)
except IOError:
print("no weights file, starting anew.")
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('training and saving model weights each epoch...')
validation_data = (x_test, YY_test)
history = model.fit(x_train, YY_train, nb_epoch=1, batch_size=batch_size,
validation_data=validation_data)
p = model.predict(x_test)
for y_, p_ in list(zip(y_test, p))[:5]:
print(("y_test:", y_))
print(("p: ", p_.argmax(axis=1)))
print()
model.save_weights(weights_file)
| [
"keras.layers.LSTM",
"numpy.asarray",
"keras.models.Model",
"PointerLSTM.PointerLSTM",
"keras.utils.np_utils.to_categorical",
"numpy.loadtxt",
"keras.layers.Input"
] | [((472, 516), 'numpy.loadtxt', 'np.loadtxt', (['x_file'], {'delimiter': '""","""', 'dtype': 'int'}), "(x_file, delimiter=',', dtype=int)\n", (482, 516), True, 'import numpy as np\n'), ((521, 565), 'numpy.loadtxt', 'np.loadtxt', (['y_file'], {'delimiter': '""","""', 'dtype': 'int'}), "(y_file, delimiter=',', dtype=int)\n", (531, 565), True, 'import numpy as np\n'), ((702, 716), 'numpy.asarray', 'np.asarray', (['YY'], {}), '(YY)\n', (712, 716), True, 'import numpy as np\n'), ((903, 959), 'keras.layers.Input', 'Input', ([], {'shape': '(x.shape[1], x.shape[2])', 'name': '"""main_input"""'}), "(shape=(x.shape[1], x.shape[2]), name='main_input')\n", (908, 959), False, 'from keras.layers import LSTM, Input\n'), ((1144, 1183), 'keras.models.Model', 'Model', ([], {'input': 'main_input', 'output': 'decoder'}), '(input=main_input, output=decoder)\n', (1149, 1183), False, 'from keras.models import Model\n'), ((971, 1038), 'keras.layers.LSTM', 'LSTM', ([], {'output_dim': 'hidden_size', 'return_sequences': '(True)', 'name': '"""encoder"""'}), "(output_dim=hidden_size, return_sequences=True, name='encoder')\n", (975, 1038), False, 'from keras.layers import LSTM, Input\n'), ((1061, 1125), 'PointerLSTM.PointerLSTM', 'PointerLSTM', (['hidden_size'], {'output_dim': 'hidden_size', 'name': '"""decoder"""'}), "(hidden_size, output_dim=hidden_size, name='decoder')\n", (1072, 1125), False, 'from PointerLSTM import PointerLSTM\n'), ((677, 695), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_'], {}), '(y_)\n', (691, 695), False, 'from keras.utils.np_utils import to_categorical\n')] |
""" Unit tests for skycomponents
"""
import logging
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from processing_components.image.operations import export_image_to_fits
from processing_components.imaging.base import predict_2d, invert_2d
from processing_components.imaging.base import predict_skycomponent_visibility
from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent
from processing_components.simulation.testing_support import create_test_image, create_named_configuration
from processing_components.visibility.base import create_visibility
log = logging.getLogger(__name__)
class TestSkycomponentInsert(unittest.TestCase):
def setUp(self):
from data_models.parameters import arl_path
self.lowcore = create_named_configuration('LOWBD2-CORE')
self.dir = arl_path('test_results')
self.times = (numpy.pi / 12.0) * numpy.linspace(-3.0, 3.0, 7)
self.image_frequency = numpy.linspace(0.9e8, 1.1e8, 5)
self.component_frequency = numpy.linspace(0.8e8, 1.2e8, 7)
self.channel_bandwidth = numpy.array(5*[1e7])
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
self.vis = create_visibility(self.lowcore, self.times, self.image_frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame('stokesI'), zerow=True)
self.vis.data['vis'] *= 0.0
# Create model
self.model = create_test_image(cellsize=0.0015, phasecentre=self.vis.phasecentre, frequency=self.image_frequency)
self.model.data[self.model.data > 1.0] = 1.0
self.vis = predict_2d(self.vis, self.model)
assert numpy.max(numpy.abs(self.vis.vis)) > 0.0
dphasecentre = SkyCoord(ra=+181.0 * u.deg, dec=-58.0 * u.deg, frame='icrs', equinox='J2000')
flux = [[numpy.power(f/1e8, -0.7)] for f in self.component_frequency]
self.sc = create_skycomponent(direction=dphasecentre, flux=flux,
frequency=self.component_frequency,
polarisation_frame=PolarisationFrame('stokesI'))
def test_insert_skycomponent_FFT(self):
self.model.data *= 0.0
self.sc = create_skycomponent(direction=self.phasecentre, flux=self.sc.flux,
frequency=self.component_frequency,
polarisation_frame=PolarisationFrame('stokesI'))
insert_skycomponent(self.model, self.sc)
npixel = self.model.shape[3]
# WCS is 1-relative
rpix = numpy.round(self.model.wcs.wcs.crpix).astype('int') - 1
assert rpix[0] == npixel // 2
assert rpix[1] == npixel // 2
# The phase centre is at rpix[0], rpix[1] in 0-relative pixels
assert self.model.data[2, 0, rpix[1], rpix[0]] == 1.0
# If we predict the visibility, then the imaginary part must be zero. This is determined entirely
# by shift_vis_to_image in processing_library.imaging.base
self.vis.data['vis'][...] = 0.0
self.vis = predict_2d(self.vis, self.model)
# The actual phase centre of a numpy FFT is at nx //2, nx //2 (0 rel).
assert numpy.max(numpy.abs(self.vis.vis.imag)) <1e-3
def test_insert_skycomponent_dft(self):
self.sc = create_skycomponent(direction=self.phasecentre, flux=self.sc.flux,
frequency=self.component_frequency,
polarisation_frame=PolarisationFrame('stokesI'))
self.vis.data['vis'][...] = 0.0
self.vis = predict_skycomponent_visibility(self.vis, self.sc)
im, sumwt = invert_2d(self.vis, self.model)
export_image_to_fits(im, '%s/test_skycomponent_dft.fits' % self.dir)
assert numpy.max(numpy.abs(self.vis.vis.imag)) < 1e-3
def test_insert_skycomponent_nearest(self):
self.model.data *= 0.0
insert_skycomponent(self.model, self.sc, insert_method='Nearest')
# These test a regression but are not known a priori to be correct
self.assertAlmostEqual(self.model.data[2, 0, 151, 122], 1.0, 7)
self.assertAlmostEqual(self.model.data[2, 0, 152, 122], 0.0, 7)
def test_insert_skycomponent_sinc(self):
self.model.data *= 0.0
insert_skycomponent(self.model, self.sc, insert_method='Sinc')
# These test a regression but are not known a priori to be correct
self.assertAlmostEqual(self.model.data[2, 0, 151, 122], 0.87684398703184396, 7)
self.assertAlmostEqual(self.model.data[2, 0, 152, 122], 0.2469311811046056, 7)
def test_insert_skycomponent_sinc_bandwidth(self):
self.model.data *= 0.0
insert_skycomponent(self.model, self.sc, insert_method='Sinc', bandwidth=0.5)
# These test a regression but are not known a priori to be correct
self.assertAlmostEqual(self.model.data[2, 0, 151, 122], 0.25133066186805758, 7)
self.assertAlmostEqual(self.model.data[2, 0, 152, 122], 0.19685222464041874, 7)
def test_insert_skycomponent_lanczos(self):
self.model.data *= 0.0
insert_skycomponent(self.model, self.sc, insert_method='Lanczos')
# These test a regression but are not known a priori to be correct
self.assertAlmostEqual(self.model.data[2, 0, 151, 122], 0.87781267543090036, 7)
self.assertAlmostEqual(self.model.data[2, 0, 152, 122], 0.23817562762032077, 7)
def test_insert_skycomponent_lanczos_bandwidth(self):
self.model.data *= 0.0
insert_skycomponent(self.model, self.sc, insert_method='Lanczos', bandwidth=0.5)
# These test a regression but are not known a priori to be correct
self.assertAlmostEqual(self.model.data[2, 0, 151, 122], 0.24031092091707615, 7)
self.assertAlmostEqual(self.model.data[2, 0, 152, 122], 0.18648989466050975, 7)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"processing_components.simulation.testing_support.create_named_configuration",
"processing_components.image.operations.export_image_to_fits",
"numpy.abs",
"processing_components.simulation.testing_support.create_test_image",
"processing_components.imaging.base.predict_skycomponent_visibil... | [((709, 736), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (726, 736), False, 'import logging\n'), ((6247, 6262), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6260, 6262), False, 'import unittest\n'), ((884, 925), 'processing_components.simulation.testing_support.create_named_configuration', 'create_named_configuration', (['"""LOWBD2-CORE"""'], {}), "('LOWBD2-CORE')\n", (910, 925), False, 'from processing_components.simulation.testing_support import create_test_image, create_named_configuration\n'), ((945, 969), 'data_models.parameters.arl_path', 'arl_path', (['"""test_results"""'], {}), "('test_results')\n", (953, 969), False, 'from data_models.parameters import arl_path\n'), ((1071, 1113), 'numpy.linspace', 'numpy.linspace', (['(90000000.0)', '(110000000.0)', '(5)'], {}), '(90000000.0, 110000000.0, 5)\n', (1085, 1113), False, 'import numpy\n'), ((1138, 1180), 'numpy.linspace', 'numpy.linspace', (['(80000000.0)', '(120000000.0)', '(7)'], {}), '(80000000.0, 120000000.0, 7)\n', (1152, 1180), False, 'import numpy\n'), ((1203, 1232), 'numpy.array', 'numpy.array', (['(5 * [10000000.0])'], {}), '(5 * [10000000.0])\n', (1214, 1232), False, 'import numpy\n'), ((1251, 1328), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(+180.0 * u.deg)', 'dec': '(-60.0 * u.deg)', 'frame': '"""icrs"""', 'equinox': '"""J2000"""'}), "(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')\n", (1259, 1328), False, 'from astropy.coordinates import SkyCoord\n'), ((1759, 1863), 'processing_components.simulation.testing_support.create_test_image', 'create_test_image', ([], {'cellsize': '(0.0015)', 'phasecentre': 'self.vis.phasecentre', 'frequency': 'self.image_frequency'}), '(cellsize=0.0015, phasecentre=self.vis.phasecentre,\n frequency=self.image_frequency)\n', (1776, 1863), False, 'from processing_components.simulation.testing_support import create_test_image, 
create_named_configuration\n'), ((1932, 1964), 'processing_components.imaging.base.predict_2d', 'predict_2d', (['self.vis', 'self.model'], {}), '(self.vis, self.model)\n', (1942, 1964), False, 'from processing_components.imaging.base import predict_2d, invert_2d\n'), ((2053, 2130), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(+181.0 * u.deg)', 'dec': '(-58.0 * u.deg)', 'frame': '"""icrs"""', 'equinox': '"""J2000"""'}), "(ra=+181.0 * u.deg, dec=-58.0 * u.deg, frame='icrs', equinox='J2000')\n", (2061, 2130), False, 'from astropy.coordinates import SkyCoord\n'), ((2784, 2824), 'processing_components.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.sc'], {}), '(self.model, self.sc)\n', (2803, 2824), False, 'from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent\n'), ((3402, 3434), 'processing_components.imaging.base.predict_2d', 'predict_2d', (['self.vis', 'self.model'], {}), '(self.vis, self.model)\n', (3412, 3434), False, 'from processing_components.imaging.base import predict_2d, invert_2d\n'), ((3922, 3972), 'processing_components.imaging.base.predict_skycomponent_visibility', 'predict_skycomponent_visibility', (['self.vis', 'self.sc'], {}), '(self.vis, self.sc)\n', (3953, 3972), False, 'from processing_components.imaging.base import predict_skycomponent_visibility\n'), ((3993, 4024), 'processing_components.imaging.base.invert_2d', 'invert_2d', (['self.vis', 'self.model'], {}), '(self.vis, self.model)\n', (4002, 4024), False, 'from processing_components.imaging.base import predict_2d, invert_2d\n'), ((4033, 4101), 'processing_components.image.operations.export_image_to_fits', 'export_image_to_fits', (['im', "('%s/test_skycomponent_dft.fits' % self.dir)"], {}), "(im, '%s/test_skycomponent_dft.fits' % self.dir)\n", (4053, 4101), False, 'from processing_components.image.operations import export_image_to_fits\n'), ((4256, 4321), 
'processing_components.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.sc'], {'insert_method': '"""Nearest"""'}), "(self.model, self.sc, insert_method='Nearest')\n", (4275, 4321), False, 'from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent\n'), ((4630, 4692), 'processing_components.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.sc'], {'insert_method': '"""Sinc"""'}), "(self.model, self.sc, insert_method='Sinc')\n", (4649, 4692), False, 'from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent\n'), ((5042, 5119), 'processing_components.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.sc'], {'insert_method': '"""Sinc"""', 'bandwidth': '(0.5)'}), "(self.model, self.sc, insert_method='Sinc', bandwidth=0.5)\n", (5061, 5119), False, 'from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent\n'), ((5463, 5528), 'processing_components.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.sc'], {'insert_method': '"""Lanczos"""'}), "(self.model, self.sc, insert_method='Lanczos')\n", (5482, 5528), False, 'from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent\n'), ((5882, 5967), 'processing_components.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.sc'], {'insert_method': '"""Lanczos"""', 'bandwidth': '(0.5)'}), "(self.model, self.sc, insert_method='Lanczos', bandwidth=0.5\n )\n", (5901, 5967), False, 'from processing_components.skycomponent.operations import insert_skycomponent, create_skycomponent\n'), ((1011, 1039), 'numpy.linspace', 'numpy.linspace', (['(-3.0)', '(3.0)', '(7)'], {}), '(-3.0, 3.0, 7)\n', (1025, 1039), False, 'import numpy\n'), ((1628, 1656), 
'data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (1645, 1656), False, 'from data_models.polarisation import PolarisationFrame\n'), ((1990, 2013), 'numpy.abs', 'numpy.abs', (['self.vis.vis'], {}), '(self.vis.vis)\n', (1999, 2013), False, 'import numpy\n'), ((2148, 2182), 'numpy.power', 'numpy.power', (['(f / 100000000.0)', '(-0.7)'], {}), '(f / 100000000.0, -0.7)\n', (2159, 2182), False, 'import numpy\n'), ((2409, 2437), 'data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (2426, 2437), False, 'from data_models.polarisation import PolarisationFrame\n'), ((2745, 2773), 'data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (2762, 2773), False, 'from data_models.polarisation import PolarisationFrame\n'), ((3539, 3567), 'numpy.abs', 'numpy.abs', (['self.vis.vis.imag'], {}), '(self.vis.vis.imag)\n', (3548, 3567), False, 'import numpy\n'), ((3832, 3860), 'data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (3849, 3860), False, 'from data_models.polarisation import PolarisationFrame\n'), ((4127, 4155), 'numpy.abs', 'numpy.abs', (['self.vis.vis.imag'], {}), '(self.vis.vis.imag)\n', (4136, 4155), False, 'import numpy\n'), ((2905, 2942), 'numpy.round', 'numpy.round', (['self.model.wcs.wcs.crpix'], {}), '(self.model.wcs.wcs.crpix)\n', (2916, 2942), False, 'import numpy\n')] |
import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal diffence value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
| [
"scipy.optimize.minimize",
"random.randint",
"numpy.zeros",
"numpy.array",
"multiprocessing.Pool",
"numpy.dot",
"datetime.datetime.now"
] | [((1492, 1504), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1500, 1504), True, 'import numpy as np\n'), ((2290, 2316), 'numpy.dot', 'np.dot', (['r', 'self.data[m][k]'], {}), '(r, self.data[m][k])\n', (2296, 2316), True, 'import numpy as np\n'), ((3264, 3281), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (3268, 3281), False, 'from multiprocessing import Pool\n'), ((4297, 4314), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (4301, 4314), False, 'from multiprocessing import Pool\n'), ((4991, 5151), 'scipy.optimize.minimize', 'minimize', (['self.optimized_func', 'r0'], {'method': '"""BFGS"""', 'jac': 'self.optimized_func_der', 'options': "{'maxiter': self.max_iter, 'disp': True}", 'callback': 'self.callback'}), "(self.optimized_func, r0, method='BFGS', jac=self.\n optimized_func_der, options={'maxiter': self.max_iter, 'disp': True},\n callback=self.callback)\n", (4999, 5151), False, 'from scipy.optimize import minimize\n'), ((4893, 4916), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (4907, 4916), False, 'import random\n'), ((1985, 2016), 'numpy.zeros', 'np.zeros', (['(self.num_features + 1)'], {}), '(self.num_features + 1)\n', (1993, 2016), True, 'import numpy as np\n'), ((4795, 4809), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4807, 4809), False, 'from datetime import datetime\n'), ((4634, 4648), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4646, 4648), False, 'from datetime import datetime\n')] |
'''
Plot likelihood approximation along training
'''
# Modules
# =======================================================================================================================
import os
import sys
import shutil
import subprocess
import tqdm
import numpy as np
import pandas as pd
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import probcox as pcox
dtype = torch.FloatTensor
np.random.seed(5256)
torch.manual_seed(9235)
#os.chdir('/nfs/nobackup/gerstung/awj/projects/ProbCox/')
#os.chdir('/Users/alexwjung/projects/ProbCox/paper/ProbCox/')
os.chdir('/nfs/research/gerstung/awj/projects/ProbCox/paper/ProbCox')
# Plot Settings
# =======================================================================================================================
plt.rcParams['font.size'] = 7
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
cm = 1/2.54
# Simulation Settings
# =======================================================================================================================
P_binary=3
P_continuous=3
P = P_binary + P_continuous
theta = np.random.uniform(-1.5, 1.5, (P, 1))
scale=1.5
I = 1000 # individuals
batchsize = 1024
iter_ = 5000
eta = 0.01
# Simulation Data
# =======================================================================================================================
TVC = pcox.TVC(theta=theta, P_binary=P_continuous, P_continuous=P_continuous, dtype=dtype)
TVC.make_lambda0(scale=scale)
surv = torch.zeros((0, 3))
X = torch.zeros((0, 6))
for __ in (range(I)):
a, b = TVC.sample()
surv = torch.cat((surv, a))
X = torch.cat((X, b))
total_obs = surv.shape[0]
total_events = torch.sum(surv[:, -1] == 1).numpy().tolist()
sampling_proportion = [total_obs, batchsize, total_events, None]
# Inference
# =======================================================================================================================
pyro.clear_param_store()
m = pcox.PCox(sampling_proportion=sampling_proportion)
m.initialize(eta=eta)
loss=[0]
LL_full = []
LL_batch = []
LL_naive = []
for ii in tqdm.tqdm(range((iter_))):
idx = np.random.choice(range(surv.shape[0]), batchsize, replace=False)
data=[surv, X]
if torch.sum(surv[idx][:, -1]) > 0:
loss.append(m.infer(data=data))
if loss[-1] != loss[-1]:
eta = eta * 0.5
run=True
break
g = m.return_guide()
out = g.quantiles([0.5])
theta_est = out['theta'][0].detach()
with torch.no_grad():
pred = torch.mm(X, theta_est).type(dtype)
LL_full.append(pcox.CoxPartialLikelihood(pred=pred, sampling_proportion=None).log_prob(surv=surv).detach().numpy())
LL_batch.append(pcox.CoxPartialLikelihood(pred=pred[idx], sampling_proportion=[total_obs, batchsize, total_events, torch.sum(surv[idx, -1]).numpy().tolist()]).log_prob(surv=surv[idx]).detach().numpy())
LL_naive.append(pcox.CoxPartialLikelihood(pred=pred[idx], sampling_proportion=None).log_prob(surv=surv[idx]).detach().numpy() * (total_obs/batchsize))
m_est = pcox.CoxPartialLikelihood(pred=pred, sampling_proportion=None).log_prob(surv=surv).detach().numpy()
m_approx = []
m_approx_naive= []
for _ in tqdm.tqdm(range(10000)):
idx = np.random.choice(range(surv.shape[0]), batchsize, replace=False)
m_approx.append(pcox.CoxPartialLikelihood(pred=pred[idx], sampling_proportion=[total_obs, batchsize, total_events, torch.sum(surv[idx, -1])]).log_prob(surv=surv[idx]).detach().numpy())
m_approx_naive.append(pcox.CoxPartialLikelihood(pred=pred[idx], sampling_proportion=None).log_prob(surv=surv[idx]).detach().numpy() * (total_obs/batchsize))
# Plots
# =======================================================================================================================
fig, ax = plt.subplots(1, 2, figsize=(13*cm, 5.5*cm), dpi=600, sharey=True)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=None)
ax[0].scatter(np.arange(5000), -np.asarray(LL_batch)[:5000], color='0.5', alpha=1, label='', s=0.2, marker='x')
ax[0].scatter(np.arange(5000), -np.asarray(LL_naive)[:5000], color='0.8', alpha=1, label='naive', s=0.2, marker='.')
ax[0].plot(np.arange(5000), -np.asarray(LL_full)[:5000], color='0.1', label='full')
ax[1].axhline(-m_est, color='#0b64e0')
ax[0].set_xlabel(r'$Steps$')
ax[0].set_ylabel(r'$-\log \mathcal{L}(D|\theta)$')
ax[0].set_yticks([750, 1500, 2250])
ax[0].set_yticklabels([750, 1500, 2250])
ax[0].set_xticks([0, 2500, 5000])
ax[0].set_xlim([0, 5000])
ax[1].hist(-np.asarray(m_approx), bins=50, alpha=1, color='0.5', density=True, orientation='horizontal', label='reweighted')
ax[1].hist(-np.asarray(m_approx_naive), bins=50, alpha=1, color='0.8', density=True, orientation='horizontal', label='naive')
ax[1].axhline(-m_est, color='0.1', label='full')
ax[1].spines['bottom'].set_visible(False)
ax[1].set_xticks([])
ax[1].legend(frameon=False, prop={'size': 6})
#plt.show()
plt.savefig('./out/simulation/figures/likelihood_approximation.eps', bbox_inches='tight', dpi=600, transparent=True)
plt.savefig('./out/simulation/figures/likelihood_approximation.png', bbox_inches='tight', dpi=600, transparent=True)
plt.savefig('./out/simulation/figures/likelihood_approximation.pdf', bbox_inches='tight', dpi=600, transparent=True)
plt.close()
| [
"numpy.random.seed",
"torch.cat",
"torch.mm",
"numpy.arange",
"pyro.clear_param_store",
"torch.no_grad",
"os.chdir",
"probcox.PCox",
"matplotlib.pyplot.close",
"torch.zeros",
"matplotlib.pyplot.subplots",
"torch.manual_seed",
"numpy.asarray",
"probcox.TVC",
"probcox.CoxPartialLikelihood"... | [((486, 519), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (509, 519), False, 'import warnings\n'), ((573, 593), 'numpy.random.seed', 'np.random.seed', (['(5256)'], {}), '(5256)\n', (587, 593), True, 'import numpy as np\n'), ((594, 617), 'torch.manual_seed', 'torch.manual_seed', (['(9235)'], {}), '(9235)\n', (611, 617), False, 'import torch\n'), ((740, 809), 'os.chdir', 'os.chdir', (['"""/nfs/research/gerstung/awj/projects/ProbCox/paper/ProbCox"""'], {}), "('/nfs/research/gerstung/awj/projects/ProbCox/paper/ProbCox')\n", (748, 809), False, 'import os\n'), ((1281, 1317), 'numpy.random.uniform', 'np.random.uniform', (['(-1.5)', '(1.5)', '(P, 1)'], {}), '(-1.5, 1.5, (P, 1))\n', (1298, 1317), True, 'import numpy as np\n'), ((1539, 1627), 'probcox.TVC', 'pcox.TVC', ([], {'theta': 'theta', 'P_binary': 'P_continuous', 'P_continuous': 'P_continuous', 'dtype': 'dtype'}), '(theta=theta, P_binary=P_continuous, P_continuous=P_continuous,\n dtype=dtype)\n', (1547, 1627), True, 'import probcox as pcox\n'), ((1661, 1680), 'torch.zeros', 'torch.zeros', (['(0, 3)'], {}), '((0, 3))\n', (1672, 1680), False, 'import torch\n'), ((1685, 1704), 'torch.zeros', 'torch.zeros', (['(0, 6)'], {}), '((0, 6))\n', (1696, 1704), False, 'import torch\n'), ((2095, 2119), 'pyro.clear_param_store', 'pyro.clear_param_store', ([], {}), '()\n', (2117, 2119), False, 'import pyro\n'), ((2124, 2174), 'probcox.PCox', 'pcox.PCox', ([], {'sampling_proportion': 'sampling_proportion'}), '(sampling_proportion=sampling_proportion)\n', (2133, 2174), True, 'import probcox as pcox\n'), ((3959, 4028), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(13 * cm, 5.5 * cm)', 'dpi': '(600)', 'sharey': '(True)'}), '(1, 2, figsize=(13 * cm, 5.5 * cm), dpi=600, sharey=True)\n', (3971, 4028), True, 'import matplotlib.pyplot as plt\n'), ((5109, 5229), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""./out/simulation/figures/likelihood_approximation.eps"""'], {'bbox_inches': '"""tight"""', 'dpi': '(600)', 'transparent': '(True)'}), "('./out/simulation/figures/likelihood_approximation.eps',\n bbox_inches='tight', dpi=600, transparent=True)\n", (5120, 5229), True, 'import matplotlib.pyplot as plt\n'), ((5226, 5346), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./out/simulation/figures/likelihood_approximation.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(600)', 'transparent': '(True)'}), "('./out/simulation/figures/likelihood_approximation.png',\n bbox_inches='tight', dpi=600, transparent=True)\n", (5237, 5346), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5463), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./out/simulation/figures/likelihood_approximation.pdf"""'], {'bbox_inches': '"""tight"""', 'dpi': '(600)', 'transparent': '(True)'}), "('./out/simulation/figures/likelihood_approximation.pdf',\n bbox_inches='tight', dpi=600, transparent=True)\n", (5354, 5463), True, 'import matplotlib.pyplot as plt\n'), ((5460, 5471), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5469, 5471), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1782), 'torch.cat', 'torch.cat', (['(surv, a)'], {}), '((surv, a))\n', (1771, 1782), False, 'import torch\n'), ((1791, 1808), 'torch.cat', 'torch.cat', (['(X, b)'], {}), '((X, b))\n', (1800, 1808), False, 'import torch\n'), ((4130, 4145), 'numpy.arange', 'np.arange', (['(5000)'], {}), '(5000)\n', (4139, 4145), True, 'import numpy as np\n'), ((4242, 4257), 'numpy.arange', 'np.arange', (['(5000)'], {}), '(5000)\n', (4251, 4257), True, 'import numpy as np\n'), ((4356, 4371), 'numpy.arange', 'np.arange', (['(5000)'], {}), '(5000)\n', (4365, 4371), True, 'import numpy as np\n'), ((2385, 2412), 'torch.sum', 'torch.sum', (['surv[idx][:, -1]'], {}), '(surv[idx][:, -1])\n', (2394, 2412), False, 'import torch\n'), ((2646, 2661), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2659, 2661), False, 'import 
torch\n'), ((4699, 4719), 'numpy.asarray', 'np.asarray', (['m_approx'], {}), '(m_approx)\n', (4709, 4719), True, 'import numpy as np\n'), ((4824, 4850), 'numpy.asarray', 'np.asarray', (['m_approx_naive'], {}), '(m_approx_naive)\n', (4834, 4850), True, 'import numpy as np\n'), ((4148, 4168), 'numpy.asarray', 'np.asarray', (['LL_batch'], {}), '(LL_batch)\n', (4158, 4168), True, 'import numpy as np\n'), ((4260, 4280), 'numpy.asarray', 'np.asarray', (['LL_naive'], {}), '(LL_naive)\n', (4270, 4280), True, 'import numpy as np\n'), ((4374, 4393), 'numpy.asarray', 'np.asarray', (['LL_full'], {}), '(LL_full)\n', (4384, 4393), True, 'import numpy as np\n'), ((1850, 1877), 'torch.sum', 'torch.sum', (['(surv[:, -1] == 1)'], {}), '(surv[:, -1] == 1)\n', (1859, 1877), False, 'import torch\n'), ((2678, 2700), 'torch.mm', 'torch.mm', (['X', 'theta_est'], {}), '(X, theta_est)\n', (2686, 2700), False, 'import torch\n'), ((3215, 3277), 'probcox.CoxPartialLikelihood', 'pcox.CoxPartialLikelihood', ([], {'pred': 'pred', 'sampling_proportion': 'None'}), '(pred=pred, sampling_proportion=None)\n', (3240, 3277), True, 'import probcox as pcox\n'), ((2736, 2798), 'probcox.CoxPartialLikelihood', 'pcox.CoxPartialLikelihood', ([], {'pred': 'pred', 'sampling_proportion': 'None'}), '(pred=pred, sampling_proportion=None)\n', (2761, 2798), True, 'import probcox as pcox\n'), ((3672, 3739), 'probcox.CoxPartialLikelihood', 'pcox.CoxPartialLikelihood', ([], {'pred': 'pred[idx]', 'sampling_proportion': 'None'}), '(pred=pred[idx], sampling_proportion=None)\n', (3697, 3739), True, 'import probcox as pcox\n'), ((3071, 3138), 'probcox.CoxPartialLikelihood', 'pcox.CoxPartialLikelihood', ([], {'pred': 'pred[idx]', 'sampling_proportion': 'None'}), '(pred=pred[idx], sampling_proportion=None)\n', (3096, 3138), True, 'import probcox as pcox\n'), ((3576, 3600), 'torch.sum', 'torch.sum', (['surv[idx, -1]'], {}), '(surv[idx, -1])\n', (3585, 3600), False, 'import torch\n'), ((2960, 2984), 'torch.sum', 'torch.sum', 
(['surv[idx, -1]'], {}), '(surv[idx, -1])\n', (2969, 2984), False, 'import torch\n')] |
import os
from eulerangles import euler2mat
import numpy as np
import math
import cv2
import torch
import torch.nn.functional as F
from torchvision import transforms
import matplotlib.cm as cm
from google_drive_downloader import GoogleDriveDownloader
from affine_transform import affineTransform
# label vector
# label_map = {'Car':0}
# label_map = {'Car':0, 'Van':1, 'Truck':2, 'Cyclist':3, 'Pedestrian':4}
# mapping from object class name to the integer index used in label vectors
label_map = {'Car':0, 'Cyclist':1, 'Pedestrian':2}
# per-object pose entries stored at the start of each grid cell's label slice
pose_fields = ['conf','x','y','z','l','w','h','cos_yaw','sin_yaw']
pose_vec_len = len(pose_fields)
# pretrained weights
# Google Drive download descriptors keyed by the experiment configuration
# string; consulted by load_pretrained_weights when no local checkpoint exists
pretrained_weights = [{'exp':'vr3d.learning_rate_0.0001.n_xgrids_16.n_ygrids_16.xlim_0.0_70.0.ylim_-25.0_25.0.zlim_-2.5_1.0.max_depth_100.0.vol_size_256x256x16.img_size_512x256.dense_depth_True.concat_latent_vector_True.exp_id_kitti',
                       'url':'https://drive.google.com/file/d/1h3wLV87MQCzwfglZ8eSldVRype9Ew-wd/view?usp=sharing',
                       'file_id':'1h3wLV87MQCzwfglZ8eSldVRype9Ew-wd'}
                     ]
# load pretrained weights
def load_pretrained_weights(model, modeldir, exp_str):
    """Load the best checkpoint for ``exp_str`` into ``model``.

    Looks for ``<modeldir>/<exp_str>/checkpoint_best.pt``; when missing,
    attempts to download it from Google Drive using the matching entry of
    the module-level ``pretrained_weights`` list.

    Args:
        model: torch module whose state dict is loaded.
        modeldir: root directory containing per-experiment model folders.
        exp_str: experiment configuration string used as the folder name.

    Returns:
        The same ``model`` instance with weights loaded.

    Raises:
        Exception: if no local checkpoint exists and no download entry
            matches ``exp_str``.
    """
    # best checkpoint model name
    model_exp_dir = os.path.join(modeldir, exp_str)
    best_ckpt_model = os.path.join(model_exp_dir, 'checkpoint_best.pt')
    # check if the model exists
    if os.path.exists(best_ckpt_model):
        # map_location keeps tensors on CPU regardless of the device they were saved from
        model.load_state_dict(torch.load(best_ckpt_model, map_location=lambda storage, loc: storage))
        print('Loaded pre-trained weights: {}'.format(best_ckpt_model))
    else:
        found = False
        print('Pre-trained weights not found. Attempting to download.')
        # look for a download descriptor whose 'exp' matches this configuration
        for pretrained_weight in pretrained_weights:
            for key in pretrained_weight.keys():
                if key == 'exp':
                    if exp_str == pretrained_weight[key]:
                        os.system('mkdir -p {}'.format(model_exp_dir))
                        GoogleDriveDownloader.download_file_from_google_drive(file_id=pretrained_weight['file_id'],
                                                                               dest_path=os.path.join(model_exp_dir, 'checkpoint_best.pt'),
                                                                               unzip=False,
                                                                               showsize=True)
                        model.load_state_dict(torch.load(best_ckpt_model, map_location=lambda storage, loc: storage))
                        print('Loaded pre-trained weights: {}'.format(best_ckpt_model))
                        found = True
        if found == False:
            print('Unable to find pretrained weights with this experiment configuration')
            raise Exception('Pre-trained weights not found.')
    return model
# this function returns label index given a label string
def label_to_idx(label):
    """Return the integer class index for a class-name string.

    Args:
        label: object class name, e.g. 'Car'.

    Returns:
        Integer index as defined in ``label_map``.

    Raises:
        KeyError: if ``label`` is not a supported object class.
    """
    try:
        return label_map[label]
    except KeyError:
        # FIX: the original did `raise('Unsupported object class')`, which
        # raises a TypeError in Python 3 (a str is not an exception); raise
        # a proper exception type with the offending value instead.
        raise KeyError('Unsupported object class: {}'.format(label))
# this function returns index given a label
def idx_to_label(idx):
    """Return the class-name string for an integer class index.

    Args:
        idx: integer class index.

    Returns:
        Class name string, e.g. 'Car'.

    Raises:
        KeyError: if ``idx`` maps to no supported object class.
    """
    for label, label_idx in label_map.items():
        if idx == label_idx:
            return label
    # FIX: the original did `raise('Unsupported object class')`, which raises
    # a TypeError in Python 3 (cannot raise a str); raise a real exception.
    raise KeyError('Unsupported object class index: {}'.format(idx))
## Image and Depth
# normalize
def normalize_img(img):
    """Scale 8-bit pixel values from [0, 255] to the zero-centered range [-0.5, 0.5]."""
    return img / 255.0 - 0.5
# denormalize
def denormalize_img(img):
    """Map a normalized image in [-0.5, 0.5] back to uint8 pixel values in [0, 255]."""
    rescaled = (img + 0.5) * 255.0
    return np.array(rescaled, dtype=np.uint8)
# normalize
def normalize_depth(depth, max_depth):
    """Scale a metric depth in [0, max_depth] to the normalized range [-1, 1]."""
    return depth * 2.0 / max_depth - 1.0
# denormalize
def denormalize_depth(depth, max_depth):
    """Map a normalized depth in [-1, 1] back to the metric range [0, max_depth]."""
    half_range = max_depth / 2
    return (depth + 1.0) * half_range
# draw point-cloud
def draw_point_cloud_topdown(input_points, canvasSize=800, radius=1,
                        zrot=0, switch_xyz=[0,1,2],
                        xlim=(0.0,70.0), ylim=(-50.0,50.0), zlim=(-5.0,10.0), background_color=(255,255,255)):
    """ Render point cloud to image with alpha channel.
    Input:
        points: Nx3 numpy array (+z is up direction)
    Output:
        colorized image as numpy array of size canvasSizexcanvasSize,
        float32 scaled to [0, 1]; points are hue-coded by height.
    """
    # NOTE(review): `switch_xyz` is never used in this body, and as a mutable
    # default it would be shared across calls -- confirm before removing.
    # create mask: keep only points inside the (xlim, ylim, zlim) crop box
    input_points_mask = np.logical_and.reduce(((input_points[:,0] > xlim[0]), (input_points[:,0] < xlim[1]), \
                                           (input_points[:,1] > ylim[0]), (input_points[:,1] < ylim[1]), \
                                           (input_points[:,2] > zlim[0]), (input_points[:,2] < zlim[1])))
    # filter out
    input_points = input_points[input_points_mask]
    # hue range
    huerange = (240, 0) # green to red
    # image plane
    image = np.zeros((canvasSize, canvasSize, 3), dtype=np.uint8)
    if input_points is None or input_points.shape[0] == 0:
        return image
    # x in point-cloud to image y coordinate
    def pcx_to_imgy(pcx):
        m2px = (xlim[1]-xlim[0]) / float(canvasSize)
        imgy = canvasSize - int(pcx / m2px)
        return imgy
    # y in point-cloud to image x coordinate
    def pcy_to_imgx(pcy):
        m2px = (ylim[1]-ylim[0]) / float(canvasSize)
        imgx = int((float(canvasSize) / 2.0) - (pcy / m2px))
        return imgx
    points = input_points
    # rotate the cloud about the z axis by zrot before rendering
    M = euler2mat(zrot, 0, 0)
    points = (np.dot(M, points.transpose())).transpose()
    # go through each point
    for pt in points:
        imgx = pcy_to_imgx(pt[1])
        imgy = pcx_to_imgy(pt[0])
        # draw circle; hue encodes the point's height within zlim
        ztohue = (zlim[1] - zlim[0]) / (huerange[0] - huerange[1])
        hue = huerange[0] - int((pt[2] - zlim[0]) / ztohue)
        cv2.circle(image, (imgx, imgy), radius=radius, color=(hue,255,128), thickness=-1)
    # convert to BGR
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    # paint untouched (still black) pixels with the background color
    image[np.where(np.all(image == [0,0,0], axis=-1))] = background_color
    # scale between 0.0 and 1.0
    image = image.astype(np.float32) / 255.0
    return image
# draw point-cloud with bounding-box
def draw_point_cloud_w_bbox(input_points, label_dict_list, canvasSize=800, radius=1,
                        xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-5.0,10.0), background_color=(255,255,255), text_color=(0,0,0)):
    """Render a birds-eye view of a point cloud with object bounding boxes.

    Points are hue-coded by height (green to red); each label is drawn as a
    yaw-rotated rectangle annotated with its class name and confidence.

    Args:
        input_points: Nx3 numpy array of lidar points (+z is up).
        label_dict_list: list of label dicts with keys
            'x','y','z','l','w','h','yaw','class','conf'.
        canvasSize: output image is canvasSize x canvasSize pixels.
        radius: pixel radius of each rendered point.
        xlim/ylim/zlim: metric crop limits in the lidar frame.
        background_color: BGR fill for empty pixels.
        text_color: BGR color for the label text.

    Returns:
        canvasSize x canvasSize x 3 float32 image scaled to [0, 1].
    """
    # create mask: keep only points inside the crop volume
    input_points_mask = np.logical_and.reduce(((input_points[:,0] > xlim[0]), (input_points[:,0] < xlim[1]), \
                                           (input_points[:,1] > ylim[0]), (input_points[:,1] < ylim[1]), \
                                           (input_points[:,2] > zlim[0]), (input_points[:,2] < zlim[1])))
    # filter out
    input_points = input_points[input_points_mask]
    # hue range
    huerange = (240, 0) # green to red
    # image plane
    image = np.zeros((canvasSize, canvasSize, 3), dtype=np.uint8)
    if input_points is None or input_points.shape[0] == 0:
        return image
    # x in point-cloud to image y coordinate
    def pcx_to_imgy(pcx):
        m2px = (xlim[1]-xlim[0]) / float(canvasSize)
        imgy = canvasSize - int(pcx / m2px)
        return imgy
    # y in point-cloud to image x coordinate
    def pcy_to_imgx(pcy):
        m2px = (ylim[1]-ylim[0]) / float(canvasSize)
        imgx = int((float(canvasSize) / 2.0) - (pcy / m2px))
        return imgx
    # go through each point; hue encodes height within zlim
    for pt in input_points:
        imgx = pcy_to_imgx(pt[1])
        imgy = pcx_to_imgy(pt[0])
        # draw circle
        ztohue = (zlim[1] - zlim[0]) / (huerange[0] - huerange[1])
        hue = huerange[0] - int((pt[2] - zlim[0]) / ztohue)
        cv2.circle(image, (imgx, imgy), radius=radius, color=(hue,255,128), thickness=-1)
    # convert to BGR
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    image[np.where(np.all(image == [0,0,0], axis=-1))] = background_color
    # draw bounding boxes
    for label_dict in label_dict_list:
        x,y,z,l,w,h,yaw = label_dict['x'],label_dict['y'],label_dict['z'],label_dict['l'],label_dict['w'],label_dict['h'],label_dict['yaw']
        yaw = np.pi/2.0 + yaw
        # compute corners in birds-eye view (object frame)
        corners = []
        corners.append([ l/2, -w/2, 0.0])
        corners.append([ l/2,  w/2, 0.0])
        corners.append([-l/2,  w/2, 0.0])
        corners.append([-l/2, -w/2, 0.0])
        corners = np.asarray(corners, dtype=np.float32)
        # rotate corners by yaw, then translate to the box centre
        M = euler2mat(yaw, 0, 0)
        corners = (np.dot(M, corners.transpose())).transpose()
        corners = corners + [x, y, z]
        # corners in pixel
        corners_px = []
        for corner in corners:
            corners_px.append([pcy_to_imgx(corner[1]), pcx_to_imgy(corner[0])])
        # FIX: np.int was removed in NumPy 1.24 (AttributeError at runtime);
        # the builtin int is the exact dtype the alias stood for
        corners_px = np.asarray(corners_px, dtype=int)
        # draw bounding box: thick black underlay, then blue edges
        cv2.line(image, (corners_px[0,0], corners_px[0,1]), (corners_px[1,0], corners_px[1,1]), color=(0,0,0), thickness=6)
        cv2.line(image, (corners_px[0,0], corners_px[0,1]), (corners_px[1,0], corners_px[1,1]), color=(255,0,0), thickness=2)
        cv2.line(image, (corners_px[1,0], corners_px[1,1]), (corners_px[2,0], corners_px[2,1]), color=(255,0,0), thickness=2)
        cv2.line(image, (corners_px[2,0], corners_px[2,1]), (corners_px[3,0], corners_px[3,1]), color=(255,0,0), thickness=2)
        cv2.line(image, (corners_px[3,0], corners_px[3,1]), (corners_px[0,0], corners_px[0,1]), color=(255,0,0), thickness=2)
        # get top-left coordinates for the text anchor
        tl = (np.min(corners_px[:,0]), np.min(corners_px[:,1]))
        # write class
        cv2.putText(image, '{} {:.1f}%'.format(label_dict['class'], label_dict['conf']*100.0),
                    (tl[0],tl[1]-5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6,
                    color=text_color,
                    thickness=2,
                    lineType=2)
    # scale between 0.0 and 1.0
    image = image.astype(np.float32) / 255.0
    return image
# draw point-cloud with bounding-box
def draw_point_cloud_w_bbox_id(input_points, label_dict_list, canvasSize=800, radius=1,
                        xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-5.0,10.0), background_color=(255,255,255), text_color=(0,0,0)):
    """Render a birds-eye view with bounding boxes annotated by tracking id.

    Same rendering as ``draw_point_cloud_w_bbox`` but the text also shows
    each label's 'id' field (and uses full-value saturation for points).

    Args:
        input_points: Nx3 numpy array of lidar points (+z is up).
        label_dict_list: list of label dicts with keys
            'x','y','z','l','w','h','yaw','class','conf','id'.
        canvasSize: output image is canvasSize x canvasSize pixels.
        radius: pixel radius of each rendered point.
        xlim/ylim/zlim: metric crop limits in the lidar frame.
        background_color: BGR fill for empty pixels.
        text_color: BGR color for the label text.

    Returns:
        canvasSize x canvasSize x 3 float32 image scaled to [0, 1].
    """
    # create mask: keep only points inside the crop volume
    input_points_mask = np.logical_and.reduce(((input_points[:,0] > xlim[0]), (input_points[:,0] < xlim[1]), \
                                           (input_points[:,1] > ylim[0]), (input_points[:,1] < ylim[1]), \
                                           (input_points[:,2] > zlim[0]), (input_points[:,2] < zlim[1])))
    # filter out
    input_points = input_points[input_points_mask]
    # hue range
    huerange = (240, 0) # green to red
    # image plane
    image = np.zeros((canvasSize, canvasSize, 3), dtype=np.uint8)
    if input_points is None or input_points.shape[0] == 0:
        return image
    # x in point-cloud to image y coordinate
    def pcx_to_imgy(pcx):
        m2px = (xlim[1]-xlim[0]) / float(canvasSize)
        imgy = canvasSize - int(pcx / m2px)
        return imgy
    # y in point-cloud to image x coordinate
    def pcy_to_imgx(pcy):
        m2px = (ylim[1]-ylim[0]) / float(canvasSize)
        imgx = int((float(canvasSize) / 2.0) - (pcy / m2px))
        return imgx
    # go through each point; hue encodes height within zlim
    for pt in input_points:
        imgx = pcy_to_imgx(pt[1])
        imgy = pcx_to_imgy(pt[0])
        # draw circle
        ztohue = (zlim[1] - zlim[0]) / (huerange[0] - huerange[1])
        hue = huerange[0] - int((pt[2] - zlim[0]) / ztohue)
        cv2.circle(image, (imgx, imgy), radius=radius, color=(hue,255,255), thickness=-1)
    # convert to BGR
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    image[np.where(np.all(image == [0,0,0], axis=-1))] = background_color
    # draw bounding boxes
    for label_dict in label_dict_list:
        x,y,z,l,w,h,yaw = label_dict['x'],label_dict['y'],label_dict['z'],label_dict['l'],label_dict['w'],label_dict['h'],label_dict['yaw']
        yaw = np.pi/2.0 + yaw
        # compute corners in birds-eye view (object frame)
        corners = []
        corners.append([ l/2, -w/2, 0.0])
        corners.append([ l/2,  w/2, 0.0])
        corners.append([-l/2,  w/2, 0.0])
        corners.append([-l/2, -w/2, 0.0])
        corners = np.asarray(corners, dtype=np.float32)
        # rotate corners by yaw, then translate to the box centre
        M = euler2mat(yaw, 0, 0)
        corners = (np.dot(M, corners.transpose())).transpose()
        corners = corners + [x, y, z]
        # corners in pixel
        corners_px = []
        for corner in corners:
            corners_px.append([pcy_to_imgx(corner[1]), pcx_to_imgy(corner[0])])
        # FIX: np.int was removed in NumPy 1.24 (AttributeError at runtime);
        # the builtin int is the exact dtype the alias stood for
        corners_px = np.asarray(corners_px, dtype=int)
        # draw bounding box: thick black underlay, then blue edges
        cv2.line(image, (corners_px[0,0], corners_px[0,1]), (corners_px[1,0], corners_px[1,1]), color=(0,0,0), thickness=6)
        cv2.line(image, (corners_px[0,0], corners_px[0,1]), (corners_px[1,0], corners_px[1,1]), color=(255,0,0), thickness=2)
        cv2.line(image, (corners_px[1,0], corners_px[1,1]), (corners_px[2,0], corners_px[2,1]), color=(255,0,0), thickness=2)
        cv2.line(image, (corners_px[2,0], corners_px[2,1]), (corners_px[3,0], corners_px[3,1]), color=(255,0,0), thickness=2)
        cv2.line(image, (corners_px[3,0], corners_px[3,1]), (corners_px[0,0], corners_px[0,1]), color=(255,0,0), thickness=2)
        # get top-left coordinates for the text anchor
        tl = (np.min(corners_px[:,0]), np.min(corners_px[:,1]))
        # write object id and class
        cv2.putText(image, '{} {:.1f}% | id {}'.format(label_dict['class'], label_dict['conf']*100.0, label_dict['id']),
                    (tl[0],tl[1]-5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6,
                    color=text_color,
                    thickness=2,
                    lineType=2)
    # scale between 0.0 and 1.0
    image = image.astype(np.float32) / 255.0
    return image
def draw_bbox_img(img, label_dict_cam, K):
    """ Draw bounding-box on image using label dictionary
    in the camera coordinate system and camera intrinsic matrix, K.

    The image is modified in place; front box faces are drawn white with a
    cross, rear faces and connecting edges red.

    Args:
        img: BGR image (numpy array), drawn on in place.
        label_dict_cam: list of label dicts in the camera frame with keys
            'x','y','z','l','w','h','yaw'.
        K: 3x3 intrinsic or 3x4 projection matrix.

    Returns:
        The same ``img`` with boxes drawn.
    """
    # NOTE(review): img_copy is only used by the commented-out overlay code below
    img_copy = img.copy()
    # colors
    WHITE = (255,255,255)
    RED = (255,0,0)
    YELLOW = (255,255,0)
    GREEN = (0,255,0)
    CYAN = (0,255,255)
    BLUE = (0,0,255)
    # draw bounding boxes
    for label in label_dict_cam:
        x,y,z,l,w,h,yaw = label['x'],label['y'],label['z'],label['l'],label['w'],label['h'],label['yaw']
        # yaw = np.pi/2.0 + yaw
        yaw = (np.pi/2.0) - yaw
        # extract vertices of bboxes (camera frame: x right, y down, z forward)
        pts_3d = []
        # front 4 vertices
        pts_3d.append([x-w/2., y-h/2., z-l/2.])
        pts_3d.append([x+w/2., y-h/2., z-l/2.])
        pts_3d.append([x+w/2., y+h/2., z-l/2.])
        pts_3d.append([x-w/2., y+h/2., z-l/2.])
        # vertices behind
        pts_3d.append([x-w/2., y-h/2., z+l/2.])
        pts_3d.append([x+w/2., y-h/2., z+l/2.])
        pts_3d.append([x+w/2., y+h/2., z+l/2.])
        pts_3d.append([x-w/2., y+h/2., z+l/2.])
        # # change the orientation so that 0 degrees is aligned with z-axis
        yaw = math.degrees(yaw)
        # move the bbox to the origin and then rotate as given orientation
        for i in range(len(pts_3d)):
            # move the center of bbox to origin
            pts_3d[i] = affineTransform(pts_3d[i], 0, 0, 0, -x, -y, -z)
            # rotate points and move the bbox back to x,y,z
            pts_3d[i] = affineTransform(pts_3d[i], 0, yaw, 0, x, y, z)
        # get 2d projection
        pts_2d = pts_3d_to_2d(pts_3d, K)
        # draw front rectangle
        for i in range(3):
            cv2.line(img, (pts_2d[i][0], pts_2d[i][1]), (pts_2d[i+1][0], pts_2d[i+1][1]), color=WHITE, thickness=2)
        cv2.line(img, (pts_2d[3][0], pts_2d[3][1]), (pts_2d[0][0], pts_2d[0][1]), color=WHITE, thickness=2)
        # front cross
        cv2.line(img, (pts_2d[0][0], pts_2d[0][1]), (pts_2d[2][0], pts_2d[2][1]), color=WHITE, thickness=2)
        cv2.line(img, (pts_2d[1][0], pts_2d[1][1]), (pts_2d[3][0], pts_2d[3][1]), color=WHITE, thickness=2)
        # draw back rectangle
        for i in range(4,7):
            cv2.line(img, (pts_2d[i][0], pts_2d[i][1]), (pts_2d[i+1][0], pts_2d[i+1][1]), color=RED, thickness=2)
        cv2.line(img, (pts_2d[7][0], pts_2d[7][1]), (pts_2d[4][0], pts_2d[4][1]), color=RED, thickness=2)
        # connecting two rectangles
        cv2.line(img, (pts_2d[0][0], pts_2d[0][1]), (pts_2d[4][0], pts_2d[4][1]), color=RED, thickness=2)
        cv2.line(img, (pts_2d[1][0], pts_2d[1][1]), (pts_2d[5][0], pts_2d[5][1]), color=RED, thickness=2)
        cv2.line(img, (pts_2d[2][0], pts_2d[2][1]), (pts_2d[6][0], pts_2d[6][1]), color=RED, thickness=2)
        cv2.line(img, (pts_2d[3][0], pts_2d[3][1]), (pts_2d[7][0], pts_2d[7][1]), color=RED, thickness=2)
        # bottom face
        # cv2.fillConvexPoly(img_copy, np.array([pts_2d[2,:], pts_2d[3,:], pts_2d[7,:], pts_2d[6,:]], dtype=np.int32), color=BLUE)
        # # get minimum x and y
        # x_min = min(pts_2d[:,0])
        # y_min = min(pts_2d[:,1])
        # # a rectangle behind text
        # cv2.rectangle(img, (x_min,y_min-20), (x_min+150,y_min), color=(0,0,0), thickness=-1)
        # cv2.rectangle(img_copy, (x_min,y_min-20), (x_min+150,y_min), color=(0,0,0), thickness=-1)
        # # write object class
        # cv2.putText(img, label['class']+' {:.1f}%'.format(label['conf']*100.0),
        #             (x_min+5, y_min-5),
        #             cv2.FONT_HERSHEY_COMPLEX,
        #             0.6,
        #             color=(255,255,255),
        #             thickness=1,
        #             lineType=2)
    # return image
    # img_ret = cv2.addWeighted(img, 1.0, img_copy, 0.5, 0.)
    return img
def build_label_vector(label_dict_list, n_xgrids, n_ygrids, mean_lwh,
                    xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0)):
    """ Build the ground-truth label vector
        given a set of poses, classes, and
        number of grids.
    Input:
        label_dict_list: list of label dictionary
        n_xgrids: number of grids in the x direction
        n_ygrids: number of grids in the y direction
        mean_lwh: per-class mean (l, w, h) used to log-normalize box size
    Output:
        label vector of length n_xgrids * n_ygrids * obj_label_len, where
        each cell holds [conf, x, y, z, l, w, h, cos_yaw, sin_yaw] + one-hot class
    """
    obj_label_len = pose_vec_len + len(label_map) # 9 for poses, rest for object classes
    label_vector = np.zeros((n_xgrids * n_ygrids * obj_label_len), dtype=np.float32)
    # iterate through each pose
    for i, label_dict in enumerate(label_dict_list):
        x,y,z,l,w,h,yaw,cls_ = label_dict['x'],label_dict['y'],label_dict['z'],label_dict['l'],label_dict['w'],label_dict['h'],label_dict['yaw'],label_dict['class']
        # obtain x index (grid cell containing the object centre)
        xstop = (xlim[1] - xlim[0]) / float(n_xgrids)
        x_idx = math.floor((x - xlim[0]) / xstop)
        # obtain y index
        ystop = (ylim[1] - ylim[0]) / float(n_ygrids)
        y_idx = math.floor((y - ylim[0]) / ystop)
        # pose vector: offsets within the cell, normalized to [0, 1]
        x_norm = ((x - xlim[0]) - (x_idx * xstop)) / xstop
        y_norm = ((y - ylim[0]) - (y_idx * ystop)) / ystop
        z_norm = (z - zlim[0]) / (zlim[1] - zlim[0])
        mean_lwh_cls = mean_lwh[cls_]
        # log-scaled size relative to the per-class mean dimensions
        l_norm = math.log(l/mean_lwh_cls[0])
        w_norm = math.log(w/mean_lwh_cls[1])
        h_norm = math.log(h/mean_lwh_cls[2])
        # encode yaw as (cos, sin) mapped to [0, 1] to avoid angle wrap-around
        cos_yaw_norm = (np.cos(yaw) + 1.0) / 2.0
        sin_yaw_norm = (np.sin(yaw) + 1.0) / 2.0
        # yaw_norm = (yaw + np.pi) / (2 * np.pi)
        pose_vec = [1.0, x_norm, y_norm, z_norm, l_norm, w_norm, h_norm, cos_yaw_norm, sin_yaw_norm]
        # class vector (one-hot)
        class_vec = [0.0]*len(label_map)
        class_idx = label_to_idx(cls_)
        class_vec[class_idx] = 1.0
        # label vector for this object
        label_vec_this_obj = pose_vec + class_vec
        # label index: cells are flattened as (x_idx * n_ygrids) + y_idx
        label_idx = ((x_idx * n_ygrids) + y_idx) * obj_label_len
        # populate label vector
        label_vector[label_idx:label_idx+obj_label_len] = label_vec_this_obj
    # return the label vector
    return label_vector
def decompose_label_vector(label_vector, n_xgrids, n_ygrids, mean_lwh,
                           xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0),
                           conf_thres=0.5, nms=True, iou_thres=0.1):
    """Decode a model output vector back into a list of object label dicts.

    Inverse of ``build_label_vector``: every grid cell whose confidence
    exceeds ``conf_thres`` is denormalized back to a metric object pose.

    Args:
        label_vector: flat vector of n_xgrids*n_ygrids cells, each holding
            [conf, x, y, z, l, w, h, cos_yaw, sin_yaw] + one-hot class scores.
        n_xgrids, n_ygrids: grid resolution along x and y.
        mean_lwh: per-class mean (l, w, h) used to undo the log size scaling.
        xlim/ylim/zlim: metric extents of the grid.
        conf_thres: minimum confidence to keep a detection.
        nms: whether to run non-max suppression on the result.
        iou_thres: IoU threshold used by NMS.

    Returns:
        List of label dicts with keys 'conf','x','y','z','l','w','h','yaw','class'.
    """
    label_dict_list = []
    # metric size of one grid cell along x and y
    xstop = (xlim[1] - xlim[0]) / float(n_xgrids)
    ystop = (ylim[1] - ylim[0]) / float(n_ygrids)
    # length of each object label (pose entries + one-hot class scores)
    obj_label_len = pose_vec_len + len(label_map)
    # one row per grid cell
    label_vector_reshaped = np.reshape(label_vector, (-1, obj_label_len))
    obj_confidences = label_vector_reshaped[:, 0]
    obj_poses = label_vector_reshaped[:, 1:pose_vec_len]
    obj_class_one_hot = label_vector_reshaped[:, pose_vec_len:]
    # iterate through each grid cell
    for i, obj_conf in enumerate(obj_confidences):
        if obj_conf > conf_thres:
            # pose vector
            x_norm, y_norm, z_norm, l_norm, w_norm, h_norm, cos_yaw_norm, sin_yaw_norm = obj_poses[i]
            cls_ = idx_to_label(np.argmax(obj_class_one_hot[i]))
            mean_lwh_cls = mean_lwh[cls_]
            # FIX: build_label_vector flattens cells as i = x_idx * n_ygrids + y_idx,
            # so the inversion must divide by n_ygrids; the original divided by
            # n_xgrids, which is only correct for square grids.
            x_idx = math.floor(i / n_ygrids)
            y_idx = i - (x_idx * n_ygrids)
            # denormalize pose
            x = (x_norm * xstop) + (x_idx * xstop) + xlim[0]
            y = (y_norm * ystop) + (y_idx * ystop) + ylim[0]
            z = (z_norm * (zlim[1] - zlim[0])) + zlim[0]
            # undo the log scaling of box size relative to per-class means
            l = mean_lwh_cls[0]*math.exp(l_norm)
            w = mean_lwh_cls[1]*math.exp(w_norm)
            h = mean_lwh_cls[2]*math.exp(h_norm)
            # recover yaw from the [0, 1]-encoded (cos, sin) pair
            cos_yaw = (cos_yaw_norm * 2.0) - 1.0
            sin_yaw = (sin_yaw_norm * 2.0) - 1.0
            yaw = np.arctan2(sin_yaw, cos_yaw)
            # assemble the label dictionary for this detection
            label_dict = {}
            label_dict['conf'] = obj_conf
            label_dict['x'] = x
            label_dict['y'] = y
            label_dict['z'] = z
            label_dict['l'] = l
            label_dict['w'] = w
            label_dict['h'] = h
            label_dict['yaw'] = yaw
            label_dict['class'] = cls_
            label_dict_list.append(label_dict)
    # non-max suppression
    if nms == True:
        label_dict_list = non_max_suppression(label_dict_list, iou_threshold=iou_thres)
    # return label dictionary
    return label_dict_list
def point_cloud_to_volume(points, vol_size=(256,256,16),
                          xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0)):
    """Voxelize an Nx3 point cloud into a density volume.

    Args:
        points: Nx3 array of (x, y, z) lidar points; points are assumed to
            lie inside the limits (out-of-range points would index out of
            bounds, as in the original implementation).
        vol_size: number of voxels along (x, y, z).
        xlim/ylim/zlim: metric extents covered by the volume.

    Returns:
        Volume of shape (vol_size[2], vol_size[0], vol_size[1]) -- axes moved
        to (z, x, y) for PyTorch -- where each voxel holds the number of
        points that fell inside it.
    """
    vol = np.zeros(vol_size)
    voxel_size = ((xlim[1]-xlim[0])/float(vol_size[0]), \
                  (ylim[1]-ylim[0])/float(vol_size[1]), \
                  (zlim[1]-zlim[0])/float(vol_size[2]))
    locations = (points - (xlim[0],ylim[0],zlim[0])) / voxel_size
    locations = locations.astype(int)
    # FIX: fancy-indexed `vol[idx] += 1` writes each duplicated voxel only
    # once (buffered ufunc); np.add.at performs unbuffered accumulation so
    # multiple points in the same voxel are all counted.
    np.add.at(vol, (locations[:,0], locations[:,1], locations[:,2]), 1.0)
    # change the axis for pytorch
    vol = np.transpose(vol, (2, 0, 1))
    # return volume
    return vol
def volume_to_point_cloud(vol, vol_size=(256,256,16),
                          xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0)):
    """Convert an occupancy volume back into an Nx3 point cloud.

    Inverse of ``point_cloud_to_volume``: each voxel with a value > 0 yields
    one point at the voxel's minimum corner.

    Args:
        vol: volume of shape (vol_size[2], vol_size[0], vol_size[1]), i.e.
            (z, x, y) axis order as produced by ``point_cloud_to_volume``.
        vol_size: number of voxels along (x, y, z).
        xlim/ylim/zlim: metric extents covered by the volume.

    Returns:
        Nx3 numpy array of points; shape (0, 3) when the volume is empty.
    """
    # change the axis back from pytorch (z, x, y) order to (x, y, z)
    vol = np.transpose(vol, (1, 2, 0))
    assert((vol.shape[0] == vol_size[0]) and \
           (vol.shape[1] == vol_size[1]) and \
           (vol.shape[2] == vol_size[2]))
    voxel_size = ((xlim[1]-xlim[0])/float(vol_size[0]), \
                  (ylim[1]-ylim[0])/float(vol_size[1]), \
                  (zlim[1]-zlim[0])/float(vol_size[2]))
    # PERF: vectorized replacement for the original triple Python loop over
    # every voxel; np.argwhere yields occupied indices in the same C
    # (row-major) order the nested loops produced.
    occupied = np.argwhere(vol > 0)
    if occupied.shape[0] == 0:
        return np.zeros((0,3))
    return occupied * voxel_size + (xlim[0], ylim[0], zlim[0])
def read_velo_bin(filename):
    """Read a KITTI velodyne ``.bin`` scan file.

    The file stores float32 records of (x, y, z, reflectance); the
    reflectance/luminance channel is dropped.

    Args:
        filename: path to the .bin scan.

    Returns:
        Nx3 float32 numpy array of xyz points.
    """
    raw = np.fromfile(filename, dtype=np.float32)
    scan = raw.reshape(-1, 4)
    # keep x, y, z; drop the 4th (luminance) channel
    return scan[:, :3]
# intersection-over-union
def iou(labelA, labelB):
    """Birds-eye-view intersection-over-union of two object labels.

    Boxes are treated as axis-aligned ``l x w`` footprints centred at
    (x, y); the yaw field is read but not used in the overlap computation.

    Args:
        labelA, labelB: label dicts with keys 'x','y','z','l','w','h','yaw'.

    Returns:
        IoU of the two footprints in [0, 1].
    """
    pose_a = [labelA[k] for k in ('x', 'y', 'z', 'l', 'w', 'h', 'yaw')]
    pose_b = [labelB[k] for k in ('x', 'y', 'z', 'l', 'w', 'h', 'yaw')]
    ax, ay, al, aw = pose_a[0], pose_a[1], pose_a[3], pose_a[4]
    bx, by, bl, bw = pose_b[0], pose_b[1], pose_b[3], pose_b[4]
    # extent of the overlap along each axis (clamped to zero when disjoint)
    overlap_x = min(ax + al/2.0, bx + bl/2.0) - max(ax - al/2.0, bx - bl/2.0)
    overlap_y = min(ay + aw/2.0, by + bw/2.0) - max(ay - aw/2.0, by - bw/2.0)
    inter_area = max(0, overlap_x) * max(0, overlap_y)
    # inclusion-exclusion: union = A + B - intersection
    union_area = al * aw + bl * bw - inter_area
    return inter_area / float(union_area)
# non-max suppression
def non_max_suppression(label_dict_list, iou_threshold=0.1):
    """Suppress overlapping detections, keeping the higher-confidence box of
    any pair whose birds-eye IoU exceeds ``iou_threshold``.

    FIX: the original implementation never reset the inner index ``j``
    between outer iterations, so only pairs involving the first element
    were ever compared; all other overlaps went unsuppressed.

    Args:
        label_dict_list: list of label dicts (mutated in place).
        iou_threshold: overlap above which the lower-confidence box is removed.

    Returns:
        The same list with suppressed detections deleted.
    """
    i = 0
    while i < len(label_dict_list):
        # suppression is symmetric, so scanning j > i covers every pair
        j = i + 1
        while j < len(label_dict_list):
            if iou(label_dict_list[i], label_dict_list[j]) > iou_threshold:
                if label_dict_list[i]['conf'] >= label_dict_list[j]['conf']:
                    # drop j; the next candidate slides into index j
                    del label_dict_list[j]
                    continue
                else:
                    # drop i; the element at i changed, so restart the inner scan
                    del label_dict_list[i]
                    j = i + 1
                    continue
            j += 1
        i += 1
    return label_dict_list
# convert 3d points to 2d
def pts_3d_to_2d(pts_3d, K, convert2int=True):
    """Project 3D camera-frame points onto the image plane.

    Args:
        pts_3d: iterable of 3D points (x, y, z) in the camera frame; points
            behind the camera (z < 0) are clamped to a tiny positive depth
            in place.
        K: 3x3 intrinsic matrix, or 3x4 projection matrix (points are then
            promoted to homogeneous coordinates).
        convert2int: if True, truncate pixel coordinates to integers.

    Returns:
        Nx2 numpy array of pixel coordinates.
    """
    pts_2d = None
    # FIX: np.int (both the dtype alias and the scalar call) was removed in
    # NumPy 1.24; the builtin int it aliased behaves identically.
    if convert2int == True:
        pts_2d = np.zeros((len(pts_3d), 2), dtype=int)
    else:
        pts_2d = np.zeros((len(pts_3d), 2), dtype=np.float32)
    for i in range(len(pts_3d)):
        # clamp points behind the camera to avoid division by zero / sign flips
        if pts_3d[i][2] < 0.0:
            pts_3d[i][2] = 1e-6
        if(K.shape[1] == 4):
            pts_3d_ = np.append(np.transpose(pts_3d[i]), 1.0)
        else:
            pts_3d_ = np.transpose(pts_3d[i])
        pts_2d_ = np.matmul(K, pts_3d_)
        # perspective divide by the homogeneous coordinate
        if convert2int == True:
            pts_2d[i] = [int(pts_2d_[0]/pts_2d_[2]), int(pts_2d_[1]/pts_2d_[2])]
        else:
            pts_2d[i] = [(pts_2d_[0]/pts_2d_[2]), (pts_2d_[1]/pts_2d_[2])]
    return np.array(pts_2d)
# convert labels from lidar frame to camera frame
def label_lidar2cam(label_dict, lidar2cam):
    """Transform label centre positions from the lidar frame to the camera frame.

    Args:
        label_dict: list of label dicts with at least 'x', 'y', 'z' keys.
        lidar2cam: lidar-to-camera transformation matrix, applied to the
            homogeneous position [x, y, z, 1].

    Returns:
        New list of label dicts with transformed positions; every other
        field is copied unchanged and the input dicts are not modified.
    """
    transformed = []
    for label in label_dict:
        xyz_hom = np.transpose([label['x'], label['y'], label['z'], 1.0])
        cam_xyz = np.dot(lidar2cam, xyz_hom)
        new_label = label.copy()
        new_label['x'] = cam_xyz[0]
        new_label['y'] = cam_xyz[1]
        new_label['z'] = cam_xyz[2]
        transformed.append(new_label)
    return transformed
# project lidar point-cloud to image plane
def project_pc2image(points, lidar2cam, K, resolution):
    '''
    Inputs:
        points: Nx3 vector containing lidar points
        lidar2cam: lidar-to-camera transformation matrix
        resolution: (x, y) resolution of camera frame
    Output:
        array of resolution (x, y) with lidar points projected on the image plane
    '''
    n_pts = points.shape[0]
    # lidar points in homogeneous coordinates -> camera frame
    pts_hom = np.append(points, np.ones((n_pts, 1)), axis=-1)
    cam = np.dot(lidar2cam, pts_hom.T).T
    # pinhole projection followed by the perspective divide
    img_hom = np.dot(K, cam.T).T
    pix = np.array([(row / row[-1])[:2] for row in img_hom], dtype=np.int32)
    depth = cam[:, 2]
    # keep only points that land inside the frame and in front of the camera
    keep = np.logical_and.reduce((
        (pix[:, 0] >= 0), (pix[:, 0] < resolution[0]),
        (pix[:, 1] >= 0), (pix[:, 1] < resolution[1]),
        (depth > 0)))
    pix = pix[keep]
    depth = depth[keep]
    # sparse depth image: pixel value is the camera-frame z of the point
    depth_img = np.zeros((resolution[1], resolution[0]), dtype=np.float32)
    depth_img[(pix[:, 1], pix[:, 0])] = depth
    return depth_img
def colorize_depth_map(depth_map, min_depth=0, max_depth=100, cmap="magma", mask_zeros=False):
    """Map a single-channel depth image to an RGB colormap visualization.

    Args:
        depth_map: 2D numpy array of depth values.
        min_depth/max_depth: normalization range; pass None to use the
            map's own min/max.
        cmap: matplotlib colormap name.
        mask_zeros: if True, pixels whose input value was exactly 0 are
            painted black in the output.

    Returns:
        HxWx3 uint8 RGB image.
    """
    # normalize
    min_depth = depth_map.min() if min_depth is None else min_depth
    max_depth = depth_map.max() if max_depth is None else max_depth
    # apply mask (remember zero pixels before the values are remapped)
    if mask_zeros:
        mask = (depth_map == 0)
    # invert the scale for better colormap visualization
    depth_map = max_depth - depth_map
    # scale between 0 to 1
    if min_depth != max_depth:
        depth_map = (depth_map - min_depth) / (max_depth - min_depth)
    else:
        depth_map = depth_map * 0
    # NOTE(review): cm.get_cmap is deprecated/removed in newer matplotlib
    # (use matplotlib.colormaps[cmap]) -- confirm the pinned version.
    cmapper = cm.get_cmap(cmap)
    depth_map = cmapper(depth_map, bytes=True)
    # drop the alpha channel
    img = depth_map[:,:,:3]
    if mask_zeros:
        img[mask] = (0, 0, 0)
    return img
def reproject_lr_disparity(img_left, img_right, depth_pred, f, baseline, camera):
    """Warp between a stereo pair using predicted depth and compute the
    reprojection errors (inputs for a self-supervised stereo loss).

    Args:
        img_left/img_right: stereo images as NCHW tensors.
        depth_pred: predicted depth map; resized to the image resolution.
        f: focal length in pixels.
        baseline: stereo baseline (same metric units as depth).
        camera: 'left' or 'right' -- which camera depth_pred belongs to;
            'right' flips the disparity sign and swaps the image roles.

    Returns:
        Tuple (depth_pred, disparity_1to2, img2_warped, warping_mask_1to2,
        reproj_err_1to2, img1_warped, warping_mask_2to1, reproj_err_2to1).
    """
    h, w = img_left.shape[-2], img_left.shape[-1] # could be a batch or single image
    resize = transforms.Compose([
        transforms.Resize(size=(h, w))
    ])
    # convert to tensor if depth is stored as numpy array
    depth_pred = resize(depth_pred)
    # huber norm (per-pixel, no reduction)
    huber_norm = torch.nn.SmoothL1Loss(reduction='none', beta=1.0)
    # compute disparity from depth: d = f * baseline / z
    disparity_1to2 = f * baseline / (depth_pred + 1e-6)
    # normalize disparity to a fraction of the image width
    disparity_1to2 = disparity_1to2 / w
    img1 = img_right if False else img_left
    img2 = img_right
    img1 = img_left
    # flip convention
    if camera == 'right':
        disparity_1to2 *= -1.0
        # flip images
        img1 = img_right
        img2 = img_left
    # warp left image to generate right image
    img2_warped = apply_disparity(img1, disparity_1to2)
    # get warped mask (warp an all-ones image to find valid pixels)
    warping_mask_1to2 = torch.ones_like(img1)
    warping_mask_1to2 = apply_disparity(warping_mask_1to2, disparity_1to2)
    # compute left-to-right L1 loss
    # reproj_err_1to2 = warping_mask_1to2 * torch.abs(img2 - img2_warped)
    reproj_err_1to2 = warping_mask_1to2 * huber_norm(img2, img2_warped)
    # warp right image to generate left image
    img1_warped = apply_disparity(img2, -disparity_1to2)
    # get warped mask
    warping_mask_2to1 = torch.ones_like(img1)
    warping_mask_2to1 = apply_disparity(warping_mask_2to1, -disparity_1to2)
    # compute right-to-left L1 loss
    # reproj_err_2to1 = warping_mask_2to1 * torch.abs(img1 - img1_warped)
    reproj_err_2to1 = warping_mask_2to1 * huber_norm(img1, img1_warped)
    return depth_pred, disparity_1to2, img2_warped, warping_mask_1to2, reproj_err_1to2, img1_warped, warping_mask_2to1, reproj_err_2to1
def apply_disparity(img, disp):
    """Warp ``img`` horizontally by a normalized disparity map using bilinear
    grid sampling.

    Args:
        img: NCHW image batch.
        disp: NCHW disparity with one channel; values are fractions of the
            image width (positive shifts sample to the right).

    Returns:
        Warped image batch of the same shape as ``img``.
    """
    n_batch, _, n_rows, n_cols = img.shape
    # base sampling grid in [0, 1] for every pixel, matched to img's dtype/device
    base_x = torch.linspace(0, 1, n_cols).repeat(n_batch, n_rows, 1).type_as(img)
    base_y = torch.linspace(0, 1, n_rows).repeat(n_batch, n_cols, 1).transpose(1, 2).type_as(img)
    # shift the sampling positions along x by the (single-channel) disparity
    shifted_x = base_x + disp[:, 0, :, :]
    grid = torch.stack((shifted_x, base_y), dim=3)
    # grid_sample expects coordinates in [-1, 1]
    return F.grid_sample(img, 2 * grid - 1, mode='bilinear',
                         padding_mode='zeros')
def get_reprojection_vis(img_left, img_right, depth_pred, f, baseline):
    """Build a stacked RGB visualization of the stereo reprojection pipeline.

    Runs ``reproject_lr_disparity`` for the left camera and vertically
    concatenates: left image, right-to-left warp, its reprojection error,
    right image, left-to-right warp, its reprojection error, and the
    colorized predicted depth -- each with a caption banner.

    Args:
        img_left/img_right: stereo images as NCHW tensors (batch index 0 is used).
        depth_pred: predicted depth map tensor.
        f: focal length in pixels; baseline: stereo baseline.

    Returns:
        Tall RGB numpy image with all seven panels stacked vertically.
    """
    depth, disparity_l2r, img_right_warped, warping_mask_l2r, l2r_l1_err, img_left_warped, warping_mask_r2l, r2l_l1_err = \
            reproject_lr_disparity(img_left, img_right, depth_pred, f, baseline, camera='left')
    # left image: tensor NCHW -> HWC uint8
    img_l = img_left.detach().cpu().numpy()
    img_l = np.transpose(img_l[0], (1,2,0))
    img_l = np.array(img_l, dtype=np.uint8)
    # text on the visualization image
    x_center = int(img_l.shape[1] / 2)
    img_l = cv2.cvtColor(img_l, cv2.COLOR_RGB2BGR)
    cv2.rectangle(img_l, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(img_l, 'left image',
                (x_center-100,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # right image
    img_r = img_right.detach().cpu().numpy()
    img_r = np.transpose(img_r[0], (1,2,0))
    img_r = np.array(img_r, dtype=np.uint8)
    # text on the visualization image
    img_r = cv2.cvtColor(img_r, cv2.COLOR_RGB2BGR)
    cv2.rectangle(img_r, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(img_r, 'right image',
                (x_center-100,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # depth map, colorized for display
    depth = depth.detach().cpu().numpy()
    depth = np.squeeze(depth[0], 0)
    depth_colorized = colorize_depth_map(depth)
    depth_colorized = np.array(depth_colorized, dtype=np.uint8)
    # text on the visualization image
    depth_colorized = cv2.cvtColor(depth_colorized, cv2.COLOR_RGB2BGR)
    cv2.rectangle(depth_colorized, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(depth_colorized, 'predicted depth',
                (x_center-100,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # right-to-left warped image
    img_l_warped = img_left_warped.detach().cpu().numpy()
    img_l_warped = np.transpose(img_l_warped[0], (1,2,0))
    img_l_warped = np.array(img_l_warped, dtype=np.uint8)
    # # text on the visualization image
    img_l_warped = cv2.cvtColor(img_l_warped, cv2.COLOR_RGB2BGR)
    cv2.rectangle(img_l_warped, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(img_l_warped, 'right-to-left warped image',
                (x_center-200,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # right-to-left reprojection error (rendered as grayscale)
    r2l_l1_err = r2l_l1_err.detach().cpu().numpy()
    r2l_l1_err = np.transpose(r2l_l1_err[0], (1,2,0))
    r2l_l1_err = np.array(r2l_l1_err, dtype=np.uint8)
    r2l_l1_err = cv2.cvtColor(r2l_l1_err, cv2.COLOR_RGB2GRAY)
    # text on the visualization image
    r2l_l1_err = cv2.cvtColor(r2l_l1_err, cv2.COLOR_GRAY2BGR)
    cv2.rectangle(r2l_l1_err, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(r2l_l1_err, 'right-to-left reproj error',
                (x_center-200,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # left-to-right warped image
    img_r_warped = img_right_warped.detach().cpu().numpy()
    img_r_warped = np.transpose(img_r_warped[0], (1,2,0))
    img_r_warped = np.array(img_r_warped, dtype=np.uint8)
    # text on the visualization image
    img_r_warped = cv2.cvtColor(img_r_warped, cv2.COLOR_RGB2BGR)
    cv2.rectangle(img_r_warped, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(img_r_warped, 'left-to-right warped image',
                (x_center-200,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # left-to-right reprojection error (rendered as grayscale)
    l2r_l1_err = l2r_l1_err.detach().cpu().numpy()
    l2r_l1_err = np.transpose(l2r_l1_err[0], (1,2,0))
    l2r_l1_err = np.array(l2r_l1_err, dtype=np.uint8)
    l2r_l1_err = cv2.cvtColor(l2r_l1_err, cv2.COLOR_RGB2GRAY)
    # text on the visualization image
    l2r_l1_err = cv2.cvtColor(l2r_l1_err, cv2.COLOR_GRAY2BGR)
    cv2.rectangle(l2r_l1_err, (x_center-300,0), (x_center+300,60), color=(0,0,0), thickness=-1)
    cv2.putText(l2r_l1_err, 'left-to-right reproj error',
                (x_center-200,40),
                cv2.FONT_HERSHEY_COMPLEX,
                1.0,
                color=(255,255,255),
                thickness=2,
                lineType=2)
    # visualization: stack all panels vertically, convert back to RGB
    img_vis = cv2.vconcat([img_l, img_l_warped, r2l_l1_err, img_r, img_r_warped, l2r_l1_err, depth_colorized])
    img_vis = cv2.cvtColor(img_vis, cv2.COLOR_BGR2RGB)
    return img_vis
# function to compute precision and recall
def compute_precision_recall(poses_true_list, poses_pred_list, conf_thres=0.5, iou_thres=0.5, classes=['Car', 'Pedestrian', 'Cyclist']):
assert len(poses_true_list) == len(poses_pred_list)
tp, fp, fn = 0, 0, 0
for i, poses_pred in enumerate(poses_pred_list):
poses_true_filtered = []
poses_pred_filtered = []
for pose_pred in poses_pred:
if pose_pred['conf'] > conf_thres and pose_pred['class'] in classes:
poses_pred_filtered.append(pose_pred)
for pose_true in poses_true_list[i]:
if pose_true['class'] in classes:
poses_true_filtered.append(pose_true)
# setup a checked flag
checked_flag_true = [False]*len(poses_true_filtered)
checked_flag_pred = [False]*len(poses_pred_filtered)
for m in range(len(poses_true_filtered)):
for n in range(len(poses_pred_filtered)):
if (checked_flag_true[m] == False) and (checked_flag_pred[n] == False):
iou_ = iou(poses_true_filtered[m], poses_pred_filtered[n])
# poses contains [class, conf, x, y, z, w, h, l, orientation]
if (poses_pred_filtered[n]['conf'] >= conf_thres) and \
(poses_pred_filtered[n]['class'] == poses_true_filtered[m]['class']) and \
(iou_ >= iou_thres):
tp += 1
checked_flag_true[m] = True
checked_flag_pred[n] = True
fp += checked_flag_pred.count(False)
fn += checked_flag_true.count(False)
# compute precision and recall
precision = float(tp) / (float(tp + fp) + 1e-6)
recall = float(tp) / (float(tp + fn) + 1e-6)
# return precision and recall
return precision, recall | [
"numpy.arctan2",
"eulerangles.euler2mat",
"matplotlib.cm.get_cmap",
"cv2.vconcat",
"numpy.argmax",
"numpy.ones",
"numpy.sin",
"cv2.rectangle",
"os.path.join",
"cv2.line",
"torch.nn.functional.grid_sample",
"cv2.cvtColor",
"torch.load",
"os.path.exists",
"numpy.logical_and.reduce",
"num... | [((1160, 1191), 'os.path.join', 'os.path.join', (['modeldir', 'exp_str'], {}), '(modeldir, exp_str)\n', (1172, 1191), False, 'import os\n'), ((1214, 1263), 'os.path.join', 'os.path.join', (['model_exp_dir', '"""checkpoint_best.pt"""'], {}), "(model_exp_dir, 'checkpoint_best.pt')\n", (1226, 1263), False, 'import os\n'), ((1303, 1334), 'os.path.exists', 'os.path.exists', (['best_ckpt_model'], {}), '(best_ckpt_model)\n', (1317, 1334), False, 'import os\n'), ((3262, 3307), 'numpy.array', 'np.array', (['((img + 0.5) * 255.0)'], {'dtype': 'np.uint8'}), '((img + 0.5) * 255.0, dtype=np.uint8)\n', (3270, 3307), True, 'import numpy as np\n'), ((4032, 4244), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(input_points[:, 0] > xlim[0], input_points[:, 0] < xlim[1], input_points[:,\n 1] > ylim[0], input_points[:, 1] < ylim[1], input_points[:, 2] > zlim[0\n ], input_points[:, 2] < zlim[1])'], {}), '((input_points[:, 0] > xlim[0], input_points[:, 0] <\n xlim[1], input_points[:, 1] > ylim[0], input_points[:, 1] < ylim[1], \n input_points[:, 2] > zlim[0], input_points[:, 2] < zlim[1]))\n', (4053, 4244), True, 'import numpy as np\n'), ((4495, 4548), 'numpy.zeros', 'np.zeros', (['(canvasSize, canvasSize, 3)'], {'dtype': 'np.uint8'}), '((canvasSize, canvasSize, 3), dtype=np.uint8)\n', (4503, 4548), True, 'import numpy as np\n'), ((5059, 5080), 'eulerangles.euler2mat', 'euler2mat', (['zrot', '(0)', '(0)'], {}), '(zrot, 0, 0)\n', (5068, 5080), False, 'from eulerangles import euler2mat\n'), ((5531, 5569), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (5543, 5569), False, 'import cv2\n'), ((6275, 6487), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(input_points[:, 0] > xlim[0], input_points[:, 0] < xlim[1], input_points[:,\n 1] > ylim[0], input_points[:, 1] < ylim[1], input_points[:, 2] > zlim[0\n ], input_points[:, 2] < zlim[1])'], {}), '((input_points[:, 0] > xlim[0], input_points[:, 0] <\n xlim[1], 
input_points[:, 1] > ylim[0], input_points[:, 1] < ylim[1], \n input_points[:, 2] > zlim[0], input_points[:, 2] < zlim[1]))\n', (6296, 6487), True, 'import numpy as np\n'), ((6738, 6791), 'numpy.zeros', 'np.zeros', (['(canvasSize, canvasSize, 3)'], {'dtype': 'np.uint8'}), '((canvasSize, canvasSize, 3), dtype=np.uint8)\n', (6746, 6791), True, 'import numpy as np\n'), ((7666, 7704), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (7678, 7704), False, 'import cv2\n'), ((10366, 10578), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(input_points[:, 0] > xlim[0], input_points[:, 0] < xlim[1], input_points[:,\n 1] > ylim[0], input_points[:, 1] < ylim[1], input_points[:, 2] > zlim[0\n ], input_points[:, 2] < zlim[1])'], {}), '((input_points[:, 0] > xlim[0], input_points[:, 0] <\n xlim[1], input_points[:, 1] > ylim[0], input_points[:, 1] < ylim[1], \n input_points[:, 2] > zlim[0], input_points[:, 2] < zlim[1]))\n', (10387, 10578), True, 'import numpy as np\n'), ((10829, 10882), 'numpy.zeros', 'np.zeros', (['(canvasSize, canvasSize, 3)'], {'dtype': 'np.uint8'}), '((canvasSize, canvasSize, 3), dtype=np.uint8)\n', (10837, 10882), True, 'import numpy as np\n'), ((11757, 11795), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (11769, 11795), False, 'import cv2\n'), ((18354, 18417), 'numpy.zeros', 'np.zeros', (['(n_xgrids * n_ygrids * obj_label_len)'], {'dtype': 'np.float32'}), '(n_xgrids * n_ygrids * obj_label_len, dtype=np.float32)\n', (18362, 18417), True, 'import numpy as np\n'), ((21085, 21130), 'numpy.reshape', 'np.reshape', (['label_vector', '(-1, obj_label_len)'], {}), '(label_vector, (-1, obj_label_len))\n', (21095, 21130), True, 'import numpy as np\n'), ((23249, 23267), 'numpy.zeros', 'np.zeros', (['vol_size'], {}), '(vol_size)\n', (23257, 23267), True, 'import numpy as np\n'), ((23651, 23679), 'numpy.transpose', 'np.transpose', (['vol', '(2, 0, 1)'], 
{}), '(vol, (2, 0, 1))\n', (23663, 23679), True, 'import numpy as np\n'), ((24005, 24033), 'numpy.transpose', 'np.transpose', (['vol', '(1, 2, 0)'], {}), '(vol, (1, 2, 0))\n', (24017, 24033), True, 'import numpy as np\n'), ((24711, 24728), 'numpy.vstack', 'np.vstack', (['points'], {}), '(points)\n', (24720, 24728), True, 'import numpy as np\n'), ((27714, 27730), 'numpy.array', 'np.array', (['pts_2d'], {}), '(pts_2d)\n', (27722, 27730), True, 'import numpy as np\n'), ((28795, 28873), 'numpy.array', 'np.array', (['[(p_ / p_[-1])[:2] for p_ in points_img_homogeneous]'], {'dtype': 'np.int32'}), '([(p_ / p_[-1])[:2] for p_ in points_img_homogeneous], dtype=np.int32)\n', (28803, 28873), True, 'import numpy as np\n'), ((28939, 29100), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(points_img[:, 0] >= 0, points_img[:, 0] < resolution[0], points_img[:, 1] >=\n 0, points_img[:, 1] < resolution[1], points_z > 0)'], {}), '((points_img[:, 0] >= 0, points_img[:, 0] < resolution\n [0], points_img[:, 1] >= 0, points_img[:, 1] < resolution[1], points_z > 0)\n )\n', (28960, 29100), True, 'import numpy as np\n'), ((29347, 29405), 'numpy.zeros', 'np.zeros', (['(resolution[1], resolution[0])'], {'dtype': 'np.float32'}), '((resolution[1], resolution[0]), dtype=np.float32)\n', (29355, 29405), True, 'import numpy as np\n'), ((30133, 30150), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cmap'], {}), '(cmap)\n', (30144, 30150), True, 'import matplotlib.cm as cm\n'), ((30703, 30752), 'torch.nn.SmoothL1Loss', 'torch.nn.SmoothL1Loss', ([], {'reduction': '"""none"""', 'beta': '(1.0)'}), "(reduction='none', beta=1.0)\n", (30724, 30752), False, 'import torch\n'), ((31247, 31268), 'torch.ones_like', 'torch.ones_like', (['img1'], {}), '(img1)\n', (31262, 31268), False, 'import torch\n'), ((31682, 31703), 'torch.ones_like', 'torch.ones_like', (['img1'], {}), '(img1)\n', (31697, 31703), False, 'import torch\n'), ((32568, 32615), 'torch.stack', 'torch.stack', (['(x_base + x_shifts, y_base)'], 
{'dim': '(3)'}), '((x_base + x_shifts, y_base), dim=3)\n', (32579, 32615), False, 'import torch\n'), ((32697, 32774), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['img', '(2 * flow_field - 1)'], {'mode': '"""bilinear"""', 'padding_mode': '"""zeros"""'}), "(img, 2 * flow_field - 1, mode='bilinear', padding_mode='zeros')\n", (32710, 32774), True, 'import torch.nn.functional as F\n'), ((33182, 33215), 'numpy.transpose', 'np.transpose', (['img_l[0]', '(1, 2, 0)'], {}), '(img_l[0], (1, 2, 0))\n', (33194, 33215), True, 'import numpy as np\n'), ((33226, 33257), 'numpy.array', 'np.array', (['img_l'], {'dtype': 'np.uint8'}), '(img_l, dtype=np.uint8)\n', (33234, 33257), True, 'import numpy as np\n'), ((33348, 33386), 'cv2.cvtColor', 'cv2.cvtColor', (['img_l', 'cv2.COLOR_RGB2BGR'], {}), '(img_l, cv2.COLOR_RGB2BGR)\n', (33360, 33386), False, 'import cv2\n'), ((33391, 33489), 'cv2.rectangle', 'cv2.rectangle', (['img_l', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(img_l, (x_center - 300, 0), (x_center + 300, 60), color=(0, 0,\n 0), thickness=-1)\n', (33404, 33489), False, 'import cv2\n'), ((33482, 33620), 'cv2.putText', 'cv2.putText', (['img_l', '"""left image"""', '(x_center - 100, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(img_l, 'left image', (x_center - 100, 40), cv2.\n FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2, lineType=2)\n", (33493, 33620), False, 'import cv2\n'), ((33786, 33819), 'numpy.transpose', 'np.transpose', (['img_r[0]', '(1, 2, 0)'], {}), '(img_r[0], (1, 2, 0))\n', (33798, 33819), True, 'import numpy as np\n'), ((33830, 33861), 'numpy.array', 'np.array', (['img_r'], {'dtype': 'np.uint8'}), '(img_r, dtype=np.uint8)\n', (33838, 33861), True, 'import numpy as np\n'), ((33913, 33951), 'cv2.cvtColor', 'cv2.cvtColor', (['img_r', 'cv2.COLOR_RGB2BGR'], {}), '(img_r, cv2.COLOR_RGB2BGR)\n', (33925, 33951), False, 'import 
cv2\n'), ((33957, 34055), 'cv2.rectangle', 'cv2.rectangle', (['img_r', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(img_r, (x_center - 300, 0), (x_center + 300, 60), color=(0, 0,\n 0), thickness=-1)\n', (33970, 34055), False, 'import cv2\n'), ((34048, 34187), 'cv2.putText', 'cv2.putText', (['img_r', '"""right image"""', '(x_center - 100, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(img_r, 'right image', (x_center - 100, 40), cv2.\n FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2, lineType=2)\n", (34059, 34187), False, 'import cv2\n'), ((34299, 34322), 'numpy.squeeze', 'np.squeeze', (['depth[0]', '(0)'], {}), '(depth[0], 0)\n', (34309, 34322), True, 'import numpy as np\n'), ((34393, 34434), 'numpy.array', 'np.array', (['depth_colorized'], {'dtype': 'np.uint8'}), '(depth_colorized, dtype=np.uint8)\n', (34401, 34434), True, 'import numpy as np\n'), ((34496, 34544), 'cv2.cvtColor', 'cv2.cvtColor', (['depth_colorized', 'cv2.COLOR_RGB2BGR'], {}), '(depth_colorized, cv2.COLOR_RGB2BGR)\n', (34508, 34544), False, 'import cv2\n'), ((34550, 34658), 'cv2.rectangle', 'cv2.rectangle', (['depth_colorized', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(depth_colorized, (x_center - 300, 0), (x_center + 300, 60),\n color=(0, 0, 0), thickness=-1)\n', (34563, 34658), False, 'import cv2\n'), ((34651, 34804), 'cv2.putText', 'cv2.putText', (['depth_colorized', '"""predicted depth"""', '(x_center - 100, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(depth_colorized, 'predicted depth', (x_center - 100, 40), cv2.\n FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2, lineType=2)\n", (34662, 34804), False, 'import cv2\n'), ((34957, 34997), 'numpy.transpose', 'np.transpose', (['img_l_warped[0]', '(1, 2, 0)'], {}), '(img_l_warped[0], 
(1, 2, 0))\n', (34969, 34997), True, 'import numpy as np\n'), ((35015, 35053), 'numpy.array', 'np.array', (['img_l_warped'], {'dtype': 'np.uint8'}), '(img_l_warped, dtype=np.uint8)\n', (35023, 35053), True, 'import numpy as np\n'), ((35114, 35159), 'cv2.cvtColor', 'cv2.cvtColor', (['img_l_warped', 'cv2.COLOR_RGB2BGR'], {}), '(img_l_warped, cv2.COLOR_RGB2BGR)\n', (35126, 35159), False, 'import cv2\n'), ((35165, 35270), 'cv2.rectangle', 'cv2.rectangle', (['img_l_warped', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(img_l_warped, (x_center - 300, 0), (x_center + 300, 60),\n color=(0, 0, 0), thickness=-1)\n', (35178, 35270), False, 'import cv2\n'), ((35263, 35428), 'cv2.putText', 'cv2.putText', (['img_l_warped', '"""right-to-left warped image"""', '(x_center - 200, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(img_l_warped, 'right-to-left warped image', (x_center - 200, 40\n ), cv2.FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2,\n lineType=2)\n", (35274, 35428), False, 'import cv2\n'), ((35574, 35612), 'numpy.transpose', 'np.transpose', (['r2l_l1_err[0]', '(1, 2, 0)'], {}), '(r2l_l1_err[0], (1, 2, 0))\n', (35586, 35612), True, 'import numpy as np\n'), ((35628, 35664), 'numpy.array', 'np.array', (['r2l_l1_err'], {'dtype': 'np.uint8'}), '(r2l_l1_err, dtype=np.uint8)\n', (35636, 35664), True, 'import numpy as np\n'), ((35682, 35726), 'cv2.cvtColor', 'cv2.cvtColor', (['r2l_l1_err', 'cv2.COLOR_RGB2GRAY'], {}), '(r2l_l1_err, cv2.COLOR_RGB2GRAY)\n', (35694, 35726), False, 'import cv2\n'), ((35784, 35828), 'cv2.cvtColor', 'cv2.cvtColor', (['r2l_l1_err', 'cv2.COLOR_GRAY2BGR'], {}), '(r2l_l1_err, cv2.COLOR_GRAY2BGR)\n', (35796, 35828), False, 'import cv2\n'), ((35834, 35938), 'cv2.rectangle', 'cv2.rectangle', (['r2l_l1_err', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(r2l_l1_err, (x_center - 
300, 0), (x_center + 300, 60), color=\n (0, 0, 0), thickness=-1)\n', (35847, 35938), False, 'import cv2\n'), ((35930, 36092), 'cv2.putText', 'cv2.putText', (['r2l_l1_err', '"""right-to-left reproj error"""', '(x_center - 200, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(r2l_l1_err, 'right-to-left reproj error', (x_center - 200, 40),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2,\n lineType=2)\n", (35941, 36092), False, 'import cv2\n'), ((36243, 36283), 'numpy.transpose', 'np.transpose', (['img_r_warped[0]', '(1, 2, 0)'], {}), '(img_r_warped[0], (1, 2, 0))\n', (36255, 36283), True, 'import numpy as np\n'), ((36301, 36339), 'numpy.array', 'np.array', (['img_r_warped'], {'dtype': 'np.uint8'}), '(img_r_warped, dtype=np.uint8)\n', (36309, 36339), True, 'import numpy as np\n'), ((36398, 36443), 'cv2.cvtColor', 'cv2.cvtColor', (['img_r_warped', 'cv2.COLOR_RGB2BGR'], {}), '(img_r_warped, cv2.COLOR_RGB2BGR)\n', (36410, 36443), False, 'import cv2\n'), ((36449, 36554), 'cv2.rectangle', 'cv2.rectangle', (['img_r_warped', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(img_r_warped, (x_center - 300, 0), (x_center + 300, 60),\n color=(0, 0, 0), thickness=-1)\n', (36462, 36554), False, 'import cv2\n'), ((36547, 36712), 'cv2.putText', 'cv2.putText', (['img_r_warped', '"""left-to-right warped image"""', '(x_center - 200, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(img_r_warped, 'left-to-right warped image', (x_center - 200, 40\n ), cv2.FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2,\n lineType=2)\n", (36558, 36712), False, 'import cv2\n'), ((36858, 36896), 'numpy.transpose', 'np.transpose', (['l2r_l1_err[0]', '(1, 2, 0)'], {}), '(l2r_l1_err[0], (1, 2, 0))\n', (36870, 36896), True, 'import numpy as np\n'), ((36912, 36948), 'numpy.array', 'np.array', 
(['l2r_l1_err'], {'dtype': 'np.uint8'}), '(l2r_l1_err, dtype=np.uint8)\n', (36920, 36948), True, 'import numpy as np\n'), ((36966, 37010), 'cv2.cvtColor', 'cv2.cvtColor', (['l2r_l1_err', 'cv2.COLOR_RGB2GRAY'], {}), '(l2r_l1_err, cv2.COLOR_RGB2GRAY)\n', (36978, 37010), False, 'import cv2\n'), ((37068, 37112), 'cv2.cvtColor', 'cv2.cvtColor', (['l2r_l1_err', 'cv2.COLOR_GRAY2BGR'], {}), '(l2r_l1_err, cv2.COLOR_GRAY2BGR)\n', (37080, 37112), False, 'import cv2\n'), ((37118, 37222), 'cv2.rectangle', 'cv2.rectangle', (['l2r_l1_err', '(x_center - 300, 0)', '(x_center + 300, 60)'], {'color': '(0, 0, 0)', 'thickness': '(-1)'}), '(l2r_l1_err, (x_center - 300, 0), (x_center + 300, 60), color=\n (0, 0, 0), thickness=-1)\n', (37131, 37222), False, 'import cv2\n'), ((37214, 37376), 'cv2.putText', 'cv2.putText', (['l2r_l1_err', '"""left-to-right reproj error"""', '(x_center - 200, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)'], {'color': '(255, 255, 255)', 'thickness': '(2)', 'lineType': '(2)'}), "(l2r_l1_err, 'left-to-right reproj error', (x_center - 200, 40),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, color=(255, 255, 255), thickness=2,\n lineType=2)\n", (37225, 37376), False, 'import cv2\n'), ((37450, 37550), 'cv2.vconcat', 'cv2.vconcat', (['[img_l, img_l_warped, r2l_l1_err, img_r, img_r_warped, l2r_l1_err,\n depth_colorized]'], {}), '([img_l, img_l_warped, r2l_l1_err, img_r, img_r_warped,\n l2r_l1_err, depth_colorized])\n', (37461, 37550), False, 'import cv2\n'), ((37561, 37601), 'cv2.cvtColor', 'cv2.cvtColor', (['img_vis', 'cv2.COLOR_BGR2RGB'], {}), '(img_vis, cv2.COLOR_BGR2RGB)\n', (37573, 37601), False, 'import cv2\n'), ((5415, 5502), 'cv2.circle', 'cv2.circle', (['image', '(imgx, imgy)'], {'radius': 'radius', 'color': '(hue, 255, 128)', 'thickness': '(-1)'}), '(image, (imgx, imgy), radius=radius, color=(hue, 255, 128),\n thickness=-1)\n', (5425, 5502), False, 'import cv2\n'), ((7550, 7637), 'cv2.circle', 'cv2.circle', (['image', '(imgx, imgy)'], {'radius': 'radius', 'color': '(hue, 255, 
128)', 'thickness': '(-1)'}), '(image, (imgx, imgy), radius=radius, color=(hue, 255, 128),\n thickness=-1)\n', (7560, 7637), False, 'import cv2\n'), ((8267, 8304), 'numpy.asarray', 'np.asarray', (['corners'], {'dtype': 'np.float32'}), '(corners, dtype=np.float32)\n', (8277, 8304), True, 'import numpy as np\n'), ((8348, 8368), 'eulerangles.euler2mat', 'euler2mat', (['yaw', '(0)', '(0)'], {}), '(yaw, 0, 0)\n', (8357, 8368), False, 'from eulerangles import euler2mat\n'), ((8654, 8690), 'numpy.asarray', 'np.asarray', (['corners_px'], {'dtype': 'np.int'}), '(corners_px, dtype=np.int)\n', (8664, 8690), True, 'import numpy as np\n'), ((8728, 8853), 'cv2.line', 'cv2.line', (['image', '(corners_px[0, 0], corners_px[0, 1])', '(corners_px[1, 0], corners_px[1, 1])'], {'color': '(0, 0, 0)', 'thickness': '(6)'}), '(image, (corners_px[0, 0], corners_px[0, 1]), (corners_px[1, 0],\n corners_px[1, 1]), color=(0, 0, 0), thickness=6)\n', (8736, 8853), False, 'import cv2\n'), ((8852, 8979), 'cv2.line', 'cv2.line', (['image', '(corners_px[0, 0], corners_px[0, 1])', '(corners_px[1, 0], corners_px[1, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[0, 0], corners_px[0, 1]), (corners_px[1, 0],\n corners_px[1, 1]), color=(255, 0, 0), thickness=2)\n', (8860, 8979), False, 'import cv2\n'), ((8978, 9105), 'cv2.line', 'cv2.line', (['image', '(corners_px[1, 0], corners_px[1, 1])', '(corners_px[2, 0], corners_px[2, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[1, 0], corners_px[1, 1]), (corners_px[2, 0],\n corners_px[2, 1]), color=(255, 0, 0), thickness=2)\n', (8986, 9105), False, 'import cv2\n'), ((9104, 9231), 'cv2.line', 'cv2.line', (['image', '(corners_px[2, 0], corners_px[2, 1])', '(corners_px[3, 0], corners_px[3, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[2, 0], corners_px[2, 1]), (corners_px[3, 0],\n corners_px[3, 1]), color=(255, 0, 0), thickness=2)\n', (9112, 9231), False, 'import cv2\n'), ((9230, 
9357), 'cv2.line', 'cv2.line', (['image', '(corners_px[3, 0], corners_px[3, 1])', '(corners_px[0, 0], corners_px[0, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[3, 0], corners_px[3, 1]), (corners_px[0, 0],\n corners_px[0, 1]), color=(255, 0, 0), thickness=2)\n', (9238, 9357), False, 'import cv2\n'), ((11641, 11728), 'cv2.circle', 'cv2.circle', (['image', '(imgx, imgy)'], {'radius': 'radius', 'color': '(hue, 255, 255)', 'thickness': '(-1)'}), '(image, (imgx, imgy), radius=radius, color=(hue, 255, 255),\n thickness=-1)\n', (11651, 11728), False, 'import cv2\n'), ((12358, 12395), 'numpy.asarray', 'np.asarray', (['corners'], {'dtype': 'np.float32'}), '(corners, dtype=np.float32)\n', (12368, 12395), True, 'import numpy as np\n'), ((12439, 12459), 'eulerangles.euler2mat', 'euler2mat', (['yaw', '(0)', '(0)'], {}), '(yaw, 0, 0)\n', (12448, 12459), False, 'from eulerangles import euler2mat\n'), ((12745, 12781), 'numpy.asarray', 'np.asarray', (['corners_px'], {'dtype': 'np.int'}), '(corners_px, dtype=np.int)\n', (12755, 12781), True, 'import numpy as np\n'), ((12819, 12944), 'cv2.line', 'cv2.line', (['image', '(corners_px[0, 0], corners_px[0, 1])', '(corners_px[1, 0], corners_px[1, 1])'], {'color': '(0, 0, 0)', 'thickness': '(6)'}), '(image, (corners_px[0, 0], corners_px[0, 1]), (corners_px[1, 0],\n corners_px[1, 1]), color=(0, 0, 0), thickness=6)\n', (12827, 12944), False, 'import cv2\n'), ((12943, 13070), 'cv2.line', 'cv2.line', (['image', '(corners_px[0, 0], corners_px[0, 1])', '(corners_px[1, 0], corners_px[1, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[0, 0], corners_px[0, 1]), (corners_px[1, 0],\n corners_px[1, 1]), color=(255, 0, 0), thickness=2)\n', (12951, 13070), False, 'import cv2\n'), ((13069, 13196), 'cv2.line', 'cv2.line', (['image', '(corners_px[1, 0], corners_px[1, 1])', '(corners_px[2, 0], corners_px[2, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[1, 0], corners_px[1, 
1]), (corners_px[2, 0],\n corners_px[2, 1]), color=(255, 0, 0), thickness=2)\n', (13077, 13196), False, 'import cv2\n'), ((13195, 13322), 'cv2.line', 'cv2.line', (['image', '(corners_px[2, 0], corners_px[2, 1])', '(corners_px[3, 0], corners_px[3, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[2, 0], corners_px[2, 1]), (corners_px[3, 0],\n corners_px[3, 1]), color=(255, 0, 0), thickness=2)\n', (13203, 13322), False, 'import cv2\n'), ((13321, 13448), 'cv2.line', 'cv2.line', (['image', '(corners_px[3, 0], corners_px[3, 1])', '(corners_px[0, 0], corners_px[0, 1])'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (corners_px[3, 0], corners_px[3, 1]), (corners_px[0, 0],\n corners_px[0, 1]), color=(255, 0, 0), thickness=2)\n', (13329, 13448), False, 'import cv2\n'), ((15142, 15159), 'math.degrees', 'math.degrees', (['yaw'], {}), '(yaw)\n', (15154, 15159), False, 'import math\n'), ((15777, 15880), 'cv2.line', 'cv2.line', (['img', '(pts_2d[3][0], pts_2d[3][1])', '(pts_2d[0][0], pts_2d[0][1])'], {'color': 'WHITE', 'thickness': '(2)'}), '(img, (pts_2d[3][0], pts_2d[3][1]), (pts_2d[0][0], pts_2d[0][1]),\n color=WHITE, thickness=2)\n', (15785, 15880), False, 'import cv2\n'), ((15908, 16011), 'cv2.line', 'cv2.line', (['img', '(pts_2d[0][0], pts_2d[0][1])', '(pts_2d[2][0], pts_2d[2][1])'], {'color': 'WHITE', 'thickness': '(2)'}), '(img, (pts_2d[0][0], pts_2d[0][1]), (pts_2d[2][0], pts_2d[2][1]),\n color=WHITE, thickness=2)\n', (15916, 16011), False, 'import cv2\n'), ((16016, 16119), 'cv2.line', 'cv2.line', (['img', '(pts_2d[1][0], pts_2d[1][1])', '(pts_2d[3][0], pts_2d[3][1])'], {'color': 'WHITE', 'thickness': '(2)'}), '(img, (pts_2d[1][0], pts_2d[1][1]), (pts_2d[3][0], pts_2d[3][1]),\n color=WHITE, thickness=2)\n', (16024, 16119), False, 'import cv2\n'), ((16298, 16399), 'cv2.line', 'cv2.line', (['img', '(pts_2d[7][0], pts_2d[7][1])', '(pts_2d[4][0], pts_2d[4][1])'], {'color': 'RED', 'thickness': '(2)'}), '(img, (pts_2d[7][0], pts_2d[7][1]), 
(pts_2d[4][0], pts_2d[4][1]),\n color=RED, thickness=2)\n', (16306, 16399), False, 'import cv2\n'), ((16441, 16542), 'cv2.line', 'cv2.line', (['img', '(pts_2d[0][0], pts_2d[0][1])', '(pts_2d[4][0], pts_2d[4][1])'], {'color': 'RED', 'thickness': '(2)'}), '(img, (pts_2d[0][0], pts_2d[0][1]), (pts_2d[4][0], pts_2d[4][1]),\n color=RED, thickness=2)\n', (16449, 16542), False, 'import cv2\n'), ((16547, 16648), 'cv2.line', 'cv2.line', (['img', '(pts_2d[1][0], pts_2d[1][1])', '(pts_2d[5][0], pts_2d[5][1])'], {'color': 'RED', 'thickness': '(2)'}), '(img, (pts_2d[1][0], pts_2d[1][1]), (pts_2d[5][0], pts_2d[5][1]),\n color=RED, thickness=2)\n', (16555, 16648), False, 'import cv2\n'), ((16653, 16754), 'cv2.line', 'cv2.line', (['img', '(pts_2d[2][0], pts_2d[2][1])', '(pts_2d[6][0], pts_2d[6][1])'], {'color': 'RED', 'thickness': '(2)'}), '(img, (pts_2d[2][0], pts_2d[2][1]), (pts_2d[6][0], pts_2d[6][1]),\n color=RED, thickness=2)\n', (16661, 16754), False, 'import cv2\n'), ((16759, 16860), 'cv2.line', 'cv2.line', (['img', '(pts_2d[3][0], pts_2d[3][1])', '(pts_2d[7][0], pts_2d[7][1])'], {'color': 'RED', 'thickness': '(2)'}), '(img, (pts_2d[3][0], pts_2d[3][1]), (pts_2d[7][0], pts_2d[7][1]),\n color=RED, thickness=2)\n', (16767, 16860), False, 'import cv2\n'), ((18767, 18800), 'math.floor', 'math.floor', (['((x - xlim[0]) / xstop)'], {}), '((x - xlim[0]) / xstop)\n', (18777, 18800), False, 'import math\n'), ((18897, 18930), 'math.floor', 'math.floor', (['((y - ylim[0]) / ystop)'], {}), '((y - ylim[0]) / ystop)\n', (18907, 18930), False, 'import math\n'), ((19181, 19210), 'math.log', 'math.log', (['(l / mean_lwh_cls[0])'], {}), '(l / mean_lwh_cls[0])\n', (19189, 19210), False, 'import math\n'), ((19226, 19255), 'math.log', 'math.log', (['(w / mean_lwh_cls[1])'], {}), '(w / mean_lwh_cls[1])\n', (19234, 19255), False, 'import math\n'), ((19271, 19300), 'math.log', 'math.log', (['(h / mean_lwh_cls[2])'], {}), '(h / mean_lwh_cls[2])\n', (19279, 19300), False, 'import math\n'), ((24682, 
24698), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (24690, 24698), True, 'import numpy as np\n'), ((27472, 27493), 'numpy.matmul', 'np.matmul', (['K', 'pts_3d_'], {}), '(K, pts_3d_)\n', (27481, 27493), True, 'import numpy as np\n'), ((27894, 27949), 'numpy.transpose', 'np.transpose', (["[label['x'], label['y'], label['z'], 1.0]"], {}), "([label['x'], label['y'], label['z'], 1.0])\n", (27906, 27949), True, 'import numpy as np\n'), ((27965, 27988), 'numpy.dot', 'np.dot', (['lidar2cam', 'xyz1'], {}), '(lidar2cam, xyz1)\n', (27971, 27988), True, 'import numpy as np\n'), ((28625, 28654), 'numpy.ones', 'np.ones', (['(points.shape[0], 1)'], {}), '((points.shape[0], 1))\n', (28632, 28654), True, 'import numpy as np\n'), ((28681, 28720), 'numpy.dot', 'np.dot', (['lidar2cam', 'points_homogeneous.T'], {}), '(lidar2cam, points_homogeneous.T)\n', (28687, 28720), True, 'import numpy as np\n'), ((28752, 28775), 'numpy.dot', 'np.dot', (['K', 'points_cam.T'], {}), '(K, points_cam.T)\n', (28758, 28775), True, 'import numpy as np\n'), ((1366, 1436), 'torch.load', 'torch.load', (['best_ckpt_model'], {'map_location': '(lambda storage, loc: storage)'}), '(best_ckpt_model, map_location=lambda storage, loc: storage)\n', (1376, 1436), False, 'import torch\n'), ((5589, 5624), 'numpy.all', 'np.all', (['(image == [0, 0, 0])'], {'axis': '(-1)'}), '(image == [0, 0, 0], axis=-1)\n', (5595, 5624), True, 'import numpy as np\n'), ((7724, 7759), 'numpy.all', 'np.all', (['(image == [0, 0, 0])'], {'axis': '(-1)'}), '(image == [0, 0, 0], axis=-1)\n', (7730, 7759), True, 'import numpy as np\n'), ((9398, 9422), 'numpy.min', 'np.min', (['corners_px[:, 0]'], {}), '(corners_px[:, 0])\n', (9404, 9422), True, 'import numpy as np\n'), ((9423, 9447), 'numpy.min', 'np.min', (['corners_px[:, 1]'], {}), '(corners_px[:, 1])\n', (9429, 9447), True, 'import numpy as np\n'), ((11815, 11850), 'numpy.all', 'np.all', (['(image == [0, 0, 0])'], {'axis': '(-1)'}), '(image == [0, 0, 0], axis=-1)\n', (11821, 
11850), True, 'import numpy as np\n'), ((13489, 13513), 'numpy.min', 'np.min', (['corners_px[:, 0]'], {}), '(corners_px[:, 0])\n', (13495, 13513), True, 'import numpy as np\n'), ((13514, 13538), 'numpy.min', 'np.min', (['corners_px[:, 1]'], {}), '(corners_px[:, 1])\n', (13520, 13538), True, 'import numpy as np\n'), ((15345, 15392), 'affine_transform.affineTransform', 'affineTransform', (['pts_3d[i]', '(0)', '(0)', '(0)', '(-x)', '(-y)', '(-z)'], {}), '(pts_3d[i], 0, 0, 0, -x, -y, -z)\n', (15360, 15392), False, 'from affine_transform import affineTransform\n'), ((15477, 15523), 'affine_transform.affineTransform', 'affineTransform', (['pts_3d[i]', '(0)', 'yaw', '(0)', 'x', 'y', 'z'], {}), '(pts_3d[i], 0, yaw, 0, x, y, z)\n', (15492, 15523), False, 'from affine_transform import affineTransform\n'), ((15665, 15777), 'cv2.line', 'cv2.line', (['img', '(pts_2d[i][0], pts_2d[i][1])', '(pts_2d[i + 1][0], pts_2d[i + 1][1])'], {'color': 'WHITE', 'thickness': '(2)'}), '(img, (pts_2d[i][0], pts_2d[i][1]), (pts_2d[i + 1][0], pts_2d[i + 1\n ][1]), color=WHITE, thickness=2)\n', (15673, 15777), False, 'import cv2\n'), ((16188, 16298), 'cv2.line', 'cv2.line', (['img', '(pts_2d[i][0], pts_2d[i][1])', '(pts_2d[i + 1][0], pts_2d[i + 1][1])'], {'color': 'RED', 'thickness': '(2)'}), '(img, (pts_2d[i][0], pts_2d[i][1]), (pts_2d[i + 1][0], pts_2d[i + 1\n ][1]), color=RED, thickness=2)\n', (16196, 16298), False, 'import cv2\n'), ((21729, 21753), 'math.floor', 'math.floor', (['(i / n_xgrids)'], {}), '(i / n_xgrids)\n', (21739, 21753), False, 'import math\n'), ((22271, 22299), 'numpy.arctan2', 'np.arctan2', (['sin_yaw', 'cos_yaw'], {}), '(sin_yaw, cos_yaw)\n', (22281, 22299), True, 'import numpy as np\n'), ((24847, 24886), 'numpy.fromfile', 'np.fromfile', (['filename'], {'dtype': 'np.float32'}), '(filename, dtype=np.float32)\n', (24858, 24886), True, 'import numpy as np\n'), ((27429, 27452), 'numpy.transpose', 'np.transpose', (['pts_3d[i]'], {}), '(pts_3d[i])\n', (27441, 27452), True, 'import 
numpy as np\n'), ((30520, 30550), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(h, w)'}), '(size=(h, w))\n', (30537, 30550), False, 'from torchvision import transforms\n'), ((19323, 19334), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (19329, 19334), True, 'import numpy as np\n'), ((19372, 19383), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (19378, 19383), True, 'import numpy as np\n'), ((21607, 21638), 'numpy.argmax', 'np.argmax', (['obj_class_one_hot[i]'], {}), '(obj_class_one_hot[i])\n', (21616, 21638), True, 'import numpy as np\n'), ((22040, 22056), 'math.exp', 'math.exp', (['l_norm'], {}), '(l_norm)\n', (22048, 22056), False, 'import math\n'), ((22089, 22105), 'math.exp', 'math.exp', (['w_norm'], {}), '(w_norm)\n', (22097, 22105), False, 'import math\n'), ((22138, 22154), 'math.exp', 'math.exp', (['h_norm'], {}), '(h_norm)\n', (22146, 22154), False, 'import math\n'), ((22689, 22720), 'numpy.argmax', 'np.argmax', (['obj_class_one_hot[i]'], {}), '(obj_class_one_hot[i])\n', (22698, 22720), True, 'import numpy as np\n'), ((27363, 27386), 'numpy.transpose', 'np.transpose', (['pts_3d[i]'], {}), '(pts_3d[i])\n', (27375, 27386), True, 'import numpy as np\n'), ((27551, 27582), 'numpy.int', 'np.int', (['(pts_2d_[0] / pts_2d_[2])'], {}), '(pts_2d_[0] / pts_2d_[2])\n', (27557, 27582), True, 'import numpy as np\n'), ((27582, 27613), 'numpy.int', 'np.int', (['(pts_2d_[1] / pts_2d_[2])'], {}), '(pts_2d_[1] / pts_2d_[2])\n', (27588, 27613), True, 'import numpy as np\n'), ((32229, 32256), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', 'width'], {}), '(0, 1, width)\n', (32243, 32256), False, 'import torch\n'), ((24624, 24639), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (24632, 24639), True, 'import numpy as np\n'), ((2238, 2308), 'torch.load', 'torch.load', (['best_ckpt_model'], {'map_location': '(lambda storage, loc: storage)'}), '(best_ckpt_model, map_location=lambda storage, loc: storage)\n', (2248, 2308), False, 'import 
torch\n'), ((24533, 24552), 'numpy.array', 'np.array', (['[a, b, c]'], {}), '([a, b, c])\n', (24541, 24552), True, 'import numpy as np\n'), ((32329, 32357), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', 'height'], {}), '(0, 1, height)\n', (32343, 32357), False, 'import torch\n'), ((2040, 2089), 'os.path.join', 'os.path.join', (['model_exp_dir', '"""checkpoint_best.pt"""'], {}), "(model_exp_dir, 'checkpoint_best.pt')\n", (2052, 2089), False, 'import os\n')] |
import numpy as np
from .. import Lump, lump_tag
@lump_tag(3, 'LUMP_VERTICES')
class VertexLump(Lump):
    """Lump holding raw vertex positions as an (N, 3) float32 array."""

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        self.vertices = np.array([])

    def parse(self):
        # read the whole lump as flat float32 values, then group into xyz triples
        raw = np.frombuffer(self.reader.read(), np.float32)
        self.vertices = raw.reshape((-1, 3))
        return self
@lump_tag(0x47, 'LUMP_UNLITVERTEX', bsp_version=29)
class UnLitVertexLump(Lump):
    """Unlit vertex records (bsp version 29)."""

    # NOTE(review): 'vpi'/'vni' presumably index into position/normal lumps
    # and 'uv' looks like texture coordinates — inferred from names, unverified.
    _dtype = np.dtype([
        ('vpi', np.uint32, (1,)),
        ('vni', np.uint32, (1,)),
        ('uv', np.float32, (2,)),
        ('unk', np.int32, (1,)),
    ])

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        self.vertex_info = np.array([])

    def parse(self):
        # interpret the whole lump as an array of structured records
        self.vertex_info = np.frombuffer(self.reader.read(), self._dtype)
        return self
@lump_tag(0x49, 'LUMP_BUMPLITVERTEX', bsp_version=29)
class BumpLitVertexLump(Lump):
    """Bump-lit vertex records (bsp version 29)."""

    # NOTE(review): 'vpi'/'vni' presumably index into position/normal lumps;
    # 'uv_lm' looks like lightmap coordinates — inferred from names, unverified.
    _dtype = np.dtype([
        ('vpi', np.uint32, (1,)),
        ('vni', np.uint32, (1,)),
        ('uv', np.float32, (2,)),
        ('unk1', np.int32, (1,)),
        ('uv_lm', np.float32, (2,)),
        ('uv1', np.float32, (2,)),
        ('unk2', np.uint32, (2,)),
    ])

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        self.vertex_info = np.array([])

    def parse(self):
        # interpret the whole lump as an array of structured records
        self.vertex_info = np.frombuffer(self.reader.read(), self._dtype)
        return self
@lump_tag(0x4a, 'LUMP_UNLITTSVERTEX', bsp_version=29)
class UnlitTSVertexLump(Lump):
    """Unlit TS mesh vertices (BSP v29) parsed into a structured numpy array."""

    _dtype = np.dtype([
        ('vpi', np.uint32, (3,)),  # three uint32 indices -- TODO confirm meaning
        ('vni', np.uint32, (1,)),  # presumably vertex normal index -- TODO confirm
        ('uv', np.float32, (2,)),  # texture coordinates
    ])

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        # Structured array of _dtype records; empty until parse() runs.
        self.vertex_info = np.array([])

    def parse(self):
        self.vertex_info = np.frombuffer(self.reader.read(), self._dtype)
        return self
@lump_tag(0x4B, 'LUMP_BLINNPHONGVERTEX', bsp_version=29)
class BlinnPhongVertexLump(Lump):
    """Blinn-Phong mesh vertices (BSP v29) parsed into a structured numpy array."""

    _dtype = np.dtype([
        ('vpi', np.uint32, (3,)),  # three uint32 indices -- TODO confirm meaning
        ('vni', np.uint32, (1,)),  # presumably vertex normal index -- TODO confirm
        ('unk', np.uint32, (2,)),  # unknown field
    ])

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        # Structured array of _dtype records; empty until parse() runs.
        self.vertex_info = np.array([])

    def parse(self):
        self.vertex_info = np.frombuffer(self.reader.read(), self._dtype)
        return self
@lump_tag(0x4C, 'LUMP_R5VERTEX', bsp_version=29)
class R5VertexLump(Lump):
    """R5 mesh vertices (BSP v29) parsed into a structured numpy array."""

    _dtype = np.dtype([
        ('vpi', np.uint32, (3,)),    # three uint32 indices -- TODO confirm meaning
        ('vni', np.uint32, (1,)),    # presumably vertex normal index -- TODO confirm
        ('unk', np.uint32, (2,)),    # unknown field
        ('uv', np.float32, (2,)),    # texture coordinates
        ('uv_lm', np.float32, (2,)), # presumably lightmap UV (from the name)
    ])

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        # Structured array of _dtype records; empty until parse() runs.
        self.vertex_info = np.array([])

    def parse(self):
        self.vertex_info = np.frombuffer(self.reader.read(), self._dtype)
        return self
@lump_tag(0x4E, 'LUMP_R7VERTEX', bsp_version=29)
class R7VertexLump(Lump):
    """R7 mesh vertices (BSP v29) parsed into a structured numpy array."""

    _dtype = np.dtype([
        ('vpi', np.uint32, (3,)),     # three uint32 indices -- TODO confirm meaning
        ('vni', np.uint32, (1,)),     # presumably vertex normal index -- TODO confirm
        ('uv', np.float32, (2,)),     # texture coordinates
        ('neg_one', np.int32, (1,)),  # observed as constant -1 per the field name
    ])

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        # Structured array of _dtype records; empty until parse() runs.
        self.vertex_info = np.array([])

    def parse(self):
        self.vertex_info = np.frombuffer(self.reader.read(), self._dtype)
        return self
| [
"numpy.dtype",
"numpy.array"
] | [((507, 625), 'numpy.dtype', 'np.dtype', (["[('vpi', np.uint32, (1,)), ('vni', np.uint32, (1,)), ('uv', np.float32, (2,\n )), ('unk', np.int32, (1,))]"], {}), "([('vpi', np.uint32, (1,)), ('vni', np.uint32, (1,)), ('uv', np.\n float32, (2,)), ('unk', np.int32, (1,))])\n", (515, 625), True, 'import numpy as np\n'), ((1052, 1258), 'numpy.dtype', 'np.dtype', (["[('vpi', np.uint32, (1,)), ('vni', np.uint32, (1,)), ('uv', np.float32, (2,\n )), ('unk1', np.int32, (1,)), ('uv_lm', np.float32, (2,)), ('uv1', np.\n float32, (2,)), ('unk2', np.uint32, (2,))]"], {}), "([('vpi', np.uint32, (1,)), ('vni', np.uint32, (1,)), ('uv', np.\n float32, (2,)), ('unk1', np.int32, (1,)), ('uv_lm', np.float32, (2,)),\n ('uv1', np.float32, (2,)), ('unk2', np.uint32, (2,))])\n", (1060, 1258), True, 'import numpy as np\n'), ((1717, 1810), 'numpy.dtype', 'np.dtype', (["[('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('uv', np.float32, (2,))]"], {}), "([('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('uv', np.\n float32, (2,))])\n", (1725, 1810), True, 'import numpy as np\n'), ((2231, 2324), 'numpy.dtype', 'np.dtype', (["[('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('unk', np.uint32, (2,))]"], {}), "([('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('unk', np.\n uint32, (2,))])\n", (2239, 2324), True, 'import numpy as np\n'), ((2729, 2877), 'numpy.dtype', 'np.dtype', (["[('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('unk', np.uint32, (2,\n )), ('uv', np.float32, (2,)), ('uv_lm', np.float32, (2,))]"], {}), "([('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('unk', np.\n uint32, (2,)), ('uv', np.float32, (2,)), ('uv_lm', np.float32, (2,))])\n", (2737, 2877), True, 'import numpy as np\n'), ((3306, 3428), 'numpy.dtype', 'np.dtype', (["[('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('uv', np.float32, (2,\n )), ('neg_one', np.int32, (1,))]"], {}), "([('vpi', np.uint32, (3,)), ('vni', np.uint32, (1,)), ('uv', np.\n float32, (2,)), ('neg_one', np.int32, 
(1,))])\n", (3314, 3428), True, 'import numpy as np\n'), ((207, 219), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (215, 219), True, 'import numpy as np\n'), ((799, 811), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (807, 811), True, 'import numpy as np\n'), ((1464, 1476), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1472, 1476), True, 'import numpy as np\n'), ((1972, 1984), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1980, 1984), True, 'import numpy as np\n'), ((2486, 2498), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2494, 2498), True, 'import numpy as np\n'), ((3063, 3075), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3071, 3075), True, 'import numpy as np\n'), ((3602, 3614), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3610, 3614), True, 'import numpy as np\n')] |
import math
import random
import time
import numpy as np
import scipy
from numpy.random import RandomState
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.stats import gamma, norm
# Candidate Gaussian-kernel bandwidths swept by HSIC_pval_bandwidth_sweep.
SIGMAS_HSIC = list(range(35000, 85000, 5000))
def kernelMatrixGaussian(m, m2, sigma=None):
    """Gaussian (RBF) kernel matrix between two point sets.

    Args:
        m: array whose rows are data points
        m2: array whose rows are data points
        sigma: kernel bandwidth; when None, the median heuristic over the
            strictly positive pairwise distances is used
    Returns:
        Array of shape (len(m), len(m2)) with entries exp(-||x-y||^2 / (2*sigma^2)).
    """
    sq_dists = cdist(m, m2, 'sqeuclidean')
    if sigma is None:
        # Median distance heuristic.
        sigma = math.sqrt(0.5 * np.median(sq_dists[sq_dists > 0]))
    coef = -1.0 / (2 * sigma ** 2)
    return np.exp(coef * sq_dists)
def columnDistanceGaussian(col1, col2, sigma):
    """Element-wise Gaussian kernel between paired rows of col1 and col2."""
    coef = -1.0 / (2 * sigma ** 2)
    sq_dists = np.array([scipy.spatial.distance.sqeuclidean(a, b) for a, b in zip(col1, col2)])
    return np.exp(coef * sq_dists)
def columnDistanceLinear(col1, col2):
    """Element-wise linear kernel: dot product of each paired row of col1 and col2."""
    dots = [np.dot(a, b) for a, b in zip(col1, col2)]
    return np.array(dots)
def getSigmaGaussian(m, m2, sample_size=200, sigma_multiply=0):
    """Median-heuristic bandwidth for a Gaussian kernel.

    Args:
        m: array whose rows are data points
        m2: array whose rows are data points
        sample_size: if there are more observations than this, a random
            subsample of this size is used instead
        sigma_multiply: optionally shift the median by this many standard
            deviations of the positive distances
    Returns:
        The bandwidth sqrt(0.5 * median of positive squared distances).
    """
    if m.shape[0] > sample_size:
        # Seed with the row count so identical samples give identical bandwidths.
        prng = RandomState(m.shape[0])
        idx = prng.choice(m.shape[0], sample_size)
        m, m2 = m[idx], m2[idx]
    sq_dists = cdist(m, m2, 'sqeuclidean')
    positive = sq_dists[sq_dists > 0]
    spread = np.median(positive)
    if sigma_multiply != 0:
        spread += sigma_multiply * np.std(positive)
    return math.sqrt(0.5 * spread)
def kernelMatrixLinear(m, m2):
    """Linear kernel matrix: entry (i, j) is the dot product of row i of m with row j of m2."""
    return np.dot(m, np.transpose(m2))
def kernelMatrixDelta(m, m2):
    """Delta kernel matrix: 1 when two rows are identical, 0 when fully different.

    Implemented as one minus the normalized Hamming distance between rows.
    """
    hamming = cdist(m, m2, 'hamming')
    return 1 - hamming
def columnDistanceDelta(col1, col2):
    """Element-wise delta kernel: 1 where paired entries match, 0 otherwise."""
    matches = [1 if a == b else 0 for a, b in zip(col1, col2)]
    return np.array(matches)
def HSIC_pval_full_gram(X, Y, N_samp=100, kernelX="Gaussian", kernelY="Gaussian", sigmaX=None, sigmaY=None):
    """Calculate HSIC and a permutation p-value using full Gram matrices.

    Older implementation that materializes the complete Gram matrices
    (O(m^2) memory); see HSIC_pval for the incomplete-Cholesky version.

    Args:
        X: data array, one row per observation
        Y: data array, one row per observation
        N_samp: number of permutation samples for the null distribution
        kernelX: kernel to use for X ("Gaussian" or "Linear")
        kernelY: kernel to use for Y ("Gaussian" or "Linear")
        sigmaX: Gaussian bandwidth for X (median heuristic when None)
        sigmaY: Gaussian bandwidth for Y (median heuristic when None)
    Returns:
        (HSIC, pval)
    """
    m, _ = X.shape
    sigmaX = getSigmaGaussian(X, X, 200) if sigmaX is None else sigmaX
    sigmaY = getSigmaGaussian(Y, Y, 200) if sigmaY is None else sigmaY
    K = kernelMatrixGaussian(X, X, sigmaX) if kernelX == "Gaussian" else kernelMatrixLinear(X, X)
    L = kernelMatrixGaussian(Y, Y, sigmaY) if kernelY == "Gaussian" else kernelMatrixLinear(Y, Y)
    # Centering matrix H = I - (1/m) * 11^T.
    H = np.identity(m) - 1.0 / m * np.ones((m, m))
    # Plain arrays + @ instead of the deprecated np.mat (same result).
    Kc = H @ K @ H
    # Dividing by m**2 here, although some papers use (m - 1)**2.
    HSIC = np.trace(Kc.T @ L) / m ** 2
    # Null distribution: recompute HSIC under random permutations of Y.
    boots = []
    Yrand = np.copy(Y)
    for _ in range(N_samp):  # `range`: the original Python-2 `xrange` is a NameError on Python 3
        np.random.shuffle(Yrand)
        # Pass sigmaY so a user-supplied bandwidth is honoured in the
        # bootstrap too (the original recomputed the median heuristic here).
        L = kernelMatrixGaussian(Yrand, Yrand, sigmaY) if kernelY == "Gaussian" else kernelMatrixLinear(Yrand, Yrand)
        boots.append(np.trace(Kc.T @ L) / m ** 2)
    boots = np.array(boots)
    # Add-one smoothed permutation p-value.
    pval = (sum(b >= HSIC for b in boots) + 1) / float(len(boots) + 1)
    return HSIC, pval
def HSIC_pval(X, Y, N_samp=500, kernelX="Gaussian", kernelY="Gaussian", eta=0.001,
              sigmaX=None, sigmaY=None,
              p_method="boots", return_boots=False):
    """Calculate HSIC and a p-value using incomplete-Cholesky Gram approximations.

    Args:
        X: data array, one row per observation
        Y: data array, one row per observation
        N_samp: number of permutation samples for the null distribution
        kernelX: kernel to use for X ("Gaussian", "Linear" or "Delta")
        kernelY: kernel to use for Y ("Gaussian", "Linear" or "Delta")
        eta: threshold for the incomplete Cholesky decomposition
        sigmaX: Gaussian bandwidth for X (median heuristic when None)
        sigmaY: Gaussian bandwidth for Y (median heuristic when None)
        p_method: "boots" for a permutation p-value, anything else fits a
            gamma distribution to the bootstrap samples
        return_boots: also return the bootstrap samples
    Returns:
        (HSIC, pval) or (HSIC, pval, boots) when return_boots is True.
    """
    m, _ = X.shape
    sigmaX = getSigmaGaussian(X, X, 200) if sigmaX is None else sigmaX
    sigmaY = getSigmaGaussian(Y, Y, 200) if sigmaY is None else sigmaY
    A, _ = incompleteCholeskyKernel(X, m, kernelX, sigmaX, eta)
    B, _ = incompleteCholeskyKernel(Y, m, kernelY, sigmaY, eta)
    # Column-center the X factor once; HSIC = trace((B C)(B C)^T) / m^2.
    centered_A = A.T - A.T.mean(axis=0)
    # Plain arrays + @ instead of the deprecated np.mat (same result).
    tmp = B @ centered_A
    HSIC = np.trace(tmp @ tmp.T) / m ** 2
    # Null distribution: recompute HSIC under random permutations of Y.
    boots = []
    Yrand = np.copy(Y)
    for _ in range(N_samp):  # `range`: the original Python-2 `xrange` is a NameError on Python 3
        np.random.shuffle(Yrand)
        B, _ = incompleteCholeskyKernel(Yrand, m, kernelY, sigmaY, eta)
        tmp = B @ centered_A
        boots.append(np.trace(tmp @ tmp.T) / m ** 2)
    boots = np.array(boots)
    if p_method == "boots":
        # Add-one smoothed permutation p-value.
        pval = (sum(b >= HSIC for b in boots) + 1) / float(len(boots) + 1)
    else:  # gamma approximation to the null distribution
        fit_alpha, fit_loc, fit_beta = gamma.fit(boots)
        pval = 1 - gamma.cdf(HSIC, fit_alpha, scale=fit_beta, loc=fit_loc)
    if return_boots:
        return HSIC, pval, boots
    else:
        return HSIC, pval
def HSIC_pval_bandwidth_sweep(locs, has_word, N_samp=500, kernelX="Gaussian", kernelY="Gaussian", eta=0.001):
    """Sweep HSIC over the candidate bandwidths in SIGMAS_HSIC.

    Returns the (HSIC, p-value) pair with the smallest p-value, plus the
    full lists of HSIC values and p-values for every bandwidth tried.
    """
    HSIC_vals = []
    HSIC_pvals = []
    best_val, best_p = None, float("inf")
    for sigma in SIGMAS_HSIC:
        val, pval = HSIC_pval(locs, has_word, N_samp, kernelX, kernelY, eta, sigmaX=sigma)
        HSIC_vals.append(val)
        HSIC_pvals.append(pval)
        if pval < best_p:
            best_p, best_val = pval, val
    return best_val, best_p, HSIC_vals, HSIC_pvals
def incompleteCholesky(K, k, eta=0.01):
    """Incomplete Cholesky decomposition of a kernel (Gram) matrix.

    Based on the algorithm in "Kernel Methods for Pattern Analysis",
    chapter "Elementary algorithms in feature space", fragment 5.4.

    Args:
        K: square kernel matrix of shape (ell, ell)
        k: maximum number of rows (rank) of the factor
        eta: stop once the largest residual diagonal entry is <= eta
    Returns:
        (R, j): R is a (j, ell) array with K approximately R.T @ R and
        j <= k is the achieved rank.
    """
    ell, _ = K.shape
    I = []
    R = np.zeros((ell, ell))
    d = np.diagonal(K).copy()  # residual diagonal
    a = max(d)
    I.append(np.argmax(d))
    j = 0
    while a > eta and j < k:
        nu_j = math.sqrt(a)
        # `range`: the original Python-2 `xrange` is a NameError on Python 3.
        for i in range(ell):
            R[j, i] = (K[I[j], i] - np.dot(R[:, i].T, R[:, I[j]])) / nu_j
        d = d - R[j, :] ** 2
        a = max(d)
        I.append(np.argmax(d))
        j += 1
    return R[:j, ], j
def incompleteCholeskyKernel(X, maxrank, kernel, sigma=None, eta=0.001):
    """Incomplete Cholesky decomposition computed directly from data.

    Same algorithm as incompleteCholesky (Kernel Methods for Pattern
    Analysis, fragment 5.4) but evaluates kernel entries on demand, so the
    full Gram matrix is never materialized.

    Args:
        X: data array, one row per observation
        maxrank: maximum number of rows (rank) of the factor (capped at 100)
        kernel: "Gaussian", "Linear" or "Delta"
        sigma: bandwidth for the Gaussian kernel
        eta: stop once the largest residual diagonal entry is <= eta
    Returns:
        (R, j): R is a (j, ell) array approximating the Gram matrix as
        R.T @ R, and j <= maxrank is the achieved rank.
    """
    maxrank = min(maxrank, 100)  # hard cap on the approximation rank
    ell, _ = X.shape
    I = []
    R = np.zeros((maxrank, ell))
    # Residual diagonal: self-kernel of each observation.
    d = None
    if kernel == "Gaussian":
        d = columnDistanceGaussian(X, X, sigma)
    elif kernel == "Linear":
        d = columnDistanceLinear(X, X)
    elif kernel == "Delta":
        d = columnDistanceDelta(X, X)
    a = max(d)
    I.append(np.argmax(d))
    j = 0
    while j < maxrank and a > eta:
        nu_j = math.sqrt(a)
        # One kernel row: k(X[I[j]], X[i]) for all i.
        x_elem = np.atleast_2d(X[I[j]])
        K_tmp = None
        if kernel == "Gaussian":
            K_tmp = kernelMatrixGaussian(x_elem, X, sigma)
        elif kernel == "Linear":
            K_tmp = kernelMatrixLinear(x_elem, X)
        elif kernel == "Delta":
            K_tmp = kernelMatrixDelta(x_elem, X)
        # `range`: the original Python-2 `xrange` is a NameError on Python 3.
        for i in range(ell):
            R[j, i] = (K_tmp[0][i] - np.dot(R[:, i].T, R[:, I[j]])) / nu_j
        d = d - R[j, :] ** 2
        a = max(d)
        I.append(np.argmax(d))
        j += 1
    return R[:j, ], j
| [
"numpy.trace",
"numpy.argmax",
"numpy.ones",
"numpy.exp",
"numpy.mat",
"numpy.atleast_2d",
"numpy.copy",
"numpy.std",
"numpy.transpose",
"numpy.identity",
"numpy.random.RandomState",
"scipy.spatial.distance.sqeuclidean",
"numpy.random.shuffle",
"numpy.diagonal",
"scipy.spatial.distance.c... | [((602, 629), 'scipy.spatial.distance.cdist', 'cdist', (['m', 'm2', '"""sqeuclidean"""'], {}), "(m, m2, 'sqeuclidean')\n", (607, 629), False, 'from scipy.spatial.distance import cdist, pdist, squareform\n'), ((860, 894), 'numpy.exp', 'np.exp', (['(gamma * pairwise_distances)'], {}), '(gamma * pairwise_distances)\n', (866, 894), True, 'import numpy as np\n'), ((1808, 1835), 'scipy.spatial.distance.cdist', 'cdist', (['m', 'm2', '"""sqeuclidean"""'], {}), "(m, m2, 'sqeuclidean')\n", (1813, 1835), False, 'from scipy.spatial.distance import cdist, pdist, squareform\n'), ((1863, 1916), 'numpy.median', 'np.median', (['pairwise_distances[pairwise_distances > 0]'], {}), '(pairwise_distances[pairwise_distances > 0])\n', (1872, 1916), True, 'import numpy as np\n'), ((2047, 2079), 'math.sqrt', 'math.sqrt', (['(0.5 * distance_result)'], {}), '(0.5 * distance_result)\n', (2056, 2079), False, 'import math\n'), ((2251, 2266), 'numpy.dot', 'np.dot', (['m', 'm2.T'], {}), '(m, m2.T)\n', (2257, 2266), True, 'import numpy as np\n'), ((2936, 2947), 'time.time', 'time.time', ([], {}), '()\n', (2945, 2947), False, 'import time\n'), ((3593, 3603), 'numpy.copy', 'np.copy', (['Y'], {}), '(Y)\n', (3600, 3603), True, 'import numpy as np\n'), ((3855, 3870), 'numpy.array', 'np.array', (['boots'], {}), '(boots)\n', (3863, 3870), True, 'import numpy as np\n'), ((4658, 4669), 'time.time', 'time.time', ([], {}), '()\n', (4667, 4669), False, 'import time\n'), ((5122, 5132), 'numpy.copy', 'np.copy', (['Y'], {}), '(Y)\n', (5129, 5132), True, 'import numpy as np\n'), ((5400, 5415), 'numpy.array', 'np.array', (['boots'], {}), '(boots)\n', (5408, 5415), True, 'import numpy as np\n'), ((6704, 6724), 'numpy.zeros', 'np.zeros', (['(ell, ell)'], {}), '((ell, ell))\n', (6712, 6724), True, 'import numpy as np\n'), ((7608, 7632), 'numpy.zeros', 'np.zeros', (['(maxrank, ell)'], {}), '((maxrank, ell))\n', (7616, 7632), True, 'import numpy as np\n'), ((1618, 1641), 
'numpy.random.RandomState', 'RandomState', (['m.shape[0]'], {}), '(m.shape[0])\n', (1629, 1641), False, 'from numpy.random import RandomState\n'), ((2362, 2385), 'scipy.spatial.distance.cdist', 'cdist', (['m', 'm2', '"""hamming"""'], {}), "(m, m2, 'hamming')\n", (2367, 2385), False, 'from scipy.spatial.distance import cdist, pdist, squareform\n'), ((3365, 3379), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (3376, 3379), True, 'import numpy as np\n'), ((3437, 3446), 'numpy.mat', 'np.mat', (['H'], {}), '(H)\n', (3443, 3446), True, 'import numpy as np\n'), ((3641, 3665), 'numpy.random.shuffle', 'np.random.shuffle', (['Yrand'], {}), '(Yrand)\n', (3658, 3665), True, 'import numpy as np\n'), ((5033, 5051), 'numpy.mat', 'np.mat', (['centered_A'], {}), '(centered_A)\n', (5039, 5051), True, 'import numpy as np\n'), ((5063, 5084), 'numpy.trace', 'np.trace', (['(tmp * tmp.T)'], {}), '(tmp * tmp.T)\n', (5071, 5084), True, 'import numpy as np\n'), ((5170, 5194), 'numpy.random.shuffle', 'np.random.shuffle', (['Yrand'], {}), '(Yrand)\n', (5187, 5194), True, 'import numpy as np\n'), ((5577, 5593), 'scipy.stats.gamma.fit', 'gamma.fit', (['boots'], {}), '(boots)\n', (5586, 5593), False, 'from scipy.stats import gamma, norm\n'), ((6783, 6795), 'numpy.argmax', 'np.argmax', (['d'], {}), '(d)\n', (6792, 6795), True, 'import numpy as np\n'), ((6851, 6863), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (6860, 6863), False, 'import math\n'), ((7896, 7908), 'numpy.argmax', 'np.argmax', (['d'], {}), '(d)\n', (7905, 7908), True, 'import numpy as np\n'), ((7970, 7982), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (7979, 7982), False, 'import math\n'), ((8000, 8022), 'numpy.atleast_2d', 'np.atleast_2d', (['X[I[j]]'], {}), '(X[I[j]])\n', (8013, 8022), True, 'import numpy as np\n'), ((1003, 1043), 'scipy.spatial.distance.sqeuclidean', 'scipy.spatial.distance.sqeuclidean', (['x', 'y'], {}), '(x, y)\n', (1037, 1043), False, 'import scipy\n'), ((1098, 1114), 'numpy.array', 'np.array', 
(['result'], {}), '(result)\n', (1106, 1114), True, 'import numpy as np\n'), ((1178, 1190), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (1184, 1190), True, 'import numpy as np\n'), ((1987, 2037), 'numpy.std', 'np.std', (['pairwise_distances[pairwise_distances > 0]'], {}), '(pairwise_distances[pairwise_distances > 0])\n', (1993, 2037), True, 'import numpy as np\n'), ((3390, 3405), 'numpy.ones', 'np.ones', (['(m, m)'], {}), '((m, m))\n', (3397, 3405), True, 'import numpy as np\n'), ((3415, 3424), 'numpy.mat', 'np.mat', (['H'], {}), '(H)\n', (3421, 3424), True, 'import numpy as np\n'), ((3427, 3436), 'numpy.mat', 'np.mat', (['K'], {}), '(K)\n', (3433, 3436), True, 'import numpy as np\n'), ((5307, 5316), 'numpy.mat', 'np.mat', (['B'], {}), '(B)\n', (5313, 5316), True, 'import numpy as np\n'), ((5319, 5337), 'numpy.mat', 'np.mat', (['centered_A'], {}), '(centered_A)\n', (5325, 5337), True, 'import numpy as np\n'), ((5613, 5668), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['HSIC', 'fit_alpha'], {'scale': 'fit_beta', 'loc': 'fit_loc'}), '(HSIC, fit_alpha, scale=fit_beta, loc=fit_loc)\n', (5622, 5668), False, 'from scipy.stats import gamma, norm\n'), ((6732, 6746), 'numpy.diagonal', 'np.diagonal', (['K'], {}), '(K)\n', (6743, 6746), True, 'import numpy as np\n'), ((7023, 7035), 'numpy.argmax', 'np.argmax', (['d'], {}), '(d)\n', (7032, 7035), True, 'import numpy as np\n'), ((8484, 8496), 'numpy.argmax', 'np.argmax', (['d'], {}), '(d)\n', (8493, 8496), True, 'import numpy as np\n'), ((763, 816), 'numpy.median', 'np.median', (['pairwise_distances[pairwise_distances > 0]'], {}), '(pairwise_distances[pairwise_distances > 0])\n', (772, 816), True, 'import numpy as np\n'), ((3534, 3550), 'numpy.transpose', 'np.transpose', (['Kc'], {}), '(Kc)\n', (3546, 3550), True, 'import numpy as np\n'), ((5359, 5380), 'numpy.trace', 'np.trace', (['(tmp * tmp.T)'], {}), '(tmp * tmp.T)\n', (5367, 5380), True, 'import numpy as np\n'), ((6928, 6957), 'numpy.dot', 'np.dot', (['R[:, i].T', 
'R[:, I[j]]'], {}), '(R[:, i].T, R[:, I[j]])\n', (6934, 6957), True, 'import numpy as np\n'), ((8389, 8418), 'numpy.dot', 'np.dot', (['R[:, i].T', 'R[:, I[j]]'], {}), '(R[:, i].T, R[:, I[j]])\n', (8395, 8418), True, 'import numpy as np\n'), ((3811, 3827), 'numpy.transpose', 'np.transpose', (['Kc'], {}), '(Kc)\n', (3823, 3827), True, 'import numpy as np\n')] |
import re
import librosa
import numpy as np
import torch
from torch.nn import functional as F
def _tokenize_text(sentence):
w = re.findall(r"[\w']+", str(sentence))
return w
def create_audio_features(mel_spec, max_audio_STFT_nframes):
    """Pad or truncate a mel spectrogram to a fixed number of STFT frames.

    Args:
        mel_spec: (n_mels, T) numpy array.
        max_audio_STFT_nframes: target number of frames.
    Returns:
        (audio, audio_mask, audio_STFT_nframes): audio is a float tensor of
        shape (n_mels, max_audio_STFT_nframes) zero-padded on the right,
        audio_mask marks valid frames with 1, and audio_STFT_nframes is the
        number of valid frames kept.
    """
    n_valid = min(mel_spec.shape[1], max_audio_STFT_nframes)
    padded = np.zeros((mel_spec.shape[0], max_audio_STFT_nframes), dtype=np.float32)
    padded[:, :n_valid] = mel_spec[:, :n_valid]
    mask = np.zeros(max_audio_STFT_nframes, dtype=np.float32)
    mask[:n_valid] = 1
    return torch.from_numpy(padded).float(), torch.from_numpy(mask).float(), n_valid
def create_text_features(words, max_words, we, we_dim):
    """Embed up to *max_words* tokens with a word-embedding model.

    Args:
        words: list of token strings.
        max_words: fixed number of output token slots.
        we: word-embedding model exposing `.vocab` and list indexing
            (gensim KeyedVectors-like) -- assumed, confirm against caller.
        we_dim: embedding dimensionality.
    Returns:
        (text, text_mask, raw_text): text is (max_words, we_dim) with
        out-of-vocabulary words dropped and unused slots zeroed, text_mask
        marks filled slots with 1, raw_text is the space-joined input.
    """
    raw_text = ' '.join(words)
    in_vocab = [w for w in words if w in we.vocab]
    text = np.zeros((max_words, we_dim), dtype=np.float32)
    text_mask = np.zeros(max_words, dtype=np.float32)
    nwords = min(len(in_vocab), max_words)
    if nwords > 0:
        text[:nwords] = we[in_vocab][:nwords]
        text_mask[:nwords] = 1
    return torch.from_numpy(text).float(), torch.from_numpy(text_mask).float(), raw_text
def create_video_features(feat_2d, feat_3d, n_tokens, strategy='clip'):
    """Fuse per-frame 2d and 3d video features into `n_tokens` fixed-size tokens.

    Args:
        feat_2d: (T2, D2) tensor of 2d features, or None.
        feat_3d: (T3, D3) tensor of 3d features, or None.
        n_tokens: number of output tokens; 0 means "global max-pool to one token".
        strategy: 'clip' (pad/truncate), 'nearest' (nearest-neighbour temporal
            resampling) or 'max_pool' (temporal max-pool, then 'nearest').
    Returns:
        (video, video_mask): video has shape (n_tokens, D2 [+ D3]) -- or a flat
        (D2 + D3) vector when n_tokens == 0 -- and video_mask marks valid tokens.
    Raises:
        NotImplementedError: for an unknown strategy.
    """
    if n_tokens == 0:
        # Global pooling: one L2-normalized max-pooled vector per stream,
        # concatenated. NOTE(review): this branch assumes both streams are
        # tensors (len(None) raises) -- confirm callers never pass None here.
        feat_2d = F.normalize(torch.max(feat_2d, dim=0)[0], dim=0) if len(feat_2d) else torch.zeros(feat_2d.shape[1])
        feat_3d = F.normalize(torch.max(feat_3d, dim=0)[0], dim=0) if len(feat_3d) else torch.zeros(feat_3d.shape[1])
        video = torch.cat((feat_2d, feat_3d))
        video_mask = torch.ones(1)  # TODO: not quite right, really 0
        return video, video_mask
    else:
        if strategy == 'clip':
            # Zero-pad or truncate each stream to exactly n_tokens.
            if feat_2d is None:
                video = torch.zeros(n_tokens, feat_3d.shape[-1])
                video_mask = torch.zeros(n_tokens)
                cur_n_tokens_3d, dim_3d = feat_3d.shape
                video[:cur_n_tokens_3d] = F.normalize(feat_3d[:n_tokens], dim=1)
                video_mask[:cur_n_tokens_3d] = 1
                return video, video_mask
            elif feat_3d is None:
                video = torch.zeros(n_tokens, feat_2d.shape[-1])
                video_mask = torch.zeros(n_tokens)
                cur_n_tokens_2d, dim_2d = feat_2d.shape
                video[:cur_n_tokens_2d] = F.normalize(feat_2d[:n_tokens], dim=1)
                video_mask[:cur_n_tokens_2d] = 1
                return video, video_mask
            else:
                video = torch.zeros(n_tokens, feat_2d.shape[-1] + feat_3d.shape[-1])
                video_mask = torch.zeros(n_tokens)
                cur_n_tokens_2d, dim_2d = feat_2d.shape
                cur_n_tokens_3d, dim_3d = feat_3d.shape
                if cur_n_tokens_2d != 0 and cur_n_tokens_3d != 0:
                    # Resample the 2d stream onto the 3d stream's time axis
                    # before concatenating along the feature dimension.
                    feat_2d = torch.nn.functional.interpolate(
                        feat_2d.permute(1, 0).unsqueeze(0),
                        size=cur_n_tokens_3d,
                        mode='nearest').squeeze(0).permute(1, 0)
                video[:cur_n_tokens_3d, :dim_2d] = F.normalize(feat_2d[:n_tokens], dim=1)
                video[:cur_n_tokens_3d, dim_2d:] = F.normalize(feat_3d[:n_tokens], dim=1)
                video_mask[:cur_n_tokens_3d] = 1
                return video, video_mask
        elif strategy == 'nearest':
            if feat_2d is None:
                cur_n_tokens_3d, dim_3d = feat_3d.shape
                if cur_n_tokens_3d <= n_tokens:
                    return create_video_features(feat_2d, feat_3d, n_tokens, strategy='clip')
                feat_3d = torch.nn.functional.interpolate(
                    feat_3d.permute(1, 0).unsqueeze(0),
                    size=n_tokens,
                    mode='nearest').squeeze(0).permute(1, 0)
                video = F.normalize(feat_3d, dim=1)
                video_mask = torch.ones(n_tokens)
                return video, video_mask
            elif feat_3d is None:
                cur_n_tokens_2d, dim_2d = feat_2d.shape
                if cur_n_tokens_2d <= n_tokens:
                    # BUGFIX: was create_video_features(feat_2d, feat_2d, ...),
                    # which concatenated the 2d stream with itself and doubled
                    # the feature dimension compared to the non-fallback path.
                    return create_video_features(feat_2d, None, n_tokens, strategy='clip')
                feat_2d = torch.nn.functional.interpolate(
                    feat_2d.permute(1, 0).unsqueeze(0),
                    size=n_tokens,
                    mode='nearest').squeeze(0).permute(1, 0)
                video = F.normalize(feat_2d, dim=1)
                video_mask = torch.ones(n_tokens)
                return video, video_mask
            else:
                cur_n_tokens_2d, dim_2d = feat_2d.shape
                cur_n_tokens_3d, dim_3d = feat_3d.shape
                if cur_n_tokens_3d <= n_tokens or cur_n_tokens_2d == 0:
                    return create_video_features(feat_2d, feat_3d, n_tokens, strategy='clip')
                video = torch.zeros(n_tokens, feat_2d.shape[-1] + feat_3d.shape[-1])
                video_mask = torch.zeros(n_tokens)
                feat_2d = torch.nn.functional.interpolate(
                    feat_2d.permute(1, 0).unsqueeze(0),
                    size=n_tokens,
                    mode='nearest').squeeze(0).permute(1, 0)
                feat_3d = torch.nn.functional.interpolate(
                    feat_3d.permute(1, 0).unsqueeze(0),
                    size=n_tokens,
                    mode='nearest').squeeze(0).permute(1, 0)
                video[:, :dim_2d] = F.normalize(feat_2d, dim=1)
                video[:, dim_2d:] = F.normalize(feat_3d, dim=1)
                video_mask[:] = 1
                return video, video_mask
        elif strategy == 'max_pool':
            if feat_2d is None:
                cur_n_tokens_3d = feat_3d.shape[0]
                if cur_n_tokens_3d <= n_tokens:
                    return create_video_features(feat_2d, feat_3d, n_tokens, strategy='clip')
                kernel_size_3d = int(np.floor(cur_n_tokens_3d / n_tokens))
                if kernel_size_3d <= 1:  # we don't have what to max pool
                    return create_video_features(feat_2d, feat_3d, n_tokens, strategy='nearest')
                feat_3d = torch.nn.functional.max_pool1d(feat_3d.permute(1, 0), kernel_size=kernel_size_3d).permute(1,
                                                                                                                    0)
                return create_video_features(feat_2d, feat_3d, n_tokens, strategy='nearest')
            elif feat_3d is None:
                cur_n_tokens_2d = feat_2d.shape[0]
                if cur_n_tokens_2d <= n_tokens:
                    # BUGFIX: was (feat_2d, feat_2d) -- see the 'nearest' branch.
                    return create_video_features(feat_2d, None, n_tokens, strategy='clip')
                kernel_size_2d = int(np.floor(cur_n_tokens_2d / n_tokens))
                if kernel_size_2d <= 1:  # we don't have what to max pool
                    # BUGFIX: was (feat_2d, feat_2d).
                    return create_video_features(feat_2d, None, n_tokens, strategy='nearest')
                feat_2d = torch.nn.functional.max_pool1d(feat_2d.permute(1, 0), kernel_size=kernel_size_2d).permute(1,
                                                                                                                    0)
                # BUGFIX: was (feat_2d, feat_2d).
                return create_video_features(feat_2d, None, n_tokens, strategy='nearest')
            else:
                cur_n_tokens_2d = feat_2d.shape[0]
                cur_n_tokens_3d = feat_3d.shape[0]
                if cur_n_tokens_3d <= n_tokens or cur_n_tokens_2d == 0:
                    return create_video_features(feat_2d, feat_3d, n_tokens, strategy='clip')
                kernel_size_3d = int(np.floor(cur_n_tokens_3d / n_tokens))
                kernel_size_2d = int(np.floor(cur_n_tokens_2d / n_tokens))
                if kernel_size_2d <= 1 or kernel_size_3d <= 1:  # we don't have what to max pool
                    return create_video_features(feat_2d, feat_3d, n_tokens, strategy='nearest')
                feat_2d = torch.nn.functional.max_pool1d(feat_2d.permute(1, 0), kernel_size=kernel_size_2d).permute(1, 0)
                feat_3d = torch.nn.functional.max_pool1d(feat_3d.permute(1, 0), kernel_size=kernel_size_3d).permute(1, 0)
                return create_video_features(feat_2d, feat_3d, n_tokens, strategy='nearest')
        else:
            raise NotImplementedError
def _crop_audio_from_mel_spec(start, end, mel_spec):
    """Crop a mel spectrogram to the [start, end] time window (seconds).

    Uses librosa's frame conversion with the same STFT parameters the
    spectrogram was computed with (sr=16000, hop=160, n_fft=400).
    """
    first, last = librosa.core.time_to_frames([start, end], sr=16000, hop_length=160,
                                              n_fft=400)
    return mel_spec[:, max(0, first):last]
def _get_video(features_2d, features_3d, fps_2d, fps_3d, starts, ends, n_video_tokens, video_sampling_strategy='clip',
               accurate_borders=False):
    """Slice per-segment 2d/3d features and turn each segment into fixed-size video tokens.

    starts/ends are per-segment time boundaries in seconds; the result is a
    pair of stacked tensors (videos, video_masks), one row per segment.
    """
    def _segment(features, fps, t0, t1):
        # Map the [t0, t1] second window onto feature-frame indices.
        if accurate_borders:
            lo = int(np.floor(t0 * fps))
            hi = int(np.ceil(t1 * fps))
        else:
            # Legacy rounding inherited from the baseline implementation.
            lo = int(t0 * fps)
            hi = int(t1 * fps) + 1
        return None if features is None else features[lo:hi]

    videos = []
    masks = []
    for i in range(len(starts)):
        clip_2d = _segment(features_2d, fps_2d, starts[i], ends[i])
        clip_3d = _segment(features_3d, fps_3d, starts[i], ends[i])
        video, video_mask = create_video_features(clip_2d, clip_3d, n_video_tokens, strategy=video_sampling_strategy)
        videos.append(video)
        masks.append(video_mask)
    return torch.stack(videos, dim=0), torch.stack(masks, dim=0)
def cut_into_clips(video, video_mask, audio, audio_mask, text, text_mask, raw_text, audio_STFT_nframes, id_, dataset,
n_clips):
# create audio clips
max_num_audio_STFT_frames = int(audio_mask.shape[0] // n_clips)
audio = audio.permute(1, 0) \
.view(n_clips, max_num_audio_STFT_frames, audio.size(0)) \
.permute(0, 2, 1)
audio_mask = audio_mask.view(n_clips, max_num_audio_STFT_frames)
# create video clips
n_video_tokens = int(video_mask.shape[0] // n_clips)
video = video.view(n_clips, n_video_tokens, video.size(-1))
video_mask = video_mask.view(n_clips, n_video_tokens)
# copy text
text = text.unsqueeze(0).expand(n_clips, -1, -1)
text_mask = text_mask.unsqueeze(0).expand(n_clips, -1)
# determine audio_STFT_nframes
new_audio_STFT_nframes = []
new_id = []
for i in range(n_clips):
left_frame = audio_STFT_nframes - i * max_num_audio_STFT_frames
if (i == 0) or (left_frame > 0.7 * max_num_audio_STFT_frames):
new_audio_STFT_nframes.append(min(max_num_audio_STFT_frames, left_frame))
new_id.append(id_)
else:
new_audio_STFT_nframes.append(max_num_audio_STFT_frames)
new_id.append('-1')
audio_STFT_nframes = torch.tensor(new_audio_STFT_nframes)
id_ = new_id
dataset = [dataset] * n_clips
raw_text = [raw_text] * n_clips
return video, video_mask, audio, audio_mask, text, text_mask, raw_text, audio_STFT_nframes, id_, dataset | [
"torch.ones",
"torch.stack",
"librosa.core.time_to_frames",
"numpy.ceil",
"numpy.floor",
"numpy.zeros",
"torch.cat",
"torch.max",
"torch.zeros",
"torch.nn.functional.normalize",
"torch.tensor",
"torch.from_numpy"
] | [((260, 331), 'numpy.zeros', 'np.zeros', (['(mel_spec.shape[0], max_audio_STFT_nframes)'], {'dtype': 'np.float32'}), '((mel_spec.shape[0], max_audio_STFT_nframes), dtype=np.float32)\n', (268, 331), True, 'import numpy as np\n'), ((349, 399), 'numpy.zeros', 'np.zeros', (['max_audio_STFT_nframes'], {'dtype': 'np.float32'}), '(max_audio_STFT_nframes, dtype=np.float32)\n', (357, 399), True, 'import numpy as np\n'), ((887, 934), 'numpy.zeros', 'np.zeros', (['(max_words, we_dim)'], {'dtype': 'np.float32'}), '((max_words, we_dim), dtype=np.float32)\n', (895, 934), True, 'import numpy as np\n'), ((951, 988), 'numpy.zeros', 'np.zeros', (['max_words'], {'dtype': 'np.float32'}), '(max_words, dtype=np.float32)\n', (959, 988), True, 'import numpy as np\n'), ((8375, 8453), 'librosa.core.time_to_frames', 'librosa.core.time_to_frames', (['[start, end]'], {'sr': '(16000)', 'hop_length': '(160)', 'n_fft': '(400)'}), '([start, end], sr=16000, hop_length=160, n_fft=400)\n', (8402, 8453), False, 'import librosa\n'), ((9567, 9597), 'torch.stack', 'torch.stack', (['all_videos'], {'dim': '(0)'}), '(all_videos, dim=0)\n', (9578, 9597), False, 'import torch\n'), ((9620, 9655), 'torch.stack', 'torch.stack', (['all_video_masks'], {'dim': '(0)'}), '(all_video_masks, dim=0)\n', (9631, 9655), False, 'import torch\n'), ((10981, 11017), 'torch.tensor', 'torch.tensor', (['new_audio_STFT_nframes'], {}), '(new_audio_STFT_nframes)\n', (10993, 11017), False, 'import torch\n'), ((1602, 1631), 'torch.cat', 'torch.cat', (['(feat_2d, feat_3d)'], {}), '((feat_2d, feat_3d))\n', (1611, 1631), False, 'import torch\n'), ((1653, 1666), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (1663, 1666), False, 'import torch\n'), ((595, 618), 'torch.from_numpy', 'torch.from_numpy', (['audio'], {}), '(audio)\n', (611, 618), False, 'import torch\n'), ((644, 672), 'torch.from_numpy', 'torch.from_numpy', (['audio_mask'], {}), '(audio_mask)\n', (660, 672), False, 'import torch\n'), ((1133, 1155), 'torch.from_numpy', 
'torch.from_numpy', (['text'], {}), '(text)\n', (1149, 1155), False, 'import torch\n'), ((1180, 1207), 'torch.from_numpy', 'torch.from_numpy', (['text_mask'], {}), '(text_mask)\n', (1196, 1207), False, 'import torch\n'), ((1438, 1467), 'torch.zeros', 'torch.zeros', (['feat_2d.shape[1]'], {}), '(feat_2d.shape[1])\n', (1449, 1467), False, 'import torch\n'), ((1556, 1585), 'torch.zeros', 'torch.zeros', (['feat_3d.shape[1]'], {}), '(feat_3d.shape[1])\n', (1567, 1585), False, 'import torch\n'), ((1832, 1872), 'torch.zeros', 'torch.zeros', (['n_tokens', 'feat_3d.shape[-1]'], {}), '(n_tokens, feat_3d.shape[-1])\n', (1843, 1872), False, 'import torch\n'), ((1902, 1923), 'torch.zeros', 'torch.zeros', (['n_tokens'], {}), '(n_tokens)\n', (1913, 1923), False, 'import torch\n'), ((2022, 2060), 'torch.nn.functional.normalize', 'F.normalize', (['feat_3d[:n_tokens]'], {'dim': '(1)'}), '(feat_3d[:n_tokens], dim=1)\n', (2033, 2060), True, 'from torch.nn import functional as F\n'), ((8832, 8853), 'numpy.floor', 'np.floor', (['(start * fps)'], {}), '(start * fps)\n', (8840, 8853), True, 'import numpy as np\n'), ((8877, 8895), 'numpy.ceil', 'np.ceil', (['(end * fps)'], {}), '(end * fps)\n', (8884, 8895), True, 'import numpy as np\n'), ((1380, 1405), 'torch.max', 'torch.max', (['feat_2d'], {'dim': '(0)'}), '(feat_2d, dim=0)\n', (1389, 1405), False, 'import torch\n'), ((1498, 1523), 'torch.max', 'torch.max', (['feat_3d'], {'dim': '(0)'}), '(feat_3d, dim=0)\n', (1507, 1523), False, 'import torch\n'), ((2209, 2249), 'torch.zeros', 'torch.zeros', (['n_tokens', 'feat_2d.shape[-1]'], {}), '(n_tokens, feat_2d.shape[-1])\n', (2220, 2249), False, 'import torch\n'), ((2279, 2300), 'torch.zeros', 'torch.zeros', (['n_tokens'], {}), '(n_tokens)\n', (2290, 2300), False, 'import torch\n'), ((2399, 2437), 'torch.nn.functional.normalize', 'F.normalize', (['feat_2d[:n_tokens]'], {'dim': '(1)'}), '(feat_2d[:n_tokens], dim=1)\n', (2410, 2437), True, 'from torch.nn import functional as F\n'), ((2570, 2630), 
'torch.zeros', 'torch.zeros', (['n_tokens', '(feat_2d.shape[-1] + feat_3d.shape[-1])'], {}), '(n_tokens, feat_2d.shape[-1] + feat_3d.shape[-1])\n', (2581, 2630), False, 'import torch\n'), ((2660, 2681), 'torch.zeros', 'torch.zeros', (['n_tokens'], {}), '(n_tokens)\n', (2671, 2681), False, 'import torch\n'), ((3879, 3906), 'torch.nn.functional.normalize', 'F.normalize', (['feat_3d'], {'dim': '(1)'}), '(feat_3d, dim=1)\n', (3890, 3906), True, 'from torch.nn import functional as F\n'), ((3936, 3956), 'torch.ones', 'torch.ones', (['n_tokens'], {}), '(n_tokens)\n', (3946, 3956), False, 'import torch\n'), ((3151, 3189), 'torch.nn.functional.normalize', 'F.normalize', (['feat_2d[:n_tokens]'], {'dim': '(1)'}), '(feat_2d[:n_tokens], dim=1)\n', (3162, 3189), True, 'from torch.nn import functional as F\n'), ((3245, 3283), 'torch.nn.functional.normalize', 'F.normalize', (['feat_3d[:n_tokens]'], {'dim': '(1)'}), '(feat_3d[:n_tokens], dim=1)\n', (3256, 3283), True, 'from torch.nn import functional as F\n'), ((4465, 4492), 'torch.nn.functional.normalize', 'F.normalize', (['feat_2d'], {'dim': '(1)'}), '(feat_2d, dim=1)\n', (4476, 4492), True, 'from torch.nn import functional as F\n'), ((4522, 4542), 'torch.ones', 'torch.ones', (['n_tokens'], {}), '(n_tokens)\n', (4532, 4542), False, 'import torch\n'), ((4905, 4965), 'torch.zeros', 'torch.zeros', (['n_tokens', '(feat_2d.shape[-1] + feat_3d.shape[-1])'], {}), '(n_tokens, feat_2d.shape[-1] + feat_3d.shape[-1])\n', (4916, 4965), False, 'import torch\n'), ((4995, 5016), 'torch.zeros', 'torch.zeros', (['n_tokens'], {}), '(n_tokens)\n', (5006, 5016), False, 'import torch\n'), ((5475, 5502), 'torch.nn.functional.normalize', 'F.normalize', (['feat_2d'], {'dim': '(1)'}), '(feat_2d, dim=1)\n', (5486, 5502), True, 'from torch.nn import functional as F\n'), ((5539, 5566), 'torch.nn.functional.normalize', 'F.normalize', (['feat_3d'], {'dim': '(1)'}), '(feat_3d, dim=1)\n', (5550, 5566), True, 'from torch.nn import functional as F\n'), ((5942, 
5978), 'numpy.floor', 'np.floor', (['(cur_n_tokens_3d / n_tokens)'], {}), '(cur_n_tokens_3d / n_tokens)\n', (5950, 5978), True, 'import numpy as np\n'), ((6746, 6782), 'numpy.floor', 'np.floor', (['(cur_n_tokens_2d / n_tokens)'], {}), '(cur_n_tokens_2d / n_tokens)\n', (6754, 6782), True, 'import numpy as np\n'), ((7610, 7646), 'numpy.floor', 'np.floor', (['(cur_n_tokens_3d / n_tokens)'], {}), '(cur_n_tokens_3d / n_tokens)\n', (7618, 7646), True, 'import numpy as np\n'), ((7685, 7721), 'numpy.floor', 'np.floor', (['(cur_n_tokens_2d / n_tokens)'], {}), '(cur_n_tokens_2d / n_tokens)\n', (7693, 7721), True, 'import numpy as np\n')] |
import numpy as np
from scipy import interpolate
from scipy import optimize
import warnings
def calculate_smax(spin_C=False):
    r"""Return the maximal saturation factor.

    Args:
        spin_C (float): unpaired spin concentration in units of Molar

    Returns:
        float: maximal saturation factor

    .. math::
        \mathrm{s_{max}} = 1 - (2 / (3 + (3 * (\mathrm{spin\_C} * 198.7))))

    Phys. Chem. Chem. Phys. 13 (2011) 3630 & J. Chem. Phys. 48 (1968) 4211.
    """
    concentration = spin_C
    # Legacy behaviour: values above 5.0 are assumed to be given in uM.
    if concentration > 5.0:
        warnings.warn(
            "Spin concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future."
        )
        concentration = concentration * 1e-6
    return 1 - (2 / (3 + (3 * (concentration * 198.7))))
def interpolate_T1(
    E_powers=False,
    T1_powers=False,
    T1_array=False,
    interpolate_method="linear",
    delta_T1_water=False,
    T1_water=False,
    macro_C=False,
    spin_C=1,
    T10=2.0,
    T100=2.5,
):
    """Interpolate measured T1 values onto the enhancement power axis.

    Args:
        E_powers (numpy.array): microwave powers at which to evaluate
        T1_powers (numpy.array): microwave powers of the measured T1s
        T1_array (numpy.array): the measured T1s
        interpolate_method (str): "second_order" or "linear"
        delta_T1_water (optional) (float): change in T1 of water at max power
        T1_water (optional) (float): T1 of pure water
        macro_C (optional) (float): macromolecule concentration in M
        spin_C (float): unpaired electron spin concentration in M
        T10 (float): T1 measured with unpaired electrons
        T100 (float): T1 measured without unpaired electrons

    Returns:
        numpy.array: T1 values interpolated at E_powers

    Raises:
        Exception: if interpolate_method is neither "linear" nor "second_order"

    "linear": Eq. 39 of http://dx.doi.org/10.1016/j.pnmrs.2013.06.001
    "second_order": Eq. 22 of https://doi.org/10.1016/bs.mie.2018.09.024
    """
    # Legacy unit handling: large values are reinterpreted as uM.
    if spin_C > 10.0:
        warnings.warn(
            "Spin concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future."
        )
        spin_C = spin_C / 1e6

    def _fit_second_order(macro_conc, delta_water, t1_water):
        # 2nd order fit, Franck and Han MIE (Eq. 22) and (Eq. 23)
        kHH = (1.0 / T10 - 1.0 / t1_water) / macro_conc
        krp = (
            (1.0 / T1_array)
            - (1.0 / (t1_water + delta_water * T1_powers))
            - (kHH * (macro_conc))
        ) / (spin_C)
        coeffs = np.polyfit(T1_powers, krp, 2)
        krp_at_E = np.polyval(coeffs, E_powers)
        return 1.0 / (
            ((spin_C) * krp_at_E)
            + (1.0 / (t1_water + delta_water * E_powers))
            + (kHH * (macro_conc))
        )

    def _fit_linear():
        # linear fit, Franck et al. PNMRS (Eq. 39)
        linear_t1 = 1.0 / ((1.0 / T1_array) - (1.0 / T10) + (1.0 / T100))
        coeffs = np.polyfit(T1_powers, linear_t1, 1)
        t1_at_E = np.polyval(coeffs, E_powers)
        return t1_at_E / (1.0 + (t1_at_E / T10) - (t1_at_E / T100))

    if interpolate_method == "second_order":
        if macro_C:
            if macro_C > 10.0:
                warnings.warn(
                    "Macromolecule concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future."
                )
                macro_C = macro_C / 1e6
        else:
            # Fall back to the (possibly rescaled) spin concentration.
            macro_C = spin_C
        if not delta_T1_water:
            delta_T1_water = T1_array[-1] - T1_array[0]
        if not T1_water:
            T1_water = T100
        return _fit_second_order(macro_C, delta_T1_water, T1_water)
    if interpolate_method == "linear":
        return _fit_linear()
    raise Exception("invalid interpolate_method")
def calculate_ksigma_array(powers=False, ksigma_smax=95.4, p_12=False):
    """Evaluate the ksigma saturation curve at the given microwave powers.

    Args:
        powers (numpy.array): array of powers
        ksigma_smax (float): product of ksigma and smax
        p_12 (float): power at half max for the ksigma fit

    Returns:
        numpy.array: ksigma*s(p) evaluated at each power

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    """
    # Right side of Eq. 42: a hyperbola saturating at ksigma_smax.
    return (ksigma_smax * powers) / (p_12 + powers)
def calculate_ksigma(ksigma_sp=False, powers=False, smax=1):
    """Fit ksigma and the power at half saturation from a ksigma*s(p) array.

    Args:
        ksigma_sp (numpy.array): array of ksigma*s(p) values
        powers (numpy.array): array of E_powers
        smax (float): maximal saturation factor

    Returns:
        ksigma (float): calculated ksigma
        ksigma_stdd (float): standard deviation in ksigma
        ksigma_fit (numpy.array): fitted saturation curve evaluated at powers

    Raises:
        AssertionError: if the fitted ksigma*smax is not positive

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    """
    # Nonlinear least-squares fit of Eq. 42; initial guess: half the bulk
    # ksigma and 10% of the maximum power for p_12.
    # see https://docs.scipy.org/doc/scipy/reference/optimize.html
    popt, pcov = optimize.curve_fit(
        calculate_ksigma_array,
        powers,
        ksigma_sp,
        p0=[95.4 / 2, (max(powers) * 0.1)],
        method="lm",
    )
    # BUG FIX: the message previously used %d, which truncates the float
    # fit result; %f reports the actual offending value.
    assert popt[0] > 0, "Unexpected ksigma value: %f < 0" % popt[0]
    ksigma_smax = popt[0]
    p_12 = popt[1]
    # One-sigma parameter uncertainties from the covariance diagonal.
    ksigma_std = np.sqrt(np.diag(pcov))
    ksigma_stdd = ksigma_std[0] / smax
    ksigma_fit = calculate_ksigma_array(powers, ksigma_smax, p_12)
    ksigma = ksigma_smax / smax
    return ksigma, ksigma_stdd, ksigma_fit
def calculate_xi(tcorr=54, omega_e=0.0614, omega_H=9.3231e-05):
    """Return the coupling factor for a given translational correlation time.

    Args:
        tcorr (float): translational diffusion correlation time
        omega_e (float): electron gyromagnetic ratio
        omega_H (float): proton gyromagnetic ratio

    Returns:
        float: coupling factor xi

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    """
    # Complex arguments of the three spectral density terms.
    z_diff = np.sqrt(1j * (omega_e - omega_H) * tcorr)
    z_sum = np.sqrt(1j * (omega_e + omega_H) * tcorr)
    z_h = np.sqrt(1j * omega_H * tcorr)

    def _spectral_density(z):
        # (Eq. 2) force-free hard-sphere spectral density.
        return (1 + (z / 4)) / (1 + z + ((4 * (z**2)) / 9) + ((z**3) / 9))

    j_diff = _spectral_density(z_diff)
    j_sum = _spectral_density(z_sum)
    j_h = _spectral_density(z_h)
    # (Eq. 23) coupling factor from the spectral density functions.
    numerator = (6 * np.real(j_diff)) - np.real(j_sum)
    denominator = (6 * np.real(j_diff)) + (3 * np.real(j_h)) + np.real(j_sum)
    return numerator / denominator
def calculate_tcorr(coupling_factor=0.27, omega_e=0.0614, omega_H=9.3231e-05):
    """Invert the coupling factor to a translational correlation time (ps).

    Args:
        coupling_factor (float): coupling factor
        omega_e (float): electron gyromagnetic ratio
        omega_H (float): proton gyromagnetic ratio

    Returns:
        float: translational diffusion correlation time in pico seconds

    Raises:
        ValueError: if the root search does not converge

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    """

    def _objective(tcorr):
        # Zero when the model coupling factor matches the measured one.
        return calculate_xi(tcorr, omega_e=omega_e, omega_H=omega_H) - coupling_factor

    # Bracketed root search over a wide tcorr range (1 ps .. 1e5 ps).
    # see https://docs.scipy.org/doc/scipy/reference/optimize.html
    solution = optimize.root_scalar(_objective, method="brentq", bracket=[1, 1e5])
    if not solution.converged:
        raise ValueError("Could not find tcorr")
    return solution.root
def calculate_uncorrected_Ep(
    uncorrected_xi=0.33,
    p_12_unc=0,
    E_powers=False,
    T10=2.0,
    T100=2.5,
    omega_ratio=658.5792,
    smax=1,
):
    """Evaluate the uncorrected enhancement model E(p).

    Args:
        uncorrected_xi (float): uncorrected coupling factor
        p_12_unc (float): power at half max for the uncorrected_xi fit
        E_powers (numpy.array): array of E_powers
        T10 (float): T1(0), proton T1 with microwave power=0
        T100 (float): T10(0), proton T1 with spin_C=0 and microwave power=0
        omega_ratio (float): ratio of electron & proton gyromagnetic ratios
        smax (float): maximal saturation factor

    Returns:
        numpy.array: uncorrected enhancement curve

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    """
    # Leakage factor times frequency ratio (right side of Eq. 42).
    leakage = 1 - (T10 / T100)
    saturation = (E_powers * smax) / (p_12_unc + E_powers)
    return 1 - (uncorrected_xi * leakage * omega_ratio) * saturation
def _residual_Ep(
    x,
    E_array: np.array,
    E_powers: np.array,
    T10: float,
    T100: float,
    omega_ratio: float,
    smax: float,
):
    """Residuals between measured enhancements and the model E(p).

    Args:
        x (list): [uncorrected coupling factor, power at half max]
        E_array (numpy.array): array of enhancements
        E_powers (numpy.array): array of E_powers
        T10 (float): T1(0), proton T1 with microwave power=0
        T100 (float): T10(0), proton T1 with spin_C=0 and microwave power=0
        omega_ratio (float): ratio of electron & proton gyromagnetic ratios
        smax (float): maximal saturation factor

    Returns:
        numpy.array: residual vector for the least-squares fit
    """
    uncorrected_xi, p_12_unc = x[0], x[1]
    model = calculate_uncorrected_Ep(
        uncorrected_xi=uncorrected_xi,
        p_12_unc=p_12_unc,
        E_powers=E_powers,
        T10=T10,
        T100=T100,
        omega_ratio=omega_ratio,
        smax=smax,
    )
    return E_array - model
def calculate_uncorrected_xi(
    E_array=False,
    E_powers=False,
    T10=2.0,
    T100=2.5,
    omega_ratio=658.5792,
    smax=1,
):
    """Fit the uncorrected coupling factor and half-saturation power.

    Args:
        E_array (numpy.array): array of enhancements
        E_powers (numpy.array): array of powers
        T10 (float): T1(0), proton T1 with microwave power=0
        T100 (float): T10(0), proton T1 with spin_C=0 and microwave power=0
        omega_ratio (float): ratio of electron & proton gyromagnetic ratios
        smax (float): maximal saturation factor

    Returns:
        uncorrected_xi (float): uncorrected coupling factor
        p_12_unc (float): power at half max for the uncorrected_xi fit

    Raises:
        ValueError: if the least-squares fit does not converge
        AssertionError: if the fitted coupling factor is not positive

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    """
    # Levenberg-Marquardt least-squares fit of Eqs. 7/44; initial guess:
    # bulk-like coupling factor and 10% of the maximum power for p_12.
    # see https://docs.scipy.org/doc/scipy/reference/optimize.html
    results = optimize.least_squares(
        fun=_residual_Ep,
        x0=[0.27, (max(E_powers) * 0.1)],
        args=(E_array, E_powers, T10, T100, omega_ratio, smax),
        jac="2-point",
        method="lm",
    )
    if not results.success:
        raise ValueError("Could not fit Ep")
    # BUG FIX: the message previously used %d, which truncates the float
    # fit result; %f reports the actual offending value.
    assert results.x[0] > 0, "Unexpected coupling_factor value: %f < 0" % results.x[0]
    uncorrected_xi = results.x[0]
    p_12_unc = results.x[1]
    return uncorrected_xi, p_12_unc
def odnp(inputs={}, constants={}):
    """Function for performing ODNP calculations.

    Args:
        inputs (dict) : experimental data and settings; must contain
            "smax_model", "spin_C", "field", "T10", "T100", "E_array",
            "E_powers", "T1_array" and (optionally) "T1_powers" /
            "interpolate_method"
        constants (optional) (dict) : overrides for the literature constants

    Returns:
        hydration_results (dict) : derived hydration quantities (ksigma,
            krho, klow, coupling factor, tcorr, Dlocal, ratios to bulk, ...)

    NOTE(review): both parameters use mutable default arguments ({}); safe
    here only because they are never mutated when left at their defaults.

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    http://dx.doi.org/10.1016/j.pnmrs.2013.06.001
    Methods in Enzymology, Chapter 5, Volume 615, (2019) 131-175
    https://doi.org/10.1016/bs.mie.2018.09.024
    """
    if not inputs:
        raise ValueError("Please supply a valid inputs dictionary")
    # Literature default constants; any key may be overridden via `constants`.
    odnp_constants = {
        "ksigma_bulk": 95.4,
        "krho_bulk": 353.4,
        "klow_bulk": 366,
        "tcorr_bulk": 54,
        "D_H2O": 2.3e-9,
        "D_SL": 4.1e-10,
        "delta_T1_water": False,
        "T1_water": False,
        "macro_C": False,
    }
    # these constants have been compiled from the various ODNP literature
    if constants:
        for ky in odnp_constants.keys():
            if ky in constants.keys():
                odnp_constants[ky] = constants[ky]
    # NOTE(review): if smax_model is a string other than "tethered"/"free"
    # (and not numeric), s_max is never assigned and the code below raises
    # NameError — confirm upstream validation of smax_model.
    if inputs["smax_model"] == "tethered":
        # Option 1, tether spin label
        s_max = 1 # (section 2.2) maximal saturation factor
    elif inputs["smax_model"] == "free":
        # Option 2, free spin probe
        s_max = calculate_smax(inputs["spin_C"]) # from:
        # Phys. Chem. Chem. Phys. 13 (2011) 3630. &
        # J. Chem. Phys. 48 (1968) 4211.
    if isinstance(inputs["smax_model"], (int, float)):
        # Option 3, manual input of smax
        if not (inputs["smax_model"] <= 1 and inputs["smax_model"] > 0):
            raise ValueError("smax must be a number between 0 and 1")
        s_max = inputs["smax_model"]
    omega_e = (1.76085963023e-1) * (inputs["field"] / 1000)
    # gamma_e in 1/ps for the tcorr unit, then correct by field in T.
    # gamma_e is from NIST. The field cancels in the following omega_ratio but you
    # need these individually for the spectral density functions later.
    omega_H = (2.6752218744e-4) * (inputs["field"] / 1000)
    # gamma_H in 1/ps for the tcorr unit, then correct by field in T.
    # gamma_H is from NIST. The field cancels in the following omega_ratio but you
    # need these individually for the spectral density functions later.
    omega_ratio = (omega_e / (2 * np.pi)) / (omega_H / (2 * np.pi))
    # (Eq. 4-6) ratio of omega_e and omega_H, divide by (2*pi) to get angular
    # frequency units in order to correspond to S_0/I_0, this is also ~= to the
    # ratio of the resonance frequencies for the experiment, i.e. MW freq/RF freq
    # T1 data must be available at every enhancement power: either interpolate
    # from the measured T1_powers or use T1_array directly when lengths match.
    if "T1_powers" in inputs.keys():
        T1p = interpolate_T1(
            E_powers=inputs["E_powers"],
            T1_powers=inputs["T1_powers"],
            T1_array=inputs["T1_array"],
            interpolate_method=inputs["interpolate_method"],
            delta_T1_water=odnp_constants["delta_T1_water"],
            T1_water=odnp_constants["T1_water"],
            macro_C=odnp_constants["macro_C"],
            spin_C=inputs["spin_C"],
            T10=inputs["T10"],
            T100=inputs["T100"],
        )
    else:
        if len(inputs["T1_array"]) == len(inputs["E_array"]):
            T1p = inputs["T1_array"]
        else:
            raise ValueError(
                "'T1_array' must be equal in length to 'E_array'. Otherwise give 'T1_powers' equal in length to 'T1_array' to interpolate."
            )
    # ksigma_array = (1 - inputs["E_array"]) / (
    #     inputs["spin_C"] * 1e-6 * omega_ratio * T1p
    # )
    # NOTE(review): this rescaling mutates the caller's `inputs` dict in place
    # — a second call with the same dict would rescale spin_C again.
    if inputs["spin_C"] > 10.0:
        warnings.warn(
            "Spin concentration should be given in units of Molar. Units will be interpreted as uM, but in the future this will be removed."
        )
        inputs["spin_C"] *= 1e-6
    ksigma_array = (1 - inputs["E_array"]) / (inputs["spin_C"] * omega_ratio * T1p)
    # (Eq. 41) this calculates the array of ksigma*s(p) from the enhancement array,
    # dividing by the T1 array for the "corrected" analysis
    ksigma, ksigma_stdd, ksigma_fit = calculate_ksigma(
        ksigma_array, inputs["E_powers"], s_max
    )
    # fit to the right side of Eq. 42 to get (ksigma*smax) and half of the E_power at s_max, called p_12 here
    krho = ((1 / inputs["T10"]) - (1 / inputs["T100"])) / (
        inputs["spin_C"]
    )  # (Eq. 36) "self" relaxivity, unit is s^-1 M^-1
    coupling_factor = ksigma / krho # coupling factor, unitless
    tcorr = calculate_tcorr(coupling_factor, omega_e, omega_H)
    # (Eq. 21-23) this calls the fit to the spectral density functions. The fit
    # optimizes the value of tcorr in the calculation of coupling_factor, the correct tcorr
    # is the one for which the calculation of coupling_factor from the spectral density
    # functions matches the coupling_factor found experimentally. tcorr unit is ps
    Dlocal = (odnp_constants["tcorr_bulk"] / tcorr) * (
        odnp_constants["D_H2O"] + odnp_constants["D_SL"]
    )
    # (Eq. 19-20) local diffusivity, i.e. diffusivity of the water near the spin label
    klow = ((5 * krho) - (7 * ksigma)) / 3
    # section 6, (Eq. 13). this describes the relatively slowly diffusing water
    # near the spin label, sometimes called "bound" water.
    # This is defined in its most compact form in:
    # Chapter Five - Overhauser Dynamic Nuclear Polarization
    # for the Study of Hydration Dynamics, Explained. Methods in Enzymology, Volume 615, 2019
    # But also explained well in:
    # "Anomalously Rapid Hydration Water Diffusion Dynamics
    # Near DNA Surfaces" J. Am. Chem. Soc. 2015, 137, 12013-12023.
    xi_unc, p_12_unc = calculate_uncorrected_xi(
        inputs["E_array"],
        inputs["E_powers"],
        inputs["T10"],
        inputs["T100"],
        omega_ratio,
        s_max,
    )
    # (Eqs. 7 and 44) this calculates the coupling factor using the "uncorrected" analysis
    uncorrected_Ep = calculate_uncorrected_Ep(
        xi_unc,
        p_12_unc,
        inputs["E_powers"],
        inputs["T10"],
        inputs["T100"],
        omega_ratio,
        s_max,
    )
    # (Eqs. 7 and 44) this calculates the "uncorrected" enhnacement array using xi_unc
    return {
        "uncorrected_Ep": uncorrected_Ep,
        "uncorrected_xi": xi_unc,
        "interpolated_T1": T1p,
        "ksigma_array": ksigma_array,
        "ksigma_fit": ksigma_fit,
        "ksigma": ksigma,
        "ksigma_stdd": ksigma_stdd,
        "ksigma_bulk_ratio": ksigma / odnp_constants["ksigma_bulk"],
        "krho": krho,
        "krho_bulk_ratio": krho / odnp_constants["krho_bulk"],
        "klow": klow,
        "klow_bulk_ratio": klow / odnp_constants["klow_bulk"],
        "coupling_factor": coupling_factor,
        "tcorr": tcorr,
        "tcorr_bulk_ratio": tcorr / odnp_constants["tcorr_bulk"],
        "Dlocal": Dlocal,
    }
def hydration(workspace):
    """Compute hydration quantities from a workspace dictionary.

    Args:
        workspace (dict): workspace or dictionary with 'hydration_inputs'
            and, optionally, 'hydration_constants' overrides

    Returns:
        dict: the 'hydration_results' dictionary (also stored back into
            workspace under that key)

    Raises:
        TypeError: if the 'hydration_inputs' dictionary is missing

    Progress in Nuclear Magnetic Resonance Spectroscopy 74 (2013) 33–56
    http://dx.doi.org/10.1016/j.pnmrs.2013.06.001
    Methods in Enzymology, Chapter 5, Volume 615, (2019) 131-175
    https://doi.org/10.1016/bs.mie.2018.09.024
    """
    if "hydration_inputs" not in workspace.keys():
        raise TypeError("the 'hydration_inputs' dictionary is missing!")
    # Literature defaults; the same constant set odnp() itself uses.
    odnp_constants = {
        "ksigma_bulk": 95.4,
        "krho_bulk": 353.4,
        "klow_bulk": 366,
        "tcorr_bulk": 54,
        "D_H2O": 2.3e-9,
        "D_SL": 4.1e-10,
        "delta_T1_water": False,
        "T1_water": False,
        "macro_C": False,
    }
    if "hydration_constants" in workspace.keys():
        overrides = workspace["hydration_constants"]
        for key in odnp_constants.keys():
            if key in overrides.keys():
                odnp_constants[key] = overrides[key]
    results = odnp(workspace["hydration_inputs"], odnp_constants)
    # Cache the results back onto the workspace for later inspection.
    workspace["hydration_results"] = results
    return results
| [
"numpy.polyfit",
"numpy.polyval",
"numpy.real",
"warnings.warn",
"numpy.diag",
"numpy.sqrt"
] | [((6212, 6255), 'numpy.sqrt', 'np.sqrt', (['(1.0j * (omega_e - omega_H) * tcorr)'], {}), '(1.0j * (omega_e - omega_H) * tcorr)\n', (6219, 6255), True, 'import numpy as np\n'), ((6265, 6308), 'numpy.sqrt', 'np.sqrt', (['(1.0j * (omega_e + omega_H) * tcorr)'], {}), '(1.0j * (omega_e + omega_H) * tcorr)\n', (6272, 6308), True, 'import numpy as np\n'), ((6316, 6347), 'numpy.sqrt', 'np.sqrt', (['(1.0j * omega_H * tcorr)'], {}), '(1.0j * omega_H * tcorr)\n', (6323, 6347), True, 'import numpy as np\n'), ((561, 760), 'warnings.warn', 'warnings.warn', (['"""Spin concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future."""'], {}), "(\n 'Spin concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future.'\n )\n", (574, 760), False, 'import warnings\n'), ((2159, 2358), 'warnings.warn', 'warnings.warn', (['"""Spin concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future."""'], {}), "(\n 'Spin concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future.'\n )\n", (2172, 2358), False, 'import warnings\n'), ((3296, 3325), 'numpy.polyfit', 'np.polyfit', (['T1_powers', 'krp', '(2)'], {}), '(T1_powers, krp, 2)\n', (3306, 3325), True, 'import numpy as np\n'), ((3350, 3373), 'numpy.polyval', 'np.polyval', (['p', 'E_powers'], {}), '(p, E_powers)\n', (3360, 3373), True, 'import numpy as np\n'), ((5520, 5533), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (5527, 5533), True, 'import numpy as np\n'), ((15122, 15275), 'warnings.warn', 'warnings.warn', (['"""Spin concentration should be given in units of Molar. 
Units will be interpreted as uM, but in the future this will be removed."""'], {}), "(\n 'Spin concentration should be given in units of Molar. Units will be interpreted as uM, but in the future this will be removed.'\n )\n", (15135, 15275), False, 'import warnings\n'), ((3728, 3763), 'numpy.polyfit', 'np.polyfit', (['T1_powers', 'linear_t1', '(1)'], {}), '(T1_powers, linear_t1, 1)\n', (3738, 3763), True, 'import numpy as np\n'), ((3788, 3811), 'numpy.polyval', 'np.polyval', (['p', 'E_powers'], {}), '(p, E_powers)\n', (3798, 3811), True, 'import numpy as np\n'), ((6737, 6750), 'numpy.real', 'np.real', (['Jsum'], {}), '(Jsum)\n', (6744, 6750), True, 'import numpy as np\n'), ((6807, 6820), 'numpy.real', 'np.real', (['Jsum'], {}), '(Jsum)\n', (6814, 6820), True, 'import numpy as np\n'), ((2608, 2816), 'warnings.warn', 'warnings.warn', (['"""Macromolecule concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future."""'], {}), "(\n 'Macromolecule concentration will be interpreted as uM. Please give concentration in units of Molar. All units should be SI base units, other units will be depreciated in the future.'\n )\n", (2621, 2816), False, 'import warnings\n'), ((6719, 6733), 'numpy.real', 'np.real', (['Jdiff'], {}), '(Jdiff)\n', (6726, 6733), True, 'import numpy as np\n'), ((6769, 6783), 'numpy.real', 'np.real', (['Jdiff'], {}), '(Jdiff)\n', (6776, 6783), True, 'import numpy as np\n'), ((6792, 6803), 'numpy.real', 'np.real', (['JH'], {}), '(JH)\n', (6799, 6803), True, 'import numpy as np\n')] |
# Copyright (C) 2021 <NAME>, <NAME>
#
# SPDX-License-Identifier: MIT
"""Motors and a class bundling two motors together"""
from controller import Motor
import numpy as np
class IDPGate(Motor):
    """Motor that drives the robot's gate between open and closed."""

    def __init__(self, name):
        super().__init__(name)

    def open(self):
        """Open the robot gate (rotate a quarter turn)."""
        self.setPosition(0.5 * np.pi)

    def close(self):
        """Close the robot gate (return to the zero position)."""
        self.setPosition(0)
class IDPMotor(Motor):
    """Motor preconfigured for velocity control."""

    def __init__(self, name):
        super().__init__(name)
        # Infinite position target with zero initial velocity — presumably
        # this puts the Webots motor into velocity-control mode; confirm
        # against the controller API.
        self.setPosition(float('inf'))
        self.setVelocity(0.0)
class IDPMotorController:
    """Bundles the left and right drive motors and rate-limits speed changes."""

    def __init__(self, left_motor_name: str, right_motor_name: str, robot):
        # robot is expected to expose max_possible_speed and max_acc (dicts
        # keyed by "f" forward / "r" rotational) and timestep_actual —
        # TODO confirm against the robot class, which is not visible here.
        self.robot = robot
        self.left_motor = IDPMotor(left_motor_name)
        self.right_motor = IDPMotor(right_motor_name)
        # Cap commands at the slower of the two motors so both can follow.
        self.max_motor_speed = min(self.left_motor.getMaxVelocity(), self.right_motor.getMaxVelocity())
        # Last commanded forward/rotational speeds, used for rate limiting.
        self.last_speed = {"f": 0, "r": 0}  # TODO - Occasionally sync these with robot's velocities

    @property
    def velocities(self):
        # Current wheel velocities as fractions of the shared maximum.
        return np.array([self.left_motor.getVelocity(), self.right_motor.getVelocity()]) / self.max_motor_speed

    @velocities.setter
    def velocities(self, drive_fractions: np.array):
        """Set the velocities for each motor
        Args:
            drive_fractions (np.array): Speeds for left and right wheel respectively,
                as fractions of max speed (-1 -> 1), [left, right]
        """
        if len(drive_fractions) != 2:
            raise Exception("Velocities should be set by a 2 element array")

        # Reconstitute forward and rotational velocities
        f_speed = 0.5 * sum(drive_fractions)
        r_speed = 0.5 * (drive_fractions[0] - drive_fractions[1])

        # Process them to limit motor velocity changes
        def limit_velocity_change(drive, speed_type):
            # Clamp the requested speed to within max_acc * timestep of the
            # previously commanded speed, then remember the clamped value.
            speed = drive * self.robot.max_possible_speed[speed_type]
            max_speed = self.last_speed[speed_type] + (self.robot.max_acc[speed_type] * self.robot.timestep_actual)
            min_speed = self.last_speed[speed_type] - (self.robot.max_acc[speed_type] * self.robot.timestep_actual)
            speed = max(min(speed, max_speed), min_speed)
            self.last_speed[speed_type] = speed
            return speed / self.robot.max_possible_speed[speed_type]

        f_drive = limit_velocity_change(f_speed, "f")
        r_drive = limit_velocity_change(r_speed, "r")

        # Reassemble drive and convert to motor values
        values = np.array([f_drive + r_drive, f_drive - r_drive]) * self.max_motor_speed
        self.left_motor.setVelocity(values[0])
        self.right_motor.setVelocity(values[1])
| [
"numpy.array"
] | [((2536, 2584), 'numpy.array', 'np.array', (['[f_drive + r_drive, f_drive - r_drive]'], {}), '([f_drive + r_drive, f_drive - r_drive])\n', (2544, 2584), True, 'import numpy as np\n')] |
from __future__ import division
from matplotlib import pyplot as plt
from matplotlib.pylab import *
import matplotlib.colors as colors
import pandas as pd
import math
import numpy as np
import geopandas as gp
__all__ = ['plot_network_admcolmap_betweenness',
'plot_socioeconomic_attribute',
'truncate_colormap',
'plot_network_admcolmap_betweenness_new',
'plot_od_heatmap']
def plot_network_admcolmap_betweenness(gdf, gdf2, colname, betweenness_string,
                                       cmap='OrRd', linewidth=1.25, edgecolor='grey',
                                       maxbetweenness=0, maxpop=0, thres1=0.1, thres2=0.2):
    """Plot the transport network, colored by betweenness, over an admin choropleth.

    Args:
        gdf (GeoDataFrame): admin-area polygons holding the attribute `colname`
        gdf2 (GeoDataFrame): network edges with column `betweenness_string`
        colname (str): polygon attribute used for the grey background map
        betweenness_string (str): edge column holding betweenness values
        cmap (str): colormap name for the network edges
        linewidth (float): base linewidth for edges
        edgecolor (str): polygon boundary color
        maxbetweenness (float): upper anchor for the betweenness colorbar
        maxpop (float): upper anchor for the attribute colorbar
        thres1 (float): betweenness below this is drawn thinnest
        thres2 (float): betweenness at or above this (and below thres2) medium;
            above thres2 thickest
    """
    fig, ax = plt.subplots(figsize=(12, 9))
    ax.set_aspect('equal')
    # Color edges between the observed betweenness extremes.
    valmin1 = min(list(gdf2[betweenness_string]))
    valmax1 = max(list(gdf2[betweenness_string]))
    gdf2.plot(ax=ax, column=betweenness_string, cmap=cmap, vmin=valmin1, vmax=valmax1, linewidth=linewidth)
    # Map each edge's betweenness into one of three width classes.
    betweenness_list = list(gdf2[betweenness_string])
    betweenness_list = [1 if x < thres1 else 2 if x >= thres1 and x < thres2 else 3.5 for x in betweenness_list]
    # BUG FIX: set_linewidth was previously called twice per line (first with
    # a redundant `*1` factor, then again with the same value); one call is
    # sufficient and the behavior is identical.
    for i, ln in enumerate(ax.lines):
        ln.set_linewidth(betweenness_list[i])
    # Grey background choropleth of the admin attribute.
    valmin2 = min(list(gdf[colname]))
    valmax2 = max(list(gdf[colname]))
    gdf.plot(ax=ax, column=colname, cmap='Greys', vmin=valmin2, vmax=valmax2, linewidth=0.5, edgecolor=edgecolor, alpha=0.3)
    ax.set_title(colname)
    # remove the lon-lat in the x-y axis of the plot
    ax.axis('off')
    # Colorbar 1: the admin attribute, anchored to [0, maxpop].
    fig = ax.get_figure()
    cax = fig.add_axes([0.85, 0.45, 0.02, 0.43])
    sm = plt.cm.ScalarMappable(cmap='Greys')
    columnlist = list(gdf[colname])
    columnlist.append(0)
    columnlist.append(maxpop)
    cbmin, cbmax = min(columnlist), max(columnlist)
    sm.set_array(columnlist)
    cb = plt.colorbar(sm, cax=cax, label=colname, alpha=0.3)
    labels = [0, cbmax / 4, cbmax / 4 * 2, cbmax / 4 * 3, cbmax / 4 * 4]
    cb.set_ticks(labels)
    cb.set_ticklabels(labels)
    cb.ax.yaxis.label.set_font_properties(matplotlib.font_manager.FontProperties(size=16))
    cb.ax.tick_params(labelsize=16)
    # Colorbar 2: betweenness, anchored to [0, maxbetweenness].
    fig = ax.get_figure()
    cax = fig.add_axes([0.7, 0.45, 0.02, 0.43])
    sm = plt.cm.ScalarMappable(cmap=cmap)
    columnlist = list(gdf2[betweenness_string])
    columnlist.append(0)
    columnlist.append(maxbetweenness)
    cbmin, cbmax = min(columnlist), max(columnlist)
    cbmin, cbmax = round(cbmin, 3), round(cbmax, 3)
    sm.set_array(columnlist)
    cb = plt.colorbar(sm, cax=cax, label=betweenness_string)
    labels = [0, cbmax / 4, cbmax / 4 * 2, cbmax / 4 * 3, cbmax / 4 * 4]
    cb.set_ticks(labels)
    cb.set_ticklabels(labels)
    cb.ax.yaxis.label.set_font_properties(matplotlib.font_manager.FontProperties(size=16))
    cb.ax.tick_params(labelsize=16)
def plot_socioeconomic_attribute(gdf, colname, cmap='OrRd', linewidth=1.25, edgecolor='grey', maxpop=0):
    """Choropleth map of one socioeconomic attribute of the admin polygons.

    Args:
        gdf (GeoDataFrame): admin-area polygons holding the attribute column
        colname (str): name of the attribute column to map
        cmap (str): matplotlib colormap name for the polygons
        linewidth (float): accepted for interface compatibility (not used)
        edgecolor (str): polygon boundary color
        maxpop (float): upper anchor for the colorbar scale
    """
    values = list(gdf[colname])
    print('maximum number of ' + colname + ' is', max(values))
    fig, ax = plt.subplots(figsize=(12, 9))
    ax.set_aspect('equal')
    # Color polygons between the attribute's observed extremes.
    gdf.plot(ax=ax, column=colname, cmap=cmap, vmin=min(values), vmax=max(values),
             linewidth=0.5, edgecolor=edgecolor, alpha=0.3)
    ax.set_title(colname)
    # Hide the lon-lat axes.
    ax.axis('off')
    # Colorbar anchored to [0, maxpop] regardless of the data range.
    fig = ax.get_figure()
    cax = fig.add_axes([0.7, 0.45, 0.02, 0.43])
    mappable = plt.cm.ScalarMappable(cmap=cmap)
    anchor_values = values + [0, maxpop]
    cbar_max = max(anchor_values)
    mappable.set_array(anchor_values)
    cbar = plt.colorbar(mappable, cax=cax, label=colname, alpha=0.3)
    tick_positions = [0, cbar_max / 4, cbar_max / 4 * 2, cbar_max / 4 * 3, cbar_max / 4 * 4]
    cbar.set_ticks(tick_positions)
    cbar.set_ticklabels(tick_positions)
    cbar.ax.yaxis.label.set_font_properties(matplotlib.font_manager.FontProperties(size=16))
    cbar.ax.tick_params(labelsize=16)
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap covering only the [minval, maxval] slice of cmap.

    Args:
        cmap: source matplotlib colormap
        minval (float): lower bound of the slice in [0, 1]
        maxval (float): upper bound of the slice in [0, 1]
        n (int): number of color samples taken from the slice

    Returns:
        LinearSegmentedColormap: the truncated colormap
    """
    sampled_colors = cmap(np.linspace(minval, maxval, n))
    label = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return colors.LinearSegmentedColormap.from_list(label, sampled_colors)
def _get_percentile(gdf2, col, n):
#get n-th percentile of a DataFrame column
get_col = list(gdf2[col])
get_col = [x for x in get_col if x > 0]
nth_percentile = np.percentile(get_col, n)
return nth_percentile
def plot_network_admcolmap_betweenness_new(gdf, gdf2, colname,betweenness_string,
                               cmap='OrRd', linewidth=1.25, edgecolor='grey',
                               maxbetweenness=0, maxpop=0, perc1=60, perc2=90):
    """Plot the network colored by betweenness over an admin choropleth.

    Same as plot_network_admcolmap_betweenness, but the line-width classes are
    split at the perc1-th and perc2-th percentiles of the (positive)
    betweenness values instead of fixed thresholds.

    Args:
        gdf (GeoDataFrame): admin-area polygons holding the attribute `colname`
        gdf2 (GeoDataFrame): network edges with column `betweenness_string`
        colname (str): polygon attribute used for the grey background map
        betweenness_string (str): edge column holding betweenness values
        cmap (str): colormap name for the network edges
        linewidth (float): base linewidth for edges
        edgecolor (str): polygon boundary color
        maxbetweenness (float): upper anchor for the betweenness colorbar
        maxpop (float): upper anchor for the attribute colorbar
        perc1 (float): percentile separating thin from medium edges
        perc2 (float): percentile separating medium from thick edges
    """
    fig, ax = plt.subplots(figsize=(12,9))
    ax.set_aspect('equal')
    valmin1 = min(list(gdf2[betweenness_string]))
    valmax1 = max(list(gdf2[betweenness_string]))
    # Percentile-based width thresholds (computed over positive values only).
    thres1 = _get_percentile(gdf2, betweenness_string, perc1)
    thres2 = _get_percentile(gdf2, betweenness_string, perc2)
    gdf2.plot(ax=ax, column=betweenness_string, cmap=cmap,vmin=valmin1, vmax=valmax1, linewidth=linewidth)
    #adjust linewidth based on betweenness
    betweenness_list = list(gdf2[betweenness_string])
    #change the linewidth based on the percentile
    betweenness_list = [1 if x < thres1 else 2 if x >= thres1 and x < thres2 else 3 for x in betweenness_list]
    i = 0
    for ln in ax.lines:
        ln.set_linewidth(betweenness_list[i]*1)
        i +=1
    valmin2 = min(list(gdf[colname]))
    valmax2 = max(list(gdf[colname]))
    gdf.plot(ax=ax, column=colname, cmap='Greys',vmin=valmin2, vmax=valmax2, linewidth=0.5, edgecolor=edgecolor, alpha=0.3)
    ax.set_title(colname)
    #remove the lon-lat in the x-y axis of the plot
    ax.axis('off')
    # add colorbar1: the admin attribute, anchored to [0, maxpop]
    fig = ax.get_figure()
    cax = fig.add_axes([0.85, 0.45, 0.02, 0.43])
    sm = plt.cm.ScalarMappable(cmap='Greys')
    columnlist = list(gdf[colname])
    columnlist.append(0)
    columnlist.append(maxpop) #hardcoded, not good
    cbmin, cbmax = min(columnlist), max(columnlist)
    sm.set_array(columnlist)
    cb = plt.colorbar(sm, cax=cax, label = colname, alpha=0.3)
    labels = [0, cbmax/4, cbmax/4*2, cbmax/4*3, cbmax/4*4]
    loc = labels
    cb.set_ticks(loc)
    cb.set_ticklabels(labels)
    cb.ax.yaxis.label.set_font_properties(matplotlib.font_manager.FontProperties(size=16))
    cb.ax.tick_params(labelsize=16)
    #add colorbar2: betweenness, anchored to [cbmin, maxbetweenness]
    fig = ax.get_figure()
    cax = fig.add_axes([0.7, 0.45, 0.02, 0.43])
    sm = plt.cm.ScalarMappable(cmap=cmap)
    columnlist = list(gdf2[betweenness_string])
    # columnlist.append(0)
    columnlist.append(maxbetweenness)
    cbmin, cbmax = min(columnlist), max(columnlist)
    # cbmin, cbmax = round(cbmin,3), round(cbmax,3)
    sm.set_array(columnlist)
    cb = plt.colorbar(sm, cax=cax, label=betweenness_string)
    # Five evenly spaced tick positions between cbmin and cbmax.
    poin1 = cbmin+(cbmax-cbmin)/4
    poin2 = cbmin+(cbmax-cbmin)/4*2
    poin3 = cbmin+(cbmax-cbmin)/4*3
    labels = [cbmin, poin1, poin2, poin3, cbmax]
    loc = labels
    cb.set_ticks(loc)
    cb.set_ticklabels(labels)
    cb.ax.yaxis.label.set_font_properties(matplotlib.font_manager.FontProperties(size=16))
    cb.ax.tick_params(labelsize=16)
def _log_base_n(x,logn):
try:
return math.log(x,logn)
except:
return 0
def plot_od_heatmap(OD_df, gdf_points, log=False, logn=100, division=False):
    """Draw an origin-destination matrix as a heatmap.

    Args:
        OD_df (pandas.DataFrame): square OD matrix (rows=origins, cols=destinations)
        gdf_points: table with 'District' (and, for division=True, 'Node' and
            'Division') columns labeling the matrix axes
        log (bool): if True, rescale each cell by log base `logn` first
        logn (float): logarithm base used when log=True
        division (bool): if True, aggregate districts to division level before
            plotting
    """
    #adopted from http://nbviewer.jupyter.org/gist/joelotz/5427209

    #Scale data logarithmically if we want to dampen the Chittagong effect (tremendous amount of goods
    #is transported to Chittagong)
    if log:
        OD_df = OD_df.applymap(lambda x: _log_base_n(x, logn))

    # Plot it out
    fig, ax = plt.subplots()

    #if we don't want to aggregate to division level
    if not division:
        heatmap = ax.pcolor(OD_df, cmap=plt.cm.Blues, alpha=0.8)

        ##################################################
        ## FORMAT ##
        ##################################################

        fig = plt.gcf()
        fig.set_size_inches(14,14)

        # turn off the frame
        ax.set_frame_on(False)

        # put the major ticks at the middle of each cell
        ax.set_yticks(np.arange(OD_df.shape[0])+0.5, minor=False)
        ax.set_xticks(np.arange(OD_df.shape[1])+0.5, minor=False)

        # want a more natural, table-like display
        ax.invert_yaxis()
        ax.xaxis.tick_top()

        # Set the labels
        ax.set_xticklabels(gdf_points.District, minor=False)
        ax.set_yticklabels(gdf_points.District, minor=False)

    #if we want to aggregate to division level
    else:
        OD_dummy = OD_df.copy()
        gdf_points_dummy = gdf_points.copy()
        # Map node id -> division name, then relabel both axes and sum all
        # district cells that fall in the same division pair.
        node_division_dict = dict(zip(list(gdf_points_dummy['Node']), list(gdf_points_dummy['Division'])))
        # NOTE(review): both index and columns are relabeled from
        # OD_dummy.columns — valid only if the matrix is square with
        # identically ordered axes; confirm with the caller.
        OD_dummy.index = [node_division_dict[x] for x in OD_dummy.columns]
        OD_dummy.columns = [node_division_dict[x] for x in OD_dummy.columns]
        OD_dummy = OD_dummy.groupby(OD_dummy.index).sum().groupby(OD_dummy.columns, axis=1).sum()

        heatmap = ax.pcolor(OD_dummy, cmap=plt.cm.Blues, alpha=0.8)

        ##################################################
        ## FORMAT ##
        ##################################################

        fig = plt.gcf()
        fig.set_size_inches(14,14)

        # turn off the frame
        ax.set_frame_on(False)

        # put the major ticks at the middle of each cell
        ax.set_yticks(np.arange(OD_dummy.shape[0])+0.5, minor=False)
        ax.set_xticks(np.arange(OD_dummy.shape[1])+0.5, minor=False)

        # want a more natural, table-like display
        ax.invert_yaxis()
        ax.xaxis.tick_top()

        # Set the labels
        ax.set_xticklabels(OD_dummy.columns, minor=False, fontsize=18)
        ax.set_yticklabels(OD_dummy.index, minor=False, fontsize=18)

    # rotate the labels
    plt.xticks(rotation=90)

    # give the x and y label
    plt.xlabel('To', fontsize=18)
    ax.xaxis.set_label_position('top')
    plt.ylabel('From', fontsize=18)

    ax.grid(False)

    # Turn off all the ticks
    # NOTE(review): tick1On/tick2On are deprecated attributes in modern
    # matplotlib (replaced by tick_params) — confirm the pinned version.
    ax = plt.gca()
    for t in ax.xaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
    for t in ax.yaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
| [
"matplotlib.pyplot.colorbar",
"numpy.percentile",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.arange",
"matplotlib.pyplot.gcf",
"numpy.linspace",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"math.log",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlab... | [((689, 718), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (701, 718), True, 'from matplotlib import pyplot as plt\n'), ((1874, 1909), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': '"""Greys"""'}), "(cmap='Greys')\n", (1895, 1909), True, 'from matplotlib import pyplot as plt\n'), ((2112, 2163), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax', 'label': 'colname', 'alpha': '(0.3)'}), '(sm, cax=cax, label=colname, alpha=0.3)\n', (2124, 2163), True, 'from matplotlib import pyplot as plt\n'), ((2524, 2556), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (2545, 2556), True, 'from matplotlib import pyplot as plt\n'), ((2808, 2859), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax', 'label': 'betweenness_string'}), '(sm, cax=cax, label=betweenness_string)\n', (2820, 2859), True, 'from matplotlib import pyplot as plt\n'), ((3304, 3333), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (3316, 3333), True, 'from matplotlib import pyplot as plt\n'), ((3762, 3794), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (3783, 3794), True, 'from matplotlib import pyplot as plt\n'), ((3976, 4027), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax', 'label': 'colname', 'alpha': '(0.3)'}), '(sm, cax=cax, label=colname, alpha=0.3)\n', (3988, 4027), True, 'from matplotlib import pyplot as plt\n'), ((4725, 4750), 'numpy.percentile', 'np.percentile', (['get_col', 'n'], {}), '(get_col, n)\n', (4738, 4750), True, 'import numpy as np\n'), ((5049, 5078), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (5061, 5078), True, 'from matplotlib import pyplot as plt\n'), ((6198, 6233), 
'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': '"""Greys"""'}), "(cmap='Greys')\n", (6219, 6233), True, 'from matplotlib import pyplot as plt\n'), ((6436, 6487), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax', 'label': 'colname', 'alpha': '(0.3)'}), '(sm, cax=cax, label=colname, alpha=0.3)\n', (6448, 6487), True, 'from matplotlib import pyplot as plt\n'), ((6848, 6880), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (6869, 6880), True, 'from matplotlib import pyplot as plt\n'), ((7136, 7187), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax', 'label': 'betweenness_string'}), '(sm, cax=cax, label=betweenness_string)\n', (7148, 7187), True, 'from matplotlib import pyplot as plt\n'), ((8028, 8042), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8040, 8042), True, 'from matplotlib import pyplot as plt\n'), ((10208, 10231), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (10218, 10231), True, 'from matplotlib import pyplot as plt\n'), ((10266, 10295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""To"""'], {'fontsize': '(18)'}), "('To', fontsize=18)\n", (10276, 10295), True, 'from matplotlib import pyplot as plt\n'), ((10339, 10370), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""From"""'], {'fontsize': '(18)'}), "('From', fontsize=18)\n", (10349, 10370), True, 'from matplotlib import pyplot as plt\n'), ((10430, 10439), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10437, 10439), True, 'from matplotlib import pyplot as plt\n'), ((7589, 7606), 'math.log', 'math.log', (['x', 'logn'], {}), '(x, logn)\n', (7597, 7606), False, 'import math\n'), ((8338, 8347), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8345, 8347), True, 'from matplotlib import pyplot as plt\n'), ((9606, 9615), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9613, 9615), True, 'from matplotlib 
import pyplot as plt\n'), ((4494, 4524), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', 'n'], {}), '(minval, maxval, n)\n', (4505, 4524), True, 'import numpy as np\n'), ((8524, 8549), 'numpy.arange', 'np.arange', (['OD_df.shape[0]'], {}), '(OD_df.shape[0])\n', (8533, 8549), True, 'import numpy as np\n'), ((8590, 8615), 'numpy.arange', 'np.arange', (['OD_df.shape[1]'], {}), '(OD_df.shape[1])\n', (8599, 8615), True, 'import numpy as np\n'), ((9792, 9820), 'numpy.arange', 'np.arange', (['OD_dummy.shape[0]'], {}), '(OD_dummy.shape[0])\n', (9801, 9820), True, 'import numpy as np\n'), ((9861, 9889), 'numpy.arange', 'np.arange', (['OD_dummy.shape[1]'], {}), '(OD_dummy.shape[1])\n', (9870, 9889), True, 'import numpy as np\n')] |
import numpy as np
from didyprog.reference.local import HardMaxOp, SparseMaxOp, SoftMaxOp
def make_data():
    """Return a fixed pseudo-random vector of 10 integers in [-10, 10)."""
    # Fixed seed so every test operates on the same deterministic input.
    return np.random.RandomState(0).randint(-10, 10, size=10)
def test_hardmax():
    """HardMaxOp.max must report the true maximum with a selecting argmax."""
    data = make_data()
    max_val, argmax_vec = HardMaxOp().max(data)
    # No element may exceed the reported maximum ...
    assert np.all(data <= max_val)
    # ... and the argmax weights must pick out exactly that maximum.
    assert np.sum(argmax_vec * data) == max_val
def test_sparsemax():
    """SparseMaxOp.max must yield non-negative weights summing to one."""
    data = make_data()
    max_val, weights = SparseMaxOp().max(data)
    # The argmax must be a valid probability distribution.
    assert np.all(weights >= 0.)
    assert np.sum(weights) == 1.
def test_softmax():
    # SoftMaxOp.max must yield a probability-like argmax:
    # non-negative weights that sum to one.
    x = make_data()
    op = SoftMaxOp()
    max_x, argmax_x = op.max(x)
    assert np.all(argmax_x >= 0.)
    assert np.sum(argmax_x) == 1. | [
"numpy.sum",
"didyprog.reference.local.HardMaxOp",
"didyprog.reference.local.SparseMaxOp",
"didyprog.reference.local.SoftMaxOp",
"numpy.random.RandomState",
"numpy.all"
] | [((120, 144), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (141, 144), True, 'import numpy as np\n'), ((237, 248), 'didyprog.reference.local.HardMaxOp', 'HardMaxOp', ([], {}), '()\n', (246, 248), False, 'from didyprog.reference.local import HardMaxOp, SparseMaxOp, SoftMaxOp\n'), ((292, 310), 'numpy.all', 'np.all', (['(x <= max_x)'], {}), '(x <= max_x)\n', (298, 310), True, 'import numpy as np\n'), ((405, 418), 'didyprog.reference.local.SparseMaxOp', 'SparseMaxOp', ([], {}), '()\n', (416, 418), False, 'from didyprog.reference.local import HardMaxOp, SparseMaxOp, SoftMaxOp\n'), ((462, 485), 'numpy.all', 'np.all', (['(argmax_x >= 0.0)'], {}), '(argmax_x >= 0.0)\n', (468, 485), True, 'import numpy as np\n'), ((570, 581), 'didyprog.reference.local.SoftMaxOp', 'SoftMaxOp', ([], {}), '()\n', (579, 581), False, 'from didyprog.reference.local import HardMaxOp, SparseMaxOp, SoftMaxOp\n'), ((625, 648), 'numpy.all', 'np.all', (['(argmax_x >= 0.0)'], {}), '(argmax_x >= 0.0)\n', (631, 648), True, 'import numpy as np\n'), ((322, 342), 'numpy.sum', 'np.sum', (['(argmax_x * x)'], {}), '(argmax_x * x)\n', (328, 342), True, 'import numpy as np\n'), ((496, 512), 'numpy.sum', 'np.sum', (['argmax_x'], {}), '(argmax_x)\n', (502, 512), True, 'import numpy as np\n'), ((659, 675), 'numpy.sum', 'np.sum', (['argmax_x'], {}), '(argmax_x)\n', (665, 675), True, 'import numpy as np\n')] |
from collections import defaultdict
import numpy
try:
# try importing the C version and set docstring
from .hv import hypervolume as __hv
except ImportError:
# fallback on python version
from .pyhv import hypervolume as __hv
def argsortNondominated(losses, k, first_front_only=False):
    """Sort input in Pareto-equal groups.
    Sort the first *k* *losses* into different nondomination levels
    using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
    see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
    where :math:`M` is the number of objectives and :math:`N` the number of
    losses.
    :param losses: A list of losses to select from.
    :param k: The number of elements to select.
    :param first_front_only: If :obj:`True` sort only the first front and
        exit (in that case a single front -- a flat list of indices -- is
        returned instead of a list of fronts).
    :returns: A list of Pareto fronts (lists) containing the losses
        index.
    .. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
       non-dominated sorting genetic algorithm for multi-objective
       optimization: NSGA-II", 2002.
    """
    if k == 0:
        return []
    # Deduplicate: map each unique loss vector to all indices that share it,
    # so domination is computed once per unique vector.
    loss2c = defaultdict(list)
    for i, c in enumerate(losses):
        loss2c[tuple(c)].append(i)
    losses_keys = list(loss2c.keys())
    current_front = []
    next_front = []
    # dominating_losses[x]: number of vectors that dominate x;
    # dominated_losses[x]: vectors dominated by x.
    dominating_losses = defaultdict(int)
    dominated_losses = defaultdict(list)
    # Rank first Pareto front
    for i, li in enumerate(losses_keys):
        for lj in losses_keys[i+1:]:
            if dominates(li, lj):
                dominating_losses[lj] += 1
                dominated_losses[li].append(lj)
            elif dominates(lj, li):
                dominating_losses[li] += 1
                dominated_losses[lj].append(li)
        if dominating_losses[li] == 0:
            current_front.append(li)
    fronts = [[]]
    for loss in current_front:
        fronts[0].extend(loss2c[loss])
    pareto_sorted = len(fronts[0])
    if first_front_only:
        return fronts[0]
    # Rank the next front until at least the requested number
    # candidates are sorted
    N = min(len(losses), k)
    while pareto_sorted < N:
        fronts.append([])
        for lp in current_front:
            for ld in dominated_losses[lp]:
                # Peeling off the current front may expose new
                # non-dominated vectors for the next front.
                dominating_losses[ld] -= 1
                if dominating_losses[ld] == 0:
                    next_front.append(ld)
                    pareto_sorted += len(loss2c[ld])
                    fronts[-1].extend(loss2c[ld])
        current_front = next_front
        next_front = []
    return fronts
def dominates(loss1, loss2, obj=slice(None)):
    """Return True iff ``loss1`` Pareto-dominates ``loss2`` (minimization).

    ``loss1`` dominates when it is no worse in every compared objective and
    strictly better in at least one.  ``obj`` optionally restricts the
    comparison to a slice of the objectives.
    """
    strictly_better = False
    for a, b in zip(loss1[obj], loss2[obj]):
        if a > b:
            # Worse in at least one objective: cannot dominate.
            return False
        if a < b:
            strictly_better = True
    return strictly_better
def hypervolume(pointset, ref):
    """Compute the hypervolume of a point set.

    Args:
        pointset: A list of points.
        ref: The reference point from which to compute the hypervolume.
            This value should be larger than all values in the point set.

    Returns:
        The hypervolume of this point set.
    """
    # Delegate to whichever backend was imported at module load time
    # (C extension when available, pure-Python fallback otherwise).
    volume = __hv(pointset, ref)
    return volume
def hypervolume_indicator(front, **kargs):
"""Indicator function using the hypervolume value.
Computes the contribution of each of the front candidates to the
front hypervolume. The hypervolume indicator assumes minimization.
Args:
front: A list of Pareto equal candidate solutions.
ref: The origin from which to compute the hypervolume (optional).
If not given, ref is set to the maximum value in each dimension + 1.
Returns:
The index of the least contributing candidate.
"""
# Hypervolume use implicit minimization
obj = numpy.array(front)
ref = kargs.get("ref", None)
if ref is None:
ref = numpy.max(obj, axis=0) + 1
def contribution(i):
# The contribution of point p_i in point set P
# is the hypervolume of P without p_i
return hypervolume(numpy.concatenate((obj[:i], obj[i+1:])), ref)
contrib_values = map(contribution, range(len(front)))
# Select the maximum hypervolume value (correspond to the minimum difference)
return numpy.argmax(contrib_values) | [
"numpy.argmax",
"collections.defaultdict",
"numpy.max",
"numpy.array",
"numpy.concatenate"
] | [((1185, 1202), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1196, 1202), False, 'from collections import defaultdict\n'), ((1379, 1395), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1390, 1395), False, 'from collections import defaultdict\n'), ((1419, 1436), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1430, 1436), False, 'from collections import defaultdict\n'), ((3920, 3938), 'numpy.array', 'numpy.array', (['front'], {}), '(front)\n', (3931, 3938), False, 'import numpy\n'), ((4386, 4414), 'numpy.argmax', 'numpy.argmax', (['contrib_values'], {}), '(contrib_values)\n', (4398, 4414), False, 'import numpy\n'), ((4006, 4028), 'numpy.max', 'numpy.max', (['obj'], {'axis': '(0)'}), '(obj, axis=0)\n', (4015, 4028), False, 'import numpy\n'), ((4187, 4228), 'numpy.concatenate', 'numpy.concatenate', (['(obj[:i], obj[i + 1:])'], {}), '((obj[:i], obj[i + 1:]))\n', (4204, 4228), False, 'import numpy\n')] |
import numpy as np
class NoControllerFound(Exception):
    """Raised when there is no common controller for the sampled systems"""
    pass
class NumericalProblem(Exception):
    """Raised when a numerical problem is encountered."""
    pass
class LQRSyntheziser:
    """Sets up LQR synthesis for an uncertain state-space model.

    Stores the model and the normalized cost matrices together with the
    confidence interval taken from ``settings``.
    """

    def __init__(self, uncertainStateSpaceModel, Q, R, settings):
        """Validate the cost matrices against the model and normalize them.

        :param uncertainStateSpaceModel: model exposing ``dim`` as
            (n_states, n_inputs, ...).
        :param Q: state cost matrix, shape (n_states, n_states).
        :param R: input cost matrix, shape (n_inputs, n_inputs).
        :param settings: dict providing at least 'confidence_interval'.
        """
        self.ussm = uncertainStateSpaceModel
        model_dim = self.ussm.dim
        n_states = model_dim[0]
        n_inputs = model_dim[1]
        # The cost matrices must match the model dimensions.
        assert (Q.shape == (n_states, n_states))
        assert (R.shape == (n_inputs, n_inputs))
        # Normalize both cost matrices by the mean of their spectral norms
        # so the overall cost scaling does not affect the synthesis.
        q_norm = np.linalg.norm(Q, ord=2, keepdims=True)
        r_norm = np.linalg.norm(R, ord=2, keepdims=True)
        mean_norm = (q_norm + r_norm) / 2
        self.Q = Q / mean_norm
        self.R = R / mean_norm
        self.confidence_interval = settings['confidence_interval']
        self.verbosity = 0
| [
"numpy.linalg.norm"
] | [((542, 581), 'numpy.linalg.norm', 'np.linalg.norm', (['Q'], {'ord': '(2)', 'keepdims': '(True)'}), '(Q, ord=2, keepdims=True)\n', (556, 581), True, 'import numpy as np\n'), ((599, 638), 'numpy.linalg.norm', 'np.linalg.norm', (['R'], {'ord': '(2)', 'keepdims': '(True)'}), '(R, ord=2, keepdims=True)\n', (613, 638), True, 'import numpy as np\n')] |
import random
import numpy as np
class MNIST_DS(object):
    """Triplet sampler over MNIST-style train/test datasets.

    Wraps two torchvision-like datasets (each exposing ``.data`` and
    ``.targets`` tensors) and serves (anchor, positive, negative) image
    triplets for metric learning.  ``load()`` must be called before
    ``getTriplet``.  Requires at least two distinct labels and at least two
    samples per label in each split.
    """

    def __init__(self, train_dataset, test_dataset):
        self.__train_labels_idx_map = {}
        self.__test_labels_idx_map = {}
        self.__train_data = train_dataset.data
        self.__test_data = test_dataset.data
        self.__train_labels = train_dataset.targets
        self.__test_labels = test_dataset.targets
        self.__train_labels_np = self.__train_labels.numpy()
        self.__train_unique_labels = np.unique(self.__train_labels_np)
        self.__test_labels_np = self.__test_labels.numpy()
        self.__test_unique_labels = np.unique(self.__test_labels_np)

    def load(self):
        """Build the label -> sample-index lookup tables for both splits."""
        self.__train_labels_idx_map = {
            label: np.where(self.__train_labels_np == label)[0]
            for label in self.__train_unique_labels
        }
        self.__test_labels_idx_map = {
            label: np.where(self.__test_labels_np == label)[0]
            for label in self.__test_unique_labels
        }

    def getTriplet(self, split="train"):
        """Sample one (anchor, positive, negative) triplet of images.

        :param split: 'train' to sample from the training split, anything
            else samples from the test split.
        :returns: tuple of numpy arrays (anchor, positive, negative) where
            anchor and positive are distinct images sharing a label and
            negative carries a different label.
        """
        if split == 'train':
            unique_labels = self.__train_unique_labels
            label_idx_map = self.__train_labels_idx_map
            data = self.__train_data
        else:
            unique_labels = self.__test_unique_labels
            label_idx_map = self.__test_labels_idx_map
            data = self.__test_data
        # BUG FIX: the original code compared numpy scalars with `is`, which
        # tests object identity, not value equality.  Freshly indexed numpy
        # scalars are never the same object, so the "resample until
        # different" loops exited immediately: the negative could share the
        # positive's label and the positive could equal the anchor.
        pos_label = unique_labels[random.randint(0, len(unique_labels) - 1)]
        neg_label = pos_label
        while neg_label == pos_label:
            neg_label = unique_labels[random.randint(0, len(unique_labels) - 1)]
        pos_indices = label_idx_map[pos_label]
        anchor_idx = pos_indices[random.randint(0, len(pos_indices) - 1)]
        pos_idx = anchor_idx
        while pos_idx == anchor_idx:
            pos_idx = pos_indices[random.randint(0, len(pos_indices) - 1)]
        neg_indices = label_idx_map[neg_label]
        neg_idx = neg_indices[random.randint(0, len(neg_indices) - 1)]
        pos_anchor_img = data[anchor_idx].numpy()
        pos_img = data[pos_idx].numpy()
        neg_img = data[neg_idx].numpy()
        return pos_anchor_img, pos_img, neg_img
| [
"numpy.where",
"numpy.unique"
] | [((488, 521), 'numpy.unique', 'np.unique', (['self.__train_labels_np'], {}), '(self.__train_labels_np)\n', (497, 521), True, 'import numpy as np\n'), ((618, 650), 'numpy.unique', 'np.unique', (['self.__test_labels_np'], {}), '(self.__test_labels_np)\n', (627, 650), True, 'import numpy as np\n'), ((811, 852), 'numpy.where', 'np.where', (['(self.__train_labels_np == label)'], {}), '(self.__train_labels_np == label)\n', (819, 852), True, 'import numpy as np\n'), ((993, 1033), 'numpy.where', 'np.where', (['(self.__test_labels_np == label)'], {}), '(self.__test_labels_np == label)\n', (1001, 1033), True, 'import numpy as np\n')] |
import numpy as np
from os import path
try:
from mahotas.io import freeimage
except OSError:
import pytest
pytestmark = pytest.mark.skip
def test_freeimage(tmpdir):
    """Round-trip a uint8 image through PNG and verify it is unchanged."""
    original = np.arange(256, dtype=np.uint8).reshape((16, 16))
    target = tmpdir.join('mahotas_test.png')
    freeimage.imsave(target, original)
    reloaded = freeimage.imread(target)
    assert reloaded.shape == original.shape
    assert np.all(reloaded == original)
def test_as_grey(tmpdir):
    """Reading a colour image with as_grey=True must drop the channel axis."""
    target = tmpdir.join('mahotas_test.png')
    rgb = np.arange(16 * 16 * 3).reshape((16, 16, 3))
    freeimage.imsave(target, rgb.astype(np.uint8))
    grey = freeimage.imread(target, as_grey=True)
    # Grey output is 2-D with the same spatial extent as the colour input.
    assert grey.ndim == 2
    assert grey.shape == rgb.shape[:-1]
def test_rgba():
    """The bundled RGBA fixture must keep its alpha gradient on load."""
    fname = path.join(path.dirname(__file__), 'data', 'rgba.png')
    img = freeimage.imread(fname)
    # Per-row mean of the alpha channel strictly decreases top to bottom.
    assert np.all(np.diff(img[:, :, 3].mean(1)) < 0)
def test_save_load_rgba(tmpdir):
    """Round-trip an 8x8x4 RGBA uint8 image and verify exact equality."""
    target = tmpdir.join('mahotas_test.png')
    original = np.arange(256, dtype=np.uint8).reshape((8, 8, 4))
    freeimage.imsave(target, original)
    reloaded = freeimage.imread(target)
    assert reloaded.shape == original.shape
    assert np.all(reloaded == original)
def test_fromblob():
    """imsavetoblob/imreadfromblob must round-trip for PNG and BMP."""
    img = np.arange(100, dtype=np.uint8).reshape((10, 10))
    for fmt_name in ('t.png', 't.bmp'):
        blob = freeimage.imsavetoblob(img, fmt_name)
        assert np.all(freeimage.imreadfromblob(blob) == img)
def test_1bpp():
    """A 1-bit-per-pixel BMP fixture must load with set and unset pixels."""
    fname = path.join(path.dirname(__file__), 'data', '1bpp.bmp')
    img = freeimage.imread(fname)
    total = img.sum()
    # At least one pixel is set ...
    assert total
    # ... but not every pixel.
    assert total < img.size
def test_multi(tmpdir):
    """write_multipage/read_multipage must round-trip a stack of frames."""
    target = tmpdir.join('/mahotas_test.tif')
    frame = np.zeros((16, 16), np.uint8)
    frames = []
    # Build 8 frames, each with a growing t-by-t block of value t.
    for t in range(8):
        frame[:t, :t] = t
        frames.append(frame.copy())
    freeimage.write_multipage(frames, target)
    for written, restored in zip(frames, freeimage.read_multipage(target)):
        assert np.all(written == restored)
def test_uint16(tmpdir):
    """A uint16 image must round-trip with dtype, shape and data intact."""
    original = np.zeros((32, 32), dtype=np.uint16)
    target = tmpdir.join('mahotas_test.png')
    freeimage.imsave(target, original)
    reloaded = freeimage.imread(target)
    assert reloaded.shape == original.shape
    assert reloaded.dtype == original.dtype
    assert np.all(reloaded == original)
| [
"mahotas.io.freeimage.imsave",
"mahotas.io.freeimage.imreadfromblob",
"mahotas.io.freeimage.imsavetoblob",
"mahotas.io.freeimage.read_multipage",
"numpy.zeros",
"mahotas.io.freeimage.imread",
"os.path.dirname",
"mahotas.io.freeimage.write_multipage",
"numpy.arange",
"numpy.all"
] | [((288, 316), 'mahotas.io.freeimage.imsave', 'freeimage.imsave', (['fname', 'img'], {}), '(fname, img)\n', (304, 316), False, 'from mahotas.io import freeimage\n'), ((328, 351), 'mahotas.io.freeimage.imread', 'freeimage.imread', (['fname'], {}), '(fname)\n', (344, 351), False, 'from mahotas.io import freeimage\n'), ((398, 417), 'numpy.all', 'np.all', (['(img == img_)'], {}), '(img == img_)\n', (404, 417), True, 'import numpy as np\n'), ((603, 640), 'mahotas.io.freeimage.imread', 'freeimage.imread', (['fname'], {'as_grey': '(True)'}), '(fname, as_grey=True)\n', (619, 640), False, 'from mahotas.io import freeimage\n'), ((855, 877), 'mahotas.io.freeimage.imread', 'freeimage.imread', (['rgba'], {}), '(rgba)\n', (871, 877), False, 'from mahotas.io import freeimage\n'), ((1112, 1140), 'mahotas.io.freeimage.imsave', 'freeimage.imsave', (['fname', 'img'], {}), '(fname, img)\n', (1128, 1140), False, 'from mahotas.io import freeimage\n'), ((1152, 1175), 'mahotas.io.freeimage.imread', 'freeimage.imread', (['fname'], {}), '(fname)\n', (1168, 1175), False, 'from mahotas.io import freeimage\n'), ((1222, 1241), 'numpy.all', 'np.all', (['(img == img_)'], {}), '(img == img_)\n', (1228, 1241), True, 'import numpy as np\n'), ((1330, 1366), 'mahotas.io.freeimage.imsavetoblob', 'freeimage.imsavetoblob', (['img', '"""t.png"""'], {}), "(img, 't.png')\n", (1352, 1366), False, 'from mahotas.io import freeimage\n'), ((1430, 1466), 'mahotas.io.freeimage.imsavetoblob', 'freeimage.imsavetoblob', (['img', '"""t.bmp"""'], {}), "(img, 't.bmp')\n", (1452, 1466), False, 'from mahotas.io import freeimage\n'), ((1663, 1684), 'mahotas.io.freeimage.imread', 'freeimage.imread', (['bpp'], {}), '(bpp)\n', (1679, 1684), False, 'from mahotas.io import freeimage\n'), ((1818, 1846), 'numpy.zeros', 'np.zeros', (['(16, 16)', 'np.uint8'], {}), '((16, 16), np.uint8)\n', (1826, 1846), True, 'import numpy as np\n'), ((1930, 1968), 'mahotas.io.freeimage.write_multipage', 'freeimage.write_multipage', (['fs', 
'testtif'], {}), '(fs, testtif)\n', (1955, 1968), False, 'from mahotas.io import freeimage\n'), ((1979, 2012), 'mahotas.io.freeimage.read_multipage', 'freeimage.read_multipage', (['testtif'], {}), '(testtif)\n', (2003, 2012), False, 'from mahotas.io import freeimage\n'), ((2110, 2145), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {'dtype': 'np.uint16'}), '((32, 32), dtype=np.uint16)\n', (2118, 2145), True, 'import numpy as np\n'), ((2193, 2221), 'mahotas.io.freeimage.imsave', 'freeimage.imsave', (['fname', 'img'], {}), '(fname, img)\n', (2209, 2221), False, 'from mahotas.io import freeimage\n'), ((2233, 2256), 'mahotas.io.freeimage.imread', 'freeimage.imread', (['fname'], {}), '(fname)\n', (2249, 2256), False, 'from mahotas.io import freeimage\n'), ((2339, 2358), 'numpy.all', 'np.all', (['(img == img_)'], {}), '(img == img_)\n', (2345, 2358), True, 'import numpy as np\n'), ((768, 790), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (780, 790), False, 'from os import path\n'), ((1577, 1599), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (1589, 1599), False, 'from os import path\n'), ((2057, 2072), 'numpy.all', 'np.all', (['(f == f2)'], {}), '(f == f2)\n', (2063, 2072), True, 'import numpy as np\n'), ((503, 525), 'numpy.arange', 'np.arange', (['(16 * 16 * 3)'], {}), '(16 * 16 * 3)\n', (512, 525), True, 'import numpy as np\n'), ((1274, 1304), 'numpy.arange', 'np.arange', (['(100)'], {'dtype': 'np.uint8'}), '(100, dtype=np.uint8)\n', (1283, 1304), True, 'import numpy as np\n'), ((1385, 1412), 'mahotas.io.freeimage.imreadfromblob', 'freeimage.imreadfromblob', (['s'], {}), '(s)\n', (1409, 1412), False, 'from mahotas.io import freeimage\n'), ((1485, 1512), 'mahotas.io.freeimage.imreadfromblob', 'freeimage.imreadfromblob', (['s'], {}), '(s)\n', (1509, 1512), False, 'from mahotas.io import freeimage\n'), ((190, 204), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (199, 204), True, 'import numpy as np\n'), ((1059, 
1073), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (1068, 1073), True, 'import numpy as np\n')] |
# -------------------------------------------------------------------------------
# Licence:
# Copyright (c) 2012-2018 <NAME>
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Name: aggregates.py
# Purpose:
#
# Author: <NAME>
#
# Created: 28/08/2018
# -------------------------------------------------------------------------------
import numpy as np
from scipy import stats
class Ensemble:
    """Quantile-style aggregator over a stream of (value, probability) pairs.

    ``step`` collects non-None values and remembers the last probability;
    ``finalize`` fits a normal distribution to the collected values and
    returns the first (sorted) value whose CDF exceeds that probability.
    """
    def __init__(self):
        # Default quantile level used when no probability was ever recorded.
        self.p = 0.5
        self.values = []
    def step(self, value,probability):
        # NOTE(review): `value!=None` would be more idiomatic as
        # `value is not None`; behaviour is the same for numbers and None.
        if (value!=None):
            self.p = probability
            self.values.append(value)
    def finalize(self):
        self.values = np.sort(self.values)
        mean,std = np.mean(self.values),np.std(self.values)
        # NOTE(review): if all collected values are identical, std == 0 and
        # the CDF may evaluate to NaN, falling through to the final return.
        cdf = stats.norm.cdf(self.values,mean,std)
        for j in range(len(cdf)):
            if cdf[j]>self.p:
                return self.values[j]
        return self.values[-1] if len(self.values)>0 else 0.0 | [
"scipy.stats.norm.cdf",
"numpy.sort",
"numpy.mean",
"numpy.std"
] | [((1261, 1281), 'numpy.sort', 'np.sort', (['self.values'], {}), '(self.values)\n', (1268, 1281), True, 'import numpy as np\n'), ((1356, 1394), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['self.values', 'mean', 'std'], {}), '(self.values, mean, std)\n', (1370, 1394), False, 'from scipy import stats\n'), ((1301, 1321), 'numpy.mean', 'np.mean', (['self.values'], {}), '(self.values)\n', (1308, 1321), True, 'import numpy as np\n'), ((1322, 1341), 'numpy.std', 'np.std', (['self.values'], {}), '(self.values)\n', (1328, 1341), True, 'import numpy as np\n')] |
import copy
import gc
import os
import pickle
import re
import sys
import tempfile
import unittest
import numpy as np
from sklearn.exceptions import NotFittedError
try:
from deep_ner.elmo_ner import ELMo_NER
from deep_ner.utils import load_dataset
from deep_ner.quality import calculate_prediction_quality
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from deep_ner.elmo_ner import ELMo_NER
from deep_ner.utils import load_dataset
from deep_ner.quality import calculate_prediction_quality
class TestELMoNER(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Shared TF-Hub handle for the ELMo module used by every test case.
        cls.ELMO_HUB_MODULE = 'http://files.deeppavlov.ai/deeppavlov_data/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz'
def tearDown(self):
if hasattr(self, 'ner'):
del self.ner
if hasattr(self, 'another_ner'):
del self.another_ner
if hasattr(self, 'temp_file_name'):
if os.path.isfile(self.temp_file_name):
os.remove(self.temp_file_name)
def test_creation(self):
self.ner = ELMo_NER(elmo_hub_module_handle=self.ELMO_HUB_MODULE)
self.assertIsInstance(self.ner, ELMo_NER)
self.assertTrue(hasattr(self.ner, 'batch_size'))
self.assertTrue(hasattr(self.ner, 'lr'))
self.assertTrue(hasattr(self.ner, 'l2_reg'))
self.assertTrue(hasattr(self.ner, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(self.ner, 'finetune_elmo'))
self.assertTrue(hasattr(self.ner, 'max_epochs'))
self.assertTrue(hasattr(self.ner, 'patience'))
self.assertTrue(hasattr(self.ner, 'random_seed'))
self.assertTrue(hasattr(self.ner, 'gpu_memory_frac'))
self.assertTrue(hasattr(self.ner, 'max_seq_length'))
self.assertTrue(hasattr(self.ner, 'validation_fraction'))
self.assertTrue(hasattr(self.ner, 'verbose'))
self.assertIsInstance(self.ner.batch_size, int)
self.assertIsInstance(self.ner.lr, float)
self.assertIsInstance(self.ner.l2_reg, float)
self.assertIsInstance(self.ner.finetune_elmo, bool)
self.assertIsInstance(self.ner.max_epochs, int)
self.assertIsInstance(self.ner.patience, int)
self.assertIsNone(self.ner.random_seed)
self.assertIsInstance(self.ner.gpu_memory_frac, float)
self.assertIsInstance(self.ner.max_seq_length, int)
self.assertIsInstance(self.ner.validation_fraction, float)
self.assertIsInstance(self.ner.verbose, bool)
    def test_check_params_positive(self):
        # A fully specified, well-typed argument set must pass validation
        # without raising.
        ELMo_NER.check_params(
            elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512, lr=1e-3,
            l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
            random_seed=42
        )
        self.assertTrue(True)
    def test_check_params_negative001(self):
        # Missing `elmo_hub_module_handle` must be rejected.
        true_err_msg = re.escape('`elmo_hub_module_handle` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                finetune_elmo=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, validation_fraction=0.1,
                max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
            )

    def test_check_params_negative002(self):
        # `elmo_hub_module_handle` of a wrong type (int, not str) must be rejected.
        true_err_msg = re.escape('`elmo_hub_module_handle` is wrong! Expected `{0}`, got `{1}`.'.format(
            type('abc'), type(123)))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=1, finetune_elmo=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4,
                validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
            )

    def test_check_params_negative003(self):
        # Missing `batch_size` must be rejected.
        true_err_msg = re.escape('`batch_size` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, max_seq_length=512, lr=1e-3,
                l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
                random_seed=42
            )

    def test_check_params_negative004(self):
        # `batch_size` of a wrong type (str, not int) must be rejected.
        true_err_msg = re.escape('`batch_size` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size='32', max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )

    def test_check_params_negative005(self):
        # A non-positive `batch_size` must be rejected.
        true_err_msg = re.escape('`batch_size` is wrong! Expected a positive integer value, but -3 is not positive.')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=-3, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative006(self):
        # Missing `max_epochs` must be rejected.
        true_err_msg = re.escape('`max_epochs` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, patience=3, gpu_memory_frac=1.0, verbose=False,
                random_seed=42
            )

    def test_check_params_negative007(self):
        # `max_epochs` of a wrong type (str, not int) must be rejected.
        true_err_msg = re.escape('`max_epochs` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs='10', patience=3,
                gpu_memory_frac=1.0, verbose=False, random_seed=42
            )

    def test_check_params_negative008(self):
        # A non-positive `max_epochs` must be rejected.
        true_err_msg = re.escape('`max_epochs` is wrong! Expected a positive integer value, but -3 is not positive.')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=-3, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )

    def test_check_params_negative009(self):
        # Missing `patience` must be rejected.
        true_err_msg = re.escape('`patience` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, gpu_memory_frac=1.0, verbose=False,
                random_seed=42
            )

    def test_check_params_negative010(self):
        # `patience` of a wrong type (str, not int) must be rejected.
        true_err_msg = re.escape('`patience` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience='3', gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )

    def test_check_params_negative011(self):
        # A non-positive `patience` must be rejected.
        true_err_msg = re.escape('`patience` is wrong! Expected a positive integer value, but -3 is not positive.')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=-3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
def test_check_params_negative012(self):
true_err_msg = re.escape('`max_seq_length` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE,
finetune_elmo=True, batch_size=32, lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10,
patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
)
    def test_check_params_negative013(self):
        """check_params must raise ValueError when `max_seq_length` is a str instead of int."""
        true_err_msg = re.escape('`max_seq_length` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length='512',
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative014(self):
        """check_params must raise ValueError when `max_seq_length` is not positive."""
        true_err_msg = re.escape('`max_seq_length` is wrong! Expected a positive integer value, but -3 is not '
                                 'positive.')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=-3,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative015(self):
        """check_params must raise ValueError when `validation_fraction` is omitted."""
        true_err_msg = re.escape('`validation_fraction` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
            )
    def test_check_params_negative016(self):
        """check_params must raise ValueError when `validation_fraction` is a str instead of float."""
        true_err_msg = re.escape('`validation_fraction` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3.5), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction='0.1', max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative017(self):
        """check_params must raise ValueError when `validation_fraction` is negative."""
        # NOTE(review): unlike sibling tests, this pattern is not passed through re.escape();
        # the unescaped `.` metacharacters match loosely, so the assertion still passes,
        # but wrapping in re.escape() would be more precise — confirm before tightening.
        true_err_msg = '`validation_fraction` is wrong! Expected a positive floating-point value less than 1.0, but ' \
                       '{0} is not positive.'.format(-0.1)
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=-0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative018(self):
        """check_params must raise ValueError when `validation_fraction` >= 1.0."""
        # NOTE(review): pattern is not re.escape()d (cf. sibling tests); the loose `.`
        # metacharacters still match, but escaping would be more precise.
        true_err_msg = '`validation_fraction` is wrong! Expected a positive floating-point value less than 1.0, but ' \
                       '{0} is not less than 1.0.'.format(1.1)
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=1.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative019(self):
        """check_params must raise ValueError when `gpu_memory_frac` is omitted."""
        true_err_msg = re.escape('`gpu_memory_frac` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, verbose=False, random_seed=42
            )
    def test_check_params_negative020(self):
        """check_params must raise ValueError when `gpu_memory_frac` is a str instead of float."""
        true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3.5), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac='1.0',
                verbose=False, random_seed=42
            )
    def test_check_params_negative021(self):
        """check_params must raise ValueError when `gpu_memory_frac` is below the (0.0, 1.0] range."""
        true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected a floating-point value in the (0.0, 1.0], '
                                 'but {0} is not proper.'.format(-1.0))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=-1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative022(self):
        """check_params must raise ValueError when `gpu_memory_frac` is above the (0.0, 1.0] range."""
        true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected a floating-point value in the (0.0, 1.0], '
                                 'but {0} is not proper.'.format(1.3))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.3,
                verbose=False, random_seed=42
            )
    def test_check_params_negative023(self):
        """check_params must raise ValueError when `lr` is omitted."""
        true_err_msg = re.escape('`lr` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
                random_seed=42
            )
    def test_check_params_negative024(self):
        """check_params must raise ValueError when `lr` is a str instead of float."""
        true_err_msg = re.escape('`lr` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3.5), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr='1e-3', l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative025(self):
        """check_params must raise ValueError when `lr` is not positive."""
        true_err_msg = re.escape('`lr` is wrong! Expected a positive floating-point value, but {0} is not '
                                 'positive.'.format(0.0))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=0.0, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative026(self):
        """check_params must raise ValueError when `lr` is omitted."""
        # NOTE(review): this test is an exact duplicate of test_check_params_negative023;
        # it was probably intended to cover a different parameter — confirm and repoint.
        true_err_msg = re.escape('`lr` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
                random_seed=42
            )
    def test_check_params_negative027(self):
        """check_params must raise ValueError when `lr` is a str instead of float."""
        # NOTE(review): exact duplicate of test_check_params_negative024 — likely meant to
        # exercise a different parameter; confirm and repoint.
        true_err_msg = re.escape('`lr` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3.5), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr='1e-3', l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative028(self):
        """check_params must raise ValueError when `lr` is not positive."""
        # NOTE(review): exact duplicate of test_check_params_negative025 — likely meant to
        # exercise a different parameter; confirm and repoint.
        true_err_msg = re.escape('`lr` is wrong! Expected a positive floating-point value, but {0} is not '
                                 'positive.'.format(0.0))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=0.0, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative029(self):
        """check_params must raise ValueError when `l2_reg` is omitted."""
        true_err_msg = re.escape('`l2_reg` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
                random_seed=42
            )
    def test_check_params_negative030(self):
        """check_params must raise ValueError when `l2_reg` is a str instead of float."""
        true_err_msg = re.escape('`l2_reg` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(3.5), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg='1e-4', validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative031(self):
        """check_params must raise ValueError when `l2_reg` is negative."""
        true_err_msg = re.escape('`l2_reg` is wrong! Expected a non-negative floating-point value, but {0} is '
                                 'negative.'.format(-2.0))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=-2.0, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative032(self):
        """check_params must raise ValueError when `finetune_elmo` is omitted."""
        true_err_msg = re.escape('`finetune_elmo` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4,
                validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
            )
    def test_check_params_negative033(self):
        """check_params must raise ValueError when `finetune_elmo` is a str instead of bool."""
        true_err_msg = re.escape('`finetune_elmo` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(True), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo='True', batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose=False, random_seed=42
            )
    def test_check_params_negative034(self):
        """check_params must raise ValueError when `verbose` is omitted."""
        true_err_msg = re.escape('`verbose` is not specified!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                random_seed=42
            )
    def test_check_params_negative035(self):
        """check_params must raise ValueError when `verbose` is a str instead of bool."""
        true_err_msg = re.escape('`verbose` is wrong! Expected `{0}`, got `{1}`.'.format(
            type(True), type('3')))
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_params(
                elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
                lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
                verbose='False', random_seed=42
            )
    def test_check_X_positive(self):
        """check_X must accept a plain list of strings without raising."""
        X = ['abc', 'defgh', '4wdffg']
        ELMo_NER.check_X(X, 'X_train')
        self.assertTrue(True)  # reaching this line means check_X did not raise
    def test_check_X_negative01(self):
        """check_X must reject a set: it is unordered, hence not list-like."""
        X = {'abc', 'defgh', '4wdffg'}
        true_err_msg = re.escape('`X_train` is wrong, because it is not list-like object!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_X(X, 'X_train')
    def test_check_X_negative02(self):
        """check_X must reject a 2-D array: only 1-D sequences of texts are valid."""
        X = np.random.uniform(-1.0, 1.0, (10, 2))
        true_err_msg = re.escape('`X_train` is wrong, because it is not 1-D list!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_X(X, 'X_train')
    def test_check_X_negative03(self):
        """check_X must reject a list containing a non-string item (here: int at index 1)."""
        X = ['abc', 23, '4wdffg']
        true_err_msg = re.escape('Item 1 of `X_train` is wrong, because it is not string-like object!')
        with self.assertRaisesRegex(ValueError, true_err_msg):
            ELMo_NER.check_X(X, 'X_train')
def text_check_Xy_positive(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_classes_list = ('LOC', 'ORG', 'PER')
self.assertEqual(true_classes_list, ELMo_NER.check_Xy(X, 'X_train', y, 'y_train'))
def text_check_Xy_negative01(self):
X = {
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
}
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('`X_train` is wrong, because it is not list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative02(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = {
'1': {
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
'2': {
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
}
true_err_msg = re.escape('`y_train` is wrong, because it is not a list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative03(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = np.random.uniform(-1.0, 1.0, (10, 2))
true_err_msg = re.escape('`y_train` is wrong, because it is not 1-D list!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative04(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
},
{
'LOC': [(17, 24), (117, 130)]
}
]
true_err_msg = re.escape('Length of `X_train` does not correspond to length of `y_train`! 2 != 3')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative05(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
4
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because it is not a dictionary-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative06(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
1: [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because its key `1` is not a string-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative07(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'O': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `O` incorrectly specifies a named '
'entity!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative08(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'123': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `123` incorrectly specifies a named '
'entity!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative09(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'loc': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `loc` incorrectly specifies a named '
'entity!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative10(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': {1, 2}
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because its value `{0}` is not a list-like '
'object!'.format(y[0]['PER']))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative11(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), 63],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because named entity bounds `63` are not specified as '
'list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative12(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77, 81)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because named entity bounds `{0}` are not specified as '
'2-D list!'.format((63, 77, 81)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative13(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (219, 196)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '
'incorrect!'.format((219, 196)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative14(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 519)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '
'incorrect!'.format((196, 519)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative15(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'<NAME> принимает в Белом доме своего французского коллегу <NAME>. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(-1, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '
'incorrect!'.format((-1, 137)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
    def test_calculate_bounds_of_tokens_positive01(self):
        """calculate_bounds_of_tokens must map each token to its (start, end) character span
        in the source text, including the sentence-final punctuation token."""
        source_text = 'Совершенно новую технологию перекачки российской водки за рубеж начали использовать ' \
                      'контрабандисты.'
        tokenized_text = ['Совершенно', 'новую', 'технологию', 'перекачки', 'российской', 'водки', 'за', 'рубеж',
                          'начали', 'использовать', 'контрабандисты', '.']
        true_bounds = [(0, 10), (11, 16), (17, 27), (28, 37), (38, 48), (49, 54), (55, 57), (58, 63), (64, 70),
                       (71, 83), (84, 98), (98, 99)]
        self.assertEqual(true_bounds, ELMo_NER.calculate_bounds_of_tokens(source_text, tokenized_text))
    def test_calculate_bounds_of_tokens_positive02(self):
        """calculate_bounds_of_tokens must handle sub-word splits: punctuation glued to words,
        a hyphenated word split into three tokens, and a number split around an en dash."""
        source_text = 'Один из последних представителей клады, тираннозавр (Tyrannosaurus rex), живший 66–67 ' \
                      'миллионов лет назад, был одним из крупнейших когда-либо живших сухопутных хищников'
        tokenized_text = ['Один', 'из', 'последних', 'представителей', 'клады', ',', 'тираннозавр', '(',
                          'Tyrannosaurus', 'rex', ')', ',', 'живший', '66', '–', '67', 'миллионов', 'лет', 'назад', ',',
                          'был', 'одним', 'из', 'крупнейших', 'когда', '-', 'либо', 'живших', 'сухопутных', 'хищников']
        true_bounds = [(0, 4), (5, 7), (8, 17), (18, 32), (33, 38), (38, 39), (40, 51), (52, 53), (53, 66), (67, 70),
                       (70, 71), (71, 72), (73, 79), (80, 82), (82, 83), (83, 85), (86, 95), (96, 99), (100, 105),
                       (105, 106), (107, 110), (111, 116), (117, 119), (120, 130), (131, 136), (136, 137), (137, 141),
                       (142, 148), (149, 159), (160, 168)]
        self.assertEqual(true_bounds, ELMo_NER.calculate_bounds_of_tokens(source_text, tokenized_text))
    def test_detect_token_labels_positive01(self):
        """detect_token_labels must convert per-character entity indices into per-token label IDs,
        padded to the requested sequence length (16)."""
        source_text = '<NAME> принимает в Белом доме своего французского коллегу <NAME>.'
        tokenized_text = ['Барак', 'Обама', 'принимает', 'в', 'Белом', 'доме', 'своего',
                          'французского', 'коллегу', 'Николя', 'Саркози', '.']
        token_bounds = ELMo_NER.calculate_bounds_of_tokens(source_text, tokenized_text)
        # One entity index per character of the source text (0 = outside any entity).
        indices_of_named_entities = np.array(
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3,
             3, 3, 3, 3, 3, 3, 0],
            dtype=np.int32
        )
        # Maps each entity index to its class label (entities 1 and 3 share class 1).
        label_IDs = {1: 1, 2: 2, 3: 1}
        y_true = np.array([2, 1, 0, 0, 4, 3, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0], dtype=np.int32)
        y_pred = ELMo_NER.detect_token_labels(token_bounds, indices_of_named_entities, label_IDs, 16)
        self.assertIsInstance(y_pred, np.ndarray)
        self.assertEqual(y_true.shape, y_pred.shape)
        self.assertEqual(y_true.tolist(), y_pred.tolist())
    def test_detect_token_labels_positive02(self):
        """detect_token_labels with five overlapping-class entities and a longer (32) padded
        sequence; entities 2 and 4 share class 2, so label IDs are reused across spans."""
        source_text = 'С 1876 г Павлов ассистирует профессору <NAME> в Медико-хирургической академии и ' \
                      'параллельно изучает физиологию кровообращения.'
        tokenized_text = ['С', '1876', 'г', 'Павлов', 'ассистирует', 'профессору', 'К', '.', 'Н', '.', 'Устимовичу',
                          'в', 'Медико', '-', 'хирургической', 'академии', 'и', 'параллельно', 'изучает', 'физиологию',
                          'кровообращения', '.']
        token_bounds = ELMo_NER.calculate_bounds_of_tokens(source_text, tokenized_text)
        # One entity index per character of the source text (0 = outside any entity).
        indices_of_named_entities = np.array(
            [0, 0, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3,
             3, 3, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
             5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            dtype=np.int32
        )
        # Maps entity index -> class label; entities 2 and 4 belong to the same class.
        label_IDs = {1: 1, 2: 2, 3: 3, 4: 2, 5: 4}
        y_true = np.array(
            [0, 2, 1, 4, 0, 6, 4, 3, 3, 3, 3, 0, 8, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            dtype=np.int32
        )
        y_pred = ELMo_NER.detect_token_labels(token_bounds, indices_of_named_entities, label_IDs, 32)
        self.assertIsInstance(y_pred, np.ndarray)
        self.assertEqual(y_true.shape, y_pred.shape)
        self.assertEqual(y_true.tolist(), y_pred.tolist())
    def test_calculate_indices_of_named_entities(self):
        """calculate_indices_of_named_entities must build a per-character entity-index array
        plus a mapping from each entity index to its 1-based position in classes_list."""
        source_text = '<NAME> принимает в Белом доме своего французского коллегу <NAME>.'
        classes_list = ('LOCATION', 'ORG', 'PERSON')
        named_entities = {'PERSON': [(0, 11), (63, 77)], 'LOCATION': [(24, 34)]}
        # Expected per-character indices: entities numbered in order of appearance.
        true_indices = np.array(
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3,
             3, 3, 3, 3, 3, 0],
            dtype=np.int32
        )
        # Entities 2 and 3 are both PERSON (class 3); entity 1 is LOCATION (class 1).
        true_labels_to_classes = {1: 1, 2: 3, 3: 3}
        indices, labels_to_classes = ELMo_NER.calculate_indices_of_named_entities(source_text, classes_list,
                                                                                 named_entities)
        self.assertIsInstance(indices, np.ndarray)
        self.assertIsInstance(labels_to_classes, dict)
        self.assertEqual(true_indices.shape, indices.shape)
        self.assertEqual(true_indices.tolist(), indices.tolist())
        self.assertEqual(set(true_labels_to_classes.keys()), set(labels_to_classes.keys()))
        for label_ID in true_labels_to_classes:
            self.assertEqual(true_labels_to_classes[label_ID], labels_to_classes[label_ID])
    def test_fit_positive01(self):
        """fit() on a frozen (non-finetuned) ELMo must return self with all hyperparameters
        and all fitted attributes (trailing-underscore) set, and the classes sorted.

        NOTE: this is an integration test — it downloads the ELMo hub module and runs real
        TensorFlow training on the bundled testdata, so it is slow and needs network access.
        """
        base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
        # random_seed=None checks that fit() assigns a concrete int seed itself.
        self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
                            validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
        X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
        res = self.ner.fit(X_train, y_train)
        self.assertIsInstance(res, ELMo_NER)
        self.assertTrue(hasattr(res, 'batch_size'))
        self.assertTrue(hasattr(res, 'lr'))
        self.assertTrue(hasattr(res, 'l2_reg'))
        self.assertTrue(hasattr(res, 'elmo_hub_module_handle'))
        self.assertTrue(hasattr(res, 'finetune_elmo'))
        self.assertTrue(hasattr(res, 'max_epochs'))
        self.assertTrue(hasattr(res, 'patience'))
        self.assertTrue(hasattr(res, 'random_seed'))
        self.assertTrue(hasattr(res, 'gpu_memory_frac'))
        self.assertTrue(hasattr(res, 'max_seq_length'))
        self.assertTrue(hasattr(res, 'validation_fraction'))
        self.assertTrue(hasattr(res, 'verbose'))
        self.assertIsInstance(res.batch_size, int)
        self.assertIsInstance(res.lr, float)
        self.assertIsInstance(res.l2_reg, float)
        self.assertIsInstance(res.elmo_hub_module_handle, str)
        self.assertIsInstance(res.finetune_elmo, bool)
        self.assertIsInstance(res.max_epochs, int)
        self.assertIsInstance(res.patience, int)
        # Even with random_seed=None above, the fitted seed must be an int.
        self.assertIsInstance(res.random_seed, int)
        self.assertIsInstance(res.gpu_memory_frac, float)
        self.assertIsInstance(res.max_seq_length, int)
        self.assertIsInstance(res.validation_fraction, float)
        self.assertIsInstance(res.verbose, bool)
        # Fitted (trailing-underscore) attributes created by fit().
        self.assertTrue(hasattr(res, 'classes_list_'))
        self.assertTrue(hasattr(res, 'logits_'))
        self.assertTrue(hasattr(res, 'transition_params_'))
        self.assertTrue(hasattr(res, 'input_tokens_'))
        self.assertTrue(hasattr(res, 'sequence_lengths_'))
        self.assertTrue(hasattr(res, 'additional_features_'))
        self.assertTrue(hasattr(res, 'y_ph_'))
        self.assertTrue(hasattr(res, 'sess_'))
        self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
def test_fit_positive02(self):
base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
self.ner = ELMo_NER(finetune_elmo=True, max_epochs=3, batch_size=2, max_seq_length=64, gpu_memory_frac=0.9,
validation_fraction=0.3, random_seed=42, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
res = self.ner.fit(X_train, y_train)
self.assertIsInstance(res, ELMo_NER)
self.assertTrue(hasattr(res, 'batch_size'))
self.assertTrue(hasattr(res, 'lr'))
self.assertTrue(hasattr(res, 'l2_reg'))
self.assertTrue(hasattr(res, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(res, 'finetune_elmo'))
self.assertTrue(hasattr(res, 'max_epochs'))
self.assertTrue(hasattr(res, 'patience'))
self.assertTrue(hasattr(res, 'random_seed'))
self.assertTrue(hasattr(res, 'gpu_memory_frac'))
self.assertTrue(hasattr(res, 'max_seq_length'))
self.assertTrue(hasattr(res, 'validation_fraction'))
self.assertTrue(hasattr(res, 'verbose'))
self.assertIsInstance(res.batch_size, int)
self.assertIsInstance(res.lr, float)
self.assertIsInstance(res.l2_reg, float)
self.assertIsInstance(res.elmo_hub_module_handle, str)
self.assertIsInstance(res.finetune_elmo, bool)
self.assertIsInstance(res.max_epochs, int)
self.assertIsInstance(res.patience, int)
self.assertIsInstance(res.random_seed, int)
self.assertIsInstance(res.gpu_memory_frac, float)
self.assertIsInstance(res.max_seq_length, int)
self.assertIsInstance(res.validation_fraction, float)
self.assertIsInstance(res.verbose, bool)
self.assertEqual(res.random_seed, 42)
self.assertTrue(hasattr(res, 'classes_list_'))
self.assertTrue(hasattr(res, 'shapes_list_'))
self.assertTrue(hasattr(res, 'logits_'))
self.assertTrue(hasattr(res, 'transition_params_'))
self.assertTrue(hasattr(res, 'input_tokens_'))
self.assertTrue(hasattr(res, 'sequence_lengths_'))
self.assertTrue(hasattr(res, 'additional_features_'))
self.assertTrue(hasattr(res, 'y_ph_'))
self.assertTrue(hasattr(res, 'sess_'))
self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
self.assertIsInstance(res.shapes_list_, tuple)
self.assertGreater(len(res.shapes_list_), 0)
def test_fit_positive03(self):
base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
res = self.ner.fit(X_train, y_train)
self.assertIsInstance(res, ELMo_NER)
self.assertTrue(hasattr(res, 'batch_size'))
self.assertTrue(hasattr(res, 'lr'))
self.assertTrue(hasattr(res, 'l2_reg'))
self.assertTrue(hasattr(res, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(res, 'finetune_elmo'))
self.assertTrue(hasattr(res, 'max_epochs'))
self.assertTrue(hasattr(res, 'patience'))
self.assertTrue(hasattr(res, 'random_seed'))
self.assertTrue(hasattr(res, 'gpu_memory_frac'))
self.assertTrue(hasattr(res, 'max_seq_length'))
self.assertTrue(hasattr(res, 'validation_fraction'))
self.assertTrue(hasattr(res, 'verbose'))
self.assertIsInstance(res.batch_size, int)
self.assertIsInstance(res.lr, float)
self.assertIsInstance(res.l2_reg, float)
self.assertIsInstance(res.elmo_hub_module_handle, str)
self.assertIsInstance(res.finetune_elmo, bool)
self.assertIsInstance(res.max_epochs, int)
self.assertIsInstance(res.patience, int)
self.assertIsInstance(res.random_seed, int)
self.assertIsInstance(res.gpu_memory_frac, float)
self.assertIsInstance(res.max_seq_length, int)
self.assertIsInstance(res.validation_fraction, float)
self.assertIsInstance(res.verbose, bool)
self.assertTrue(hasattr(res, 'classes_list_'))
self.assertTrue(hasattr(res, 'shapes_list_'))
self.assertTrue(hasattr(res, 'logits_'))
self.assertTrue(hasattr(res, 'transition_params_'))
self.assertTrue(hasattr(res, 'input_tokens_'))
self.assertTrue(hasattr(res, 'sequence_lengths_'))
self.assertTrue(hasattr(res, 'additional_features_'))
self.assertTrue(hasattr(res, 'y_ph_'))
self.assertTrue(hasattr(res, 'sess_'))
self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
self.assertIsInstance(res.shapes_list_, tuple)
self.assertGreater(len(res.shapes_list_), 0)
def test_fit_predict(self):
base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
res = self.ner.fit(X_train, y_train)
self.assertIsInstance(res, ELMo_NER)
self.assertTrue(hasattr(res, 'batch_size'))
self.assertTrue(hasattr(res, 'lr'))
self.assertTrue(hasattr(res, 'l2_reg'))
self.assertTrue(hasattr(res, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(res, 'finetune_elmo'))
self.assertTrue(hasattr(res, 'max_epochs'))
self.assertTrue(hasattr(res, 'patience'))
self.assertTrue(hasattr(res, 'random_seed'))
self.assertTrue(hasattr(res, 'gpu_memory_frac'))
self.assertTrue(hasattr(res, 'max_seq_length'))
self.assertTrue(hasattr(res, 'validation_fraction'))
self.assertTrue(hasattr(res, 'verbose'))
self.assertIsInstance(res.batch_size, int)
self.assertIsInstance(res.lr, float)
self.assertIsInstance(res.l2_reg, float)
self.assertIsInstance(res.elmo_hub_module_handle, str)
self.assertIsInstance(res.finetune_elmo, bool)
self.assertIsInstance(res.max_epochs, int)
self.assertIsInstance(res.patience, int)
self.assertIsInstance(res.random_seed, int)
self.assertIsInstance(res.gpu_memory_frac, float)
self.assertIsInstance(res.max_seq_length, int)
self.assertIsInstance(res.validation_fraction, float)
self.assertIsInstance(res.verbose, bool)
self.assertTrue(hasattr(res, 'classes_list_'))
self.assertTrue(hasattr(res, 'shapes_list_'))
self.assertTrue(hasattr(res, 'logits_'))
self.assertTrue(hasattr(res, 'transition_params_'))
self.assertTrue(hasattr(res, 'input_tokens_'))
self.assertTrue(hasattr(res, 'sequence_lengths_'))
self.assertTrue(hasattr(res, 'additional_features_'))
self.assertTrue(hasattr(res, 'y_ph_'))
self.assertTrue(hasattr(res, 'sess_'))
self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
self.assertIsInstance(res.shapes_list_, tuple)
self.assertGreater(len(res.shapes_list_), 0)
y_pred = res.predict(X_train)
self.assertIsInstance(y_pred, list)
self.assertEqual(len(X_train), len(y_pred))
for sample_idx in range(len(y_pred)):
self.assertIsInstance(y_pred[sample_idx], dict)
f1, precision, recall, _ = calculate_prediction_quality(y_train, y_pred, res.classes_list_)
self.assertGreater(f1, 0.0)
self.assertGreater(precision, 0.0)
self.assertGreater(recall, 0.0)
def test_predict_negative(self):
base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, random_seed=None,
elmo_hub_module_handle=self.ELMO_HUB_MODULE)
X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
with self.assertRaises(NotFittedError):
_ = self.ner.predict(X_train)
def test_serialize_positive01(self):
base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
res = self.ner.fit(X_train, y_train)
self.assertIsInstance(res, ELMo_NER)
self.assertTrue(hasattr(res, 'batch_size'))
self.assertTrue(hasattr(res, 'lr'))
self.assertTrue(hasattr(res, 'l2_reg'))
self.assertTrue(hasattr(res, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(res, 'finetune_elmo'))
self.assertTrue(hasattr(res, 'max_epochs'))
self.assertTrue(hasattr(res, 'patience'))
self.assertTrue(hasattr(res, 'random_seed'))
self.assertTrue(hasattr(res, 'gpu_memory_frac'))
self.assertTrue(hasattr(res, 'max_seq_length'))
self.assertTrue(hasattr(res, 'validation_fraction'))
self.assertTrue(hasattr(res, 'verbose'))
self.assertIsInstance(res.batch_size, int)
self.assertIsInstance(res.lr, float)
self.assertIsInstance(res.l2_reg, float)
self.assertIsInstance(res.elmo_hub_module_handle, str)
self.assertIsInstance(res.finetune_elmo, bool)
self.assertIsInstance(res.max_epochs, int)
self.assertIsInstance(res.patience, int)
self.assertIsInstance(res.random_seed, int)
self.assertIsInstance(res.gpu_memory_frac, float)
self.assertIsInstance(res.max_seq_length, int)
self.assertIsInstance(res.validation_fraction, float)
self.assertIsInstance(res.verbose, bool)
self.assertTrue(hasattr(res, 'classes_list_'))
self.assertTrue(hasattr(res, 'shapes_list_'))
self.assertTrue(hasattr(res, 'logits_'))
self.assertTrue(hasattr(res, 'transition_params_'))
self.assertTrue(hasattr(res, 'input_tokens_'))
self.assertTrue(hasattr(res, 'sequence_lengths_'))
self.assertTrue(hasattr(res, 'additional_features_'))
self.assertTrue(hasattr(res, 'y_ph_'))
self.assertTrue(hasattr(res, 'sess_'))
self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
self.assertIsInstance(res.shapes_list_, tuple)
self.assertGreater(len(res.shapes_list_), 0)
y_pred1 = res.predict(X_train)
self.assertIsInstance(y_pred1, list)
self.assertEqual(len(X_train), len(y_pred1))
for sample_idx in range(len(y_pred1)):
self.assertIsInstance(y_pred1[sample_idx], dict)
f1, precision, recall, _ = calculate_prediction_quality(y_train, y_pred1, res.classes_list_)
self.assertGreater(f1, 0.0)
self.assertGreater(precision, 0.0)
self.assertGreater(recall, 0.0)
self.temp_file_name = tempfile.NamedTemporaryFile(mode='w').name
with open(self.temp_file_name, mode='wb') as fp:
pickle.dump(res, fp)
del res, self.ner
gc.collect()
with open(self.temp_file_name, mode='rb') as fp:
self.ner = pickle.load(fp)
y_pred2 = self.ner.predict(X_train)
self.assertIsInstance(y_pred2, list)
self.assertEqual(len(y_pred2), len(y_pred2))
for sample_idx in range(len(y_pred2)):
self.assertIsInstance(y_pred2[sample_idx], dict)
self.assertEqual(set(y_pred1[sample_idx]), set(y_pred2[sample_idx]))
for ne_type in y_pred1[sample_idx]:
self.assertEqual(y_pred1[sample_idx][ne_type], y_pred2[sample_idx][ne_type])
def test_serialize_positive02(self):
self.ner = ELMo_NER(random_seed=31, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
old_batch_size = self.ner.batch_size
old_lr = self.ner.lr
old_l2_reg = self.ner.l2_reg
old_elmo_hub_module_handle = self.ner.elmo_hub_module_handle
old_finetune_elmo = self.ner.finetune_elmo
old_max_epochs = self.ner.max_epochs
old_patience = self.ner.patience
old_random_seed = self.ner.random_seed
old_gpu_memory_frac = self.ner.gpu_memory_frac
old_max_seq_length = self.ner.max_seq_length
old_validation_fraction = self.ner.validation_fraction
old_verbose = self.ner.verbose
self.temp_file_name = tempfile.NamedTemporaryFile().name
with open(self.temp_file_name, mode='wb') as fp:
pickle.dump(self.ner, fp)
del self.ner
gc.collect()
with open(self.temp_file_name, mode='rb') as fp:
self.ner = pickle.load(fp)
self.assertIsInstance(self.ner, ELMo_NER)
self.assertTrue(hasattr(self.ner, 'batch_size'))
self.assertTrue(hasattr(self.ner, 'lr'))
self.assertTrue(hasattr(self.ner, 'l2_reg'))
self.assertTrue(hasattr(self.ner, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(self.ner, 'finetune_elmo'))
self.assertTrue(hasattr(self.ner, 'max_epochs'))
self.assertTrue(hasattr(self.ner, 'patience'))
self.assertTrue(hasattr(self.ner, 'random_seed'))
self.assertTrue(hasattr(self.ner, 'gpu_memory_frac'))
self.assertTrue(hasattr(self.ner, 'max_seq_length'))
self.assertTrue(hasattr(self.ner, 'validation_fraction'))
self.assertTrue(hasattr(self.ner, 'verbose'))
self.assertEqual(self.ner.batch_size, old_batch_size)
self.assertAlmostEqual(self.ner.lr, old_lr)
self.assertAlmostEqual(self.ner.l2_reg, old_l2_reg)
self.assertEqual(self.ner.elmo_hub_module_handle, old_elmo_hub_module_handle)
self.assertEqual(self.ner.finetune_elmo, old_finetune_elmo)
self.assertEqual(self.ner.max_epochs, old_max_epochs)
self.assertEqual(self.ner.patience, old_patience)
self.assertAlmostEqual(self.ner.gpu_memory_frac, old_gpu_memory_frac)
self.assertEqual(self.ner.max_seq_length, old_max_seq_length)
self.assertAlmostEqual(self.ner.validation_fraction, old_validation_fraction)
self.assertEqual(self.ner.verbose, old_verbose)
self.assertEqual(self.ner.random_seed, old_random_seed)
def test_copy_positive01(self):
self.ner = ELMo_NER(random_seed=0, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
self.another_ner = copy.copy(self.ner)
self.assertIsInstance(self.another_ner, ELMo_NER)
self.assertIsNot(self.ner, self.another_ner)
self.assertTrue(hasattr(self.another_ner, 'batch_size'))
self.assertTrue(hasattr(self.another_ner, 'lr'))
self.assertTrue(hasattr(self.another_ner, 'l2_reg'))
self.assertTrue(hasattr(self.another_ner, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(self.another_ner, 'finetune_elmo'))
self.assertTrue(hasattr(self.another_ner, 'max_epochs'))
self.assertTrue(hasattr(self.another_ner, 'patience'))
self.assertTrue(hasattr(self.another_ner, 'random_seed'))
self.assertTrue(hasattr(self.another_ner, 'gpu_memory_frac'))
self.assertTrue(hasattr(self.another_ner, 'max_seq_length'))
self.assertTrue(hasattr(self.another_ner, 'validation_fraction'))
self.assertTrue(hasattr(self.another_ner, 'verbose'))
self.assertEqual(self.ner.batch_size, self.another_ner.batch_size)
self.assertAlmostEqual(self.ner.lr, self.another_ner.lr)
self.assertAlmostEqual(self.ner.l2_reg, self.another_ner.l2_reg)
self.assertEqual(self.ner.elmo_hub_module_handle, self.another_ner.elmo_hub_module_handle)
self.assertEqual(self.ner.finetune_elmo, self.another_ner.finetune_elmo)
self.assertEqual(self.ner.max_epochs, self.another_ner.max_epochs)
self.assertEqual(self.ner.patience, self.another_ner.patience)
self.assertEqual(self.ner.random_seed, self.another_ner.random_seed)
self.assertAlmostEqual(self.ner.gpu_memory_frac, self.another_ner.gpu_memory_frac)
self.assertEqual(self.ner.max_seq_length, self.another_ner.max_seq_length)
self.assertAlmostEqual(self.ner.validation_fraction, self.another_ner.validation_fraction)
self.assertEqual(self.ner.verbose, self.another_ner.verbose)
def test_copy_positive02(self):
base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
self.ner.fit(X_train, y_train)
self.another_ner = copy.copy(self.ner)
self.assertIsInstance(self.another_ner, ELMo_NER)
self.assertIsNot(self.ner, self.another_ner)
self.assertTrue(hasattr(self.another_ner, 'batch_size'))
self.assertTrue(hasattr(self.another_ner, 'lr'))
self.assertTrue(hasattr(self.another_ner, 'l2_reg'))
self.assertTrue(hasattr(self.another_ner, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(self.another_ner, 'finetune_elmo'))
self.assertTrue(hasattr(self.another_ner, 'max_epochs'))
self.assertTrue(hasattr(self.another_ner, 'patience'))
self.assertTrue(hasattr(self.another_ner, 'random_seed'))
self.assertTrue(hasattr(self.another_ner, 'gpu_memory_frac'))
self.assertTrue(hasattr(self.another_ner, 'max_seq_length'))
self.assertTrue(hasattr(self.another_ner, 'validation_fraction'))
self.assertTrue(hasattr(self.another_ner, 'verbose'))
self.assertTrue(hasattr(self.another_ner, 'classes_list_'))
self.assertTrue(hasattr(self.another_ner, 'shapes_list_'))
self.assertTrue(hasattr(self.another_ner, 'logits_'))
self.assertTrue(hasattr(self.another_ner, 'transition_params_'))
self.assertTrue(hasattr(self.another_ner, 'input_tokens_'))
self.assertTrue(hasattr(self.another_ner, 'sequence_lengths_'))
self.assertTrue(hasattr(self.another_ner, 'additional_features_'))
self.assertTrue(hasattr(self.another_ner, 'y_ph_'))
self.assertTrue(hasattr(self.another_ner, 'sess_'))
self.assertEqual(self.ner.batch_size, self.another_ner.batch_size)
self.assertAlmostEqual(self.ner.lr, self.another_ner.lr)
self.assertAlmostEqual(self.ner.l2_reg, self.another_ner.l2_reg)
self.assertEqual(self.ner.elmo_hub_module_handle, self.another_ner.elmo_hub_module_handle)
self.assertEqual(self.ner.finetune_elmo, self.another_ner.finetune_elmo)
self.assertEqual(self.ner.max_epochs, self.another_ner.max_epochs)
self.assertEqual(self.ner.patience, self.another_ner.patience)
self.assertEqual(self.ner.random_seed, self.another_ner.random_seed)
self.assertAlmostEqual(self.ner.gpu_memory_frac, self.another_ner.gpu_memory_frac)
self.assertEqual(self.ner.max_seq_length, self.another_ner.max_seq_length)
self.assertAlmostEqual(self.ner.validation_fraction, self.another_ner.validation_fraction)
self.assertEqual(self.ner.verbose, self.another_ner.verbose)
self.assertIs(self.ner.classes_list_, self.another_ner.classes_list_)
self.assertIs(self.ner.shapes_list_, self.another_ner.shapes_list_)
self.assertIs(self.ner.logits_, self.another_ner.logits_)
self.assertIs(self.ner.transition_params_, self.another_ner.transition_params_)
self.assertIs(self.ner.input_tokens_, self.another_ner.input_tokens_)
self.assertIs(self.ner.sequence_lengths_, self.another_ner.sequence_lengths_)
self.assertIs(self.ner.additional_features_, self.another_ner.additional_features_)
self.assertIs(self.ner.y_ph_, self.another_ner.y_ph_)
self.assertIs(self.ner.sess_, self.another_ner.sess_)
def test_calculate_bounds_of_named_entities(self):
bounds_of_tokens = [(0, 2), (2, 5), (5, 8), (8, 10), (11, 16), (17, 20), (20, 22), (22, 26), (26, 27), (28, 31),
(31, 34), (34, 37), (38, 48), (49, 52), (52, 54), (55, 57), (58, 59), (59, 61), (61, 63),
(64, 70), (71, 83), (84, 87), (87, 90), (90, 93), (93, 95), (95, 98), (98, 99)]
classes_list = ('LOCATION', 'ORG', 'PERSON')
labels_of_tokens = [0, 0, 2, 1, 1, 2, 1, 0, 0, 0, 4, 3, 0, 6, 5, 5, 5, 0, 5, 5, 0, 2, 2, 3, 3, 6, 5]
true_entities = {
'LOCATION': [(5, 16), (17, 22), (84, 87), (87, 90)],
'ORG': [(31, 37), (90, 95)],
'PERSON': [(49, 59), (61, 70), (95, 99)]
}
calc_entities = ELMo_NER.calculate_bounds_of_named_entities(bounds_of_tokens, classes_list, labels_of_tokens)
self.assertIsInstance(calc_entities, dict)
self.assertEqual(set(true_entities.keys()), set(calc_entities.keys()))
for entity_type in true_entities:
self.assertEqual(true_entities[entity_type], calc_entities[entity_type])
def test_get_shape_of_string_positive01(self):
src = 'уже'
dst = 'a'
self.assertEqual(dst, ELMo_NER.get_shape_of_string(src))
def test_get_shape_of_string_positive02(self):
src = 'К'
dst = 'A'
self.assertEqual(dst, ELMo_NER.get_shape_of_string(src))
def test_get_shape_of_string_positive03(self):
src = 'Однако'
dst = 'Aa'
self.assertEqual(dst, ELMo_NER.get_shape_of_string(src))
def test_get_shape_of_string_positive04(self):
src = '66–67'
dst = 'D-D'
self.assertEqual(dst, ELMo_NER.get_shape_of_string(src))
def test_get_shape_of_string_positive05(self):
src = '…'
dst = 'U'
self.assertEqual(dst, ELMo_NER.get_shape_of_string(src))
def test_get_shape_of_string_negative(self):
src = ''
dst = ''
self.assertEqual(dst, ELMo_NER.get_shape_of_string(src))
# Run the full test suite with per-test verbose output when this file is
# executed directly as a script (rather than collected by a test runner).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"pickle.dump",
"os.remove",
"deep_ner.elmo_ner.ELMo_NER.check_Xy",
"gc.collect",
"os.path.isfile",
"pickle.load",
"os.path.join",
"deep_ner.elmo_ner.ELMo_NER.detect_token_labels",
"unittest.main",
"os.path.dirname",
"re.escape",
"deep_ner.elmo_ner.ELMo_NER.calculate_indices_of_named_entities",... | [((72267, 72293), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (72280, 72293), False, 'import unittest\n'), ((1090, 1143), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (1098, 1143), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((2568, 2831), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (2589, 2831), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((2961, 3016), 're.escape', 're.escape', (['"""`elmo_hub_module_handle` is not specified!"""'], {}), "('`elmo_hub_module_handle` is not specified!')\n", (2970, 3016), False, 'import re\n'), ((3948, 3991), 're.escape', 're.escape', (['"""`batch_size` is not specified!"""'], {}), "('`batch_size` is not specified!')\n", (3957, 3991), False, 'import re\n'), ((4990, 5094), 're.escape', 're.escape', (['"""`batch_size` is wrong! Expected a positive integer value, but -3 is not positive."""'], {}), "(\n '`batch_size` is wrong! 
Expected a positive integer value, but -3 is not positive.'\n )\n", (4999, 5094), False, 'import re\n'), ((5539, 5582), 're.escape', 're.escape', (['"""`max_epochs` is not specified!"""'], {}), "('`max_epochs` is not specified!')\n", (5548, 5582), False, 'import re\n'), ((6581, 6685), 're.escape', 're.escape', (['"""`max_epochs` is wrong! Expected a positive integer value, but -3 is not positive."""'], {}), "(\n '`max_epochs` is wrong! Expected a positive integer value, but -3 is not positive.'\n )\n", (6590, 6685), False, 'import re\n'), ((7130, 7171), 're.escape', 're.escape', (['"""`patience` is not specified!"""'], {}), "('`patience` is not specified!')\n", (7139, 7171), False, 'import re\n'), ((8171, 8273), 're.escape', 're.escape', (['"""`patience` is wrong! Expected a positive integer value, but -3 is not positive."""'], {}), "(\n '`patience` is wrong! Expected a positive integer value, but -3 is not positive.'\n )\n", (8180, 8273), False, 'import re\n'), ((8719, 8766), 're.escape', 're.escape', (['"""`max_seq_length` is not specified!"""'], {}), "('`max_seq_length` is not specified!')\n", (8728, 8766), False, 'import re\n'), ((9764, 9872), 're.escape', 're.escape', (['"""`max_seq_length` is wrong! Expected a positive integer value, but -3 is not positive."""'], {}), "(\n '`max_seq_length` is wrong! 
Expected a positive integer value, but -3 is not positive.'\n )\n", (9773, 9872), False, 'import re\n'), ((10352, 10404), 're.escape', 're.escape', (['"""`validation_fraction` is not specified!"""'], {}), "('`validation_fraction` is not specified!')\n", (10361, 10404), False, 'import re\n'), ((12613, 12661), 're.escape', 're.escape', (['"""`gpu_memory_frac` is not specified!"""'], {}), "('`gpu_memory_frac` is not specified!')\n", (12622, 12661), False, 'import re\n'), ((14881, 14916), 're.escape', 're.escape', (['"""`lr` is not specified!"""'], {}), "('`lr` is not specified!')\n", (14890, 14916), False, 'import re\n'), ((16511, 16546), 're.escape', 're.escape', (['"""`lr` is not specified!"""'], {}), "('`lr` is not specified!')\n", (16520, 16546), False, 'import re\n'), ((18141, 18180), 're.escape', 're.escape', (['"""`l2_reg` is not specified!"""'], {}), "('`l2_reg` is not specified!')\n", (18150, 18180), False, 'import re\n'), ((19781, 19827), 're.escape', 're.escape', (['"""`finetune_elmo` is not specified!"""'], {}), "('`finetune_elmo` is not specified!')\n", (19790, 19827), False, 'import re\n'), ((20811, 20851), 're.escape', 're.escape', (['"""`verbose` is not specified!"""'], {}), "('`verbose` is not specified!')\n", (20820, 20851), False, 'import re\n'), ((21866, 21896), 'deep_ner.elmo_ner.ELMo_NER.check_X', 'ELMo_NER.check_X', (['X', '"""X_train"""'], {}), "(X, 'X_train')\n", (21882, 21896), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((22029, 22097), 're.escape', 're.escape', (['"""`X_train` is wrong, because it is not list-like object!"""'], {}), "('`X_train` is wrong, because it is not list-like object!')\n", (22038, 22097), False, 'import re\n'), ((22256, 22293), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(10, 2)'], {}), '(-1.0, 1.0, (10, 2))\n', (22273, 22293), True, 'import numpy as np\n'), ((22317, 22377), 're.escape', 're.escape', (['"""`X_train` is wrong, because it is not 1-D list!"""'], {}), "('`X_train` is wrong, 
because it is not 1-D list!')\n", (22326, 22377), False, 'import re\n'), ((22581, 22666), 're.escape', 're.escape', (['"""Item 1 of `X_train` is wrong, because it is not string-like object!"""'], {}), "('Item 1 of `X_train` is wrong, because it is not string-like object!'\n )\n", (22590, 22666), False, 'import re\n'), ((24808, 24876), 're.escape', 're.escape', (['"""`X_train` is wrong, because it is not list-like object!"""'], {}), "('`X_train` is wrong, because it is not list-like object!')\n", (24817, 24876), False, 'import re\n'), ((25966, 26036), 're.escape', 're.escape', (['"""`y_train` is wrong, because it is not a list-like object!"""'], {}), "('`y_train` is wrong, because it is not a list-like object!')\n", (25975, 26036), False, 'import re\n'), ((26814, 26851), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(10, 2)'], {}), '(-1.0, 1.0, (10, 2))\n', (26831, 26851), True, 'import numpy as np\n'), ((26875, 26935), 're.escape', 're.escape', (['"""`y_train` is wrong, because it is not 1-D list!"""'], {}), "('`y_train` is wrong, because it is not 1-D list!')\n", (26884, 26935), False, 'import re\n'), ((28098, 28186), 're.escape', 're.escape', (['"""Length of `X_train` does not correspond to length of `y_train`! 2 != 3"""'], {}), "(\n 'Length of `X_train` does not correspond to length of `y_train`! 
2 != 3')\n", (28107, 28186), False, 'import re\n'), ((29128, 29224), 're.escape', 're.escape', (['"""Item 1 of `y_train` is wrong, because it is not a dictionary-like object!"""'], {}), "(\n 'Item 1 of `y_train` is wrong, because it is not a dictionary-like object!'\n )\n", (29137, 29224), False, 'import re\n'), ((30290, 30391), 're.escape', 're.escape', (['"""Item 0 of `y_train` is wrong, because its key `1` is not a string-like object!"""'], {}), "(\n 'Item 0 of `y_train` is wrong, because its key `1` is not a string-like object!'\n )\n", (30299, 30391), False, 'import re\n'), ((31464, 31574), 're.escape', 're.escape', (['"""Item 1 of `y_train` is wrong, because its key `O` incorrectly specifies a named entity!"""'], {}), "(\n 'Item 1 of `y_train` is wrong, because its key `O` incorrectly specifies a named entity!'\n )\n", (31473, 31574), False, 'import re\n'), ((32693, 32805), 're.escape', 're.escape', (['"""Item 1 of `y_train` is wrong, because its key `123` incorrectly specifies a named entity!"""'], {}), "(\n 'Item 1 of `y_train` is wrong, because its key `123` incorrectly specifies a named entity!'\n )\n", (32702, 32805), False, 'import re\n'), ((33911, 34023), 're.escape', 're.escape', (['"""Item 1 of `y_train` is wrong, because its key `loc` incorrectly specifies a named entity!"""'], {}), "(\n 'Item 1 of `y_train` is wrong, because its key `loc` incorrectly specifies a named entity!'\n )\n", (33920, 34023), False, 'import re\n'), ((36342, 36466), 're.escape', 're.escape', (['"""Item 1 of `y_train` is wrong, because named entity bounds `63` are not specified as list-like object!"""'], {}), "(\n 'Item 1 of `y_train` is wrong, because named entity bounds `63` are not specified as list-like object!'\n )\n", (36351, 36466), False, 'import re\n'), ((43687, 43751), 'deep_ner.elmo_ner.ELMo_NER.calculate_bounds_of_tokens', 'ELMo_NER.calculate_bounds_of_tokens', (['source_text', 'tokenized_text'], {}), '(source_text, tokenized_text)\n', (43722, 43751), False, 'from 
deep_ner.elmo_ner import ELMo_NER\n'), ((43788, 44060), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 0]'], {'dtype': 'np.int32'}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 0], dtype=np.int32)\n', (43796, 44060), True, 'import numpy as np\n'), ((44165, 44239), 'numpy.array', 'np.array', (['[2, 1, 0, 0, 4, 3, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0]'], {'dtype': 'np.int32'}), '([2, 1, 0, 0, 4, 3, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0], dtype=np.int32)\n', (44173, 44239), True, 'import numpy as np\n'), ((44257, 44345), 'deep_ner.elmo_ner.ELMo_NER.detect_token_labels', 'ELMo_NER.detect_token_labels', (['token_bounds', 'indices_of_named_entities', 'label_IDs', '(16)'], {}), '(token_bounds, indices_of_named_entities,\n label_IDs, 16)\n', (44285, 44345), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((45043, 45107), 'deep_ner.elmo_ner.ELMo_NER.calculate_bounds_of_tokens', 'ELMo_NER.calculate_bounds_of_tokens', (['source_text', 'tokenized_text'], {}), '(source_text, tokenized_text)\n', (45078, 45107), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((45144, 45598), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 4, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 1, 1, 1, 1, 1, 1, 0, 2, 2, 
2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 4, 4, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32)\n', (45152, 45598), True, 'import numpy as np\n'), ((45720, 45846), 'numpy.array', 'np.array', (['[0, 2, 1, 4, 0, 6, 4, 3, 3, 3, 3, 0, 8, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 2, 1, 4, 0, 6, 4, 3, 3, 3, 3, 0, 8, 7, 7, 7, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32)\n', (45728, 45846), True, 'import numpy as np\n'), ((45894, 45982), 'deep_ner.elmo_ner.ELMo_NER.detect_token_labels', 'ELMo_NER.detect_token_labels', (['token_bounds', 'indices_of_named_entities', 'label_IDs', '(32)'], {}), '(token_bounds, indices_of_named_entities,\n label_IDs, 32)\n', (45922, 45982), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((46445, 46717), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 0]'], {'dtype': 'np.int32'}), '([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 0], dtype=np.int32)\n', (46453, 46717), True, 'import numpy as np\n'), ((46855, 46946), 'deep_ner.elmo_ner.ELMo_NER.calculate_indices_of_named_entities', 'ELMo_NER.calculate_indices_of_named_entities', (['source_text', 'classes_list', 'named_entities'], {}), '(source_text, classes_list,\n named_entities)\n', (46899, 46946), False, 'from 
deep_ner.elmo_ner import ELMo_NER\n'), ((47615, 47808), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(False)', 'max_epochs': '(3)', 'batch_size': '(4)', 'max_seq_length': '(64)', 'gpu_memory_frac': '(0.9)', 'validation_fraction': '(0.3)', 'random_seed': 'None', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64,\n gpu_memory_frac=0.9, validation_fraction=0.3, random_seed=None,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (47623, 47808), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((49926, 50116), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(True)', 'max_epochs': '(3)', 'batch_size': '(2)', 'max_seq_length': '(64)', 'gpu_memory_frac': '(0.9)', 'validation_fraction': '(0.3)', 'random_seed': '(42)', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(finetune_elmo=True, max_epochs=3, batch_size=2, max_seq_length=64,\n gpu_memory_frac=0.9, validation_fraction=0.3, random_seed=42,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (49934, 50116), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((52442, 52635), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(False)', 'max_epochs': '(3)', 'batch_size': '(4)', 'max_seq_length': '(64)', 'gpu_memory_frac': '(0.9)', 'validation_fraction': '(0.3)', 'random_seed': 'None', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64,\n gpu_memory_frac=0.9, validation_fraction=0.3, random_seed=None,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (52450, 52635), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((54912, 55105), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(False)', 'max_epochs': '(3)', 'batch_size': '(4)', 'max_seq_length': '(64)', 'gpu_memory_frac': '(0.9)', 'validation_fraction': '(0.3)', 'random_seed': 'None', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), 
'(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64,\n gpu_memory_frac=0.9, validation_fraction=0.3, random_seed=None,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (54920, 55105), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((57534, 57598), 'deep_ner.quality.calculate_prediction_quality', 'calculate_prediction_quality', (['y_train', 'y_pred', 'res.classes_list_'], {}), '(y_train, y_pred, res.classes_list_)\n', (57562, 57598), False, 'from deep_ner.quality import calculate_prediction_quality\n'), ((57846, 57970), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(False)', 'max_epochs': '(3)', 'batch_size': '(4)', 'random_seed': 'None', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(finetune_elmo=False, max_epochs=3, batch_size=4, random_seed=None,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (57854, 57970), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((58309, 58502), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(False)', 'max_epochs': '(3)', 'batch_size': '(4)', 'max_seq_length': '(64)', 'gpu_memory_frac': '(0.9)', 'validation_fraction': '(0.3)', 'random_seed': 'None', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64,\n gpu_memory_frac=0.9, validation_fraction=0.3, random_seed=None,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (58317, 58502), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((60936, 61001), 'deep_ner.quality.calculate_prediction_quality', 'calculate_prediction_quality', (['y_train', 'y_pred1', 'res.classes_list_'], {}), '(y_train, y_pred1, res.classes_list_)\n', (60964, 61001), False, 'from deep_ner.quality import calculate_prediction_quality\n'), ((61318, 61330), 'gc.collect', 'gc.collect', ([], {}), '()\n', (61328, 61330), False, 'import gc\n'), ((61960, 62029), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'random_seed': '(31)', 'elmo_hub_module_handle': 
'self.ELMO_HUB_MODULE'}), '(random_seed=31, elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (61968, 62029), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((62793, 62805), 'gc.collect', 'gc.collect', ([], {}), '()\n', (62803, 62805), False, 'import gc\n'), ((64511, 64579), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'random_seed': '(0)', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(random_seed=0, elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (64519, 64579), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((64607, 64626), 'copy.copy', 'copy.copy', (['self.ner'], {}), '(self.ner)\n', (64616, 64626), False, 'import copy\n'), ((66620, 66813), 'deep_ner.elmo_ner.ELMo_NER', 'ELMo_NER', ([], {'finetune_elmo': '(False)', 'max_epochs': '(3)', 'batch_size': '(4)', 'max_seq_length': '(64)', 'gpu_memory_frac': '(0.9)', 'validation_fraction': '(0.3)', 'random_seed': 'None', 'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE'}), '(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64,\n gpu_memory_frac=0.9, validation_fraction=0.3, random_seed=None,\n elmo_hub_module_handle=self.ELMO_HUB_MODULE)\n', (66628, 66813), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((66992, 67011), 'copy.copy', 'copy.copy', (['self.ner'], {}), '(self.ner)\n', (67001, 67011), False, 'import copy\n'), ((70955, 71052), 'deep_ner.elmo_ner.ELMo_NER.calculate_bounds_of_named_entities', 'ELMo_NER.calculate_bounds_of_named_entities', (['bounds_of_tokens', 'classes_list', 'labels_of_tokens'], {}), '(bounds_of_tokens, classes_list,\n labels_of_tokens)\n', (70998, 71052), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((957, 992), 'os.path.isfile', 'os.path.isfile', (['self.temp_file_name'], {}), '(self.temp_file_name)\n', (971, 992), False, 'import os\n'), ((3092, 3305), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 
'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(finetune_elmo=True, batch_size=32, max_seq_length=512,\n lr=0.001, l2_reg=0.0001, validation_fraction=0.1, max_epochs=10,\n patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (3113, 3305), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((3604, 3847), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': '(1)', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=1, finetune_elmo=True,\n batch_size=32, max_seq_length=512, lr=0.001, l2_reg=0.0001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (3625, 3847), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((4067, 4314), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, max_seq_length=512, lr=0.001, l2_reg=0.0001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (4088, 4314), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((4609, 4873), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '"""32"""', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': 
'(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size='32', max_seq_length=512, lr=0.001,\n l2_reg=0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (4630, 4873), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((5160, 5423), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(-3)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=-3, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (5181, 5423), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((5658, 5906), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, patience=3, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (5679, 5906), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((6200, 6465), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 
'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '"""10"""', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs='10', patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (6221, 6465), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((6751, 7014), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(-3)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=-3, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (6772, 7014), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((7247, 7498), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (7268, 7498), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((7790, 8055), 
'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '"""3"""', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience='3',\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (7811, 8055), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((8339, 8603), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(-3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=-3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (8360, 8603), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((8842, 9084), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, lr=0.001, l2_reg=0.0001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, 
random_seed=42)\n', (8863, 9084), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((9383, 9647), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '"""512"""', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length='512', lr=0.001,\n l2_reg=0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (9404, 9647), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((9974, 10236), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(-3)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=-3, lr=0.001, l2_reg=\n 0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (9995, 10236), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((10480, 10718), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, 
l2_reg\n =0.0001, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,\n random_seed=42)\n', (10501, 10718), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((11007, 11272), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '"""0.1"""', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction='0.1', max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (11028, 11272), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((11619, 11883), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(-0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=-0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (11640, 11883), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((12234, 12497), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(1.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 
'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=1.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (12255, 12497), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((12737, 12980), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3, verbose=\n False, random_seed=42)\n', (12758, 12980), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((13264, 13529), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '"""1.0"""', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac='1.0', verbose=False, random_seed=42)\n", (13285, 13529), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((13884, 14148), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': 
'(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(-1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=-1.0, verbose=False, random_seed=42)\n', (13905, 14148), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((14502, 14765), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.3)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.3, verbose=False, random_seed=42)\n', (14523, 14765), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((14992, 15244), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, l2_reg=0.0001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (15013, 15244), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((15534, 15797), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': 
'(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '"""1e-3"""', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr='1e-3',\n l2_reg=0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (15555, 15797), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((16133, 16394), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.0)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.0, l2_reg=\n 0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (16154, 16394), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((16622, 16874), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, l2_reg=0.0001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (16643, 16874), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((17164, 17427), 
'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '"""1e-3"""', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr='1e-3',\n l2_reg=0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (17185, 17427), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((17763, 18024), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.0)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.0, l2_reg=\n 0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (17784, 18024), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((18256, 18503), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, 
random_seed=42)\n', (18277, 18503), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((18798, 19061), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '"""1e-4"""', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n ='1e-4', validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (18819, 19061), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((19402, 19663), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(-2.0)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =-2.0, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n', (19423, 19663), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((19903, 20145), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n batch_size=32, max_seq_length=512, lr=0.001, 
l2_reg=0.0001,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,\n verbose=False, random_seed=42)\n', (19924, 20145), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((20430, 20694), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '"""True"""', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '(False)', 'random_seed': '(42)'}), "(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo='True', batch_size=32, max_seq_length=512, lr=0.001,\n l2_reg=0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose=False, random_seed=42)\n", (20451, 20694), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((20927, 21175), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'random_seed': '(42)'}), '(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, random_seed=42)\n', (20948, 21175), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((21469, 21734), 'deep_ner.elmo_ner.ELMo_NER.check_params', 'ELMo_NER.check_params', ([], {'elmo_hub_module_handle': 'self.ELMO_HUB_MODULE', 'finetune_elmo': '(True)', 'batch_size': '(32)', 'max_seq_length': '(512)', 'lr': '(0.001)', 'l2_reg': '(0.0001)', 'validation_fraction': '(0.1)', 'max_epochs': '(10)', 'patience': '(3)', 'gpu_memory_frac': '(1.0)', 'verbose': '"""False"""', 'random_seed': '(42)'}), 
"(elmo_hub_module_handle=self.ELMO_HUB_MODULE,\n finetune_elmo=True, batch_size=32, max_seq_length=512, lr=0.001, l2_reg\n =0.0001, validation_fraction=0.1, max_epochs=10, patience=3,\n gpu_memory_frac=1.0, verbose='False', random_seed=42)\n", (21490, 21734), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((22173, 22203), 'deep_ner.elmo_ner.ELMo_NER.check_X', 'ELMo_NER.check_X', (['X', '"""X_train"""'], {}), "(X, 'X_train')\n", (22189, 22203), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((22453, 22483), 'deep_ner.elmo_ner.ELMo_NER.check_X', 'ELMo_NER.check_X', (['X', '"""X_train"""'], {}), "(X, 'X_train')\n", (22469, 22483), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((22737, 22767), 'deep_ner.elmo_ner.ELMo_NER.check_X', 'ELMo_NER.check_X', (['X', '"""X_train"""'], {}), "(X, 'X_train')\n", (22753, 22767), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((23803, 23848), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (23820, 23848), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((24952, 24997), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (24969, 24997), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((26112, 26157), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (26129, 26157), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((27011, 27056), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (27028, 27056), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((28257, 28302), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (28274, 28302), 
False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((29290, 29335), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (29307, 29335), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((30457, 30502), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (30474, 30502), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((31676, 31721), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (31693, 31721), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((32907, 32952), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (32924, 32952), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((34125, 34170), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (34142, 34170), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((35336, 35381), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (35353, 35381), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((36568, 36613), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (36585, 36613), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((37816, 37861), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (37833, 37861), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((39042, 39087), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', 
'"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (39059, 39087), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((40276, 40321), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (40293, 40321), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((41500, 41545), 'deep_ner.elmo_ner.ELMo_NER.check_Xy', 'ELMo_NER.check_Xy', (['X', '"""X_train"""', 'y', '"""y_train"""'], {}), "(X, 'X_train', y, 'y_train')\n", (41517, 41545), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((42148, 42212), 'deep_ner.elmo_ner.ELMo_NER.calculate_bounds_of_tokens', 'ELMo_NER.calculate_bounds_of_tokens', (['source_text', 'tokenized_text'], {}), '(source_text, tokenized_text)\n', (42183, 42212), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((43288, 43352), 'deep_ner.elmo_ner.ELMo_NER.calculate_bounds_of_tokens', 'ELMo_NER.calculate_bounds_of_tokens', (['source_text', 'tokenized_text'], {}), '(source_text, tokenized_text)\n', (43323, 43352), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((47557, 47582), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (47572, 47582), False, 'import os\n'), ((47869, 47919), 'os.path.join', 'os.path.join', (['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 'true_named_entities.json')\n", (47881, 47919), False, 'import os\n'), ((49868, 49893), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (49883, 49893), False, 'import os\n'), ((50177, 50227), 'os.path.join', 'os.path.join', (['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 'true_named_entities.json')\n", (50189, 50227), False, 'import os\n'), ((52384, 52409), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (52399, 52409), False, 'import os\n'), ((52696, 52746), 'os.path.join', 'os.path.join', (['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 
'true_named_entities.json')\n", (52708, 52746), False, 'import os\n'), ((54854, 54879), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (54869, 54879), False, 'import os\n'), ((55166, 55216), 'os.path.join', 'os.path.join', (['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 'true_named_entities.json')\n", (55178, 55216), False, 'import os\n'), ((57788, 57813), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (57803, 57813), False, 'import os\n'), ((58035, 58085), 'os.path.join', 'os.path.join', (['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 'true_named_entities.json')\n", (58047, 58085), False, 'import os\n'), ((58251, 58276), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (58266, 58276), False, 'import os\n'), ((58563, 58613), 'os.path.join', 'os.path.join', (['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 'true_named_entities.json')\n", (58575, 58613), False, 'import os\n'), ((61151, 61188), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""'}), "(mode='w')\n", (61178, 61188), False, 'import tempfile\n'), ((61263, 61283), 'pickle.dump', 'pickle.dump', (['res', 'fp'], {}), '(res, fp)\n', (61274, 61283), False, 'import pickle\n'), ((61411, 61426), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (61422, 61426), False, 'import pickle\n'), ((62634, 62663), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (62661, 62663), False, 'import tempfile\n'), ((62738, 62763), 'pickle.dump', 'pickle.dump', (['self.ner', 'fp'], {}), '(self.ner, fp)\n', (62749, 62763), False, 'import pickle\n'), ((62886, 62901), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (62897, 62901), False, 'import pickle\n'), ((66562, 66587), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (66577, 66587), False, 'import os\n'), ((66874, 66924), 'os.path.join', 'os.path.join', 
(['base_dir', '"""true_named_entities.json"""'], {}), "(base_dir, 'true_named_entities.json')\n", (66886, 66924), False, 'import os\n'), ((71426, 71459), 'deep_ner.elmo_ner.ELMo_NER.get_shape_of_string', 'ELMo_NER.get_shape_of_string', (['src'], {}), '(src)\n', (71454, 71459), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((71579, 71612), 'deep_ner.elmo_ner.ELMo_NER.get_shape_of_string', 'ELMo_NER.get_shape_of_string', (['src'], {}), '(src)\n', (71607, 71612), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((71738, 71771), 'deep_ner.elmo_ner.ELMo_NER.get_shape_of_string', 'ELMo_NER.get_shape_of_string', (['src'], {}), '(src)\n', (71766, 71771), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((71897, 71930), 'deep_ner.elmo_ner.ELMo_NER.get_shape_of_string', 'ELMo_NER.get_shape_of_string', (['src'], {}), '(src)\n', (71925, 71930), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((72050, 72083), 'deep_ner.elmo_ner.ELMo_NER.get_shape_of_string', 'ELMo_NER.get_shape_of_string', (['src'], {}), '(src)\n', (72078, 72083), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((72199, 72232), 'deep_ner.elmo_ner.ELMo_NER.get_shape_of_string', 'ELMo_NER.get_shape_of_string', (['src'], {}), '(src)\n', (72227, 72232), False, 'from deep_ner.elmo_ner import ELMo_NER\n'), ((361, 386), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (376, 386), False, 'import os\n'), ((1010, 1040), 'os.remove', 'os.remove', (['self.temp_file_name'], {}), '(self.temp_file_name)\n', (1019, 1040), False, 'import os\n')] |
from sys import stdin
from copy import copy, deepcopy
import time
import argparse
import numpy
def parseFileInput(in_file, cnf):
    """Parse DIMACS CNF lines from *in_file*, appending clauses to *cnf*.

    Comment lines ('c ...') and the problem line ('p ...') are skipped;
    a literal of 0 terminates the current clause. Returns the extended cnf.
    """
    cnf.append(list())
    for raw_line in in_file:
        fields = raw_line.split()
        # ignore blank lines, comments and the problem header
        if not fields or fields[0] in ("p", "c"):
            continue
        for field in fields:
            literal = int(field)
            if literal:
                cnf[-1].append(literal)
            else:
                cnf.append(list())  # 0 closes the clause; start a new one
    cnf.pop()  # drop the trailing empty clause opened by the final 0
    return cnf
def transformSudoku(in_file):
    """Translate a sudoku puzzle (one character per cell, '.' = empty cell,
    cells read left-to-right, top-to-bottom) into unit clauses in sudoku.txt.

    Each given digit d at (row, col) is written as the literal "<row><col><d> 0".
    Bug fix: row/col were re-initialised to 1 inside the loop, so every given
    digit was emitted as cell (1, 1); they are now initialised once.
    """
    with open("sudoku.txt", 'w') as out:  # `with` also replaces the old manual close
        row = 1
        col = 1
        for cell in in_file:
            if cell != '.':
                out.write(str(row) + str(col) + str(cell) + ' 0\n')
            col += 1
            if col == 10:
                row += 1
                col = 1
                if row == 10:
                    break
def parse(files):
    """Build a CNF clause list from one or two input files.

    One file: either a DIMACS CNF (first line contains 'p') or a raw sudoku
    puzzle, which is first converted to unit clauses in sudoku.txt.
    Two files (rules + puzzle): both are parsed and concatenated.
    Returns (ready_to_solve, cnf).
    """
    with open(files[0], "r") as first:
        if len(files) == 1:
            header = first.readline()
            if "p" in header:
                # DIMACS problem line seen: parse the rest as clauses
                return True, parseFileInput(first, list())
            # otherwise treat it as a sudoku puzzle and pre-process it
            transformSudoku(first)
            return False, list()
        with open(files[1], "r") as second:
            # second file is parsed first so its clauses precede the first's
            cnf = parseFileInput(first, parseFileInput(second, list()))
    return True, cnf
def assignValue(cnf, lit):
    """Assign literal `lit` to True: drop clauses it satisfies and strip the
    opposite literal from the remaining clauses.

    Also accumulates a 2^-|clause| weight for the variable in the
    module-level `variables` dict and records positive assignments in the
    module-level `solution` list. Mutates and returns `cnf`.
    """
    for clause in copy(cnf):  # shallow copy so removal during iteration is safe
        if lit in clause:
            cnf.remove(clause)  # clause satisfied by the assignment
            variables[abs(lit)] = variables.get(abs(lit), 0) + 2 ** -len(clause)
        if -lit in clause:
            clause.remove(-lit)  # falsified literal can no longer satisfy the clause
            variables[abs(lit)] = variables.get(abs(lit), 0) + 2 ** -len(clause)
    # NOTE(review): `lit` is appended even when no clause changed, and again on
    # every backtracked branch, so `solution` may contain duplicates — verify.
    if lit > 0: solution.append(lit)
    return cnf
def unitPropagation(cnf):
    """Satisfy at most one unit clause.

    Returns (cnf, found) where `found` tells the caller whether a unit
    clause existed, so it can re-apply the rule until a fixed point.
    """
    for clause in cnf:
        if len(clause) != 1:
            continue
        # a one-literal clause forces that literal to be True
        return assignValue(cnf, clause[0]), True
    return cnf, False
def pureLiteralElimination(cnf):
    """Eliminate at most one pure literal (one whose negation never occurs).

    Scans clauses in order and assigns the first pure literal found.
    Returns (cnf, found) so the caller can iterate to a fixed point.
    """
    for clause in cnf:
        for candidate in clause:
            # candidate is pure if -candidate appears in no clause at all
            if not any(-candidate in other for other in cnf):
                return assignValue(cnf, candidate), True
    return cnf, False
def printSudoku(literals):
    """Decode positive literals of the form <row><col><digit> and print the
    resulting 9x9 grid, one row (as a Python list) per line."""
    grid = [[0] * 9 for _ in range(9)]
    for lit in literals:
        text = str(lit)
        r = int(text[:1]) - 1
        c = int(text[1:2]) - 1
        grid[r][c] = int(text[2:3])
    for row in grid:
        print(row)
def createOutFile(filename, literals):
    """Write the solution literals to `filename`, one DIMACS unit line each.

    Bug fix: the old code ended with `file.close` (no parentheses), so the
    file handle was never explicitly closed; a `with` block guarantees it.
    """
    with open(filename, "w") as out:
        for lit in literals:
            out.write(str(lit) + ' 0\n')
def chooseLit(cnf):
    """Pick the next branching literal according to the module-level
    `heuristic` setting, incrementing the global `splits` counter.

    1: first literal of the first clause; 2: MOM heuristic;
    3: Jeroslow-Wang heuristic.
    """
    globals()['splits'] += 1
    if heuristic == 1: return cnf[0][0]
    if heuristic == 2: return MOM(cnf)
    if heuristic == 3: return JW(cnf, solution)
def DP(cnf):
    """DPLL satisfiability check for a CNF clause list.

    Applies unit propagation and pure-literal elimination to a fixed point,
    then branches on a heuristic-chosen literal. Returns True iff satisfiable.
    """
    cnf, unit_clause = unitPropagation(cnf)  # Satisfy unit clauses
    while unit_clause: cnf, unit_clause = unitPropagation(cnf)
    cnf, pure_rule = pureLiteralElimination(cnf)  # Remove pure literals
    # Bug fix: this loop previously re-ran unitPropagation(), so only the
    # first pure literal was ever eliminated; iterate the pure-literal rule
    # itself until it finds nothing more.
    while pure_rule: cnf, pure_rule = pureLiteralElimination(cnf)
    if len(cnf) == 0:
        return True  # every clause satisfied
    if [] in cnf: return False  # empty clause -> conflict under this assignment
    lit = chooseLit(cnf)
    # Bug fix: both branches previously shared one mutable formula, so the
    # second branch started from the first branch's already-reduced clauses.
    # Give each branch its own deep copy of the current formula.
    if DP(assignValue(deepcopy(cnf), lit)): return True
    return DP(assignValue(deepcopy(cnf), -lit))
def MOM(cnf):
    """Maximum Occurrences in clauses of Minimum size branching heuristic.

    Counts literal occurrences in the smallest clauses and scores each
    candidate with f(x)*f(-x)*2^k + f(x)*f(-x), where `k` is the
    module-level tuning constant. Returns the best-scoring literal.

    Bug fix: the old code relied on the counting loop's leaked `lit`
    variable as an implicit fallback when every score was 0; the fallback
    is now explicit (and the unused `bestValue` was removed).
    """
    minClause = min(len(clause) for clause in cnf)
    count = dict()
    fallback = None  # last literal seen in a minimal clause, as before
    for clause in cnf:
        if len(clause) == minClause:
            for lit in clause:
                count[lit] = count.get(lit, 0) + 1
                fallback = lit
    best = fallback
    maxFunction = 0
    for val in count.keys():  # insertion order, same scan order as before
        function = (count[val] * count.get(-val, 0)) * 2 ** k + count[val] * count.get(-val, 0)
        if function > maxFunction:
            maxFunction = function
            best = val
    return best
def JW(cnf, literals):
    """Jeroslow-Wang branching heuristic: weight each variable by the sum of
    2^-|clause| over the clauses it appears in and return the heaviest one.

    `literals` is accepted by the call site but never read here.
    """
    # NOTE(review): `count` aliases the module-level `variables` dict, so the
    # weights added below persist across calls — confirm that accumulation is
    # intended rather than a missing copy.
    count = variables
    for clause in cnf:
        for lit in clause: count[abs(lit)] = count.get(abs(lit), 0) + 2 ** -len(clause)
    lit = max(variables, key=count.get)
    return lit
def main():
    """Run one solver invocation: time DP on the module-level `cnf` and log
    runtime, split count and the current MOM constant `k` to the results file.
    """
    start_time = time.time()
    # `sat` is only consumed by the commented-out diagnostics below
    sat = DP(cnf)
    temp = time.time() - start_time
    # print("--- %s seconds ---" % (time.time() - start_time))
    ris.write(str(temp) + ' seconds ' + str(splits) + ' splits ' + str(k) + ' k value ' + '\n')
    # printSudoku(solution)
    # if sat == True: print("Satisfiable")
    # elif sat == False: print("Unsatisfiable")
def parseArguments(argv=None):
    """Parse command-line options.

    -S: heuristic selector (int); positional `files`: one or more input files.
    `argv` defaults to sys.argv[1:]; pass an explicit list for testing
    (backward-compatible addition). Returns (S, files).

    Bug fix: the parser was constructed twice; the first instance was
    immediately discarded.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-S", type=int)
    parser.add_argument("files", nargs='+')
    args = parser.parse_args(argv)
    return args.S, args.files
# --- experiment driver: solve 40 hard sudoku instances, sweeping MOM's k ---
solution, variables = list(), dict()  # positive assignments / literal weights
heuristic = 2  # 1 = first literal, 2 = MOM, 3 = Jeroslow-Wang
ris = open("ResultsHard.txt", 'w')  # results log (runtime / split counts)
k = 0  # MOM tuning constant, swept below
for i in range(1, 41):
    globals()['splits'] = 0  # reset the branching counter per instance
    print(i)
    files = ['sudoku-rules.txt', 'Hard%s.txt' % (i)]
    execute, cnf = parse(files)
    if execute:
        # NOTE(review): this inner loop shadows the outer `i`; the sweep still
        # runs but the outer loop variable is clobbered — verify intent.
        for i in numpy.arange(0, 4, 0.5):
            k = i
            main()
ris.close()
| [
"copy.deepcopy",
"argparse.ArgumentParser",
"copy.copy",
"time.time",
"numpy.arange"
] | [((1349, 1358), 'copy.copy', 'copy', (['cnf'], {}), '(cnf)\n', (1353, 1358), False, 'from copy import copy, deepcopy\n'), ((3357, 3370), 'copy.deepcopy', 'deepcopy', (['cnf'], {}), '(cnf)\n', (3365, 3370), False, 'from copy import copy, deepcopy\n'), ((4252, 4263), 'time.time', 'time.time', ([], {}), '()\n', (4261, 4263), False, 'import time\n'), ((4633, 4658), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4656, 4658), False, 'import argparse\n'), ((4672, 4697), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4695, 4697), False, 'import argparse\n'), ((4293, 4304), 'time.time', 'time.time', ([], {}), '()\n', (4302, 4304), False, 'import time\n'), ((5119, 5142), 'numpy.arange', 'numpy.arange', (['(0)', '(4)', '(0.5)'], {}), '(0, 4, 0.5)\n', (5131, 5142), False, 'import numpy\n')] |
# -*- coding:UTF8 -*-
import numpy as np
import sys,os
import math
from tools.loadData import load_array
from tools import ulti
path = os.getcwd()  # working directory; used below to locate the example input file
def Schmidt_procedure(mat, m, n):
    """Classical Gram-Schmidt QR factorisation of an m x n matrix.

    Returns (Q, R) with Q (m x n) holding orthonormal columns and R (n x n)
    upper triangular, such that mat = Q @ R. Requires the columns of `mat`
    to be linearly independent (float dtype expected).
    """
    Q = np.zeros((m, n))
    R = np.zeros((n, n))
    for col in range(n):
        vec = mat[:, col].copy()
        # projection coefficients onto the already-built orthonormal columns
        for prev in range(col):
            R[prev, col] = np.matmul(Q[:, prev], mat[:, col])
        # subtract those projections (no-op for the first column)
        for prev in range(col):
            vec -= R[prev, col] * Q[:, prev]
        # normalise the residual to obtain the next orthonormal column
        R[col, col] = math.sqrt(np.sum(np.square(vec)))
        Q[:, col] = vec / R[col, col]
    return Q, R
if __name__ == "__main__":
    # load the example matrix and run Gram-Schmidt QR on it
    input_file=path+'/data/example2.txt'
    # output_file=''
    matrix, m, n = load_array(input_file,"QR")
    ulti.print_array(matrix, m, n )
    if matrix.size ==0:
        print("input Error!")
        sys.exit()
    # First check whether the matrix columns are linearly independent
    mat_rank = ulti.rank_of_matrix(matrix,m,n)
    if mat_rank<n:
        print("Error, the matrix with linearly dependent columns Can Not be uniquely factored as A=QR!\n\n")
        print("123")
        sys.exit()
    Q,R = Schmidt_procedure(matrix,m,n)
    print("Q=")
    ulti.print_array(Q,m,n)
    print("R=")
    ulti.print_array(R,n,n)
| [
"os.getcwd",
"numpy.square",
"numpy.zeros",
"tools.loadData.load_array",
"tools.ulti.print_array",
"numpy.matmul",
"tools.ulti.rank_of_matrix",
"sys.exit"
] | [((137, 148), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (146, 148), False, 'import sys, os\n'), ((218, 234), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (226, 234), True, 'import numpy as np\n'), ((242, 258), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (250, 258), True, 'import numpy as np\n'), ((991, 1019), 'tools.loadData.load_array', 'load_array', (['input_file', '"""QR"""'], {}), "(input_file, 'QR')\n", (1001, 1019), False, 'from tools.loadData import load_array\n'), ((1023, 1053), 'tools.ulti.print_array', 'ulti.print_array', (['matrix', 'm', 'n'], {}), '(matrix, m, n)\n', (1039, 1053), False, 'from tools import ulti\n'), ((1165, 1198), 'tools.ulti.rank_of_matrix', 'ulti.rank_of_matrix', (['matrix', 'm', 'n'], {}), '(matrix, m, n)\n', (1184, 1198), False, 'from tools import ulti\n'), ((1425, 1450), 'tools.ulti.print_array', 'ulti.print_array', (['Q', 'm', 'n'], {}), '(Q, m, n)\n', (1441, 1450), False, 'from tools import ulti\n'), ((1469, 1494), 'tools.ulti.print_array', 'ulti.print_array', (['R', 'n', 'n'], {}), '(R, n, n)\n', (1485, 1494), False, 'from tools import ulti\n'), ((1117, 1127), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1125, 1127), False, 'import sys, os\n'), ((1354, 1364), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1362, 1364), False, 'import sys, os\n'), ((626, 657), 'numpy.matmul', 'np.matmul', (['Q[:, j]', 'mat[:, col]'], {}), '(Q[:, j], mat[:, col])\n', (635, 657), True, 'import numpy as np\n'), ((430, 449), 'numpy.square', 'np.square', (['curr_col'], {}), '(curr_col)\n', (439, 449), True, 'import numpy as np\n'), ((790, 802), 'numpy.square', 'np.square', (['q'], {}), '(q)\n', (799, 802), True, 'import numpy as np\n')] |
import math
import numpy as np
from collections import defaultdict
from .common import beta_binomial_model, gamma_poission_model, requires_keys
@requires_keys('threads[].children')
def discussion_score(asset, k=1, theta=2):
    """
    description:
        en: Estimated number of comments this asset will get.
        de: Geschätzte Zahl der Kommentare dieser Vermögenswert erhalten.
    type: float
    valid: nonnegative
    """
    # per-thread activity proxy: widest fan-out times deepest reply chain
    X = np.array([_max_thread_width(t) * _max_thread_depth(t) for t in asset['threads']])
    n = len(X)
    k = np.sum(X) + k  # posterior shape: observed activity plus the prior k
    # NOTE(review): `t` is computed but never used — gamma_poission_model is
    # called with `theta`; confirm whether `t` was meant to be passed instead.
    t = theta/(theta*n + 1)
    return gamma_poission_model(X, n, k, theta, 0.05)
@requires_keys('threads[].children')
def diversity_score(asset, alpha=2, beta=2):
    """
    description:
        en: Probability that a new reply would be from a new user.
        de: Wahrscheinlichkeit, dass eine neue Antwort würde von einem neuen Benutzer sein.
    type: float
    valid: probability
    """
    # X collects distinct participant ids, n counts comments across all threads
    X = set()
    n = 0
    for t in asset['threads']:
        users, n_comments = _unique_participants(t)
        X = X | users
        n += n_comments
    # successes = distinct users, trials = comments, in a beta-binomial model
    y = len(X)
    return beta_binomial_model(y, n, alpha, beta, 0.05)
def _max_thread_depth(thread):
"""compute the length deepest branch of the thread"""
if not thread['children']:
return 1
return 1 + max([_max_thread_depth(reply) for reply in thread['children']])
def _max_thread_width(thread):
"""compute the widest breadth of the thread,
that is the max number of replies a comment in the thread has received"""
if not thread['children']:
return 0
return max(
max([_max_thread_width(reply) for reply in thread['children']]),
len(thread['children'])
)
def _count_replies(thread):
return 1 + sum(_count_replies(r) for r in thread['children'])
def _unique_participants(thread):
"""count unique participants and number of comments in a thread"""
users = set([thread['user_id']])
n_replies = 1 + len(thread['children'])
for reply in thread['children']:
r_users, r_replies = _unique_participants(reply)
n_replies += r_replies
users = users | r_users
return users, n_replies
def _reconstruct_threads(asset):
"""reconstruct threads structure from a flat list of comments"""
id = asset['_id']
parents = defaultdict(list)
for c in asset['comments']:
p_id = c['parent_id']
if isinstance(p_id, float) and math.isnan(p_id):
p_id = id
parents[p_id].append(c)
threads = []
for top_level_parent in sorted(parents[id], key=lambda p: p['date_created']):
threads.append(_reconstruct_thread(top_level_parent, parents))
asset['threads'] = threads
return asset
def _reconstruct_thread(comment, parents):
"""recursively reconstruct a thread from comments"""
id = comment['_id']
thread = {
'id': id,
'user_id': comment['user_id'],
'children': []
}
children = parents[id]
for reply in sorted(children, key=lambda c: c['date_created']):
thread['children'].append(_reconstruct_thread(reply, parents))
return thread
| [
"collections.defaultdict",
"math.isnan",
"numpy.sum"
] | [((2347, 2364), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2358, 2364), False, 'from collections import defaultdict\n'), ((548, 557), 'numpy.sum', 'np.sum', (['X'], {}), '(X)\n', (554, 557), True, 'import numpy as np\n'), ((2466, 2482), 'math.isnan', 'math.isnan', (['p_id'], {}), '(p_id)\n', (2476, 2482), False, 'import math\n')] |
import potential
import wavefunction
import numpy as np
import pytest
import random
def test_delta_potential():
    """A 1D delta well supports exactly one bound state, rejects any excited
    level index, and its eigenfunction is normalised for every depth."""
    grid = np.linspace(-50, 50, 40000)
    for depth in np.linspace(0.1, 10, 10):
        well = potential.DeltaPotential1D(depth)
        assert well.get_delta_depth() == depth
        assert well.get_number_of_levels() == 1
        with pytest.raises(ValueError):
            well.get_eigenenergy(random.randint(1, 100))
        with pytest.raises(ValueError):
            well.get_eigenfunction(random.randint(1, 100))
        values = well.get_eigenfunction()(grid)
        np.testing.assert_almost_equal(wavefunction.norm(grid, values), 1.0, decimal=4,
                                       err_msg=f"Norm is not 1 for depth {depth}")
def test_quadratic_potential():
    """Harmonic oscillator: spectrum E_l = f*(2l+1)/2, normalised
    eigenfunctions, and the analytic Gaussian ground state."""
    frequencies = [0.1, 1.0, 7.5]
    x = np.linspace(-50, 50, 40000)
    levels = range(20)
    for f in frequencies:
        v = potential.QuadraticPotential1D(f)
        assert(v.get_frequency() == f)
        # the oscillator has infinitely many levels, so the query must raise
        with pytest.raises(Exception):
            v.get_number_of_levels()
        for l in levels:
            e = v.get_eigenenergy(l)
            np.testing.assert_almost_equal(e, (2 * l + 1) * f * 0.5)
            psi = v.get_eigenfunction(l)
            psi_value = psi(x)
            np.testing.assert_almost_equal(wavefunction.norm(x, psi_value), 1.0,
                                           err_msg=f'Norm is not 1 for frequency {f}, level {l}')
        # ground state must match (f/pi)^(1/4) * exp(-f x^2 / 2)
        psi0_value = v.get_eigenfunction()(x)
        psi0_expected = (f / np.pi) ** 0.25 * np.exp(- 0.5 * f * x ** 2)
        np.testing.assert_allclose(psi0_value, psi0_expected)
def test_quadratic_orthogonality():
    """Harmonic-oscillator eigenfunctions at distinct levels must be
    mutually orthogonal for every tested frequency."""
    grid = np.linspace(-50, 50, 40000)
    levels = range(10)
    for freq in [0.1, 1.0, 7.5]:
        osc = potential.QuadraticPotential1D(freq)
        for low in levels:
            for high in levels[low + 1:]:
                first = osc.get_eigenfunction(low)(grid)
                second = osc.get_eigenfunction(high)(grid)
                np.testing.assert_almost_equal(
                    wavefunction.correlation(grid, first, second), 0.0,
                    err_msg=f'Functions for levels {low} and {high} are not orthogonal '
                            f'for frequency {freq}')
def test_uniform_field():
    """Uniform field: no bound levels, potential -amp*x (amp may be a
    constant or a function of time), and correct composition with a base
    potential (quadratic or delta)."""
    amps = [1.0, -2.0, lambda t: 0.5 * t]  # constant and time-dependent amplitudes
    t = 3.0
    x = np.linspace(-10, 10, 1000)
    for amp in amps:
        v = potential.UniformField1D(amp)
        assert(v.get_number_of_levels() == 0)
        # a bare uniform field has no eigenstates, so both queries must raise
        with pytest.raises(ValueError):
            v.get_eigenfunction()
        with pytest.raises(ValueError):
            v.get_eigenenergy()
        if callable(amp):
            np.testing.assert_allclose(-amp(t) * x, v.get_potential()(t, x))
        else:
            np.testing.assert_allclose(-amp * x, v.get_potential()(x))
        assert(v.get_delta_depth() == 0.0)
        # combined with a quadratic base potential: -amp*x + x^2/2
        v = potential.UniformField1D(amp, potential=potential.QuadraticPotential1D(1.0))
        if callable(amp):
            value1 = - amp(t) * x + 0.5 * x ** 2
            value2 = v.get_potential()(t, x)
        else:
            value1 = - amp * x + 0.5 * x ** 2
            value2 = v.get_potential()(x)
        np.testing.assert_allclose(value1, value2)
        # combined with a delta well: field term only, but depth is forwarded
        v = potential.UniformField1D(amp, potential=potential.DeltaPotential1D(1.0))
        if callable(amp):
            np.testing.assert_allclose(-amp(t) * x, v.get_potential()(t, x))
        else:
            np.testing.assert_allclose(-amp * x, v.get_potential()(x))
        assert(v.get_delta_depth() == 1.0)
def test_square_potential():
    """Finite square well: level counts match the table below for every
    (depth, width) pair, energies are sorted within (-V0, 0), out-of-range
    level queries raise, and eigenfunctions are normalised."""
    widths = [1.0, 0.5, 2.0]
    depths = [1.0, 5.0, 10.0]
    x = np.linspace(-150, 150, 3000)
    # expected number of bound states, keyed by (width, depth)
    expected_levels = {
        (1.0, 1.0): 1,
        (0.5, 1.0): 1,
        (2.0, 1.0): 2,
        (1.0, 5.0): 3,
        (0.5, 5.0): 2,
        (2.0, 5.0): 5,
        (1.0, 10.0): 3,
        (0.5, 10.0): 2,
        (2.0, 10.0): 6
    }
    from itertools import product
    for V0, a in product(depths, widths):
        v = potential.SquarePotential1D(V0, a)
        assert v.get_depth() == V0, f"Depth {v.get_depth()} is not {V0}"
        assert v.get_width() == a, f"Width {v.get_width()} is not {a}"
        assert v.get_delta_depth() == 0.0, f"Delta depth {v.get_delta_depth()} is non-zero"
        max_levels = v.get_number_of_levels()
        assert max_levels == expected_levels[a, V0], f"Max levels {max_levels}, expected {expected_levels[a, V0]}"
        # the first invalid level index must be rejected
        with pytest.raises(ValueError):
            v.get_eigenenergy(max_levels)
        with pytest.raises(ValueError):
            v.get_eigenfunction(max_levels)
        # -V0 inside the well, 0 outside of it
        assert v.get_potential()(0.0) == -V0
        assert v.get_potential()(2 * a) == 0.0
        assert v.get_potential()(-2 * a) == 0.0
        energies = np.array([v.get_eigenenergy(i) for i in range(max_levels)])
        np.testing.assert_equal(energies, np.sort(energies), err_msg=f"Energies aren't sorted for V0={V0}, a={a}")
        assert np.all(energies < 0.0), f"Positive energies for V0={V0}, a={a}"
        assert np.all(energies > -V0), f"Too low energies for V0={V0}, a={a}"
        for i in range(max_levels):
            psi = v.get_eigenfunction(i)(x)
            np.testing.assert_almost_equal(wavefunction.norm(x, psi), 1.0, decimal=4,
                                           err_msg=f"Eigenfunction {i} norm is incorrect for V0={V0}, a={a}")
def test_square_potential_orthogonality():
    """Bound states of a finite square well must be pairwise orthogonal."""
    grid = np.linspace(-15, 15, 3000)
    for depth, width in [(10.0, 1.0), (5.0, 2.0)]:
        well = potential.SquarePotential1D(depth, width)
        assert well.get_depth() == depth, f"Depth {well.get_depth()} is not {depth}"
        assert well.get_width() == width, f"Width {well.get_width()} is not {width}"
        states = [well.get_eigenfunction(lvl)(grid)
                  for lvl in range(well.get_number_of_levels())]
        for i in range(len(states)):
            for j in range(i + 1, len(states)):
                np.testing.assert_almost_equal(
                    wavefunction.correlation(grid, states[i], states[j]), 0.0,
                    err_msg="Non-orthogonal eigenfunctions for V0={V0}, a={a}")
def test_coulomb_potential():
x = np.linspace(-30, 30, 201)
xx, yy, zz = np.meshgrid(x, x, x, indexing='ij')
r = (xx, yy, zz)
levels = [
(1, 0, 0),
(2, 0, 0),
(2, 1, 1),
(2, 1, -1),
(3, 2, -1)
]
v = potential.CoulombPotential()
psis = []
for l in levels:
e = v.get_eigenenergy(*l)
np.testing.assert_allclose(e, - 0.5 / l[0] ** 2)
psi = v.get_eigenfunction(*l)(*r)
psis.append(psi)
np.testing.assert_array_almost_equal(wavefunction.norm(r, psi), 1.0, decimal=3,
err_msg=f"Wavefunction norm for level {l} is not unity")
from itertools import combinations
for psi1, psi2 in combinations(psis, 2):
np.testing.assert_allclose(wavefunction.correlation(r, psi1, psi2), 0.0, atol=0.001,
err_msg=f"Non-orthogonal wavefunctions") | [
"numpy.meshgrid",
"wavefunction.correlation",
"random.randint",
"potential.QuadraticPotential1D",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_allclose",
"potential.UniformField1D",
"itertools.combinations",
"pytest.raises",
"numpy.sort",
"potential.DeltaPotential1D",
"numpy.lins... | [((122, 149), 'numpy.linspace', 'np.linspace', (['(-50)', '(50)', '(40000)'], {}), '(-50, 50, 40000)\n', (133, 149), True, 'import numpy as np\n'), ((163, 187), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)', '(10)'], {}), '(0.1, 10, 10)\n', (174, 187), True, 'import numpy as np\n'), ((809, 836), 'numpy.linspace', 'np.linspace', (['(-50)', '(50)', '(40000)'], {}), '(-50, 50, 40000)\n', (820, 836), True, 'import numpy as np\n'), ((1697, 1724), 'numpy.linspace', 'np.linspace', (['(-50)', '(50)', '(40000)'], {}), '(-50, 50, 40000)\n', (1708, 1724), True, 'import numpy as np\n'), ((2357, 2383), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(1000)'], {}), '(-10, 10, 1000)\n', (2368, 2383), True, 'import numpy as np\n'), ((3652, 3680), 'numpy.linspace', 'np.linspace', (['(-150)', '(150)', '(3000)'], {}), '(-150, 150, 3000)\n', (3663, 3680), True, 'import numpy as np\n'), ((3973, 3996), 'itertools.product', 'product', (['depths', 'widths'], {}), '(depths, widths)\n', (3980, 3996), False, 'from itertools import product\n'), ((5521, 5547), 'numpy.linspace', 'np.linspace', (['(-15)', '(15)', '(3000)'], {}), '(-15, 15, 3000)\n', (5532, 5547), True, 'import numpy as np\n'), ((6144, 6169), 'numpy.linspace', 'np.linspace', (['(-30)', '(30)', '(201)'], {}), '(-30, 30, 201)\n', (6155, 6169), True, 'import numpy as np\n'), ((6187, 6222), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x', 'x'], {'indexing': '"""ij"""'}), "(x, x, x, indexing='ij')\n", (6198, 6222), True, 'import numpy as np\n'), ((6369, 6397), 'potential.CoulombPotential', 'potential.CoulombPotential', ([], {}), '()\n', (6395, 6397), False, 'import potential\n'), ((6846, 6867), 'itertools.combinations', 'combinations', (['psis', '(2)'], {}), '(psis, 2)\n', (6858, 6867), False, 'from itertools import combinations\n'), ((221, 250), 'potential.DeltaPotential1D', 'potential.DeltaPotential1D', (['d'], {}), '(d)\n', (247, 250), False, 'import potential\n'), ((899, 932), 
'potential.QuadraticPotential1D', 'potential.QuadraticPotential1D', (['f'], {}), '(f)\n', (929, 932), False, 'import potential\n'), ((1563, 1616), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['psi0_value', 'psi0_expected'], {}), '(psi0_value, psi0_expected)\n', (1589, 1616), True, 'import numpy as np\n'), ((1787, 1820), 'potential.QuadraticPotential1D', 'potential.QuadraticPotential1D', (['f'], {}), '(f)\n', (1817, 1820), False, 'import potential\n'), ((2418, 2447), 'potential.UniformField1D', 'potential.UniformField1D', (['amp'], {}), '(amp)\n', (2442, 2447), False, 'import potential\n'), ((3194, 3236), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['value1', 'value2'], {}), '(value1, value2)\n', (3220, 3236), True, 'import numpy as np\n'), ((4010, 4044), 'potential.SquarePotential1D', 'potential.SquarePotential1D', (['V0', 'a'], {}), '(V0, a)\n', (4037, 4044), False, 'import potential\n'), ((4961, 4983), 'numpy.all', 'np.all', (['(energies < 0.0)'], {}), '(energies < 0.0)\n', (4967, 4983), True, 'import numpy as np\n'), ((5040, 5062), 'numpy.all', 'np.all', (['(energies > -V0)'], {}), '(energies > -V0)\n', (5046, 5062), True, 'import numpy as np\n'), ((5599, 5633), 'potential.SquarePotential1D', 'potential.SquarePotential1D', (['V0', 'a'], {}), '(V0, a)\n', (5626, 5633), False, 'import potential\n'), ((5889, 5910), 'itertools.combinations', 'combinations', (['psis', '(2)'], {}), '(psis, 2)\n', (5901, 5910), False, 'from itertools import combinations\n'), ((6476, 6523), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['e', '(-0.5 / l[0] ** 2)'], {}), '(e, -0.5 / l[0] ** 2)\n', (6502, 6523), True, 'import numpy as np\n'), ((353, 378), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (366, 378), False, 'import pytest\n'), ((448, 473), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (461, 473), False, 'import pytest\n'), ((610, 635), 'wavefunction.norm', 
'wavefunction.norm', (['x', 'psi'], {}), '(x, psi)\n', (627, 635), False, 'import wavefunction\n'), ((986, 1010), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (999, 1010), False, 'import pytest\n'), ((1124, 1180), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['e', '((2 * l + 1) * f * 0.5)'], {}), '(e, (2 * l + 1) * f * 0.5)\n', (1154, 1180), True, 'import numpy as np\n'), ((1527, 1552), 'numpy.exp', 'np.exp', (['(-0.5 * f * x ** 2)'], {}), '(-0.5 * f * x ** 2)\n', (1533, 1552), True, 'import numpy as np\n'), ((2509, 2534), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2522, 2534), False, 'import pytest\n'), ((2583, 2608), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2596, 2608), False, 'import pytest\n'), ((4457, 4482), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4470, 4482), False, 'import pytest\n'), ((4539, 4564), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4552, 4564), False, 'import pytest\n'), ((4873, 4890), 'numpy.sort', 'np.sort', (['energies'], {}), '(energies)\n', (4880, 4890), True, 'import numpy as np\n'), ((6639, 6664), 'wavefunction.norm', 'wavefunction.norm', (['r', 'psi'], {}), '(r, psi)\n', (6656, 6664), False, 'import wavefunction\n'), ((6904, 6943), 'wavefunction.correlation', 'wavefunction.correlation', (['r', 'psi1', 'psi2'], {}), '(r, psi1, psi2)\n', (6928, 6943), False, 'import wavefunction\n'), ((410, 432), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (424, 432), False, 'import random\n'), ((507, 529), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (521, 529), False, 'import random\n'), ((1298, 1329), 'wavefunction.norm', 'wavefunction.norm', (['x', 'psi_value'], {}), '(x, psi_value)\n', (1315, 1329), False, 'import wavefunction\n'), ((2927, 2962), 'potential.QuadraticPotential1D', 'potential.QuadraticPotential1D', 
(['(1.0)'], {}), '(1.0)\n', (2957, 2962), False, 'import potential\n'), ((3290, 3321), 'potential.DeltaPotential1D', 'potential.DeltaPotential1D', (['(1.0)'], {}), '(1.0)\n', (3316, 3321), False, 'import potential\n'), ((5227, 5252), 'wavefunction.norm', 'wavefunction.norm', (['x', 'psi'], {}), '(x, psi)\n', (5244, 5252), False, 'import wavefunction\n'), ((5955, 5994), 'wavefunction.correlation', 'wavefunction.correlation', (['x', 'psi1', 'psi2'], {}), '(x, psi1, psi2)\n', (5979, 5994), False, 'import wavefunction\n'), ((2031, 2070), 'wavefunction.correlation', 'wavefunction.correlation', (['x', 'psi1', 'psi2'], {}), '(x, psi1, psi2)\n', (2055, 2070), False, 'import wavefunction\n')] |
#!/usr/bin/env python
# coding: utf-8
# ### Define all functions
# In[1]:
import cv2
import csv
import numpy as np
import os
# In[2]:
def getCSVRows(dataPath, skipHeader=False):
    """Read every row of `<dataPath>/driving_log.csv`.

    Pass `skipHeader=True` to drop the first (header) row.
    Returns a list of rows, each a list of string fields.
    """
    with open(dataPath + '/driving_log.csv') as log_file:
        reader = csv.reader(log_file)
        if skipHeader:
            next(reader, None)
        return list(reader)
# In[3]:
def findImages(dataPath):
    """
    Finds all the images needed for training on the path `dataPath`.
    Returns `([centerPaths], [leftPath], [rightPath], [measurements])`
    with the four lists guaranteed to stay the same length.
    """
    imgPath = dataPath + 'IMG/'
    lines = getCSVRows(dataPath)
    centerPaths = []
    leftPath = []
    rightPath = []
    measurements = []
    for line in lines:
        try:
            # Parse everything first so a bad row cannot leave the four lists
            # with different lengths (the old code appended the measurement
            # before validating the image paths, desynchronizing the lists,
            # and swallowed every exception with a bare `except:`).
            angle = float(line[3])
            center = imgPath + line[0].split('\\')[-1]
            left = imgPath + line[1].split('\\')[-1]
            right = imgPath + line[2].split('\\')[-1]
        except (ValueError, IndexError):
            # missing field or non-numeric steering angle in this row
            print("CSV may contain null")
            continue
        measurements.append(angle)
        centerPaths.append(center)
        leftPath.append(left)
        rightPath.append(right)
    return (centerPaths, leftPath, rightPath, measurements)
# In[4]:
def applyAngleCorrection(centerPath, leftPath, rightPath, measurements, angle_correction=0.2):
    """Merge centre/left/right camera image paths into one list and build the
    matching steering angles: left-camera images get +angle_correction and
    right-camera images -angle_correction relative to the centre angle.
    Returns (imagePaths, mod_measurements).
    """
    imagePaths = list(centerPath) + list(leftPath) + list(rightPath)
    mod_measurements = (
        list(measurements)
        + [angle + angle_correction for angle in measurements]
        + [angle - angle_correction for angle in measurements]
    )
    return (imagePaths, mod_measurements)
# In[5]:
import sklearn
def generator(samples, batch_size=128):
    """Endless Keras data generator.

    `samples` is a list of (imagePath, measurement) pairs. The samples are
    reshuffled each pass; every image is loaded as RGB and also yielded
    horizontally flipped with the negated steering angle, so each batch of
    `batch_size` samples yields 2 * batch_size training examples.
    """
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        samples = sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for imagePath, measurement in batch_samples:
                originalImage = cv2.imread(imagePath)
                # OpenCV loads BGR; convert so the model sees RGB frames
                image = cv2.cvtColor(originalImage, cv2.COLOR_BGR2RGB)
                images.append(image)
                angles.append(measurement)
                # Augment image by flipping it and negating the steering angle
                images.append(cv2.flip(image,1))
                angles.append(-measurement)
            # shuffle the assembled arrays so flipped pairs are not adjacent
            inputs = np.array(images)
            outputs = np.array(angles)
            shuffled = sklearn.utils.shuffle(inputs, outputs)
            yield shuffled[0], shuffled[1]
# In[6]:
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers import Conv2D, AveragePooling2D, Dropout
def getModel():
    """Build the steering-angle regression network.

    Architecture follows NVIDIA's end-to-end self-driving-car CNN:
    normalisation + cropping, six convolutional layers, then four
    fully-connected layers interleaved with dropout, ending in a single
    steering-angle output.
    """
    model = Sequential()
    # Preprocess: Normalize and zero mean variance
    model.add(Lambda(lambda x: (x / 255.0) - 0.5421, input_shape=(160,320,3)))
    # Trim image to keep only the road section (drop sky and car hood)
    model.add(Cropping2D(cropping=((60,20), (0,0))))
    # Modified NVIDIA Self Driving Cars Model
    model.add(Conv2D(filters=3, kernel_size=(5,5), strides=(1, 2), padding="valid", activation="relu"))
    model.add(Conv2D(filters=24, kernel_size=(5,5), strides=(1, 2), padding="valid", activation="relu"))
    model.add(Conv2D(filters=36, kernel_size=(5,5), strides=(2, 2), padding="valid", activation="relu"))
    model.add(Conv2D(filters=48, kernel_size=(5,5), strides=(2, 2), padding="valid", activation="relu"))
    model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(2, 2), padding="valid", activation="relu"))
    model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(2, 2), padding="valid", activation="relu"))
    # fully-connected head with dropout between layers to curb overfitting
    model.add(Flatten())
    model.add(Dense(1164))
    model.add(Dropout(0.5, seed=0))
    model.add(Dense(100))
    model.add(Dropout(0.5, seed=0))
    model.add(Dense(50))
    model.add(Dropout(0.5, seed=0))
    model.add(Dense(10))
    model.add(Dropout(0.5, seed=0))
    model.add(Dense(1))
    return model
# ### Reading Images
# In[7]:
# Load image paths + steering angles and apply the side-camera angle offset
centerPaths, leftPaths, rightPaths, measurements = findImages('data/')
imagePaths, measurements = applyAngleCorrection(centerPaths, leftPaths, rightPaths, measurements, angle_correction=0.2)
print('Total Images: {}'.format(len(imagePaths)))
print('Total Measurements: {}'.format(len(measurements)))
# ### Split Images using Data Generator
# In[8]:
BATCH_SIZE = 256
from sklearn.model_selection import train_test_split
# hold out 20% of the (path, angle) pairs for validation
samples = list(zip(imagePaths, measurements))
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print('Train samples: {}'.format(len(train_samples)))
print('Validation samples: {}'.format(len(validation_samples)))
train_generator = generator(train_samples, batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples, batch_size=BATCH_SIZE)
# ### Get Model Summary
# In[9]:
model = getModel()
model.summary()
# ### Train the model
# In[16]:
from math import ceil, exp
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
# Set Learning Rate Scheduler
def scheduler(epoch, lr):
    """Keep the learning rate unchanged for the first 5 epochs, then
    decay it exponentially by a factor exp(-0.1) per epoch."""
    if epoch >= 5:
        return lr * exp(-0.1)
    return lr
# Set checkpoint to save the best epoch
# Persist the full model of the epoch with the lowest validation loss
checkpoint_filepath = './tmp/checkpoint'
model_checkpoint_callback = ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=False,  # save architecture + weights, not weights only
    monitor='val_loss',
    mode='min',
    save_best_only=True)
# In[ ]:
# Compiling and training the model
# Mean-squared-error regression on the steering angle
model.compile(loss='mse', optimizer='adam')
# NOTE(review): fit_generator is deprecated in recent Keras (model.fit accepts
# generators directly) — confirm against the installed Keras version
history_object = model.fit_generator(train_generator, steps_per_epoch=ceil(len(train_samples)/BATCH_SIZE),
validation_data=validation_generator, validation_steps=ceil(len(validation_samples)/BATCH_SIZE),
epochs=10, verbose=1, callbacks=[LearningRateScheduler(scheduler, verbose=1), model_checkpoint_callback])
model.save('model.h5')
# In[12]:
# Report training/validation loss history
print(history_object.history.keys())
print('Loss')
print(history_object.history['loss'])
print('Validation Loss')
print(history_object.history['val_loss'])
# In[13]:
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')  # notebook-only: inline plots
# Training vs validation loss per epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
# In[14]:
# Learning-rate schedule actually applied (recorded by LearningRateScheduler)
plt.plot(history_object.history['lr'])
plt.title('Learning Rate over Epoch')
plt.ylabel('Learning Rate')
plt.xlabel('epoch')
plt.show()
# In[ ]:
| [
"matplotlib.pyplot.title",
"csv.reader",
"keras.layers.Cropping2D",
"sklearn.model_selection.train_test_split",
"keras.callbacks.LearningRateScheduler",
"cv2.cvtColor",
"keras.layers.Flatten",
"matplotlib.pyplot.show",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"matplotlib.pyplot... | [((5196, 5236), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (5212, 5236), False, 'from sklearn.model_selection import train_test_split\n'), ((5946, 6073), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'checkpoint_filepath', 'save_weights_only': '(False)', 'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_best_only': '(True)'}), "(filepath=checkpoint_filepath, save_weights_only=False,\n monitor='val_loss', mode='min', save_best_only=True)\n", (5961, 6073), False, 'from keras.callbacks import LearningRateScheduler, ModelCheckpoint\n'), ((6822, 6862), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (6830, 6862), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6907), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (6871, 6907), True, 'import matplotlib.pyplot as plt\n'), ((6908, 6950), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (6917, 6950), True, 'import matplotlib.pyplot as plt\n'), ((6951, 6988), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (6961, 6988), True, 'import matplotlib.pyplot as plt\n'), ((6989, 7008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6999, 7008), True, 'import matplotlib.pyplot as plt\n'), ((7009, 7074), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (7019, 7074), True, 'import matplotlib.pyplot as plt\n'), ((7075, 7085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7083, 7085), True, 'import matplotlib.pyplot as plt\n'), 
((7100, 7138), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['lr']"], {}), "(history_object.history['lr'])\n", (7108, 7138), True, 'import matplotlib.pyplot as plt\n'), ((7139, 7176), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning Rate over Epoch"""'], {}), "('Learning Rate over Epoch')\n", (7148, 7176), True, 'import matplotlib.pyplot as plt\n'), ((7177, 7204), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Learning Rate"""'], {}), "('Learning Rate')\n", (7187, 7204), True, 'import matplotlib.pyplot as plt\n'), ((7205, 7224), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (7215, 7224), True, 'import matplotlib.pyplot as plt\n'), ((7225, 7235), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7233, 7235), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3430), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3428, 3430), False, 'from keras.models import Sequential\n'), ((420, 439), 'csv.reader', 'csv.reader', (['csvFile'], {}), '(csvFile)\n', (430, 439), False, 'import csv\n'), ((2296, 2326), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['samples'], {}), '(samples)\n', (2317, 2326), False, 'import sklearn\n'), ((3501, 3564), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5421)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5421, input_shape=(160, 320, 3))\n', (3507, 3564), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((3622, 3661), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((60, 20), (0, 0))'}), '(cropping=((60, 20), (0, 0)))\n', (3632, 3661), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((3726, 3819), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(3)', 'kernel_size': '(5, 5)', 'strides': '(1, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=3, kernel_size=(5, 5), strides=(1, 2), padding='valid',\n activation='relu')\n", (3732, 3819), False, 'from 
keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((3830, 3924), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(24)', 'kernel_size': '(5, 5)', 'strides': '(1, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=24, kernel_size=(5, 5), strides=(1, 2), padding='valid',\n activation='relu')\n", (3836, 3924), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((3935, 4029), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(36)', 'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=36, kernel_size=(5, 5), strides=(2, 2), padding='valid',\n activation='relu')\n", (3941, 4029), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4040, 4134), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(48)', 'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=48, kernel_size=(5, 5), strides=(2, 2), padding='valid',\n activation='relu')\n", (4046, 4134), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4145, 4239), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='valid',\n activation='relu')\n", (4151, 4239), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4250, 4344), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='valid',\n activation='relu')\n", (4256, 4344), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4355, 4364), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4362, 4364), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4380, 4391), 
'keras.layers.Dense', 'Dense', (['(1164)'], {}), '(1164)\n', (4385, 4391), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4407, 4427), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {'seed': '(0)'}), '(0.5, seed=0)\n', (4414, 4427), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4443, 4453), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (4448, 4453), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4469, 4489), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {'seed': '(0)'}), '(0.5, seed=0)\n', (4476, 4489), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4505, 4514), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (4510, 4514), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4530, 4550), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {'seed': '(0)'}), '(0.5, seed=0)\n', (4537, 4550), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4566, 4575), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (4571, 4575), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4591, 4611), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {'seed': '(0)'}), '(0.5, seed=0)\n', (4598, 4611), False, 'from keras.layers import Conv2D, AveragePooling2D, Dropout\n'), ((4627, 4635), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4632, 4635), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2988, 3004), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2996, 3004), True, 'import numpy as np\n'), ((3027, 3043), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (3035, 3043), True, 'import numpy as np\n'), ((3067, 3105), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (3088, 3105), False, 'import sklearn\n'), ((5826, 5835), 'math.exp', 'exp', (['(-0.1)'], {}), '(-0.1)\n', (5829, 5835), 
False, 'from math import ceil, exp\n'), ((6455, 6498), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['scheduler'], {'verbose': '(1)'}), '(scheduler, verbose=1)\n', (6476, 6498), False, 'from keras.callbacks import LearningRateScheduler, ModelCheckpoint\n'), ((2584, 2605), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (2594, 2605), False, 'import cv2\n'), ((2630, 2676), 'cv2.cvtColor', 'cv2.cvtColor', (['originalImage', 'cv2.COLOR_BGR2RGB'], {}), '(originalImage, cv2.COLOR_BGR2RGB)\n', (2642, 2676), False, 'import cv2\n'), ((2848, 2866), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2856, 2866), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.integrate
import scipy.special
import collections
import fisx
import logging
from contextlib import contextmanager
from ..utils import instance
from ..utils import cache
from ..utils import listtools
from ..math import fit1d
from ..math.utils import weightedsum
from . import xrayspectrum
from ..simulation.classfactory import with_metaclass
from ..simulation import xrmc
from ..simulation import xmimsim
from ..math import noisepropagation
from . import pymca
from . import element
from ..materials import compoundfromdb
from ..materials import mixture
from ..materials import types
from ..utils.copyable import Copyable
from .utils import reshape_spectrum_lines
from ..io import localfs
from ..io import spe
logger = logging.getLogger(__name__)
class Layer(Copyable):
    """One layer of a :class:`Multilayer`: a material with a thickness."""

    def __init__(self, material=None, thickness=None, fixed=False, parent=None):
        """
        Args:
            material(compound|mixture|str): material composition
            thickness(num): thickness in cm
            fixed(bool): thickness and composition are fixed
            parent(Multilayer): part of this ensemble
        """
        # A string is looked up in the compound database
        if instance.isstring(material):
            resolved = compoundfromdb.factory(material)
            if resolved is None:
                raise RuntimeError("Invalid material {}".format(material))
            material = resolved
        self.material = material
        self.thickness = thickness
        self.fixed = fixed
        self.parent = parent

    def __getstate__(self):
        # parent is not pickled; the owning Multilayer re-attaches it
        state = {"material": self.material}
        state["thickness"] = self.thickness
        state["fixed"] = self.fixed
        return state

    def __setstate__(self, state):
        self.material = state["material"]
        self.thickness = state["thickness"]
        self.fixed = state["fixed"]

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (
            self.material == other.material
            and self.thickness == other.thickness
            and self.fixed == other.fixed
        )

    def __ne__(self, other):
        return not (self == other)

    def __getattr__(self, attr):
        # Delegate unknown attributes to the wrapped material
        return getattr(self.material, attr)

    def __str__(self):
        return "{} um ({})".format(self.thickness * 1e4, self.material)

    @property
    def xraythicknessin(self):
        """Path length (cm) along the incoming beam direction."""
        return self.thickness / self.parent.geometry.cosnormin

    @xraythicknessin.setter
    def xraythicknessin(self, value):
        self.thickness = value * self.parent.geometry.cosnormin

    @property
    def xraythicknessout(self):
        """Path length (cm) along the outgoing beam direction."""
        return self.thickness / self.parent.geometry.cosnormout

    @xraythicknessout.setter
    def xraythicknessout(self, value):
        self.thickness = value * self.parent.geometry.cosnormout

    def absorbance(self, energy, weights=None, out=False, **kwargs):
        """Absorbance along the incoming (out=False) or outgoing (out=True) path."""
        kwargs.pop("decomposed", None)  # not meaningful for a single layer
        if out:
            pathlength = self.xraythicknessout
        else:
            pathlength = self.xraythicknessin
        return self.material.absorbance(energy, pathlength, weights=weights, **kwargs)

    def addtofisx(self, setup, cfg):
        """Register this layer's material with the fisx configuration."""
        name = cfg.addtofisx_material(self.material)
        return [name, self.density, self.thickness]

    def fisxgroups(self, emin=0, emax=np.inf):
        return self.material.fisxgroups(emin=emin, emax=emax)

    def arealdensity(self):
        """Areal density (g/cm^2) of each element in this layer."""
        fractions = self.material.elemental_massfractions()
        scale = self.density * self.thickness
        return dict(
            zip(fractions.keys(), np.asarray(list(fractions.values())) * scale)
        )
class Multilayer(with_metaclass((Copyable, cache.Cache))):
"""
Class representing a multilayer of compounds or mixtures
"""
FISXCFG = pymca.FisxConfig()
    def __init__(
        self, material=None, thickness=None, fixed=False, geometry=None, name=None
    ):
        """
        Args:
            material(list(spectrocrunch.materials.compound|mixture)): layer composition
            thickness(list(num)): layer thickness in cm
            fixed(list(num)): do not change this layer
            geometry(spectrocrunch.geometries.base.Centric):
            name(str): name of the ensemble (defaults to "MULTILAYER")
        """
        self.geometry = geometry
        # Broadcast scalar arguments to single-element lists
        if not instance.isarray(material):
            material = [material]
        if not instance.isarray(thickness):
            thickness = [thickness]
        if not instance.isarray(fixed):
            fixed = [fixed]
        # A single "fixed" flag applies to all layers
        if len(fixed) != len(material) and len(fixed) == 1:
            fixed = fixed * len(material)
        self.layers = [
            Layer(material=mat, thickness=t, fixed=f, parent=self)
            for mat, t, f in zip(material, thickness, fixed)
        ]
        if not name:
            name = "MULTILAYER"
        self.name = name
        # force=True comes from the cache.Cache base class -- presumably forces
        # (re)initialization of the cache; TODO confirm against cache.Cache
        super(Multilayer, self).__init__(force=True)
def __getstate__(self):
return {"layers": self.layers, "geometry": self.geometry}
def __setstate__(self, state):
self.layers = state["layers"]
for layer in self.layers:
layer.parent = self
self.geometry = state["geometry"]
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.layers == other.layers and self.geometry == other.geometry
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.layers)
def __getitem__(self, index):
return self.layers[index]
@property
def nlayers(self):
return len(self.layers)
def fixediter(self):
for layer in self:
if layer.fixed:
yield layer
def freeiter(self):
for layer in self:
if not layer.fixed:
yield layer
def __str__(self):
layers = "\n ".join(
"Layer {}. {}".format(i, str(layer)) for i, layer in enumerate(self)
)
return "Multilayer (ordered top-bottom):\n {}".format(layers)
def markscatterer(self, name):
for layer in self:
layer.markscatterer(name)
def ummarkscatterer(self):
for layer in self:
layer.ummarkscatterer()
@property
def density(self):
return np.vectorize(lambda layer: layer.density)(self)
@property
def thickness(self):
return np.vectorize(lambda layer: layer.thickness)(self)
@property
def xraythicknessin(self):
return np.vectorize(lambda layer: layer.xraythicknessin)(self)
@property
def xraythicknessout(self):
return np.vectorize(lambda layer: layer.xraythicknessin)(self)
def arealdensity(self):
ret = collections.Counter()
for layer in self:
ret.update(layer.arealdensity())
return dict(ret)
def elemental_massfractions(self):
ret = self.arealdensity()
s = sum(ret.values())
return {el: w / s for el, w in ret.items()}
    def change_elemental_massfraction(self, Z, wZ):
        """Not implemented. Derivation notes (below) for adjusting the layer
        thickness split so that element *Z* reaches overall mass fraction *wZ*.

        Args:
            Z: element
            wZ(num): target overall mass fraction of Z
        """
        # wZ * sum_li(w_il*rho_il*t_il) = sum_l(w_Zl*rho_Zl*t_zl)
        # a: layers that contain Z
        # b: layers that do not contain Z
        # wZ * sum_ai(w_ia*rho_a*t_a) + wZ * sum_bi(w_ib*rho_b*t_b) = sum_a(w_Za*rho_a*t_a) + sum_b(w_Zb*rho_b*t_b)
        #
        # t_A = sum_a(t_a)
        # t_B = sum_b(t_b)
        # t_a = t_A*r_a
        # t_b = t_B*r_b = t*r_b - t_A*r_b
        # t_B = t - t_A
        #
        # denom = + wZ * sum_ai(w_ia*rho_a*r_a) - sum_a(w_Za*rho_a*r_a)
        # - wZ * sum_bi(w_ib*rho_b*r_b) + sum_b(w_Zb*rho_b*r_b)
        # num = t * sum_b(w_Zb*rho_b*r_b) - wZ*t * sum_bi(w_ib*rho_b*r_b)
        # t_A = num/denom
        #
        # w_Zb = 0
        #
        # num = t * wZ * sum_bi(w_ib*rho_b*r_b)
        # denom = sum_a(w_Za*rho_a*r_a) - wZ * [sum_bi(w_ib*rho_b*r_b) - sum_ai(w_ia*rho_a*r_a)]
        pass
def elemental_molefractions(self):
return self.mixlayers().elemental_molefractions()
def elemental_equivalents(self):
return self.mixlayers().elemental_equivalents()
def mixlayers(self):
n = len(self)
if n == 0:
return None
elif n == 1:
return self[0].material
else:
vfrac = self.thickness
vfrac = vfrac / float(vfrac.sum())
materials = [layer.material for layer in self]
return mixture.Mixture(
materials, vfrac, types.fraction.volume, name=self.name
)
def mass_att_coeff(self, energy):
"""Total mass attenuation coefficient
Args:
energy(num|array): keV
Returns:
array: nz x nenergy
"""
return np.asarray(
[instance.asarray(layer.mass_att_coeff(energy)) for layer in self]
)
def markabsorber(self, symb, shells=[], fluolines=[]):
"""
Args:
symb(str): element symbol
"""
for layer in self:
layer.markabsorber(symb, shells=shells, fluolines=fluolines)
def unmarkabsorber(self):
for layer in self:
layer.unmarkabsorber()
def absorbance(self, energy, weights=None, out=False, fine=False, decomposed=False):
if decomposed:
return [
layer.absorbance(energy, weights=weights, out=out, fine=fine)
for layer in self
]
else:
return np.sum(
[
layer.absorbance(energy, weights=weights, out=out, fine=fine)
for layer in self
],
axis=0,
)
def transmission(
self, energy, weights=None, out=False, fine=False, decomposed=False
):
A = self.absorbance(
energy, weights=weights, out=out, fine=fine, decomposed=decomposed
)
if decomposed:
return A # TODO: apply recursively
else:
return np.exp(-A)
def fixlayers(self, ind=None):
if ind is None:
for layer in self:
layer.fixed = True
else:
for i in ind:
self[i].fixed = True
def freelayers(self, ind=None):
if ind is None:
for layer in self:
layer.fixed = False
else:
for i in ind:
self[i].fixed = False
    def _refine_linear(self, A, y, constant=False, constraint=True):
        """Solve the linear system ``A . params = y``.

        Args:
            A(list(array)): design-matrix columns, one per free parameter
            y(array): target values (absorbance)
            constant(bool): append a constant (offset) column to the fit
            constraint(bool): restrict non-constant parameters to >= 0
        Returns:
            array: fitted parameters (the constant term is stripped off)
        """
        y = instance.asarray(y)
        # Trivial case: one observation and one parameter
        if y.size == 1 and len(A) == 1:
            return y / A[0]
        if constant:
            A.append(np.ones_like(y))
            A = np.vstack(A).T
            if constraint:
                # NOTE(review): lb is sized len(A) = number of rows after the
                # transpose (observations), not the number of columns
                # (parameters); verify against fit1d.lstsq_bound's expected
                # bound shape.
                lb = np.zeros(len(A), dtype=float)
                lb[-1] = -np.inf  # the constant term may be negative
                ub = np.inf
                params = fit1d.lstsq_bound(A, y, lb, ub)
            else:
                params = fit1d.lstsq(A, y)
            params = params[:-1]  # drop the fitted constant term
        else:
            A = np.vstack(A).T
            if constraint:
                params = fit1d.lstsq_nonnegative(A, y)
            else:
                params = fit1d.lstsq(A, y)
        return params
    def _refinerhod(
        self, energy, absorbance, refinedattr, fixedattr, weights=None, **kwargs
    ):
        """Refine one layer attribute (thickness or density) of the free
        layers from a measured absorbance, keeping the complementary
        attribute fixed.

        Args:
            energy(num|array): keV
            absorbance(array): measured absorbance at *energy*
            refinedattr(str): attribute to refine ("xraythicknessin" or "density")
            fixedattr(str): complementary attribute that stays fixed
            weights(array): source-line weights for averaging cross sections
            **kwargs: forwarded to _refine_linear
        """
        # Subtract the contribution of the fixed layers
        y = absorbance
        for layer in self.fixediter():
            y = y - layer.absorbance(energy)
        # One design-matrix column per free layer: its mass attenuation coefficient
        A = [layer.mass_att_coeff(energy) for layer in self.freeiter()]
        if weights is not None:
            A = [weightedsum(csi, weights=weights) for csi in A]
        params = self._refine_linear(A, y, **kwargs)
        # Each fitted parameter equals density*thickness, so divide by the
        # fixed attribute to obtain the refined one
        for param, layer in zip(params, self.freeiter()):
            setattr(layer, refinedattr, param / getattr(layer, fixedattr))
            logger.info(
                'Refined {} of "{}": {}'.format(
                    refinedattr, layer, getattr(layer, refinedattr)
                )
            )
def refinecomposition(
self, energy, absorbance, weights=None, fixthickness=True, **kwargs
):
y = absorbance
for layer in self.fixediter():
y = y - layer.absorbance(energy, weights=weights)
A = []
for layer in self.freeiter():
mu = layer.mass_att_coeff(energy, decomposed=True)
w, cs = layer.csdict_parse(mu)
if weights is not None:
cs = [weightedsum(csi, weights=weights) for csi in cs]
A.extend(cs)
params = self._refine_linear(A, y, **kwargs)
for layer in self.freeiter():
n = layer.nparts
w = params[0:n]
params = params[n:]
s = w.sum()
w = w / s
w = dict(zip(layer.parts.keys(), w))
layer.change_fractions(w, "mass")
if fixthickness:
layer.density = s / layer.xraythicknessin
logger.info(
'Refined density of "{}": {} g/cm^3'.format(layer, layer.density)
)
else:
layer.xraythicknessin = s / layer.density
logger.info(
'Refined thickness "{}": {} g/cm^3'.format(
layer, layer.xraythicknessin
)
)
def refinethickness(self, energy, absorbance, **kwargs):
self._refinerhod(energy, absorbance, "xraythicknessin", "density", **kwargs)
def refinedensity(self, energy, absorbance, **kwargs):
self._refinerhod(energy, absorbance, "density", "xraythicknessin", **kwargs)
def _cache_layerinfo(self):
t = np.empty(self.nlayers + 1)
np.cumsum(self.thickness, out=t[1:])
t[0] = 0
if self.geometry.reflection:
zexit = 0.0
else:
zexit = t[-1]
return {"cumul_thickness": t, "zexit": zexit}
def _zlayer(self, z):
"""Get layer in which z falls
Args:
z(num|array): depth
Returns:
num|array:
0 when z<=0
n+1 when z>totalthickness
{1,...,n} otherwise (the layers)
"""
layerinfo = self.getcache("layerinfo")
ret = np.digitize(z, layerinfo["cumul_thickness"], right=True)
return instance.asscalar(ret)
    def _cache_attenuationinfo(self, energy):
        """Pre-compute linear attenuation coefficients per layer/energy and the
        offsets that make the cumulative attenuation linear within each layer:
        att(z) = linatt*z + linatt_cumulcor.

        Args:
            energy(num|array): keV
        Returns:
            dict: "linatt" and "linatt_cumulcor" DataFrames indexed by
                  layer index (0 and nlayers+1 are vacuum), columns = energy
        """
        energy = np.unique(instance.asarray(energy))
        nenergies = len(energy)
        density = self.density[:, np.newaxis]
        thickness = self.thickness[:, np.newaxis]
        mu = self.mass_att_coeff(energy)
        # We will add one layer at the beginning and one at the end, both vacuum
        # linear attenuation coefficient for each layer
        linatt = mu * density
        linattout = np.empty((self.nlayers + 2, nenergies), dtype=linatt.dtype)
        linattout[1:-1, :] = linatt
        linattout[[0, -1], :] = 0  # outside sample (vacuum)
        # Cumulative linear attenuation coefficient (= linatt*z + correction)
        attall = (linatt * thickness).sum(axis=0)
        cor = np.empty((self.nlayers + 2, nenergies), dtype=attall.dtype)
        cor[0, :] = 0  # before sample (vacuum)
        cor[-1, :] = attall  # after sample
        for i in range(nenergies):
            # Offset for layer j accumulates (linatt_k - linatt_j)*thickness_k
            # over the layers k above it (upper triangle)
            tmp = np.subtract.outer(linatt[:, i], linatt[:, i])
            tmp *= thickness
            cor[1:-1, i] = np.triu(tmp).sum(axis=0)
        linattout = pd.DataFrame(
            linattout, columns=energy, index=range(self.nlayers + 2)
        )
        cor = pd.DataFrame(cor, columns=energy, index=range(self.nlayers + 2))
        return {"linatt": linattout, "linatt_cumulcor": cor}
    def _cum_attenuation(self, z, energy):
        """Total attenuation from surface to z
        Args:
            z(num|array): depth of attenuation
            energy(num|array): energies to be attenuation
        Returns:
            array: nz x nenergy
        """
        # Attenuation is linear within a layer: att(z) = linatt*z + cumulcor
        lz = self._zlayer(z)
        att = self.getcache("attenuationinfo")
        linatt = att["linatt"].loc[lz][energy]
        cor = att["linatt_cumulcor"].loc[lz][energy]
        if linatt.ndim != 0:
            # pandas Series/DataFrame -> plain arrays
            linatt = linatt.values
            cor = cor.values
        if linatt.ndim == 2:
            # broadcast the depths over the energy axis
            z = z[:, np.newaxis]
        return z * linatt + cor
def _transmission(self, zi, zj, cosaij, energy):
"""Transmission from depth zi to zj
Args:
zi(num|array): start depth of attenuation (nz)
zj(num|array): end depth of attenuation (nz)
cosaij(num|array): angle with surface normal (nz)
energy(num|array): energies to be attenuation (nenergy)
Returns:
array: nz x nenergy
"""
datt = self._cum_attenuation(zj, energy) - self._cum_attenuation(zi, energy)
if datt.ndim == 2:
if instance.isarray(cosaij):
cosaij = cosaij[:, np.newaxis]
# assert(sum(instance.asarray(-datt/cosaij)>0)==0)
return np.exp(-datt / cosaij)
def _cache_interactioninfo(
self, energy, emin=None, emax=None, ninteractions=None, geomkwargs=None
):
"""
Args:
energy(array): nSource x nSourceLines
"""
def getenergy(x, **kwargs):
return list(listtools.flatten(line.energy(**kwargs) for line in x.columns))
# probabilities: list of pandas dataframes (one for each interaction)
# which saves the interaction probability of a layer
# at a particular energy
# column: line as a result of an interaction
# index: [layer_index, energy_index]
# value: interaction probability (1/cm/srad)
# energy_to_index: list of functions (one for each interaction)
# to get the energy_index closest to an energy
_nlayers = self.nlayers + 2
_ninteractions = ninteractions + 2
probabilities = [None] * _ninteractions
energy_to_index = [None] * _ninteractions
interactioninfo = {
"probabilities": probabilities,
"energy_to_index": energy_to_index,
"getenergy": getenergy,
}
# Interaction 0 has no probabilities
# this is the source, not the result of an interaction
source = [xrayspectrum.RayleighLine(energy)]
probabilities[0] = pd.DataFrame(columns=source)
# Calculate interaction probabilities (ph/cm/srad)
for i in range(ninteractions):
# Line energies after previous interaction
energyi = getenergy(probabilities[i], **geomkwargs)
nenergyi = len(energyi)
# Interaction probabilities of each energy with each layer
probs = [None] * _nlayers
probs[1:-1] = [
pd.DataFrame.from_dict(
dict(
layer.xrayspectrum(energyi, emin=emin, emax=emax).probabilities
)
)
for layer in self
]
probs[0] = pd.DataFrame(index=range(nenergyi))
probs[-1] = probs[0]
probs = pd.concat(probs, sort=True)
probs.fillna(0.0, inplace=True)
probs.index = pd.MultiIndex.from_product(
[np.arange(_nlayers), range(nenergyi)],
names=["layer_index", "energy_index"],
)
probabilities[i + 1] = probs
# Get energy_index closest to an energy
energy_to_index[i + 1] = lambda x: (np.abs(energyi - x)).argmin()
return interactioninfo
    def _prob_interaction(self, zi, i, energyi, interactionj):
        """
        Probability of interaction at depth zi
        Args:
            zi(num|array): one or more depths
            i(num): interaction order (1, 2, ...)
            energyi(num): energy of photon that interacts
            interactionj(object|array): one of more interactions
        Returns:
            array:
        """
        # Layer index for each depth (0 and nlayers+1 are vacuum)
        lz = self._zlayer(zi)
        lzarr = instance.isarray(lz)
        if lzarr:
            lz = lz.tolist()
        interactioninfo = self.getcache("interactioninfo")
        # Closest tabulated energy for this interaction order
        energy_index = interactioninfo["energy_to_index"][i](energyi)
        # Advanced indexing on MultiIndex: does not preserve order and repeats
        probs = interactioninfo["probabilities"][i].loc[
            (lz, energy_index), interactionj
        ]
        if probs.ndim != 0:
            if lzarr:
                # apply order and repeats in lz
                probs.index = probs.index.droplevel(1)
                probs = probs.loc[lz]
            probs = probs.values
        return probs
def _prob_interaction_transmission(
self, zi, zj, cosaij, i, energyi, energyj, interactionj
):
"""
Probability of interaction at depth zi and reaching zj
under a particular angle
Args:
zi(num|array): start depth of attenuation
zj(num|array): end depth of attenuation
cosaij(num|array): angle with surface normal
i(num): interaction order (1, 2, ...)
energyi(num): energy of photon that interacts
energyj(num|array): energy of interactionj
interactionj(object|array):
Returns:
array:
"""
probs = self._prob_interaction(zi, i, energyi, interactionj)
T = self._transmission(zi, zj, cosaij, energyj)
return probs * T
    def _prob_interaction_transmission_saintegrated(
        self, zi, zj, i, energyi, energyj, interactionj
    ):
        """
        Total probability of interaction at depth zi and reaching depth zj
        (integrated over the emission hemisphere)
        Args:
            zi(num|array): start depth of attenuation
            zj(num|array): end depth of attenuation
            i(num): interaction order (1, 2, ...)
            energyi(num): energy of photon that interacts
            energyj(num): energy of interactionj
            interactionj(): energies to be attenuation
        Returns:
            array:
        """
        probs = self._prob_interaction(zi, i, energyi, interactionj)
        Aj = self._cum_attenuation(zj, energyj)
        Ai = self._cum_attenuation(zi, energyj)
        barri = instance.isarray(zi)
        barrj = instance.isarray(zj)
        if barri and barrj:
            # Broadcast to an nzi x nzj grid
            probs = instance.asarray(probs)[:, np.newaxis]
            Ai = instance.asarray(Ai)[:, np.newaxis]
            Aj = instance.asarray(Aj)[np.newaxis, :]
        # Integrate over solid angle of emission from zi to zj (hemisphere)
        # TODO: assume isotropic emission from zi for now
        # func = lambda theta,phi: probs*np.exp(-(Aj-Ai)/np.cos(theta))*np.tan(theta)
        # return np.nquad(func,[(0,np.pi/2),(0,2*np.pi)])
        # Closed form of the hemisphere integral: the exponential integral E1
        return (2 * np.pi) * probs * scipy.special.exp1(Aj - Ai)
    def _primary_rates(self, selfabs=True):
        """
        Returns the ph generated per source line after 1 interaction (without efficiency term)

        Args:
            selfabs(bool): correct the effective sample thickness for
                           self-absorption (analytic depth integral)
        returns:
            dict: line: rates (nSourceLines)
        """
        interactionindex = 1
        interactioninfo = self.getcache("interactioninfo")
        energy0 = interactioninfo["getenergy"](interactioninfo["probabilities"][0])
        nsource = len(energy0)
        interactions1 = interactioninfo["probabilities"][interactionindex].columns
        nlayers = self.nlayers
        nlines = len(interactions1)
        # Effective sample thickness (corrected for attenuation)
        if selfabs:
            geomkwargs = self.geometry.xrayspectrumkwargs()
            energy1 = interactioninfo["getenergy"](
                interactioninfo["probabilities"][interactionindex], **geomkwargs
            )
            att = self.getcache("attenuationinfo")
            cosafirst = self.geometry.cosnormin
            cosalast = self.geometry.cosnormout
            # Linear attenuation (and cumulative offsets) along the incoming
            # (index 0) and outgoing (index 1) paths
            mu0 = att["linatt"][energy0].values / cosafirst
            mu1 = att["linatt"][energy1].values / cosalast
            cor0 = att["linatt_cumulcor"][energy0].values / cosafirst
            cor1 = att["linatt_cumulcor"][energy1].values / cosalast
            chi = mu1[1:-1, :, np.newaxis] - mu0[1:-1, np.newaxis, :]
            chicor = cor1[1:-1, :, np.newaxis] - cor0[1:-1, np.newaxis, :]
            layerinfo = self.getcache("layerinfo")
            # Analytic depth integral over each layer:
            # (exp(chi*z_top) - exp(chi*z_bottom)) / chi, times exp(chicor)
            J2 = np.exp(chi * layerinfo["cumul_thickness"][1:, np.newaxis, np.newaxis])
            J2 -= np.exp(
                chi * layerinfo["cumul_thickness"][:-1, np.newaxis, np.newaxis]
            )
            J2 /= chi
            J2 *= np.exp(chicor)
            if not self.geometry.reflection:
                # Transmission geometry: attenuate through the remaining stack
                J2 *= np.exp(-cor1[-1, np.newaxis, :, np.newaxis])
            # nlayers x nenergy1 x nenergy0 -> nlayers x nsource x nenergy1
            J2 = np.transpose(J2, [0, 2, 1])
            # nlayers x nenergy0 x nenergy1 -> nlayers x nsource x nlines (reduce scattering lines)
            interactions1exp = list(
                listtools.flatten(
                    [interaction] * interaction.nenergy for interaction in interactions1
                )
            )
            indC = np.asarray(
                [interaction == "Compton" for interaction in interactions1exp]
            )
            indR = np.asarray(
                [interaction == "Rayleigh" for interaction in interactions1exp]
            )
            indF = ~indC & ~indR
            indsource = range(nsource)
            # Fluorescence columns keep all source energies; scattering lines
            # are reduced to the diagonal (one scattered energy per source line)
            J2 = np.concatenate(
                (
                    J2[..., indF],
                    J2[:, indsource, indC][..., np.newaxis],
                    J2[:, indsource, indR][..., np.newaxis],
                ),
                axis=-1,
            )
            # Reorder the interaction labels to match: Compton and Rayleigh last
            interactions1 = interactions1.tolist()
            interactions1.append(interactions1.pop(interactions1.index("Compton")))
            interactions1.append(interactions1.pop(interactions1.index("Rayleigh")))
        else:
            # lim[chi->0] (exp(chi.thickness)-1)/chi = thickness
            # nlayers x 1 x 1
            J2 = self.thickness[:, np.newaxis, np.newaxis]
        # Multiply thickness with geometrical factor: cm -> cm.srad
        J2 *= self.geometry.solidangle / self.geometry.cosnormin
        # Interaction probability: nlayers x nsource x nlines (1/cm/srad)
        probs = interactioninfo["probabilities"][interactionindex].loc[
            (range(1, self.nlayers + 1),), interactions1
        ]
        probs = probs.values.reshape((nlayers, nsource, nlines))
        # Rate: fluoresence/scattering per incoming photon
        J2 = J2 * probs  # ph/phsource
        # Sum over layers
        J2 = J2.sum(axis=0).T  # nlines x nsource
        return dict(zip(interactions1, J2))
    def _primary_rates_numerical(self):
        """Returns the ph generated per source line after 1 interaction (without efficiency term)

        Numerical (quadrature) counterpart of _primary_rates, used for
        validation of the analytic depth integral.
        """
        interactionindex = 1
        cosafirst = self.geometry.cosnormin
        cosalast = self.geometry.cosnormout
        integratormult = self.geometry.solidangle / cosafirst
        layerinfo = self.getcache("layerinfo")
        # Integration limits: the full stack depth
        za = layerinfo["cumul_thickness"][0]
        zb = layerinfo["cumul_thickness"][-1]
        zfirst = layerinfo["cumul_thickness"][0]
        zlast = layerinfo["zexit"]
        geomkwargs = self.geometry.xrayspectrumkwargs()
        interactioninfo = self.getcache("interactioninfo")
        energy0 = interactioninfo["getenergy"](interactioninfo["probabilities"][0])
        interactions1 = interactioninfo["probabilities"][interactionindex].columns
        def numintegrate(path, za, zb):
            # Adaptive quadrature of the depth profile
            return scipy.integrate.quad(path, za, zb)[0]
        # NOTE(review): n is a float here and numintegratefast is unused below;
        # np.linspace would need an integer sample count — verify before use.
        n = (zb - za) / min(self.thickness) * 100
        def numintegratefast(path, za, zb):
            # Fixed-grid trapezoidal alternative (currently unused)
            x = np.linspace(za, zb, n)
            y = path(x)
            return np.trapz(y, x=x)
            # return scipy.integrate.trapz(y, x=x)
        # import matplotlib.pyplot as plt
        J2 = {}
        for interaction1 in interactions1:
            energy1 = interaction1.energy(**geomkwargs)
            if isinstance(interaction1, xrayspectrum.FluoZLine):
                # A fluorescence line has one energy regardless of the source line
                energy1 = [energy1] * len(energy0)
            def pathgen(en0, en1):
                # Bind en0/en1 per source line: attenuate to depth z1,
                # interact there, then escape to the exit depth
                return lambda z1: self._transmission(
                    zfirst, z1, cosafirst, en0
                ) * self._prob_interaction_transmission(
                    z1, zlast, cosalast, interactionindex, en0, en1, interaction1
                )
            paths = [pathgen(en0, en1) for en0, en1 in zip(energy0, energy1)]
            rates = [numintegrate(path, za, zb) for path in paths]
            # if interaction1 == 'Compton':
            #     plt.figure()
            #     x = np.linspace(za, zb, n)
            #     for path in paths:
            #         plt.plot(x, path(x))
            #     plt.show()
            J2[interaction1] = np.asarray(rates) * integratormult
        return J2
    def _secondary_interaction_numerical(self):
        """Returns the ph generated per source line after 2 interactions
        (without efficiency term).

        WARNING: marked TODO/not finished by the author - the main loop still
        contains debug prints and matplotlib plots and should not be used in
        production.

        Returns:
            dict: interaction2 -> np.array of rates (one per source line)
        """
        # TODO: not finished
        interactionindex = 2
        cosafirst = self.geometry.cosnormin
        cosalast = self.geometry.cosnormout
        # Geometrical factor: cm -> cm.srad
        integratormult = self.geometry.solidangle / cosafirst
        layerinfo = self.getcache("layerinfo")
        # Integration bounds over the sample depth
        za = layerinfo["cumul_thickness"][0]
        zb = layerinfo["cumul_thickness"][-1]
        zfirst = layerinfo["cumul_thickness"][0]
        zlast = layerinfo["zexit"]
        geomkwargs1 = self.geometry.xrayspectrumkwargs()
        geomkwargs2 = geomkwargs1
        interactioninfo = self.getcache("interactioninfo")
        energy0 = interactioninfo["getenergy"](interactioninfo["probabilities"][0])
        # Lines after the first and after the second interaction
        interactions1 = interactioninfo["probabilities"][1].columns
        interactions2 = interactioninfo["probabilities"][2].columns
        J3 = {}
        # NOTE: `path` closes over en0/en1/en2/interaction1/interaction2 by
        # late binding; it is only valid while those loop variables are set.
        def path(z1, z2):
            return (
                self._transmission(zfirst, z1, cosafirst, en0)[:, np.newaxis]
                * self._prob_interaction_transmission_saintegrated(
                    z1, z2, interactionindex - 1, en0, en1, interaction1
                )
                * self._prob_interaction_transmission(
                    z2, zlast, cosalast, interactionindex, en1, en2, interaction2
                )[np.newaxis, :]
            )
        def numintegrate(path, za, zb):
            # 2D adaptive quadrature over (z1, z2); [0] drops the error estimate
            return scipy.integrate.nquad(path, [(za, zb)] * 2)[0]
        # NOTE(review): `n` is a float; np.linspace below expects an int count
        n = (zb - za) / min(self.thickness) * 100
        def numintegratefast(path, za, zb):
            # Fixed-grid trapezoidal alternative to `numintegrate` (unused)
            x1 = np.linspace(za, zb, n)
            x2 = np.linspace(za, zb, n)
            y = path(x1, x2)
            y = np.trapz(y, x=x1, axis=0)
            y = np.trapz(y, x=x2, axis=0)
            return y
        import matplotlib.pyplot as plt
        for interaction1 in interactions1:
            energy1 = interaction1.energy(**geomkwargs1)
            if isinstance(interaction1, xrayspectrum.FluoZLine):
                # Fluorescence: one fixed energy per source line
                energy1 = [energy1] * len(energy0)
            for interaction2 in interactions2:
                energy2 = interaction2.energy(**geomkwargs2)
                if isinstance(interaction2, xrayspectrum.FluoZLine):
                    energy2 = [energy2] * len(energy1)
                # Debug loop: prints intermediate array shapes and shows the
                # 2D integrand as an image (blocks on plt.show()).
                for en0, en1, en2 in zip(energy0, energy1, energy2):
                    x1 = np.linspace(za, zb, n)
                    x2 = np.linspace(za, zb, n + 1)
                    print(self._transmission(zfirst, x1, cosafirst, en0).shape)
                    print(
                        self._prob_interaction_transmission_saintegrated(
                            x1, x2, interactionindex - 1, en0, en1, interaction1
                        ).shape
                    )
                    print(
                        self._prob_interaction_transmission(
                            x2,
                            zlast,
                            cosalast,
                            interactionindex,
                            en1,
                            en2,
                            interaction2,
                        ).shape
                    )
                    plt.figure()
                    img = path(x1, x2)
                    plt.imshow(img)
                    plt.show()
                rates = [
                    numintegrate(path, za, zb)
                    for en0, en1, en2 in zip(energy0, energy1, energy2)
                ]
                J3[interaction2] = np.asarray(rates) * integratormult
        return J3
def addtofisx(self, setup, cfg):
setup.setSample([layer.addtofisx(setup, cfg) for layer in self])
self.geometry.addtofisx(setup, cfg)
def addtopymca_matrix(self, setup, cfg, name, thickness=0.0):
anglein = self.geometry.anglein
angleout = self.geometry.angleout
scatteringangle = self.geometry.scatteringangle
if name == "MULTILAYER":
density = 0.0
else:
v = cfg["materials"][name]
density = v["Density"]
cfg["attenuators"]["Matrix"] = [
1,
name,
density,
thickness,
anglein,
angleout,
0,
scatteringangle,
]
def loadfrompymca_matrix(self, setup, cfg):
_, name, density, thickness, anglein, angleout, _, scatteringangle = cfg[
"attenuators"
]["Matrix"]
self.geometry.anglein = anglein
self.geometry.angleout = angleout
return name, density, thickness
def addtopymca_layer(self, setup, cfg, index, layer):
name = setup.addtopymca_material(cfg, layer, defaultthickness=layer.thickness)
l = "Layer{}".format(index)
cfg["multilayer"][l] = [1, name, layer.density, layer.thickness]
def loadfrompymca_layer(self, setup, cfg, index):
l = "Layer{}".format(index)
if l in cfg["multilayer"]:
enabled, name, density, thickness = cfg["multilayer"][l]
if enabled:
material = setup.loadfrompymca_material(cfg, name, density)
return (material, thickness)
else:
return tuple()
else:
return None
def addtopymca_shells(self, setup, cfg, elements):
emax = setup.emax_strict
emin = setup.emin
if "peaks" not in cfg:
cfg["peaks"] = {}
for e in elements:
shells = e.pymcashellfactory(emin=emin, emax=emax)
if shells:
cfg["peaks"][str(e)] = shells
def addtopymca(self, setup, cfg):
if self.nlayers == 1:
name = setup.addtopymca_material(
cfg, self[0], defaultthickness=self[0].thickness
)
self.addtopymca_shells(setup, cfg, self[0].elements)
self.addtopymca_matrix(setup, cfg, name, thickness=self[0].thickness)
else:
for index, layer in enumerate(self):
self.addtopymca_layer(setup, cfg, index, layer)
self.addtopymca_shells(setup, cfg, layer.elements)
self.addtopymca_matrix(setup, cfg, "MULTILAYER")
self.geometry.addtopymca(setup, cfg)
def loadfrompymca(self, setup, cfg):
self.geometry.loadfrompymca(setup, cfg)
name, density, thickness = self.loadfrompymca_matrix(setup, cfg)
if name == "MULTILAYER":
layer = tuple()
index = 0
layers = []
while layer is not None:
layer = self.loadfrompymca_layer(setup, cfg, index)
index += 1
if layer:
layers.append(layer)
material, thickness = zip(*layers)
else:
material = [setup.loadfrompymca_material(cfg, name, density)]
thickness = [thickness]
self.layers = [
Layer(material=mat, thickness=t, parent=self)
for mat, t in zip(material, thickness)
]
def _parse_fisx_result(self, fisxresult):
"""
Args:
fisxresult(dict): group:dict(layer:dict(line):dict)
Returns:
dict: line: rate
"""
# Get fluorescence rates from fisx (add escape peaks)
rates = {}
for group, layers in fisxresult.items():
el = element.Element(group.split(" ")[0])
for layer, peaks in layers.items():
for peak, peakinfo in peaks.items():
line = xrayspectrum.FluoLine(peak.split(" ")[0])
line = xrayspectrum.FluoZLine(el, line)
rate = peakinfo["rate"]
if line in rates:
rates[line] += rate
else:
rates[line] = rate
# Correction for detector in transmission
# TODO: correct?
if not self.geometry.reflection:
for line in rates:
energy = line.energy(**self.geometry.xrayspectrumkwargs())
result[line] *= self.transmission(energy, out=True)
return rates
def _rates_to_spectrum(self, rates, emin=0, emax=None, scattering=True):
"""
Args:
rates(dict): line: rate
emin(Optional(num)):
emax(Optional(num)):
scattering(Optional(bool)):
Returns:
xrayspectrum.Spectrum
"""
if not scattering:
rates = {
k: v
for k, v in rates.items()
if not isinstance(k, xrayspectrum.ScatteringLine)
}
if emax is None:
emax = max(
listtools.flatten(
line.energy(**self.geometry.xrayspectrumkwargs()) for line in rates
)
)
return xrayspectrum.Spectrum(
rates,
xlim=[emin, emax],
density=None,
title=str(self),
type=xrayspectrum.Spectrum.TYPES.rate,
geometry=self.geometry,
)
def _print_fisx(self, fluo, details=False):
"""
Args:
fluo(dict): group:dict(layer:dict(line):dict)
"""
if details:
rowfmt = "{:>6}{:>8}{:>20}{:>10}{:>10}{:>20}{:>20}{:>20}{:>20}{:>20}"
print(
rowfmt.format(
"Layer",
"Element",
"MassFrac",
"Line",
"Energy",
"Rate",
"Primary",
"Multiplier(2)",
"Multiplier(2+3)",
"Efficiency",
)
)
else:
rowfmt = "{:>6}{:>8}{:>20}{:>10}{:>10}{:>20}"
print(
rowfmt.format("Layer", "Element", "MassFrac", "Line", "Energy", "Rate")
)
for key in sorted(fluo):
ele = key.split(" ")[0]
for layer in fluo[key]:
lines = sorted(list(fluo[key][layer].keys()))
for line in lines:
if line.endswith("esc"):
continue
# Mass fraction in this layer
w = fluo[key][layer][line]["massFraction"]
# energy of the line
energy = fluo[key][layer][line]["energy"]
# expected measured rate (everything except flux*time)
rate = fluo[key][layer][line]["rate"]
escaperate = sum(
fluo[key][layer][line2]["rate"]
for line2 in lines
if line2.endswith("esc") and line2.startswith(line)
)
rate += escaperate
# primary photons (no attenuation and no detector considered)
primary = fluo[key][layer][line]["primary"]
# secondary photons (no attenuation and no detector considered)
secondary = fluo[key][layer][line]["secondary"]
# tertiary photons (no attenuation and no detector considered)
tertiary = fluo[key][layer][line].get("tertiary", 0.0)
# attenuation and detector
efficiency = fluo[key][layer][line].get("efficiency", 0.0)
# correction due to secondary excitation
enhancement2 = (primary + secondary) / primary
# correction due to tertiary excitation
enhancement3 = (primary + secondary + tertiary) / primary
if details:
print(
rowfmt.format(
layer,
ele,
w,
line,
energy,
rate + escaperate,
primary,
enhancement2,
enhancement3,
efficiency,
)
)
else:
print(
rowfmt.format(
layer, ele, w, line, energy, rate + escaperate
)
)
assert np.isclose(
rate, (primary + secondary + tertiary) * efficiency
)
    def _rates_fisx(self, energy0, weights, ninteractions, emin=0, emax=None):
        """
        Calculate fluorescence rates with the fisx library.

        Args:
            energy0(array): nSource x nLines
            weights(array): nSource x nLines
            ninteractions(num): >1 enables secondary (interlayer) excitation
            emin(Optional(num)):
            emax(Optional(num)): defaults to the maximal source energy
        Returns:
            list(dict): line: rate (nSource)
        """
        # Add sample, detector and geometry
        setup = fisx.XRF()
        cfg = self.FISXCFG
        self.addtofisx(setup, cfg)
        def shellparse(shell):
            # fisx group names need "K"/"L" style shell labels
            shell = str(shell)
            if not shell.startswith("K") and not shell.startswith("L"):
                # Only K and L splitting supported
                shell = shell[0]
            return shell
        # Get fluorescence
        secondary = 2 * (ninteractions > 1)
        # 0: none, 1: intralayer, 2: interlayer
        rates = []
        for energy0i, weightsi in zip(energy0, weights):
            # Peak groups
            groups = {}
            if emax is None:
                emaxi = np.max(energy0i)
            else:
                emaxi = emax
            for layer in self:
                groups.update(layer.fisxgroups(emin=emin, emax=emaxi))
            # Flatten to a set of "Element Shell" strings as fisx expects
            groups = {
                "{} {}".format(el, shellparse(shell))
                for el, shells in groups.items()
                for shell in shells
            }
            # Add source
            setup.setBeam(energy0i, weights=weightsi)
            # Calculate fluorescence
            fixresult = setup.getMultilayerFluorescence(
                groups, cfg.FISXMATERIALS, secondary=secondary, useMassFractions=1
            )
            # self._print_fisx(fixresult)
            rates.append(self._parse_fisx_result(fixresult))
        return rates
    def _rates_calc(
        self,
        method,
        energy0,
        weights,
        ninteractions,
        emin=0,
        emax=None,
        withdetectorresponse=True,
    ):
        """
        Calculate line rates analytically or by numerical integration.

        Args:
            method(str): "numerical" uses depth integration, anything else
                         uses the analytical primary-rate expression
            energy0(array): nSource x nLines
            weights(array): nSource x nLines
            ninteractions(num):
            emin(Optional(num)):
            emax(Optional(num)): defaults to the maximal source energy
            withdetectorresponse(Optional(bool)): include detector attenuation
        Returns:
            list(dict): line: rate (nSource)
        """
        geomkwargs = self.geometry.xrayspectrumkwargs()
        rates = []
        for energy0i, weightsi in zip(energy0, weights):
            if emax is None:
                emaxi = np.max(energy0i)
            else:
                emaxi = emax
            # Interaction probabilities are cached per source spectrum
            with self.cachectx(
                "interactioninfo",
                energy0i,
                emin=emin,
                emax=emaxi,
                ninteractions=ninteractions,
                geomkwargs=geomkwargs,
            ):
                interactioninfo = self.getcache("interactioninfo")
                allenergies = interactioninfo["getenergy"](
                    interactioninfo["probabilities"][-2], **geomkwargs
                )
                with self.cachectx("attenuationinfo", allenergies):
                    # Primary interaction (with self-absorption)
                    if method == "numerical":
                        ratesi = self._primary_rates_numerical()
                    else:
                        ratesi = self._primary_rates()
                    # Secondary interaction (with self-absorption)
                    # NOTE: deliberately disabled with `and False` until the
                    # secondary-interaction integration is finished (TODO)
                    if ninteractions >= 2 and False: # TODO
                        for k, v in self._secondary_interaction_numerical().items():
                            if k in ratesi:
                                ratesi[k] += v
                            else:
                                ratesi[k] = v
            # Attenuation of source and detected X-rays
            self._attenuated_rates(ratesi, withdetectorattenuation=withdetectorresponse)
            # Apply source weights
            for k in ratesi:
                ratesi[k] = ratesi[k] * weightsi
            rates.append(ratesi)
        return rates
def _attenuated_rates(self, rates, withdetectorattenuation=True):
"""
Apply various attenuations: source filter, detector filter, detector
Args:
rates(dict): line: rate
"""
# Flat list of lines
lines = list(rates.keys())
# Source and detected lines
geom = self.geometry.xrayspectrumkwargs()
energysource = lines[lines.index("Rayleigh")].energy(**geom)
energydet = [k.energy(**geom) for k in lines]
ind = np.cumsum([listtools.length(en) for en in energydet])
ind = np.insert(ind, 0, 0)
ind = zip(ind[:-1], ind[1:])
# Efficiency (nSource x nLines): filter and detector attenuation
energydet = list(listtools.flatten(energydet))
efficiency = self.geometry.efficiency(
energysource, energydet, withdetectorattenuation=withdetectorattenuation
)
for k, (a, b) in zip(lines, ind):
if a + 1 == b: # Fluorescence
eff = efficiency[:, a]
else: # Scattering
eff = np.diag(efficiency[:, a:b])
rates[k] = rates[k] * eff
    @contextmanager
    def _xrmc_context(
        self,
        flux=1,
        time=1,
        convoluted=False,
        pulseproctime=0,
        source_distance=1000,
        beamsize=1e-4,
    ):
        """Build a temporary xrmc simulation world for this sample.

        Yields an xrmc world (source, sample layers, beam filters, detector
        and detector filters) backed by a temporary directory that is removed
        on exit.

        Args:
            flux(num): source flux
            time(num): measurement time in sec
            convoluted(bool): add a detector response (with near-zero noise
                              and fano to obtain a line spectrum)
            pulseproctime(num): pulse processing time of the response
            source_distance(num): source-sample distance in cm
            beamsize(num): beam size in cm
        Yields:
            xrmc.XrmcWorldBuilder
        """
        with localfs.temp(remove=True) as path:
            path.mkdir()
            # Units: keV, cm, degrees and sec
            world = xrmc.XrmcWorldBuilder(
                str(path), atmosphere=self.geometry.atmosphere
            )
            world.define_source(flux=flux, distance=source_distance, beamsize=beamsize)
            # Make sure the sample is larger than the beam footprint
            detdistance = self.geometry.distance.to("cm").magnitude
            samplesize = min(beamsize * 1000, detdistance)
            samplesize = min(samplesize, source_distance)
            # All layers the same size and beam goes through the sample center
            nlayers = self.nlayers
            dxs = [samplesize] * nlayers
            dys = [samplesize] * nlayers
            oxs = [0] * nlayers
            oys = [0] * nlayers
            for layer, dx, dy, ox, oy in zip(self, dxs, dys, oxs, oys):
                world.sample.add_layer(
                    material=layer.material,
                    thickness=layer.thickness,
                    dhor=dx,
                    dvert=dy,
                    ohor=ox,
                    overt=oy,
                )
            world.sample.polar = self.geometry.anglenormin
            world.sample.azimuth = self.geometry.sample_azimuth
            # Add beam filters
            dx = samplesize
            dy = samplesize
            ox = 0
            oy = 0
            for layer in self.geometry.beamfilters():
                world.source.add_layer(
                    material=layer["material"],
                    thickness=layer["thickness"],
                    dhor=dx,
                    dvert=dy,
                    ohor=ox,
                    overt=oy,
                    surface=10,
                )
            # Add detector
            activearea = self.geometry.detector.activearea.to("cm**2").magnitude
            mcagain = self.geometry.detector.mcagain
            polar = self.geometry.scatteringangle
            azimuth = self.geometry.detector_azimuth
            if convoluted:
                response = {
                    "material": self.geometry.detector.material,
                    "thickness": self.geometry.detector.thickness,
                    "noise": self.geometry.detector.mcanoise * 0
                    + 1e-10,  # to obtain line spectrum
                    "fano": self.geometry.detector.mcafano * 0
                    + 1e-10,  # to obtain line spectrum
                    "pulseproctime": pulseproctime,
                }
            else:
                response = {}
            # NOTE: emin/emax here are placeholders; callers overwrite
            # world.detector.emin/emax/ebinsize before simulating
            world.add_xrfdetector(
                distance=detdistance,
                activearea=activearea,
                polar=polar,
                azimuth=azimuth,
                hoffset=0,
                voffset=0,
                emin=0,
                emax=1,
                ebinsize=mcagain,
                forcedetect=True,
                multiplicity=10,
                time=time,
                response=response,
            )
            # Add detector filters
            dhor, dvert = world.detector.pixelsize
            for layer in self.geometry.detectorfilters(include_atmosphere=False):
                # NOTE(review): negative thickness is presumably the xrmc
                # convention for filters on the detector side - confirm
                world.detector.add_layer(
                    material=layer["material"],
                    thickness=-layer["thickness"],
                    dhor=dhor,
                    dvert=dvert,
                )
            yield world
def _sourceflux(self, energies, samplesourcedist, sampleflux=1e10):
"""Convert flux on sample to flux of the source
Args:
energies(array):
samplesourcedist(num): in cm
sampleflux(num)
Returns:
array: flux for each source line
"""
atmosphere = self.geometry.atmosphere
if atmosphere:
sourcelineflux = sampleflux / atmosphere.transmission(
energies, samplesourcedist
)
else:
sourcelineflux = np.full_like(energies, sampleflux)
sourcelineflux /= len(sourcelineflux)
for layer in self.geometry.beamfilters(include_atmosphere=False):
sourcelineflux = sourcelineflux / layer["material"].transmission(
energies, layer["thickness"]
)
return sourcelineflux
    def _rates_xrmc(
        self,
        energy0,
        weights,
        ninteractions,
        emin=0,
        emax=None,
        withdetectorresponse=True,
    ):
        """
        Calculate line rates with an xrmc Monte-Carlo simulation.

        Args:
            energy0(array): nSource x nLines
            weights(array): nSource x nLines (overwritten by the filter-
                            corrected source weights)
            ninteractions(num):
            emin(Optional(num)):
            emax(Optional(num)): defaults to max source energy + 1
            withdetectorresponse(Optional(bool)): currently unused, response
                is already included (see TODO below)
        Returns:
            list(dict): line: rate (nSource)
        """
        rates = []
        with self._xrmc_context(flux=1, time=1, convoluted=True) as world:
            for energy0i, weightsi in zip(energy0, weights):
                # Sample flux to source lines
                fluxi = self._sourceflux(energy0i, world.source.distance)
                flux = fluxi.sum()
                weightsi = fluxi / flux
                # xrmc line format: [energy, sigma, intensity]
                # NOTE(review): `fl * w` equals fluxi**2/flux, not the line
                # flux `fl` - verify the intended intensity here
                world.spectrum.lines = [
                    [en, 0, fl * w] for en, w, fl in zip(energy0i, weightsi, fluxi)
                ]
                # Detector energy range
                if emax is None:
                    emaxi = np.max(energy0i) + 1
                else:
                    emaxi = emax
                # world.detector.emin = self.geometry.detector.mcazero
                world.detector.emin = emin
                world.detector.emax = emaxi
                world.detector.ebinsize = self.geometry.detector.mcagain
                # Run simulation
                interactions = (0,) + (10000,) * ninteractions
                world.finalize(interactions=interactions)
                if not world.simulate():
                    raise RuntimeError("Simulation failed")
                # TODO: xrmc issue #49
                data, info = world.detector.result(convoluted=True)
                # Sum all axes except the energy axis -> 1D MCA
                mca = data.sum(axis=tuple(range(data.ndim - 1)))
                # Extract lines (non-empty bins), normalized per photon
                mask = mca != 0
                mca = mca[mask] / flux
                xenergy = info["xenergy"][mask]
                # TODO: response already included
                # if withdetectorresponse:
                #    mca = mca * self.geometry.detector.attenuation(xenergy)
                linesi = [xrayspectrum.Line(en) for en in xenergy]
                ratesi = dict(zip(linesi, mca))
                rates.append(ratesi)
        return rates
    def _rates_xmimsim(
        self,
        energy0,
        weights,
        ninteractions,
        emin=0,
        emax=None,
        withdetectorresponse=True,
        source_distance=100,
        beamsize=1e-4,
        runxrmc=False,
    ):
        """
        Calculate line rates with an XMI-MSIM simulation (optionally executed
        through xrmc).

        Args:
            energy0(array): nSource x nLines
            weights(array): nSource x nLines (overwritten by the filter-
                            corrected source weights)
            ninteractions(num):
            emin(Optional(num)):
            emax(Optional(num)):
            withdetectorresponse(Optional(bool)): currently unused, the
                response is already included (see TODO below)
            source_distance(num): source-sample distance in cm
            beamsize(num): beam size in cm
            runxrmc(bool): execute the generated input with xrmc
        Returns:
            list(dict): line: rate (nSource)
        """
        rates = []
        with localfs.temp(remove=True) as path:
            path.mkdir()
            for energy0i, weightsi in zip(energy0, weights):
                # Sample flux to source lines
                fluxi = self._sourceflux(energy0i, source_distance)
                flux = fluxi.sum()
                weightsi = fluxi / flux
                # NOTE(review): _xmimsim_sample uses a source distance but is
                # not passed `source_distance` here - confirm
                sample = self._xmimsim_sample()
                # Run simulation
                ph = pymca.PymcaHandle(
                    energy=energy0i,
                    weights=weightsi,
                    emin=emin,
                    emax=emax,
                    ninteractions=ninteractions,
                    flux=flux,
                    time=1,
                    sample=sample,
                )
                xmimsim.run(
                    str(path),
                    pymcahandle=ph,
                    source_distance=source_distance,
                    beamsize=beamsize,
                    has_atmosphere=bool(self.geometry.atmosphere),
                    runxrmc=runxrmc,
                )
                if runxrmc:
                    # TODO: xrmc issue #49
                    data, info = xrmc.loadxrmcresult_xmimsim(str(path), convoluted=True)
                    # Sum all axes except the energy axis -> 1D MCA
                    mca = data.sum(axis=tuple(range(data.ndim - 1)))
                else:
                    mca, info = xmimsim.loadxmimsimresult(str(path), convoluted=False)
                # Extract lines (non-empty bins), normalized per photon
                mask = mca != 0
                mca = mca[mask] / flux
                xenergy = info["xenergy"][mask]
                # TODO: response already included
                # if withdetectorresponse:
                #    mca = mca * self.geometry.detector.attenuation(xenergy)
                linesi = [xrayspectrum.Line(en) for en in xenergy]
                ratesi = dict(zip(linesi, mca))
                rates.append(ratesi)
        return rates
def _xmimsim_sample(self):
# Add atmosphere layer which is thick enough to include source and detector
if self.geometry.atmosphere:
atm_thickness = max(source_distance, self.geometry.distance) * 2
lst = [(atmosphere, atm_thickness)] + [
(layer.material, layer.thickness) for layer in self
]
material, thickness = zip(*lst)
return self.__class__(
material=material,
thickness=thickness,
geometry=self.geometry,
name=self.name,
)
else:
return self
def _assert_rate_parameters(
self, method, ninteractions=1, scattering=True, withdetectorresponse=True
):
"""
Modify the method based on requested features
"""
if method == "xrmc":
if not xrmc.installed():
raise RuntimeError("'xrmc' is not installed")
if not withdetectorresponse:
raise RuntimeError("'xrmc' cannot disable detector response")
elif method == "xmimsim":
if not xmimsim.installed():
raise RuntimeError("'xmimsim' is not installed")
if not withdetectorresponse:
raise RuntimeError("'xmimsim' cannot disable detector response")
elif method == "fisx":
if scattering:
raise RuntimeError("'fisx' does not support scattering")
if not withdetectorresponse:
raise RuntimeError("'fisx' cannot disable detector response")
if not self.geometry.reflection:
raise RuntimeError("'fisx' does not support transmission geometry")
if ninteractions > 3:
raise RuntimeError(
"'fisx' does not support {} interactions".format(ninteractions)
)
elif method == "analytical":
if ninteractions >= 2:
raise RuntimeError(
"'analytical' does not support {} interactions".format(
ninteractions
)
)
return method
    @cache.withcache("layerinfo")
    def xrayspectrum(
        self,
        energy0,
        emin=0,
        emax=None,
        method="analytical",
        ninteractions=1,
        weights=None,
        scattering=True,
        withdetectorresponse=True,
        **kwargs
    ):
        """
        Spectrum of this sample measured under the associated geometry.

        Args:
            energy0(array): nLines or nSource x nLines
            emin:
            emax:
            method: "fisx", "xrmc", "xmimsim", "numerical" or "analytical"
            ninteractions:
            weights(array): nLines or nSource x nLines
            scattering(bool): include scattering peaks
            withdetectorresponse(bool):
            **kwargs: forwarded to the selected rate calculator
        Returns:
            Spectrum or list(Spectrum): one spectrum per source, or a single
            spectrum when only one source was given
        """
        # Fail early when the method cannot provide the requested features
        self._assert_rate_parameters(
            method,
            ninteractions=ninteractions,
            scattering=scattering,
            withdetectorresponse=withdetectorresponse,
        )
        # Calculate line rate dictionary for each source
        energy0, weights, singlespectrum, singleline = reshape_spectrum_lines(
            energy0, weights=weights
        )
        if method == "fisx":
            rates = self._rates_fisx(
                energy0, weights, ninteractions, emin=emin, emax=emax, **kwargs
            )
        elif method == "xrmc":
            rates = self._rates_xrmc(
                energy0,
                weights,
                ninteractions,
                emin=emin,
                emax=emax,
                withdetectorresponse=withdetectorresponse,
                **kwargs
            )
        elif method == "xmimsim":
            rates = self._rates_xmimsim(
                energy0,
                weights,
                ninteractions,
                emin=emin,
                emax=emax,
                withdetectorresponse=withdetectorresponse,
                **kwargs
            )
        else:
            # "numerical" and "analytical" both go through _rates_calc
            rates = self._rates_calc(
                method,
                energy0,
                weights,
                ninteractions,
                emin=emin,
                emax=emax,
                withdetectorresponse=withdetectorresponse,
                **kwargs
            )
        # X-ray spectrum for each source
        spectra = [
            self._rates_to_spectrum(rdict, emin=emin, emax=emax, scattering=scattering)
            for rdict in rates
        ]
        if singlespectrum:
            return spectra[0]
        else:
            return spectra
def convoluted_xrayspectrum(
self,
energy0,
emin=0,
emax=None,
method="analytical",
ninteractions=1,
weights=None,
scattering=True,
escape=True,
pileup=True,
flux=1e9,
time=1,
**kwargs
):
"""
Spectrum of this sample measured under the associated gemetry
Args:
energy0(array): nLines or nSource x nLines
emin:
emax:
method:
ninteractions:
weights(array): nLines or nSource x nLines
scattering(bool): include scattering peaks
Returns:
tuple or list(Spectrum)
"""
self._assert_rate_parameters(
method,
ninteractions=ninteractions,
scattering=scattering,
withdetectorresponse=True,
)
if method == "xrmc":
pass
elif method == "xmimsim":
pass
else:
result = self.xrayspectrum(
energy0,
emin=emin,
emax=emax,
method=method,
ninteractions=ninteractions,
weights=weights,
scattering=scattering,
**kwargs
)
kwargs = {"fluxtime": flux * time, "histogram": True}
if isinstance(result, list):
result = [s.sumspectrum(**kwargs) for s in result]
else:
result = result.sumspectrum(**kwargs)
return result
def propagate(self, N, energy, interaction="transmission", forward=True):
"""
Error propagation of transmitted number of photons.
Args:
N(num|array): incomming number of photons with uncertainties
energy(num|array): energies
Returns:
num|numpy.array
"""
# Bernouilli processes: compounding is the same as multiplication
# so we can multiply the probabilities
if interaction == "transmission":
probsuccess = self.transmission(energy)
else:
raise RuntimeError("{} not implemented yet".format(interaction))
N, probsuccess = self.propagate_broadcast(N, probsuccess)
if instance.isuscalar(N):
process = noisepropagation.bernouilli(probsuccess)
Nout = noisepropagation.compound(N, process, forward=forward)
else:
if forward:
Nout = N * probsuccess
else:
Nout = N / probsuccess
return Nout
factory = Multilayer.factory
registry = Multilayer.clsregistry
| [
"numpy.triu",
"numpy.abs",
"numpy.empty",
"matplotlib.pyplot.figure",
"numpy.isclose",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"pandas.DataFrame",
"numpy.full_like",
"matplotlib.pyplot.imshow",
"numpy.transpose",
"numpy.insert",
"numpy.cumsum",
"numpy.max",
"numpy.linspace",
"coll... | [((798, 825), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (815, 825), False, 'import logging\n'), ((6749, 6770), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (6768, 6770), False, 'import collections\n'), ((13637, 13663), 'numpy.empty', 'np.empty', (['(self.nlayers + 1)'], {}), '(self.nlayers + 1)\n', (13645, 13663), True, 'import numpy as np\n'), ((13672, 13708), 'numpy.cumsum', 'np.cumsum', (['self.thickness'], {'out': 't[1:]'}), '(self.thickness, out=t[1:])\n', (13681, 13708), True, 'import numpy as np\n'), ((14226, 14282), 'numpy.digitize', 'np.digitize', (['z', "layerinfo['cumul_thickness']"], {'right': '(True)'}), "(z, layerinfo['cumul_thickness'], right=True)\n", (14237, 14282), True, 'import numpy as np\n'), ((14780, 14839), 'numpy.empty', 'np.empty', (['(self.nlayers + 2, nenergies)'], {'dtype': 'linatt.dtype'}), '((self.nlayers + 2, nenergies), dtype=linatt.dtype)\n', (14788, 14839), True, 'import numpy as np\n'), ((15080, 15139), 'numpy.empty', 'np.empty', (['(self.nlayers + 2, nenergies)'], {'dtype': 'attall.dtype'}), '((self.nlayers + 2, nenergies), dtype=attall.dtype)\n', (15088, 15139), True, 'import numpy as np\n'), ((16999, 17021), 'numpy.exp', 'np.exp', (['(-datt / cosaij)'], {}), '(-datt / cosaij)\n', (17005, 17021), True, 'import numpy as np\n'), ((18393, 18421), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'source'}), '(columns=source)\n', (18405, 18421), True, 'import pandas as pd\n'), ((42065, 42075), 'fisx.XRF', 'fisx.XRF', ([], {}), '()\n', (42073, 42075), False, 'import fisx\n'), ((46203, 46223), 'numpy.insert', 'np.insert', (['ind', '(0)', '(0)'], {}), '(ind, 0, 0)\n', (46212, 46223), True, 'import numpy as np\n'), ((6318, 6359), 'numpy.vectorize', 'np.vectorize', (['(lambda layer: layer.density)'], {}), '(lambda layer: layer.density)\n', (6330, 6359), True, 'import numpy as np\n'), ((6421, 6464), 'numpy.vectorize', 'np.vectorize', (['(lambda layer: layer.thickness)'], 
{}), '(lambda layer: layer.thickness)\n', (6433, 6464), True, 'import numpy as np\n'), ((6532, 6581), 'numpy.vectorize', 'np.vectorize', (['(lambda layer: layer.xraythicknessin)'], {}), '(lambda layer: layer.xraythicknessin)\n', (6544, 6581), True, 'import numpy as np\n'), ((6650, 6699), 'numpy.vectorize', 'np.vectorize', (['(lambda layer: layer.xraythicknessin)'], {}), '(lambda layer: layer.xraythicknessin)\n', (6662, 6699), True, 'import numpy as np\n'), ((10034, 10044), 'numpy.exp', 'np.exp', (['(-A)'], {}), '(-A)\n', (10040, 10044), True, 'import numpy as np\n'), ((15286, 15331), 'numpy.subtract.outer', 'np.subtract.outer', (['linatt[:, i]', 'linatt[:, i]'], {}), '(linatt[:, i], linatt[:, i])\n', (15303, 15331), True, 'import numpy as np\n'), ((19167, 19194), 'pandas.concat', 'pd.concat', (['probs'], {'sort': '(True)'}), '(probs, sort=True)\n', (19176, 19194), True, 'import pandas as pd\n'), ((24363, 24433), 'numpy.exp', 'np.exp', (["(chi * layerinfo['cumul_thickness'][1:, np.newaxis, np.newaxis])"], {}), "(chi * layerinfo['cumul_thickness'][1:, np.newaxis, np.newaxis])\n", (24369, 24433), True, 'import numpy as np\n'), ((24452, 24523), 'numpy.exp', 'np.exp', (["(chi * layerinfo['cumul_thickness'][:-1, np.newaxis, np.newaxis])"], {}), "(chi * layerinfo['cumul_thickness'][:-1, np.newaxis, np.newaxis])\n", (24458, 24523), True, 'import numpy as np\n'), ((24594, 24608), 'numpy.exp', 'np.exp', (['chicor'], {}), '(chicor)\n', (24600, 24608), True, 'import numpy as np\n'), ((24815, 24842), 'numpy.transpose', 'np.transpose', (['J2', '[0, 2, 1]'], {}), '(J2, [0, 2, 1])\n', (24827, 24842), True, 'import numpy as np\n'), ((25156, 25232), 'numpy.asarray', 'np.asarray', (["[(interaction == 'Compton') for interaction in interactions1exp]"], {}), "([(interaction == 'Compton') for interaction in interactions1exp])\n", (25166, 25232), True, 'import numpy as np\n'), ((25280, 25357), 'numpy.asarray', 'np.asarray', (["[(interaction == 'Rayleigh') for interaction in 
interactions1exp]"], {}), "([(interaction == 'Rayleigh') for interaction in interactions1exp])\n", (25290, 25357), True, 'import numpy as np\n'), ((25476, 25603), 'numpy.concatenate', 'np.concatenate', (['(J2[..., indF], J2[:, indsource, indC][..., np.newaxis], J2[:, indsource,\n indR][..., np.newaxis])'], {'axis': '(-1)'}), '((J2[..., indF], J2[:, indsource, indC][..., np.newaxis], J2[\n :, indsource, indR][..., np.newaxis]), axis=-1)\n', (25490, 25603), True, 'import numpy as np\n'), ((27785, 27807), 'numpy.linspace', 'np.linspace', (['za', 'zb', 'n'], {}), '(za, zb, n)\n', (27796, 27807), True, 'import numpy as np\n'), ((27851, 27867), 'numpy.trapz', 'np.trapz', (['y'], {'x': 'x'}), '(y, x=x)\n', (27859, 27867), True, 'import numpy as np\n'), ((30599, 30621), 'numpy.linspace', 'np.linspace', (['za', 'zb', 'n'], {}), '(za, zb, n)\n', (30610, 30621), True, 'import numpy as np\n'), ((30639, 30661), 'numpy.linspace', 'np.linspace', (['za', 'zb', 'n'], {}), '(za, zb, n)\n', (30650, 30661), True, 'import numpy as np\n'), ((30707, 30732), 'numpy.trapz', 'np.trapz', (['y'], {'x': 'x1', 'axis': '(0)'}), '(y, x=x1, axis=0)\n', (30715, 30732), True, 'import numpy as np\n'), ((30749, 30774), 'numpy.trapz', 'np.trapz', (['y'], {'x': 'x2', 'axis': '(0)'}), '(y, x=x2, axis=0)\n', (30757, 30774), True, 'import numpy as np\n'), ((50996, 51030), 'numpy.full_like', 'np.full_like', (['energies', 'sampleflux'], {}), '(energies, sampleflux)\n', (51008, 51030), True, 'import numpy as np\n'), ((10668, 10683), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (10680, 10683), True, 'import numpy as np\n'), ((10701, 10713), 'numpy.vstack', 'np.vstack', (['A'], {}), '(A)\n', (10710, 10713), True, 'import numpy as np\n'), ((11037, 11049), 'numpy.vstack', 'np.vstack', (['A'], {}), '(A)\n', (11046, 11049), True, 'import numpy as np\n'), ((24676, 24720), 'numpy.exp', 'np.exp', (['(-cor1[-1, np.newaxis, :, np.newaxis])'], {}), '(-cor1[-1, np.newaxis, :, np.newaxis])\n', (24682, 24720), 
True, 'import numpy as np\n'), ((28888, 28905), 'numpy.asarray', 'np.asarray', (['rates'], {}), '(rates)\n', (28898, 28905), True, 'import numpy as np\n'), ((42681, 42697), 'numpy.max', 'np.max', (['energy0i'], {}), '(energy0i)\n', (42687, 42697), True, 'import numpy as np\n'), ((44076, 44092), 'numpy.max', 'np.max', (['energy0i'], {}), '(energy0i)\n', (44082, 44092), True, 'import numpy as np\n'), ((46710, 46737), 'numpy.diag', 'np.diag', (['efficiency[:, a:b]'], {}), '(efficiency[:, a:b])\n', (46717, 46737), True, 'import numpy as np\n'), ((15388, 15400), 'numpy.triu', 'np.triu', (['tmp'], {}), '(tmp)\n', (15395, 15400), True, 'import numpy as np\n'), ((19310, 19329), 'numpy.arange', 'np.arange', (['_nlayers'], {}), '(_nlayers)\n', (19319, 19329), True, 'import numpy as np\n'), ((31382, 31404), 'numpy.linspace', 'np.linspace', (['za', 'zb', 'n'], {}), '(za, zb, n)\n', (31393, 31404), True, 'import numpy as np\n'), ((31430, 31456), 'numpy.linspace', 'np.linspace', (['za', 'zb', '(n + 1)'], {}), '(za, zb, n + 1)\n', (31441, 31456), True, 'import numpy as np\n'), ((32196, 32208), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32206, 32208), True, 'import matplotlib.pyplot as plt\n'), ((32269, 32284), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (32279, 32284), True, 'import matplotlib.pyplot as plt\n'), ((32305, 32315), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32313, 32315), True, 'import matplotlib.pyplot as plt\n'), ((32516, 32533), 'numpy.asarray', 'np.asarray', (['rates'], {}), '(rates)\n', (32526, 32533), True, 'import numpy as np\n'), ((41527, 41590), 'numpy.isclose', 'np.isclose', (['rate', '((primary + secondary + tertiary) * efficiency)'], {}), '(rate, (primary + secondary + tertiary) * efficiency)\n', (41537, 41590), True, 'import numpy as np\n'), ((19559, 19578), 'numpy.abs', 'np.abs', (['(energyi - x)'], {}), '(energyi - x)\n', (19565, 19578), True, 'import numpy as np\n'), ((52420, 52436), 
'numpy.max', 'np.max', (['energy0i'], {}), '(energy0i)\n', (52426, 52436), True, 'import numpy as np\n')] |
import itertools
import math
import random
import time
from typing import *
import keras
import sklearn.metrics
import numpy as np
import PythonExtras.Normalizer as Normalizer
from PythonExtras import numpy_extras as npe
class KerasBatchedCallback(keras.callbacks.Callback):
    """Keras callback extended with macrobatch-boundary hooks.

    Subclasses may override either hook; both defaults do nothing, mirroring
    the no-op behaviour of the standard Keras callback methods.
    """

    def on_macro_batch_start(self, macroBatch: int, logs=None):
        """Called right before a macrobatch is processed. No-op by default."""

    def on_macro_batch_end(self, macroBatch: int, logs=None):
        """Called right after a macrobatch is processed. No-op by default."""
class KerasBatchedLambdaCallback(KerasBatchedCallback):
    """Ad-hoc batched callback assembled from plain functions.

    Works like `keras.callbacks.LambdaCallback`, but additionally accepts the
    macrobatch hooks defined by `KerasBatchedCallback`.  Any hook that is not
    supplied defaults to a no-op.
    """
    def __init__(self, on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None,
                 on_train_begin=None, on_train_end=None, on_macro_batch_start=None, on_macro_batch_end=None):
        # Bug fix: the original called super(KerasBatchedCallback, self).__init__(),
        # which starts the MRO lookup *past* KerasBatchedCallback.  Zero-argument
        # super() runs the full initializer chain as intended.
        super().__init__()
        emptyCallback = lambda *args: None
        self.on_epoch_begin = on_epoch_begin or emptyCallback
        self.on_epoch_end = on_epoch_end or emptyCallback
        self.on_batch_begin = on_batch_begin or emptyCallback
        self.on_batch_end = on_batch_end or emptyCallback
        self.on_train_begin = on_train_begin or emptyCallback
        self.on_train_end = on_train_end or emptyCallback
        self.on_macro_batch_start = on_macro_batch_start or emptyCallback
        self.on_macro_batch_end = on_macro_batch_end or emptyCallback
class KerasBatchedCallbackList(keras.callbacks.CallbackList):
    """Callback container that also dispatches the macrobatch hooks.

    Callbacks without macrobatch hooks (plain Keras callbacks) are skipped
    via the hasattr check, so both kinds can be mixed in one list.
    """
    def on_macro_batch_start(self, macroBatch: int, logs=None):
        """Dispatch `on_macro_batch_start` to every callback that defines it."""
        logs = logs or {}
        for callback in self.callbacks:
            if hasattr(callback, 'on_macro_batch_start'):
                callback.on_macro_batch_start(macroBatch, logs)
    def on_macro_batch_end(self, macroBatch: int, logs=None):
        """Dispatch `on_macro_batch_end` to every callback that defines it."""
        logs = logs or {}
        for callback in self.callbacks:
            if hasattr(callback, 'on_macro_batch_end'):
                callback.on_macro_batch_end(macroBatch, logs)
    def set_validation_data(self, valDataX: List[np.ndarray], valDataY: List[np.ndarray]):
        """Attach validation data to every callback (needed by e.g. TensorBoard)."""
        # Have to interface with Keras here.
        # It expects not only X and Y data, but 'sample weights' and an optional 'learning phase' bool.
        # NOTE(review): all-ones sample weights are appended; this relies on the
        # private Keras API _uses_dynamic_learning_phase -- verify on Keras upgrades.
        valData = list(itertools.chain(valDataX, valDataY, [np.ones((valDataX[0].shape[0],))]))
        if self.callbacks[0].model._uses_dynamic_learning_phase():
            valData += [0.0]
        for callback in self.callbacks:
            callback.validation_data = valData
class KerasBatchedTrainer:
    """Out-of-core trainer for Keras models.

    Streams training/test data in 'macrobatches' -- chunks sliced from
    (possibly memory-mapped) large arrays -- and trains on each chunk with the
    usual Keras minibatch SGD.  Each macrobatch can optionally be normalized
    (`normalizerX`) and preprocessed (`macrobatchPreprocess`) before it is fed
    to the model.
    """
    class History:
        """Collects one merged metric dict (train + test) per epoch."""
        def __init__(self, metricsPerEpoch: Optional[List[Dict[str, float]]] = None):
            self.metricsPerEpoch = metricsPerEpoch or []
        def add_epoch(self, trainMetrics: Dict[str, float], testMetrics: Dict[str, float]):
            """Record one epoch.  Test metrics are 'val_'-prefixed upstream, so the merge cannot clash."""
            self.metricsPerEpoch.append({
                **trainMetrics,
                **testMetrics
            })
        def get_train_loss_history(self):
            """Return the training 'loss' value of every recorded epoch."""
            return [d['loss'] for d in self.metricsPerEpoch]
        def get_test_loss_history(self):
            """Return the 'val_loss' value of every recorded epoch."""
            return [d['val_loss'] for d in self.metricsPerEpoch]
    def __init__(self,
                 model: keras.Model,
                 trainX: Union[npe.LargeArray, List[npe.LargeArray]],
                 trainY: Union[npe.LargeArray, List[npe.LargeArray]],
                 testX: Union[npe.LargeArray, List[npe.LargeArray]],
                 testY: Union[npe.LargeArray, List[npe.LargeArray]],
                 macrobatchSize: int,
                 minibatchSize: int, minibatchSizeEval: int = None,
                 normalizerX: Normalizer = None,
                 featureAxis: int = None,
                 macrobatchPreprocess: Optional[Callable] = None):
        """Configure the trainer.

        Args:
            model: compiled Keras model to train.
            trainX, trainY, testX, testY: arrays (or lists of arrays) whose
                first axis is the batch dimension; all must agree in size
                within each split.
            macrobatchSize: number of points loaded into RAM at once
                (rounded up to a multiple of minibatchSize).
            minibatchSize: SGD batch size used by model.fit.
            minibatchSizeEval: batch size for prediction; defaults to minibatchSize.
            normalizerX: optional input normalizer, fit lazily on the first macrobatch.
            featureAxis: axis passed to the normalizer's fit().
            macrobatchPreprocess: optional callable (X, Y) -> (X, Y) applied to
                each macrobatch after normalization.
        """
        if minibatchSizeEval is None:
            minibatchSizeEval = minibatchSize
        trainX, trainY, testX, testY = self._to_list(trainX), self._to_list(trainY), \
                                       self._to_list(testX), self._to_list(testY)
        # The interface allows multiple outputs, but we don't need that functionality atm.
        if len(trainY) > 1:
            raise NotImplementedError()
        tensorsTrain = itertools.chain(trainX, trainY)
        tensorsTest = itertools.chain(testX, testY)
        if not all((tensor.shape[0] == trainX[0].shape[0] for tensor in tensorsTrain)) or \
           not all((tensor.shape[0] == testX[0].shape[0] for tensor in tensorsTest)):
            raise RuntimeError("The batch dimension of all input and output tensors should have the same size.")
        # Round up the macrobatch size to be a multiple of the minibatch size.
        # This sometimes helps to avoid very small minibatches at the end of an epoch.
        macrobatchSize = int(math.ceil(macrobatchSize / minibatchSize)) * minibatchSize
        self.model = model
        self.trainX = trainX
        self.trainY = trainY
        self.testX = testX
        self.testY = testY
        self.macrobatchSize = macrobatchSize
        self.minibatchSize = minibatchSize
        self.minibatchSizeEval = minibatchSizeEval
        self.normalizerX = normalizerX
        self.featureAxis = featureAxis
        self.macrobatchPreprocess = macrobatchPreprocess
        self.trainPointNumber = self.trainX[0].shape[0]
        self.testPointNumber = self.testX[0].shape[0]
        self.macrobatchNumberTrain = int(math.ceil(self.trainPointNumber / self.macrobatchSize))
        self.macrobatchNumberTest = int(math.ceil(self.testPointNumber / self.macrobatchSize))
    def fit_normalizer(self, printFunc=print):
        """Fit the input normalizer on the first training macrobatch and return it."""
        if len(self.trainX) > 1:
            raise NotImplementedError()
        # The normalizer is fit only to the first macrobatch, to save on time and code.
        # This should be sufficient, esp. if the training data is shuffled.
        trainBatchX = self.trainX[0][:min(self.macrobatchSize, self.trainPointNumber)]
        self.normalizerX.fit(trainBatchX, axis=self.featureAxis)
        printFunc("Computed normalization parameters: {}".format(self.normalizerX.get_weights()))
        return self.normalizerX
    def test_init_loss(self, macrobatchNumber: int = None, lossType: str = 'mse', printFunc=print):
        """Sanity-check the untrained model's loss against an expected value.

        Runs out-of-core prediction over `macrobatchNumber` macrobatches
        (default: all) and compares the measured MSE to the loss expected from
        prediction/data statistics.  Only 'mse' is supported.

        Returns:
            (loss, expectedLoss, predMean, predVar, dataMean, dataVar) tuple.
        """
        if len(self.trainX) > 1:
            raise NotImplementedError()
        macrobatchNumber = macrobatchNumber or self.macrobatchNumberTrain
        printFunc("Performing init loss check using {} macrobatches.".format(macrobatchNumber))
        if lossType == 'mse':
            # This is rather copy-pasty, cf. train() method.
            loss = 0
            predMean, predVar, dataMean, dataVar = 0, 0, 0, 0
            for macrobatchIndex in range(0, macrobatchNumber):
                dataStart = macrobatchIndex * self.macrobatchSize
                dataEnd = min(dataStart + self.macrobatchSize, self.trainPointNumber)
                # Extract the batch and normalize if needed.
                predictionInput = self.trainX[0][dataStart:dataEnd]
                predictionTarget = self.trainY[0][dataStart:dataEnd]
                if self.normalizerX is not None:
                    if not self.normalizerX.isFit:
                        self.fit_normalizer(printFunc=printFunc)
                    predictionInput = self.normalizerX.scale(predictionInput)
                prediction = self.model.predict(predictionInput, batch_size=self.minibatchSizeEval, verbose=0)
                macrobatchMse = sklearn.metrics.mean_squared_error(predictionTarget.flatten(),
                                                                   prediction.flatten())
                # Normalize to avoid bias.
                normCoef = (dataEnd - dataStart) / min(macrobatchNumber * self.macrobatchSize, self.trainPointNumber)
                # Compute loss and also prediction and data statistics.
                # Technically, for variance, we should multiply by N/N-1 at the end, but it's not crucial here.
                loss += macrobatchMse * normCoef
                predMean += np.mean(prediction) * normCoef
                predVar += np.var(prediction) * normCoef
                dataMean += np.mean(predictionTarget) * normCoef
                dataVar += np.var(predictionTarget) * normCoef
        else:
            raise ValueError("Invalid loss: '{}'".format(lossType))
        # (Not 100% on this): MSE = Var(y - y_hat) + E(y - y_hat)^2
        expectedLoss = predVar + dataVar + (predMean - dataMean) ** 2
        printFunc("Loss at init: {:.3f}. Expected loss: {:.3f} Prediction mean(var): {:.3f} ({:.3f}) "
                  "Data mean(var): {:.3f} ({:.3f})".format(loss, expectedLoss, predMean, predVar, dataMean, dataVar))
        return loss, expectedLoss, predMean, predVar, dataMean, dataVar
    def train(self, epochNumber: int, callbacks: List[keras.callbacks.Callback] = None, printFunc=print) -> History:
        """Run the out-of-core training loop.

        Each epoch shuffles the macrobatch order, fits the model on every
        macrobatch, evaluates on the test data out-of-core, and reports merged
        metrics plus timing breakdowns to the callbacks.  Honors
        `model.stop_training` (e.g. set by EarlyStopping).

        Returns:
            History with one merged metric dict per epoch.
        """
        # Construct the callback list, provide parameters that won't change during the training.
        callbackList = KerasBatchedCallbackList(callbacks)
        callbackList.set_params({
            'batch_size': self.minibatchSize,
            'epochs': epochNumber,
            'steps': None,
            'samples': self.trainPointNumber,
            'verbose': False,
            'do_validation': False,
            'metrics': [],
        })
        callbackList.set_model(self.model)
        # Some callbacks (e.g. Tensorboard) require val.data. Provide only one macrobatch to not overflow RAM.
        callbackList.set_validation_data([array[:self.macrobatchSize] for array in self.testX],
                                         [array[:self.macrobatchSize] for array in self.testY])
        callbackList.on_train_begin()
        printFunc("Starting batched training. {} epochs, {:,} macrobatches with up to {:,} points each.".format(
            epochNumber, self.macrobatchNumberTrain, self.macrobatchSize
        ))
        # Main 'learning epoch' loop, which goes through all the data at each step.
        trainingHistory = KerasBatchedTrainer.History()
        for epochIndex in range(0, epochNumber):
            epochTrainMetrics = {}
            timeLoad, timeTraining, timeNormalization = 0, 0, 0
            timeStart = time.time()
            callbackList.on_epoch_begin(epochIndex)
            # Access macrobatches in a different random order every epoch.
            macrobatchDataIndices = list(range(0, self.macrobatchNumberTrain))
            random.shuffle(macrobatchDataIndices)
            # 'Macrobatch' loop, which splits the training data into chunks for out-of-core processing.
            for macrobatchIndex in range(0, self.macrobatchNumberTrain):
                macrobatchDataIndex = macrobatchDataIndices[macrobatchIndex]
                dataStart = macrobatchDataIndex * self.macrobatchSize
                dataEnd = min(dataStart + self.macrobatchSize, self.trainPointNumber)
                # Load the training data chunk from disk.
                t = time.time()
                trainBatchX = [array[dataStart:dataEnd, ...] for array in self.trainX]
                trainBatchY = [array[dataStart:dataEnd, ...] for array in self.trainY]
                timeLoad += time.time() - t
                t = time.time()
                if self.normalizerX is not None:
                    if len(self.trainX) > 1:
                        raise NotImplementedError()
                    if not self.normalizerX.isFit:
                        self.fit_normalizer(printFunc=printFunc)
                    # Scale the training data.
                    assert len(trainBatchX) == 1
                    self.normalizerX.scale(trainBatchX[0], inPlace=True)  # todo Support multiple tensors.
                if self.macrobatchPreprocess:
                    trainBatchX, trainBatchY = self.macrobatchPreprocess(trainBatchX, trainBatchY)
                timeNormalization += time.time() - t
                callbackList.on_macro_batch_start(macrobatchIndex, {'epoch': epochIndex})
                t = time.time()
                # Train the model on the macrobatch. Minibatch size specifies SGD batch size,
                # which is also the chunk size for GPU loads.
                batchHistory = self.model.fit(trainBatchX, trainBatchY,
                                              epochs=1, shuffle=True,
                                              batch_size=self.minibatchSize,
                                              verbose=0)
                timeForMacrobatch = time.time() - t
                timeTraining += timeForMacrobatch
                # Maintain epoch metric values as the mean of macrobatch metrics.
                # Note, that since the model is training progressively,
                # this is not the true metric value. But this is faster and accurate enough.
                # Batches have different sizes -> weight the batch metrics to get an unbiased epoch mean estimate.
                meanNormCoef = (dataEnd - dataStart) / self.trainPointNumber
                for metricName in batchHistory.history.keys():
                    macrobatchMetric = batchHistory.history[metricName][0]  # There's only one epoch.
                    if macrobatchIndex == 0:
                        epochTrainMetrics[metricName] = 0
                    epochTrainMetrics[metricName] += meanNormCoef * macrobatchMetric
                callbackList.on_macro_batch_end(macrobatchIndex, {'epoch': epochIndex, **epochTrainMetrics})
            t = time.time()
            # Perform out-of-core prediction for the test data.
            epochTestMetrics = self.compute_test_metrics()
            timeEvaluation = time.time() - t
            timeTotal = time.time() - timeStart
            timeRest = timeTotal - timeTraining - timeEvaluation - timeNormalization - timeLoad
            trainingHistory.add_epoch(epochTrainMetrics, epochTestMetrics)
            callbackList.on_epoch_end(epochIndex, {**epochTrainMetrics, **epochTestMetrics,
                                                   'time_total': timeTotal, 'time_load': timeLoad,
                                                   'time_train': timeTraining, 'time_norm': timeNormalization,
                                                   'time_val': timeEvaluation, 'time_rest': timeRest})
            # Support the standard EarlyStopping callback.
            # todo Write our own early stopping logic?
            if self.model.stop_training:
                if printFunc:
                    printFunc("Training stop requested on epoch {}.".format(epochIndex))
                break
        callbackList.on_train_end()
        return trainingHistory
    def compute_test_metrics(self) -> Dict[str, float]:
        """Evaluate the model on the test data out-of-core.

        Returns a dict of 'val_'-prefixed metrics, each the size-weighted
        average of the per-macrobatch metric values.
        """
        testMetrics = {}
        for macrobatchIndex in range(0, self.macrobatchNumberTest):
            dataStart = macrobatchIndex * self.macrobatchSize
            dataEnd = min(dataStart + self.macrobatchSize, self.testPointNumber)
            # Extract the batch and normalize if needed.
            predictionInput = [array[dataStart:dataEnd] for array in self.testX]
            if self.normalizerX is not None:
                if len(predictionInput) > 1:
                    raise NotImplementedError()
                predictionInput = self.normalizerX.scale(predictionInput[0])  # todo Support multiple tensors.
            if self.macrobatchPreprocess:
                predictionInput, _ = self.macrobatchPreprocess(predictionInput, None)
            prediction = self.model.predict(predictionInput, batch_size=self.minibatchSizeEval, verbose=0)
            predictionTarget = self.testY[0][dataStart:dataEnd]
            batchMetrics = self._compute_batch_metrics(prediction, predictionTarget)
            # Add up the metrics to compute the average, while normalizing to avoid bias.
            # Rename the metrics to follow Keras' conventions.
            for k, v in batchMetrics.items():
                nameFull = 'val_' + k
                valueNorm = v * ((dataEnd - dataStart) / self.testPointNumber)
                if nameFull in testMetrics:
                    testMetrics[nameFull] += valueNorm
                else:
                    testMetrics[nameFull] = valueNorm
        return testMetrics
    def _compute_batch_metrics(self, prediction, predictionTarget) -> Dict[str, float]:
        """Compute the model's loss and metrics on one prediction batch with sklearn.

        Supports 'mse' and 'binary_crossentropy' losses and the 'accuracy'
        metric (reported under Keras' 'acc' alias); raises ValueError otherwise.
        """
        metrics = {}
        for metricName in ['loss'] + self.model.metrics:
            if metricName == 'loss':
                if self.model.loss == 'mse':
                    metric = sklearn.metrics.mean_squared_error(predictionTarget.flatten(), prediction.flatten())
                elif self.model.loss == 'binary_crossentropy':
                    # We need a lower epsilon (1e-7, instead of 1e-14) because we're dealing with float32, not 64.
                    # Was getting NaNs here, but haven't 100% confirmed that this fixed the issue.
                    metric = sklearn.metrics.log_loss(predictionTarget, prediction, eps=1e-7)
                else:
                    raise ValueError("Unknown model loss: '{}'.".format(self.model.loss))
            elif metricName == 'accuracy':
                metricName = 'acc'  # Alias.
                metric = np.mean(np.round(prediction) == predictionTarget)
            else:
                raise ValueError("Unknown model metric: '{}'.".format(metricName))
            metrics[metricName] = metric
        return metrics
    @staticmethod
    def _to_list(value):
        """Wrap a single value into a one-element list; pass lists through."""
        return [value] if not isinstance(value, list) else value
| [
"math.ceil",
"random.shuffle",
"numpy.ones",
"numpy.var",
"time.time",
"numpy.mean",
"numpy.round",
"itertools.chain"
] | [((4074, 4105), 'itertools.chain', 'itertools.chain', (['trainX', 'trainY'], {}), '(trainX, trainY)\n', (4089, 4105), False, 'import itertools\n'), ((4128, 4157), 'itertools.chain', 'itertools.chain', (['testX', 'testY'], {}), '(testX, testY)\n', (4143, 4157), False, 'import itertools\n'), ((5271, 5325), 'math.ceil', 'math.ceil', (['(self.trainPointNumber / self.macrobatchSize)'], {}), '(self.trainPointNumber / self.macrobatchSize)\n', (5280, 5325), False, 'import math\n'), ((5367, 5420), 'math.ceil', 'math.ceil', (['(self.testPointNumber / self.macrobatchSize)'], {}), '(self.testPointNumber / self.macrobatchSize)\n', (5376, 5420), False, 'import math\n'), ((10081, 10092), 'time.time', 'time.time', ([], {}), '()\n', (10090, 10092), False, 'import time\n'), ((10311, 10348), 'random.shuffle', 'random.shuffle', (['macrobatchDataIndices'], {}), '(macrobatchDataIndices)\n', (10325, 10348), False, 'import random\n'), ((13348, 13359), 'time.time', 'time.time', ([], {}), '()\n', (13357, 13359), False, 'import time\n'), ((4645, 4686), 'math.ceil', 'math.ceil', (['(macrobatchSize / minibatchSize)'], {}), '(macrobatchSize / minibatchSize)\n', (4654, 4686), False, 'import math\n'), ((10838, 10849), 'time.time', 'time.time', ([], {}), '()\n', (10847, 10849), False, 'import time\n'), ((11089, 11100), 'time.time', 'time.time', ([], {}), '()\n', (11098, 11100), False, 'import time\n'), ((11879, 11890), 'time.time', 'time.time', ([], {}), '()\n', (11888, 11890), False, 'import time\n'), ((13513, 13524), 'time.time', 'time.time', ([], {}), '()\n', (13522, 13524), False, 'import time\n'), ((13553, 13564), 'time.time', 'time.time', ([], {}), '()\n', (13562, 13564), False, 'import time\n'), ((2170, 2202), 'numpy.ones', 'np.ones', (['(valDataX[0].shape[0],)'], {}), '((valDataX[0].shape[0],))\n', (2177, 2202), True, 'import numpy as np\n'), ((7888, 7907), 'numpy.mean', 'np.mean', (['prediction'], {}), '(prediction)\n', (7895, 7907), True, 'import numpy as np\n'), ((7946, 7964), 
'numpy.var', 'np.var', (['prediction'], {}), '(prediction)\n', (7952, 7964), True, 'import numpy as np\n'), ((8004, 8029), 'numpy.mean', 'np.mean', (['predictionTarget'], {}), '(predictionTarget)\n', (8011, 8029), True, 'import numpy as np\n'), ((8068, 8092), 'numpy.var', 'np.var', (['predictionTarget'], {}), '(predictionTarget)\n', (8074, 8092), True, 'import numpy as np\n'), ((11052, 11063), 'time.time', 'time.time', ([], {}), '()\n', (11061, 11063), False, 'import time\n'), ((11752, 11763), 'time.time', 'time.time', ([], {}), '()\n', (11761, 11763), False, 'import time\n'), ((12360, 12371), 'time.time', 'time.time', ([], {}), '()\n', (12369, 12371), False, 'import time\n'), ((17077, 17097), 'numpy.round', 'np.round', (['prediction'], {}), '(prediction)\n', (17085, 17097), True, 'import numpy as np\n')] |
"""Return pie chart showing class distribution of dataset.
Based on Bokeh pie chart gallery example available at:
https://docs.bokeh.org/en/latest/docs/gallery/pie_chart.html
"""
# %% Imports
# Standard system imports
from math import pi
# Related third party imports
from bokeh.models import ColumnDataSource
from bokeh.palettes import Category10, Category20, Turbo256
from bokeh.plotting import figure
from bokeh.transform import cumsum
import numpy as np
# Local application/library specific imports
# %% Create pie chart
def create_pie_chart(data, metadata, MARGIN):
    """Create a pie chart figure showing the class distribution of the dataset.

    Args:
        data: mapping of column name to a list of values; only the target
            column named by metadata['target'] is used.
        metadata: dict with a 'target' key naming the class column.
        MARGIN: pixel margin applied to the top and right of the figure.

    Returns:
        A configured bokeh figure with one wedge per class.
    """
    # -------------------------------------------------------------------------
    # Setup
    # -------------------------------------------------------------------------
    TARGET = metadata['target']
    # One pass over the column yields both the classes and their counts
    # (the original called list.count() once per class, which is O(n*k)).
    unique_classes, class_counts = np.unique(data[TARGET], return_counts=True)
    CLASSES = list(unique_classes)
    COUNTS = [int(c) for c in class_counts]
    pie_total = sum(COUNTS)
    angle = [(2*pi*cnt)/pie_total for cnt in COUNTS]
    # Define color map: the Category10 palette only exists for sizes 3..10,
    # so clamp the lookup to at least 3 and slice back down to the class count
    # (the original raised KeyError for fewer than three classes).
    if len(CLASSES) <= 10:
        colors = Category10[max(len(CLASSES), 3)][:len(CLASSES)]
    elif len(CLASSES) <= 20:
        colors = Category20[len(CLASSES)]
    else:
        # Sample evenly from the 256-color Turbo palette.
        color_idx = np.linspace(0, len(Turbo256), num=len(CLASSES),
                                endpoint=False, dtype=int)
        colors = [Turbo256[x] for x in color_idx]
    # Define plot source
    source = ColumnDataSource({'angle': angle, 'color': colors,
                              'classes': CLASSES, 'counts': COUNTS})
    # -------------------------------------------------------------------------
    # Plots
    # -------------------------------------------------------------------------
    p = figure(plot_height=350, title="Class Distribution",
               toolbar_location=None, tools="hover",
               tooltips="@classes: @counts", x_range=(-0.5, 1.0),
               output_backend="webgl", sizing_mode="scale_both",
               margin=(MARGIN, MARGIN, 0, 0))
    p.wedge(x=0, y=1, radius=0.4,
            start_angle=cumsum('angle', include_zero=True),
            end_angle=cumsum('angle'), line_color="white", fill_color='color',
            legend_field='classes', source=source)
    # Style plot
    p.axis.axis_label = None
    p.axis.visible = False
    p.grid.grid_line_color = None
    p.outline_line_color = "#DDDDDD"
    p.outline_line_width = 0
    # Style legend
    p.legend.label_text_font_style = "bold"
    p.legend.border_line_color = "#DDDDDD"
    p.legend.border_line_width = 0
    return p
| [
"bokeh.transform.cumsum",
"bokeh.models.ColumnDataSource",
"bokeh.plotting.figure",
"numpy.unique"
] | [((1395, 1488), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'angle': angle, 'color': colors, 'classes': CLASSES, 'counts': COUNTS}"], {}), "({'angle': angle, 'color': colors, 'classes': CLASSES,\n 'counts': COUNTS})\n", (1411, 1488), False, 'from bokeh.models import ColumnDataSource\n'), ((1697, 1930), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': '(350)', 'title': '"""Class Distribution"""', 'toolbar_location': 'None', 'tools': '"""hover"""', 'tooltips': '"""@classes: @counts"""', 'x_range': '(-0.5, 1.0)', 'output_backend': '"""webgl"""', 'sizing_mode': '"""scale_both"""', 'margin': '(MARGIN, MARGIN, 0, 0)'}), "(plot_height=350, title='Class Distribution', toolbar_location=None,\n tools='hover', tooltips='@classes: @counts', x_range=(-0.5, 1.0),\n output_backend='webgl', sizing_mode='scale_both', margin=(MARGIN,\n MARGIN, 0, 0))\n", (1703, 1930), False, 'from bokeh.plotting import figure\n'), ((833, 856), 'numpy.unique', 'np.unique', (['data[TARGET]'], {}), '(data[TARGET])\n', (842, 856), True, 'import numpy as np\n'), ((2038, 2072), 'bokeh.transform.cumsum', 'cumsum', (['"""angle"""'], {'include_zero': '(True)'}), "('angle', include_zero=True)\n", (2044, 2072), False, 'from bokeh.transform import cumsum\n'), ((2096, 2111), 'bokeh.transform.cumsum', 'cumsum', (['"""angle"""'], {}), "('angle')\n", (2102, 2111), False, 'from bokeh.transform import cumsum\n')] |
'''
Created on Jul 13, 2017
@author: <NAME>, <NAME>
'''
import numpy as np
from collections import Iterable
def _create_structured_vector(size, fields, copy=False):
    """Create a numpy record array filled with per-field default values.

    Args:
        size: number of records in the (one-dimensional) array.
        fields: list of (name, type, default_value) tuples.  If a default
            value has a `.shape`, that shape becomes the field's sub-shape;
            if it is merely iterable, its length is used instead.
        copy: when True, each record gets its own `np.copy` of the default
            value rather than all records sharing one object.

    Returns:
        (array, dtype) tuple: the filled `np.rec.array` and its `np.dtype`.

    Example:
        Create a 10-element 1-D array of (x, y) records defaulting to (-1, -1):
        _create_structured_vector(10, [('x', int, -1), ('y', int, -1)])
    """
    # `collections.Iterable` was deprecated in 3.3 and removed in Python 3.10;
    # import the ABC from its canonical home instead.
    from collections.abc import Iterable

    dts = []
    for name, type_, value in fields:
        if hasattr(value, 'shape'):
            shape = value.shape
        elif isinstance(value, Iterable):
            # Deliberately an int, not a 1-tuple: for flexible dtypes such as
            # str, numpy treats the number as the itemsize (e.g. '<U1'),
            # which is the behaviour make_var_struct relies on.
            shape = len(value)
        else:
            shape = None
        if shape is not None:
            dts.append((name, type_, shape))
        else:
            dts.append((name, type_))
    dt = np.dtype(dts)
    values = [tuple([value if not copy else np.copy(value) for _, _, value in fields])
              for _ in range(size)]
    array = np.rec.array(values, dtype=dt)
    return array, dt
def make_var_struct(nobj, nobs):
    """Build the summary structure holding per-variable statistics.

    Ported from the IDL procedure make_var_struct.pro.

    Args:
        nobj: number of objects (records in the returned array).
        nobs: number of observations embedded in each object record.

    Returns:
        np.rec.array of length nobj; each record carries the per-variable
        statistics fields plus an `obs` sub-array of length nobs.
    """
    # Per-observation record layout: (field name, dtype, default value).
    observation_fields = [
        ('state', int, -1),
        ('dis', np.float32, -1.0),
        ('posangle', np.float32, -1.0),
        ('err', np.float32, -1.0),
        ('phot', np.float32, -1.0),
        ('photerr', np.float32, -1.0),
    ]
    obs_defaults, obs_dtype = _create_structured_vector(nobs, observation_fields)
    # Per-variable record layout; the final field embeds the observation table.
    variable_fields = [
        ('name', str, " "),
        ('ptr', np.int64, 0),
        ('maxmag', np.float32, -1.0),
        ('errmaxmag', np.float32, -1.0),
        ('minmag', np.float32, -1.0),
        ('avgmag', np.float32, -1.0),
        ('delta', np.float32, -1.0),
        ('nmiss', int, 0),
        ('nobs', np.int32, 0),
        ('ngdobs', np.int32, 0),
        ('sdev', np.float32, -1.0),
        ('sdevcl', np.float32, -1.0),
        ('chisq', np.float32, -1.0),
        ('chisqcl', np.float32, -1.0),
        ('maxsig', np.float32, -1.0),
        ('pos_sdv', np.float32, -1.0),
        ('posrange', np.float32, -1.0),
        ('avgdev', np.float32, -1.0),
        ('skew', np.float32, -1.0),
        ('kurt', np.float32, -1.0),
        ('duration', np.float32, -1.0),
        ('mdnerr', np.float32, -1.0),
        ('avgdevsig', np.float32, -1.0),
        ('avgdevsigcl', np.float32, -1.0),
        ('bestdelta', np.float32, -1.0),
        ('bestsig', np.float32, -1.0),
        ('ival', np.float32, -1.0),
        ('ival2', np.float32, -1.0),
        ('obs', obs_dtype, obs_defaults),
    ]
    # copy=True: every object record must own an independent obs sub-array.
    variables, _ = _create_structured_vector(nobj, variable_fields, copy=True)
    return variables
if __name__ == '__main__':
    # Smoke test: build a 10x30 variable structure and exercise record access.
    a = make_var_struct(10, 30)
    a[0].obs[0].err = 30
    a[0].obs[2].err = 50
    vars_dt = a[0].dtype
    obs_dt = a[0].obs.dtype
    # Index vector [0..4].  The original built this by reinterpreting the raw
    # buffer of a default-int array as int64, which fails on platforms where
    # the default integer is 32-bit (e.g. Windows); np.arange is equivalent
    # and portable.
    ind = np.arange(5, dtype=np.int64)
    rec = np.rec.array(a[0].obs[ind], dtype=obs_dt)
    obs = np.where(rec.err > 0)
    print(obs, rec[obs])
    rec = np.rec.array(a[0].obs[obs], dtype=obs_dt)
    print([r.err for r in rec])
| [
"numpy.copy",
"numpy.dtype",
"numpy.rec.array",
"numpy.where",
"numpy.array"
] | [((1040, 1053), 'numpy.dtype', 'np.dtype', (['dts'], {}), '(dts)\n', (1048, 1053), True, 'import numpy as np\n'), ((1176, 1206), 'numpy.rec.array', 'np.rec.array', (['values'], {'dtype': 'dt'}), '(values, dtype=dt)\n', (1188, 1206), True, 'import numpy as np\n'), ((3434, 3475), 'numpy.rec.array', 'np.rec.array', (['a[0].obs[ind]'], {'dtype': 'obs_dt'}), '(a[0].obs[ind], dtype=obs_dt)\n', (3446, 3475), True, 'import numpy as np\n'), ((3486, 3507), 'numpy.where', 'np.where', (['(rec.err > 0)'], {}), '(rec.err > 0)\n', (3494, 3507), True, 'import numpy as np\n'), ((3544, 3585), 'numpy.rec.array', 'np.rec.array', (['a[0].obs[obs]'], {'dtype': 'obs_dt'}), '(a[0].obs[obs], dtype=obs_dt)\n', (3556, 3585), True, 'import numpy as np\n'), ((3380, 3405), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (3388, 3405), True, 'import numpy as np\n'), ((1099, 1113), 'numpy.copy', 'np.copy', (['value'], {}), '(value)\n', (1106, 1113), True, 'import numpy as np\n')] |
import os
import cv2 as cv
import argparse
import numpy as np
import math
import shutil
rootdir = "/mnt/data/datasets/PID_YOLO"  # source directory holding the images + YOLO label files
savepath = "/mnt/data/datasets/PID_YOLO/divide"  # output directory for cropped images + labels
# Alternative (Windows) paths kept for reference:
#rootdir = "D:/Download/PID_YOLO"
#savepath = "D:/Download/PID_YOLO/train"
# Pre-set crop size (width x height).  For these P&ID maps the 4961x3580
# originals are split into 1280x1280 tiles.
w_after = 1280
h_after = 1280
ratio = 0.15  # overlap (interception) ratio between neighbouring crops
# The tiling is first computed from these settings; each crop is then adapted
# to fully contain any bounding boxes that straddle its border.
def main():
    """Split every labelled image under `rootdir` into overlapping crops.

    For each image file ending in 'g' (.jpg/.png), the function:
      1. pads the image with black so it divides into `w_after` x `h_after`
         tiles overlapping by `ratio`, saving the padded copy under
         <savepath>/enlarge/;
      2. for each tile, expands ('adapts') the crop window so every bounding
         box that overlaps it is fully contained, then writes the crop as
         <name>-<row><col>.jpg;
      3. writes the crop's absolute window coordinates to
         <savepath>/coordinate/ for post-processing;
      4. writes the YOLO labels of the boxes inside each crop, re-normalized
         to the adapted crop size.
    """
    # Clear the previous output and recreate the save folder.
    if os.path.isdir(savepath):
        clear(savepath)
    os.makedirs(savepath)
    # show(rootdir)  # show all the labels on the original images
    for filename in os.listdir(rootdir):
        if filename[-1] != "g":  # crude filter for .jpg/.png files
            continue
        image = cv.imread(os.path.join(rootdir, filename))
        print(filename)
        h, w = image.shape[0], image.shape[1]
        # w_actual/h_actual: exact (fractional) number of tiles that fit
        # horizontally/vertically; w_count/h_count: the rounded-up counts.
        w_count = math.ceil((w - w_after) / ((1 - ratio) * w_after) + 1)
        w_actual = (w - w_after) / ((1 - ratio) * w_after) + 1
        h_count = math.ceil((h - h_after) / ((1 - ratio) * h_after) + 1)
        h_actual = (h - h_after) / ((1 - ratio) * h_after) + 1
        # Pad with black so the tiling covers the whole image exactly.
        if w_count > w_actual or h_count > h_actual:
            image = cv.copyMakeBorder(image, 0, int(h_after * h_count - h - ratio * h_after * (h_count - 1)),
                                      0, int(w_after * w_count - w - ratio * w_after * (w_count - 1)),
                                      cv.BORDER_CONSTANT, value=0)
        # Save the enlarged (padded) image.
        if not os.path.isdir(savepath + '/enlarge/'):
            os.makedirs(savepath + '/enlarge/')
        cv.imwrite(savepath + '/enlarge/' + filename, image)
        # Read the original YOLO labels once per image; every line is one box:
        # class, normalized center (x, y), normalized width and height.
        with open(rootdir + "/" + filename[:-3] + 'txt') as labelFile:
            lines = labelFile.readlines()
        for i in range(h_count):
            for j in range(w_count):
                crop_stem = filename[:-4] + '-' + str(i) + str(j)
                # Touch the crop's label file so it exists even with no boxes.
                open(savepath + '/' + crop_stem + '.txt', 'a+').close()
                # (imx_min, imy_min): absolute top-left corner of this crop;
                # maximum[1..4] holds the crop window (xmin, xmax, ymin, ymax)
                # and is expanded below during adaptation.
                maximum = np.zeros(5)
                imx_min = (1 - ratio) * w_after * j
                imy_min = (1 - ratio) * h_after * i
                imx_max = imx_min + w_after
                imy_max = imy_min + h_after
                maximum[1] = imx_min
                maximum[2] = imx_max
                maximum[3] = imy_min
                maximum[4] = imy_max
                # Pass 1: expand the crop window to fully include every box
                # that overlaps it ('adaptation').
                for line in lines:
                    flag = 0
                    char = list(map(float, line.strip().split(" ")))
                    xmin = (char[1] - char[3] / 2) * w
                    ymin = (char[2] - char[4] / 2) * h
                    xmax = (char[1] + char[3] / 2) * w
                    ymax = (char[2] + char[4] / 2) * h
                    # Box overlaps the window if one of its vertices falls inside...
                    if ((imx_max > xmin > imx_min) or (imx_min < xmax < imx_max)) and \
                            ((imy_max > ymin > imy_min) or (imy_min < ymax < imy_max)):
                        flag = 1
                    # ...or it crosses the window with no vertex inside.
                    elif (xmin > imx_min) and (xmax < imx_max) and (ymin < imy_min) and (ymax > imy_max):
                        flag = 1
                    elif (ymin > imy_min) and (ymax < imy_max) and (xmin < imx_min) and (xmax > imx_max):
                        flag = 1
                    if flag == 1:
                        char[1] = xmin
                        char[2] = xmax
                        char[3] = ymin
                        char[4] = ymax
                        # Even indices (0, 2, 4) track maxima, odd (1, 3) minima.
                        for m in range(5):
                            if m % 2 == 0:
                                maximum[m] = max(maximum[m], char[m])
                            else:
                                maximum[m] = min(maximum[m], char[m])
                w_after_adapted = maximum[2] - maximum[1]
                h_after_adapted = maximum[4] - maximum[3]
                # Crop the adapted window and save it.
                cropped = image[int(maximum[3]):int(maximum[4]), int(maximum[1]):int(maximum[2])]
                cv.imwrite(savepath + '/' + crop_stem + ".jpg", cropped)
                # Save the crop's absolute window coordinates for post-processing.
                if not os.path.isdir(savepath + '/coordinate/'):
                    os.makedirs(savepath + '/coordinate/')
                with open(savepath + '/coordinate/' + crop_stem + '.txt', 'a+') as coordFile:
                    coordFile.write(' '.join(map(str, maximum[1:])))
                    coordFile.write('\n')
                # Pass 2: write the labels of every box inside the adapted
                # crop, re-normalized to the crop's size.
                with open(savepath + '/' + crop_stem + '.txt', 'a+') as cropLabels:
                    for line in lines:
                        # BUG FIX: the original never reset `flag` inside this
                        # loop, so once one box matched, every following line
                        # was written with stale coordinates.
                        flag = 0
                        char = list(map(float, line.strip().split(" ")))
                        xmin = (char[1] - char[3] / 2) * w
                        ymin = (char[2] - char[4] / 2) * h
                        xmax = (char[1] + char[3] / 2) * w
                        ymax = (char[2] + char[4] / 2) * h
                        if xmin > maximum[1] and xmax < maximum[2] and ymin > maximum[3] and ymax < maximum[4]:
                            # Fully inside the adapted window: keep as-is.
                            flag = 1
                        elif ((maximum[2] > xmin > maximum[1]) or (maximum[1] < xmax < maximum[2])) and \
                                ((maximum[4] > ymin > maximum[3]) or (maximum[3] < ymax < maximum[4])):
                            # Partially inside: clip the box to the window.
                            xmin = max(maximum[1], xmin)
                            xmax = min(maximum[2], xmax)
                            ymin = max(maximum[3], ymin)
                            ymax = min(maximum[4], ymax)
                            flag = 1
                        if flag == 1:
                            # Re-normalize the box to the adapted crop size.
                            char[0] = int(char[0])
                            char[1] = ((xmin + xmax) / 2 - maximum[1]) / w_after_adapted
                            char[2] = ((ymin + ymax) / 2 - maximum[3]) / h_after_adapted
                            char[3] = (xmax - xmin) / w_after_adapted
                            char[4] = (ymax - ymin) / h_after_adapted
                            cropLabels.write(' '.join(map(str, char)))
                            cropLabels.write('\n')
    # show(savepath)  # show the output of the adapted images
def show(rootdir):
filelist = os.listdir(rootdir)
savepath = rootdir + '/show/'
if not os.path.isdir(rootdir + '/show/'):
os.makedirs(savepath)
#draw the bounding boxes on crossponding image
for filename in filelist:
if filename[-1] == "g" :
image = cv.imread(os.path.join(rootdir, filename))
size = image.shape
w = size [1]
h = size [0]
#calculate the absolute coordinate of bounding box on cropped images
file = open(rootdir + "/" + filename[:-3]+'txt')
lines = file.readlines()
for line in lines:
char = line.strip().split(" ")
char = list(map(float,char))
xmin = int((char[1]-char[3]/2)*w)
ymin = int((char[2]- char[4]/2)*h)
xmax = int((char[1]+char[3]/2)*w)
ymax = int((char[2]+char[4]/2)*h)
rec = cv.rectangle(image,(xmin,ymin),(xmax,ymax),(0,255,0),2)
save = savepath + filename
cv.imwrite(save, rec)
#clear all the former output files
def clear(savepath):
    """Recursively delete `savepath` and everything under it."""
    shutil.rmtree(savepath)
# read file as rows to find out the size after adaptation
def readrow(savepath, filename, maximum):
    """Fold a label file into `maximum`, in place.

    Each row of the file holds five whitespace-separated numbers.  For every
    row, even-indexed entries of `maximum` (0, 2, 4) are replaced by the
    running maximum and odd-indexed entries (1, 3) by the running minimum.

    Args:
        savepath: directory containing the file.
        filename: file name inside `savepath`.
        maximum: mutable sequence of 5 numbers, updated in place.
    """
    # `with` guarantees the handle is closed even if a row is malformed
    # (the original leaked it on error).
    with open(savepath + "/" + filename) as file:
        for line in file:
            char = list(map(float, line.strip().split(" ")))
            for i in range(5):
                if i % 2 == 0:
                    maximum[i] = max(maximum[i], char[i])
                else:
                    maximum[i] = min(maximum[i], char[i])
if __name__ == '__main__':
main()
| [
"os.makedirs",
"math.ceil",
"os.path.isdir",
"cv2.imwrite",
"numpy.zeros",
"cv2.rectangle",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] | [((673, 696), 'os.path.isdir', 'os.path.isdir', (['savepath'], {}), '(savepath)\n', (686, 696), False, 'import os\n'), ((729, 750), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (740, 750), False, 'import os\n'), ((864, 883), 'os.listdir', 'os.listdir', (['rootdir'], {}), '(rootdir)\n', (874, 883), False, 'import os\n'), ((10401, 10420), 'os.listdir', 'os.listdir', (['rootdir'], {}), '(rootdir)\n', (10411, 10420), False, 'import os\n'), ((11561, 11584), 'shutil.rmtree', 'shutil.rmtree', (['savepath'], {}), '(savepath)\n', (11574, 11584), False, 'import shutil\n'), ((10469, 10502), 'os.path.isdir', 'os.path.isdir', (["(rootdir + '/show/')"], {}), "(rootdir + '/show/')\n", (10482, 10502), False, 'import os\n'), ((10513, 10534), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (10524, 10534), False, 'import os\n'), ((1539, 1593), 'math.ceil', 'math.ceil', (['((w - w_after) / ((1 - ratio) * w_after) + 1)'], {}), '((w - w_after) / ((1 - ratio) * w_after) + 1)\n', (1548, 1593), False, 'import math\n'), ((1675, 1729), 'math.ceil', 'math.ceil', (['((h - h_after) / ((1 - ratio) * h_after) + 1)'], {}), '((h - h_after) / ((1 - ratio) * h_after) + 1)\n', (1684, 1729), False, 'import math\n'), ((985, 1016), 'os.path.join', 'os.path.join', (['rootdir', 'filename'], {}), '(rootdir, filename)\n', (997, 1016), False, 'import os\n'), ((2347, 2370), 'cv2.imwrite', 'cv.imwrite', (['save', 'image'], {}), '(save, image)\n', (2357, 2370), True, 'import cv2 as cv\n'), ((10686, 10717), 'os.path.join', 'os.path.join', (['rootdir', 'filename'], {}), '(rootdir, filename)\n', (10698, 10717), False, 'import os\n'), ((11339, 11402), 'cv2.rectangle', 'cv.rectangle', (['image', '(xmin, ymin)', '(xmax, ymax)', '(0, 255, 0)', '(2)'], {}), '(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\n', (11351, 11402), True, 'import cv2 as cv\n'), ((11456, 11477), 'cv2.imwrite', 'cv.imwrite', (['save', 'rec'], {}), '(save, rec)\n', (11466, 11477), True, 'import cv2 as 
cv\n'), ((2234, 2271), 'os.path.isdir', 'os.path.isdir', (["(savepath + '/enlarge/')"], {}), "(savepath + '/enlarge/')\n", (2247, 2271), False, 'import os\n'), ((2294, 2329), 'os.makedirs', 'os.makedirs', (["(savepath + '/enlarge/')"], {}), "(savepath + '/enlarge/')\n", (2305, 2329), False, 'import os\n'), ((2709, 2720), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2717, 2720), True, 'import numpy as np\n'), ((6914, 6943), 'cv2.imwrite', 'cv.imwrite', (['save_dir', 'cropped'], {}), '(save_dir, cropped)\n', (6924, 6943), True, 'import cv2 as cv\n'), ((7152, 7192), 'os.path.isdir', 'os.path.isdir', (["(savepath + '/coordinate/')"], {}), "(savepath + '/coordinate/')\n", (7165, 7192), False, 'import os\n'), ((7219, 7257), 'os.makedirs', 'os.makedirs', (["(savepath + '/coordinate/')"], {}), "(savepath + '/coordinate/')\n", (7230, 7257), False, 'import os\n')] |
#!/usr/bin/env python
# flake8: noqa
"""Tests `nineturn.core.tf_functions` package."""
import numpy as np
import tensorflow as tf
from nineturn.core.config import set_backend
from nineturn.core.backends import TENSORFLOW
from tests.core.common_functions import *
arr1 = np.random.rand(3, 4)
def test_to_tensor():
    """_to_tensor should wrap a numpy array into a TensorFlow tensor."""
    clear_background()
    set_backend(TENSORFLOW)
    from nineturn.core.tf_functions import _to_tensor
    result = _to_tensor(arr1)
    assert tf.is_tensor(result)
def test_nt_layers_list():
    """nt_layers_list should produce an empty plain list."""
    clear_background()
    set_backend(TENSORFLOW)
    from nineturn.core.tf_functions import nt_layers_list
    layers = nt_layers_list()
    assert type(layers) == type([])
    assert len(layers) == 0
def test_reshape_tensor():
    """reshape_tensor should return a tensor with the requested shape."""
    clear_background()
    set_backend(TENSORFLOW)
    from nineturn.core.tf_functions import reshape_tensor
    target_shape = [2, 6]
    reshaped = reshape_tensor(arr1, target_shape)
    assert reshaped.shape == target_shape
assert new_tensor.shape == shape | [
"nineturn.core.tf_functions.nt_layers_list",
"nineturn.core.config.set_backend",
"nineturn.core.tf_functions.reshape_tensor",
"numpy.random.rand",
"nineturn.core.tf_functions._to_tensor"
] | [((280, 300), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (294, 300), True, 'import numpy as np\n'), ((384, 407), 'nineturn.core.config.set_backend', 'set_backend', (['TENSORFLOW'], {}), '(TENSORFLOW)\n', (395, 407), False, 'from nineturn.core.config import set_backend\n'), ((600, 623), 'nineturn.core.config.set_backend', 'set_backend', (['TENSORFLOW'], {}), '(TENSORFLOW)\n', (611, 623), False, 'from nineturn.core.config import set_backend\n'), ((863, 886), 'nineturn.core.config.set_backend', 'set_backend', (['TENSORFLOW'], {}), '(TENSORFLOW)\n', (874, 886), False, 'from nineturn.core.config import set_backend\n'), ((987, 1014), 'nineturn.core.tf_functions.reshape_tensor', 'reshape_tensor', (['arr1', 'shape'], {}), '(arr1, shape)\n', (1001, 1014), False, 'from nineturn.core.tf_functions import reshape_tensor\n'), ((490, 506), 'nineturn.core.tf_functions._to_tensor', '_to_tensor', (['arr1'], {}), '(arr1)\n', (500, 506), False, 'from nineturn.core.tf_functions import _to_tensor\n'), ((702, 718), 'nineturn.core.tf_functions.nt_layers_list', 'nt_layers_list', ([], {}), '()\n', (716, 718), False, 'from nineturn.core.tf_functions import nt_layers_list\n'), ((748, 764), 'nineturn.core.tf_functions.nt_layers_list', 'nt_layers_list', ([], {}), '()\n', (762, 764), False, 'from nineturn.core.tf_functions import nt_layers_list\n')] |
"""validate.py: Utilities for validating input."""
# Standard imports
import numpy as np
import pandas as pd
import pdb
# MAVE-NN imports
from mavenn.src.reshape import _get_shape_and_return_1d_array
from mavenn.src.error_handling import check, handle_errors
# Define built-in alphabets to use with MAVE-NN
alphabet_dict = {
'dna': np.array(['A', 'C', 'G', 'T']),
'rna': np.array(['A', 'C', 'G', 'U']),
'protein': np.array(['A', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R',
'S', 'T', 'V', 'W', 'Y']),
'protein*': np.array(['A', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R',
'S', 'T', 'V', 'W', 'Y', '*'])
}
# Translate from amino acid abbreviations to single letter symbols.
abreviation_dict = {
'Ala': 'A',
'Arg': 'R',
'Asn': 'N',
'Asp': 'D',
'Cys': 'C',
'Glu': 'E',
'Gln': 'Q',
'Gly': 'G',
'His': 'H',
'Ile': 'I',
'Leu': 'L',
'Lys': 'K',
'Met': 'M',
'Phe': 'F',
'Pro': 'P',
'Ser': 'S',
'Thr': 'T',
'Trp': 'W',
'Tyr': 'Y',
'Val': 'V'
}
@handle_errors
def validate_1d_array(x):
    """Validate x and return it flattened to a 1d numpy array."""
    # The helper flattens the input; the original shape is not needed here.
    flat, _ = _get_shape_and_return_1d_array(x)
    return flat
@handle_errors
def validate_nd_array(x):
    """Validate x and return it as a numpy array with its original shape."""
    # Flatten first (this also validates the contents), then restore
    # the shape reported by the helper.
    flat, original_shape = _get_shape_and_return_1d_array(x)
    return flat.reshape(original_shape)
# TODO: 'protein*' will break current regular expressions. Those need to change.
@handle_errors
def validate_alphabet(alphabet):
    """
    Return a validated alphabet.

    String inputs are interpreted as the name of one of four built-in
    alphabets: ['dna','rna','protein','protein*']. Any other input must
    be one of [set, list, np.ndarray, pd.Series] containing only unique,
    single-character strings.
    """
    valid_types = (str, list, set, np.ndarray, pd.Series)
    check(isinstance(alphabet, valid_types),
          f'type(alphabet)={type(alphabet)} is invalid. '
          f'Must be one of {valid_types}.')

    # Normalize every accepted input type into a numpy array of characters.
    if isinstance(alphabet, str):
        # A string names one of the built-in alphabets.
        check(alphabet in alphabet_dict,
              f'Unknown alphabet={alphabet}. Must be one of [{alphabet_dict.keys()}].')
        alphabet = alphabet_dict[alphabet]
    elif isinstance(alphabet, set):
        alphabet = np.array(list(alphabet))
    elif isinstance(alphabet, list):
        alphabet = np.array(alphabet)
    elif isinstance(alphabet, pd.Series):
        alphabet = alphabet.values

    # Validate the resulting character array; checks run in this order so
    # the first failure produces the most specific message.
    check(len(alphabet.shape) == 1,
          f'Alphabet must be 1D. alphabet.shape={alphabet.shape}')
    check(len(alphabet) == len(set(alphabet)),
          f'Entries of alphabet are not unique.')
    check(len(alphabet) > 0,
          f'len(alphabet)={len(alphabet)}; must be >= 1.')
    check(all(isinstance(c, str) for c in alphabet),
          'Alphabet contains non-string characters.')
    check(all(len(c) == 1 for c in alphabet),
          'Alphabet contains non-string characters.')

    # Deliberately NOT sorted: sorting here causes a bug in heatmap
    # visualization when the user tries to reorder characters.
    return alphabet
@handle_errors
def validate_seqs(x,
                  alphabet=None,
                  restrict_seqs_to_alphabet=True):
    """
    Validate sequences for use in MAVE-NN.

    Makes sure that seqs is an array of equal-length sequences
    drawn from the set of characters in alphabet. Returns
    a version of seqs cast as a numpy array of strings. Note that
    alphabet must be set when setting restrict_seqs_to_alphabet=True.

    Parameters
    ----------
    x: (array-like)
        Array of equal-length sequences.

    alphabet: (str, array-like)
        Alphabet from which strings are drawn.

    restrict_seqs_to_alphabet: (bool)
        Whether to restrict sequences to the specified alphabet.

    Returns
    -------
    x: (np.array)
        Array of validated sequences
    """
    # Cast as np.array of strings
    if isinstance(x, str):
        x = np.array([x])
    elif isinstance(x, (list, np.ndarray)):
        x = np.array(x).astype(str)
    elif isinstance(x, pd.Series):
        x = x.values.astype(str)
    else:
        check(False, f'type(x)={type(x)} is invalid.')

    # Make sure array is 1D
    check(len(x.shape) == 1, f'x should be 1D; x.shape={x.shape}')

    # Get N and make sure its >= 1
    N = len(x)
    check(N >= 1, f'N={N} must be >= 1')

    # Make sure all x are the same length.
    # BUGFIX: the second fragment of this message was a plain string, so
    # '{lengths}' was never interpolated; both fragments are now f-strings.
    lengths = np.unique([len(seq) for seq in x])
    check(len(lengths) == 1,
          f"Sequences should all be the same length"
          f"; found multiple lengths={lengths}")

    # If user requests to restrict sequences to a given alphabet
    if restrict_seqs_to_alphabet:

        # Check that alphabet is specified
        check(alphabet is not None,
              "alphabet must be specified when restrict_seqs_to_alphabet=True.")

        # Validate alphabet
        alphabet = validate_alphabet(alphabet)

        # Make sure all sequences are in alphabet
        seq_chars = set(''.join(x))
        alphabet_chars = set(alphabet)
        check(seq_chars <= alphabet_chars,
              f"x contain the following characters not in alphabet:"
              f"{seq_chars-alphabet_chars}")

    return x
| [
"mavenn.src.reshape._get_shape_and_return_1d_array",
"numpy.array",
"mavenn.src.error_handling.check"
] | [((339, 369), 'numpy.array', 'np.array', (["['A', 'C', 'G', 'T']"], {}), "(['A', 'C', 'G', 'T'])\n", (347, 369), True, 'import numpy as np\n'), ((382, 412), 'numpy.array', 'np.array', (["['A', 'C', 'G', 'U']"], {}), "(['A', 'C', 'G', 'U'])\n", (390, 412), True, 'import numpy as np\n'), ((429, 543), 'numpy.array', 'np.array', (["['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R',\n 'S', 'T', 'V', 'W', 'Y']"], {}), "(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P',\n 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'])\n", (437, 543), True, 'import numpy as np\n'), ((632, 751), 'numpy.array', 'np.array', (["['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R',\n 'S', 'T', 'V', 'W', 'Y', '*']"], {}), "(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P',\n 'Q', 'R', 'S', 'T', 'V', 'W', 'Y', '*'])\n", (640, 751), True, 'import numpy as np\n'), ((1366, 1399), 'mavenn.src.reshape._get_shape_and_return_1d_array', '_get_shape_and_return_1d_array', (['x'], {}), '(x)\n', (1396, 1399), False, 'from mavenn.src.reshape import _get_shape_and_return_1d_array\n'), ((1567, 1600), 'mavenn.src.reshape._get_shape_and_return_1d_array', '_get_shape_and_return_1d_array', (['x'], {}), '(x)\n', (1597, 1600), False, 'from mavenn.src.reshape import _get_shape_and_return_1d_array\n'), ((5089, 5125), 'mavenn.src.error_handling.check', 'check', (['(N >= 1)', 'f"""N={N} must be >= 1"""'], {}), "(N >= 1, f'N={N} must be >= 1')\n", (5094, 5125), False, 'from mavenn.src.error_handling import check, handle_errors\n'), ((4711, 4724), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (4719, 4724), True, 'import numpy as np\n'), ((5500, 5598), 'mavenn.src.error_handling.check', 'check', (['(alphabet is not None)', '"""alphabet must be specified when restrict_seqs_to_alphabet=True."""'], {}), "(alphabet is not None,\n 'alphabet must be specified when restrict_seqs_to_alphabet=True.')\n", (5505, 5598), False, 'from 
mavenn.src.error_handling import check, handle_errors\n'), ((5819, 5946), 'mavenn.src.error_handling.check', 'check', (['(seq_chars <= alphabet_chars)', 'f"""x contain the following characters not in alphabet:{seq_chars - alphabet_chars}"""'], {}), "(seq_chars <= alphabet_chars,\n f'x contain the following characters not in alphabet:{seq_chars - alphabet_chars}'\n )\n", (5824, 5946), False, 'from mavenn.src.error_handling import check, handle_errors\n'), ((2808, 2826), 'numpy.array', 'np.array', (['alphabet'], {}), '(alphabet)\n', (2816, 2826), True, 'import numpy as np\n'), ((4781, 4792), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4789, 4792), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" File utilities comparable to similarly named bash utils: rm_rf(), rm_f(), and mkdir_p()
dataset1.0 is in files like: PPE1.rar PPE2.zip PPE3.zip PP4.7zip
dataset2.0 is in gs:/Buckets/safety_monitoring/data/obj/supplemental/"""
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str, # noqa
ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
standard_library.install_aliases() # noqa
from past.builtins import basestring
import gzip
import io
import os
import json
import re
from html2text import html2text
import pandas as pd
from pugnlp.futil import mkdir_p, path_status, find_files # noqa
from nlpia.constants import logging, MAX_LEN_FILEPATH
from nlpia.constants import BASE_DIR, DATA_PATH, BIGDATA_PATH, BOOK_PATH # noqa
from nlpia.constants import HTML_TAGS, EOL
from nlpia.constants import tqdm, no_tqdm
try:
np = pd.np
except ImportError:
import numpy as np # noqa
logger = logging.getLogger(__name__)
def wc(f, verbose=False, nrows=None):
    r""" Count lines in a text file (like bash `wc -l`)

    Parameters
    ----------
    f: a file path (str) or an open file object
    verbose: bool
        If True, show a tqdm progress bar while counting.
    nrows: int or None
        Stop after counting at most `nrows` lines.

    Returns
    -------
    int: number of lines read (0 for an empty file)

    References:
      https://stackoverflow.com/q/845058/623735

    >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin:
    ...     print(wc(fin) == wc(fin) == 7037 == wc(fin.name))
    True
    >>> wc(fin.name)
    7037
    """
    tqdm_prog = tqdm if verbose else no_tqdm
    # BUGFIX: start the count at 0 so an empty file returns 0 instead of
    # raising NameError (the loop variable was previously unbound).
    num_lines = 0
    with ensure_open(f, mode='r') as fin:
        for i, line in tqdm_prog(enumerate(fin)):
            num_lines = i + 1
            if nrows is not None and i >= nrows - 1:
                break
    return num_lines
def ensure_str(s):
    r""" Ensure that s is a str and not a bytes (.decode() if necessary)

    Non-str objects without a .decode() method are rendered with repr().

    >>> ensure_str(b"I'm 2. When I grow up I want to be a str!")
    "I'm 2. When I grow up I want to be a str!"
    >>> ensure_str(42)
    '42'
    """
    # Try decoding first (bytes-like objects), mirroring the EAFP original.
    decode = getattr(s, 'decode', None)
    if decode is not None:
        return decode()
    if isinstance(s, str):
        return s
    # fall back to a python repr (str) of a non-bytes non-str object
    return repr(s)
def ls(path, force=False):
    """ bash `ls -a`: List a file path or a directory's contents (files and directories)

    If ``path`` is a file, return its (expanded) path; if it is a directory,
    return its entries. With ``force=True``, listing errors are swallowed
    and None is returned instead of raising.

    >>> ls('.')
    [...]
    >>> ls('~/')
    [...]
    >>> __file__.endswith(os.path.join('nlpia', 'futil.py'))
    True
    >>> ls(__file__).endswith(os.path.join('nlpia', 'futil.py'))
    True
    """
    path = expand_filepath(path)
    logger.debug('path={}'.format(path))
    if os.path.isfile(path):
        return path
    # CLEANUP: the previous version had three duplicate os.listdir branches
    # (isdir / not force / trailing try). Collapsed: without force any
    # listing error propagates; with force it is swallowed and None returned.
    if not force:
        return os.listdir(path)
    try:
        return os.listdir(path)
    except IOError:
        pass
def ls_a(path, force=False):
    """ bash `ls -a`: List both file paths or directory contents (files and directories)

    Thin alias for :func:`ls`; kept for call-site readability.

    >>> path = ls(__file__)
    >>> path.endswith(os.path.join('nlpia', 'futil.py'))
    True
    """
    return ls(path, force=force)
def rm_r(path, force=False):
    """ bash `rm -r`: Recursively remove dirpath. If `force==True`, don't raise exception if path doesn't exist.

    >>> rm_r('/tmp/nlpia_dir_that_doesnt_exist_3.141591234/', force=True)
    >>> rm_r('/tmp/nlpia_dir_that_doesnt_exist_3.141591234/')
    Traceback (most recent call last):
      ...
    FileNotFoundError: [Errno 2] No such file or directory: '/tmp/nlpia_dir_that_doesnt_exist_3.141591234'
    """
    path = expand_filepath(path)
    logger.debug('path={}'.format(path))
    if os.path.isfile(path):
        return os.remove(path)
    if not os.path.isdir(path):
        # Nonexistent (or non-file, non-dir) path: swallow when forced,
        # otherwise let os.rmdir raise FileNotFoundError as documented above.
        if force:
            return None
        return os.rmdir(path)
    # Fast path: an empty directory can be removed directly.
    try:
        return os.rmdir(path)
    except OSError:  # OSError: [Errno 66] Directory not empty
        pass
    # BUGFIX: the previous implementation `return`ed inside this loop,
    # deleting only the *first* entry, and later called os.remove() with a
    # nonexistent `force` keyword (TypeError). Recurse over all entries,
    # then remove the now-empty directory.
    for filename in ls(path, force=force) or []:
        rm_r(os.path.join(path, filename), force=force)
    try:
        return os.rmdir(path)
    except OSError:
        if not force:
            raise
    return None
def rm_rf(path):
    """ bash `rm -rf`: Recursively remove dirpath. Don't raise exception if path doesn't exist.

    Equivalent to ``rm_r(path, force=True)``.

    >>> rm_rf('/tmp/nlpia_dir_that_doesnt_exist_3.141591234/')
    """
    return rm_r(path, force=True)
def find_data_path(path):
    """Return the first existing location of `path` among the usual data dirs.

    Checks, in order: the path as given, DATA_PATH, BIGDATA_PATH, BASE_DIR,
    the user's home directory, and the current working directory.
    Returns None when nothing exists.
    """
    candidates = (
        path,
        os.path.join(DATA_PATH, path),
        os.path.join(BIGDATA_PATH, path),
        os.path.join(BASE_DIR, path),
        os.path.expanduser(os.path.join('~', path)),
        os.path.abspath(os.path.join('.', path)),
    )
    return next((p for p in candidates if os.path.exists(p)), None)
def expand_filepath(filepath):
    """ Expand '~', environment variables, and relative segments in filepath.

    See also: pugnlp.futil.expand_path

    >>> len(expand_filepath('~')) > 3
    True
    """
    expanded = os.path.expanduser(filepath)
    expanded = os.path.expandvars(expanded)
    return os.path.abspath(expanded)
def ensure_open(f, mode='r'):
    r""" Return a file pointer using gzip.open if filename ends with .gz otherwise open()

    Accepts a file path (str), a raw text blob longer than MAX_LEN_FILEPATH
    (wrapped in StringIO), or an existing file object (reopened if closed).

    TODO: try to read a gzip rather than relying on gz extension, likewise for zip and other formats
    TODO: monkey patch the file so that .write_bytes=.write and .write writes both str and bytes

    >>> fn = os.path.join(DATA_PATH, 'pointcloud.csv.gz')
    >>> fp = ensure_open(fn)
    >>> fp
    <gzip _io.BufferedReader name='...src/nlpia/data/pointcloud.csv.gz' 0x...>
    >>> fp.closed
    False
    >>> with fp:
    ...     print(len(fp.readlines()))
    48485
    >>> fp.read()
    Traceback (most recent call last):
      ...
    ValueError: I/O operation on closed file
    >>> len(ensure_open(fp).readlines())
    48485
    >>> fn = os.path.join(DATA_PATH, 'mavis-batey-greetings.txt')
    >>> fp = ensure_open(fn)
    >>> len(fp.read())
    314
    >>> len(fp.read())
    0
    >>> len(ensure_open(fp).read())
    0
    >>> fp.close()
    >>> len(fp.read())
    Traceback (most recent call last):
      ...
    ValueError: I/O operation on closed file.
    """
    # Remember the original argument: it may turn out to be raw text rather
    # than a path, in which case it is restored below.
    fin = f
    if isinstance(f, basestring):
        # Short strings are treated as candidate file paths.
        if len(f) <= MAX_LEN_FILEPATH:
            # find_filepath() returns False when nothing is found; keep the
            # original string in that case.
            f = find_filepath(f) or f
            if f and (not hasattr(f, 'seek') or not hasattr(f, 'readlines')):
                # Dispatch on extension: .gz gets gzip, everything else open().
                if f.lower().endswith('.gz'):
                    return gzip.open(f, mode=mode)
                return open(f, mode=mode)
            f = fin  # reset path in case it is the text that needs to be opened with StringIO
        else:
            # Too long to be a path: treat the string as in-memory file content.
            f = io.StringIO(f)
    elif f and getattr(f, 'closed', None):
        # A closed file object: reopen it by name. The private
        # _write_gzip_header attribute identifies gzip file objects.
        if hasattr(f, '_write_gzip_header'):
            return gzip.open(f.name, mode=mode)
        else:
            return open(f.name, mode=mode)
    # Already-open file objects (and StringIO blobs) pass through unchanged.
    return f
def normalize_ext(filepath):
    """ Convert file extension(s) to normalized form, e.g. '.tgz' -> '.tar.gz'

    Normalized extensions are ordered in reverse order of how they should be processed.
    Also extensions are ordered in order of decreasing specificity/detail.
    e.g. zip last, then txt/bin, then model type, then model dimensionality

    .TGZ => .tar.gz
    .ZIP => .zip
    .tgz => .tar.gz
    .bin.gz => .w2v.bin.gz
    .6B.zip => .6B.glove.txt.zip
    .27B.zip => .27B.glove.txt.zip
    .42B.300d.zip => .42B.300d.glove.txt.zip
    .840B.300d.zip => .840B.300d.glove.txt.zip

    FIXME: Don't do this! Stick with the original file names and let the text loader figure out what it is!
    TODO: use regexes to be more general (deal with .300D and .42B extensions)

    >>> normalize_ext('glove.42B.300d.zip')
    'glove.42B.300d.glove.txt.zip'
    """
    # (old_ext, new_ext) pairs; reversed so the most specific patterns
    # declared last in the literal are tried first.
    mapping = tuple(reversed((
        ('.tgz', '.tar.gz'),
        ('.bin.gz', '.w2v.bin.gz'),
        ('.6B.zip', '.6b.glove.txt.zip'),
        ('.42B.zip', '.42b.glove.txt.zip'),
        ('.27B.zip', '.27b.glove.txt.zip'),
        ('.300d.zip', '.300d.glove.txt.zip'),
    )))
    # Non-str input is assumed iterable of paths: normalize each one.
    if not isinstance(filepath, str):
        return [normalize_ext(fp) for fp in filepath]
    if '~' == filepath[0] or '$' in filepath:
        filepath = expand_filepath(filepath)
    # NOTE(review): fplower is computed once and NOT refreshed after a
    # replacement below, so later patterns match against the *original*
    # lowercased name while slicing the *updated* filepath — presumably
    # intentional to avoid double-rewrites, but worth confirming.
    fplower = filepath.lower()
    for ext, newext in mapping:
        # Build a case-insensitive suffix pattern: optional leading dot,
        # a stem, dot-separated short segments, then the literal extension.
        r = ext.lower().replace('.', r'\.') + r'$'
        r = r'^[.]?([^.]*)\.([^.]{1,10})*' + r
        if re.match(r, fplower) and not fplower.endswith(newext):
            # Swap the old extension for the normalized one in-place.
            filepath = filepath[:-len(ext)] + newext
    return filepath
def normalize_filepath(filepath):
    r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz.

    >>> normalize_filepath('/Hello_World.txt\n')
    'hello_world.txt'
    >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ')
    'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz'
    """
    filename = os.path.basename(filepath)
    dirpath = filepath[:-len(filename)]

    # Control characters (tabs, newlines, form feeds) have no business in a
    # filename; strip them and warn so the caller can see what changed.
    stripped = re.sub(r'[\t\r\n\f]+', '', filename)
    if stripped != filename:
        logger.warning('Stripping whitespace from filename: {} => {}'.format(
            repr(filename), repr(stripped)))
        filename = stripped

    filename = normalize_ext(filename.lower())
    if not dirpath:
        return filename
    # drop the trailing os.path.sep left over from the basename split
    return os.path.join(dirpath[:-1], filename)
def find_filepath(
        filename,
        basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')):
    """ Search the usual data directories for `filename` and return its full path.

    Returns the first existing path, or False when the file is found nowhere.

    >>> p = find_filepath('iq_test.csv')
    >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
    True
    >>> p[-len('iq_test.csv'):]
    'iq_test.csv'
    >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
    False
    """
    if os.path.isfile(filename):
        return filename
    # Probe each base directory in priority order.
    for base in basepaths:
        candidate = expand_filepath(os.path.join(base, filename))
        if os.path.isfile(candidate):
            return candidate
    return False
def update_dict_types(d, update_keys=True, update_values=True, typ=(int,)):
    """Coerce keys and/or values of `d` to the given type(s), in place.

    Each key/value is run through the types in `typ` until one conversion
    succeeds (only ValueError is swallowed). Coerced pairs are merged back
    into `d` -- original pairs whose keys changed are kept alongside their
    coerced twins -- and `d` is returned for chaining.
    """
    if not isinstance(typ, tuple):
        typ = (typ, )
    coerced = {}
    for key, value in d.items():
        new_key, new_value = key, value
        for candidate_type in typ:
            # identity check: stop trying further types once one worked
            if update_values and new_value is value:
                try:
                    new_value = candidate_type(value)
                except ValueError:
                    pass
            if update_keys and new_key is key:
                try:
                    new_key = candidate_type(key)
                except ValueError:
                    pass
        coerced[new_key] = new_value
    d.update(coerced)
    return d
def read_json(filepath, intkeys=True, intvalues=True):
    """ Locate `filepath` with find_filepath(), parse it as JSON, coerce keys/values to int.

    >>> read_json('HTTP_1.1 Status Code Definitions.html.json')
    {'100': 'Continue',
     '101': 'Switching Protocols',...
    """
    fp = ensure_open(find_filepath(filepath), mode='rt')
    data = json.load(fp)
    return update_dict_types(data, update_keys=intkeys, update_values=intvalues)
def looks_like_index(series, index_names=('Unnamed: 0', 'pk', 'index', '')):
    """ Tries to infer if the Series (usually leftmost column) should be the index_col

    >>> looks_like_index(pd.Series(np.arange(100)))
    True
    """
    if series.name in index_names:
        return True
    n = len(series)
    # Values identical to the existing index, or to a default 0..n-1 range.
    if (series == series.index.values).all():
        return True
    if (series == np.arange(n)).all():
        return True
    # Default range index + integer dtype + no missing values.
    has_default_index = (series.index == np.arange(n)).all()
    is_int_dtype = str(series.dtype).startswith('int')
    has_no_missing = series.count() == n
    return bool(has_default_index and is_int_dtype and has_no_missing)
def read_csv(*args, **kwargs):
    """Like pandas.read_csv, only little smarter: check left column to see if it should be the index_col

    Accepts either read_csv() arguments or an already-loaded DataFrame as
    the first positional argument (in which case kwargs are ignored).

    >>> read_csv(os.path.join(DATA_PATH, 'mavis-batey-greetings.csv')).head()
                                                sentence  is_greeting
    0     It was a strange little outfit in the cottage.            0
    1  Organisation is not a word you would associate...            0
    2  When I arrived, he said: "Oh, hello, we're bre...            0
    3                                       That was it.            0
    4                I was never really told what to do.            0
    """
    kwargs.update({'low_memory': False})
    if isinstance(args[0], pd.DataFrame):
        df = args[0]
    else:
        logger.info('Reading CSV with `read_csv(*{}, **{})`...'.format(args, kwargs))
        df = pd.read_csv(*args, **kwargs)
    # Promote the leftmost column to the index when it looks like one,
    # discarding pandas' auto-generated 'Unnamed: 0' name.
    if looks_like_index(df[df.columns[0]]):
        df = df.set_index(df.columns[0], drop=True)
        if df.index.name in ('Unnamed: 0', ''):
            df.index.name = None
    # Heuristic datetime coercion: very large ints (the threshold is roughly
    # ten years of nanoseconds) or object dtype suggest timestamps.
    if ((str(df.index.values.dtype).startswith('int') and (df.index.values > 1e9 * 3600 * 24 * 366 * 10).any()) or
            (str(df.index.values.dtype) == 'object')):
        try:
            df.index = pd.to_datetime(df.index)
        except (ValueError, TypeError, pd.errors.OutOfBoundsDatetime):
            logger.info('Unable to coerce DataFrame.index into a datetime using pd.to_datetime([{},...])'.format(
                df.index.values[0]))
    return df
def read_text(forfn, nrows=None, verbose=True):
    r""" Read all the lines (up to nrows) from a text file or txt.gz file

    Returns a numpy object array of stripped lines. When every line holds
    the same number of tabs the file is re-parsed as a TSV DataFrame; when
    more than 5% of lines contain HTML tags the text is run through
    html2text first.

    >>> fn = os.path.join(DATA_PATH, 'mavis-batey-greetings.txt')
    >>> len(read_text(fn, nrows=3))
    3
    """
    tqdm_prog = tqdm if verbose else no_tqdm
    nrows = wc(forfn, nrows=nrows)  # not necessary when nrows==None
    lines = np.empty(dtype=object, shape=nrows)
    with ensure_open(forfn) as f:
        for i, line in enumerate(tqdm_prog(f, total=nrows)):
            if i >= len(lines):
                break
            lines[i] = ensure_str(line).rstrip('\n').rstrip('\r')
    # len() guards also avoid ZeroDivisionError on an empty file below.
    if len(lines) and all('\t' in line for line in lines):
        num_tabs = [sum([1 for c in line if c == '\t']) for line in lines]
        if all(i == num_tabs[0] for i in num_tabs):
            # BUGFIX: the handle from the `with` block above is closed here,
            # so the previous f.seek(0)/read_csv(f) raised ValueError.
            # Reopen the source for the TSV parse instead.
            with ensure_open(forfn) as f2:
                return read_csv(f2, sep='\t', header=None, nrows=nrows)
        # BUGFIX: `lines` was previously `del`eted before this fallthrough,
        # making the final `return lines` raise NameError.
    elif len(lines) and sum((1 for line in lines if any((tag.lower() in line.lower() for tag in HTML_TAGS)))
                            ) / float(len(lines)) > .05:
        return np.array(html2text(EOL.join(lines)).split(EOL))
    return lines
| [
"os.remove",
"pandas.read_csv",
"future.standard_library.install_aliases",
"numpy.empty",
"os.path.isfile",
"os.path.join",
"builtins.open",
"os.path.exists",
"nlpia.constants.logging.getLogger",
"io.StringIO",
"os.path.basename",
"re.match",
"pandas.to_datetime",
"os.rmdir",
"builtins.s... | [((552, 586), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (584, 586), False, 'from future import standard_library\n'), ((1108, 1135), 'nlpia.constants.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1125, 1135), False, 'from nlpia.constants import logging, MAX_LEN_FILEPATH\n'), ((2574, 2594), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2588, 2594), False, 'import os\n'), ((3588, 3608), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (3602, 3608), False, 'import os\n'), ((4409, 4423), 'os.rmdir', 'os.rmdir', (['path'], {}), '(path)\n', (4417, 4423), False, 'import os\n'), ((9122, 9148), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (9138, 9148), False, 'import os\n'), ((9212, 9241), 're.compile', 're.compile', (['"""[\\\\t\\\\r\\\\n\\\\f]+"""'], {}), "('[\\\\t\\\\r\\\\n\\\\f]+')\n", (9222, 9241), False, 'import re\n'), ((10252, 10276), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (10266, 10276), False, 'import os\n'), ((14079, 14114), 'numpy.empty', 'np.empty', ([], {'dtype': 'object', 'shape': 'nrows'}), '(dtype=object, shape=nrows)\n', (14087, 14114), True, 'import numpy as np\n'), ((2625, 2644), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2638, 2644), False, 'import os\n'), ((2754, 2770), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2764, 2770), False, 'import os\n'), ((3625, 3640), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (3634, 3640), False, 'import os\n'), ((3650, 3669), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3663, 3669), False, 'import os\n'), ((4721, 4750), 'os.path.join', 'os.path.join', (['DATA_PATH', 'path'], {}), '(DATA_PATH, path)\n', (4733, 4750), False, 'import os\n'), ((4773, 4805), 'os.path.join', 'os.path.join', (['BIGDATA_PATH', 'path'], {}), '(BIGDATA_PATH, path)\n', (4785, 4805), False, 
'import os\n'), ((4828, 4856), 'os.path.join', 'os.path.join', (['BASE_DIR', 'path'], {}), '(BASE_DIR, path)\n', (4840, 4856), False, 'import os\n'), ((5021, 5045), 'os.path.exists', 'os.path.exists', (['fullpath'], {}), '(fullpath)\n', (5035, 5045), False, 'import os\n'), ((9661, 9692), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (9673, 9692), False, 'import os\n'), ((9842, 9866), 'os.path.join', 'os.path.join', (['"""/"""', '"""tmp"""'], {}), "('/', 'tmp')\n", (9854, 9866), False, 'import os\n'), ((10411, 10435), 'os.path.isfile', 'os.path.isfile', (['fullpath'], {}), '(fullpath)\n', (10425, 10435), False, 'import os\n'), ((13039, 13067), 'pandas.read_csv', 'pd.read_csv', (['*args'], {}), '(*args, **kwargs)\n', (13050, 13067), True, 'import pandas as pd\n'), ((2661, 2677), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2671, 2677), False, 'import os\n'), ((4205, 4219), 'os.rmdir', 'os.rmdir', (['path'], {}), '(path)\n', (4213, 4219), False, 'import os\n'), ((4334, 4363), 'os.remove', 'os.remove', (['names'], {'force': 'force'}), '(names, force=force)\n', (4343, 4363), False, 'import os\n'), ((4898, 4921), 'os.path.join', 'os.path.join', (['"""~"""', 'path'], {}), "('~', path)\n", (4910, 4921), False, 'import os\n'), ((4961, 4984), 'os.path.join', 'os.path.join', (['"""."""', 'path'], {}), "('.', path)\n", (4973, 4984), False, 'import os\n'), ((5322, 5350), 'os.path.expanduser', 'os.path.expanduser', (['filepath'], {}), '(filepath)\n', (5340, 5350), False, 'import os\n'), ((6916, 6930), 'io.StringIO', 'io.StringIO', (['f'], {}), '(f)\n', (6927, 6930), False, 'import io\n'), ((8643, 8663), 're.match', 're.match', (['r', 'fplower'], {}), '(r, fplower)\n', (8651, 8663), False, 'import re\n'), ((10367, 10398), 'os.path.join', 'os.path.join', (['basedir', 'filename'], {}), '(basedir, filename)\n', (10379, 10398), False, 'import os\n'), ((13373, 13399), 'builtins.str', 'str', (['df.index.values.dtype'], {}), 
'(df.index.values.dtype)\n', (13376, 13399), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((13451, 13475), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (13465, 13475), True, 'import pandas as pd\n'), ((2713, 2729), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2723, 2729), False, 'import os\n'), ((3703, 3717), 'os.rmdir', 'os.rmdir', (['path'], {}), '(path)\n', (3711, 3717), False, 'import os\n'), ((3902, 3916), 'os.rmdir', 'os.rmdir', (['path'], {}), '(path)\n', (3910, 3916), False, 'import os\n'), ((6772, 6790), 'builtins.open', 'open', (['f'], {'mode': 'mode'}), '(f, mode=mode)\n', (6776, 6790), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((7038, 7066), 'gzip.open', 'gzip.open', (['f.name'], {'mode': 'mode'}), '(f.name, mode=mode)\n', (7047, 7066), False, 'import gzip\n'), ((7100, 7123), 'builtins.open', 'open', (['f.name'], {'mode': 'mode'}), '(f.name, mode=mode)\n', (7104, 7123), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((12053, 12070), 'builtins.str', 'str', (['series.dtype'], {}), '(series.dtype)\n', (12056, 12070), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((4136, 4164), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (4148, 4164), False, 'import os\n'), ((6725, 6748), 'gzip.open', 'gzip.open', (['f'], {'mode': 'mode'}), '(f, mode=mode)\n', (6734, 6748), False, 'import gzip\n'), ((13254, 13280), 'builtins.str', 'str', (['df.index.values.dtype'], {}), '(df.index.values.dtype)\n', (13257, 13280), False, 'from builtins import bytes, dict, int, 
list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((14813, 14828), 'nlpia.constants.EOL.join', 'EOL.join', (['lines'], {}), '(lines)\n', (14821, 14828), False, 'from nlpia.constants import HTML_TAGS, EOL\n')] |
from typing import Optional, List, Sequence
import pandas as pd
import numpy as np
from scipy import stats
import sha_calc as sha_calc
from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list
from gmhazard_calc import gm_data
from gmhazard_calc import site
from gmhazard_calc import constants
from gmhazard_calc import hazard
from gmhazard_calc import shared
from gmhazard_calc import site_source
from gmhazard_calc import disagg
from .GroundMotionDataset import GMDataset, HistoricalGMDataset
from .GMSResult import GMSResult
from .GCIMResult import BranchUniGCIM, IMEnsembleUniGCIM
from .CausalParamBounds import CausalParamBounds
SF_LOW, SF_HIGH = 0.3, 3.0
def run_ensemble_gms(
    ensemble: gm_data.Ensemble,
    site_info: site.SiteInfo,
    n_gms: int,
    IMj: IM,
    gm_dataset: GMDataset,
    IMs: np.ndarray = None,
    exceedance: float = None,
    im_j: float = None,
    n_replica: int = 10,
    im_weights: pd.Series = None,
    cs_param_bounds: CausalParamBounds = None,
) -> GMSResult:
    """
    Performs ensemble based ground motion selection
    Note: Currently only supports Ensembles based on
    empirical GMMs (i.e. parametric)
    Parameters
    ----------
    ensemble: Ensemble
    site_info: SiteInfo
    n_gms: int
        Number of ground motions to select
    IMj: IM
        Conditioning IM
    gm_dataset: GMDataset
        The GM source (either simulations or historical) from which
        to select ground motions
    IMs: numpy array of strings
        The IMs to consider
    exceedance: float
        Exceedance of interest
        Either exceedance or im_j has to be specified
    im_j: float
        Level/Value of interest of the conditioning IM
    n_replica: int
        Number of times the GM selection process is repeated
    im_weights: Series
        Weighting of the IMs
    cs_param_bounds: CausalParamBounds
        The causal filter parameters to apply
        pre-ground motion selection
    Returns
    -------
    GMSResult
    """
    # Use all available IMs if none are specified
    # (IMs supported by both the ensemble and the GM dataset,
    # excluding the conditioning IM itself)
    if IMs is None:
        IMs = np.asarray(list(set(ensemble.ims.copy()).intersection(gm_dataset.ims)))
        IMs = IMs[IMs != IMj]
    if im_weights is None:
        im_weights = default_IM_weights(IMj, IMs)
    else:
        im_weights.index = to_im_list(im_weights.index)
    # Sanity checks
    assert np.all(
        np.isin(IMs, im_weights.index)
    ), "IM weights are not specified for all IMs"
    assert np.isclose(np.sum(im_weights), 1.0), "IM weights need to sum to 1.0"
    ensemble.check_im(IMj)
    assert np.all(
        np.isin(IMs, ensemble.ims)
    ), f"Not all of the specified IM types are availble in the ensemble {ensemble.name}"
    assert exceedance is not None or im_j is not None, (
        "Either the exceedance probability or the conditioning "
        "IM level has to be specified"
    )
    assert all(
        [
            ensemble.get_im_ensemble(IMi.im_type).im_data_type
            == constants.IMDataType.parametric
            for IMi in IMs
        ]
    ), "Currently only support GMS for fully parametric ensembles"
    # im_j takes precedence when both are given
    if exceedance is not None and im_j is not None:
        print(
            f"An exceedance level and a conditioning IM level were specified, "
            f"ignoring the exceedance level and using the conditioning IM"
        )
        exceedance = None
    ens_hazard = hazard.run_ensemble_hazard(ensemble, site_info, IMj)
    if im_j is not None and not (
        ens_hazard.im_values.min() < im_j < ens_hazard.im_values.max()
    ):
        raise ValueError(
            "The specified conditioning IM value is not supported (too small or large)"
        )
    # Compute the conditioning IM level using the ensemble hazard
    if exceedance is not None:
        if not (
            ens_hazard.total_hazard.values.min()
            < exceedance
            < ens_hazard.total_hazard.values.max()
        ):
            raise ValueError(
                "The specified conditioning exceedance value is not supported (too small or large)"
            )
        im_j = ens_hazard.exceedance_to_im(exceedance)
    # Compute the combined rupture weights
    P_Rup_IMj = sha_calc.compute_rupture_weights(
        im_j,
        {
            cur_branch_name: (
                shared.get_IM_params(
                    IMj, cur_branch.get_imdb_ffps(constants.SourceType.fault), site_info
                ),
                cur_branch.flt_rupture_df.set_index("rupture_name").annual_rec_prob,
            )
            for cur_branch_name, cur_branch in ensemble.get_im_ensemble(
                IMj.im_type
            ).branches_dict.items()
        },
    )
    # Compute the adjusted branch weights
    IMj_adj_branch_weights, IMj_hazard_mean = shared.compute_adj_branch_weights(
        ensemble, IMj, im_j, site_info
    )
    # Combine & Apply the branch weights
    P_Rup_IMj = P_Rup_IMj.multiply(IMj_adj_branch_weights, axis=1).sum(axis=1)
    # Compute the correlation matrix
    rho = sha_calc.compute_correlation_matrix(np.asarray(to_string_list(IMs)), str(IMj))
    # Get correlated vector
    v_vectors = sha_calc.generate_correlated_vector(
        n_gms, np.asarray(to_string_list(IMs)), rho, n_replica=n_replica
    )
    # Pre-allocate the realisation IM value array (and array for
    # sigma of selected lnIMi|IMj,Rup distributions, required for residual calculation)
    rel_IM_values = [
        {IMi: np.full(n_gms, np.nan) for IMi in IMs} for ix in range(n_replica)
    ]
    rel_sigma_lnIMi_IMj_Rup = [
        {IMi: np.full(n_gms, np.nan) for IMi in IMs} for ix in range(n_replica)
    ]
    # Get list of ensembles that cover all IMi in IM vector (i.e. variable IMs)
    IMi_gcims = {}
    im_ensembles = list({ensemble.get_im_ensemble(IMi.im_type) for IMi in IMs})
    # Computation of GCIM distribution and random realisation generation
    # Overview of main steps:
    # Iterate over each IMEnsemble (i.e. IMi set) and compute
    # 1) Correlation coefficients
    # For each IMi in the IMi set:
    # 2) Branch hazard & mean hazard
    # 3) IMi value corresponding to exceedance of IMj=imj
    # For each branch:
    # 4) Compute lnIMi|IMj,RUp and lnIMi|IMj
    # 5) Generate array of [n_gms, n_replica] random numbers
    # between 0-1 for branch selection (same across IMi of
    # the current IMi set)
    # For each IMi in IMi set:
    # 6) Compute adjusted branch weights, using results from step 3)
    # 7) Compute combined (i.e. across branches) lnIMi|IMj
    # For each replica_ix in n_replica:
    # 7) Select n_gms random branches using the adjusted
    # branch weights for IMi
    # For each of the selected branches:
    # 8) Select random rupture using rupture weights (at IMj=imj)
    # 9) Using current branch & rupture lnIMi|IMj,Rup
    # generate random realisation
    for cur_im_ensemble in im_ensembles:
        # Get the relevant IMi for this IMEnsemble
        cur_IMs = IMs[np.isin(IMs, cur_im_ensemble.ims)]
        # Get the correlation coefficients
        corr_coeffs = pd.Series(
            data=[sha_calc.get_im_correlations(str(IMi), str(IMj)) for IMi in cur_IMs],
            index=to_string_list(cur_IMs),
        )
        # Compute the branch hazard for each of the current set of IMi
        cur_branch_hazard = {
            IMi: hazard.run_branches_hazard(ensemble, site_info, IMi) for IMi in cur_IMs
        }
        # Get the ensemble mean hazard IM value for each IMi (in the current set)
        # corresponding to the exceedance rate for IMj=imj
        # Needed to calculate the adjusted branch weight
        cur_ens_hazard = {
            IMi: hazard.run_ensemble_hazard(
                ensemble, site_info, IMi, branch_hazard=cur_branch_hazard[IMi]
            )
            for IMi in cur_IMs
        }
        cur_mean_hazard_im_values = pd.Series(
            data=[
                cur_ens_hazard[IMi].exceedance_to_im(IMj_hazard_mean) for IMi in cur_IMs
            ],
            index=cur_IMs,
        )
        cur_branch_gcims, cur_adj_branch_weights = {}, {}
        for cur_branch_name, cur_branch in cur_im_ensemble.branches_dict.items():
            # Retrieve the IM parameters
            im_df = shared.get_IM_values(
                cur_branch.get_imdb_ffps(constants.SourceType.fault), site_info
            )
            sigma_cols = [f"{IMi}_sigma" for IMi in cur_IMs]
            # Compute lnIMi|IMj, Rup
            cur_lnIMi_IMj_Rup = sha_calc.compute_lnIMi_IMj_Rup(
                im_df[to_string_list(cur_IMs)],
                im_df[sigma_cols].rename(
                    columns={
                        sig_col: str(IMi) for sig_col, IMi in zip(sigma_cols, cur_IMs)
                    }
                ),
                corr_coeffs,
                str(IMj),
                im_j,
            )
            # Compute lnIMi|IMj
            cur_lnIMi_IMj = sha_calc.compute_lnIMi_IMj(
                cur_lnIMi_IMj_Rup, P_Rup_IMj, str(IMj), im_j
            )
            # Create branch GCIM object and save to dictionary
            cur_branch_gcims[cur_branch_name] = {
                IMi: BranchUniGCIM(
                    IMi,
                    IMj,
                    im_j,
                    cur_branch,
                    cur_lnIMi_IMj_Rup[str(IMi)],
                    cur_lnIMi_IMj[str(IMi)],
                )
                for IMi in cur_IMs
            }
        # Pick N_gms random numbers, to select the branches for
        # realisation generation
        # Use the same random number for each IMi in the current set
        # to ensure consistent branch/model selection
        rand_branch_float = np.random.uniform(
            low=0.0, high=1.0, size=(n_gms, n_replica)
        )
        # Combine the branch lnIMi|IMj distributions for each of the current IMs
        # and generate random realisation
        cur_branch_names = np.asarray(list(cur_im_ensemble.branches_dict.keys()))
        for IMi in cur_IMs:
            # Compute the adjusted branch weights, using the
            # ensemble mean exceedance rate for IMj=imj and
            # the corresponding ensemble hazard mean IM value (for each IMi)
            cur_adj_branch_weights[IMi] = pd.Series(
                data=[
                    hazard.run_branch_hazard(
                        cur_branch, site_info, IMi
                    ).im_to_exceedance(cur_mean_hazard_im_values[IMi])
                    * cur_branch.weight
                    / IMj_hazard_mean
                    for cur_name, cur_branch in cur_im_ensemble.branches_dict.items()
                ],
                index=cur_branch_names,
            )
            # Combine the branches lnIMi|IMj to get
            # the target distribution for IMi
            comb_lnIMi_IMj = sha_calc.comb_lnIMi_IMj(
                {
                    cur_name: cur_branch_gcim[IMi].lnIMi_IMj
                    for cur_name, cur_branch_gcim in cur_branch_gcims.items()
                },
                cur_adj_branch_weights[IMi],
            )
            IMi_gcims[IMi] = IMEnsembleUniGCIM(
                cur_im_ensemble,
                IMi,
                IMj,
                im_j,
                comb_lnIMi_IMj,
                {
                    cur_branch_name: cur_data[IMi]
                    for cur_branch_name, cur_data in cur_branch_gcims.items()
                },
            )
            # Generate realisation for current IMi,
            # 1) select random branch
            # 2) select random rupture
            # 3) Apply the mean & sigma of the selected lnIMi|IMj,Rup to the
            # vector of correlated random numbers
            for replica_ix in range(n_replica):
                # Select n_gms random branches based on IMi adjusted branch weights
                cur_branch_cdf = cur_adj_branch_weights[IMi].sort_values().cumsum()
                cur_sel_branches = sha_calc.query_non_parametric_cdf_invs(
                    rand_branch_float[:, replica_ix],
                    cur_branch_cdf.index.values.astype(str),
                    cur_branch_cdf.values,
                )
                for rel_ix, cur_branch_name in enumerate(cur_sel_branches):
                    # Select random rupture based on rupture contributions at IMj=imj
                    cur_rupture = np.random.choice(
                        P_Rup_IMj.index.values.astype(str), size=1, p=P_Rup_IMj.values
                    )[0]
                    # Apply mean & sigma of selected lnIMi|IMj,Rup to
                    # to correponding value of correlated vector
                    cur_branch_gcim = cur_branch_gcims[cur_branch_name][IMi]
                    rel_IM_values[replica_ix][IMi][rel_ix] = (
                        cur_branch_gcim.lnIMi_IMj_Rup.mu[cur_rupture]
                        + cur_branch_gcim.lnIMi_IMj_Rup.sigma[cur_rupture]
                        * v_vectors[replica_ix].loc[rel_ix, str(IMi)]
                    )
                    rel_sigma_lnIMi_IMj_Rup[replica_ix][IMi][
                        rel_ix
                    ] = cur_branch_gcim.lnIMi_IMj_Rup.sigma[cur_rupture]
    # Convert results to dataframes (one per replica)
    rel_IM_values = [pd.DataFrame(cur_values) for cur_values in rel_IM_values]
    rel_sigma_lnIMi_IMj_Rup = [
        pd.DataFrame(cur_sigma_values) for cur_sigma_values in rel_sigma_lnIMi_IMj_Rup
    ]
    # IM scaling, such that IM_j=im_j for all
    # ground motions in the GM dataset
    sf = None
    if isinstance(gm_dataset, HistoricalGMDataset):
        sf = gm_dataset.compute_scaling_factor(IMj, im_j)
    # Get the (scaled) ground motions IM values that fall
    # within the specified causal parameter bounds
    gms_im_df = gm_dataset.get_im_df(
        site_info,
        np.concatenate((to_string_list(IMs), [str(IMj)])),
        cs_param_bounds=cs_param_bounds,
        sf=sf,
    )
    gms_im_df.columns = to_im_list(gms_im_df.columns)
    assert (
        gms_im_df.shape[0] > 0
    ), "No GMs to select from after applying the causual parameter bounds"
    assert np.allclose(gms_im_df.loc[:, IMj], im_j)
    # Compute residuals and select GMs for each replica
    R_values, sel_gms_ind = [], []
    for replica_ix in range(n_replica):
        # Compute residuals between available GMs and current set of realisations
        # Broadcasting gives shape (n_realisations, n_candidate_GMs, n_IMs);
        # note the realisations are in ln-space (exp-converted for GMSResult below)
        cur_sigma_IMi_Rup_IMj = (
            rel_sigma_lnIMi_IMj_Rup[replica_ix].loc[:, IMs].values[:, np.newaxis, :]
        )
        cur_diff = rel_IM_values[replica_ix].loc[:, IMs].values[
            :, np.newaxis, :
        ] - np.log(gms_im_df.loc[:, IMs].values)
        cur_misfit = pd.DataFrame(
            index=rel_IM_values[replica_ix].index,
            data=np.sum(
                im_weights.loc[IMs].values * (cur_diff / cur_sigma_IMi_Rup_IMj) ** 2,
                axis=2,
            ),
        )
        # Select best matching GMs
        cur_selected_gms_ind = gms_im_df.index.values[cur_misfit.idxmin(axis=1).values]
        # Compute the KS test statistic for each IM_i
        # I.e. Check how well the empirical distribution of selected GMs
        # matches with the target distribution (i.e. lnIMi|IMj)
        D = []
        for IMi in IMs:
            # Late binding of IMi in the lambda is safe here, as kstest
            # evaluates it within the same loop iteration
            cur_d, _ = stats.kstest(
                gms_im_df.loc[cur_selected_gms_ind, IMi].values,
                lambda x: sha_calc.query_non_parametric_cdf(
                    x,
                    IMi_gcims[IMi].lnIMi_IMj.cdf.index.values,
                    IMi_gcims[IMi].lnIMi_IMj.cdf.values,
                ),
            )
            D.append(cur_d)
        D = pd.Series(index=IMs, data=D)
        # Compute the overall residual & save selected ground motions
        R_values.append(np.sum(im_weights * (D ** 2)))
        sel_gms_ind.append(list(cur_selected_gms_ind))
    # Select the best fitting set of ground motions (if multiple replica were run)
    selected_ix = np.argmin(R_values)
    sel_gms_ind, rel_IM_values = sel_gms_ind[selected_ix], rel_IM_values[selected_ix]
    return GMSResult(
        ensemble,
        site_info,
        IMj,
        im_j,
        IMs,
        gms_im_df.loc[sel_gms_ind],
        IMi_gcims,
        rel_IM_values.apply(np.exp),
        gm_dataset,
        cs_param_bounds,
        sf=sf,
    )
def default_IM_weights(IM_j: IM, IMs: np.ndarray) -> pd.Series:
"""
Returns the default IM weights based on the conditioning IM
If the conditioning IM (IM_j) is spectral acceleration (SA) the
weighting is 70% across the SAs and 30% across all other IMs
Otherwise a uniform weighting distribution is used
Parameters
----------
IM_j: IM
Conditioning IM
IMs: list of IM
IM types for which to get the default weights
Returns
-------
im_weights: pandas series
Weigths for the specified IM types
"""
# Use 70% (SA) / 30% (other) weighting if
# conditioning IM is SA
if IM_j.is_pSA():
pSA_mask = np.asarray([cur_im.im_type is IMType.pSA for cur_im in IMs])
n_pSA_IMs = np.count_nonzero(pSA_mask)
n_other_IMs = IMs.size - n_pSA_IMs
if n_other_IMs == 0:
im_weights = np.ones(n_pSA_IMs, dtype=float) / n_pSA_IMs
else:
im_weights = np.full(IMs.size, np.nan)
im_weights[pSA_mask] = (1.0 / n_pSA_IMs) * 0.7
im_weights[~pSA_mask] = (1.0 / n_other_IMs) * 0.3
# Otherwise, default to uniform weighting
else:
print(
f"WARNING: Defaulting to uniform IM weighting as the "
f"conditioning is not SA."
)
im_weights = np.ones(IMs.size, dtype=float) / IMs.size
return pd.Series(data=im_weights, index=IMs)
def default_causal_params(
    ensemble: gm_data.Ensemble,
    site_info: site.SiteInfo,
    IM_j: IM,
    exceedance: Optional[float] = None,
    im_value: Optional[float] = None,
    disagg_data: Optional[disagg.EnsembleDisaggResult] = None,
) -> CausalParamBounds:
    """
    Computes default causal parameters based on
    "Tarbali, K. and Bradley, B.A., 2016.
    The effect of causal parameter bounds in PSHA‐based ground motion selection."
    Using criterion AC (Table III)
    Parameters
    ----------
    ensemble: Ensemble
    site_info: SiteInfo
    IM_j: IM
        Conditioning IM
    exceedance : float, optional
        Compute disagg at this exceedance, either the exceedance
        or the im_value parameter has to be given
    im_value: float, optional
        Compute disagg at this im value if required
    disagg_data: DisaggResult, optional
        Computed Disagg data if pre-calculated
    Returns
    -------
    Magnitude bounds: pair of floats
        (Mw lower bound, Mw upper bound)
    Rrup bounds: pair of floats
        (Rrup lower bound, Rrup upper bound)
    Vs30 bounds: pair of floats
        (Vs30 lower bound, Vs30 upper bound)
    """
    # Calculate disagg if not already specified
    if disagg_data is None:
        disagg_data = disagg.run_ensemble_disagg(
            ensemble,
            site_info,
            IM_j,
            exceedance=exceedance,
            im_value=im_value,
            calc_mean_values=True,
        )
    # Vs30 bounds: site Vs30 +/- 50%
    vs_low, vs_high = site_info.vs30 * 0.5, site_info.vs30 * 1.5
    # Combined fault + distributed-seismicity disagg contributions
    contr_df = pd.concat(
        (
            disagg_data.fault_disagg_id.contribution,
            disagg_data.ds_disagg_id.contribution,
        )
    )
    # Mw bounds
    contr_df = pd.merge(
        contr_df.to_frame("contribution"),
        ensemble.rupture_df_id.magnitude.to_frame("magnitude"),
        how="left",
        left_index=True,
        right_index=True,
    ).sort_values("magnitude")
    non_nan_mask = ~contr_df.magnitude.isna()
    # Lower bound: min of the 1st percentile and (10th percentile - 0.5)
    # of the magnitude disagg distribution
    mw_low = min(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.01]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.1]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        - 0.5,
    )
    # Upper bound: max of the 99th percentile and (90th percentile + 0.5)
    mw_high = max(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.99]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.90]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        + 0.5,
    )
    # Get distances
    fault_rrup_disagg_df = site_source.match_ruptures(
        site_source.get_distance_df(ensemble.flt_ssddb_ffp, site_info),
        disagg_data.fault_disagg_id.contribution.copy(),
        constants.SourceType.fault,
    )
    ds_rrup_disagg_df = site_source.match_ruptures(
        site_source.get_distance_df(ensemble.ds_ssddb_ffp, site_info),
        disagg_data.ds_disagg_id.contribution.copy(),
        constants.SourceType.distributed,
    )
    contr_df = pd.merge(
        contr_df,
        pd.concat([fault_rrup_disagg_df.rrup, ds_rrup_disagg_df.rrup], axis=0).to_frame(
            "rrup"
        ),
        how="left",
        left_index=True,
        right_index=True,
    ).sort_values("rrup")
    non_nan_mask = ~contr_df.rrup.isna()
    # Rrup bounds: same percentile scheme as Mw, but the 10th/90th
    # percentiles are scaled (x0.5 / x1.5) rather than offset
    rrup_low = min(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.01]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.1]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        * 0.5,
    )
    rrup_high = max(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.99]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.90]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        * 1.5,
    )
    return CausalParamBounds(
        ensemble,
        site_info,
        IM_j,
        (mw_low, mw_high),
        (rrup_low, rrup_high),
        (vs_low, vs_high),
        sf_bounds=(SF_LOW, SF_HIGH),
        contr_df=contr_df,
        exceedance=exceedance,
        im_value=im_value,
    )
| [
"numpy.isin",
"gmhazard_calc.im.to_im_list",
"numpy.sum",
"numpy.allclose",
"numpy.ones",
"numpy.argmin",
"gmhazard_calc.disagg.run_ensemble_disagg",
"pandas.DataFrame",
"numpy.full",
"gmhazard_calc.hazard.run_branches_hazard",
"pandas.concat",
"gmhazard_calc.site_source.get_distance_df",
"g... | [((3380, 3432), 'gmhazard_calc.hazard.run_ensemble_hazard', 'hazard.run_ensemble_hazard', (['ensemble', 'site_info', 'IMj'], {}), '(ensemble, site_info, IMj)\n', (3406, 3432), False, 'from gmhazard_calc import hazard\n'), ((4756, 4821), 'gmhazard_calc.shared.compute_adj_branch_weights', 'shared.compute_adj_branch_weights', (['ensemble', 'IMj', 'im_j', 'site_info'], {}), '(ensemble, IMj, im_j, site_info)\n', (4789, 4821), False, 'from gmhazard_calc import shared\n'), ((14039, 14068), 'gmhazard_calc.im.to_im_list', 'to_im_list', (['gms_im_df.columns'], {}), '(gms_im_df.columns)\n', (14049, 14068), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((14199, 14239), 'numpy.allclose', 'np.allclose', (['gms_im_df.loc[:, IMj]', 'im_j'], {}), '(gms_im_df.loc[:, IMj], im_j)\n', (14210, 14239), True, 'import numpy as np\n'), ((16018, 16037), 'numpy.argmin', 'np.argmin', (['R_values'], {}), '(R_values)\n', (16027, 16037), True, 'import numpy as np\n'), ((17773, 17810), 'pandas.Series', 'pd.Series', ([], {'data': 'im_weights', 'index': 'IMs'}), '(data=im_weights, index=IMs)\n', (17782, 17810), True, 'import pandas as pd\n'), ((19385, 19482), 'pandas.concat', 'pd.concat', (['(disagg_data.fault_disagg_id.contribution, disagg_data.ds_disagg_id.\n contribution)'], {}), '((disagg_data.fault_disagg_id.contribution, disagg_data.\n ds_disagg_id.contribution))\n', (19394, 19482), True, 'import pandas as pd\n'), ((2287, 2315), 'gmhazard_calc.im.to_im_list', 'to_im_list', (['im_weights.index'], {}), '(im_weights.index)\n', (2297, 2315), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((2364, 2394), 'numpy.isin', 'np.isin', (['IMs', 'im_weights.index'], {}), '(IMs, im_weights.index)\n', (2371, 2394), True, 'import numpy as np\n'), ((2467, 2485), 'numpy.sum', 'np.sum', (['im_weights'], {}), '(im_weights)\n', (2473, 2485), True, 'import numpy as np\n'), ((2580, 2606), 'numpy.isin', 'np.isin', (['IMs', 'ensemble.ims'], 
{}), '(IMs, ensemble.ims)\n', (2587, 2606), True, 'import numpy as np\n'), ((9784, 9845), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(n_gms, n_replica)'}), '(low=0.0, high=1.0, size=(n_gms, n_replica))\n', (9801, 9845), True, 'import numpy as np\n'), ((13334, 13358), 'pandas.DataFrame', 'pd.DataFrame', (['cur_values'], {}), '(cur_values)\n', (13346, 13358), True, 'import pandas as pd\n'), ((13432, 13462), 'pandas.DataFrame', 'pd.DataFrame', (['cur_sigma_values'], {}), '(cur_sigma_values)\n', (13444, 13462), True, 'import pandas as pd\n'), ((15706, 15734), 'pandas.Series', 'pd.Series', ([], {'index': 'IMs', 'data': 'D'}), '(index=IMs, data=D)\n', (15715, 15734), True, 'import pandas as pd\n'), ((17074, 17136), 'numpy.asarray', 'np.asarray', (['[(cur_im.im_type is IMType.pSA) for cur_im in IMs]'], {}), '([(cur_im.im_type is IMType.pSA) for cur_im in IMs])\n', (17084, 17136), True, 'import numpy as np\n'), ((17155, 17181), 'numpy.count_nonzero', 'np.count_nonzero', (['pSA_mask'], {}), '(pSA_mask)\n', (17171, 17181), True, 'import numpy as np\n'), ((19083, 19205), 'gmhazard_calc.disagg.run_ensemble_disagg', 'disagg.run_ensemble_disagg', (['ensemble', 'site_info', 'IM_j'], {'exceedance': 'exceedance', 'im_value': 'im_value', 'calc_mean_values': '(True)'}), '(ensemble, site_info, IM_j, exceedance=exceedance,\n im_value=im_value, calc_mean_values=True)\n', (19109, 19205), False, 'from gmhazard_calc import disagg\n'), ((20832, 20894), 'gmhazard_calc.site_source.get_distance_df', 'site_source.get_distance_df', (['ensemble.flt_ssddb_ffp', 'site_info'], {}), '(ensemble.flt_ssddb_ffp, site_info)\n', (20859, 20894), False, 'from gmhazard_calc import site_source\n'), ((21055, 21116), 'gmhazard_calc.site_source.get_distance_df', 'site_source.get_distance_df', (['ensemble.ds_ssddb_ffp', 'site_info'], {}), '(ensemble.ds_ssddb_ffp, site_info)\n', (21082, 21116), False, 'from gmhazard_calc import site_source\n'), ((5052, 5071), 
'gmhazard_calc.im.to_string_list', 'to_string_list', (['IMs'], {}), '(IMs)\n', (5066, 5071), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((5192, 5211), 'gmhazard_calc.im.to_string_list', 'to_string_list', (['IMs'], {}), '(IMs)\n', (5206, 5211), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((5435, 5457), 'numpy.full', 'np.full', (['n_gms', 'np.nan'], {}), '(n_gms, np.nan)\n', (5442, 5457), True, 'import numpy as np\n'), ((5553, 5575), 'numpy.full', 'np.full', (['n_gms', 'np.nan'], {}), '(n_gms, np.nan)\n', (5560, 5575), True, 'import numpy as np\n'), ((7066, 7099), 'numpy.isin', 'np.isin', (['IMs', 'cur_im_ensemble.ims'], {}), '(IMs, cur_im_ensemble.ims)\n', (7073, 7099), True, 'import numpy as np\n'), ((7438, 7490), 'gmhazard_calc.hazard.run_branches_hazard', 'hazard.run_branches_hazard', (['ensemble', 'site_info', 'IMi'], {}), '(ensemble, site_info, IMi)\n', (7464, 7490), False, 'from gmhazard_calc import hazard\n'), ((7763, 7858), 'gmhazard_calc.hazard.run_ensemble_hazard', 'hazard.run_ensemble_hazard', (['ensemble', 'site_info', 'IMi'], {'branch_hazard': 'cur_branch_hazard[IMi]'}), '(ensemble, site_info, IMi, branch_hazard=\n cur_branch_hazard[IMi])\n', (7789, 7858), False, 'from gmhazard_calc import hazard\n'), ((14689, 14725), 'numpy.log', 'np.log', (['gms_im_df.loc[:, IMs].values'], {}), '(gms_im_df.loc[:, IMs].values)\n', (14695, 14725), True, 'import numpy as np\n'), ((15830, 15857), 'numpy.sum', 'np.sum', (['(im_weights * D ** 2)'], {}), '(im_weights * D ** 2)\n', (15836, 15857), True, 'import numpy as np\n'), ((17363, 17388), 'numpy.full', 'np.full', (['IMs.size', 'np.nan'], {}), '(IMs.size, np.nan)\n', (17370, 17388), True, 'import numpy as np\n'), ((17719, 17749), 'numpy.ones', 'np.ones', (['IMs.size'], {'dtype': 'float'}), '(IMs.size, dtype=float)\n', (17726, 17749), True, 'import numpy as np\n'), ((7284, 7307), 'gmhazard_calc.im.to_string_list', 'to_string_list', (['cur_IMs'], 
{}), '(cur_IMs)\n', (7298, 7307), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((13918, 13937), 'gmhazard_calc.im.to_string_list', 'to_string_list', (['IMs'], {}), '(IMs)\n', (13932, 13937), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((14829, 14917), 'numpy.sum', 'np.sum', (['(im_weights.loc[IMs].values * (cur_diff / cur_sigma_IMi_Rup_IMj) ** 2)'], {'axis': '(2)'}), '(im_weights.loc[IMs].values * (cur_diff / cur_sigma_IMi_Rup_IMj) ** 2,\n axis=2)\n', (14835, 14917), True, 'import numpy as np\n'), ((17280, 17311), 'numpy.ones', 'np.ones', (['n_pSA_IMs'], {'dtype': 'float'}), '(n_pSA_IMs, dtype=float)\n', (17287, 17311), True, 'import numpy as np\n'), ((19902, 19920), 'numpy.asarray', 'np.asarray', (['[0.01]'], {}), '([0.01])\n', (19912, 19920), True, 'import numpy as np\n'), ((20364, 20382), 'numpy.asarray', 'np.asarray', (['[0.99]'], {}), '([0.99])\n', (20374, 20382), True, 'import numpy as np\n'), ((21618, 21636), 'numpy.asarray', 'np.asarray', (['[0.01]'], {}), '([0.01])\n', (21628, 21636), True, 'import numpy as np\n'), ((22072, 22090), 'numpy.asarray', 'np.asarray', (['[0.99]'], {}), '([0.99])\n', (22082, 22090), True, 'import numpy as np\n'), ((8635, 8658), 'gmhazard_calc.im.to_string_list', 'to_string_list', (['cur_IMs'], {}), '(cur_IMs)\n', (8649, 8658), False, 'from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list\n'), ((15455, 15576), 'sha_calc.query_non_parametric_cdf', 'sha_calc.query_non_parametric_cdf', (['x', 'IMi_gcims[IMi].lnIMi_IMj.cdf.index.values', 'IMi_gcims[IMi].lnIMi_IMj.cdf.values'], {}), '(x, IMi_gcims[IMi].lnIMi_IMj.cdf.index.\n values, IMi_gcims[IMi].lnIMi_IMj.cdf.values)\n', (15488, 15576), True, 'import sha_calc as sha_calc\n'), ((20114, 20131), 'numpy.asarray', 'np.asarray', (['[0.1]'], {}), '([0.1])\n', (20124, 20131), True, 'import numpy as np\n'), ((20576, 20593), 'numpy.asarray', 'np.asarray', (['[0.9]'], {}), '([0.9])\n', (20586, 20593), True, 
'import numpy as np\n'), ((21825, 21842), 'numpy.asarray', 'np.asarray', (['[0.1]'], {}), '([0.1])\n', (21835, 21842), True, 'import numpy as np\n'), ((22279, 22296), 'numpy.asarray', 'np.asarray', (['[0.9]'], {}), '([0.9])\n', (22289, 22296), True, 'import numpy as np\n'), ((21271, 21341), 'pandas.concat', 'pd.concat', (['[fault_rrup_disagg_df.rrup, ds_rrup_disagg_df.rrup]'], {'axis': '(0)'}), '([fault_rrup_disagg_df.rrup, ds_rrup_disagg_df.rrup], axis=0)\n', (21280, 21341), True, 'import pandas as pd\n'), ((10396, 10448), 'gmhazard_calc.hazard.run_branch_hazard', 'hazard.run_branch_hazard', (['cur_branch', 'site_info', 'IMi'], {}), '(cur_branch, site_info, IMi)\n', (10420, 10448), False, 'from gmhazard_calc import hazard\n')] |
import os
import torch as T
import numpy as np
class OUActionNoise(object):
    """Ornstein-Uhlenbeck process noise for temporally-correlated exploration.
    Mean-reverts towards `mu` with rate `theta`; `sigma` scales the random
    diffusion term and `dt` is the discretisation time step.
    """
    def __init__(self, mu, sigma=0.15, theta=.2, dt=1e-2, x0=None):
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()
    def __call__(self):
        # Euler-Maruyama step of the OU SDE:
        # dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1)
        x = self.x_previous + \
            self.theta * (self.mu - self.x_previous) * self.dt + \
            self.sigma * np.sqrt(self.dt) * np.random.normal(
                size=self.mu.shape)
        # Bug fix: return the *updated* state. The original returned the
        # pre-update state, so the first sample was always exactly x0 and
        # every subsequent sample lagged one step behind.
        self.x_previous = x
        return x
    def reset(self):
        # Restart the process at x0 (or at zero if no x0 was given)
        self.x_previous = self.x0 if self.x0 is not None \
            else np.zeros_like(self.mu)
class ReplayBuffer(object):
    """Fixed-capacity circular buffer of (s, a, r, s', done) transitions."""
    def __init__(self, max_size, inp_shape, nb_actions):
        self.memory_size = max_size
        self.memory_counter = 0
        # Pre-allocated storage; once full, the oldest slots are overwritten.
        self.memory_state = np.zeros((max_size, *inp_shape))
        self.new_memory_state = np.zeros((max_size, *inp_shape))
        self.memory_action = np.zeros((max_size, nb_actions))
        self.memory_reward = np.zeros(max_size)
        self.memory_terminal = np.zeros(max_size, dtype=np.float32)
    def store_transition(self, state, action, reward, state_, done):
        """Write one transition into the next circular slot."""
        slot = self.memory_counter % self.memory_size
        self.memory_state[slot] = state
        self.new_memory_state[slot] = state_
        self.memory_action[slot] = action
        self.memory_reward[slot] = reward
        # Stored as a continuation mask: 0 if terminal, 1 otherwise
        self.memory_terminal[slot] = 1 - done
        self.memory_counter += 1
    def sample_buffer(self, bs):
        """Sample a random mini-batch of `bs` stored transitions."""
        populated = min(self.memory_counter, self.memory_size)
        batch = np.random.choice(populated, bs)
        return (
            self.memory_state[batch],
            self.memory_action[batch],
            self.memory_reward[batch],
            self.new_memory_state[batch],
            self.memory_terminal[batch],
        )
class CriticNetwork(T.nn.Module):
    """DDPG critic Q(s, a): maps a (state, action) pair to a scalar value.
    Hidden layers are initialised uniformly in +/- 1/sqrt(fan_in) and the
    output layer in +/- 0.003, following the DDPG paper (Lillicrap et al.).
    """
    def __init__(
            self, beta, inp_dimensions,
            fc1_dimensions, fc2_dimensions,
            nb_actions):
        super(CriticNetwork, self).__init__()
        self.inp_dimensions = inp_dimensions
        self.fc1_dimensions = fc1_dimensions
        self.fc2_dimensions = fc2_dimensions
        self.nb_actions = nb_actions
        self.fc1 = T.nn.Linear(*self.inp_dimensions, self.fc1_dimensions)
        f1 = 1./np.sqrt(self.fc1.weight.data.size()[0])
        T.nn.init.uniform_(self.fc1.weight.data, -f1, f1)
        T.nn.init.uniform_(self.fc1.bias.data, -f1, f1)
        self.bn1 = T.nn.LayerNorm(self.fc1_dimensions)
        self.fc2 = T.nn.Linear(self.fc1_dimensions, self.fc2_dimensions)
        f2 = 1./np.sqrt(self.fc2.weight.data.size()[0])
        T.nn.init.uniform_(self.fc2.weight.data, -f2, f2)
        T.nn.init.uniform_(self.fc2.bias.data, -f2, f2)
        self.bn2 = T.nn.LayerNorm(self.fc2_dimensions)
        # Separate pathway that embeds the action into the second hidden layer
        self.action_value = T.nn.Linear(self.nb_actions, self.fc2_dimensions)
        f3 = 0.003
        self.q = T.nn.Linear(self.fc2_dimensions, 1)
        T.nn.init.uniform_(self.q.weight.data, -f3, f3)
        T.nn.init.uniform_(self.q.bias.data, -f3, f3)
        self.optimizer = T.optim.Adam(self.parameters(), lr=beta)
        # Bug fix: "gpu" is not a valid torch device type string and raises
        # RuntimeError whenever CUDA is available; the correct type is "cuda".
        self.device = T.device("cuda" if T.cuda.is_available() else "cpu")
        self.to(self.device)
    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        state_value = self.fc1(state)
        state_value = self.bn1(state_value)
        state_value = T.nn.functional.relu(state_value)
        state_value = self.fc2(state_value)
        state_value = self.bn2(state_value)
        action_value = T.nn.functional.relu(self.action_value(action))
        # State and action pathways are merged additively before the Q head
        state_action_value = T.nn.functional.relu(
            T.add(state_value, action_value))
        state_action_value = self.q(state_action_value)
        return state_action_value
class ActorNetwork(T.nn.Module):
    """DDPG actor mu(s): deterministic policy mapping a state to an action.
    The tanh output bounds each action component to [-1, 1].
    Hidden layers are initialised uniformly in +/- 1/sqrt(fan_in) and the
    output layer in +/- 0.003, following the DDPG paper (Lillicrap et al.).
    """
    def __init__(
            self, alpha, inp_dimensions,
            fc1_dimensions, fc2_dimensions,
            nb_actions):
        super(ActorNetwork, self).__init__()
        self.inp_dimensions = inp_dimensions
        self.fc1_dimensions = fc1_dimensions
        self.fc2_dimensions = fc2_dimensions
        self.nb_actions = nb_actions
        self.fc1 = T.nn.Linear(*self.inp_dimensions, self.fc1_dimensions)
        f1 = 1./np.sqrt(self.fc1.weight.data.size()[0])
        T.nn.init.uniform_(self.fc1.weight.data, -f1, f1)
        T.nn.init.uniform_(self.fc1.bias.data, -f1, f1)
        self.bn1 = T.nn.LayerNorm(self.fc1_dimensions)
        self.fc2 = T.nn.Linear(self.fc1_dimensions, self.fc2_dimensions)
        f2 = 1./np.sqrt(self.fc2.weight.data.size()[0])
        T.nn.init.uniform_(self.fc2.weight.data, -f2, f2)
        T.nn.init.uniform_(self.fc2.bias.data, -f2, f2)
        self.bn2 = T.nn.LayerNorm(self.fc2_dimensions)
        f3 = 0.003
        self.mu = T.nn.Linear(self.fc2_dimensions, self.nb_actions)
        T.nn.init.uniform_(self.mu.weight.data, -f3, f3)
        T.nn.init.uniform_(self.mu.bias.data, -f3, f3)
        self.optimizer = T.optim.Adam(self.parameters(), lr=alpha)
        # Bug fix: "gpu" is not a valid torch device type string and raises
        # RuntimeError whenever CUDA is available; the correct type is "cuda".
        self.device = T.device("cuda" if T.cuda.is_available() else "cpu")
        self.to(self.device)
    def forward(self, state):
        """Return the deterministic action mu(state), each component in [-1, 1]."""
        x = self.fc1(state)
        x = self.bn1(x)
        x = T.nn.functional.relu(x)
        x = self.fc2(x)
        x = self.bn2(x)
        x = T.nn.functional.relu(x)
        x = T.tanh(self.mu(x))
        return x
class Agent(object):
    """Deep Deterministic Policy Gradient (DDPG) agent.

    Combines an actor/critic pair with slowly tracking target networks,
    an experience replay buffer and Ornstein-Uhlenbeck exploration noise.
    Depends on ReplayBuffer, ActorNetwork, CriticNetwork and
    OUActionNoise defined elsewhere in this file.
    """
    def __init__(
            self, alpha, beta, inp_dimensions, tau, env,
            gamma=0.99, nb_actions=2, max_size=1000000,
            l1_size=400, l2_size=300, bs=64):
        # alpha / beta: learning rates for the actor / critic networks.
        # tau: soft-update interpolation factor for the target networks.
        # NOTE(review): `env` is accepted but never stored or used here --
        # presumably kept for API compatibility; confirm before removing.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, inp_dimensions, nb_actions)
        self.bs = bs
        self.actor = ActorNetwork(
            alpha, inp_dimensions, l1_size, l2_size, nb_actions=nb_actions)
        self.critic = CriticNetwork(
            beta, inp_dimensions, l1_size, l2_size, nb_actions=nb_actions)
        self.target_actor = ActorNetwork(
            alpha, inp_dimensions, l1_size, l2_size, nb_actions=nb_actions)
        self.target_critic = CriticNetwork(
            beta, inp_dimensions, l1_size, l2_size, nb_actions=nb_actions)
        self.noise = OUActionNoise(mu=np.zeros(nb_actions))
        # tau=1 performs a hard copy so the targets start identical to the
        # online networks.
        self.update_params(tau=1)
    def select_action(self, observation):
        """Return the actor's action for `observation` plus exploration noise."""
        # eval() switches LayerNorm to inference behaviour for this pass.
        self.actor.eval()
        observation = T.tensor(
            observation, dtype=T.float).to(self.actor.device)
        mu = self.actor.forward(observation).to(self.actor.device)
        mu_prime = mu + T.tensor(
            self.noise(),
            dtype=T.float).to(self.actor.device)
        self.actor.train()
        return mu_prime.cpu().detach().numpy()
    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)
    def learn(self):
        """Sample a batch and run one DDPG update of critic, actor and targets."""
        # Wait until at least one full batch has been collected.
        if self.memory.memory_counter < self.bs:
            return
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.bs)
        reward = T.tensor(reward, dtype=T.float).to(self.critic.device)
        done = T.tensor(done).to(self.critic.device)
        new_state = T.tensor(new_state, dtype=T.float).to(self.critic.device)
        action = T.tensor(action, dtype=T.float).to(self.critic.device)
        state = T.tensor(state, dtype=T.float).to(self.critic.device)
        self.target_actor.eval()
        self.target_critic.eval()
        self.critic.eval()
        target_actions = self.target_actor.forward(new_state)
        critic_value_new = self.target_critic.forward(
            new_state, target_actions)
        critic_value = self.critic.forward(state, action)
        # Bellman targets: r + gamma * Q'(s', mu'(s')) * done.
        # NOTE(review): multiplying by `done` (rather than 1 - done) only
        # zeroes the bootstrap term at terminal states if the replay buffer
        # stores inverted terminal flags -- verify against ReplayBuffer.
        target = []
        for j in range(self.bs):
            target.append(reward[j] + self.gamma*critic_value_new[j]*done[j])
        target = T.tensor(target).to(self.critic.device)
        target = target.view(self.bs, 1)
        # Critic update: MSE between Bellman targets and Q(s, a).
        self.critic.train()
        self.critic.optimizer.zero_grad()
        critic_loss = T.nn.functional.mse_loss(target, critic_value)
        critic_loss.backward()
        self.critic.optimizer.step()
        self.critic.eval()
        # Actor update: deterministic policy gradient, maximise Q(s, mu(s)).
        self.actor.optimizer.zero_grad()
        mu = self.actor.forward(state)
        self.actor.train()
        actor_loss = -self.critic.forward(state, mu)
        actor_loss = T.mean(actor_loss)
        actor_loss.backward()
        self.actor.optimizer.step()
        self.update_params()
    def update_params(self, tau=None):
        """Soft-update targets: theta' <- tau*theta + (1 - tau)*theta'."""
        if tau is None:
            tau = self.tau # default to the configured soft-update factor
        actor_params = self.actor.named_parameters()
        critic_params = self.critic.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()
        critic_state_dict = dict(critic_params)
        actor_state_dict = dict(actor_params)
        target_critic_dict = dict(target_critic_params)
        target_actor_dict = dict(target_actor_params)
        # Blend each online parameter into the corresponding target one.
        for name in critic_state_dict:
            critic_state_dict[name] = tau*critic_state_dict[name].clone() + \
                (1-tau)*target_critic_dict[name].clone()
        self.target_critic.load_state_dict(critic_state_dict)
        for name in actor_state_dict:
            actor_state_dict[name] = tau*actor_state_dict[name].clone() + \
                (1-tau)*target_actor_dict[name].clone()
        self.target_actor.load_state_dict(actor_state_dict)
| [
"torch.mean",
"numpy.random.choice",
"numpy.zeros_like",
"torch.nn.init.uniform_",
"torch.nn.functional.mse_loss",
"numpy.zeros",
"torch.add",
"torch.nn.LayerNorm",
"torch.cuda.is_available",
"numpy.random.normal",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.tensor",
"numpy.sqrt"... | [((841, 881), 'numpy.zeros', 'np.zeros', (['(self.memory_size, *inp_shape)'], {}), '((self.memory_size, *inp_shape))\n', (849, 881), True, 'import numpy as np\n'), ((914, 954), 'numpy.zeros', 'np.zeros', (['(self.memory_size, *inp_shape)'], {}), '((self.memory_size, *inp_shape))\n', (922, 954), True, 'import numpy as np\n'), ((984, 1024), 'numpy.zeros', 'np.zeros', (['(self.memory_size, nb_actions)'], {}), '((self.memory_size, nb_actions))\n', (992, 1024), True, 'import numpy as np\n'), ((1054, 1080), 'numpy.zeros', 'np.zeros', (['self.memory_size'], {}), '(self.memory_size)\n', (1062, 1080), True, 'import numpy as np\n'), ((1112, 1156), 'numpy.zeros', 'np.zeros', (['self.memory_size'], {'dtype': 'np.float32'}), '(self.memory_size, dtype=np.float32)\n', (1120, 1156), True, 'import numpy as np\n'), ((1650, 1682), 'numpy.random.choice', 'np.random.choice', (['max_memory', 'bs'], {}), '(max_memory, bs)\n', (1666, 1682), True, 'import numpy as np\n'), ((2356, 2410), 'torch.nn.Linear', 'T.nn.Linear', (['*self.inp_dimensions', 'self.fc1_dimensions'], {}), '(*self.inp_dimensions, self.fc1_dimensions)\n', (2367, 2410), True, 'import torch as T\n'), ((2475, 2524), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc1.weight.data', '(-f1)', 'f1'], {}), '(self.fc1.weight.data, -f1, f1)\n', (2493, 2524), True, 'import torch as T\n'), ((2533, 2580), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc1.bias.data', '(-f1)', 'f1'], {}), '(self.fc1.bias.data, -f1, f1)\n', (2551, 2580), True, 'import torch as T\n'), ((2601, 2636), 'torch.nn.LayerNorm', 'T.nn.LayerNorm', (['self.fc1_dimensions'], {}), '(self.fc1_dimensions)\n', (2615, 2636), True, 'import torch as T\n'), ((2657, 2710), 'torch.nn.Linear', 'T.nn.Linear', (['self.fc1_dimensions', 'self.fc2_dimensions'], {}), '(self.fc1_dimensions, self.fc2_dimensions)\n', (2668, 2710), True, 'import torch as T\n'), ((2776, 2825), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc2.weight.data', 
'(-f2)', 'f2'], {}), '(self.fc2.weight.data, -f2, f2)\n', (2794, 2825), True, 'import torch as T\n'), ((2834, 2881), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc2.bias.data', '(-f2)', 'f2'], {}), '(self.fc2.bias.data, -f2, f2)\n', (2852, 2881), True, 'import torch as T\n'), ((2902, 2937), 'torch.nn.LayerNorm', 'T.nn.LayerNorm', (['self.fc2_dimensions'], {}), '(self.fc2_dimensions)\n', (2916, 2937), True, 'import torch as T\n'), ((2967, 3016), 'torch.nn.Linear', 'T.nn.Linear', (['self.nb_actions', 'self.fc2_dimensions'], {}), '(self.nb_actions, self.fc2_dimensions)\n', (2978, 3016), True, 'import torch as T\n'), ((3053, 3088), 'torch.nn.Linear', 'T.nn.Linear', (['self.fc2_dimensions', '(1)'], {}), '(self.fc2_dimensions, 1)\n', (3064, 3088), True, 'import torch as T\n'), ((3097, 3144), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.q.weight.data', '(-f3)', 'f3'], {}), '(self.q.weight.data, -f3, f3)\n', (3115, 3144), True, 'import torch as T\n'), ((3153, 3198), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.q.bias.data', '(-f3)', 'f3'], {}), '(self.q.bias.data, -f3, f3)\n', (3171, 3198), True, 'import torch as T\n'), ((3513, 3546), 'torch.nn.functional.relu', 'T.nn.functional.relu', (['state_value'], {}), '(state_value)\n', (3533, 3546), True, 'import torch as T\n'), ((4282, 4336), 'torch.nn.Linear', 'T.nn.Linear', (['*self.inp_dimensions', 'self.fc1_dimensions'], {}), '(*self.inp_dimensions, self.fc1_dimensions)\n', (4293, 4336), True, 'import torch as T\n'), ((4401, 4450), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc1.weight.data', '(-f1)', 'f1'], {}), '(self.fc1.weight.data, -f1, f1)\n', (4419, 4450), True, 'import torch as T\n'), ((4459, 4506), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc1.bias.data', '(-f1)', 'f1'], {}), '(self.fc1.bias.data, -f1, f1)\n', (4477, 4506), True, 'import torch as T\n'), ((4527, 4562), 'torch.nn.LayerNorm', 'T.nn.LayerNorm', (['self.fc1_dimensions'], {}), 
'(self.fc1_dimensions)\n', (4541, 4562), True, 'import torch as T\n'), ((4583, 4636), 'torch.nn.Linear', 'T.nn.Linear', (['self.fc1_dimensions', 'self.fc2_dimensions'], {}), '(self.fc1_dimensions, self.fc2_dimensions)\n', (4594, 4636), True, 'import torch as T\n'), ((4702, 4751), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc2.weight.data', '(-f2)', 'f2'], {}), '(self.fc2.weight.data, -f2, f2)\n', (4720, 4751), True, 'import torch as T\n'), ((4760, 4807), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.fc2.bias.data', '(-f2)', 'f2'], {}), '(self.fc2.bias.data, -f2, f2)\n', (4778, 4807), True, 'import torch as T\n'), ((4828, 4863), 'torch.nn.LayerNorm', 'T.nn.LayerNorm', (['self.fc2_dimensions'], {}), '(self.fc2_dimensions)\n', (4842, 4863), True, 'import torch as T\n'), ((4902, 4951), 'torch.nn.Linear', 'T.nn.Linear', (['self.fc2_dimensions', 'self.nb_actions'], {}), '(self.fc2_dimensions, self.nb_actions)\n', (4913, 4951), True, 'import torch as T\n'), ((4960, 5008), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.mu.weight.data', '(-f3)', 'f3'], {}), '(self.mu.weight.data, -f3, f3)\n', (4978, 5008), True, 'import torch as T\n'), ((5017, 5063), 'torch.nn.init.uniform_', 'T.nn.init.uniform_', (['self.mu.bias.data', '(-f3)', 'f3'], {}), '(self.mu.bias.data, -f3, f3)\n', (5035, 5063), True, 'import torch as T\n'), ((5331, 5354), 'torch.nn.functional.relu', 'T.nn.functional.relu', (['x'], {}), '(x)\n', (5351, 5354), True, 'import torch as T\n'), ((5415, 5438), 'torch.nn.functional.relu', 'T.nn.functional.relu', (['x'], {}), '(x)\n', (5435, 5438), True, 'import torch as T\n'), ((8126, 8172), 'torch.nn.functional.mse_loss', 'T.nn.functional.mse_loss', (['target', 'critic_value'], {}), '(target, critic_value)\n', (8150, 8172), True, 'import torch as T\n'), ((8450, 8468), 'torch.mean', 'T.mean', (['actor_loss'], {}), '(actor_loss)\n', (8456, 8468), True, 'import torch as T\n'), ((628, 650), 'numpy.zeros_like', 'np.zeros_like', (['self.mu'], 
{}), '(self.mu)\n', (641, 650), True, 'import numpy as np\n'), ((3770, 3802), 'torch.add', 'T.add', (['state_value', 'action_value'], {}), '(state_value, action_value)\n', (3775, 3802), True, 'import torch as T\n'), ((434, 470), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.mu.shape'}), '(size=self.mu.shape)\n', (450, 470), True, 'import numpy as np\n'), ((3307, 3328), 'torch.cuda.is_available', 'T.cuda.is_available', ([], {}), '()\n', (3326, 3328), True, 'import torch as T\n'), ((5173, 5194), 'torch.cuda.is_available', 'T.cuda.is_available', ([], {}), '()\n', (5192, 5194), True, 'import torch as T\n'), ((6319, 6339), 'numpy.zeros', 'np.zeros', (['nb_actions'], {}), '(nb_actions)\n', (6327, 6339), True, 'import numpy as np\n'), ((6467, 6503), 'torch.tensor', 'T.tensor', (['observation'], {'dtype': 'T.float'}), '(observation, dtype=T.float)\n', (6475, 6503), True, 'import torch as T\n'), ((7165, 7196), 'torch.tensor', 'T.tensor', (['reward'], {'dtype': 'T.float'}), '(reward, dtype=T.float)\n', (7173, 7196), True, 'import torch as T\n'), ((7235, 7249), 'torch.tensor', 'T.tensor', (['done'], {}), '(done)\n', (7243, 7249), True, 'import torch as T\n'), ((7293, 7327), 'torch.tensor', 'T.tensor', (['new_state'], {'dtype': 'T.float'}), '(new_state, dtype=T.float)\n', (7301, 7327), True, 'import torch as T\n'), ((7368, 7399), 'torch.tensor', 'T.tensor', (['action'], {'dtype': 'T.float'}), '(action, dtype=T.float)\n', (7376, 7399), True, 'import torch as T\n'), ((7439, 7469), 'torch.tensor', 'T.tensor', (['state'], {'dtype': 'T.float'}), '(state, dtype=T.float)\n', (7447, 7469), True, 'import torch as T\n'), ((7952, 7968), 'torch.tensor', 'T.tensor', (['target'], {}), '(target)\n', (7960, 7968), True, 'import torch as T\n'), ((415, 431), 'numpy.sqrt', 'np.sqrt', (['self.dt'], {}), '(self.dt)\n', (422, 431), True, 'import numpy as np\n')] |
from numpy import ma
from osgeo import gdal
from shapely.geometry import shape
def lat_long_to_idx(gt, lon, lat):
    """
    Convert a longitude/latitude pair to (row, col) raster array indexes
    using a GDAL geotransform.

    :param gt: GDAL geotransform (e.g. gdal.Open(x).GetGeoTransform()).
    :type gt: GDAL Geotransform tuple.
    :param lon: Longitude.
    :type lon: float
    :param lat: Latitude.
    :type lat: float
    :returns: tuple(int, int) -- (row, col) indexes.
    """
    row = int((lat - gt[3]) / gt[5])
    col = int((lon - gt[0]) / gt[1])
    return row, col
def get_masked_image(ascii):
    """
    Load a raster grid as a numpy masked array, masking nodata cells.

    :param ascii: Path to input raster file.
    :type ascii: string
    :returns: tuple(numpy.ma, tuple(geotransform))
    """
    dataset = gdal.Open(ascii, gdal.GA_ReadOnly)
    geotransform = dataset.GetGeoTransform()
    band = dataset.GetRasterBand(1)
    nodata = band.GetNoDataValue()
    raw = band.ReadAsArray(0, 0, band.XSize, band.YSize)
    # Mask out the nodata cells in place (copy=False avoids a duplicate array).
    masked = ma.masked_values(raw, nodata, copy=False)
    masked.fill_value = nodata
    return masked, geotransform
def get_values_for_catchments(ascii, catchments, func = None):
    """
    Read the raster values at every grid point of each supplied catchment.

    :param ascii: Path to input raster file.
    :type ascii: string
    :param catchments: Dictionary of catchments and their grid points.
    :type catchments: dict
    :param func: Function to apply to the grid values array for each catchment.
    :type func: function
    """
    masked_image, geotransform = get_masked_image(ascii)
    results = {}
    for catchment, points in catchments.items():
        # TODO: Properly handle small/null catchments.
        if len(points) <= 2:
            continue
        # Index the masked array with the (row, col) tuple for each point.
        values = [masked_image[lat_long_to_idx(geotransform, p[0], p[1])]
                  for p in points]
        results[catchment] = values if func is None else func(values)
    return results
| [
"osgeo.gdal.Open",
"numpy.ma.masked_values"
] | [((764, 798), 'osgeo.gdal.Open', 'gdal.Open', (['ascii', 'gdal.GA_ReadOnly'], {}), '(ascii, gdal.GA_ReadOnly)\n', (773, 798), False, 'from osgeo import gdal\n'), ((978, 1021), 'numpy.ma.masked_values', 'ma.masked_values', (['image', 'nodata'], {'copy': '(False)'}), '(image, nodata, copy=False)\n', (994, 1021), False, 'from numpy import ma\n')] |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python import ipu
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ImageOpsTest(test_util.TensorFlowTestCase):
  @test_util.deprecated_graph_mode_only
  def testNormaliseImage(self):
    """Verify ipu.ops.image_ops.normalise_image against a NumPy reference.

    Covers float32/float16/uint8 images, mixed scale/offset dtypes,
    tensor-valued scales/offsets, unusual shapes and error cases.
    """
    NUM_IMAGES = 3
    def make_graph(offsets, scales, scale=1, im_type=None, im_shape=None):
      # Build a graph that feeds NUM_IMAGES images through normalise_image
      # and collects the results via an outfeed queue.
      im_shape = im_shape or [2, 2, 2, 3]
      dataset = tu.create_single_increasing_dataset(NUM_IMAGES,
                                                   shape=im_shape,
                                                   dtype=np.float32)
      infeed = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
      outfeed = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
      def body(image):
        if im_type:
          image = math_ops.cast(image, im_type)
        normalised = ipu.ops.image_ops.normalise_image(image, offsets, scales,
                                                       scale)
        enqueue = outfeed.enqueue(normalised)
        return enqueue
      def my_net():
        return ipu.loops.repeat(NUM_IMAGES, body, [], infeed)
      with ipu.scopes.ipu_scope("/device:IPU:0"):
        return ipu.ipu_compiler.compile(my_net), infeed, outfeed
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      def test_case(offsets,
                    scales,
                    scale=1,
                    im_type=None,
                    im_shape=None,
                    tensor_scales_offsets=False):
        # Run the graph, then compute the expected normalised images on
        # the host with NumPy and compare.
        im_shape = im_shape or [2, 2, 2, 3]
        offsets_t = offsets
        scales_t = scales
        if tensor_scales_offsets:
          offsets_t = constant_op.constant(offsets)
          scales_t = constant_op.constant(scales)
        run, inf, outf = make_graph(offsets_t, scales_t, scale, im_type,
                                    im_shape)
        sess.run(inf.initializer)
        sess.run(run)
        results = sess.run(outf.dequeue())
        # Calculate expected results:
        # Make n images which have linearly increasing blanket values.
        expected = (np.ones([1] + im_shape).T * np.arange(NUM_IMAGES)).T
        # Cast and normalize (elementwise, then broadcasted scales and offsets).
        expected = ((expected.astype(im_type) * scale) - offsets) * scales
        # Pad to 4 channels.
        padding = np.zeros([NUM_IMAGES] + im_shape[:-1] + [4 - im_shape[-1]])
        expected = np.c_[expected, padding]
        self.assertAllClose(results, expected)
      # Simple usage in float32.
      test_case(np.array([1, 2, 3], np.float32),
                np.array([4, 5, 6], np.float32),
                scale=2)
      # Strange but valid shape.
      test_case(np.array([1, 2, 3], np.float32),
                np.array([4, 5, 6], np.float32),
                im_shape=[2, 1, 2, 9, 3])
      # Only 2 channels.
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  "The image has 2 channels, expected 3."):
        test_case(np.array([1, 2, 3], np.float32),
                  np.array([4, 5, 6], np.float32),
                  im_shape=[2, 2])
      # Bad shapes for scales/offsets.
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          "must be the same size as the number of image channels 3,"
          " but was"):
        test_case(np.array([1, 2], np.float32), np.array([4, 5, 6],
                                                         np.float32))
        test_case(np.array([1, 2, 3], np.float32),
                  np.array([4, 5, 6, 7], np.float32))
      # Precise and negative values.
      test_case(np.array([3.82, -1.9999, 6000], np.float32),
                np.array([-1, 1.5, 6.3333], np.float32),
                scale=-3.283)
      # float16.
      test_case(np.array([1, 2, 3], np.float16),
                np.array([4, 5, 6], np.float16), 2, np.float16)
      # uint8.
      test_case(np.array([1, 2, 3], np.float16),
                np.array([4, 5, 6], np.float16), 2, np.uint8)
      # Differing types are automatically handled.
      # float16 scales/offsets --> float32 image.
      test_case(np.array([1, 2, 3], np.float16),
                np.array([4, 5, 6], np.float16), 2, np.float32)
      # float32 scales/offsets --> uint8 image.
      test_case(np.array([1, 2, 3], np.float32),
                np.array([4, 5, 6], np.float32), 2, np.uint8)
      # They're also handled for tensor scales and offsets.
      test_case(np.array([1, 2, 3], np.float32),
                np.array([4, 5, 6], np.float32),
                2,
                np.uint8,
                tensor_scales_offsets=True)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
  googletest.main()
| [
"tensorflow.python.ipu.scopes.ipu_scope",
"tensorflow.compiler.plugin.poplar.tests.test_utils.create_single_increasing_dataset",
"tensorflow.python.ipu.config.IPUConfig",
"tensorflow.python.ipu.ops.image_ops.normalise_image",
"tensorflow.python.ipu.loops.repeat",
"numpy.zeros",
"numpy.ones",
"tensorfl... | [((5697, 5714), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (5712, 5714), False, 'from tensorflow.python.platform import googletest\n'), ((2197, 2208), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (2206, 2208), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((1382, 1468), 'tensorflow.compiler.plugin.poplar.tests.test_utils.create_single_increasing_dataset', 'tu.create_single_increasing_dataset', (['NUM_IMAGES'], {'shape': 'im_shape', 'dtype': 'np.float32'}), '(NUM_IMAGES, shape=im_shape, dtype=np.\n float32)\n', (1417, 1468), True, 'from tensorflow.compiler.plugin.poplar.tests import test_utils as tu\n'), ((1584, 1628), 'tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue', 'ipu.ipu_infeed_queue.IPUInfeedQueue', (['dataset'], {}), '(dataset)\n', (1619, 1628), False, 'from tensorflow.python import ipu\n'), ((1645, 1684), 'tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue', 'ipu.ipu_outfeed_queue.IPUOutfeedQueue', ([], {}), '()\n', (1682, 1684), False, 'from tensorflow.python import ipu\n'), ((1798, 1862), 'tensorflow.python.ipu.ops.image_ops.normalise_image', 'ipu.ops.image_ops.normalise_image', (['image', 'offsets', 'scales', 'scale'], {}), '(image, offsets, scales, scale)\n', (1831, 1862), False, 'from tensorflow.python import ipu\n'), ((2023, 2069), 'tensorflow.python.ipu.loops.repeat', 'ipu.loops.repeat', (['NUM_IMAGES', 'body', '[]', 'infeed'], {}), '(NUM_IMAGES, body, [], infeed)\n', (2039, 2069), False, 'from tensorflow.python import ipu\n'), ((2082, 2119), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2102, 2119), False, 'from tensorflow.python import ipu\n'), ((3365, 3424), 'numpy.zeros', 'np.zeros', (['([NUM_IMAGES] + im_shape[:-1] + [4 - im_shape[-1]])'], {}), '([NUM_IMAGES] + im_shape[:-1] + [4 - im_shape[-1]])\n', (3373, 3424), True, 'import numpy as np\n'), ((3567, 3598), 
'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float32'], {}), '([1, 2, 3], np.float32)\n', (3575, 3598), True, 'import numpy as np\n'), ((3616, 3647), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float32'], {}), '([4, 5, 6], np.float32)\n', (3624, 3647), True, 'import numpy as np\n'), ((3724, 3755), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float32'], {}), '([1, 2, 3], np.float32)\n', (3732, 3755), True, 'import numpy as np\n'), ((3773, 3804), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float32'], {}), '([4, 5, 6], np.float32)\n', (3781, 3804), True, 'import numpy as np\n'), ((4653, 4696), 'numpy.array', 'np.array', (['[3.82, -1.9999, 6000]', 'np.float32'], {}), '([3.82, -1.9999, 6000], np.float32)\n', (4661, 4696), True, 'import numpy as np\n'), ((4714, 4753), 'numpy.array', 'np.array', (['[-1, 1.5, 6.3333]', 'np.float32'], {}), '([-1, 1.5, 6.3333], np.float32)\n', (4722, 4753), True, 'import numpy as np\n'), ((4819, 4850), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float16'], {}), '([1, 2, 3], np.float16)\n', (4827, 4850), True, 'import numpy as np\n'), ((4868, 4899), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float16'], {}), '([4, 5, 6], np.float16)\n', (4876, 4899), True, 'import numpy as np\n'), ((4948, 4979), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float16'], {}), '([1, 2, 3], np.float16)\n', (4956, 4979), True, 'import numpy as np\n'), ((4997, 5028), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float16'], {}), '([4, 5, 6], np.float16)\n', (5005, 5028), True, 'import numpy as np\n'), ((5161, 5192), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float16'], {}), '([1, 2, 3], np.float16)\n', (5169, 5192), True, 'import numpy as np\n'), ((5210, 5241), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float16'], {}), '([4, 5, 6], np.float16)\n', (5218, 5241), True, 'import numpy as np\n'), ((5323, 5354), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float32'], {}), '([1, 2, 3], np.float32)\n', (5331, 5354), True, 'import numpy as np\n'), ((5372, 
5403), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float32'], {}), '([4, 5, 6], np.float32)\n', (5380, 5403), True, 'import numpy as np\n'), ((5495, 5526), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float32'], {}), '([1, 2, 3], np.float32)\n', (5503, 5526), True, 'import numpy as np\n'), ((5544, 5575), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float32'], {}), '([4, 5, 6], np.float32)\n', (5552, 5575), True, 'import numpy as np\n'), ((1747, 1776), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['image', 'im_type'], {}), '(image, im_type)\n', (1760, 1776), False, 'from tensorflow.python.ops import math_ops\n'), ((2136, 2168), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['my_net'], {}), '(my_net)\n', (2160, 2168), False, 'from tensorflow.python import ipu\n'), ((2678, 2707), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['offsets'], {}), '(offsets)\n', (2698, 2707), False, 'from tensorflow.python.framework import constant_op\n'), ((2729, 2757), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['scales'], {}), '(scales)\n', (2749, 2757), False, 'from tensorflow.python.framework import constant_op\n'), ((4031, 4062), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float32'], {}), '([1, 2, 3], np.float32)\n', (4039, 4062), True, 'import numpy as np\n'), ((4082, 4113), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float32'], {}), '([4, 5, 6], np.float32)\n', (4090, 4113), True, 'import numpy as np\n'), ((4374, 4402), 'numpy.array', 'np.array', (['[1, 2]', 'np.float32'], {}), '([1, 2], np.float32)\n', (4382, 4402), True, 'import numpy as np\n'), ((4404, 4435), 'numpy.array', 'np.array', (['[4, 5, 6]', 'np.float32'], {}), '([4, 5, 6], np.float32)\n', (4412, 4435), True, 'import numpy as np\n'), ((4512, 4543), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.float32'], {}), '([1, 2, 3], np.float32)\n', (4520, 4543), True, 'import numpy as np\n'), ((4563, 4597), 
'numpy.array', 'np.array', (['[4, 5, 6, 7]', 'np.float32'], {}), '([4, 5, 6, 7], np.float32)\n', (4571, 4597), True, 'import numpy as np\n'), ((3135, 3156), 'numpy.arange', 'np.arange', (['NUM_IMAGES'], {}), '(NUM_IMAGES)\n', (3144, 3156), True, 'import numpy as np\n'), ((3107, 3130), 'numpy.ones', 'np.ones', (['([1] + im_shape)'], {}), '([1] + im_shape)\n', (3114, 3130), True, 'import numpy as np\n')] |
# use Python 3 style print function rather than Python 2 print statements:
from __future__ import print_function
def read_asc_file(file_path, verbose=True):
    """
    Read in a file in ESRI ASCII raster format.

    The format consists of a six-line header (ncols, nrows, xllcorner,
    yllcorner, cellsize, nodata_value) followed by nrows lines of ncols
    grid values each.  For more information see:
    http://resources.esri.com/help/9.3/arcgisengine/java/GP_ToolRef/spatial_analyst_tools/esri_ascii_raster_format.htm

    :param file_path: Path to the .asc file.
    :param verbose: If True (default), print the parsed header values.
    :returns: dict with keys 'ncols', 'nrows', 'xllcorner', 'yllcorner',
        'cellsize', 'nodata_value', 'X', 'Y' and 'values'.  'values' is
        flipped so that row 0 corresponds to the southern edge, matching
        the X/Y meshgrid.
    """
    import numpy as np
    # Use a context manager so the file handle is released even if the
    # header is malformed (the previous version leaked it on exceptions).
    with open(file_path, 'r') as asc_file:
        ncols = int(asc_file.readline().split()[1])
        nrows = int(asc_file.readline().split()[1])
        xllcorner = float(asc_file.readline().split()[1])
        yllcorner = float(asc_file.readline().split()[1])
        cellsize = float(asc_file.readline().split()[1])
        nodata_value = float(asc_file.readline().split()[1])
    if verbose:
        print("ncols = %i" % ncols)
        print("nrows = %i" % nrows)
        print("xllcorner = %g" % xllcorner)
        print("yllcorner = %g" % yllcorner)
        print("cellsize = %g" % cellsize)
        print("nodata_value = %g" % nodata_value)
    # Data section: nrows lines, each containing ncols values
    # (the old comment had the two swapped).
    asc_data = np.loadtxt(file_path, skiprows=6)  # skip header
    values = asc_data.reshape((nrows, ncols))
    # The file stores the northernmost row first; flip so y increases
    # with the row index.
    values = np.flipud(values)
    # NOTE(review): these are llcorner-based coordinates (cell corners,
    # not cell centres) -- kept as before; confirm this is intended.
    x = xllcorner + cellsize * np.arange(0, ncols)
    y = yllcorner + cellsize * np.arange(0, nrows)
    X, Y = np.meshgrid(x, y)
    asc_data_dict = {'ncols': ncols, 'nrows': nrows, 'xllcorner': xllcorner,
                     'yllcorner': yllcorner, 'cellsize': cellsize,
                     'nodata_value': nodata_value,
                     'X': X, 'Y': Y, 'values': values}
    return asc_data_dict
| [
"numpy.arange",
"numpy.meshgrid",
"numpy.flipud",
"numpy.loadtxt"
] | [((1443, 1476), 'numpy.loadtxt', 'np.loadtxt', (['file_path'], {'skiprows': '(6)'}), '(file_path, skiprows=6)\n', (1453, 1476), True, 'import numpy as np\n'), ((1612, 1629), 'numpy.flipud', 'np.flipud', (['values'], {}), '(values)\n', (1621, 1629), True, 'import numpy as np\n'), ((1754, 1771), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1765, 1771), True, 'import numpy as np\n'), ((1670, 1689), 'numpy.arange', 'np.arange', (['(0)', 'ncols'], {}), '(0, ncols)\n', (1679, 1689), True, 'import numpy as np\n'), ((1720, 1739), 'numpy.arange', 'np.arange', (['(0)', 'nrows'], {}), '(0, nrows)\n', (1729, 1739), True, 'import numpy as np\n')] |
'''
____ __ __ __ __ _ __
/_ / ___ _/ / ___ ___ ___________ / /__ / /__/ /_____(_) /__
/ /_/ _ `/ _ \/ _ \/ -_) __/___/ -_) / -_) '_/ __/ __/ / '_/
/___/\_,_/_//_/_//_/\__/_/ \__/_/\__/_/\_\\__/_/ /_/_/\_\
Copyright 2021 ZAHNER-elek<NAME> GmbH & Co. KG
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
import numpy as np
class DCPlot(object):
""" Example class for plotting the data.
This class is an example to show the data in an exemplary way. For special use cases, everyone
must implement the plots themselves. The plot was optimized for data over a time track.
X and Y axis are always displayed linearly. The labeling and unit of the axes can be adjusted
separately.
The constructor creates the plotting window with labels without data.
Theoretically, an infinite number of y axes are possible. However, only 2 have been tested so far.
The axes are automatically formatted with engineering prefixes.
The display blocks as long as it does not get any computing time. It is optimized to be able
to append data, and remains open after plt.show().
By default, matplotlib would wait until the display is closed by the user.
Example of how the yAxis parameter must be formatted:
[{"label": "Voltage", "unit": "V"}, {"label": "Current", "unit": "A", "log": True}]
The structure is an array with a dictionary for each axis. The dictionary has two keys:
* label: The label of the axis.
* unit: The unit of the axis.
:param figureTitle: Title of the figure.
:param xAxisLabel: Lable of the X-axis.
:param xAxisUnit: Unit of the X-axis.
:param yAxis: Data structure for the Y-axis.
"""
colors = ["r", "b", "g", "c", "m", "y"]
def __init__(self, figureTitle, xAxisLabel, xAxisUnit, yAxis, data = None,**kwargs):
self._isOpen = True
self.xData = []
self.yData = []
self.yAxisConfig = yAxis
xFormatter = EngFormatter(unit=xAxisUnit)
yFormatters = []
for yAx in yAxis:
if "unit" in yAx.keys():
yFormatters.append(EngFormatter(unit=yAx["unit"]))
else:
yFormatters.append(EngFormatter(unit=""))
self.fig, self.axis = plt.subplots(1, 1)
self.fig.set_size_inches(10, 6)
self.fig.canvas.manager.set_window_title(figureTitle)
"""
Add a close event to easily check if the window is still open.
"""
self.fig.canvas.mpl_connect('close_event', self._closeEvent)
plt.ion()
i = 0
self.line = []
self.allAxes = [self.axis]
for yAx in yAxis:
self.line.append(None)
self.yData.append([])
axLabel = ""
if "label" in yAx.keys():
axLabel = yAx["label"]
if "log" in self.yAxisConfig[i] and self.yAxisConfig[i]["log"] == True:
axLabel = "|" + axLabel + "|"
color = "fuchsia" # default, if there are not enough colors in the array
if i < len(DCPlot.colors):
color = DCPlot.colors[i]
#Voltage blue current red. Must be adjusted later if there are different voltages or currents.
if "unit" in yAx.keys():
if yAx["unit"] == "V":
color = "b"
elif yAx["unit"] == "A":
color = "r"
if i == 0:
self.line[i], = self.axis.plot(self.xData, self.yData[i], label=axLabel, color=color, linewidth=1)
self.axis.set_ylabel(axLabel)
if "log" in yAx.keys() and yAx["log"] == True:
self.axis.set_yscale("log")
self.axis.yaxis.set_major_formatter(yFormatters[i])
else:
self.allAxes.append(self.axis.twinx())
self.line[i], = self.allAxes[i].plot(self.xData, self.yData[i], label=axLabel, color=color, linewidth=1)
self.allAxes[i].set_ylabel(axLabel)
if "log" in yAx.keys() and yAx["log"] == True:
self.allAxes[i].set_yscale("log")
self.allAxes[i].yaxis.set_major_formatter(yFormatters[i])
i += 1
self.axis.xaxis.set_major_formatter(xFormatter)
self.axis.set_xlabel(xAxisLabel)
self.axis.xaxis.grid(which='both', linestyle='--')
self.axis.yaxis.grid(which='both', linestyle='--')
if len(yAxis) > 1:
plt.legend(handles=self.line, loc="best")
if data != None:
self.addData(data[0], data[1])
plt.show()
plt.tight_layout()
plt.draw()
plt.pause(1e-3)
return
    def addData(self, xData, yDatas):
        """ Add data to the plot and rescale/redraw it.

        ``xData`` contains an array with values for the X-axis. ``yDatas``
        contains one array per Y-axis. The number of points must be the same
        for each data track.

        Example structure:
            xData = [0,1,2,3]
            yDatas = [[0,1,2,3],[0,1,2,3],...]

        NOTE(review): the new x values are *appended* to the stored history,
        but each ``self.yData[i]`` is *replaced* wholesale by ``yDatas[i]`` —
        callers apparently pass the full y history on every call so the track
        lengths stay consistent; confirm against the call sites.

        :param xData: Array with points for the X-axis.
        :param yDatas: Array with one value array per Y-axis.
        """
        for data in xData:
            self.xData.append(data)
        for i in range(len(self.yData)):
            # Tracks configured with "log": True are stored as |y| so that
            # negative samples survive the log scale chosen at construction.
            absRequired = False
            if "log" in self.yAxisConfig[i] and self.yAxisConfig[i]["log"] == True:
                absRequired = True
            if absRequired == False:
                self.yData[i] = yDatas[i]
            else:
                self.yData[i] = np.abs(yDatas[i])
            self.line[i].set_ydata(self.yData[i])
            self.line[i].set_xdata(self.xData)
        # Recompute data limits and autoscale every axis (incl. twinx axes).
        for ax in self.allAxes:
            ax.relim(visible_only=True)
            ax.autoscale_view(True, True, True)
        # Pin the x range to the data; skipped while all x values coincide
        # to avoid a degenerate (zero-width) x range.
        if len(self.xData) > 0:
            if min(self.xData) != max(self.xData):
                self.axis.set_xlim(min(self.xData), max(self.xData))
        plt.tight_layout()
        plt.draw()
        plt.pause(1e-3)
        return
def pause(self, time):
""" Pause the plot.
When the display pause is called, it gets compute time and is re-rendered.
:param time: Pause in seconds.
"""
plt.pause(time)
return
def clearData(self):
""" Clear the data from the plot.
This command only deletes the data from the display.
"""
self.xData = []
for i in range(len(self.yData)):
self.yData[i] = []
self.line[i].set_ydata(self.yData[i])
self.line[i].set_xdata(self.xData)
return
def clearPlot(self):
""" Clear the data from the plot.
This command deletes the data from the display and then redraws all of them to update the display.
"""
self.clearData()
plt.tight_layout()
plt.draw()
plt.pause(1e-3)
return
def savePlot(self, file, w=None, h=None):
""" Saving the plot.
Saving the plot, where the size of the plot can be adjusted beforehand.
When saving, the file type must also be specified in the file name.
These are the data types of the corresponding matplotlib command
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.savefig.html .
PDF works well for vector graphics.
:param file: File to save, with path and filetype.
:param w: With of the image in inches, but can be omitted if not needed.
:param h: Height of the image in inches, but can be omitted if not needed.
If only w is set, w is also used for the height, by matplotlib
and the image is square.
"""
if w != None:
self.fig.set_size_inches(w, h)
plt.tight_layout()
plt.draw()
plt.pause(1e-3)
self.fig.savefig(file, bbox_inches='tight')
return
def close(self):
""" Close the plot.
"""
plt.close()
return
def isOpen(self):
""" Check if the window is open.
Checks if the window is still open. If the window is closed, a private variable in the
callback is set to false.
:returns: True if the window is open else False.
"""
return self._isOpen
def _closeEvent(self, evt):
""" Close event.
This function is called when the plotting window is closed.
"""
self._isOpen = False
plt.close(self.fig)
return
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"matplotlib.ticker.EngFormatter"
] | [((3109, 3137), 'matplotlib.ticker.EngFormatter', 'EngFormatter', ([], {'unit': 'xAxisUnit'}), '(unit=xAxisUnit)\n', (3121, 3137), False, 'from matplotlib.ticker import EngFormatter\n'), ((3400, 3418), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3412, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3693, 3702), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3700, 3702), True, 'import matplotlib.pyplot as plt\n'), ((5919, 5929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5927, 5929), True, 'import matplotlib.pyplot as plt\n'), ((5938, 5956), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5954, 5956), True, 'import matplotlib.pyplot as plt\n'), ((5965, 5975), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5973, 5975), True, 'import matplotlib.pyplot as plt\n'), ((5984, 6000), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (5993, 6000), True, 'import matplotlib.pyplot as plt\n'), ((7442, 7460), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7458, 7460), True, 'import matplotlib.pyplot as plt\n'), ((7469, 7479), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7477, 7479), True, 'import matplotlib.pyplot as plt\n'), ((7488, 7504), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (7497, 7504), True, 'import matplotlib.pyplot as plt\n'), ((7743, 7758), 'matplotlib.pyplot.pause', 'plt.pause', (['time'], {}), '(time)\n', (7752, 7758), True, 'import matplotlib.pyplot as plt\n'), ((8411, 8429), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8427, 8429), True, 'import matplotlib.pyplot as plt\n'), ((8438, 8448), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (8446, 8448), True, 'import matplotlib.pyplot as plt\n'), ((8457, 8473), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (8466, 8473), True, 'import matplotlib.pyplot as plt\n'), 
((9375, 9393), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9391, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9402, 9412), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (9410, 9412), True, 'import matplotlib.pyplot as plt\n'), ((9421, 9437), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (9430, 9437), True, 'import matplotlib.pyplot as plt\n'), ((9578, 9589), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9587, 9589), True, 'import matplotlib.pyplot as plt\n'), ((10109, 10128), 'matplotlib.pyplot.close', 'plt.close', (['self.fig'], {}), '(self.fig)\n', (10118, 10128), True, 'import matplotlib.pyplot as plt\n'), ((5783, 5824), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'self.line', 'loc': '"""best"""'}), "(handles=self.line, loc='best')\n", (5793, 5824), True, 'import matplotlib.pyplot as plt\n'), ((7008, 7025), 'numpy.abs', 'np.abs', (['yDatas[i]'], {}), '(yDatas[i])\n', (7014, 7025), True, 'import numpy as np\n'), ((3261, 3291), 'matplotlib.ticker.EngFormatter', 'EngFormatter', ([], {'unit': "yAx['unit']"}), "(unit=yAx['unit'])\n", (3273, 3291), False, 'from matplotlib.ticker import EngFormatter\n'), ((3346, 3367), 'matplotlib.ticker.EngFormatter', 'EngFormatter', ([], {'unit': '""""""'}), "(unit='')\n", (3358, 3367), False, 'from matplotlib.ticker import EngFormatter\n')] |
# Parallel (MPI) regression test: every rank writes its locally created
# particle data into a shared HDF5 file, then after a shadow-particle
# exchange each rank verifies the received particles against that file.
import h5py
# Requires a parallel build of h5py; bail out gracefully on serial builds.
if h5py.get_config().mpi == False:
    import warnings
    warnings.warn("h5py not MPI enabled. Discontinuing test.")
    import sys
    sys.exit(0)
import underworld as uw
import numpy as np
# Build a 2-D Cartesian mesh and populate a swarm on it.
mesh = uw.mesh.FeMesh_Cartesian(elementRes=(128,128))
swarm = uw.swarm.Swarm(mesh)
# create some variables to track
origOwningEl = swarm.add_variable('int',1)
origCreatingProc = swarm.add_variable('int',1)
origParticleIndex = swarm.add_variable('int',1)
randomNumber = swarm.add_variable('int',1)
swarm.populate_using_layout(uw.swarm.layouts.PerCellSpaceFillerLayout(swarm,20))
# init variables
origOwningEl.data[:] = mesh.data_elgId[swarm.owningCell.data[:]] # global elementId where created
origCreatingProc.data[:] = uw.mpi.rank # rank where created
origParticleIndex.data[:,0] = range(swarm.particleLocalCount) # local index where created
from random import randint
for index in range(0,swarm.particleLocalCount): # add random numbers to this variable
    randomNumber.data[index] = randint(0,9999999)
# get max local particlecount across all procs
# (needed to size the shared dataset: one row per rank, one column per
# possible local particle index)
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
inguy = np.zeros(1)
outguy = np.zeros(1)
inguy[:] = swarm.particleLocalCount
comm.Allreduce(inguy, outguy, op=MPI.MAX)
# create h5 array for players to write primary data into
f = h5py.File('primarydata.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset_data = f.create_dataset('randomdata', (comm.Get_size(),outguy[0]), dtype='i')
# write primary data parallel array
# row = creating rank, column = particle's local index at creation time
dset_data[uw.mpi.rank,origParticleIndex.data[:,0]] = randomNumber.data[:,0]
# also create one to write particle element counts
dset_counts = f.create_dataset('counts', (mesh.elementsGlobal,), dtype='i')
# get counts
el_index, counts = np.unique(origOwningEl.data[:,0],return_counts=True)
for element_gId, el_count in zip (el_index,counts):
    dset_counts[element_gId] = el_count
# Sanity check: no shadow data may exist before the fetch.
if len(origCreatingProc.data_shadow) != 0:
    raise RuntimeError("The shadow data should be empty at this stage, but isn't. Hmm...")
# get shadow particles!!
swarm.shadow_particles_fetch()
# With more than one process, the fetch must have produced shadow data.
if len(origCreatingProc.data_shadow) == 0 and (uw.mpi.size>1):
    raise RuntimeError("The shadow data should be populated at this stage, but isn't. Hmm...")
# now check that communicated particles contain required data.
# first create local numpy copies of primary data in memory,
# as h5py has limitations in the way you can index its arrays
dset_numpy_data = np.array(dset_data)
# Each shadow particle's payload must match what its creating rank wrote.
if not (dset_numpy_data[origCreatingProc.data_shadow[:,0], origParticleIndex.data_shadow[:,0]] == randomNumber.data_shadow[:,0]).all():
    raise RuntimeError("Shadow particle data does not appear to be correct.")
# also check that we have the correct particle counts
# get counts
el_index, counts = np.unique(origOwningEl.data_shadow[:,0],return_counts=True)
# again create copy for indexing ease
dset_numpy_counts = np.array(dset_counts)
if not (dset_numpy_counts[el_index] == counts[:]).all():
    raise RuntimeError("Shadow data particle counts do not appear to be correct.")
# close and cleaup
f.close()
import os
if uw.mpi.rank==0:
    os.remove('primarydata.hdf5')
| [
"underworld.swarm.Swarm",
"h5py.get_config",
"h5py.File",
"os.remove",
"random.randint",
"numpy.zeros",
"sys.exit",
"underworld.swarm.layouts.PerCellSpaceFillerLayout",
"numpy.array",
"underworld.mesh.FeMesh_Cartesian",
"warnings.warn",
"numpy.unique"
] | [((212, 259), 'underworld.mesh.FeMesh_Cartesian', 'uw.mesh.FeMesh_Cartesian', ([], {'elementRes': '(128, 128)'}), '(elementRes=(128, 128))\n', (236, 259), True, 'import underworld as uw\n'), ((268, 288), 'underworld.swarm.Swarm', 'uw.swarm.Swarm', (['mesh'], {}), '(mesh)\n', (282, 288), True, 'import underworld as uw\n'), ((1190, 1201), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1198, 1201), True, 'import numpy as np\n'), ((1211, 1222), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1219, 1222), True, 'import numpy as np\n'), ((1363, 1433), 'h5py.File', 'h5py.File', (['"""primarydata.hdf5"""', '"""w"""'], {'driver': '"""mpio"""', 'comm': 'MPI.COMM_WORLD'}), "('primarydata.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)\n", (1372, 1433), False, 'import h5py\n'), ((1789, 1843), 'numpy.unique', 'np.unique', (['origOwningEl.data[:, 0]'], {'return_counts': '(True)'}), '(origOwningEl.data[:, 0], return_counts=True)\n', (1798, 1843), True, 'import numpy as np\n'), ((2492, 2511), 'numpy.array', 'np.array', (['dset_data'], {}), '(dset_data)\n', (2500, 2511), True, 'import numpy as np\n'), ((2813, 2874), 'numpy.unique', 'np.unique', (['origOwningEl.data_shadow[:, 0]'], {'return_counts': '(True)'}), '(origOwningEl.data_shadow[:, 0], return_counts=True)\n', (2822, 2874), True, 'import numpy as np\n'), ((2931, 2952), 'numpy.array', 'np.array', (['dset_counts'], {}), '(dset_counts)\n', (2939, 2952), True, 'import numpy as np\n'), ((71, 129), 'warnings.warn', 'warnings.warn', (['"""h5py not MPI enabled. Discontinuing test."""'], {}), "('h5py not MPI enabled. 
Discontinuing test.')\n", (84, 129), False, 'import warnings\n'), ((149, 160), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (157, 160), False, 'import sys\n'), ((533, 585), 'underworld.swarm.layouts.PerCellSpaceFillerLayout', 'uw.swarm.layouts.PerCellSpaceFillerLayout', (['swarm', '(20)'], {}), '(swarm, 20)\n', (574, 585), True, 'import underworld as uw\n'), ((1050, 1069), 'random.randint', 'randint', (['(0)', '(9999999)'], {}), '(0, 9999999)\n', (1057, 1069), False, 'from random import randint\n'), ((3157, 3186), 'os.remove', 'os.remove', (['"""primarydata.hdf5"""'], {}), "('primarydata.hdf5')\n", (3166, 3186), False, 'import os\n'), ((15, 32), 'h5py.get_config', 'h5py.get_config', ([], {}), '()\n', (30, 32), False, 'import h5py\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import numpy as np
def FakeQuantization8BitsRowwise(data):
    """Simulate a rowwise 8-bit quantize/dequantize round trip.

    Each row of *data* is mapped onto 256 levels spanning that row's own
    [min, max] range and then mapped back, mimicking the lossy behavior of
    the 'FloatToRowwiseQuantized8Bits' / 'Rowwise8BitQuantizedToFloat' ops.

    :param data: 2-D array; quantization parameters are per row.
    :return: Array of the same shape holding the dequantized values.
    """
    min_el = np.min(data, axis=1)
    max_el = np.max(data, axis=1)
    scale = (max_el - min_el) / 255.
    bias = min_el
    # BUG FIX: a constant row has scale == 0, which previously made
    # inv_scale infinite and the product below NaN. Dividing by a dummy
    # scale of 1 is safe because the final multiplication by scale (== 0)
    # collapses such a row to bias, i.e. its own constant value.
    inv_scale = 1. / np.where(scale == 0, 1., scale)
    data = data.T
    data = np.round((data - bias) * inv_scale) * scale + bias
    return data.T
class TestQuantize8bits(hu.HypothesisTestCase):
    """Tests for the rowwise 8-bit quantization operators.

    Each test runs the Caffe2 quantize/dequantize operators (and the fused
    8-bit sparse-lengths operators) and compares their output against a
    NumPy reference or against the equivalent float pipeline.
    """

    def test_quantize_op(self):
        """Quantize then dequantize a 2x4 matrix; result must match the
        NumPy reference round trip."""
        op = core.CreateOperator(
            'FloatToRowwiseQuantized8Bits',
            ['input_data'],
            ['quantized_input', 'scale_bias'])
        input_data = np.float32(np.asarray([[801., 786, 235.2, 2353.3434],
                                             [5., 11., 9., -2.]]))
        workspace.FeedBlob('input_data', input_data)
        workspace.RunOperatorOnce(op)
        op1 = core.CreateOperator(
            'Rowwise8BitQuantizedToFloat',
            ['quantized_input', 'scale_bias'],
            ['dequantized_input'])
        workspace.RunOperatorOnce(op1)
        result = workspace.FetchBlob('dequantized_input')
        ground_truth = FakeQuantization8BitsRowwise(input_data)
        np.testing.assert_array_almost_equal(
            result, ground_truth)

    def test_quantize_tensor_with_const_row_op(self):
        """Same round trip, but with a constant second row (degenerate
        scale); its expected dequantized value is the constant itself."""
        op = core.CreateOperator(
            'FloatToRowwiseQuantized8Bits',
            ['input_data'],
            ['quantized_input', 'scale_bias'])
        input_data = np.float32(np.asarray([[801., 786, 235.2, 2353.3434],
                                             [9., 9., 9., 9.]]))
        workspace.FeedBlob('input_data', input_data)
        workspace.RunOperatorOnce(op)
        op1 = core.CreateOperator(
            'Rowwise8BitQuantizedToFloat',
            ['quantized_input', 'scale_bias'],
            ['dequantized_input'])
        workspace.RunOperatorOnce(op1)
        result = workspace.FetchBlob('dequantized_input')
        ground_truth = FakeQuantization8BitsRowwise(input_data)
        # Patch the constant row by hand: the NumPy reference divides by a
        # zero scale there and does not produce the constant value itself.
        ground_truth[1, :] = 9.
        np.testing.assert_array_almost_equal(
            result, ground_truth)

    def test_SparseSegmentUint8(self):
        """Compare every fused 8-bit sparse-lengths operator against the
        equivalent dequantize-then-float-op pipeline on random data."""
        init_net = core.Net("init")
        net = core.Net("bench")
        size = 10**3
        isize = 10**2

        # input preparation
        # d: dense data table; w: per-lookup weights; i: indices into d;
        # l: segment lengths (10 lookups per segment).
        d = init_net.UniformFill([], shape=[size, 32])
        w = init_net.UniformFill([], shape=[isize, ])
        i = init_net.UniformIntFill([], shape=[isize], max=size - 1)
        i = init_net.Cast([i], to=core.DataType.INT64)
        l = init_net.ConstantFill(
            [],
            ['l'],
            shape=[isize // 10],
            value=10,
            dtype=core.DataType.INT32,
        )
        # Quantize once, and also dequantize so the float ops below see the
        # same (lossy) values the 8-bit ops operate on.
        net.FloatToRowwiseQuantized8Bits([d],
                                         ['quantized_data', 'scale_bias'])
        net.Rowwise8BitQuantizedToFloat(['quantized_data', 'scale_bias'],
                                        ['dequantized_data'])

        # SparseLengthsWeightedSum
        net.SparseLengthsWeightedSum(['dequantized_data', w, i, l],
                                     ['PositionWeighted_0'], engine='fp16')
        net.SparseLengthsWeightedSum8BitsRowwise(
            ['quantized_data', w, i, l, 'scale_bias'],
            ['PositionWeighted_1'])

        # SparseLengthsSum
        net.SparseLengthsSum(['dequantized_data', i, l],
                             ['Sum_0'], engine='fp16')

        net.SparseLengthsSum8BitsRowwise(
            ['quantized_data', i, l, 'scale_bias'],
            ['Sum_1'])

        # SparseLengthsWeightedMean
        # net.SparseLengthsWeightedMean(['dequantized_data', w, i, l],
        #                      ['WeightedMean_0'])
        # net.SparseLengthsWeightedMean8BitsRowwise(
        #     ['quantized_data', w, i, l, 'scale_bias'],
        #     ['WeightedMean_1'])

        # SparseLengthsMean
        net.SparseLengthsMean(['dequantized_data', i, l],
                              ['Mean_0'], engine='fp16')

        net.SparseLengthsMean8BitsRowwise(
            ['quantized_data', i, l, 'scale_bias'],
            ['Mean_1'])

        # Gather: fetch quantized rows plus their scale/bias, dequantize,
        # and compare against gathering from the dequantized table directly.
        gathered_w = net.Gather(['quantized_data', i],
                                engine='fp16')

        gathered_scale_bias = net.Gather(['scale_bias', i],
                                         engine='fp16')
        net.Rowwise8BitQuantizedToFloat(
            [gathered_w, gathered_scale_bias],
            'Gathered_1')

        net.Gather(['dequantized_data', i], 'Gathered_0')

        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.RunNetOnce(init_net)
        workspace.CreateNet(net)
        workspace.RunNetOnce(net)

        # Each 8-bit fused result must agree with its float counterpart.
        PositionWeighted_1 = workspace.FetchBlob('PositionWeighted_1')
        ground_truth_posw = workspace.FetchBlob('PositionWeighted_0')
        np.testing.assert_array_almost_equal(PositionWeighted_1,
                                             ground_truth_posw, decimal=5)
        Sum_1 = workspace.FetchBlob('Sum_1')
        ground_truth_sum = workspace.FetchBlob('Sum_0')
        np.testing.assert_array_almost_equal(Sum_1,
                                             ground_truth_sum, decimal=5)
        Mean_1 = workspace.FetchBlob('Mean_1')
        ground_truth_mean = workspace.FetchBlob('Mean_0')
        np.testing.assert_array_almost_equal(Mean_1,
                                             ground_truth_mean, decimal=5)
        Gathered_1 = workspace.FetchBlob('Gathered_1')
        ground_truth_gathered = workspace.FetchBlob('Gathered_0')
        np.testing.assert_array_almost_equal(Gathered_1,
                                             ground_truth_gathered, decimal=5)
| [
"caffe2.python.workspace.FetchBlob",
"caffe2.python.workspace.GlobalInit",
"caffe2.python.core.Net",
"caffe2.python.workspace.FeedBlob",
"numpy.asarray",
"caffe2.python.workspace.RunNetOnce",
"caffe2.python.workspace.RunOperatorOnce",
"numpy.min",
"numpy.max",
"caffe2.python.core.CreateOperator",
... | [((315, 335), 'numpy.min', 'np.min', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (321, 335), True, 'import numpy as np\n'), ((349, 369), 'numpy.max', 'np.max', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (355, 369), True, 'import numpy as np\n'), ((646, 753), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""FloatToRowwiseQuantized8Bits"""', "['input_data']", "['quantized_input', 'scale_bias']"], {}), "('FloatToRowwiseQuantized8Bits', ['input_data'], [\n 'quantized_input', 'scale_bias'])\n", (665, 753), False, 'from caffe2.python import core, workspace\n'), ((935, 979), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['"""input_data"""', 'input_data'], {}), "('input_data', input_data)\n", (953, 979), False, 'from caffe2.python import core, workspace\n'), ((988, 1017), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (1013, 1017), False, 'from caffe2.python import core, workspace\n'), ((1032, 1144), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Rowwise8BitQuantizedToFloat"""', "['quantized_input', 'scale_bias']", "['dequantized_input']"], {}), "('Rowwise8BitQuantizedToFloat', ['quantized_input',\n 'scale_bias'], ['dequantized_input'])\n", (1051, 1144), False, 'from caffe2.python import core, workspace\n'), ((1186, 1216), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op1'], {}), '(op1)\n', (1211, 1216), False, 'from caffe2.python import core, workspace\n'), ((1234, 1274), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""dequantized_input"""'], {}), "('dequantized_input')\n", (1253, 1274), False, 'from caffe2.python import core, workspace\n'), ((1347, 1405), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'ground_truth'], {}), '(result, ground_truth)\n', (1383, 1405), True, 'import numpy as np\n'), ((1487, 1594), 'caffe2.python.core.CreateOperator', 
'core.CreateOperator', (['"""FloatToRowwiseQuantized8Bits"""', "['input_data']", "['quantized_input', 'scale_bias']"], {}), "('FloatToRowwiseQuantized8Bits', ['input_data'], [\n 'quantized_input', 'scale_bias'])\n", (1506, 1594), False, 'from caffe2.python import core, workspace\n'), ((1774, 1818), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['"""input_data"""', 'input_data'], {}), "('input_data', input_data)\n", (1792, 1818), False, 'from caffe2.python import core, workspace\n'), ((1827, 1856), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (1852, 1856), False, 'from caffe2.python import core, workspace\n'), ((1871, 1983), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Rowwise8BitQuantizedToFloat"""', "['quantized_input', 'scale_bias']", "['dequantized_input']"], {}), "('Rowwise8BitQuantizedToFloat', ['quantized_input',\n 'scale_bias'], ['dequantized_input'])\n", (1890, 1983), False, 'from caffe2.python import core, workspace\n'), ((2025, 2055), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op1'], {}), '(op1)\n', (2050, 2055), False, 'from caffe2.python import core, workspace\n'), ((2073, 2113), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""dequantized_input"""'], {}), "('dequantized_input')\n", (2092, 2113), False, 'from caffe2.python import core, workspace\n'), ((2218, 2276), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'ground_truth'], {}), '(result, ground_truth)\n', (2254, 2276), True, 'import numpy as np\n'), ((2350, 2366), 'caffe2.python.core.Net', 'core.Net', (['"""init"""'], {}), "('init')\n", (2358, 2366), False, 'from caffe2.python import core, workspace\n'), ((2381, 2398), 'caffe2.python.core.Net', 'core.Net', (['"""bench"""'], {}), "('bench')\n", (2389, 2398), False, 'from caffe2.python import core, workspace\n'), ((4691, 4747), 'caffe2.python.workspace.GlobalInit', 
'workspace.GlobalInit', (["['caffe2', '--caffe2_log_level=0']"], {}), "(['caffe2', '--caffe2_log_level=0'])\n", (4711, 4747), False, 'from caffe2.python import core, workspace\n'), ((4756, 4786), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['init_net'], {}), '(init_net)\n', (4776, 4786), False, 'from caffe2.python import core, workspace\n'), ((4795, 4819), 'caffe2.python.workspace.CreateNet', 'workspace.CreateNet', (['net'], {}), '(net)\n', (4814, 4819), False, 'from caffe2.python import core, workspace\n'), ((4828, 4853), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (4848, 4853), False, 'from caffe2.python import core, workspace\n'), ((4884, 4925), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""PositionWeighted_1"""'], {}), "('PositionWeighted_1')\n", (4903, 4925), False, 'from caffe2.python import core, workspace\n'), ((4954, 4995), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""PositionWeighted_0"""'], {}), "('PositionWeighted_0')\n", (4973, 4995), False, 'from caffe2.python import core, workspace\n'), ((5004, 5094), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['PositionWeighted_1', 'ground_truth_posw'], {'decimal': '(5)'}), '(PositionWeighted_1, ground_truth_posw,\n decimal=5)\n', (5040, 5094), True, 'import numpy as np\n'), ((5152, 5180), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Sum_1"""'], {}), "('Sum_1')\n", (5171, 5180), False, 'from caffe2.python import core, workspace\n'), ((5208, 5236), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Sum_0"""'], {}), "('Sum_0')\n", (5227, 5236), False, 'from caffe2.python import core, workspace\n'), ((5245, 5317), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['Sum_1', 'ground_truth_sum'], {'decimal': '(5)'}), '(Sum_1, ground_truth_sum, decimal=5)\n', (5281, 5317), True, 'import numpy as np\n'), ((5381, 
5410), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Mean_1"""'], {}), "('Mean_1')\n", (5400, 5410), False, 'from caffe2.python import core, workspace\n'), ((5439, 5468), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Mean_0"""'], {}), "('Mean_0')\n", (5458, 5468), False, 'from caffe2.python import core, workspace\n'), ((5477, 5551), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['Mean_1', 'ground_truth_mean'], {'decimal': '(5)'}), '(Mean_1, ground_truth_mean, decimal=5)\n', (5513, 5551), True, 'import numpy as np\n'), ((5619, 5652), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Gathered_1"""'], {}), "('Gathered_1')\n", (5638, 5652), False, 'from caffe2.python import core, workspace\n'), ((5685, 5718), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Gathered_0"""'], {}), "('Gathered_0')\n", (5704, 5718), False, 'from caffe2.python import core, workspace\n'), ((5727, 5813), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['Gathered_1', 'ground_truth_gathered'], {'decimal': '(5)'}), '(Gathered_1, ground_truth_gathered,\n decimal=5)\n', (5763, 5813), True, 'import numpy as np\n'), ((481, 516), 'numpy.round', 'np.round', (['((data - bias) * inv_scale)'], {}), '((data - bias) * inv_scale)\n', (489, 516), True, 'import numpy as np\n'), ((818, 886), 'numpy.asarray', 'np.asarray', (['[[801.0, 786, 235.2, 2353.3434], [5.0, 11.0, 9.0, -2.0]]'], {}), '([[801.0, 786, 235.2, 2353.3434], [5.0, 11.0, 9.0, -2.0]])\n', (828, 886), True, 'import numpy as np\n'), ((1659, 1725), 'numpy.asarray', 'np.asarray', (['[[801.0, 786, 235.2, 2353.3434], [9.0, 9.0, 9.0, 9.0]]'], {}), '([[801.0, 786, 235.2, 2353.3434], [9.0, 9.0, 9.0, 9.0]])\n', (1669, 1725), True, 'import numpy as np\n')] |
'''
WES.2018.03.01
'''
import numpy as np
import numpy.random as npr
from scipy.special import psi, gammaln
from collections import namedtuple
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
#%%
class ElasticNet(object):
'''
This is a single use Elastic Net method for solving a Linear Equation.
It can be used for a Gaussian, Poisson, Negative Binomial, or Logit distributions.
Calling the fit method with no inputs allows you to develop your own cross validation method if needed.
Initializations:
x = feature variable(s)
y = target variable(s)
offset (default = None) = offset
alpha (default=1) = regularization term
1.0 = full Lasso regression
0.0 = full Ridge regression
depth (default=20) = depth of lambda to go to (full path is 100)
tol (defalut = 1e-4) = tolerance specification for the beta convergence
x_std (default = False) = standardize x
y_std (default = False) = standardize y
family = family of functions (default is NegBin)
The other available methods are 'Gauss', 'Poisson', and 'Logit'.
manual_lam_seq (default is None)
For providing a manual lambda sequence. Must pass an array.
Resets the depth to len(manual_lam_seq)-1!!!
Methods:
        lam_seq_gen(x, y, offset=1, alpha=1, nlen=100):
takes the passed x and y and develops the lambda sequence
cost(x, y, b0, b, k, lam, offset=1, alpha=1, fam):
cost function for the optimization
not utilized in the coord descent but available here for testing
cord(x, y, b0_init, b_init, lam, k=1, alpha=1, nullDev=1, tol=1e-4, fam='NegBin', offset=1):
coordinate descent for optimization
disp_est(,x,y,b0,b,offset=1,k=1):
dispersion estimate
devi(x, y, b0, b, k=1, offset=1.0, fam='NegBin'):
deviance
devi_stack(x, y, b0, b, k=1, offset=1, fam='NegBin'):
deviance for stacked errors
sigmoid(z):
sigmoid function
nbd_grad(x, y, b0, b, offset=1, k=1):
NegBin Gradient
fit():
Fits the model.
Usage Example:
from ___.___ import ElasticNet as enet
from enetUtils import *
lags = 1
xL = lagDf(xData, lags) #potential call to lag a data frame
mod = enet.ElasticNet(xL, yB, offset=None, x_std=True, y_std=False,
alpha=1.0, depth=20, tol=tols, fam='Gauss',
manual_lam_seq=None)
fit = mod.fit()
'''
    def __init__(self, x, y, offset=None, x_std=False, y_std=False,
                 alpha=1.0, depth=20, tol=1e-4, fam='NegBin',
                 manual_lam_seq=None):
        ''' Initialize the model: store/standardize the data, resolve the
        offset, and precompute the lambda regularization sequence.

        See the class docstring for the meaning of each argument.
        '''
        self.x_std = x_std
        self.y_std = y_std
        # Keep DataFrame column names; synthesize X0..X(n-1) for raw arrays.
        if type(x) == pd.core.frame.DataFrame:
            self.param_nm = x.columns
        else:
            self.param_nm = list(str('X'+str(x)) for x in range(x.shape[1]))
        ss = StandardScaler(with_mean=True, with_std=True)
        if x_std == True:
            self.x = ss.fit_transform(x)
        else:
            self.x = np.array(x)
        if y_std == True:
            if len(np.shape(y)) > 1:
                self.y = ss.fit_transform(y)
            else:
                #y = ss.fit_transform(y[:, None])[:, 0]
                self.y = ss.fit_transform(y.reshape(-1,1))
        else:
            self.y = np.array(y)
        if fam == 'Logit':
            # Binarize the target for logistic regression (y > 0 -> 1).
            self.y = np.array(np.where(y>0,1,0))
            ## this changes the target to a binary set
        # Force y into a column vector (m, 1) when it arrives 1-D.
        if len(np.shape(self.y))>1:
            pass
        else:
            self.y = np.reshape(self.y, (len(self.y),1))
        if offset is not None:
            ##check shape or size
            # NOTE(review): only scalar offsets (np.size == 1) are expanded
            # here; an array-valued offset falls through without assigning
            # self.offset, and the assert below would then fail with an
            # AttributeError — confirm whether array offsets are supported.
            if np.size(offset) == 1:
                if offset == 0:
                    self.offset = np.ones(self.y.shape)
                else:
                    self.offset = offset * np.ones(self.y.shape)
        else:
            self.offset = np.ones(self.y.shape)
        assert len(self.offset) == len(self.y), "Length of Offset != Length of y"
        self.offset = np.reshape(self.offset, (len(self.offset),1))
        self.alpha = alpha
        self.depth = depth
        self.tol = tol
        self.family = fam
        ##FORMAT LAMBDA SEQ NOW
        mx, nx = np.shape(self.x)
        my, ny = np.shape(self.y)
        # Family-specific null-model intercept used to form the residual
        # that seeds the lambda path below.
        if fam == 'NegBin' or fam == 'Poisson':
            b0_init = np.log(np.mean(self.y/self.offset, axis=0))
        if fam == 'Gauss':
            b0_init = np.mean(self.y,axis=0)
        if fam == 'Logit':
            b0_init = np.log(np.mean(self.y,axis=0)/(1-np.mean(self.y,axis=0)))
        ## CHECKING FOR MULTIVARIABLE TARGET
        if ny > 1:
            # Stack a multi-column target into one long univariate problem:
            # x repeated per target column, y/offset flattened column-wise.
            xstack = np.matlib.repmat(self.x, ny, 1)
            ystack = np.reshape(self.y, (my*ny,1), order='F')
            ofstack = np.reshape(self.offset, (my*ny,1), order='F')
        if ny>1:
            # Null-model residual per family, on the stacked problem.
            fStackCase = {'NegBin': ystack - np.exp(b0_init + np.log(ofstack)),
                          'Poisson': ystack - np.exp(b0_init + np.log(ofstack)),
                          'Gauss': ystack - b0_init,
                          'Logit': ystack - self.sigmoid(b0_init)}
            funstack = fStackCase[self.family]
            lams = self.lam_seq_gen(xstack, funstack, ofstack, self.alpha, 100)
        else:
            # Null-model residual per family, single-target case.
            fCase = {'NegBin': self.y - np.exp(b0_init + np.log(self.offset)),
                     'Poisson': self.y - np.exp(b0_init + np.log(self.offset)),
                     'Gauss': self.y - b0_init,
                     'Logit': self.y - self.sigmoid(b0_init)}
            fun = fCase[self.family]
            lams = self.lam_seq_gen(self.x, fun, self.offset, self.alpha, 100)
        self.lams = lams
        # A user-supplied lambda path overrides the generated one and
        # resets the search depth to match its length.
        if manual_lam_seq is None:
            pass
        else:
            manual_lam_seq = np.array(manual_lam_seq)
            # NOTE(review): this type check can never fire — manual_lam_seq
            # was just converted with np.array() on the line above.
            if type(manual_lam_seq) != np.ndarray and type(manual_lam_seq) != list:
                raise Exception('** Manual lambdas must be a list or an numpy array and must be of length >= 2! **')
            assert len(manual_lam_seq) >= 2, "** Length of Manual Lam Seq Must Be >= 2. **"
            self.lams = manual_lam_seq.astype(float)
            self.depth = len(manual_lam_seq) - 1
            print(" ** Depth has been reset appropriately to reflect manual lambda sequence! ** ")
        self.manual_lam_seq = manual_lam_seq
#%% LOG LAMBDA SEQUENCE FUNCTION===========================================
def lam_seq_gen(self, x, y, offset=1, alpha=1, nlen=100):
''' lambda sequence generator '''
m,n = np.shape(x)
## addition to assist with sizing problems coming from the offset and y
## if y is not already standardized
if np.mean(np.abs( np.dot(y.T,y) - len(y) )) < 1e-2:
pass
else:
y = y / np.std(y)
if m>n: lam_ratio = 0.0001
else: lam_ratio = 0.01
lam_max = np.max( np.abs( np.dot(x.T,y) ) ) / m
if alpha != 0: lam_max = lam_max / alpha
else: lam_max = lam_max / 0.001
lam_min = lam_ratio*lam_max
lams_log = np.linspace(np.log(lam_max), np.log(lam_min), 100)
lams = np.exp(np.insert(lams_log,0,-10))
return lams
#%% NEGATIVE BINOMIAL LASSO FUNCTION=======================================
def fit(self):
''' fit call for the regression '''
fam = self.family
X = self.x
y = self.y
ofs = self.offset
mx, nx = np.shape(X)
my, ny = np.shape(y)
b_init = np.zeros((nx,1))
if fam == 'NegBin' or fam == 'Poisson':
b0_init = np.log( np.mean(y/ofs, axis=0))
k_init, it_dummy = self.disp_est(X, y, b0_init, b_init, ofs, 1)
if fam == 'Poisson':
k_init, it_dummy = 1e-5, 0
dev = self.devi(X, y, b0_init, b_init, k_init, ofs, fam)
if fam == 'Gauss':
b0_init = np.mean(y,axis=0)
k_init, it_dummy = 1e-5, 0
dev = np.mean(self.devi(X,y,b0_init,b_init, k_init, ofs, fam))
if fam == 'Logit':
p0 = np.mean(y,axis=0)
b0_init = np.log(p0/(1-p0))
k_init, it_dummy = 1e-5, 0
dev = self.devi(X, y, b0_init, b_init, k_init, ofs, fam)
## New way to intialize lambdas
lams = self.lams
if np.isnan(b0_init).any() == True:
raise Exception("The value of b0 is NAN. Confirm y is NOT standardized.")
##Storage Containers for Variables--------------------------------------
minL = min(self.depth, 100)
betas = np.zeros((nx, minL))
beta0s = np.zeros((1, minL))
ks = np.zeros((1, minL))
yhats = np.zeros((minL, my))
disp_iters = np.zeros((minL,1))
mod_err = np.zeros((minL,1))
##---------------------------------------------------------------------
for j in range(minL):
lnb1 = lams[j+1]
lnb0 = lams[j]
if fam == 'NegBin':
k, disp_iter = self.disp_est(X, y, b0_init, b_init, ofs, k_init)
else:
k, disp_iter = 1e-5, 0
nzb, jdum = np.nonzero( np.abs(X.T.dot(y) / mx) > self.alpha*(2.0*lnb1 - lnb0) )
x_nzb = np.array(X[:,nzb])
b_nzb = np.array(b_init[nzb])
b0, b, npass = self.cord(x_nzb, y, b0_init, b_nzb, lnb1, k, self.alpha, dev/mx, self.tol, fam, ofs)
b0_init = np.copy(b0)
k_init = np.copy(k)
b_init[nzb] = b[:]
if fam == 'NegBin' or fam == 'Poisson':
model_dev = self.devi(X,y,b0_init,b_init,k_init,ofs,fam=fam)
r = np.divide(np.subtract(dev,model_dev),dev)
if r > 0.9: break
yhat = np.exp(b0_init + X.dot(b_init) + np.log(ofs))
if fam == 'Logit':
model_dev = self.devi(X,y,b0_init,b_init,k_init,ofs,fam=fam)
r = np.divide(np.subtract(dev,model_dev),dev)
if r > 0.9: break
yhat = self.sigmoid(b0_init + X.dot(b_init))
else:
model_dev = np.mean(self.devi(X,y,b0_init,b_init,k_init,ofs,fam=fam))
yhat = b0_init + X.dot(b_init)
betas[:,j] = np.copy(b_init.ravel())
beta0s[:,j] = np.copy(b0_init)
ks[:,j] = np.copy(k_init)
yhats[j,:] = yhat.ravel()
disp_iters[j] = disp_iter
mod_err[j] = model_dev
if k_init <= 1e-4:
self.DispersionNote = "Dispersion reached < 1e-4, consider running a Poisson."
## MIN OUT OF SAMPLE ERROR PREDICTION - PICKING LOWEST LAMBDA WITH AT LEAST 2 BETAS
min_errlm_idx = np.where(mod_err == np.nanmin(mod_err))[0][0]
betaCntChk = np.sum(betas[:,min_errlm_idx]!=0)
while betaCntChk < 2 and min_errlm_idx < self.depth-1:
self.min_errlm_idx_note = 'Min lambda error had no Betas - moving forward until there are at least 2.'
min_errlm_idx += 1
betaCntChk = np.sum(betas[:,min_errlm_idx]!=0)
self.B = betas
self.B0 = beta0s
self.min_lam_idx = min_errlm_idx
self.K = ks
self.disp_iter = disp_iters
self.yhat = yhats
self.model_errors = mod_err
#%% DISPERSION ESTIMATE FOR K==============================================
def disp_est(self, x, y, b0, b, offset=1, k=1):
''' dispersion estimate calculation '''
iters = 0
k_old=0
while np.abs(k-k_old) > 1e-3:
k_old = np.copy(k)
k = k - 0.01 / np.sqrt(len(x)+iters) * self.nbd_grad(x,y,b0,b,offset,k) ##Original
iters += 1
if k<0:
k = 1e-6
break
return k, iters
#%% GRADIENT - NEG BINOM===================================================
def nbd_grad(self, x, y, b0, b, offset=1, k=1):
''' gradient calculation for the negative binomial model '''
mu = np.exp(b0 + x.dot(b) + np.log(offset))
grad = -np.sum( psi(y+1/k)*(-1/k**2) + psi(1/k)*(1/k**2) + (1/k**2)*np.log(k) - \
(1/k**2) + (1/k**2)*np.log(1/k + mu) + (1/k**3)/(1/k + mu) + \
(y/(1/k + mu))*(1/k**2) )
return grad
#%% SIGMOID================================================================
def sigmoid(self,z):
''' sigmoid function 1/(1+exp(-z)) for logit '''
return 1.0/(1.0+np.exp(-z))
#%% COORDINATE DESCENT - NEG BINOM=========================================
    def cord(self, x, y, b0_init, b_init, lam, k=1, alpha=1, nullDev=1, tol=1e-4, fam='NegBin', offset=1):
        """Coordinate descent on an IRLS-style working response.

        Builds family-specific working weights ``w`` and working response
        ``z`` once, then cycles over coordinates applying soft-thresholding
        until the combined (b0, b) update moves by less than ``tol`` (by
        L2 norm) or 1000 passes are reached.

        NOTE: ``b_init`` and ``b0_init`` are updated IN PLACE each pass;
        callers rely on this side effect.  ``nullDev`` is accepted but not
        used in this body.

        Returns (b0, b, npass).
        """
        m,n = np.shape(x)
        npass, tol_chk = 0, 1
        b = np.zeros((n,1))
        # Working weights w and working response z for each family.
        if fam == 'Gauss':
            w = np.ones((len(y),1))
            z = y
        if fam == 'NegBin':
            p = np.exp(b0_init + np.add(x.dot(b_init), np.log(offset)))
            s = np.divide( ((k*y+1.0)*p) , (k*p + 1.0)**2 )
            q0 = np.divide( (k*p+1.0) , ((k*y+1.0)*p) )
            w = np.ones((len(y),1))*s
            z = b0_init + np.add(x.dot(b_init), np.subtract(y,p)*q0)
        if fam == 'Logit':
            p = self.sigmoid(b0_init + np.dot(x,b_init))
            s = np.multiply( p, (1.0-p) )
            q0 = np.divide( (y-p) , s )
            w = np.ones((len(y),1))*s
            z = b0_init + np.add(x.dot(b_init), q0)
        if fam == 'Poisson':
            p = np.exp(b0_init + np.add(x.dot(b_init), np.log(offset)))
            q0 = np.divide( (y-p) , p )
            w = np.ones((len(y),1))*p
            z = b0_init + np.add(x.dot(b_init), q0)
        while tol_chk >= tol and npass<1000:
            npass+=1
            # Weighted intercept given the current betas.
            b0 = np.dot( w.T, np.subtract(z, np.dot(x,b))) / np.sum(w)
            if x.size != 0:
                for ii in range(0,n):
                    xi = x[:,[ii]]
                    # Partial residual regressed on coordinate ii.
                    b[ii] = np.dot(xi.T, ( w*(np.subtract(z, np.dot(x,b)) - b0 + xi*b[ii]) ) )/m
                    f = np.abs(b[ii]) - alpha*lam
                    st = np.sign(b[ii]) * (np.abs(f) + f)/2.0 ## SoftThreshHolding
                    # Elastic-net denominator: weighted curvature + ridge part.
                    b[ii] = np.divide(st , np.add( np.dot(xi.T, (w*xi))/m , (1.0-alpha)*lam ))
            # Convergence measured on the full (intercept + beta) update.
            tol_chk = np.linalg.norm(np.subtract(b0+b, b0_init+b_init))
            b_init[:] = b[:]
            b0_init[:] = b0[:]
        return b0, b, npass
#%% COST FUNC==============================================================
## Not really in use with the particular Coordinate Descent being used but still a resource.
def cost(self, x, y, b0, b, lam, k=1, offset=1.0, alpha=1, fam='NegBin'):
''' cost function * no longer used but useful if needed '''
m,n=np.shape(x)
reg = lam*alpha*np.sum(np.abs(b)) + lam*(1.0-alpha)*np.linalg.norm(b)
j = -self.devi(x,y,b0,b,k,offset,fam)/m
return (j + reg)
#%% DEVIANCE===============================================================
    def devi(self, x, y, b0, b, k=1, offset=1.0, fam='NegBin'):
        """Model "deviance" (-2 log-likelihood style score) for one family.

        Exactly one family branch is expected to match ``fam``; an
        unrecognized family leaves ``L`` unbound and raises at the return.

        NOTE(review): the Poisson branch uses ``y*mu`` where the Poisson
        log-likelihood is normally ``y*log(mu) - mu`` -- confirm intent.
        NOTE(review): ``offset.all()`` assumes offset is an ndarray; the
        scalar default ``offset=1.0`` would raise AttributeError here.
        The Gauss branch returns 0.5 * RSS, not a -2LL-scaled deviance.
        """
        m,n=np.shape(x)
        if fam == 'NegBin':
            mu = np.array(np.exp(b0 + x.dot(b) + np.log(offset)), ndmin=2)
            # Negative-binomial log-likelihood per observation.
            LL = gammaln(y + 1/k) - gammaln(1/k) - gammaln(y + 1) - (y + 1/k)*np.log(1 + k*mu) + y*np.log(k) + y*np.log(mu)
            L = -2.0*np.sum(LL)
        if fam == 'Poisson':
            if offset.all() == 1.0:
                mu = np.array(np.exp(b0 + x.dot(b) + np.log(offset)), ndmin=2)
                L = -2.0*np.sum(y*mu - gammaln(y+1))
            else:
                mu = np.array(np.exp(b0 + x.dot(b)), ndmin=2)
                L = -2.0*( (y/offset).T * mu.T * (1/offset) )
        if fam == 'Gauss':
            res = np.subtract(y, x.dot(b) + b0)
            L = 0.5*np.dot(res.T,res)
        if fam == 'Logit':
            mu = np.array(self.sigmoid(b0 + x.dot(b)), ndmin=2)
            # np.where guards log(0) at y == 0 and y == 1.
            L = -2.0*np.sum( np.add( np.where(y>0, y*np.log(mu), 0), np.where(y<1, (1.0-y)*np.log(1.0-mu), 0) ))
        return (L)
def devi_stack(self, x, y, b0, b, k=1, offset=1, fam='NegBin'):
""" deviance calculation for the stacked multivariate target model """
m,n=np.shape(x)
if fam == 'NegBin':
mu = np.exp(b0 + x.dot(b) + np.log(offset))
LL = gammaln(y + 1/k) - gammaln(1/k) - gammaln(y + 1) - (y + 1/k)*np.log(1 + k*mu) + y*np.log(k) + y*np.log(mu)
L = -2.0*np.sum(LL, axis=0)
if fam == 'Poisson':
if offset.all() == 1.0:
mu = np.array(np.exp(b0 + x.dot(b) + np.log(offset)))
L = -2.0*np.sum(y*mu - gammaln(y+1), axis=0)
else:
mu = np.array(np.exp(b0 + x.dot(b)))
L = -2.0*( (y/offset).T * mu.T * (1/offset) )
if fam == 'Gauss':
LL = np.subtract(y, x.dot(b) + b0)
L = 0.5/len(y) * LL.T.dot(LL)
if fam == 'Logit':
mu = self.sigmoid(b0 + x.dot(b))
L = -2.0*np.sum( np.add( np.where(y>0, y*np.log(mu), 0),
np.where(y<1, (1.0-y)*np.log(1.0-mu), 0) ), axis=0)
return (L)
'''
*************************
END
*************************
'''
| [
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"numpy.shape",
"numpy.mean",
"numpy.linalg.norm",
"numpy.exp",
"numpy.multiply",
"numpy.copy",
"numpy.std",
"numpy.insert",
"numpy.reshape",
"numpy.matlib.repmat",
"numpy.divide",
"numpy.si... | [((3196, 3241), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(True)', 'with_std': '(True)'}), '(with_mean=True, with_std=True)\n', (3210, 3241), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4592, 4608), 'numpy.shape', 'np.shape', (['self.x'], {}), '(self.x)\n', (4600, 4608), True, 'import numpy as np\n'), ((4627, 4643), 'numpy.shape', 'np.shape', (['self.y'], {}), '(self.y)\n', (4635, 4643), True, 'import numpy as np\n'), ((6935, 6946), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (6943, 6946), True, 'import numpy as np\n'), ((7873, 7884), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (7881, 7884), True, 'import numpy as np\n'), ((7903, 7914), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (7911, 7914), True, 'import numpy as np\n'), ((7935, 7952), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (7943, 7952), True, 'import numpy as np\n'), ((9022, 9042), 'numpy.zeros', 'np.zeros', (['(nx, minL)'], {}), '((nx, minL))\n', (9030, 9042), True, 'import numpy as np\n'), ((9061, 9080), 'numpy.zeros', 'np.zeros', (['(1, minL)'], {}), '((1, minL))\n', (9069, 9080), True, 'import numpy as np\n'), ((9095, 9114), 'numpy.zeros', 'np.zeros', (['(1, minL)'], {}), '((1, minL))\n', (9103, 9114), True, 'import numpy as np\n'), ((9132, 9152), 'numpy.zeros', 'np.zeros', (['(minL, my)'], {}), '((minL, my))\n', (9140, 9152), True, 'import numpy as np\n'), ((9175, 9194), 'numpy.zeros', 'np.zeros', (['(minL, 1)'], {}), '((minL, 1))\n', (9183, 9194), True, 'import numpy as np\n'), ((9213, 9232), 'numpy.zeros', 'np.zeros', (['(minL, 1)'], {}), '((minL, 1))\n', (9221, 9232), True, 'import numpy as np\n'), ((11268, 11304), 'numpy.sum', 'np.sum', (['(betas[:, min_errlm_idx] != 0)'], {}), '(betas[:, min_errlm_idx] != 0)\n', (11274, 11304), True, 'import numpy as np\n'), ((13274, 13285), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (13282, 13285), True, 'import numpy as np\n'), ((13330, 13346), 
'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (13338, 13346), True, 'import numpy as np\n'), ((15347, 15358), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (15355, 15358), True, 'import numpy as np\n'), ((15728, 15739), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (15736, 15739), True, 'import numpy as np\n'), ((16856, 16867), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (16864, 16867), True, 'import numpy as np\n'), ((3350, 3361), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3358, 3361), True, 'import numpy as np\n'), ((3648, 3659), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3656, 3659), True, 'import numpy as np\n'), ((4254, 4275), 'numpy.ones', 'np.ones', (['self.y.shape'], {}), '(self.y.shape)\n', (4261, 4275), True, 'import numpy as np\n'), ((4811, 4834), 'numpy.mean', 'np.mean', (['self.y'], {'axis': '(0)'}), '(self.y, axis=0)\n', (4818, 4834), True, 'import numpy as np\n'), ((5031, 5062), 'numpy.matlib.repmat', 'np.matlib.repmat', (['self.x', 'ny', '(1)'], {}), '(self.x, ny, 1)\n', (5047, 5062), True, 'import numpy as np\n'), ((5085, 5128), 'numpy.reshape', 'np.reshape', (['self.y', '(my * ny, 1)'], {'order': '"""F"""'}), "(self.y, (my * ny, 1), order='F')\n", (5095, 5128), True, 'import numpy as np\n'), ((5149, 5197), 'numpy.reshape', 'np.reshape', (['self.offset', '(my * ny, 1)'], {'order': '"""F"""'}), "(self.offset, (my * ny, 1), order='F')\n", (5159, 5197), True, 'import numpy as np\n'), ((6160, 6184), 'numpy.array', 'np.array', (['manual_lam_seq'], {}), '(manual_lam_seq)\n', (6168, 6184), True, 'import numpy as np\n'), ((7501, 7516), 'numpy.log', 'np.log', (['lam_max'], {}), '(lam_max)\n', (7507, 7516), True, 'import numpy as np\n'), ((7518, 7533), 'numpy.log', 'np.log', (['lam_min'], {}), '(lam_min)\n', (7524, 7533), True, 'import numpy as np\n'), ((7563, 7590), 'numpy.insert', 'np.insert', (['lams_log', '(0)', '(-10)'], {}), '(lams_log, 0, -10)\n', (7572, 7590), True, 'import numpy as np\n'), ((8332, 8350), 
'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (8339, 8350), True, 'import numpy as np\n'), ((8512, 8530), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (8519, 8530), True, 'import numpy as np\n'), ((8553, 8574), 'numpy.log', 'np.log', (['(p0 / (1 - p0))'], {}), '(p0 / (1 - p0))\n', (8559, 8574), True, 'import numpy as np\n'), ((9695, 9714), 'numpy.array', 'np.array', (['X[:, nzb]'], {}), '(X[:, nzb])\n', (9703, 9714), True, 'import numpy as np\n'), ((9735, 9756), 'numpy.array', 'np.array', (['b_init[nzb]'], {}), '(b_init[nzb])\n', (9743, 9756), True, 'import numpy as np\n'), ((9897, 9908), 'numpy.copy', 'np.copy', (['b0'], {}), '(b0)\n', (9904, 9908), True, 'import numpy as np\n'), ((9931, 9941), 'numpy.copy', 'np.copy', (['k'], {}), '(k)\n', (9938, 9941), True, 'import numpy as np\n'), ((10780, 10796), 'numpy.copy', 'np.copy', (['b0_init'], {}), '(b0_init)\n', (10787, 10796), True, 'import numpy as np\n'), ((10820, 10835), 'numpy.copy', 'np.copy', (['k_init'], {}), '(k_init)\n', (10827, 10835), True, 'import numpy as np\n'), ((11540, 11576), 'numpy.sum', 'np.sum', (['(betas[:, min_errlm_idx] != 0)'], {}), '(betas[:, min_errlm_idx] != 0)\n', (11546, 11576), True, 'import numpy as np\n'), ((12026, 12043), 'numpy.abs', 'np.abs', (['(k - k_old)'], {}), '(k - k_old)\n', (12032, 12043), True, 'import numpy as np\n'), ((12071, 12081), 'numpy.copy', 'np.copy', (['k'], {}), '(k)\n', (12078, 12081), True, 'import numpy as np\n'), ((13551, 13599), 'numpy.divide', 'np.divide', (['((k * y + 1.0) * p)', '((k * p + 1.0) ** 2)'], {}), '((k * y + 1.0) * p, (k * p + 1.0) ** 2)\n', (13560, 13599), True, 'import numpy as np\n'), ((13614, 13655), 'numpy.divide', 'np.divide', (['(k * p + 1.0)', '((k * y + 1.0) * p)'], {}), '(k * p + 1.0, (k * y + 1.0) * p)\n', (13623, 13655), True, 'import numpy as np\n'), ((13865, 13888), 'numpy.multiply', 'np.multiply', (['p', '(1.0 - p)'], {}), '(p, 1.0 - p)\n', (13876, 13888), True, 'import numpy as 
np\n'), ((13910, 13929), 'numpy.divide', 'np.divide', (['(y - p)', 's'], {}), '(y - p, s)\n', (13919, 13929), True, 'import numpy as np\n'), ((14148, 14167), 'numpy.divide', 'np.divide', (['(y - p)', 'p'], {}), '(y - p, p)\n', (14157, 14167), True, 'import numpy as np\n'), ((3721, 3742), 'numpy.where', 'np.where', (['(y > 0)', '(1)', '(0)'], {}), '(y > 0, 1, 0)\n', (3729, 3742), True, 'import numpy as np\n'), ((3814, 3830), 'numpy.shape', 'np.shape', (['self.y'], {}), '(self.y)\n', (3822, 3830), True, 'import numpy as np\n'), ((4011, 4026), 'numpy.size', 'np.size', (['offset'], {}), '(offset)\n', (4018, 4026), True, 'import numpy as np\n'), ((4723, 4760), 'numpy.mean', 'np.mean', (['(self.y / self.offset)'], {'axis': '(0)'}), '(self.y / self.offset, axis=0)\n', (4730, 4760), True, 'import numpy as np\n'), ((7189, 7198), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (7195, 7198), True, 'import numpy as np\n'), ((8032, 8056), 'numpy.mean', 'np.mean', (['(y / ofs)'], {'axis': '(0)'}), '(y / ofs, axis=0)\n', (8039, 8056), True, 'import numpy as np\n'), ((12540, 12554), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (12546, 12554), True, 'import numpy as np\n'), ((12984, 12994), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (12990, 12994), True, 'import numpy as np\n'), ((14396, 14405), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (14402, 14405), True, 'import numpy as np\n'), ((14877, 14914), 'numpy.subtract', 'np.subtract', (['(b0 + b)', '(b0_init + b_init)'], {}), '(b0 + b, b0_init + b_init)\n', (14888, 14914), True, 'import numpy as np\n'), ((15420, 15437), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (15434, 15437), True, 'import numpy as np\n'), ((15992, 16002), 'numpy.sum', 'np.sum', (['LL'], {}), '(LL)\n', (15998, 16002), True, 'import numpy as np\n'), ((16447, 16465), 'numpy.dot', 'np.dot', (['res.T', 'res'], {}), '(res.T, res)\n', (16453, 16465), True, 'import numpy as np\n'), ((17101, 17119), 'numpy.sum', 'np.sum', (['LL'], {'axis': 
'(0)'}), '(LL, axis=0)\n', (17107, 17119), True, 'import numpy as np\n'), ((3411, 3422), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (3419, 3422), True, 'import numpy as np\n'), ((4101, 4122), 'numpy.ones', 'np.ones', (['self.y.shape'], {}), '(self.y.shape)\n', (4108, 4122), True, 'import numpy as np\n'), ((4892, 4915), 'numpy.mean', 'np.mean', (['self.y'], {'axis': '(0)'}), '(self.y, axis=0)\n', (4899, 4915), True, 'import numpy as np\n'), ((7308, 7322), 'numpy.dot', 'np.dot', (['x.T', 'y'], {}), '(x.T, y)\n', (7314, 7322), True, 'import numpy as np\n'), ((8764, 8781), 'numpy.isnan', 'np.isnan', (['b0_init'], {}), '(b0_init)\n', (8772, 8781), True, 'import numpy as np\n'), ((10138, 10165), 'numpy.subtract', 'np.subtract', (['dev', 'model_dev'], {}), '(dev, model_dev)\n', (10149, 10165), True, 'import numpy as np\n'), ((10417, 10444), 'numpy.subtract', 'np.subtract', (['dev', 'model_dev'], {}), '(dev, model_dev)\n', (10428, 10444), True, 'import numpy as np\n'), ((13830, 13847), 'numpy.dot', 'np.dot', (['x', 'b_init'], {}), '(x, b_init)\n', (13836, 13847), True, 'import numpy as np\n'), ((15391, 15400), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (15397, 15400), True, 'import numpy as np\n'), ((15959, 15969), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (15965, 15969), True, 'import numpy as np\n'), ((16938, 16952), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (16944, 16952), True, 'import numpy as np\n'), ((17068, 17078), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (17074, 17078), True, 'import numpy as np\n'), ((4190, 4211), 'numpy.ones', 'np.ones', (['self.y.shape'], {}), '(self.y.shape)\n', (4197, 4211), True, 'import numpy as np\n'), ((4918, 4941), 'numpy.mean', 'np.mean', (['self.y'], {'axis': '(0)'}), '(self.y, axis=0)\n', (4925, 4941), True, 'import numpy as np\n'), ((7101, 7115), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (7107, 7115), True, 'import numpy as np\n'), ((10263, 10274), 'numpy.log', 'np.log', (['ofs'], {}), 
'(ofs)\n', (10269, 10274), True, 'import numpy as np\n'), ((11220, 11238), 'numpy.nanmin', 'np.nanmin', (['mod_err'], {}), '(mod_err)\n', (11229, 11238), True, 'import numpy as np\n'), ((13517, 13531), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (13523, 13531), True, 'import numpy as np\n'), ((13741, 13758), 'numpy.subtract', 'np.subtract', (['y', 'p'], {}), '(y, p)\n', (13752, 13758), True, 'import numpy as np\n'), ((14112, 14126), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (14118, 14126), True, 'import numpy as np\n'), ((14380, 14392), 'numpy.dot', 'np.dot', (['x', 'b'], {}), '(x, b)\n', (14386, 14392), True, 'import numpy as np\n'), ((14633, 14646), 'numpy.abs', 'np.abs', (['b[ii]'], {}), '(b[ii])\n', (14639, 14646), True, 'import numpy as np\n'), ((15819, 15833), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (15825, 15833), True, 'import numpy as np\n'), ((15945, 15954), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (15951, 15954), True, 'import numpy as np\n'), ((17054, 17063), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (17060, 17063), True, 'import numpy as np\n'), ((5276, 5291), 'numpy.log', 'np.log', (['ofstack'], {}), '(ofstack)\n', (5282, 5291), True, 'import numpy as np\n'), ((5358, 5373), 'numpy.log', 'np.log', (['ofstack'], {}), '(ofstack)\n', (5364, 5373), True, 'import numpy as np\n'), ((5700, 5719), 'numpy.log', 'np.log', (['self.offset'], {}), '(self.offset)\n', (5706, 5719), True, 'import numpy as np\n'), ((5781, 5800), 'numpy.log', 'np.log', (['self.offset'], {}), '(self.offset)\n', (5787, 5800), True, 'import numpy as np\n'), ((14685, 14699), 'numpy.sign', 'np.sign', (['b[ii]'], {}), '(b[ii])\n', (14692, 14699), True, 'import numpy as np\n'), ((15897, 15911), 'scipy.special.gammaln', 'gammaln', (['(y + 1)'], {}), '(y + 1)\n', (15904, 15911), False, 'from scipy.special import psi, gammaln\n'), ((15924, 15942), 'numpy.log', 'np.log', (['(1 + k * mu)'], {}), '(1 + k * mu)\n', (15930, 15942), True, 'import numpy as 
np\n'), ((16124, 16138), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (16130, 16138), True, 'import numpy as np\n'), ((16190, 16204), 'scipy.special.gammaln', 'gammaln', (['(y + 1)'], {}), '(y + 1)\n', (16197, 16204), False, 'from scipy.special import psi, gammaln\n'), ((17006, 17020), 'scipy.special.gammaln', 'gammaln', (['(y + 1)'], {}), '(y + 1)\n', (17013, 17020), False, 'from scipy.special import psi, gammaln\n'), ((17033, 17051), 'numpy.log', 'np.log', (['(1 + k * mu)'], {}), '(1 + k * mu)\n', (17039, 17051), True, 'import numpy as np\n'), ((17241, 17255), 'numpy.log', 'np.log', (['offset'], {}), '(offset)\n', (17247, 17255), True, 'import numpy as np\n'), ((17298, 17312), 'scipy.special.gammaln', 'gammaln', (['(y + 1)'], {}), '(y + 1)\n', (17305, 17312), False, 'from scipy.special import psi, gammaln\n'), ((12684, 12702), 'numpy.log', 'np.log', (['(1 / k + mu)'], {}), '(1 / k + mu)\n', (12690, 12702), True, 'import numpy as np\n'), ((14703, 14712), 'numpy.abs', 'np.abs', (['f'], {}), '(f)\n', (14709, 14712), True, 'import numpy as np\n'), ((14795, 14815), 'numpy.dot', 'np.dot', (['xi.T', '(w * xi)'], {}), '(xi.T, w * xi)\n', (14801, 14815), True, 'import numpy as np\n'), ((15863, 15881), 'scipy.special.gammaln', 'gammaln', (['(y + 1 / k)'], {}), '(y + 1 / k)\n', (15870, 15881), False, 'from scipy.special import psi, gammaln\n'), ((15882, 15896), 'scipy.special.gammaln', 'gammaln', (['(1 / k)'], {}), '(1 / k)\n', (15889, 15896), False, 'from scipy.special import psi, gammaln\n'), ((16612, 16622), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (16618, 16622), True, 'import numpy as np\n'), ((16650, 16666), 'numpy.log', 'np.log', (['(1.0 - mu)'], {}), '(1.0 - mu)\n', (16656, 16666), True, 'import numpy as np\n'), ((16972, 16990), 'scipy.special.gammaln', 'gammaln', (['(y + 1 / k)'], {}), '(y + 1 / k)\n', (16979, 16990), False, 'from scipy.special import psi, gammaln\n'), ((16991, 17005), 'scipy.special.gammaln', 'gammaln', (['(1 / k)'], {}), '(1 / 
k)\n', (16998, 17005), False, 'from scipy.special import psi, gammaln\n'), ((17703, 17713), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (17709, 17713), True, 'import numpy as np\n'), ((17778, 17794), 'numpy.log', 'np.log', (['(1.0 - mu)'], {}), '(1.0 - mu)\n', (17784, 17794), True, 'import numpy as np\n'), ((12633, 12642), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (12639, 12642), True, 'import numpy as np\n'), ((12581, 12595), 'scipy.special.psi', 'psi', (['(y + 1 / k)'], {}), '(y + 1 / k)\n', (12584, 12595), False, 'from scipy.special import psi, gammaln\n'), ((12604, 12614), 'scipy.special.psi', 'psi', (['(1 / k)'], {}), '(1 / k)\n', (12607, 12614), False, 'from scipy.special import psi, gammaln\n'), ((14572, 14584), 'numpy.dot', 'np.dot', (['x', 'b'], {}), '(x, b)\n', (14578, 14584), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import unittest
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.gto import ft_ao
libpbc = lib.load_library('libpbc')
# Two-carbon test molecule with a cc-pVDZ basis, shared by all tests below.
mol = gto.Mole()
mol.atom = '''
C 1.3 .2 .3
C .1 -.1 1.1 '''
mol.basis = 'ccpvdz'
mol.build()
# FFT mesh and a reproducible (seeded) random reciprocal-lattice matrix b.
mesh = (7,9,11)
numpy.random.seed(12)
invh = numpy.diag(numpy.random.random(3))
b = 2*numpy.pi * invh
# Fractional G-vector grid along each mesh dimension.
Gvbase = (numpy.fft.fftfreq(mesh[0], 1./mesh[0]),
          numpy.fft.fftfreq(mesh[1], 1./mesh[1]),
          numpy.fft.fftfreq(mesh[2], 1./mesh[2]))
# Cartesian G vectors and their integer grid indices.
Gv = numpy.dot(lib.cartesian_prod(Gvbase), b)
gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])
def tearDownModule():
    """Release the module-level fixtures once the test run finishes."""
    global mol, Gvbase, Gv, gxyz
    del mol
    del Gvbase
    del Gv
    del gxyz
def ft_ao_o0(mol, Gv):
    """Reference (pure-Python) analytical Fourier transform of the AOs.

    Evaluates the FT of every contracted spherical Gaussian AO in ``mol``
    on the plane-wave grid ``Gv`` via Hermite-style recursions, as a slow
    but explicit check for ``ft_ao.ft_ao``.  Returns an (ngrids, nao)
    complex array.

    BUG FIX: ``dtype=numpy.complex`` relied on the deprecated alias of the
    builtin ``complex`` (removed in NumPy 1.24); uses ``complex`` directly,
    which is behavior-identical.
    """
    nao = mol.nao_nr()
    ngrids = Gv.shape[0]
    aoG = numpy.zeros((nao,ngrids), dtype=complex)
    # Work buffers for the x/y/z polynomial recursions (up to l = 11).
    gx = numpy.empty((12,ngrids), dtype=complex)
    gy = numpy.empty((12,ngrids), dtype=complex)
    gz = numpy.empty((12,ngrids), dtype=complex)
    buf = numpy.empty((64,ngrids), dtype=complex)
    kk = numpy.einsum('ki,ki->k', Gv, Gv)
    i0 = 0
    for ib in range(mol.nbas):
        ci = mol._libcint_ctr_coeff(ib)
        ei = mol.bas_exp(ib)
        li = mol.bas_angular(ib)
        ri = mol.bas_coord(ib)
        ni = ci.shape[1]
        di = (li*2+1) * ni
        nfi = (li+1)*(li+2)//2
        # Phase factor exp(-i G.R) for the shell center R.
        kr = numpy.dot(Gv,ri)
        cs = numpy.exp(-1j*kr)
        buf[:nfi*ni] = 0
        for ip in range(ci.shape[0]):
            ai = ei[ip]
            fac = (numpy.pi/ai)**1.5 * numpy.exp(-.25/ai*kk)
            gx[0] = 1
            gy[0] = 1
            gz[0] = cs * fac
            if li > 0:
                gx[1] = -1j*Gv[:,0]/(2*ai) * gx[0]
                gy[1] = -1j*Gv[:,1]/(2*ai) * gy[0]
                gz[1] = -1j*Gv[:,2]/(2*ai) * gz[0]
                # Upward recursion in the polynomial order for each axis.
                for m in range(1, li):
                    gx[m+1] = m/(2*ai) * gx[m-1] - 1j*Gv[:,0]/(2*ai) * gx[m]
                    gy[m+1] = m/(2*ai) * gy[m-1] - 1j*Gv[:,1]/(2*ai) * gy[m]
                    gz[m+1] = m/(2*ai) * gz[m-1] - 1j*Gv[:,2]/(2*ai) * gz[m]
            # Accumulate the contracted Cartesian components.
            for m,(ix,iy,iz) in enumerate(loop_cart(li)):
                val = gx[ix] * gy[iy] * gz[iz]
                for i, cip in enumerate(ci[ip]):
                    buf[i*nfi+m] += cip*val
        # Cartesian -> spherical transformation for this shell.
        ti = c2s_bra(li, numpy.eye(nfi)).T
        tmp1 = numpy.empty((di,ngrids), dtype=complex)
        for i in range(ni):
            tmp1[i*(li*2+1):(i+1)*(li*2+1)] = \
                numpy.einsum('pi,px->ix', ti, buf[i*nfi:(i+1)*nfi])
        aoG[i0:i0+di] += tmp1
        i0 += di
    return aoG.T
def loop_cart(l):
    """Yield the Cartesian exponent triples (ix, iy, iz) with
    ix + iy + iz == l, in descending-x then descending-y order
    (the conventional Cartesian GTO component ordering)."""
    for ix in range(l, -1, -1):
        for iy in range(l - ix, -1, -1):
            yield ix, iy, l - ix - iy
def c2s_bra(l, gcart):
    """Transform Cartesian components to real spherical harmonics.

    l = 0 and l = 1 are simple scalings by the corresponding spherical
    normalization constants; higher l is delegated to the libcgto C
    routine CINTc2s_ket_sph, writing into a (2l+1, ncol) array.
    """
    if l == 0:
        return gcart * 0.282094791773878143
    if l == 1:
        return gcart * 0.488602511902919921
    ncol = gcart.shape[1]
    gsph = numpy.empty((l*2+1, ncol))
    fc2s = gto.moleintor.libcgto.CINTc2s_ket_sph
    fc2s(gsph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(ncol),
         gcart.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))
    return gsph
def finger(a):
    """Deterministic scalar fingerprint of an array: the flattened array
    dotted with cos(0), cos(1), ... -- used for regression checks."""
    flat = a.ravel()
    weights = numpy.cos(numpy.arange(flat.size))
    return numpy.dot(flat, weights)
class KnownValues(unittest.TestCase):
    """Regression tests comparing ft_ao against the pure-Python reference
    ft_ao_o0 and against previously recorded fingerprint values."""
    def test_ft_ao1(self):
        """ft_ao matches the reference, with and without the (b, gxyz, Gvbase) fast path."""
        ref = ft_ao_o0(mol, Gv)
        dat = ft_ao.ft_ao(mol, Gv)
        self.assertTrue(numpy.allclose(ref, dat))
        dat = ft_ao.ft_ao(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
        self.assertTrue(numpy.allclose(ref, dat))
    def test_ft_ao2(self):
        """Same checks with a different lattice b, plus the Cartesian-GTO variant."""
        numpy.random.seed(12)
        invh = numpy.random.random(3) + numpy.eye(3) * 2.5
        b = 2*numpy.pi * invh
        Gv = numpy.dot(lib.cartesian_prod(Gvbase), b)
        ref = ft_ao_o0(mol, Gv)
        dat = ft_ao.ft_ao(mol, Gv)
        self.assertTrue(numpy.allclose(ref, dat))
        mol1 = mol.copy()
        mol1.cart = True
        ref = ft_ao.ft_ao(mol1, Gv)
        dat = ft_ao.ft_ao(mol1, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
        self.assertTrue(numpy.allclose(ref, dat))
    def test_ft_aopair1(self):
        """AO-pair FT fingerprints; the s2 packed form must expand to the full matrix."""
        dat = ft_ao.ft_aopair(mol, Gv)
        self.assertAlmostEqual(finger(dat), (-5.9794759129252348+8.07254562525371j), 9)
        dat_s2 = ft_ao.ft_aopair(mol, Gv, aosym='s2')
        nao = dat.shape[-1]
        # Unpack the lower-triangular s2 storage into the full square form.
        for i in range(nao):
            for j in range(i+1):
                dat[:,i,j] = dat[:,j,i] = dat_s2[:,i*(i+1)//2+j]
        self.assertAlmostEqual(finger(dat), (-5.9794759129252348+8.07254562525371j), 9)
        dat1 = ft_ao.ft_aopair(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
        self.assertAlmostEqual(finger(dat1), (-5.9794759129252348+8.07254562525371j), 9)
    def test_ft_aopair2(self):
        """AO-pair FT fingerprints on an alternative lattice b."""
        numpy.random.seed(12)
        invh = numpy.random.random(3) + numpy.eye(3) * 2.5
        b = 2*numpy.pi * invh
        Gv = numpy.dot(lib.cartesian_prod(Gvbase), b)
        dat = ft_ao.ft_aopair(mol, Gv)
        self.assertAlmostEqual(finger(dat), (-3.1468496579780125-0.019209667673850885j), 9)
        dat1 = ft_ao.ft_aopair(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)
        self.assertAlmostEqual(finger(dat1), (-3.1468496579780125-0.019209667673850885j), 9)
    def test_ft_aopair_pdotp(self):
        """Fingerprint of the p-dot-p kinetic-style AO-pair integral."""
        dat = ft_ao.ft_aopair(mol, Gv, intor='GTO_ft_pdotp_sph')
        self.assertAlmostEqual(finger(dat), (-80.69687735727976+69.239798150854909j), 9)
    def test_ft_aopair_pxp(self):
        """Fingerprint of the 3-component p-cross-p AO-pair integral."""
        dat = ft_ao.ft_aopair(mol, Gv, intor='GTO_ft_pxp_sph', comp=3)
        self.assertAlmostEqual(finger(dat), (3.7490985032017079+43.665863070814687j), 8)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    print('Full Tests for ft_ao')
    unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"pyscf.gto.Mole",
"numpy.eye",
"ctypes.c_int",
"numpy.empty",
"numpy.allclose",
"numpy.zeros",
"numpy.einsum",
"numpy.fft.fftfreq",
"numpy.random.random",
"pyscf.gto.ft_ao.ft_ao",
"numpy.exp",
"pyscf.lib.load_library",
"numpy.arange",
"numpy.dot",
... | [((759, 785), 'pyscf.lib.load_library', 'lib.load_library', (['"""libpbc"""'], {}), "('libpbc')\n", (775, 785), False, 'from pyscf import lib\n'), ((792, 802), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (800, 802), False, 'from pyscf import gto\n'), ((920, 941), 'numpy.random.seed', 'numpy.random.seed', (['(12)'], {}), '(12)\n', (937, 941), False, 'import numpy\n'), ((960, 982), 'numpy.random.random', 'numpy.random.random', (['(3)'], {}), '(3)\n', (979, 982), False, 'import numpy\n'), ((1016, 1057), 'numpy.fft.fftfreq', 'numpy.fft.fftfreq', (['mesh[0]', '(1.0 / mesh[0])'], {}), '(mesh[0], 1.0 / mesh[0])\n', (1033, 1057), False, 'import numpy\n'), ((1066, 1107), 'numpy.fft.fftfreq', 'numpy.fft.fftfreq', (['mesh[1]', '(1.0 / mesh[1])'], {}), '(mesh[1], 1.0 / mesh[1])\n', (1083, 1107), False, 'import numpy\n'), ((1116, 1157), 'numpy.fft.fftfreq', 'numpy.fft.fftfreq', (['mesh[2]', '(1.0 / mesh[2])'], {}), '(mesh[2], 1.0 / mesh[2])\n', (1133, 1157), False, 'import numpy\n'), ((1171, 1197), 'pyscf.lib.cartesian_prod', 'lib.cartesian_prod', (['Gvbase'], {}), '(Gvbase)\n', (1189, 1197), False, 'from pyscf import lib\n'), ((1437, 1484), 'numpy.zeros', 'numpy.zeros', (['(nao, ngrids)'], {'dtype': 'numpy.complex'}), '((nao, ngrids), dtype=numpy.complex)\n', (1448, 1484), False, 'import numpy\n'), ((1493, 1539), 'numpy.empty', 'numpy.empty', (['(12, ngrids)'], {'dtype': 'numpy.complex'}), '((12, ngrids), dtype=numpy.complex)\n', (1504, 1539), False, 'import numpy\n'), ((1548, 1594), 'numpy.empty', 'numpy.empty', (['(12, ngrids)'], {'dtype': 'numpy.complex'}), '((12, ngrids), dtype=numpy.complex)\n', (1559, 1594), False, 'import numpy\n'), ((1603, 1649), 'numpy.empty', 'numpy.empty', (['(12, ngrids)'], {'dtype': 'numpy.complex'}), '((12, ngrids), dtype=numpy.complex)\n', (1614, 1649), False, 'import numpy\n'), ((1659, 1705), 'numpy.empty', 'numpy.empty', (['(64, ngrids)'], {'dtype': 'numpy.complex'}), '((64, ngrids), dtype=numpy.complex)\n', (1670, 1705), False, 
'import numpy\n'), ((1714, 1746), 'numpy.einsum', 'numpy.einsum', (['"""ki,ki->k"""', 'Gv', 'Gv'], {}), "('ki,ki->k', Gv, Gv)\n", (1726, 1746), False, 'import numpy\n'), ((6312, 6327), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6325, 6327), False, 'import unittest\n'), ((2019, 2036), 'numpy.dot', 'numpy.dot', (['Gv', 'ri'], {}), '(Gv, ri)\n', (2028, 2036), False, 'import numpy\n'), ((2049, 2070), 'numpy.exp', 'numpy.exp', (['(-1.0j * kr)'], {}), '(-1.0j * kr)\n', (2058, 2070), False, 'import numpy\n'), ((2993, 3039), 'numpy.empty', 'numpy.empty', (['(di, ngrids)'], {'dtype': 'numpy.complex'}), '((di, ngrids), dtype=numpy.complex)\n', (3004, 3039), False, 'import numpy\n'), ((4029, 4049), 'pyscf.gto.ft_ao.ft_ao', 'ft_ao.ft_ao', (['mol', 'Gv'], {}), '(mol, Gv)\n', (4040, 4049), False, 'from pyscf.gto import ft_ao\n'), ((4115, 4166), 'pyscf.gto.ft_ao.ft_ao', 'ft_ao.ft_ao', (['mol', 'Gv'], {'b': 'b', 'gxyz': 'gxyz', 'Gvbase': 'Gvbase'}), '(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)\n', (4126, 4166), False, 'from pyscf.gto import ft_ao\n'), ((4253, 4274), 'numpy.random.seed', 'numpy.random.seed', (['(12)'], {}), '(12)\n', (4270, 4274), False, 'import numpy\n'), ((4464, 4484), 'pyscf.gto.ft_ao.ft_ao', 'ft_ao.ft_ao', (['mol', 'Gv'], {}), '(mol, Gv)\n', (4475, 4484), False, 'from pyscf.gto import ft_ao\n'), ((4601, 4622), 'pyscf.gto.ft_ao.ft_ao', 'ft_ao.ft_ao', (['mol1', 'Gv'], {}), '(mol1, Gv)\n', (4612, 4622), False, 'from pyscf.gto import ft_ao\n'), ((4637, 4689), 'pyscf.gto.ft_ao.ft_ao', 'ft_ao.ft_ao', (['mol1', 'Gv'], {'b': 'b', 'gxyz': 'gxyz', 'Gvbase': 'Gvbase'}), '(mol1, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)\n', (4648, 4689), False, 'from pyscf.gto import ft_ao\n'), ((4786, 4810), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {}), '(mol, Gv)\n', (4801, 4810), False, 'from pyscf.gto import ft_ao\n'), ((4917, 4953), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {'aosym': '"""s2"""'}), "(mol, Gv, aosym='s2')\n", (4932, 4953), 
False, 'from pyscf.gto import ft_ao\n'), ((5213, 5268), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {'b': 'b', 'gxyz': 'gxyz', 'Gvbase': 'Gvbase'}), '(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)\n', (5228, 5268), False, 'from pyscf.gto import ft_ao\n'), ((5398, 5419), 'numpy.random.seed', 'numpy.random.seed', (['(12)'], {}), '(12)\n', (5415, 5419), False, 'import numpy\n'), ((5577, 5601), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {}), '(mol, Gv)\n', (5592, 5601), False, 'from pyscf.gto import ft_ao\n'), ((5710, 5765), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {'b': 'b', 'gxyz': 'gxyz', 'Gvbase': 'Gvbase'}), '(mol, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase)\n', (5725, 5765), False, 'from pyscf.gto import ft_ao\n'), ((5910, 5960), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {'intor': '"""GTO_ft_pdotp_sph"""'}), "(mol, Gv, intor='GTO_ft_pdotp_sph')\n", (5925, 5960), False, 'from pyscf.gto import ft_ao\n'), ((6099, 6155), 'pyscf.gto.ft_ao.ft_aopair', 'ft_ao.ft_aopair', (['mol', 'Gv'], {'intor': '"""GTO_ft_pxp_sph"""', 'comp': '(3)'}), "(mol, Gv, intor='GTO_ft_pxp_sph', comp=3)\n", (6114, 6155), False, 'from pyscf.gto import ft_ao\n'), ((3135, 3192), 'numpy.einsum', 'numpy.einsum', (['"""pi,px->ix"""', 'ti', 'buf[i * nfi:(i + 1) * nfi]'], {}), "('pi,px->ix', ti, buf[i * nfi:(i + 1) * nfi])\n", (3147, 3192), False, 'import numpy\n'), ((3603, 3630), 'numpy.empty', 'numpy.empty', (['(l * 2 + 1, m)'], {}), '((l * 2 + 1, m))\n', (3614, 3630), False, 'import numpy\n'), ((3894, 3914), 'numpy.arange', 'numpy.arange', (['a.size'], {}), '(a.size)\n', (3906, 3914), False, 'import numpy\n'), ((4074, 4098), 'numpy.allclose', 'numpy.allclose', (['ref', 'dat'], {}), '(ref, dat)\n', (4088, 4098), False, 'import numpy\n'), ((4191, 4215), 'numpy.allclose', 'numpy.allclose', (['ref', 'dat'], {}), '(ref, dat)\n', (4205, 4215), False, 'import numpy\n'), ((4290, 4312), 'numpy.random.random', 'numpy.random.random', 
(['(3)'], {}), '(3)\n', (4309, 4312), False, 'import numpy\n'), ((4387, 4413), 'pyscf.lib.cartesian_prod', 'lib.cartesian_prod', (['Gvbase'], {}), '(Gvbase)\n', (4405, 4413), False, 'from pyscf import lib\n'), ((4509, 4533), 'numpy.allclose', 'numpy.allclose', (['ref', 'dat'], {}), '(ref, dat)\n', (4523, 4533), False, 'import numpy\n'), ((4714, 4738), 'numpy.allclose', 'numpy.allclose', (['ref', 'dat'], {}), '(ref, dat)\n', (4728, 4738), False, 'import numpy\n'), ((5435, 5457), 'numpy.random.random', 'numpy.random.random', (['(3)'], {}), '(3)\n', (5454, 5457), False, 'import numpy\n'), ((5532, 5558), 'pyscf.lib.cartesian_prod', 'lib.cartesian_prod', (['Gvbase'], {}), '(Gvbase)\n', (5550, 5558), False, 'from pyscf import lib\n'), ((2194, 2220), 'numpy.exp', 'numpy.exp', (['(-0.25 / ai * kk)'], {}), '(-0.25 / ai * kk)\n', (2203, 2220), False, 'import numpy\n'), ((2960, 2974), 'numpy.eye', 'numpy.eye', (['nfi'], {}), '(nfi)\n', (2969, 2974), False, 'import numpy\n'), ((3730, 3745), 'ctypes.c_int', 'ctypes.c_int', (['m'], {}), '(m)\n', (3742, 3745), False, 'import ctypes\n'), ((3799, 3814), 'ctypes.c_int', 'ctypes.c_int', (['l'], {}), '(l)\n', (3811, 3814), False, 'import ctypes\n'), ((4315, 4327), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (4324, 4327), False, 'import numpy\n'), ((5460, 5472), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (5469, 5472), False, 'import numpy\n')] |
import ctypes
import math
import os
import os.path
import typing
from nidigital import enums
import nidigital
from nidigital.history_ram_cycle_information import HistoryRAMCycleInformation
from nitsm.codemoduleapi import SemiconductorModuleContext as SMContext
import nitsm.codemoduleapi
import nitsm.enums
import numpy
import pytest
import nidevtools.digital as dt_dpi
# Presence of a dummy file named "Simulate.driver" next to this module flags
# the suite to run against simulated hardware instead of real instruments.
SIMULATE_HARDWARE = os.path.exists(os.path.join(os.path.dirname(__file__), "Simulate.driver"))

# Available pin-map files; change the index below to switch the map under test.
pin_file_names = ["Rainbow.pinmap", "MonoLithic.pinmap"]
pin_file_name = pin_file_names[0]

# Session options: an empty dict targets real hardware; the simulated run
# models an NI-6571 digital pattern instrument.
OPTIONS = {"Simulate": True, "driver_setup": {"Model": "6571"}} if SIMULATE_HARDWARE else {}
@pytest.fixture
def tsm(standalone_tsm):
    """Yield a Semiconductor Module context with NI-Digital sessions opened.

    Builds on the ``standalone_tsm`` fixture from conftest.py, which supplies
    the digital project files needed for session initialization.  Whether the
    context drives simulated or real hardware is controlled by ``OPTIONS``.
    Sessions are closed again during fixture teardown.
    """
    print("\nTest is running on Simulated driver?", SIMULATE_HARDWARE)
    dt_dpi.initialize_sessions(standalone_tsm, options=OPTIONS)
    yield standalone_tsm
    dt_dpi.close_sessions(standalone_tsm)
@pytest.fixture
def digital_tsm_s(tsm, tests_pins):
    """Return one TSMDigital object per entry of ``tests_pins``.

    LabVIEW "Cluster" equivalent data.  Each entry of ``tests_pins`` may be a
    single pin name (str) or a pin group (list of str); both forms map through
    the same ``pins_to_sessions`` call, so they share one branch.

    Raises:
        TypeError: if an entry is neither a str nor a list.  (The original
            used ``assert False``, which is silently stripped under ``-O``.)
    """
    digital_tsms = []
    for test_pin in tests_pins:
        # A single pin and a pin group are handled identically downstream.
        if isinstance(test_pin, (str, list)):
            digital_tsms.append(dt_dpi.pins_to_sessions(tsm, test_pin))
        else:
            raise TypeError(f"unexpected pin specification: {test_pin!r}")
    return digital_tsms
@pytest.fixture
def digital_ssc_s(digital_tsm_s):
    """Return the flattened list of SSC objects from every TSMDigital object.

    LabVIEW "Array" equivalent data: the per-TSM ``ssc`` collections are
    concatenated into a single flat list.
    """
    return [ssc for tsm_object in digital_tsm_s for ssc in tsm_object.ssc]
@pytest.mark.pin_map(pin_file_name)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
class TestNIDigital:
    """Tests for the NI-Digital APIs/VIs used in the DUT power-on sequence.

    These functions are exercised first because the remainder of the suite
    depends on them working.
    """
    def test_initialize_sessions(self, tsm):
        """This API is used in the init routine; expects one session per instrument."""
        queried_sessions = list(tsm.get_all_nidigital_sessions())
        for session in queried_sessions:
            assert isinstance(session, nidigital.Session)
        # One session must exist for every NI-Digital instrument in the pin map.
        assert len(queried_sessions) == len(tsm.get_all_nidigital_instrument_names())
    def test_pins_to_sessions(self, digital_tsm_s, tests_pins):
        """TSM SSC Digital N Pins To M Sessions"""
        for digital_tsm in digital_tsm_s:
            assert isinstance(digital_tsm, dt_dpi.TSMDigital)
    def test_select_function(self, digital_tsm_s):
        """TSM SSC Digital Select Function
        TODO: add logic to read back whether the selected function was applied."""
        function_to_select = enums.SelectedFunction.DIGITAL
        for tsm in digital_tsm_s:
            tsm.ssc.select_function(function_to_select)
            assert isinstance(tsm, dt_dpi.TSMDigital)
    def test_write_read_static_loop_back_pin_low(self, digital_tsm_s):
        """TSM SSC Digital Write Static
        This test writes data on one pin and reads back on another pin.
        digital_tsm_s[0] is the output pin and digital_tsm_s[1] is the input pin.
        This test may pass on a simulated device since low is the default value.
        Test: write ZERO, read Low.
        """
        for tsm in digital_tsm_s:
            tsm.ssc.select_function(enums.SelectedFunction.DIGITAL)
        # digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.DIGITAL)
        # digital_tsm_s[1].ssc.select_function(enums.SelectedFunction.DIGITAL)
        digital_tsm_s[0].ssc.write_static(enums.WriteStaticPinState.ZERO)
        # digital_tsm_s[0].ssc.write_static(enums.WriteStaticPinState.ZERO)
        # sleep(1)
        per_site_per_pin_data = digital_tsm_s[1].read_static()
        print(per_site_per_pin_data)
        # Every pin on every site must read back logic Low.
        for per_site_data in per_site_per_pin_data:
            for per_pin_data in per_site_data:
                assert isinstance(per_pin_data, enums.PinState)
                assert per_pin_data == enums.PinState.L
    def test_write_read_static_loop_back_pin_high(self, digital_tsm_s):
        """TSM SSC Digital Write Static
        This test writes data on one pin and reads back on another pin.
        digital_tsm_s[0] is the output pin and digital_tsm_s[1] is the input pin.
        Test: write ONE, read High.
        """
        digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.DIGITAL)
        digital_tsm_s[1].ssc.select_function(enums.SelectedFunction.DIGITAL)
        digital_tsm_s[0].ssc.write_static(enums.WriteStaticPinState.ONE)
        per_site_per_pin_data = digital_tsm_s[1].read_static()
        print(per_site_per_pin_data)
        for per_site_data in per_site_per_pin_data:
            for per_pin_data in per_site_data:
                assert isinstance(per_pin_data, enums.PinState)
                assert per_pin_data == enums.PinState.H
    def test_write_read_static_same_pin_low(self, digital_tsm_s):
        """TSM SSC Digital Write Static
        This test writes data on one pin and reads back on the same pin.
        digital_tsm_s[0] is both the output pin and the input pin.
        Test: write ZERO, read Low.
        """
        digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.DIGITAL)
        digital_tsm_s[0].ssc.write_static(enums.WriteStaticPinState.ZERO)
        per_site_per_pin_data = digital_tsm_s[0].read_static()
        for per_site_data in per_site_per_pin_data:
            for per_pin_data in per_site_data:
                assert isinstance(per_pin_data, enums.PinState)
                assert per_pin_data == enums.PinState.L
    def test_write_read_static_same_pin_high(self, digital_tsm_s):
        """TSM SSC Digital Write Static
        This test writes data on one pin and reads back on the same pin.
        digital_tsm_s[0] is both the output pin and the input pin.
        Test: write ONE, read High.
        """
        digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.DIGITAL)
        digital_tsm_s[0].ssc.write_static(enums.WriteStaticPinState.ONE)
        per_site_per_pin_data = digital_tsm_s[0].read_static()
        for per_site_data in per_site_per_pin_data:
            for per_pin_data in per_site_data:
                assert isinstance(per_pin_data, enums.PinState)
                assert per_pin_data == enums.PinState.H
    def test_ppmu_source_voltage_loop_back_pin(self, digital_tsm_s):
        """TSM SSC Digital PPMU Source Voltage.vi
        Sources on pin [0], measures on looped-back pin [1]."""
        digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.PPMU)
        test_voltages = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0]
        for test_voltage in test_voltages:
            digital_tsm_s[0].ssc.ppmu_source_voltage(test_voltage, 0.02)
            per_site_per_pin_measurements = digital_tsm_s[1].ppmu_measure_voltage()
            print(per_site_per_pin_measurements)
            for per_site_measurements in per_site_per_pin_measurements:
                for per_pin_measurement in per_site_measurements:
                    assert isinstance(per_pin_measurement, float)
                    # Measurement must land within +/-0.1 V of the sourced level.
                    assert test_voltage - 0.1 <= per_pin_measurement <= test_voltage + 0.1
    def test_ppmu_source_voltage_same_pin(self, digital_tsm_s):
        """TSM SSC Digital PPMU Source Voltage.vi
        Sources and measures on the same pin [0]."""
        digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.PPMU)
        test_voltages = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0]
        for test_voltage in test_voltages:
            digital_tsm_s[0].ssc.ppmu_source_voltage(test_voltage, 0.02)
            per_site_per_pin_measurements = digital_tsm_s[0].ppmu_measure_voltage()
            print(per_site_per_pin_measurements)
            for per_site_measurements in per_site_per_pin_measurements:
                for per_pin_measurement in per_site_measurements:
                    assert isinstance(per_pin_measurement, float)
                    assert test_voltage - 0.1 <= per_pin_measurement <= test_voltage + 0.1
    def test_burst_pattern_pass_fail(self, tsm, digital_tsm_s):
        """
        TSM SSC Digital Burst Pattern [Pass Fail]
        TSM SSC Digital Apply Levels and Timing
        """
        level = tsm.nidigital_project_levels_file_paths[0]
        timing = tsm.nidigital_project_timing_file_paths[0]
        print(level)
        print(timing)
        print(str(level))
        print(str(timing))
        digital_tsm_s[2].ssc.apply_levels_and_timing(str(level), str(timing))
        per_site_pass = digital_tsm_s[2].burst_pattern_pass_fail("I2C_Write_Loop")
        print(per_site_pass)
        # Every site is expected to pass the I2C write burst.
        for per_pass in per_site_pass:
            assert isinstance(per_pass, bool)
            assert per_pass
    def test_burst_pattern(self, tsm, digital_tsm_s):
        """
        TSM SSC Digital Apply Levels and Timing
        TSM SSC Digital Configure Time Set Period
        """
        level = tsm.nidigital_project_levels_file_paths[0]
        timing = tsm.nidigital_project_timing_file_paths[0]
        print(level)
        print(timing)
        print(str(level))
        print(str(timing))
        digital_tsm_s[2].ssc.apply_levels_and_timing(str(level), str(timing))
        # NOTE(review): the period is configured on digital_tsm_s[0] while the
        # burst runs on digital_tsm_s[2] — confirm this mismatch is intended.
        configured_period = digital_tsm_s[0].ssc.configure_time_set_period("Idle", 40e-6)
        assert math.isclose(configured_period, 40e-6, abs_tol=5e-6)
        digital_tsm_s[2].ssc.burst_pattern("I2C_Read_Loop")
    def test_ppmu_source_voltage_per_site_per_pin(self, digital_tsm_s):
        """
        Test for voltage sourcing per pin and site.
        """
        digital_tsm_s[0].ssc.select_function(enums.SelectedFunction.PPMU)
        test_voltages = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0]
        for test_voltage in test_voltages:
            digital_tsm_s[0].ssc.ppmu_source_voltage(test_voltage, 0.02)
            per_site_per_pin_measurements = digital_tsm_s[1].ppmu_measure_voltage()
            print(per_site_per_pin_measurements)
            for per_site_measurements in per_site_per_pin_measurements:
                for per_pin_measurement in per_site_measurements:
                    assert isinstance(per_pin_measurement, float)
                    assert test_voltage - 0.1 <= per_pin_measurement <= test_voltage + 0.1
    def test_get_properties(self, digital_tsm_s):
        """Verify the session properties match the levels configured by the project files.

        The expected values (voh=1.7, vol=1.6, vih=3.3, vil=3.05e-5, vterm=2.0)
        presumably come from the loaded levels sheet — confirm against the project.
        """
        session_properties = digital_tsm_s[0].ssc.get_properties()
        for session_property in session_properties:
            print("instrument_name")
            assert session_property[0].startswith("DPI")
            print(session_property)
            print("voh")
            assert math.isclose(session_property[1], 1.7, abs_tol=5e-4)
            print("vol")
            assert math.isclose(session_property[2], 1.6, abs_tol=5e-4)
            print("vih")
            assert math.isclose(session_property[3], 3.3, abs_tol=5e-4)
            print("vil")
            assert math.isclose(session_property[4], 3.05e-5, abs_tol=5e-4)
            print("vterm")
            assert math.isclose(session_property[5], 2.0, abs_tol=5e-4)
    def test_write_source_waveform_broadcast(self, digital_tsm_s):
        """TSM SSC Digital Write Source Waveform [Broadcast].vi
        Also writes a site-unique waveform (one row per site) beforehand."""
        digital_tsm_s[0].write_source_waveform_site_unique(
            "I2C_SiteUnique",
            [
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
            ],
            True,
        )
        digital_tsm_s[0].ssc.write_source_waveform_broadcast("I2C_Broadcast", [1, 2, 3, 4, 5], True)
    def test_write_sequencer_register(self, digital_tsm_s):
        """TSM SSC Digital Write Sequencer Register.vi
        Writes flag/register values and reads them back (one entry per instrument)."""
        digital_tsm_s[0].ssc.write_sequencer_flag(enums.SequencerFlag.FLAG1, True)
        digital_tsm_s[0].ssc.write_sequencer_register(enums.SequencerRegister.REGISTER1, 1)
        per_instrument_state = digital_tsm_s[0].ssc.read_sequencer_flag(enums.SequencerFlag.FLAG1)
        assert isinstance(per_instrument_state, list)
        assert numpy.shape(per_instrument_state) == (1,)
        for state in per_instrument_state:
            assert isinstance(state, bool)
        register_values = digital_tsm_s[0].ssc.read_sequencer_register(
            enums.SequencerRegister.REGISTER1
        )
        assert isinstance(register_values, list)
        assert numpy.shape(register_values) == (1,)
        for register_value in register_values:
            assert isinstance(register_value, int)
@nitsm.codemoduleapi.code_module
def open_sessions(tsm: SMContext):
    """TSM code module: open NI-Digital sessions for all pin-map instruments."""
    dt_dpi.initialize_sessions(tsm, options=OPTIONS)
@nitsm.codemoduleapi.code_module
def close_sessions(tsm: SMContext):
    """TSM code module: close every NI-Digital session held by the context."""
    dt_dpi.close_sessions(tsm)
@nitsm.codemoduleapi.code_module
def clock_generation(tsm: SMContext, pins: typing.List[str]):
    """Generate a 25 kHz clock on the first pin group, verify it runs, then abort."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    clock_hz = 25000
    digital.ssc.modify_time_set_for_clock_generation(clock_hz, 0.5, "time_set")
    digital.ssc.clock_generator_generate_clock(clock_hz)
    # While running, every channel session must report the requested frequency.
    for channel_group in digital.ssc.sessions_sites_channels:
        assert channel_group._channels_session.clock_generator_is_running
        assert round(channel_group._channels_session.clock_generator_frequency) == clock_hz
    digital.ssc.clock_generator_abort()
    # After the abort, no channel session may still be generating.
    for channel_group in digital.ssc.sessions_sites_channels:
        assert not channel_group._channels_session.clock_generator_is_running
@nitsm.codemoduleapi.code_module
def configuration(tsm: SMContext, pins: typing.List[str]):
    """Route trigger signals and select the digital function on the first pin group."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    digital.ssc.clear_start_trigger_signal()
    digital.ssc.configure_trigger_signal(dt_dpi.PXI_TRIGGER_LINE.PXI_TRIG0)
    digital.ssc.select_function(enums.SelectedFunction.DIGITAL)
    # Export pattern opcode event 0 onto PXI trigger line 0.
    digital.ssc.export_opcode_trigger_signal(
        dt_dpi.SIGNAL_ID.PATTERN_OPCODE_EVENT0, dt_dpi.PXI_TRIGGER_LINE.PXI_TRIG0
    )
@nitsm.codemoduleapi.code_module
def frequency_measurement_func(tsm: SMContext, pins: typing.List[str]):
    """Measure per-site/per-pin frequency and sanity-check the result shape."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    digital.ssc.frequency_counter_configure_measurement_time(0.5)
    measurements = digital.frequency_counter_measure_frequency()
    assert isinstance(measurements, list)
    print(numpy.shape(measurements))
    # Expected: one site with two pins.
    assert numpy.shape(measurements) == (1, 2)
    for site_measurements in measurements:
        for value in site_measurements:
            assert isinstance(value, float)
@nitsm.codemoduleapi.code_module
def hram(tsm: SMContext, pins: typing.List[str]):
    """Configure History RAM, burst a pattern, stream results, and log them to disk."""
    dpi_tsm = dt_dpi.pins_to_sessions(tsm, pins[0])
    # Trigger HRAM acquisition at the "start_burst" pattern label, capture all cycles.
    hram_configuration = dt_dpi.HRAMConfiguration()
    hram_configuration.trigger_type = enums.HistoryRAMTriggerType.PATTERN_LABEL
    hram_configuration.pattern_label = "start_burst"
    hram_configuration.cycles_to_acquire = enums.HistoryRAMCyclesToAcquire.ALL
    dpi_tsm.ssc.configure_hram(hram_configuration)
    # Read the configuration back and type-check every field of the round trip.
    hram_configuration = dpi_tsm.get_hram_configuration()
    assert isinstance(hram_configuration, dt_dpi.HRAMConfiguration)
    assert isinstance(hram_configuration.finite_samples, bool)
    assert isinstance(hram_configuration.cycles_to_acquire, enums.HistoryRAMCyclesToAcquire)
    assert isinstance(hram_configuration.max_samples_to_acquire_per_site, int)
    assert isinstance(hram_configuration.buffer_size_per_site, int)
    assert isinstance(hram_configuration.pretrigger_samples, int)
    assert isinstance(hram_configuration.trigger_type, enums.HistoryRAMTriggerType)
    assert isinstance(hram_configuration.cycle_number, int)
    assert isinstance(hram_configuration.pattern_label, str)
    assert isinstance(hram_configuration.vector_offset, int)
    assert isinstance(hram_configuration.cycle_offset, int)
    dpi_tsm.ssc.burst_pattern("start_burst")
    dpi_tsm.ssc.wait_until_done()
    # With no pattern failures, streaming yields an empty record per site.
    per_site_cycle_information = dpi_tsm.stream_hram_results()
    for cycle_information in per_site_cycle_information:
        assert not cycle_information
    # Log synthetic cycle data: 3 sites x 2 records, each covering 3 pins of
    # don't-care (X) states.  Output goes to a "\log" folder beside this file
    # (backslash path — Windows-only; confirm if portability matters).
    files_generated = dpi_tsm.log_hram_results(
        [
            [
                HistoryRAMCycleInformation(
                    "start_burst",
                    "time_set",
                    0,
                    0,
                    0,
                    [enums.PinState.X] * 3,
                    [enums.PinState.X] * 3,
                    [False] * 3,
                )
            ]
            * 2
        ]
        * 3,
        "Pattern Name",
        os.path.dirname(os.path.realpath(__file__)) + r"\log",
    )
    for file in files_generated:
        assert isinstance(file, str)
@nitsm.codemoduleapi.code_module
def pattern_actions(tsm: SMContext, pins: typing.List[str]):
    """Burst a pattern and verify pass/fail status and fail counts per site/pin."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    digital.ssc.abort()
    digital.ssc.burst_pattern("start_burst")
    digital.ssc.wait_until_done()
    # Per-site pass/fail of the burst: expect one site.
    per_site_pass = digital.burst_pattern_pass_fail("start_burst")
    assert isinstance(per_site_pass, list)
    print(numpy.shape(per_site_pass))
    assert numpy.shape(per_site_pass) == (1,)
    for site_result in per_site_pass:
        assert isinstance(site_result, bool)
    # Per-site/per-pin fail counts: expect one site with two pins.
    fail_count_table = digital.get_fail_count()
    assert isinstance(fail_count_table, list)
    print(numpy.shape(fail_count_table))
    assert numpy.shape(fail_count_table) == (1, 2)
    for site_counts in fail_count_table:
        for pin_count in site_counts:
            assert isinstance(pin_count, int)
    # Overall site pass/fail query, same shape as the burst result.
    per_site_pass = digital.get_site_pass_fail()
    assert isinstance(per_site_pass, list)
    assert numpy.shape(per_site_pass) == (1,)
    for site_result in per_site_pass:
        assert isinstance(site_result, bool)
@nitsm.codemoduleapi.code_module
def pin_levels_and_timing(tsm: SMContext, pins: typing.List[str]):
    """Apply levels/timing sheets, configure loads/levels/edges, then verify the
    resulting session properties channel by channel."""
    # ctypes.windll.user32.MessageBoxW(None, "niPythonHost Process ID:" + str(os.getpid()), "Attach debugger", 0)
    dpi_tsm = dt_dpi.pins_to_sessions(tsm, pins[0])
    dpi_tsm.ssc.apply_levels_and_timing("PinLevels", "Timing")
    # TDR offsets: 1 ns per site/pin (3 sites), then 1 ns per channel.
    dpi_tsm.apply_tdr_offsets_per_site_per_pin(
        [
            [
                1e-9,
            ]
        ]
        * 3
    )
    dpi_tsm.ssc.apply_tdr_offsets(
        [
            [
                1e-9,
                1e-9,
            ]
        ]
        * 1,
    )
    # Active load: iol=1.5 mA, vcom=1.5 mV, ioh=-1.5 mA.
    dpi_tsm.ssc.configure_active_load(0.0015, 0.0015, -0.0015)
    dpi_tsm.configure_single_level_per_site(dt_dpi.LevelTypeToSet.VIL, [0.0015, 0.0015, 0.0015])
    dpi_tsm.ssc.configure_single_level(dt_dpi.LevelTypeToSet.VIL, 0.0015)
    dpi_tsm.ssc.configure_termination_mode(enums.TerminationMode.HIGH_Z)
    # Compare edges at 40 us, set per site/pin, per site, and globally.
    dpi_tsm.configure_time_set_compare_edge_per_site_per_pin(
        "time_set",
        [
            [
                40e-6,
            ]
        ]
        * 3,
    )
    dpi_tsm.configure_time_set_compare_edge_per_site("time_set", [40e-6, 40e-6, 40e-6])
    dpi_tsm.ssc.configure_time_set_compare_edge("time_set", 40e-6)
    dpi_tsm.ssc.configure_voltage_levels(0.0015, 0.0015, 0.0015, 0.0015, 0.0015)
    configured_period = dpi_tsm.ssc.configure_time_set_period("time_set", 40e-6)
    assert math.isclose(configured_period, 40e-6, abs_tol=5e-6)
    # Every channel session must now reflect the values configured above.
    for ssc in dpi_tsm.ssc.sessions_sites_channels:
        assert math.isclose(ssc._channels_session.active_load_ioh, -0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.active_load_iol, 0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.active_load_vcom, 0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.vih, 0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.vil, 0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.voh, 0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.vol, 0.0015, abs_tol=5e-6)
        assert math.isclose(ssc._channels_session.vterm, 0.0015, abs_tol=5e-6)
        # 1 ns TDR offset expressed in femtoseconds.
        assert ssc._channels_session.tdr_offset.femtoseconds == 1000000
@nitsm.codemoduleapi.code_module
def ppmu(tsm: SMContext, pins: typing.List[str]):
    """Exercise PPMU source/measure in both current and voltage modes."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    digital.ssc.ppmu_configure_aperture_time(0.01)
    digital.ssc.ppmu_configure_current_limit_range(0.01)
    digital.ssc.ppmu_configure_voltage_limits(0.01, 0.01)
    digital.ssc.ppmu_source_current(0.01)
    digital.ssc.ppmu_source_voltage_per_site_per_pin(0.01, [[0.01, 0.01]] * 3)
    digital.ppmu_source_voltage_per_site(0.01, [0.01, 0.01, 0.01])
    digital.ssc.ppmu_source()
    # Current measurement: expect one site with two pins, all floats.
    current_readings = digital.ppmu_measure_current()
    assert isinstance(current_readings, list)
    assert numpy.shape(current_readings) == (1, 2)
    for site_readings in current_readings:
        for reading in site_readings:
            assert isinstance(reading, float)
    digital.ssc.ppmu_source_voltage(0.01, 0.01)
    # Voltage measurement: same shape and element type.
    voltage_readings = digital.ppmu_measure_voltage()
    assert isinstance(voltage_readings, list)
    assert numpy.shape(voltage_readings) == (1, 2)
    for site_readings in voltage_readings:
        for reading in site_readings:
            assert isinstance(reading, float)
@nitsm.codemoduleapi.code_module
def sequencer_flags_and_registers(tsm: SMContext, pins: typing.List[str]):
    """Write a sequencer flag and register, then verify the read-back shapes and types."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    digital.ssc.write_sequencer_flag(enums.SequencerFlag.FLAG1, True)
    digital.ssc.write_sequencer_register(enums.SequencerRegister.REGISTER1, 1)
    # One flag state per instrument; a single instrument is expected.
    flag_states = digital.ssc.read_sequencer_flag(enums.SequencerFlag.FLAG1)
    assert isinstance(flag_states, list)
    assert numpy.shape(flag_states) == (1,)
    for flag_state in flag_states:
        assert isinstance(flag_state, bool)
    # One register value per instrument, integer-typed.
    register_values = digital.ssc.read_sequencer_register(
        enums.SequencerRegister.REGISTER1
    )
    assert isinstance(register_values, list)
    assert numpy.shape(register_values) == (1,)
    for register_value in register_values:
        assert isinstance(register_value, int)
@nitsm.codemoduleapi.code_module
def session_properties_func(tsm: SMContext, pins: typing.List[str]):
    """Check the instrument-name prefix and the five configured level properties."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    for props in digital.ssc.get_properties():
        assert props[0].startswith("DPI")
        # Indices 1..5 are voh, vol, vih, vil and vterm; all were set to 1.5 mV.
        for index in range(1, 6):
            assert math.isclose(props[index], 0.0015, abs_tol=5e-6)
@nitsm.codemoduleapi.code_module
def source_and_capture_waveforms(tsm: SMContext, pins: typing.List[str]):
    """Write source waveforms, burst a capture pattern, and check the captured shape."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    # Site-unique waveform: one row of samples per site (3 sites).
    digital.write_source_waveform_site_unique(
        "SourceWaveform_SiteUnique", [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]], True
    )
    digital.ssc.write_source_waveform_broadcast("SourceWaveform", [1, 2, 3, 4, 5], True)
    digital.ssc.burst_pattern("start_capture")
    # Fetch 2 capture samples per site; expect 3 sites of integers.
    captured = digital.fetch_capture_waveform("CaptureWaveform", 2)
    assert isinstance(captured, list)
    assert numpy.shape(captured) == (3, 2)
    for site_samples in captured:
        for sample in site_samples:
            assert isinstance(sample, int)
@nitsm.codemoduleapi.code_module
def static(tsm: SMContext, pins: typing.List[str]):
    """Drive static ONE via all three write-static variants, then read back."""
    digital = dt_dpi.pins_to_sessions(tsm, pins[0])
    digital.ssc.write_static(enums.WriteStaticPinState.ONE)
    digital.write_static_per_site([enums.WriteStaticPinState.ONE] * 3)
    digital.write_static_per_site_per_pin(
        [[enums.WriteStaticPinState.ONE, enums.WriteStaticPinState.ONE]] * 3
    )
    # Read-back: expect one site with two pins, each a PinState.
    readback = digital.read_static()
    assert isinstance(readback, list)
    print(numpy.shape(readback))
    assert numpy.shape(readback) == (1, 2)
    for site_states in readback:
        for pin_state in site_states:
            assert isinstance(pin_state, enums.PinState)
@nitsm.codemoduleapi.code_module
def misc(tsm: SMContext, pins: typing.List[str]):
    """Exercise site filtering, LUT calculation/application helpers, and publish."""
    dpi_tsm = dt_dpi.pins_to_sessions(tsm, pins[0])
    # Filtering to one site should leave only that site's channels.
    # NOTE(review): the site-0 branch checks ``_pins`` while sites 1 and 2
    # check ``site_list`` — confirm whether the asymmetry is intentional.
    dpi_tsm1 = dt_dpi.filter_sites(dpi_tsm, [0])
    for ssc in dpi_tsm1.ssc.sessions_sites_channels:
        assert ssc._pins == "site0"
    dpi_tsm1 = dt_dpi.filter_sites(dpi_tsm, [1])
    for ssc in dpi_tsm1.ssc.sessions_sites_channels:
        assert ssc.site_list == "site1"
    dpi_tsm1 = dt_dpi.filter_sites(dpi_tsm, [2])
    for ssc in dpi_tsm1.ssc.sessions_sites_channels:
        assert ssc.site_list == "site2"
    dpi_tsm = dt_dpi.pins_to_sessions(tsm, pins[0])
    dpi_tsm.ssc.initiate()
    dpi_tsm.ssc.abort()
    # Instrument-ordered -> site-ordered lookup table, applied to boolean data.
    per_instrument_to_per_site_lut = dpi_tsm.ssc.calculate_per_instrument_to_per_site_lut(
        dpi_tsm.sites
    )
    per_site_data = dt_dpi._apply_lut_per_instrument_to_per_site(
        [False, False, False],
        per_instrument_to_per_site_lut,
        [[False, False, False], [True, True, True]],
    )
    assert len(per_site_data) == len([True, True, True])
    # assert per_site_data == [True, True, True]
    print(per_site_data)
    per_site_data = dt_dpi._apply_lut_per_instrument_to_per_site(
        [[False, False]] * 3,
        per_instrument_to_per_site_lut,
        [[[False, False]] * 3, [[True, True]] * 3],
    )
    # assert per_site_data == [[True, True]] * 3
    print(per_site_data)
    # Instrument-ordered -> site/pin-ordered lookup table.
    per_instrument_to_per_site_per_pin_lut = (
        dpi_tsm.ssc.calculate_per_instrument_to_per_site_per_pin_lut(dpi_tsm.sites, dpi_tsm.pins)
    )
    per_site_per_pin_data = dt_dpi._apply_lut_per_instrument_to_per_site_per_pin(
        [[0, 0], [0, 0], [0, 0]],
        per_instrument_to_per_site_per_pin_lut,
        [[1, 2, 3], [4, 5, 6]],
    )
    # assert per_site_per_pin_data == [[1, 4], [2, 5], [3, 6]]
    print(per_site_per_pin_data)
    # Inverse direction: site-ordered -> instrument-ordered.
    (
        per_site_to_per_instrument_lut,
        _,
        _,
    ) = dpi_tsm.ssc.calculate_per_site_to_per_instrument_lut(dpi_tsm.sites)
    per_instrument_data = dt_dpi._apply_lut_per_site_to_per_instrument(
        [[0, 0, 0], [0, 0, 0]], per_site_to_per_instrument_lut, [1, 2, 3]
    )
    # assert per_instrument_data == [[1, 2, 3], [0, 0, 0]]
    print(per_instrument_data)
    (
        per_site_per_pin_to_per_instrument_lut,
        _,
        _,
    ) = dpi_tsm.ssc.calculate_per_site_per_pin_to_per_instrument_lut(dpi_tsm.sites, dpi_tsm.pins)
    per_instrument_data = dt_dpi._apply_lut_per_site_per_pin_to_per_instrument(
        [[0, 0, 0], [0, 0, 0]],
        per_site_per_pin_to_per_instrument_lut,
        [[1, 4], [2, 5], [3, 6]],
    )
    # assert per_instrument_data == [[1, 2, 3], [4, 5, 6]]
    print(per_instrument_data)
    # Publish float and bool data in both per-site and per-site/per-pin shapes.
    # dpi_tsm.publish([1.0, 1.0, 1.0], "Publish_1")
    dpi_tsm.publish([[1.0, 1.0, 1.0]], "Publish_1")
    dpi_tsm.publish([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]], "Publish_2")
    # dpi_tsm.publish([True, True, True], "Publish_3")
    dpi_tsm.publish([[True, True, True]], "Publish_3")
    dpi_tsm.publish([[True, True], [True, True], [True, True]], "Publish_4")
@nitsm.codemoduleapi.code_module
def initialize_sessions(tsm: SMContext):
    """Open NI-Digital sessions and pre-configure the programmable-group pins."""
    # NOTE(review): this modal Windows dialog (ctypes.windll is Windows-only)
    # blocks execution until dismissed — it is a debugger-attach aid and should
    # probably be removed or gated before unattended runs.
    ctypes.windll.user32.MessageBoxW(
        None,
        "Process: niPythonHost.exe & ID: " + str(os.getpid()),
        "Attach debugger",
        0,
    )
    print(tsm.pin_map_file_path)
    # Query only the pins mapped to NI digital pattern instruments.
    pins = SMContext.get_pin_names(
        tsm, instrument_type_id=nitsm.enums.InstrumentTypeIdConstants.NI_DIGITAL_PATTERN
    )
    print(pins)
    dt_dpi.initialize_sessions(tsm, options=OPTIONS)
    dpi_tsm_i_o = dt_dpi.pins_to_sessions(tsm, ["DPI_PG_Inputs", "DPI_PG_Outputs"])
    dpi_tsm_i_o.ssc.apply_levels_and_timing("I2C_Levels", "I2C_Timing")
    dpi_tsm_i_o.ssc.select_function(dt_dpi.enums.SelectedFunction.DIGITAL)
@nitsm.codemoduleapi.code_module
def configure_pins(tsm: SMContext):
    """Select the digital function on the output pins and drive them to ZERO."""
    outputs = dt_dpi.pins_to_sessions(tsm, ["DPI_PG_Outputs"])
    outputs.ssc.select_function(dt_dpi.enums.SelectedFunction.DIGITAL)
    outputs.ssc.write_static(dt_dpi.enums.WriteStaticPinState.ZERO)
@nitsm.codemoduleapi.code_module
def read_pins(tsm: SMContext):
    """Read and return the static pin states of the input pin group."""
    inputs = dt_dpi.pins_to_sessions(tsm, ["DPI_PG_Inputs"])
    # dpi_tsm_i.ssc.select_function(ni_dt_digital.enums.SelectedFunction.DIGITAL)
    data = inputs.read_static()
    print(data)
    return data
@nitsm.codemoduleapi.code_module
def burst_pattern(tsm: SMContext):
    """Apply I2C levels/timing and burst the read-loop pattern on the SCL/SDA pins."""
    dpi_tsm = dt_dpi.pins_to_sessions(tsm, ["DPI_DO_SCL", "DPI_DO_SDA"])
    dpi_tsm.ssc.apply_levels_and_timing("I2C_Levels", "I2C_Timing")
    # per_site_pass holds one boolean pass/fail entry per active site.
    per_site_pass = dpi_tsm.burst_pattern_pass_fail("I2C_Read_Loop")
    print(per_site_pass)
| [
"os.getpid",
"nidevtools.digital.close_sessions",
"nidevtools.digital._apply_lut_per_site_to_per_instrument",
"nidevtools.digital._apply_lut_per_instrument_to_per_site",
"nidevtools.digital._apply_lut_per_instrument_to_per_site_per_pin",
"nidevtools.digital._apply_lut_per_site_per_pin_to_per_instrument",
... | [((2270, 2304), 'pytest.mark.pin_map', 'pytest.mark.pin_map', (['pin_file_name'], {}), '(pin_file_name)\n', (2289, 2304), False, 'import pytest\n'), ((2306, 2362), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::DeprecationWarning"""'], {}), "('ignore::DeprecationWarning')\n", (2332, 2362), False, 'import pytest\n'), ((1313, 1372), 'nidevtools.digital.initialize_sessions', 'dt_dpi.initialize_sessions', (['standalone_tsm'], {'options': 'OPTIONS'}), '(standalone_tsm, options=OPTIONS)\n', (1339, 1372), True, 'import nidevtools.digital as dt_dpi\n'), ((1402, 1439), 'nidevtools.digital.close_sessions', 'dt_dpi.close_sessions', (['standalone_tsm'], {}), '(standalone_tsm)\n', (1423, 1439), True, 'import nidevtools.digital as dt_dpi\n'), ((13202, 13250), 'nidevtools.digital.initialize_sessions', 'dt_dpi.initialize_sessions', (['tsm'], {'options': 'OPTIONS'}), '(tsm, options=OPTIONS)\n', (13228, 13250), True, 'import nidevtools.digital as dt_dpi\n'), ((13326, 13352), 'nidevtools.digital.close_sessions', 'dt_dpi.close_sessions', (['tsm'], {}), '(tsm)\n', (13347, 13352), True, 'import nidevtools.digital as dt_dpi\n'), ((13464, 13501), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (13487, 13501), True, 'import nidevtools.digital as dt_dpi\n'), ((14131, 14168), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (14154, 14168), True, 'import nidevtools.digital as dt_dpi\n'), ((14609, 14646), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (14632, 14646), True, 'import nidevtools.digital as dt_dpi\n'), ((15307, 15344), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (15330, 15344), True, 'import nidevtools.digital as dt_dpi\n'), ((15370, 15396), 'nidevtools.digital.HRAMConfiguration', 
'dt_dpi.HRAMConfiguration', ([], {}), '()\n', (15394, 15396), True, 'import nidevtools.digital as dt_dpi\n'), ((17434, 17471), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (17457, 17471), True, 'import nidevtools.digital as dt_dpi\n'), ((18657, 18694), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (18680, 18694), True, 'import nidevtools.digital as dt_dpi\n'), ((19847, 19900), 'math.isclose', 'math.isclose', (['configured_period', '(4e-05)'], {'abs_tol': '(5e-06)'}), '(configured_period, 4e-05, abs_tol=5e-06)\n', (19859, 19900), False, 'import math\n'), ((20779, 20816), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (20802, 20816), True, 'import nidevtools.digital as dt_dpi\n'), ((22045, 22082), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (22068, 22082), True, 'import nidevtools.digital as dt_dpi\n'), ((22967, 23004), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (22990, 23004), True, 'import nidevtools.digital as dt_dpi\n'), ((23638, 23675), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (23661, 23675), True, 'import nidevtools.digital as dt_dpi\n'), ((24361, 24398), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (24384, 24398), True, 'import nidevtools.digital as dt_dpi\n'), ((25077, 25114), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (25100, 25114), True, 'import nidevtools.digital as dt_dpi\n'), ((25130, 25163), 'nidevtools.digital.filter_sites', 'dt_dpi.filter_sites', (['dpi_tsm', '[0]'], {}), '(dpi_tsm, [0])\n', (25149, 25163), True, 
'import nidevtools.digital as dt_dpi\n'), ((25268, 25301), 'nidevtools.digital.filter_sites', 'dt_dpi.filter_sites', (['dpi_tsm', '[1]'], {}), '(dpi_tsm, [1])\n', (25287, 25301), True, 'import nidevtools.digital as dt_dpi\n'), ((25410, 25443), 'nidevtools.digital.filter_sites', 'dt_dpi.filter_sites', (['dpi_tsm', '[2]'], {}), '(dpi_tsm, [2])\n', (25429, 25443), True, 'import nidevtools.digital as dt_dpi\n'), ((25552, 25589), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'pins[0]'], {}), '(tsm, pins[0])\n', (25575, 25589), True, 'import nidevtools.digital as dt_dpi\n'), ((25780, 25933), 'nidevtools.digital._apply_lut_per_instrument_to_per_site', 'dt_dpi._apply_lut_per_instrument_to_per_site', (['[False, False, False]', 'per_instrument_to_per_site_lut', '[[False, False, False], [True, True, True]]'], {}), '([False, False, False],\n per_instrument_to_per_site_lut, [[False, False, False], [True, True, True]]\n )\n', (25824, 25933), True, 'import nidevtools.digital as dt_dpi\n'), ((26107, 26253), 'nidevtools.digital._apply_lut_per_instrument_to_per_site', 'dt_dpi._apply_lut_per_instrument_to_per_site', (['([[False, False]] * 3)', 'per_instrument_to_per_site_lut', '[[[False, False]] * 3, [[True, True]] * 3]'], {}), '([[False, False]] * 3,\n per_instrument_to_per_site_lut, [[[False, False]] * 3, [[True, True]] * 3])\n', (26151, 26253), True, 'import nidevtools.digital as dt_dpi\n'), ((26534, 26681), 'nidevtools.digital._apply_lut_per_instrument_to_per_site_per_pin', 'dt_dpi._apply_lut_per_instrument_to_per_site_per_pin', (['[[0, 0], [0, 0], [0, 0]]', 'per_instrument_to_per_site_per_pin_lut', '[[1, 2, 3], [4, 5, 6]]'], {}), '([[0, 0], [0, 0], [0, 0\n ]], per_instrument_to_per_site_per_pin_lut, [[1, 2, 3], [4, 5, 6]])\n', (26586, 26681), True, 'import nidevtools.digital as dt_dpi\n'), ((26974, 27089), 'nidevtools.digital._apply_lut_per_site_to_per_instrument', 'dt_dpi._apply_lut_per_site_to_per_instrument', (['[[0, 0, 0], [0, 0, 0]]', 
'per_site_to_per_instrument_lut', '[1, 2, 3]'], {}), '([[0, 0, 0], [0, 0, 0]],\n per_site_to_per_instrument_lut, [1, 2, 3])\n', (27018, 27089), True, 'import nidevtools.digital as dt_dpi\n'), ((27390, 27536), 'nidevtools.digital._apply_lut_per_site_per_pin_to_per_instrument', 'dt_dpi._apply_lut_per_site_per_pin_to_per_instrument', (['[[0, 0, 0], [0, 0, 0]]', 'per_site_per_pin_to_per_instrument_lut', '[[1, 4], [2, 5], [3, 6]]'], {}), '([[0, 0, 0], [0, 0, 0]],\n per_site_per_pin_to_per_instrument_lut, [[1, 4], [2, 5], [3, 6]])\n', (27442, 27536), True, 'import nidevtools.digital as dt_dpi\n'), ((28295, 28405), 'nitsm.codemoduleapi.SemiconductorModuleContext.get_pin_names', 'SMContext.get_pin_names', (['tsm'], {'instrument_type_id': 'nitsm.enums.InstrumentTypeIdConstants.NI_DIGITAL_PATTERN'}), '(tsm, instrument_type_id=nitsm.enums.\n InstrumentTypeIdConstants.NI_DIGITAL_PATTERN)\n', (28318, 28405), True, 'from nitsm.codemoduleapi import SemiconductorModuleContext as SMContext\n'), ((28435, 28483), 'nidevtools.digital.initialize_sessions', 'dt_dpi.initialize_sessions', (['tsm'], {'options': 'OPTIONS'}), '(tsm, options=OPTIONS)\n', (28461, 28483), True, 'import nidevtools.digital as dt_dpi\n'), ((28502, 28567), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', "['DPI_PG_Inputs', 'DPI_PG_Outputs']"], {}), "(tsm, ['DPI_PG_Inputs', 'DPI_PG_Outputs'])\n", (28525, 28567), True, 'import nidevtools.digital as dt_dpi\n'), ((28802, 28850), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', "['DPI_PG_Outputs']"], {}), "(tsm, ['DPI_PG_Outputs'])\n", (28825, 28850), True, 'import nidevtools.digital as dt_dpi\n'), ((29076, 29123), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', "['DPI_PG_Inputs']"], {}), "(tsm, ['DPI_PG_Inputs'])\n", (29099, 29123), True, 'import nidevtools.digital as dt_dpi\n'), ((29357, 29415), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', "['DPI_DO_SCL', 
'DPI_DO_SDA']"], {}), "(tsm, ['DPI_DO_SCL', 'DPI_DO_SDA'])\n", (29380, 29415), True, 'import nidevtools.digital as dt_dpi\n'), ((540, 565), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (555, 565), False, 'import os\n'), ((9843, 9896), 'math.isclose', 'math.isclose', (['configured_period', '(4e-05)'], {'abs_tol': '(5e-06)'}), '(configured_period, 4e-05, abs_tol=5e-06)\n', (9855, 9896), False, 'import math\n'), ((14884, 14936), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_frequency_measurements'], {}), '(per_site_per_pin_frequency_measurements)\n', (14895, 14936), False, 'import numpy\n'), ((14949, 15001), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_frequency_measurements'], {}), '(per_site_per_pin_frequency_measurements)\n', (14960, 15001), False, 'import numpy\n'), ((17696, 17722), 'numpy.shape', 'numpy.shape', (['per_site_pass'], {}), '(per_site_pass)\n', (17707, 17722), False, 'import numpy\n'), ((17735, 17761), 'numpy.shape', 'numpy.shape', (['per_site_pass'], {}), '(per_site_pass)\n', (17746, 17761), False, 'import numpy\n'), ((17971, 18012), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_fail_counts'], {}), '(per_site_per_pin_fail_counts)\n', (17982, 18012), False, 'import numpy\n'), ((18025, 18066), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_fail_counts'], {}), '(per_site_per_pin_fail_counts)\n', (18036, 18066), False, 'import numpy\n'), ((18319, 18345), 'numpy.shape', 'numpy.shape', (['per_site_pass'], {}), '(per_site_pass)\n', (18330, 18345), False, 'import numpy\n'), ((19967, 20042), 'math.isclose', 'math.isclose', (['ssc._channels_session.active_load_ioh', '(-0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.active_load_ioh, -0.0015, abs_tol=5e-06)\n', (19979, 20042), False, 'import math\n'), ((20057, 20131), 'math.isclose', 'math.isclose', (['ssc._channels_session.active_load_iol', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.active_load_iol, 0.0015, abs_tol=5e-06)\n', (20069, 
20131), False, 'import math\n'), ((20146, 20221), 'math.isclose', 'math.isclose', (['ssc._channels_session.active_load_vcom', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.active_load_vcom, 0.0015, abs_tol=5e-06)\n', (20158, 20221), False, 'import math\n'), ((20236, 20298), 'math.isclose', 'math.isclose', (['ssc._channels_session.vih', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.vih, 0.0015, abs_tol=5e-06)\n', (20248, 20298), False, 'import math\n'), ((20313, 20375), 'math.isclose', 'math.isclose', (['ssc._channels_session.vil', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.vil, 0.0015, abs_tol=5e-06)\n', (20325, 20375), False, 'import math\n'), ((20390, 20452), 'math.isclose', 'math.isclose', (['ssc._channels_session.voh', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.voh, 0.0015, abs_tol=5e-06)\n', (20402, 20452), False, 'import math\n'), ((20467, 20529), 'math.isclose', 'math.isclose', (['ssc._channels_session.vol', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.vol, 0.0015, abs_tol=5e-06)\n', (20479, 20529), False, 'import math\n'), ((20544, 20608), 'math.isclose', 'math.isclose', (['ssc._channels_session.vterm', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(ssc._channels_session.vterm, 0.0015, abs_tol=5e-06)\n', (20556, 20608), False, 'import math\n'), ((21338, 21380), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_measurements'], {}), '(per_site_per_pin_measurements)\n', (21349, 21380), False, 'import numpy\n'), ((21722, 21764), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_measurements'], {}), '(per_site_per_pin_measurements)\n', (21733, 21764), False, 'import numpy\n'), ((22379, 22412), 'numpy.shape', 'numpy.shape', (['per_instrument_state'], {}), '(per_instrument_state)\n', (22390, 22412), False, 'import numpy\n'), ((22692, 22735), 'numpy.shape', 'numpy.shape', (['per_instrument_register_values'], {}), '(per_instrument_register_values)\n', (22703, 22735), False, 'import 
numpy\n'), ((23175, 23231), 'math.isclose', 'math.isclose', (['session_property[1]', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(session_property[1], 0.0015, abs_tol=5e-06)\n', (23187, 23231), False, 'import math\n'), ((23246, 23302), 'math.isclose', 'math.isclose', (['session_property[2]', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(session_property[2], 0.0015, abs_tol=5e-06)\n', (23258, 23302), False, 'import math\n'), ((23317, 23373), 'math.isclose', 'math.isclose', (['session_property[3]', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(session_property[3], 0.0015, abs_tol=5e-06)\n', (23329, 23373), False, 'import math\n'), ((23388, 23444), 'math.isclose', 'math.isclose', (['session_property[4]', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(session_property[4], 0.0015, abs_tol=5e-06)\n', (23400, 23444), False, 'import math\n'), ((23459, 23515), 'math.isclose', 'math.isclose', (['session_property[5]', '(0.0015)'], {'abs_tol': '(5e-06)'}), '(session_property[5], 0.0015, abs_tol=5e-06)\n', (23471, 23515), False, 'import math\n'), ((24097, 24128), 'numpy.shape', 'numpy.shape', (['per_site_waveforms'], {}), '(per_site_waveforms)\n', (24108, 24128), False, 'import numpy\n'), ((24767, 24801), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_data'], {}), '(per_site_per_pin_data)\n', (24778, 24801), False, 'import numpy\n'), ((24814, 24848), 'numpy.shape', 'numpy.shape', (['per_site_per_pin_data'], {}), '(per_site_per_pin_data)\n', (24825, 24848), False, 'import numpy\n'), ((11150, 11204), 'math.isclose', 'math.isclose', (['session_property[1]', '(1.7)'], {'abs_tol': '(0.0005)'}), '(session_property[1], 1.7, abs_tol=0.0005)\n', (11162, 11204), False, 'import math\n'), ((11247, 11301), 'math.isclose', 'math.isclose', (['session_property[2]', '(1.6)'], {'abs_tol': '(0.0005)'}), '(session_property[2], 1.6, abs_tol=0.0005)\n', (11259, 11301), False, 'import math\n'), ((11344, 11398), 'math.isclose', 'math.isclose', (['session_property[3]', '(3.3)'], {'abs_tol': '(0.0005)'}), 
'(session_property[3], 3.3, abs_tol=0.0005)\n', (11356, 11398), False, 'import math\n'), ((11441, 11500), 'math.isclose', 'math.isclose', (['session_property[4]', '(3.05e-05)'], {'abs_tol': '(0.0005)'}), '(session_property[4], 3.05e-05, abs_tol=0.0005)\n', (11453, 11500), False, 'import math\n'), ((11544, 11598), 'math.isclose', 'math.isclose', (['session_property[5]', '(2.0)'], {'abs_tol': '(0.0005)'}), '(session_property[5], 2.0, abs_tol=0.0005)\n', (11556, 11598), False, 'import math\n'), ((12673, 12706), 'numpy.shape', 'numpy.shape', (['per_instrument_state'], {}), '(per_instrument_state)\n', (12684, 12706), False, 'import numpy\n'), ((12993, 13021), 'numpy.shape', 'numpy.shape', (['register_values'], {}), '(register_values)\n', (13004, 13021), False, 'import numpy\n'), ((1767, 1805), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'test_pin'], {}), '(tsm, test_pin)\n', (1790, 1805), True, 'import nidevtools.digital as dt_dpi\n'), ((17209, 17235), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (17225, 17235), False, 'import os\n'), ((28193, 28204), 'os.getpid', 'os.getpid', ([], {}), '()\n', (28202, 28204), False, 'import os\n'), ((1880, 1918), 'nidevtools.digital.pins_to_sessions', 'dt_dpi.pins_to_sessions', (['tsm', 'test_pin'], {}), '(tsm, test_pin)\n', (1903, 1918), True, 'import nidevtools.digital as dt_dpi\n'), ((16805, 16933), 'nidigital.history_ram_cycle_information.HistoryRAMCycleInformation', 'HistoryRAMCycleInformation', (['"""start_burst"""', '"""time_set"""', '(0)', '(0)', '(0)', '([enums.PinState.X] * 3)', '([enums.PinState.X] * 3)', '([False] * 3)'], {}), "('start_burst', 'time_set', 0, 0, 0, [enums.\n PinState.X] * 3, [enums.PinState.X] * 3, [False] * 3)\n", (16831, 16933), False, 'from nidigital.history_ram_cycle_information import HistoryRAMCycleInformation\n')] |
import os
import logging
from collections import OrderedDict
from random import randint
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results, PascalVOCDetectionEvaluator
from detectron2.modeling import GeneralizedRCNNWithTTA
from myILOD.utils.register import my_register
import detectron2.utils.comm as comm
from PIL import Image, ImageDraw
from detectron2.data.detection_utils import convert_PIL_to_numpy
import numpy as np
import torch, sys, random, json, logging, time, cv2
from detectron2.data import build_detection_train_loader
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data import transforms as T
from detectron2.data.build import get_detection_dataset_dicts, DatasetFromList, MapDataset
class myAug(T.augmentation.Augmentation):
    """Random flip augmentation (a clone of detectron2's ``T.RandomFlip``).

    With probability ``prob`` the image is flipped horizontally or
    vertically; exactly one of ``horizontal``/``vertical`` may be True.
    """

    def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
        """
        Args:
            prob (float): probability of applying the flip.
            horizontal (bool): flip the image left-right.
            vertical (bool): flip the image top-bottom.
        """
        super().__init__()
        if horizontal and vertical:
            raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
        if not horizontal and not vertical:
            raise ValueError("At least one of horiz or vert has to be True!")
        self._init(locals())

    def get_transform(self, img):
        h, w = img.shape[:2]
        do = self._rand_range() < self.prob
        # BUGFIX: the original implementation ended here and implicitly
        # returned None, which is not a valid Transform for detectron2's
        # Augmentation API. Return the proper flip transform (or a no-op),
        # exactly as T.RandomFlip.get_transform does.
        if do:
            if self.horizontal:
                return T.HFlipTransform(w)
            if self.vertical:
                return T.VFlipTransform(h)
        return T.NoOpTransform()
class Trainer(DefaultTrainer):
    """DefaultTrainer extended with a replay memory and CutPaste/Mixup
    data augmentation applied per training step (incremental-learning setup)."""
    def __init__(self, cfg):
        # Build the replay-memory dataset (cfg.DATASETS.MEMORY) once at start.
        super().__init__(cfg)
        self.memory = self.build_memory(cfg)
    def CutPaste(self, each_img):
        """Paste one randomly chosen ground-truth object from the replay
        memory onto ``each_img`` (in place), merging the box annotations.

        ``each_img`` is a mapped dataset dict: 'image' is a CHW uint8-like
        tensor, 'instances' holds gt_boxes / gt_classes.
        """
        img = Image.fromarray(each_img['image'].byte().permute(1, 2, 0).numpy())
        # MEMORY
        # mm_data format:
        # file_name, height, width, image_id, image,
        # instances: Instances(
        # num_instances, image_height, image_width,
        # fields=[gt_boxes: Boxes(tensor([[352., 268., 635., 415.]])), gt_classes: tensor([2])])
        mm_data = random.choice(self.memory)
        # Pick one random annotated object from the memory sample.
        r_id = random.randint(0, len(mm_data['instances']._fields['gt_boxes'].tensor)-1)
        mm_ann = mm_data['instances']._fields['gt_boxes'].tensor[r_id]
        mm_cat = mm_data['instances']._fields['gt_classes'][r_id]
        mm_img = Image.fromarray(mm_data['image'].byte().permute(1, 2, 0).numpy())
        # OPERATION: crop the object and paste it at a random location
        # that keeps the crop inside the target image.
        x1, y1, x2, y2 = [int(i) for i in mm_ann]
        w, h = x2-x1, y2-y1
        mm_cut = mm_img.crop((x1, y1, x2, y2))
        paste_x = random.randint(0, max(0, each_img['image'].size()[2] - w))
        paste_y = random.randint(0, max(0, each_img['image'].size()[1] - h))
        img.paste(mm_cut, (paste_x, paste_y))
        # LABEL: start the merged annotation list with the pasted box/class.
        gt_boxes = torch.unsqueeze(torch.tensor([float(i) for i in [paste_x, paste_y, paste_x + w, paste_y + h]]), 0)
        gt_classes = torch.unsqueeze(mm_cat, 0)
        # Keep each original box only if its overlap with the pasted object
        # is <= 50% of the box area (otherwise it is considered occluded).
        for box, cat in zip(each_img['instances']._fields['gt_boxes'].tensor, each_img['instances']._fields['gt_classes']):
            # NOTE(review): the intersection is computed against mm_ann (the
            # box's coordinates in the *memory* image), not against the box at
            # the paste location (paste_x, paste_y, ...). Looks like a bug —
            # confirm the intended occlusion test.
            ixmin = np.maximum(mm_ann[0], box[0])
            iymin = np.maximum(mm_ann[1], box[1])
            ixmax = np.minimum(mm_ann[2], box[2])
            iymax = np.minimum(mm_ann[3], box[3])
            iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
            ih = np.maximum(iymax - iymin + 1.0, 0.0)
            inters = iw * ih
            box_area = (box[2] - box[0] + 1.0) * (box[3] - box[1] + 1.0)
            overlaps_of_box = inters / box_area
            if overlaps_of_box <= 0.5:
                gt_boxes = torch.cat((gt_boxes, torch.unsqueeze(box, 0)))
                gt_classes = torch.cat((gt_classes, torch.unsqueeze(cat, 0)))
        # Write the augmented image (back to CHW) and merged labels in place.
        each_img['image'] = torch.as_tensor(np.ascontiguousarray(np.array(img).transpose(2, 0, 1)))
        each_img['instances']._fields['gt_boxes'].tensor = gt_boxes
        each_img['instances']._fields['gt_classes'] = gt_classes
        # DRAW (debug visualization, kept for manual inspection)
        # b, g, r = cv2.split(np.array(img))
        # draw_img = Image.fromarray(cv2.merge([r, g, b]))
        # a = ImageDraw.ImageDraw(draw_img)
        # for b in each_img['instances']._fields['gt_boxes'].tensor:
        #     a.rectangle([int(i) for i in b])
        # draw_img.save("0.jpg")
        # print(each_img['instances'])
        # sys.exit()
    def Mixup(self, each_img):
        """Blend ``each_img`` (in place) with a random replay-memory image
        using a Beta(2, 2) mixing coefficient; annotations of both images
        are concatenated."""
        # input data
        img1 = each_img['image'].byte().permute(1, 2, 0).numpy()
        lambd = np.random.beta(2,2)
        # memory data
        mm_data = random.choice(self.memory)
        img2= mm_data['image'].byte().permute(1, 2, 0).numpy()
        # operation: blend on a canvas large enough for both images
        height = max(img1.shape[0], img2.shape[0])
        width = max(img1.shape[1], img2.shape[1])
        mix_img = np.zeros(shape=(height, width, 3), dtype='float32')
        mix_img[:img1.shape[0], :img1.shape[1], :] = img1.astype('float32') * lambd
        mix_img[:img2.shape[0], :img2.shape[1], :] += img2.astype('float32') * (1. - lambd)
        mix_img = mix_img.astype('uint8')
        # fix: write the blended image and the concatenated labels back
        each_img['image'] = torch.as_tensor(np.ascontiguousarray(mix_img.transpose(2, 0, 1)))
        each_img['instances']._fields['gt_boxes'].tensor = torch.cat((each_img['instances']._fields['gt_boxes'].tensor, mm_data['instances']._fields['gt_boxes'].tensor))
        each_img['instances']._fields['gt_classes'] = torch.cat((each_img['instances']._fields['gt_classes'], mm_data['instances']._fields['gt_classes']))
        # drawimg = Image.fromarray(mix_img)
        # a = ImageDraw.ImageDraw(drawimg)
        # for b in each_img['instances']._fields['gt_boxes'].tensor:
        #     a.rectangle([int(i) for i in b])
        # drawimg.save("0.jpg")
    def run_step(self):
        """One training iteration: fetch a batch, apply CutPaste to every
        sample, then forward/backward/step as in SimpleTrainer."""
        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        data = next(self._data_loader_iter)
        # TODO: EMM
        # each_img: 3 (b, g, r) * H * W
        for each_img in data:
            self.CutPaste(each_img)
        # END
        data_time = time.perf_counter() - start
        loss_dict = self.model(data)
        losses = sum(loss_dict.values())
        self.optimizer.zero_grad()
        losses.backward()
        # use a new stream so the ops don't wait for DDP
        with torch.cuda.stream(
                torch.cuda.Stream()
        ):
            metrics_dict = loss_dict
            metrics_dict["data_time"] = data_time
            self._write_metrics(metrics_dict)
            self._detect_anomaly(losses, loss_dict)
        self.optimizer.step()
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # Pascal-VOC style evaluation; output_folder is computed but the
        # evaluator itself only needs the dataset name.
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return PascalVOCDetectionEvaluator(dataset_name)
    @classmethod
    def test_with_TTA(cls, cfg, model):
        """Evaluate ``model`` with test-time augmentation; results are
        returned under keys suffixed with ``_TTA``."""
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA
        # Only support some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res
    @classmethod
    def build_train_loader(cls, cfg):
        """Train loader using TEST-time resize settings plus random flip."""
        # NOTE(review): the TRAIN size settings below are immediately
        # overwritten by the TEST settings — the first three assignments are
        # dead code. Confirm that using MIN/MAX_SIZE_TEST for training is
        # intentional.
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
        augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
        augmentation.append(T.RandomFlip())
        mapper = DatasetMapper(cfg, True, augmentations=augmentation)
        return build_detection_train_loader(cfg, mapper)
    @classmethod
    def build_memory(cls, cfg):
        """Build the replay-memory dataset from cfg.DATASETS.MEMORY as a
        mapped (training-transform) dataset."""
        memory_dict = get_detection_dataset_dicts(
            cfg.DATASETS.MEMORY,
            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
            min_keypoints=0,
            proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None
        )
        memory_dataset = DatasetFromList(memory_dict)
        memory_dataset = MapDataset(memory_dataset, DatasetMapper(cfg, True))
        return memory_dataset
def setup(args):
    """Build and freeze the detectron2 config from the CLI arguments.

    Starts from the default config, overlays the config file and any
    command-line key/value overrides, then runs detectron2's standard
    environment setup (logging, output directory, seed, ...).
    """
    config = get_cfg()
    # Overlay the YAML config file first, then the CLI overrides.
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
def main(args):
    """Entry point: register the custom datasets, then either run
    evaluation only or start training."""
    # ZJW: register the project-specific (ILOD) datasets.
    my_register()
    cfg = setup(args)

    if args.eval_only:
        # Evaluation-only path: build the model, load weights, run the tests.
        eval_model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(eval_model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, eval_model)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Training path: first print trainable parameter names for inspection.
    inspect_model = Trainer.build_model(cfg)
    for param_name, param in inspect_model.named_parameters():
        if param.requires_grad:
            print(param_name)
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
if __name__ == "__main__":
    # Parse detectron2's standard command-line arguments.
    args = default_argument_parser().parse_args()
    # Use a random local port for the distributed init URL so that several
    # jobs started on the same machine do not collide.
    args.dist_url='tcp://127.0.0.1:{}'.format(randint(30000,50000))
    print("Command Line Args:", args)
    # Launch main() across the requested GPUs/machines.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
) | [
"numpy.maximum",
"torch.cat",
"detectron2.modeling.GeneralizedRCNNWithTTA",
"detectron2.engine.default_argument_parser",
"myILOD.utils.register.my_register",
"detectron2.data.build_detection_train_loader",
"os.path.join",
"detectron2.checkpoint.DetectionCheckpointer",
"detectron2.data.build.DatasetF... | [((8700, 8709), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (8707, 8709), False, 'from detectron2.config import get_cfg\n'), ((8855, 8879), 'detectron2.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (8868, 8879), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((8940, 8953), 'myILOD.utils.register.my_register', 'my_register', ([], {}), '()\n', (8951, 8953), False, 'from myILOD.utils.register import my_register\n'), ((9740, 9874), 'detectron2.engine.launch', 'launch', (['main', 'args.num_gpus'], {'num_machines': 'args.num_machines', 'machine_rank': 'args.machine_rank', 'dist_url': 'args.dist_url', 'args': '(args,)'}), '(main, args.num_gpus, num_machines=args.num_machines, machine_rank=\n args.machine_rank, dist_url=args.dist_url, args=(args,))\n', (9746, 9874), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((2073, 2099), 'random.choice', 'random.choice', (['self.memory'], {}), '(self.memory)\n', (2086, 2099), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((2921, 2947), 'torch.unsqueeze', 'torch.unsqueeze', (['mm_cat', '(0)'], {}), '(mm_cat, 0)\n', (2936, 2947), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((4481, 4501), 'numpy.random.beta', 'np.random.beta', (['(2)', '(2)'], {}), '(2, 2)\n', (4495, 4501), True, 'import numpy as np\n'), ((4550, 4576), 'random.choice', 'random.choice', (['self.memory'], {}), '(self.memory)\n', (4563, 4576), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((4780, 4831), 'numpy.zeros', 'np.zeros', ([], {'shape': '(height, width, 3)', 'dtype': '"""float32"""'}), "(shape=(height, width, 3), dtype='float32')\n", (4788, 4831), True, 'import numpy as np\n'), ((5218, 5333), 'torch.cat', 'torch.cat', (["(each_img['instances']._fields['gt_boxes'].tensor, mm_data['instances'].\n 
_fields['gt_boxes'].tensor)"], {}), "((each_img['instances']._fields['gt_boxes'].tensor, mm_data[\n 'instances']._fields['gt_boxes'].tensor))\n", (5227, 5333), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((5383, 5488), 'torch.cat', 'torch.cat', (["(each_img['instances']._fields['gt_classes'], mm_data['instances']._fields[\n 'gt_classes'])"], {}), "((each_img['instances']._fields['gt_classes'], mm_data['instances'\n ]._fields['gt_classes']))\n", (5392, 5488), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((5849, 5868), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5866, 5868), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((6803, 6844), 'detectron2.evaluation.PascalVOCDetectionEvaluator', 'PascalVOCDetectionEvaluator', (['dataset_name'], {}), '(dataset_name)\n', (6830, 6844), False, 'from detectron2.evaluation import COCOEvaluator, verify_results, PascalVOCDetectionEvaluator\n'), ((6921, 6960), 'logging.getLogger', 'logging.getLogger', (['"""detectron2.trainer"""'], {}), "('detectron2.trainer')\n", (6938, 6960), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((7153, 7187), 'detectron2.modeling.GeneralizedRCNNWithTTA', 'GeneralizedRCNNWithTTA', (['cfg', 'model'], {}), '(cfg, model)\n', (7175, 7187), False, 'from detectron2.modeling import GeneralizedRCNNWithTTA\n'), ((7997, 8049), 'detectron2.data.dataset_mapper.DatasetMapper', 'DatasetMapper', (['cfg', '(True)'], {'augmentations': 'augmentation'}), '(cfg, True, augmentations=augmentation)\n', (8010, 8049), False, 'from detectron2.data.dataset_mapper import DatasetMapper\n'), ((8065, 8106), 'detectron2.data.build_detection_train_loader', 'build_detection_train_loader', (['cfg', 'mapper'], {}), '(cfg, mapper)\n', (8093, 8106), False, 'from detectron2.data import build_detection_train_loader\n'), ((8179, 8396), 'detectron2.data.build.get_detection_dataset_dicts', 'get_detection_dataset_dicts', (['cfg.DATASETS.MEMORY'], 
{'filter_empty': 'cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS', 'min_keypoints': '(0)', 'proposal_files': '(cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None)'}), '(cfg.DATASETS.MEMORY, filter_empty=cfg.\n DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=0, proposal_files=\n cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None)\n', (8206, 8396), False, 'from detectron2.data.build import get_detection_dataset_dicts, DatasetFromList, MapDataset\n'), ((8474, 8502), 'detectron2.data.build.DatasetFromList', 'DatasetFromList', (['memory_dict'], {}), '(memory_dict)\n', (8489, 8502), False, 'from detectron2.data.build import get_detection_dataset_dicts, DatasetFromList, MapDataset\n'), ((9235, 9257), 'detectron2.utils.comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (9255, 9257), True, 'import detectron2.utils.comm as comm\n'), ((9675, 9696), 'random.randint', 'randint', (['(30000)', '(50000)'], {}), '(30000, 50000)\n', (9682, 9696), False, 'from random import randint\n'), ((3093, 3122), 'numpy.maximum', 'np.maximum', (['mm_ann[0]', 'box[0]'], {}), '(mm_ann[0], box[0])\n', (3103, 3122), True, 'import numpy as np\n'), ((3143, 3172), 'numpy.maximum', 'np.maximum', (['mm_ann[1]', 'box[1]'], {}), '(mm_ann[1], box[1])\n', (3153, 3172), True, 'import numpy as np\n'), ((3193, 3222), 'numpy.minimum', 'np.minimum', (['mm_ann[2]', 'box[2]'], {}), '(mm_ann[2], box[2])\n', (3203, 3222), True, 'import numpy as np\n'), ((3243, 3272), 'numpy.minimum', 'np.minimum', (['mm_ann[3]', 'box[3]'], {}), '(mm_ann[3], box[3])\n', (3253, 3272), True, 'import numpy as np\n'), ((3290, 3326), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin + 1.0)', '(0.0)'], {}), '(ixmax - ixmin + 1.0, 0.0)\n', (3300, 3326), True, 'import numpy as np\n'), ((3344, 3380), 'numpy.maximum', 'np.maximum', (['(iymax - iymin + 1.0)', '(0.0)'], {}), '(iymax - iymin + 1.0, 0.0)\n', (3354, 3380), True, 'import numpy as np\n'), ((6077, 6096), 'time.perf_counter', 
'time.perf_counter', ([], {}), '()\n', (6094, 6096), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((6746, 6787), 'os.path.join', 'os.path.join', (['cfg.OUTPUT_DIR', '"""inference"""'], {}), "(cfg.OUTPUT_DIR, 'inference')\n", (6758, 6787), False, 'import os\n'), ((7879, 7933), 'detectron2.data.transforms.ResizeShortestEdge', 'T.ResizeShortestEdge', (['min_size', 'max_size', 'sample_style'], {}), '(min_size, max_size, sample_style)\n', (7899, 7933), True, 'from detectron2.data import transforms as T\n'), ((7963, 7977), 'detectron2.data.transforms.RandomFlip', 'T.RandomFlip', ([], {}), '()\n', (7975, 7977), True, 'from detectron2.data import transforms as T\n'), ((8555, 8579), 'detectron2.data.dataset_mapper.DatasetMapper', 'DatasetMapper', (['cfg', '(True)'], {}), '(cfg, True)\n', (8568, 8579), False, 'from detectron2.data.dataset_mapper import DatasetMapper\n'), ((9271, 9295), 'detectron2.evaluation.verify_results', 'verify_results', (['cfg', 'res'], {}), '(cfg, res)\n', (9285, 9295), False, 'from detectron2.evaluation import COCOEvaluator, verify_results, PascalVOCDetectionEvaluator\n'), ((9590, 9615), 'detectron2.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (9613, 9615), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((6350, 6369), 'torch.cuda.Stream', 'torch.cuda.Stream', ([], {}), '()\n', (6367, 6369), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((9055, 9108), 'detectron2.checkpoint.DetectionCheckpointer', 'DetectionCheckpointer', (['model'], {'save_dir': 'cfg.OUTPUT_DIR'}), '(model, save_dir=cfg.OUTPUT_DIR)\n', (9076, 9108), False, 'from detectron2.checkpoint import DetectionCheckpointer\n'), ((7285, 7330), 'os.path.join', 'os.path.join', (['cfg.OUTPUT_DIR', '"""inference_TTA"""'], {}), "(cfg.OUTPUT_DIR, 'inference_TTA')\n", (7297, 7330), False, 'import os\n'), ((3619, 3642), 'torch.unsqueeze', 'torch.unsqueeze', (['box', '(0)'], 
{}), '(box, 0)\n', (3634, 3642), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((3697, 3720), 'torch.unsqueeze', 'torch.unsqueeze', (['cat', '(0)'], {}), '(cat, 0)\n', (3712, 3720), False, 'import torch, sys, random, json, logging, time, cv2\n'), ((3797, 3810), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3805, 3810), True, 'import numpy as np\n')] |
import openseespy.opensees as ops
import pandas as pd
import csv
import os
import numpy as np
import random
import math
from functions import *
import column
# Create a dictionary to store the column section design parameters
data = {'P': [],'My': [],'Mz': [],'Width': [],'Depth': [],'D_rebar': [],
'w_g': [],'d_g': [],'numRebars': [], 'As_total':[], 'h':[], 'fc':[]}
# Directory to store the ouput files
directory = 'D:/output/'
n = 1 # Number of column designs
numSaveToFile= 1 # Number of designs to save at a time
# Creating a file to store the opensees logs and cash
logName = directory + 'logs.log'
# Crearing a csv file to store the generated dataset
fileName= directory + 'output/data.csv'
# Create an object of class Columns to call the material, geometry and analysis parameters
parameters = column.Column()
ops.logFile(logName, '-noEcho') # Send all logs to the logName file instead of terminal
# Start writing the design data to the csv file
with open(fileName, 'w', newline='') as f:
thewriter = csv.writer(f)
thewriter.writerow(['P','My','Mz', 'Width','Depth','D_rebar','w_g','d_g','numRebars','As_total', 'h', 'fc'])
i=1
# *************START: Outer loop for parametric designs*********************
while i < n+1:
# Concrete parameters
fck = 30 # Characteristic strength of concrete
fc = -fck/parameters.g_c # Design strength of concrete
eps1U = parameters.eps1U # Strain at maximum strength
eps2U = parameters.eps2U # Strain at ultimate strength
# Steel parameters
fy = parameters.fy/parameters.g_s # Design strength of steel
# Randomly generate column cross-section
colWidth, colDepth = cross_section(parameters)
colHeight = parameters.colHeight # Column height
print(colHeight, colWidth, colDepth)
# Select reinforcement diameter
barDiam = random.choice(parameters.d_rebars)
# Calculate the area of one rebar
As_bar = (math.pi)*(barDiam*barDiam)/4
# Calculate the steel area and reinforcement constraints
A_core = colWidth*colDepth # Section gross area
As_min = 0.002*A_core # Minimum area of steel
As_max = 0.04*A_core # Maximum area of steel
numRebars_min = math.ceil(As_min/As_bar) # Minimum number of rebars
numRebars_max = math.floor(As_max/As_bar) # Maximum number of rebars
# Total number of longitudinal-reinforcement bars
try:
numBarsSec = random.randint(numRebars_min,numRebars_max)
if numBarsSec<4:
continue
except:
continue
# Section geometry modelling parameters
coverY = colDepth/2.0 # The distance from the section z-axis to the edge of the cover concrete -- outer edge of cover concrete
coverZ = colWidth/2.0 # The distance from the section y-axis to the edge of the cover concrete -- outer edge of cover concrete
coreY = coverY - parameters.cover - barDiam/2 # The distance from the section z-axis to the edge of the core concrete -- edge of the core concrete/inner edge of cover concrete
coreZ = coverZ - parameters.cover - barDiam/2 # The distance from the section y-axis to the edge of the core concrete -- edge of the core concrete/inner edge of cover concrete
dist1 = coverY - parameters.cover/2
dist2 = coverZ - parameters.cover/2
# Generating the grid parameters for rebars placement
listDivisorPairs = returnDivisorsPair(numBarsSec)
if (len(listDivisorPairs) == 1):
listDivisorPairs = returnDivisorsPair(numBarsSec-1)
w_g, d_g= grid_params(listDivisorPairs)
# No reinforcement area, to place reinforcement along the perimeter of section
w_h = (colWidth-2*barDiam-2.5*parameters.cover)/colWidth
d_h = (colDepth-2*barDiam-2.5*parameters.cover)/colDepth
rebarZ = np.linspace(-coreZ, coreZ, w_g) # Coordinates of rebars in Z axis
rebarY = np.linspace(-coreY, coreY, d_g) # Coordinates of rebars in Y axis
spacingZ = (2*coreZ)/(w_g-1) # Spacing between rebars in Z axis
spacingY = (2*coreY)/(d_g-1) # Spacing between rebars in Y axis
# Checking for reinforcement bars minimum spacing requirement
spacing_min=max(2*barDiam, barDiam+0.032+0.005, barDiam+0.020) # Minimum allowable spacing [m]
if (spacingZ < spacing_min or spacingY < spacing_min):
continue
# Clean the cash and saved parameters from previous design
ops.wipe()
# Define model builder
ops.model('basic', '-ndm', 3, '-ndf', 6)
# Create concrete material
ops.uniaxialMaterial('Concrete01', parameters.IDcon, fc, eps1U, fc, eps2U)
# Create steel material
ops.uniaxialMaterial('Steel01', parameters.IDreinf, fy, parameters.Es, parameters.Bs)
# Create section
ops.section('Fiber', parameters.SecTag, '-GJ', 1.0e6)
# Construct fibers in the concrete core
ops.patch('quadr', parameters.IDcon, parameters.num_fib, parameters.num_fib, -coreY, coreZ, -coreY, -coreZ, coreY, -coreZ, coreY, coreZ)
# Construct fibers in the concrete cover at four sides
ops.patch('quadr', parameters.IDcon, 1, parameters.num_fib, -coverY, coverZ, -coreY, coreZ, coreY, coreZ, coverY, coverZ)
ops.patch('quadr', parameters.IDcon, 1, parameters.num_fib, -coreY, -coreZ, -coverY, -coverZ, coverY, -coverZ, coreY, -coreZ)
ops.patch('quadr', parameters.IDcon, parameters.num_fib, 1, -coverY, coverZ, -coverY, -coverZ, -coreY, -coreZ, -coreY, coreZ)
ops.patch('quadr', parameters.IDcon, parameters.num_fib, 1, coreY, coreZ, coreY, -coreZ, coverY, -coverZ, coverY, coverZ)
# Inserting rebars along the perimeter of the section
hollowY = d_h*coverY
hollowZ = w_h*coverZ
rebars_YZ = np.empty((0,2))
for ii, Y in enumerate(rebarY):
for jj, Z in enumerate(rebarZ):
if (abs(Y) < hollowY and abs(Z) < hollowZ):
continue
rebars_YZ = np.vstack([rebars_YZ, [Y,Z]])
for ii in range(len(rebars_YZ)):
ops.fiber(*rebars_YZ[ii], As_bar, parameters.IDreinf)
# Check for number of rebars in final configuration, should not be less than 4
numTotRebars = len(rebars_YZ)
if (numTotRebars<4 or numTotRebars*As_bar<As_min) :
# print("Req-nt on # rebars is not met.")
continue
# Steel yield strain
eps = fy/parameters.Es
d_z = colDepth-parameters.cover-barDiam/2 # Distance from column outer edge to rebar
Kz = eps/(0.7*d_z) # Yield curvature in Z direction
d_y = colWidth-parameters.cover-barDiam/2 # Distance from column outer edge to rebar
Ky = eps/(0.7*d_y) # Yield curvature in Y direction
# Compute the axial load capacity
As = As_bar * numTotRebars
Ac = colWidth*colDepth - As
Pmax = -parameters.alpha_coef*(parameters.nu_coef*(-fc)*Ac + fy*As)
# Check for steel area requirement
if -0.1*Pmax/fy > As or As<0.002*A_core:
continue
# Computer the axial load capacity for the bottom section of column
Pmax = Pmax + parameters.unit_weight*colHeight*colDepth*colWidth
# **********START: Inner loop: Increasing axial load up to Pmax ***********
# Generate list of axial load P
list_P = np.linspace(0,Pmax,50)
# First call analysis to calculate My capacity (uniaxial moment case)
# List to store the My capacity for each axial load P from list_P
list_M_maxs=[]
for v in range(len(list_P)):
# Create files to store the stress, strain and moment from
# four corner points of column section
strain1 = directory + 'strain1_' + str(v)+ '.txt'
strain2 = directory + 'strain2_' + str(v)+ '.txt'
strain3 = directory + 'strain3_' + str(v)+ '.txt'
strain4 = directory + 'strain4_' + str(v)+ '.txt'
strains = [strain1, strain2, strain3, strain4]
# Call the section analysis procedure
MomentCurvature(parameters, list_P[v], Kz, -1, 5, strains, dist1, dist2)
# Create a list to store the step when the ultimate strength strain is reached
indices = []
# Extract the step when the first corner point reached the ultimate strain
if os.path.getsize(strain1)>0:
strain1 = pd.read_csv(strain1, sep = ' ', header = None, )
filtered1 = strain1[strain1[2]>=-0.0035]
if len(filtered1)> 1:
indices.append(list(filtered1.index)[-1])
# Extract the step when the second corner point reached the ultimate strain
if os.path.getsize(strain2)>0:
strain2 = pd.read_csv(strain2, sep = ' ', header = None, )
filtered2 = strain2[strain2[2]>=-0.0035]
if len(filtered2)> 1:
indices.append(list(filtered2.index)[-1])
# Extract the step when the third corner point reached the ultimate strain
if os.path.getsize(strain3)>0:
strain3 = pd.read_csv(strain3, sep = ' ', header = None, )
filtered3 = strain3[strain3[2]>=-0.0035]
if len(filtered3)> 1:
indices.append(list(filtered3.index)[-1])
# Extract the step when the forth corner point reached the ultimate strain
if os.path.getsize(strain4)>0:
strain4 = pd.read_csv(strain4, sep = ' ', header = None, )
filtered4 = strain4[strain4[2]>=-0.0035]
if len(filtered4)> 1:
indices.append(list(filtered4.index)[-1])
# Extract the step when one of the four edge points reached the ultimate
# strain first
if len(indices)>=1:
Moment_ult = min(indices)
M_ult = strain1.loc[Moment_ult, [0]]
list_M_maxs.append(float(M_ult))
else:
# if convergence wasn't reached set moment capacity to zero
M_ult = 0
list_M_maxs.append(M_ult)
# Delete the files with the stress, strain and moment to free the memory
if v>=5:
myfile1=directory + "strain1_{}.txt".format(v-5)
myfile2=directory + "strain2_{}.txt".format(v-5)
myfile3=directory + "strain3_{}.txt".format(v-5)
myfile4=directory + "strain4_{}.txt".format(v-5)
list_delete = [myfile1, myfile2, myfile3, myfile4]
for myfile in list_delete:
if os.path.isfile(myfile):
os.remove(myfile)
# Call analysis to calculate Mz capacity (biaxial moment case)
# Iterate for each axial load P
for j in range(len(list_P)):
P=list_P[j]
# Create a list of moments in Y direction up to My capacity
list_m = np.append(list_M_maxs[j]*np.random.random_sample(size=29), list_M_maxs[j])
# Iterate for each axial load P and moment My
for m in range(len(list_m)):
# Fill the dictionary with the current design parameters
data['P'].append(P), data['Width'].append(colWidth), data['Depth'].append(colDepth), data['D_rebar'].append(barDiam)
data['w_g'].append(w_g), data['d_g'].append(d_g), data['numRebars'].append(numTotRebars),data['As_total'].append(As),
data['h'].append(colHeight),data['fc'].append(-fck)
# Create files to store the stress, strain and moment from
# four corner points of column section for biaxial bending case
strain21 = directory + 'strain21_' + str(v)+ '.txt'
strain22 = directory + 'strain22_' + str(v)+ '.txt'
strain23 = directory + 'strain23_' + str(v)+ '.txt'
strain24 = directory + 'strain24_' + str(v)+ '.txt'
strains2 = [strain21, strain22, strain23, strain24]
# Call the section analysis procedure to computer Mz capacity
MomentCurvature(parameters, P, Ky, list_m[m], 6, strains2, dist1, dist2)
# Reset a list to store the step when the ultimate strength strain is reached
indices = []
# Extract the step when the first corner point reached the ultimate strain
if os.path.getsize(strain21)>0:
strain1 = pd.read_csv(strain21, sep = ' ', header = None)
filtered1 = strain1[strain1[2]>= -0.0035]
if len(filtered1)> 1:
indices.append(list(filtered1.index)[-1])
# Extract the step when the second corner point reached the ultimate strain
if os.path.getsize(strain22)>0:
strain2 = pd.read_csv(strain22, sep = ' ', header = None)
filtered2 = strain2[strain2[2]>= -0.0035]
if len(filtered2)> 1:
indices.append(list(filtered2.index)[-1])
# Extract the step when the third corner point reached the ultimate strain
if os.path.getsize(strain23)>0:
strain3 = pd.read_csv(strain23, sep = ' ', header = None)
filtered3 = strain3[strain3[2]>= -0.0035]
if len(filtered3)> 1:
indices.append(list(filtered3.index)[-1])
# Extract the step when the forth corner point reached the ultimate strain
if os.path.getsize(strain24)>0:
strain4 = pd.read_csv(strain24, sep = ' ', header = None)
filtered4 = strain4[strain4[2]>= -0.0035]
if len(filtered4)> 1:
indices.append(list(filtered4.index)[-1])
# Extract the step when one of the four edge points reached the ultimate
# strain first
if len(indices)>=1:
Moment_ult = min(indices)
M_ult = strain1[0].values[Moment_ult]
list_M_maxs.append(float(M_ult))
data['My'].append(list_m[m])
data['Mz'].append(M_ult)
else:
M_ult = 0
list_M_maxs.append(M_ult)
data['My'].append(list_m[m])
data['Mz'].append(M_ult)
# Delete the files with the stress, strain and moment to free the memory
if v>=5:
myfile1=directory + "strain21_{}.txt".format(v-5)
myfile2=directory + "strain22_{}.txt".format(v-5)
myfile3=directory + "strain23_{}.txt".format(v-5)
myfile4=directory + "strain24_{}.txt".format(v-5)
list_delete = [myfile1, myfile2, myfile3, myfile4]
for myfile in list_delete:
if os.path.isfile(myfile):
os.remove(myfile)
# Save the design
if i%numSaveToFile == 0:
# Create dataframe with the data from dictionary of design parameters
df = pd.DataFrame(data)
# Drop failure points
df=df[(df['Mz'].astype(float)>0.0) & (df['My'].astype(float) > 0.0)]
df = df.dropna()
# Save the dataframe with designs to a csv file
df.to_csv(fileName, mode='a', index=False, header=False)
print("%s column designs already saved."%(i) )
# Clean the disctionary
data = {'P':[],'My':[],'Mz':[],'Width':[],'Depth':[],'D_rebar':[],'w_g':[],'d_g':[],'numRebars':[],'As_total':[], 'h':[],'fc':[]}
# Increase counter by one for the next design
i+=1
| [
"os.remove",
"numpy.random.random_sample",
"pandas.read_csv",
"numpy.empty",
"column.Column",
"os.path.isfile",
"pandas.DataFrame",
"openseespy.opensees.logFile",
"random.randint",
"openseespy.opensees.wipe",
"openseespy.opensees.model",
"numpy.linspace",
"csv.writer",
"math.ceil",
"open... | [((893, 908), 'column.Column', 'column.Column', ([], {}), '()\n', (906, 908), False, 'import column\n'), ((924, 955), 'openseespy.opensees.logFile', 'ops.logFile', (['logName', '"""-noEcho"""'], {}), "(logName, '-noEcho')\n", (935, 955), True, 'import openseespy.opensees as ops\n'), ((1135, 1148), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1145, 1148), False, 'import csv\n'), ((2075, 2109), 'random.choice', 'random.choice', (['parameters.d_rebars'], {}), '(parameters.d_rebars)\n', (2088, 2109), False, 'import random\n'), ((2485, 2511), 'math.ceil', 'math.ceil', (['(As_min / As_bar)'], {}), '(As_min / As_bar)\n', (2494, 2511), False, 'import math\n'), ((2558, 2585), 'math.floor', 'math.floor', (['(As_max / As_bar)'], {}), '(As_max / As_bar)\n', (2568, 2585), False, 'import math\n'), ((4125, 4156), 'numpy.linspace', 'np.linspace', (['(-coreZ)', 'coreZ', 'w_g'], {}), '(-coreZ, coreZ, w_g)\n', (4136, 4156), True, 'import numpy as np\n'), ((4212, 4243), 'numpy.linspace', 'np.linspace', (['(-coreY)', 'coreY', 'd_g'], {}), '(-coreY, coreY, d_g)\n', (4223, 4243), True, 'import numpy as np\n'), ((4773, 4783), 'openseespy.opensees.wipe', 'ops.wipe', ([], {}), '()\n', (4781, 4783), True, 'import openseespy.opensees as ops\n'), ((4820, 4860), 'openseespy.opensees.model', 'ops.model', (['"""basic"""', '"""-ndm"""', '(3)', '"""-ndf"""', '(6)'], {}), "('basic', '-ndm', 3, '-ndf', 6)\n", (4829, 4860), True, 'import openseespy.opensees as ops\n'), ((4901, 4975), 'openseespy.opensees.uniaxialMaterial', 'ops.uniaxialMaterial', (['"""Concrete01"""', 'parameters.IDcon', 'fc', 'eps1U', 'fc', 'eps2U'], {}), "('Concrete01', parameters.IDcon, fc, eps1U, fc, eps2U)\n", (4921, 4975), True, 'import openseespy.opensees as ops\n'), ((5008, 5097), 'openseespy.opensees.uniaxialMaterial', 'ops.uniaxialMaterial', (['"""Steel01"""', 'parameters.IDreinf', 'fy', 'parameters.Es', 'parameters.Bs'], {}), "('Steel01', parameters.IDreinf, fy, parameters.Es,\n parameters.Bs)\n", (5028, 
5097), True, 'import openseespy.opensees as ops\n'), ((5121, 5178), 'openseespy.opensees.section', 'ops.section', (['"""Fiber"""', 'parameters.SecTag', '"""-GJ"""', '(1000000.0)'], {}), "('Fiber', parameters.SecTag, '-GJ', 1000000.0)\n", (5132, 5178), True, 'import openseespy.opensees as ops\n'), ((5223, 5363), 'openseespy.opensees.patch', 'ops.patch', (['"""quadr"""', 'parameters.IDcon', 'parameters.num_fib', 'parameters.num_fib', '(-coreY)', 'coreZ', '(-coreY)', '(-coreZ)', 'coreY', '(-coreZ)', 'coreY', 'coreZ'], {}), "('quadr', parameters.IDcon, parameters.num_fib, parameters.num_fib,\n -coreY, coreZ, -coreY, -coreZ, coreY, -coreZ, coreY, coreZ)\n", (5232, 5363), True, 'import openseespy.opensees as ops\n'), ((5423, 5548), 'openseespy.opensees.patch', 'ops.patch', (['"""quadr"""', 'parameters.IDcon', '(1)', 'parameters.num_fib', '(-coverY)', 'coverZ', '(-coreY)', 'coreZ', 'coreY', 'coreZ', 'coverY', 'coverZ'], {}), "('quadr', parameters.IDcon, 1, parameters.num_fib, -coverY, coverZ,\n -coreY, coreZ, coreY, coreZ, coverY, coverZ)\n", (5432, 5548), True, 'import openseespy.opensees as ops\n'), ((5549, 5678), 'openseespy.opensees.patch', 'ops.patch', (['"""quadr"""', 'parameters.IDcon', '(1)', 'parameters.num_fib', '(-coreY)', '(-coreZ)', '(-coverY)', '(-coverZ)', 'coverY', '(-coverZ)', 'coreY', '(-coreZ)'], {}), "('quadr', parameters.IDcon, 1, parameters.num_fib, -coreY, -coreZ,\n -coverY, -coverZ, coverY, -coverZ, coreY, -coreZ)\n", (5558, 5678), True, 'import openseespy.opensees as ops\n'), ((5679, 5808), 'openseespy.opensees.patch', 'ops.patch', (['"""quadr"""', 'parameters.IDcon', 'parameters.num_fib', '(1)', '(-coverY)', 'coverZ', '(-coverY)', '(-coverZ)', '(-coreY)', '(-coreZ)', '(-coreY)', 'coreZ'], {}), "('quadr', parameters.IDcon, parameters.num_fib, 1, -coverY, coverZ,\n -coverY, -coverZ, -coreY, -coreZ, -coreY, coreZ)\n", (5688, 5808), True, 'import openseespy.opensees as ops\n'), ((5809, 5934), 'openseespy.opensees.patch', 'ops.patch', (['"""quadr"""', 
'parameters.IDcon', 'parameters.num_fib', '(1)', 'coreY', 'coreZ', 'coreY', '(-coreZ)', 'coverY', '(-coverZ)', 'coverY', 'coverZ'], {}), "('quadr', parameters.IDcon, parameters.num_fib, 1, coreY, coreZ,\n coreY, -coreZ, coverY, -coverZ, coverY, coverZ)\n", (5818, 5934), True, 'import openseespy.opensees as ops\n'), ((6062, 6078), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (6070, 6078), True, 'import numpy as np\n'), ((7624, 7648), 'numpy.linspace', 'np.linspace', (['(0)', 'Pmax', '(50)'], {}), '(0, Pmax, 50)\n', (7635, 7648), True, 'import numpy as np\n'), ((2710, 2754), 'random.randint', 'random.randint', (['numRebars_min', 'numRebars_max'], {}), '(numRebars_min, numRebars_max)\n', (2724, 2754), False, 'import random\n'), ((6339, 6392), 'openseespy.opensees.fiber', 'ops.fiber', (['*rebars_YZ[ii]', 'As_bar', 'parameters.IDreinf'], {}), '(*rebars_YZ[ii], As_bar, parameters.IDreinf)\n', (6348, 6392), True, 'import openseespy.opensees as ops\n'), ((15314, 15332), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (15326, 15332), True, 'import pandas as pd\n'), ((6260, 6290), 'numpy.vstack', 'np.vstack', (['[rebars_YZ, [Y, Z]]'], {}), '([rebars_YZ, [Y, Z]])\n', (6269, 6290), True, 'import numpy as np\n'), ((8618, 8642), 'os.path.getsize', 'os.path.getsize', (['strain1'], {}), '(strain1)\n', (8633, 8642), False, 'import os\n'), ((8668, 8710), 'pandas.read_csv', 'pd.read_csv', (['strain1'], {'sep': '""" """', 'header': 'None'}), "(strain1, sep=' ', header=None)\n", (8679, 8710), True, 'import pandas as pd\n'), ((8968, 8992), 'os.path.getsize', 'os.path.getsize', (['strain2'], {}), '(strain2)\n', (8983, 8992), False, 'import os\n'), ((9018, 9060), 'pandas.read_csv', 'pd.read_csv', (['strain2'], {'sep': '""" """', 'header': 'None'}), "(strain2, sep=' ', header=None)\n", (9029, 9060), True, 'import pandas as pd\n'), ((9315, 9339), 'os.path.getsize', 'os.path.getsize', (['strain3'], {}), '(strain3)\n', (9330, 9339), False, 'import os\n'), ((9365, 
9407), 'pandas.read_csv', 'pd.read_csv', (['strain3'], {'sep': '""" """', 'header': 'None'}), "(strain3, sep=' ', header=None)\n", (9376, 9407), True, 'import pandas as pd\n'), ((9662, 9686), 'os.path.getsize', 'os.path.getsize', (['strain4'], {}), '(strain4)\n', (9677, 9686), False, 'import os\n'), ((9712, 9754), 'pandas.read_csv', 'pd.read_csv', (['strain4'], {'sep': '""" """', 'header': 'None'}), "(strain4, sep=' ', header=None)\n", (9723, 9754), True, 'import pandas as pd\n'), ((10814, 10836), 'os.path.isfile', 'os.path.isfile', (['myfile'], {}), '(myfile)\n', (10828, 10836), False, 'import os\n'), ((11152, 11184), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': '(29)'}), '(size=29)\n', (11175, 11184), True, 'import numpy as np\n'), ((12618, 12643), 'os.path.getsize', 'os.path.getsize', (['strain21'], {}), '(strain21)\n', (12633, 12643), False, 'import os\n'), ((12673, 12716), 'pandas.read_csv', 'pd.read_csv', (['strain21'], {'sep': '""" """', 'header': 'None'}), "(strain21, sep=' ', header=None)\n", (12684, 12716), True, 'import pandas as pd\n'), ((12999, 13024), 'os.path.getsize', 'os.path.getsize', (['strain22'], {}), '(strain22)\n', (13014, 13024), False, 'import os\n'), ((13054, 13097), 'pandas.read_csv', 'pd.read_csv', (['strain22'], {'sep': '""" """', 'header': 'None'}), "(strain22, sep=' ', header=None)\n", (13065, 13097), True, 'import pandas as pd\n'), ((13375, 13400), 'os.path.getsize', 'os.path.getsize', (['strain23'], {}), '(strain23)\n', (13390, 13400), False, 'import os\n'), ((13430, 13473), 'pandas.read_csv', 'pd.read_csv', (['strain23'], {'sep': '""" """', 'header': 'None'}), "(strain23, sep=' ', header=None)\n", (13441, 13473), True, 'import pandas as pd\n'), ((13751, 13776), 'os.path.getsize', 'os.path.getsize', (['strain24'], {}), '(strain24)\n', (13766, 13776), False, 'import os\n'), ((13806, 13849), 'pandas.read_csv', 'pd.read_csv', (['strain24'], {'sep': '""" """', 'header': 'None'}), "(strain24, sep=' ', 
header=None)\n", (13817, 13849), True, 'import pandas as pd\n'), ((10858, 10875), 'os.remove', 'os.remove', (['myfile'], {}), '(myfile)\n', (10867, 10875), False, 'import os\n'), ((15105, 15127), 'os.path.isfile', 'os.path.isfile', (['myfile'], {}), '(myfile)\n', (15119, 15127), False, 'import os\n'), ((15153, 15170), 'os.remove', 'os.remove', (['myfile'], {}), '(myfile)\n', (15162, 15170), False, 'import os\n')] |
# Game of Life
# Program by: <NAME>
# <EMAIL>
# github.com/angeeranaser
# Project referenced from https://robertheaton.com/2018/07/20/project-2-game-of-life/
import numpy as np
import main
def test_dead_cells_no_neighbors():
  """An all-dead board has no cell with three live neighbours, so the
  next generation must be all dead as well."""
  board = np.array([[0] * 3 for _ in range(3)])
  unchanged = np.array([[0] * 3 for _ in range(3)])
  assert np.array_equal(unchanged, main.next_board(board))
def test_dead_cells_three_neighbors():
  """A dead cell with exactly three live neighbours must come alive."""
  # Cell (0, 0) is dead and touches three live cells.
  board = np.array([[0, 1, 0],
                    [1, 1, 0],
                    [0, 0, 0]])
  # Cell (0, 0) is born, producing a stable 2x2 block.
  wanted = np.array([[1, 1, 0],
                     [1, 1, 0],
                     [0, 0, 0]])
  assert np.array_equal(wanted, main.next_board(board))
def test_live_cells_few_neighbors():
  """A live cell with fewer than two live neighbours dies of loneliness."""
  # The live cell at row 1, column 2 has only one live neighbour.
  board = np.array([[1, 1, 0],
                    [1, 0, 1],
                    [0, 0, 0]])
  # That lonely cell dies; everything else is unchanged.
  wanted = np.array([[1, 1, 0],
                     [1, 0, 0],
                     [0, 0, 0]])
  assert np.array_equal(wanted, main.next_board(board))
def test_live_cells_many_neighbors():
  """A live cell with more than three live neighbours dies of overcrowding."""
  # The centre cell is alive with four live neighbours.
  board = np.array([[0, 1, 0],
                    [1, 1, 1],
                    [0, 1, 0]])
  # Centre dies; every corner is dead with exactly 3 neighbours and is born.
  wanted = np.array([[1, 1, 1],
                     [1, 0, 1],
                     [1, 1, 1]])
  assert np.array_equal(wanted, main.next_board(board))
def test_prettify(): # Is the output of the pretty-printer correct?
  """Check that main.prettify renders live cells as 'O' between '|' borders,
  one text line per board row."""
  init = np.array([
    [0, 1, 0],
    [1, 1, 1],
    [0, 1, 0]
  ])
  # Expected rendering of the plus-shaped pattern above.
  expected = '| O |\n' \
       '| O O O |\n' \
       '| O |\n'
  actual = main.prettify(init)
  assert expected == actual
| [
"main.next_board",
"main.prettify",
"numpy.array",
"numpy.array_equal"
] | [((298, 341), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (306, 341), True, 'import numpy as np\n'), ((435, 478), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (443, 478), True, 'import numpy as np\n'), ((571, 592), 'main.next_board', 'main.next_board', (['init'], {}), '(init)\n', (586, 592), False, 'import main\n'), ((605, 637), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (619, 637), True, 'import numpy as np\n'), ((746, 789), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 1, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 1, 0], [0, 0, 0]])\n', (754, 789), True, 'import numpy as np\n'), ((882, 925), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 1, 0], [0, 0, 0]]'], {}), '([[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n', (890, 925), True, 'import numpy as np\n'), ((1006, 1027), 'main.next_board', 'main.next_board', (['init'], {}), '(init)\n', (1021, 1027), False, 'import main\n'), ((1040, 1072), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (1054, 1072), True, 'import numpy as np\n'), ((1169, 1212), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[1, 1, 0], [1, 0, 1], [0, 0, 0]])\n', (1177, 1212), True, 'import numpy as np\n'), ((1317, 1360), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[1, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (1325, 1360), True, 'import numpy as np\n'), ((1434, 1455), 'main.next_board', 'main.next_board', (['init'], {}), '(init)\n', (1449, 1455), False, 'import main\n'), ((1468, 1500), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (1482, 1500), True, 'import numpy as np\n'), ((1599, 1642), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 1, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 1], [0, 1, 0]])\n', (1607, 1642), True, 'import numpy 
as np\n'), ((1747, 1790), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n', (1755, 1790), True, 'import numpy as np\n'), ((1918, 1939), 'main.next_board', 'main.next_board', (['init'], {}), '(init)\n', (1933, 1939), False, 'import main\n'), ((1952, 1984), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (1966, 1984), True, 'import numpy as np\n'), ((2068, 2111), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 1, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 1], [0, 1, 0]])\n', (2076, 2111), True, 'import numpy as np\n'), ((2266, 2285), 'main.prettify', 'main.prettify', (['init'], {}), '(init)\n', (2279, 2285), False, 'import main\n')] |
import numpy as np
from math import log, gamma
''' Gammaln function of scipy.special library'''
def gammaln(a):
  """Elementwise log of the absolute value of the gamma function.

  Pure-stdlib stand-in for ``scipy.special.gammaln``.

  Parameters
  ----------
  a : scalar or array_like
    Argument(s) of the gamma function.

  Returns
  -------
  ndarray
    Array with the same shape as ``a`` (a 0-d array for scalar input)
    holding ``log(|Gamma(a)|)``.

  Notes
  -----
  Fixes over the previous version:
  * scalar inputs no longer crash (the old code accessed ``a.shape``,
    which plain Python floats -- as passed by ``assess_dimension`` --
    do not have),
  * large arguments no longer overflow, because ``math.lgamma`` computes
    the logarithm directly instead of evaluating ``log(|gamma(x)|)``.
  """
  from math import lgamma  # module top-level only imports log/gamma
  arr = np.asarray(a, dtype=float)
  values = [lgamma(float(x)) for x in np.nditer(arr)]
  return np.array(values).reshape(arr.shape)
def assess_dimension(spectrum, rank, n_samples):
  """
  Compute the log-likelihood of a rank 'rank' dataset.
  The dataset is assumed to be embedded in gaussian noise of shape(n,
  dimf) having spectrum 'spectrum'.

  Parameters
  ----------
  spectrum : ndarray
    Eigenvalues of the data covariance; presumably sorted in decreasing
    order (not checked here) -- verify with callers.
  rank : int
    Candidate dimensionality; must lie in [1, n_features - 1].
  n_samples : int
    Number of samples the spectrum was estimated from.

  Returns
  -------
  float
    Log-likelihood of the data for the given rank, or -inf when the
    rank-th eigenvalue is numerically zero.

  Notes
  -----
  Appears to follow Minka's Bayesian model selection for PCA (same shape
  as scikit-learn's ``_assess_dimension``) -- confirm against that
  reference before relying on the individual terms below.
  """
  n_features = spectrum.shape[0]
  if not 1 <= rank < n_features:
    raise ValueError("The tested rank should be in [1, n_features - 1]")
  eps = 1e-15
  if spectrum[rank - 1] < eps:
    # The smallest retained eigenvalue is numerically zero: this rank is
    # not supported by the data.
    return -np.inf
  pu = -rank * log(2.)
  for i in range(1, rank + 1):
    # NOTE(review): gammaln receives a plain Python float here, so it must
    # accept scalar input.
    pu += (gammaln((n_features - i + 1) / 2.) - log(np.pi) *
        (n_features - i + 1) / 2.)
  # Log-likelihood contribution of the retained eigenvalues.
  pl = np.sum(np.log(spectrum[:rank]))
  pl = -pl * n_samples / 2.
  # v: average variance of the discarded (noise) subspace, floored at eps.
  v = max(eps, np.sum(spectrum[rank:]) / (n_features - rank))
  pv = -np.log(v) * n_samples * (n_features - rank) / 2.
  # m: number of free parameters of the rank-'rank' model.
  m = n_features * rank - rank * (rank + 1.) / 2.
  pp = log(2. * np.pi) * (m + rank) / 2.
  # pa: pairwise eigenvalue term; noise eigenvalues are replaced by v.
  pa = 0.
  spectrum_ = spectrum.copy()
  spectrum_[rank:n_features] = v
  for i in range(rank):
    for j in range(i + 1, len(spectrum)):
      pa += log((spectrum[i] - spectrum[j]) *
          (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
  ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
  return ll
def infer_dimension(spectrum, n_samples):
  """
  Infer the intrinsic dimension of a dataset from its eigenvalue spectrum.

  Scores every candidate rank with ``assess_dimension`` and returns the
  best one; the result is always in [1, n_features - 1].
  """
  n_features = spectrum.shape[0]
  scores = np.empty_like(spectrum)
  # Rank 0 is never an acceptable answer, so give it the worst score.
  scores[0] = -np.inf
  for candidate in range(1, n_features):
    scores[candidate] = assess_dimension(spectrum, candidate, n_samples)
  return scores.argmax()
class PCA_utils():
  # Mixin of post-fit utilities for a PCA implementation.  Relies on the
  # host class providing (set during fitting elsewhere): self.fitted,
  # self.components, self.explained_variances, self.noise_variance,
  # self.whiten, self.n_components and self.mean.
  def get_covariance(self):
    """Compute data covariance with the generative model.
    ``cov = components.T * S**2 * components + sigma2 * eye(n_features)``
    where S**2 contains the explained variances, and sigma2 contains the
    noise variances.

    Raises
    ------
    ValueError
      If the model has not been fitted yet.
    """
    if self.fitted is False:
      raise ValueError("The model should be fitted first.")
    components = self.components
    exp_var = self.explained_variances
    if self.whiten:
      # Whitening rescaled the components; undo it before rebuilding cov.
      components = components * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance, 0.)
    cov = np.dot(components.T * exp_var_diff, components)
    # Add the isotropic noise term to the diagonal in place.
    cov.flat[::len(cov) + 1] += self.noise_variance
    return cov
  def get_precision(self):
    """Compute data precision matrix with the generative model.
    Equals the inverse of the covariance but computed with
    the matrix inversion lemma for efficiency.

    Raises
    ------
    ValueError
      If the model has not been fitted yet.
    """
    if self.fitted is False:
      raise ValueError("The model should be fitted first.")
    n_features = self.components.shape[1]
    # handle corner cases
    if self.n_components == 0:
      # No components: covariance is isotropic noise, precision is trivial.
      return np.eye(n_features) / self.noise_variance
    if self.n_components == n_features:
      # Full rank: the lemma gives no savings, invert directly.
      return np.linalg.inv(self.get_covariance())
    # Get precision using matrix inversion lemma
    components = self.components
    exp_var = self.explained_variances
    if self.whiten:
      components = components * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance, 0.)
    precision = np.dot(components, components.T) / self.noise_variance
    precision.flat[::len(precision) + 1] += 1. / exp_var_diff
    precision = np.dot(components.T, np.dot(np.linalg.inv(precision),
                                        components))
    precision /= -(self.noise_variance ** 2)
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance
    return precision
  def transform(self, X):
    """Apply dimensionality reduction to X.
    X is projected on the first principal components previously extracted
    from a training set.

    Raises
    ------
    ValueError
      If the model has not been fitted yet.
    """
    if self.fitted is False:
      raise ValueError("The model should be fitted first.")
    # Centre the data with the training mean before projecting.
    X = X - self.mean
    return np.dot(X, self.components.T)
  def inverse_transform(self, X):
    """Transform data back to its original space.
    In other words, return an input X_original whose transform would be X.
    Note- If whitening is enabled, inverse_transform will compute the
    exact inverse operation, which includes reversing whitening.

    Raises
    ------
    ValueError
      If the model has not been fitted yet.
    """
    if self.fitted is False:
      raise ValueError("The model should be fitted first.")
    if self.whiten:
      return np.dot(X, np.sqrt(self.explained_variances[:, np.newaxis]) *
            self.components) + self.mean
    else:
      return np.dot(X, self.components) + self.mean
| [
"numpy.absolute",
"numpy.maximum",
"numpy.log",
"numpy.sum",
"numpy.eye",
"numpy.nditer",
"numpy.empty_like",
"math.gamma",
"numpy.array",
"numpy.linalg.inv",
"numpy.dot",
"math.log",
"numpy.sqrt"
] | [((140, 152), 'numpy.nditer', 'np.nditer', (['a'], {}), '(a)\n', (149, 152), True, 'import numpy as np\n'), ((1718, 1741), 'numpy.empty_like', 'np.empty_like', (['spectrum'], {}), '(spectrum)\n', (1731, 1741), True, 'import numpy as np\n'), ((233, 247), 'numpy.absolute', 'np.absolute', (['b'], {}), '(b)\n', (244, 247), True, 'import numpy as np\n'), ((733, 741), 'math.log', 'log', (['(2.0)'], {}), '(2.0)\n', (736, 741), False, 'from math import log, gamma\n'), ((897, 920), 'numpy.log', 'np.log', (['spectrum[:rank]'], {}), '(spectrum[:rank])\n', (903, 920), True, 'import numpy as np\n'), ((2536, 2582), 'numpy.maximum', 'np.maximum', (['(exp_var - self.noise_variance)', '(0.0)'], {}), '(exp_var - self.noise_variance, 0.0)\n', (2546, 2582), True, 'import numpy as np\n'), ((2596, 2643), 'numpy.dot', 'np.dot', (['(components.T * exp_var_diff)', 'components'], {}), '(components.T * exp_var_diff, components)\n', (2602, 2643), True, 'import numpy as np\n'), ((3563, 3609), 'numpy.maximum', 'np.maximum', (['(exp_var - self.noise_variance)', '(0.0)'], {}), '(exp_var - self.noise_variance, 0.0)\n', (3573, 3609), True, 'import numpy as np\n'), ((4368, 4396), 'numpy.dot', 'np.dot', (['X', 'self.components.T'], {}), '(X, self.components.T)\n', (4374, 4396), True, 'import numpy as np\n'), ((171, 179), 'math.gamma', 'gamma', (['i'], {}), '(i)\n', (176, 179), False, 'from math import log, gamma\n'), ((189, 200), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (197, 200), True, 'import numpy as np\n'), ((969, 992), 'numpy.sum', 'np.sum', (['spectrum[rank:]'], {}), '(spectrum[rank:])\n', (975, 992), True, 'import numpy as np\n'), ((1136, 1152), 'math.log', 'log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (1139, 1152), False, 'from math import log, gamma\n'), ((3629, 3661), 'numpy.dot', 'np.dot', (['components', 'components.T'], {}), '(components, components.T)\n', (3635, 3661), True, 'import numpy as np\n'), ((1339, 1415), 'math.log', 'log', (['((spectrum[i] - spectrum[j]) * 
(1.0 / spectrum_[j] - 1.0 / spectrum_[i]))'], {}), '((spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]))\n', (1342, 1415), False, 'from math import log, gamma\n'), ((1438, 1452), 'math.log', 'log', (['n_samples'], {}), '(n_samples)\n', (1441, 1452), False, 'from math import log, gamma\n'), ((1499, 1513), 'math.log', 'log', (['n_samples'], {}), '(n_samples)\n', (1502, 1513), False, 'from math import log, gamma\n'), ((2481, 2512), 'numpy.sqrt', 'np.sqrt', (['exp_var[:, np.newaxis]'], {}), '(exp_var[:, np.newaxis])\n', (2488, 2512), True, 'import numpy as np\n'), ((3172, 3190), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (3178, 3190), True, 'import numpy as np\n'), ((3508, 3539), 'numpy.sqrt', 'np.sqrt', (['exp_var[:, np.newaxis]'], {}), '(exp_var[:, np.newaxis])\n', (3515, 3539), True, 'import numpy as np\n'), ((3798, 3822), 'numpy.linalg.inv', 'np.linalg.inv', (['precision'], {}), '(precision)\n', (3811, 3822), True, 'import numpy as np\n'), ((5014, 5040), 'numpy.dot', 'np.dot', (['X', 'self.components'], {}), '(X, self.components)\n', (5020, 5040), True, 'import numpy as np\n'), ((826, 836), 'math.log', 'log', (['np.pi'], {}), '(np.pi)\n', (829, 836), False, 'from math import log, gamma\n'), ((1026, 1035), 'numpy.log', 'np.log', (['v'], {}), '(v)\n', (1032, 1035), True, 'import numpy as np\n'), ((4875, 4923), 'numpy.sqrt', 'np.sqrt', (['self.explained_variances[:, np.newaxis]'], {}), '(self.explained_variances[:, np.newaxis])\n', (4882, 4923), True, 'import numpy as np\n')] |
"""
Classes for assigning configurations in a batch to threads
Created on Feb 12, 2020
@author: <NAME> (<EMAIL>)
"""
from logging import getLogger
from math import isinf
from numpy import array
log = getLogger(__name__)
class BatchComposer(object):
  """Class for assigning configurations in a batch to threads

  Attributes
  ----------
  technique : SearchTechniqueBase
    Root search technique
  models : list of Model
    Models to predict build time for each fidelity
  parallelism : int
    Number of measurement threads
  lookahead : DesiredResult
    Configuration that was already selected, but has not been attempted yet
  """
  # The duration of the first build
  first_build_time = 6541.0

  # Minimum waste decrease for new configuration to be added to batch
  min_waste_dec = 1e-3

  def __init__(self, technique, models, parallelism):
    self.technique = technique
    self.models = models
    self.parallelism = parallelism
    self.lookahead = None
    # All models predict the same metric.
    for model in self.models:
      model.metric = "build_time"

  def add_result(self, result):
    """This callback is invoked by the search driver to report new results.

    Parameters
    ----------
    result : Result
      Result
    """
    # Failed builds report an infinite build time and would corrupt the
    # models, so they are skipped.
    if not isinf(result.build_time):
      fidelity = result.configuration.fidelity
      fidelity = 1 if fidelity == 0 else fidelity
      self.models[fidelity - 1].add_result(result)

  def compose_batch(self):
    """Select one or more configurations for each thread.

    Returns
    -------
    list of DesiredResult
      A list with desired results. The thread assignment is given by the
      thread attribute of each desired result.
    """
    # With a single thread there is nothing to pack.
    if self.parallelism == 1:
      dr = self.technique.desired_result()
      dr.thread = 0
      return [dr]
    for model in self.models:
      model.train()
    done = False
    cfgs = []
    # Start with one configuration per thread (or fewer if the technique
    # runs out of candidates).
    for i in range(self.parallelism):
      if self.lookahead:
        dr = self.lookahead
        self.lookahead = None
      else:
        dr = self.technique.desired_result()
      if dr is None or dr is False:
        done = True
        break
      time = self.predict(dr)
      cfgs.append((time, dr))
    bins, bin_size = self.binary_search(cfgs)
    total_time = sum(time for time, _ in cfgs)
    prev_waste = 1.0 - total_time / self.parallelism / bin_size
    # Keep adding configurations while doing so fills the idle gaps, i.e.
    # reduces the wasted fraction by at least min_waste_dec.
    while not done:
      dr = self.technique.desired_result()
      if dr is None or dr is False:
        break
      # Remember the candidate so it is not lost if it is rejected below.
      self.lookahead = dr
      time = self.predict(dr)
      new_cfgs = cfgs + [(time, dr)]
      new_bins, new_bin_size = self.binary_search(new_cfgs)
      total_time = sum(time for time, _ in new_cfgs)
      waste = 1.0 - total_time / self.parallelism / new_bin_size
      if prev_waste - waste < self.min_waste_dec:
        break
      cfgs = new_cfgs
      bins = new_bins
      bin_size = new_bin_size
      prev_waste = waste
    # Flatten the bin assignment into a list, tagging each desired result
    # with the index of the thread that should measure it.
    desired_results = []
    for thread, grp in enumerate(bins):
      for dr in grp:
        dr.thread = thread
        desired_results.append(dr)
    assignment = ", ".join("{}: {}".format(dr.id, dr.thread)
                           for dr in desired_results)
    log.info("Batch assignment: %s", assignment)
    log.info("Expected batch duration: %e s, Time wasted: %f%%",
             bin_size, 100.0 * prev_waste)
    return desired_results

  def predict(self, desired_result):
    """Predict the build time for a given configuration.

    Parameters
    ----------
    desired_result : DesiredResult
      Desired result with configuration to be predicted

    Returns
    -------
    float
      Build time
    """
    fidelity = desired_result.configuration.fidelity
    fidelity = 1 if fidelity == 0 else fidelity
    model = self.models[fidelity - 1]
    # Fall back on lower fidelity if no results are available yet.
    if len(model.results) == 0 and fidelity > 1:
      model = self.models[fidelity - 2]
    if len(model.results) > 0:
      cfg_vec = model.get_vec(desired_result.configuration.data)
      time = model.predict(array(cfg_vec).reshape(1, -1))[0][0]
    else:
      time = self.first_build_time
    # Ensure that the build time is positive to avoid issues with bin packing
    # later.
    time = max(time, 1e-8)
    log.info("Predicted build time: %e", time)
    return time

  def binary_search(self, cfgs):
    """Find smallest bin size for which all configurations fit in threads.

    Parameters
    ----------
    cfgs : list of (float, DesiredResult)
      Configurations that we wish to test

    Returns
    -------
    list of list of DesiredResult
      Bin assignment. Each sublist represents all desired results for a
      thread.
    float
      Smallest bin size for which all configurations fit
    """
    # Exponential search for an upper bound that fits.
    low = 0.0
    high = 1.0
    while True:
      try:
        bins = self.best_fit(cfgs, high)
        break
      except NoFitException:
        high *= 2.0
    # Bisect the interval down to an absolute tolerance of 1e-8; 'high'
    # always remains a feasible bin size.
    while True:
      curr = (high + low) / 2.0
      if curr < low + 1e-8 or curr > high - 1e-8:
        break
      try:
        bins = self.best_fit(cfgs, curr)
        high = curr
      except NoFitException:
        low = curr
    bins = self.best_fit(cfgs, high)
    return bins, high

  def best_fit(self, cfgs, bin_size):
    """Assign configurations of bins, minimizing number of bins needed

    Parameters
    ----------
    cfgs : list of (float, DesiredResult)
      Configurations that we wish to test
    bin_size : float
      Size of each bin

    Returns
    -------
    list of list of DesiredResult
      Bin assignment. Each sublist represents all desired results for a
      thread.

    Raises
    ------
    NoFitException
      If the configurations do not all fit at the given bin size.

    Notes
    -----
    This algorithm is a solution to the bin packing problem. The strategy
    that we use is "Best Fit Decreasing". It is not optimal, but the number of
    bins is guaranteed to be no more than 11/9 OPT + 4, where OPT is the
    minimum number of bins.
    """
    # Sort on the predicted time only.  A plain tuple sort would fall back
    # to comparing the DesiredResult objects whenever two predicted times
    # are equal, which raises TypeError for unorderable objects.
    cfgs.sort(key=lambda cfg: cfg[0], reverse=True)
    bins = [[] for i in range(self.parallelism)]
    sizes = [0.0] * self.parallelism
    for time, dr in cfgs:
      # Fullest bins first, so each item lands in the tightest bin that can
      # still hold it.  Sort on size only, for the same reason as above.
      combined = sorted(zip(sizes, bins), key=lambda pair: pair[0],
                        reverse=True)
      sizes = [size for size, _ in combined]
      bins = [b for _, b in combined]
      for i in range(self.parallelism):
        new_size = sizes[i] + time
        if new_size <= bin_size:
          bins[i].append(dr)
          sizes[i] = new_size
          break
      else:
        raise NoFitException()
    return bins
class NoFitException(Exception):
    """Raised when ``BatchComposer.best_fit`` cannot place every item."""
| [
"math.isinf",
"numpy.array",
"logging.getLogger"
] | [((203, 222), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'from logging import getLogger\n'), ((1234, 1258), 'math.isinf', 'isinf', (['result.build_time'], {}), '(result.build_time)\n', (1239, 1258), False, 'from math import isinf\n'), ((4035, 4049), 'numpy.array', 'array', (['cfg_vec'], {}), '(cfg_vec)\n', (4040, 4049), False, 'from numpy import array\n')] |
import matplotlib.pyplot as plt
import os
import pickle
import math
import datetime
import argparse
import csv
import re
from enum import Enum
class Tally():
    """Count occurrences of hashable items and report them by frequency."""

    def __init__(self):
        # Maps item -> number of times it has been recorded.
        self.data = {}

    def record(self, e):
        """Increment the count for item ``e`` (implicitly starting at 0)."""
        # dict.get avoids the membership-test/lookup double access of the
        # previous `if e in self.data.keys()` form.
        self.data[e] = self.data.get(e, 0) + 1

    def sorted_list(self):
        """Return ``(item, count)`` pairs sorted by descending count.

        Ties preserve insertion order because ``sorted`` is stable.
        """
        return sorted(self.data.items(), key=lambda x: x[1], reverse=True)
class MPT_Mode(Enum):
    """States of the .mpt file parser's line-scanning state machine."""

    Unknown = 0
    FirstLine = 1
    NbHeader = 2
    JumpToData = 3
    Units = 4
    Data = 5
    Junk = 6
    CSVFormat = 7
    CollectData = 8
import numpy
import copy
def is_number(string, call=float):
    """Return True if ``call`` (default ``float``) can convert ``string``.

    Parameters
    ----------
    string : object
        Candidate value, typically a token read from a data file.
    call : callable
        Converter to attempt, e.g. ``float`` or ``int``.

    Returns
    -------
    bool
        True when the conversion succeeds, False otherwise.
    """
    try:
        call(string)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string inputs such as None, which previously
        # propagated out of this helper; "not a number" is the safe answer.
        return False
class Mode(Enum):
    """Line classifications used by the .FRA file parser."""

    Unknown = 0
    Header = 1
    Tags = 2
    Units = 3
    Data = 4
    Junk = 5
# Literal markers used to classify lines of a .FRA file.
known_tags = ["FREQUENCY SWEEP","USER","ID","DATE","TIME","CALIBRATION FILE","VOLTAGE","CYCLE"]
known_header = ["AC IMPEDANCE TEST"]
known_units = ["TIME (s)", "FREQ (Hz)","Z(Re) (Ohm)", "-Z(Im) (Ohm)","VOLTS (V)"]
# Column index in the units/data rows -> short name kept in the output dict.
retained_data_cols = {1:"FREQ", 2:"Re[Z]",3:"Im[Z]",4:"VOLTS"}
# for now we assume that Im[Z] was given with a negative sign
def parse_fra_file(filename):
    """Parse a .FRA impedance file into header, tags, data and junk.

    The file is scanned line by line with a small state machine (``Mode``):
    each line is first classified (header marker, "KEY: value" tag, units
    row, tab-separated numeric data, or junk) and then re-processed
    (``redo``) by the branch for that state.

    Returns
    -------
    dict
        ``{"header": list, "tags": dict, "data": {column name -> numpy
        column}, "junk": list of unclassified lines}``.
    """
    my_header = []
    my_tags = {}
    my_units = {}
    my_data = []
    my_junk = []
    with open(filename,'r') as myfile:
        header_content = ""
        mode = Mode.Unknown
        redo = False
        # Hard cap of 10000 lines per file.
        for i in range(10000):
            if redo:
                # Reuse the line that was just classified.
                redo = False
            else:
                this_line = myfile.readline()
                if i < 10:
                    header_content+= this_line
            if this_line == '':
                break
            if mode == Mode.Unknown:
                # Classification pass: decide the state, set redo, loop.
                # test for header
                some_header = [this_line.startswith(head) for head in known_header]
                found_header = False
                for some_head in some_header:
                    found_header = found_header or some_head
                if found_header:
                    mode = Mode.Header
                    redo = True
                    continue
                # test for tags
                x = this_line.split(':')
                if len(x) > 1:
                    if x[0] in known_tags:
                        mode = Mode.Tags
                        redo = True
                        continue
                # test for units
                some_units = [unit in this_line for unit in known_units]
                found_all_units = True
                for some_unit in some_units:
                    found_all_units = found_all_units and some_unit
                if found_all_units:
                    mode = Mode.Units
                    redo = True
                    continue
                # test for data
                x = this_line.split('\n')[0].split('\t')
                some_data = [is_number(x_i) for x_i in x]
                all_numbers = True
                for some_datum in some_data:
                    all_numbers = all_numbers and some_datum
                if all_numbers:
                    mode = Mode.Data
                    redo = True
                    continue
                mode = Mode.Junk
                redo = True
                continue
            if mode == Mode.Header:
                for head in known_header:
                    if this_line.startswith(head):
                        my_header.append(head)
                mode= Mode.Unknown
                continue
            if mode == Mode.Tags:
                # "KEY: value" line; only the first occurrence of each known
                # tag is kept — repeats are demoted to junk.
                x = this_line.split('\n')[0].split(':', maxsplit=1)
                current_tag = known_tags.index(x[0])
                content = x[1]
                if known_tags[current_tag] in my_tags.keys():
                    mode = Mode.Junk
                    redo = True
                    continue
                clean_content = content.replace('\t','').replace(' ','')
                if known_tags[current_tag] == 'DATE':
                    if not all([is_number(d,call=int) for d in clean_content.split('/')]):
                        mode = Mode.Junk
                        redo = True
                        continue
                    parsed_date = [int(d) for d in clean_content.split('/')] # month\day\year
                    my_tags[known_tags[current_tag]] = copy.deepcopy(parsed_date)
                elif known_tags[current_tag] == 'TIME':
                    if not all([is_number(d,call=int) for d in clean_content.split(':')]):
                        mode = Mode.Junk
                        redo = True
                        continue
                    parsed_time = [int(d) for d in clean_content.split(':')] # hour:minute:second
                    my_tags[known_tags[current_tag]] = copy.deepcopy(parsed_time)
                elif known_tags[current_tag] == 'VOLTAGE':
                    if not is_number(clean_content):
                        mode = Mode.Junk
                        redo = True
                        continue
                    parsed_voltage = float(clean_content)
                    my_tags[known_tags[current_tag]] = copy.deepcopy(parsed_voltage)
                elif known_tags[current_tag] == 'CYCLE':
                    if not is_number(clean_content,call=int):
                        mode = Mode.Junk
                        redo = True
                        continue
                    parsed_cycle = int(clean_content)
                    my_tags[known_tags[current_tag]] = copy.deepcopy(parsed_cycle)
                else:
                    # Unstructured tags are stored as whitespace-stripped text.
                    my_tags[known_tags[current_tag]] = content.replace('\t','').replace(' ','')
                mode= Mode.Unknown
                continue
            if mode == Mode.Units:
                # Record which column each retained unit lives in.
                x = this_line.split('\n')[0].split('\t')
                x = [x_i.replace('\t','') for x_i in x]
                # map where the units are
                unit_positions = [known_units.index(x_i) for x_i in x]
                for retained_data_col in retained_data_cols.keys():
                    my_units[retained_data_cols[retained_data_col]] = unit_positions[retained_data_col]
                mode = Mode.Unknown
                continue
            if mode == Mode.Data:
                x = this_line.split('\n')[0].split('\t')
                some_data = [float(x_i) for x_i in x]
                my_data.append( copy.deepcopy(some_data))
                mode = Mode.Unknown
                continue
            if mode == Mode.Junk:
                my_junk += copy.deepcopy([this_line])
                mode = Mode.Unknown
                continue
    my_data = numpy.array(my_data)
    my_data_shape = numpy.shape(my_data)
    my_split_data = {}
    if not 'Im[Z]' in my_units.keys():
        # No units row was found; fall back to a fixed column layout.
        if len(my_units) > 0 or len(my_data) >0:
            print("my file:, ", filename)
            print("my units: ", my_units)
            print("my data: ", my_data)
        my_units = {'TIME':0, 'FREQ':1, 'Re[Z]':2, 'Im[Z]':3, 'VOLTS':4}
    # NOTE(review): `my_data` is a numpy array here, so `my_data == []` is an
    # element-wise comparison (deprecated / ambiguous for non-empty arrays) —
    # confirm this behaves as an emptiness test; `my_data.size > 0` would be
    # the unambiguous form.
    if not my_data == [] and len(my_data_shape) > 1:
        # Flip the sign of Im[Z] (file stores -Im[Z]; see module note above).
        for i in range(len(my_data)):
            my_data[i,my_units["Im[Z]"]] = -my_data[i,my_units["Im[Z]"]]
        for key in my_units.keys():
            if my_units[key] >= my_data_shape[1]:
                if len(my_data) >0:
                    print("my file:, ", filename)
                    print("my_data: ", my_data)
            else:
                my_split_data[key] = my_data[:, my_units[key]]
    else:
        if len(my_data) > 0:
            print("my file:, ", filename)
            print("my_data: ", my_data)
    parsed_data = {"header":copy.deepcopy(my_header), "tags":copy.deepcopy(my_tags), "data":copy.deepcopy(my_split_data), "junk":copy.deepcopy(my_junk)}
    return parsed_data
# Script entry point: scan a data directory for impedance spectra
# (.FRA, .mpt, .txt), parse them, de-duplicate, and pickle the results.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    #parser.add_argument('--file_types', choices=['fra', 'eis'], default='fra')
    #parser.add_argument('--finetuned', type=bool, default=False)
    #parser.add_argument('--finetune', dest='finetuned', action='store_true')
    #parser.add_argument('--no-finetune', dest='finetuned', action='store_false')
    #parser.set_defaults(finetuned=True)
    #File paths
    parser.add_argument('--data_dir', default='RealData')
    #parser.add_argument('--fra_no_finetune_file', default="results_of_inverse_model.file")
    #parser.add_argument('--fra_finetune_file', default="results_fine_tuned_with_adam_{}.file")
    #parser.add_argument('--eis_no_finetune_file', default="results_of_inverse_model_eis.file")
    #parser.add_argument('--eis_finetune_file', default="results_eis_fine_tuned_with_adam_{}.file")
    #parser.add_argument('--steps', type=int, default=1000)
    args = parser.parse_args()
    path_to_spectra = os.path.join(".", args.data_dir)
    # tally the file extentions
    tally_extensions = Tally()
    for root, dirs, filenames in os.walk(path_to_spectra):
        for file in filenames:
            extension = file.split('.')[-1]
            tally_extensions.record(extension)
    print(tally_extensions.sorted_list())
    # Collect .FRA files and EC-Lab .mpt/.txt exports separately.
    all_mpt_filenames = []
    all_fra_filenames = []
    for root, dirs, filenames in os.walk(path_to_spectra):
        for file in filenames:
            if file.endswith('.FRA'):
                all_fra_filenames.append(os.path.join(root, file))
            if file.endswith('.mpt') or file.endswith('.txt'):
                all_mpt_filenames.append(os.path.join(root, file))
    print('Number of fra files {}.'.format(len(all_fra_filenames)))
    print('Number of mpt files {}.'.format(len(all_mpt_filenames)))
    print_whole_file = False
    # Expected column order of the EC-Lab exports.
    unit_list = ['freq/Hz',
                 'Re(Z)/Ohm',
                 '-Im(Z)/Ohm',
                 '|Z|/Ohm',
                 'Phase(Z)/deg',
                 'time/s',
                 '<Ewe>/V']
    count = 0
    database_eis = {}
    # NOTE(review): all_datas and data_length_tally appear unused in the
    # remainder of this script — candidates for removal.
    all_datas = []
    data_length_tally = Tally()
    # Two passes over the .mpt files: pass 0 records files with a single
    # frequency sweep; pass 1 revisits multi-loop files (several decreasing
    # frequency segments) and records each loop as its own spectrum.
    for repeat_index in range(2):
        for filename in all_mpt_filenames:
            clean_multiple_loops = False
            with open(filename, 'r') as f:
                data = {}
                for u in unit_list:
                    data[u] = []
                if print_whole_file:
                    print(f.read())
                else:
                    all_lines = f.readlines()
                    # Tab-separated parser; switches to the CSV parser below
                    # if the first line announces the CSV variant.
                    my_mode = MPT_Mode.FirstLine
                    for i in range(len(all_lines)):
                        new_line = all_lines[i]
                        if my_mode == MPT_Mode.FirstLine:
                            if 'EC-Lab ASCII FILE' in new_line:
                                my_mode = MPT_Mode.NbHeader
                                continue
                            elif '"EC-Lab","ASCII","FILE"\n' in new_line:
                                my_mode = MPT_Mode.CSVFormat
                                break
                            else:
                                # Headerless file: the first line must itself
                                # be the column-title row.
                                split_line = new_line.split('\t')
                                expected = True
                                if len(unit_list) > len(split_line):
                                    expected = False
                                else:
                                    for index in range(len(unit_list)):
                                        if not (unit_list[index] in split_line[index] or '#NAME?' in split_line[index]):
                                            expected = False
                                            break
                                if not expected:
                                    print(repr(new_line))
                                    print(all_lines[:min(len(all_lines), 100)])
                                    break
                                else:
                                    my_mode = MPT_Mode.CollectData
                                    number_of_header_lines = 1
                                    continue
                        elif my_mode == MPT_Mode.NbHeader:
                            matchObj = re.match(r'Nb header lines : (\d{1,})',
                                                new_line)
                            if matchObj:
                                number_of_header_lines = int(matchObj.group(1))
                                my_mode = MPT_Mode.JumpToData
                                continue
                            else:
                                continue
                        elif my_mode == MPT_Mode.JumpToData:
                            # Skip header lines; validate the column-title row
                            # just before the data starts.
                            if i == number_of_header_lines-1:
                                split_line = new_line.split('\t')
                                expected = True
                                if len(unit_list) > len(split_line):
                                    expected = False
                                else:
                                    for index in range(len(unit_list)):
                                        if not (unit_list[index] in split_line[index] or '#NAME?' in split_line[index]):
                                            expected = False
                                            break
                                if not expected:
                                    print(repr(new_line))
                                else:
                                    my_mode=MPT_Mode.CollectData
                                continue
                            else:
                                matchObj = re.match(r'Number of loops : (\d{1,})',
                                                    new_line)
                                if matchObj:
                                    number_of_loops = int(matchObj.group(1))
                                    if not number_of_loops == 1:
                                        clean_multiple_loops = True
                                    continue
                                else:
                                    continue
                        elif my_mode == MPT_Mode.CollectData:
                            if i >= number_of_header_lines:
                                split_line = new_line.split('\t')
                                if len(unit_list) > len(split_line):
                                    print('new line', new_line)
                                    continue
                                else:
                                    vals = []
                                    for index in range(len(unit_list)):
                                        try:
                                            val = float(split_line[index])
                                        except ValueError:
                                            # mm:ss.ffff time stamps are not
                                            # numeric; store 0 for them.
                                            if re.match(r'(\d{1,2}):(\d{1,2})\.(\d{1,4})', split_line[index]):
                                                val = 0.
                                            else:
                                                print("newline: ", new_line)
                                                break
                                        vals.append(val)
                                    if len(vals) < len(unit_list):
                                        continue
                                    for index in range(len(unit_list)):
                                        data[unit_list[index]].append(vals[index])
                                    continue
                            else:
                                continue
                    if my_mode == MPT_Mode.NbHeader:
                        print("didn't find number of header lines.")
                        print(all_lines[:min(len(all_lines), 100)])
                    elif my_mode == MPT_Mode.CSVFormat:
                        # CSV variant: same state machine, but each line is
                        # already a list of fields from csv.reader.
                        with open(filename, newline='') as f:
                            all_lines= list(csv.reader(f))
                        my_mode = MPT_Mode.FirstLine
                        for i in range(len(all_lines)):
                            new_line = all_lines[i]
                            if my_mode == MPT_Mode.FirstLine:
                                if ['EC-Lab', 'ASCII', 'FILE'] == new_line:
                                    my_mode = MPT_Mode.NbHeader
                                    continue
                                else:
                                    split_line = new_line
                                    expected = True
                                    if len(unit_list) > len(split_line):
                                        expected = False
                                    else:
                                        for index in range(len(unit_list)):
                                            if not (unit_list[index] in split_line[index] or '#NAME?' in split_line[index]):
                                                expected = False
                                                break
                                    if not expected:
                                        print(repr(new_line))
                                        print(" didn't find.")
                                        print(all_lines[:min(len(all_lines), 100)])
                                        break
                                    else:
                                        my_mode = MPT_Mode.CollectData
                                        continue
                            elif my_mode == MPT_Mode.NbHeader:
                                if len(new_line) == 5 and new_line[0]=='Nb' and \
                                        new_line[1]=='header' and new_line[2]=='lines' and \
                                        new_line[3]==':':
                                    number_of_header_lines = int(new_line[4])
                                    my_mode = MPT_Mode.JumpToData
                                    continue
                                else:
                                    continue
                            elif my_mode == MPT_Mode.JumpToData:
                                if i == number_of_header_lines - 1:
                                    split_line = new_line
                                    expected = True
                                    if len(unit_list) > len(split_line):
                                        expected = False
                                    else:
                                        for index in range(len(unit_list)):
                                            if not (unit_list[index] in split_line[index] or '#NAME?' in split_line[index]):
                                                expected = False
                                                break
                                    if not expected:
                                        print(repr(new_line))
                                    else:
                                        my_mode = MPT_Mode.CollectData
                                    continue
                                else:
                                    continue
                            elif my_mode == MPT_Mode.CollectData:
                                if i >= number_of_header_lines:
                                    split_line = new_line
                                    if len(unit_list) > len(split_line):
                                        print('new line', new_line)
                                        continue
                                    else:
                                        vals = []
                                        for index in range(len(unit_list)):
                                            try:
                                                val = float(split_line[index])
                                            except ValueError:
                                                if re.match(r'(\d{1,2}):(\d{1,2})\.(\d{1,4})', split_line[index]):
                                                    val = 0.
                                                else:
                                                    print("newline: ", new_line)
                                                    break
                                            vals.append(val)
                                        if len(vals) < len(unit_list):
                                            continue
                                        for index in range(len(unit_list)):
                                            data[unit_list[index]].append(vals[index])
                                        continue
                                else:
                                    continue
                            else:
                                continue
                if my_mode == MPT_Mode.CollectData:
                    '''
                    ['freq/Hz',
                     'Re(Z)/Ohm',
                     '-Im(Z)/Ohm',
                     '|Z|/Ohm',
                     'Phase(Z)/deg',
                     'time/s',
                     '<Ewe>/V']
                    '''
                    # Work in log angular frequency; the file stores -Im(Z).
                    log_freq_ = numpy.log(2 * math.pi * numpy.array(data['freq/Hz']))
                    re_z_ = numpy.array(data['Re(Z)/Ohm'])
                    im_z_ = -numpy.array(data['-Im(Z)/Ohm'])
                    if not len(log_freq_) < 10:
                        if True:#clean_multiple_loops:
                            # Split the sweep wherever frequency increases
                            # again (start of a new loop).
                            turning_points = [0]
                            for index in range(len(log_freq_) - 1):
                                if log_freq_[index] < log_freq_[index+1]:
                                    turning_points.append(index+1)
                            # Multi-loop files are deferred to pass 1.
                            if len(turning_points) > 1 and repeat_index == 0:
                                continue
                            turning_points.append(len(log_freq_))
                            log_freq__ = log_freq_
                            re_z__ = re_z_
                            im_z__ = im_z_
                            for index in range(len(turning_points)-1):
                                log_freq_ = log_freq__[turning_points[index]:turning_points[index+1]]
                                re_z_ = re_z__[turning_points[index]:turning_points[index + 1]]
                                im_z_ = im_z__[turning_points[index]:turning_points[index + 1]]
                                if not len(log_freq_) < 10:
                                    # Normalize to ascending frequency order.
                                    if log_freq_[0] > log_freq_[-1]:
                                        log_freq = numpy.flip(log_freq_, axis=0)
                                        re_z = numpy.flip(re_z_, axis=0)
                                        im_z = numpy.flip(im_z_, axis=0)
                                    else:
                                        log_freq = log_freq_
                                        re_z = re_z_
                                        im_z = im_z_
                                    # Count trailing high-frequency points with
                                    # positive Im(Z).
                                    negs = 0
                                    for i in reversed(range(len(log_freq))):
                                        if im_z[i] < 0.0:
                                            break
                                        else:
                                            negs += 1
                                    tails = 0
                                    for i in list(reversed(range(len(log_freq))))[:-1]:
                                        '''
                                        we are looking for a pattern where as w -> infinity, -Im increases (Im decreases)
                                        '''
                                        if im_z[i] > im_z[i-1]:
                                            break
                                        else:
                                            tails += 1
                                    if True:
                                        if len(turning_points) == 2:
                                            new_filename = filename
                                        else:
                                            new_filename = filename.split('.mpt')[0] + '_loop{}.mpt'.format(index+1)
                                        # Robust mean voltage: trim 3 extremes
                                        # from each end when enough samples.
                                        voltages = numpy.array(sorted(numpy.array(data['<Ewe>/V'])))
                                        if len(voltages) > 30:
                                            voltages = voltages[3:-3]
                                        actual_voltage = numpy.mean(voltages)
                                        # Quality heuristics: huge magnitudes
                                        # (test_1) or noise comparable to the
                                        # signal (test_2/test_3) reject a file.
                                        test_1 = numpy.max(numpy.abs(re_z) + numpy.abs(im_z)) > 1e6
                                        mean_re_z = numpy.mean(re_z)
                                        mean_im_z = numpy.mean(im_z)
                                        mean_mag = math.sqrt(mean_re_z ** 2 + mean_im_z ** 2)
                                        length = int((len(re_z) - 1) / 3)
                                        mean_dev = numpy.mean(
                                            numpy.sort(numpy.sqrt((re_z[1:] - re_z[:-1]) ** 2 + (im_z[1:] - im_z[:-1]) ** 2))[
                                            -length:])
                                        test_2 = mean_dev >= mean_mag
                                        mean_re_z_ = re_z - numpy.mean(re_z)
                                        mean_im_z_ = im_z - numpy.mean(im_z)
                                        mean_mag_ = numpy.mean(numpy.sqrt(mean_re_z_ ** 2 + mean_im_z_ ** 2))
                                        test_3 = 2.*mean_dev >= mean_mag_
                                        test = test_1 or test_2 or test_3
                                        if not test:
                                            record = {'original_spectrum': (log_freq, re_z, im_z),
                                                      'freqs_with_negative_im_z': negs,
                                                      'freqs_with_tails_im_z': tails,
                                                      'actual_voltage': actual_voltage,
                                                      'recognized_metadata': False}
                                            # De-duplicate against everything
                                            # recorded so far (point-by-point
                                            # comparison within 1e-10).
                                            not_already_recorded = True
                                            for already_recorded in database_eis.keys():
                                                comp_record = database_eis[already_recorded]
                                                if (not record['freqs_with_negative_im_z'] == comp_record['freqs_with_negative_im_z'] or
                                                        not record['freqs_with_tails_im_z'] == comp_record[
                                                            'freqs_with_tails_im_z']):
                                                    continue
                                                if not len(record['original_spectrum'][0]) == len(comp_record['original_spectrum'][0]):
                                                    continue
                                                continue_q = False
                                                for index_i in range(len(log_freq)):
                                                    if ((record['original_spectrum'][0][index_i] - comp_record['original_spectrum'][0][index_i]) > 1e-10 or
                                                            (record['original_spectrum'][1][index_i] -
                                                             comp_record['original_spectrum'][1][index_i]) > 1e-10 or
                                                            (record['original_spectrum'][2][index_i] -
                                                             comp_record['original_spectrum'][2][index_i]) > 1e-10):
                                                        continue_q = True
                                                        break
                                                if continue_q:
                                                    continue
                                                else:
                                                    not_already_recorded = False
                                                    print('already recorded. was file {}'.format(already_recorded))
                                                    break
                                            if not_already_recorded:
                                                database_eis[new_filename] = record
                                                count += 1
                                                print('record added. was file {}'.format(new_filename))
                                            else:
                                                print('duplicate identified. was file {}'.format(new_filename))
                                            continue
                                        else:
                                            print('bad file: {}, t1:{}, t2:{}, t3:{}'.format( new_filename, test_1,test_2,test_3))
                                            continue
                                else:
                                    # NOTE(review): new_filename may be stale
                                    # or undefined here (set only in the long
                                    # branch above) — confirm intent.
                                    print('bad file: ', new_filename)
                                    continue
                    else:
                        print('bad file: ', filename)
                        continue
    print('number of properly processed eis files: {}'.format(count))
    with open(os.path.join(".", args.data_dir, "database_eis.file"), 'wb') as f:
        pickle.dump(database_eis, f, pickle.HIGHEST_PROTOCOL)
    # Second pipeline: parse every .FRA file, extract metadata from the file
    # name or tags, apply the same quality tests, de-duplicate, and record.
    database = {}
    count = 0
    empty_parse = 0
    print('Number of files {}.'.format(len(all_fra_filenames)))
    for filename in all_fra_filenames:
        dats = parse_fra_file(filename)
        if 'data' in dats.keys() and 'FREQ' in dats['data'].keys() and 'Re[Z]' in dats['data'].keys() and 'Im[Z]' in dats['data'].keys() :
            # Log angular frequency, ascending order.
            log_freq_ = numpy.log(2* math.pi * numpy.array(dats['data']['FREQ']))
            re_z_ = numpy.array(dats['data']['Re[Z]'])
            im_z_ = numpy.array(dats['data']['Im[Z]'])
            if log_freq_[0] > log_freq_[-1]:
                log_freq = numpy.flip(log_freq_, axis=0)
                re_z = numpy.flip(re_z_, axis=0)
                im_z = numpy.flip(im_z_, axis=0)
            else:
                log_freq = log_freq_
                re_z = re_z_
                im_z = im_z_
            # Count trailing points with positive Im(Z).
            negs = 0
            for i in reversed(range(len(log_freq))):
                if im_z[i] < 0.0:
                    break
                else:
                    negs += 1
            # Reject spectra with any negative real impedance.
            if not (any([x < 0.0 for x in re_z])):
                my_tags = dats['tags']
                if ('VOLTAGE' in my_tags.keys()) and ('CYCLE' in my_tags.keys()) and ('DATE' in my_tags.keys()) and ('TIME' in my_tags.keys()):
                    # Try to decode cell id / cycle offset from the
                    # underscore-separated file name.
                    last_filename = filename.split('\\')[-1]
                    decomposed = last_filename.split('_')
                    if len(decomposed) >= 8:
                        valid = True
                        matchObj1 = re.match(r'(FRA)',
                                             decomposed[1])
                        if valid and not matchObj1:
                            valid = False
                        matchObj1 = re.match(r'0(\d{5,5})',
                                             decomposed[2])
                        matchObj2 = re.match(r'(\d{5,5})',
                                             decomposed[2])
                        matchObj3 = re.match(r'(\d{2,5}[A-Z])',
                                             decomposed[2])
                        if valid and matchObj1:
                            cell_id = matchObj1.group(1)
                        elif valid and matchObj2:
                            cell_id = matchObj2.group(1)
                        elif valid and matchObj3:
                            cell_id = matchObj3.group(1)
                        else:
                            valid = False
                        matchObj1 = re.match(r'(NEWARE)', decomposed[3])
                        matchObj2 = re.match(r'(Nw)', decomposed[3])
                        if matchObj1 or matchObj2:
                            cycle_index = 4
                        else:
                            cycle_index = 3
                        matchObj1 = re.match(r'c(\d{1,7})',
                                             decomposed[cycle_index])
                        if valid and matchObj1:
                            cycle_offset = int(matchObj1.group(1))
                        else:
                            valid = False
                        if valid:
                            pre_record = {'original_spectrum': (log_freq, re_z, im_z),
                                          'freqs_with_negative_im_z': negs, 'cell_id': cell_id, 'cycle': (cycle_offset + my_tags['CYCLE']),
                                          'nominal_voltage': my_tags['VOLTAGE'], 'complete':True}
                        else:
                            print('bad file: ', filename)
                            continue
                    else:
                        # Fallback file-name pattern "<cell>-EISnnnn.FRA".
                        matchObj1 = re.match(r'(.*)-EIS(\d{4,4}).FRA',
                                             last_filename)
                        if matchObj1:
                            pre_record = {'original_spectrum': (log_freq, re_z, im_z),
                                          'freqs_with_negative_im_z': negs,'cell_id': matchObj1.group(1), 'cycle': my_tags['CYCLE'],
                                          'nominal_voltage': my_tags['VOLTAGE'], 'complete': False}
                        else:
                            print('bad file: ', filename)
                            continue
                    # Same quality heuristics as for .mpt files: huge
                    # magnitude (test_1) or noise vs. signal (test_2/test_3).
                    test_1 = numpy.max(numpy.abs(re_z) + numpy.abs(im_z)) > 1e6
                    mean_re_z = numpy.mean(re_z)
                    mean_im_z = numpy.mean(im_z)
                    mean_mag = math.sqrt(mean_re_z ** 2 + mean_im_z ** 2)
                    length = int((len(re_z) - 1) / 3)
                    mean_dev = numpy.mean(
                        numpy.sort(numpy.sqrt((re_z[1:] - re_z[:-1]) ** 2 + (im_z[1:] - im_z[:-1]) ** 2))[
                        -length:])
                    test_2 = mean_dev >= mean_mag
                    mean_re_z_ = re_z - numpy.mean(re_z)
                    mean_im_z_ = im_z - numpy.mean(im_z)
                    mean_mag_ = numpy.mean(numpy.sqrt(mean_re_z_ ** 2 + mean_im_z_ ** 2))
                    test_3 = 2. * mean_dev >= mean_mag_
                    test = test_1 or test_2 or test_3
                    if not test:
                        record = pre_record
                        # De-duplicate: identical spectra from the same cell
                        # with matching EIS numbers count as duplicates.
                        not_already_recorded = True
                        for already_recorded in database.keys():
                            comp_record = database[already_recorded]
                            if (not record['freqs_with_negative_im_z'] == comp_record['freqs_with_negative_im_z'] ):
                                continue
                            if not len(record['original_spectrum'][0]) == len(comp_record['original_spectrum'][0]):
                                continue
                            continue_q = False
                            for index_i in range(len(log_freq)):
                                if ((record['original_spectrum'][0][index_i] - comp_record['original_spectrum'][0][
                                    index_i]) > 1e-10 or
                                        (record['original_spectrum'][1][index_i] -
                                         comp_record['original_spectrum'][1][index_i]) > 1e-10 or
                                        (record['original_spectrum'][2][index_i] -
                                         comp_record['original_spectrum'][2][index_i]) > 1e-10):
                                    continue_q = True
                                    break
                            if continue_q:
                                continue
                            else:
                                if record['cell_id'] == comp_record['cell_id']:
                                    matchObj = re.match(r'.*-EIS(\d{4,4})\.FRA',
                                                        filename)
                                    matchObj2 = re.match(r'.*EIS(\d{4,4})\.FRA',
                                                         already_recorded)
                                    if matchObj and matchObj2:
                                        if matchObj.group(1) == matchObj2.group(1):
                                            not_already_recorded = False
                                            print('found duplicate at {}'.format(already_recorded))
                                            break
                                        else:
                                            continue
                                    else:
                                        print('something went wrong.')
                                        break
                        if not_already_recorded:
                            database[filename] = record
                            count += 1
                            print('record added. was file {}'.format(filename))
                        else:
                            print('duplicate identified. was file {}'.format(filename))
                            continue
                    else:
                        print('bad file: {}, t1:{}, t2:{}, t3:{}'.format(filename, test_1, test_2, test_3))
                        continue
                    # Attach timestamp and measured voltage to the new record.
                    int_month = my_tags['DATE'][0]
                    int_day = my_tags['DATE'][1]
                    int_year = my_tags['DATE'][2]
                    int_hour = my_tags['TIME'][0]
                    int_minute = my_tags['TIME'][1]
                    int_second = my_tags['TIME'][2]
                    start_time = datetime.datetime(int_year, int_month, int_day, hour=int_hour, minute=int_minute,
                                                   second=int_second)
                    if 'data' in dats.keys() and 'VOLTS' in dats['data'].keys():
                        # Trimmed mean of measured voltages when available.
                        voltages = numpy.array(sorted(dats['data']['VOLTS']))
                        if len(voltages) > 30:
                            voltages=voltages[3:-3]
                        actual_voltage = numpy.mean(voltages)
                    else:
                        actual_voltage = database[filename]['nominal_voltage']
                    database[filename]['time'] = start_time
                    database[filename]['actual_voltage'] = actual_voltage
        else:
            empty_parse += 1
    print('successfully processed {} fra files.'.format(count))
    # Group recorded .FRA spectra by cell id, rebase times to each cell's
    # first measurement, then label each record within a cycle as a charge
    # (True) or discharge (False) step from the nominal-voltage trend.
    metadata_groups = {}
    for file_id in all_fra_filenames:
        if file_id in database.keys():
            meta = database[file_id]
            cell_id = meta['cell_id']
            if not cell_id in metadata_groups.keys():
                metadata_groups[cell_id]=[]
            metadata_groups[cell_id].append(file_id)
    for k in metadata_groups.keys():
        all_times = []
        for file_id in metadata_groups[k]:
            all_times.append(database[file_id]['time'])
        start_time = min(all_times)
        # Rebase to elapsed time since the cell's earliest measurement.
        for file_id in metadata_groups[k]:
            database[file_id]['time'] = database[file_id]['time'] - start_time
        cycle_groups = {}
        for file_id in metadata_groups[k]:
            meta = database[file_id]
            cycle = meta['cycle']
            if not cycle in cycle_groups.keys():
                cycle_groups[cycle] = []
            cycle_groups[cycle].append({'file_id':file_id, 'time':meta['time'],'nominal_voltage':meta['nominal_voltage']})
        for cyc in cycle_groups.keys():
            # Chronological order within the cycle; compare each record's
            # nominal voltage against its neighbours to decide charge vs.
            # discharge (ties defer to the next neighbour; default True).
            cycle_groups[cyc] = sorted(cycle_groups[cyc], key=lambda x: x['time'])
            if len(cycle_groups[cyc]) == 1:
                database[cycle_groups[cyc][0]['file_id']]['charge'] = True
            else:
                for index in range(len(cycle_groups[cyc])):
                    if index == 0:
                        if cycle_groups[cyc][1]['nominal_voltage'] > cycle_groups[cyc][index]['nominal_voltage']:
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = True
                        else:
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = False
                    elif index == len(cycle_groups[cyc]) -1:
                        if cycle_groups[cyc][index]['nominal_voltage'] > cycle_groups[cyc][index-1]['nominal_voltage']:
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = True
                        else:
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = False
                    else:
                        if (cycle_groups[cyc][index]['nominal_voltage'] > cycle_groups[cyc][index - 1]['nominal_voltage']) and \
                                (cycle_groups[cyc][index+1]['nominal_voltage'] > cycle_groups[cyc][index]['nominal_voltage']):
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = True
                        elif (cycle_groups[cyc][index]['nominal_voltage'] < cycle_groups[cyc][index - 1]['nominal_voltage']) and \
                                (cycle_groups[cyc][index+1]['nominal_voltage'] < cycle_groups[cyc][index]['nominal_voltage']):
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = False
                        elif (cycle_groups[cyc][index]['nominal_voltage'] > cycle_groups[cyc][index - 1]['nominal_voltage']) and \
                                (cycle_groups[cyc][index+1]['nominal_voltage'] <= cycle_groups[cyc][index]['nominal_voltage']):
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = True
                        elif (cycle_groups[cyc][index]['nominal_voltage'] < cycle_groups[cyc][index - 1]['nominal_voltage']) and \
                                (cycle_groups[cyc][index+1]['nominal_voltage'] >= cycle_groups[cyc][index]['nominal_voltage']):
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = False
                        elif (cycle_groups[cyc][index]['nominal_voltage'] == cycle_groups[cyc][index - 1]['nominal_voltage']) and \
                                (cycle_groups[cyc][index+1]['nominal_voltage'] < cycle_groups[cyc][index]['nominal_voltage']):
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = False
                        elif (cycle_groups[cyc][index]['nominal_voltage'] == cycle_groups[cyc][index - 1]['nominal_voltage']) and \
                                (cycle_groups[cyc][index+1]['nominal_voltage'] >= cycle_groups[cyc][index]['nominal_voltage']):
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = True
                        else:
                            database[cycle_groups[cyc][index]['file_id']]['charge'] = True
    print('empty parses: {}.'.format(empty_parse))
    with open(os.path.join(".", args.data_dir, "database.file"), 'wb') as f:
        pickle.dump(database, f, pickle.HIGHEST_PROTOCOL)
| [
"pickle.dump",
"copy.deepcopy",
"numpy.flip",
"argparse.ArgumentParser",
"math.sqrt",
"csv.reader",
"numpy.abs",
"os.walk",
"re.match",
"datetime.datetime",
"numpy.shape",
"numpy.mean",
"numpy.array",
"os.path.join",
"numpy.sqrt"
] | [((7481, 7506), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7504, 7506), False, 'import argparse\n'), ((8431, 8463), 'os.path.join', 'os.path.join', (['"""."""', 'args.data_dir'], {}), "('.', args.data_dir)\n", (8443, 8463), False, 'import os\n'), ((8561, 8585), 'os.walk', 'os.walk', (['path_to_spectra'], {}), '(path_to_spectra)\n', (8568, 8585), False, 'import os\n'), ((8838, 8862), 'os.walk', 'os.walk', (['path_to_spectra'], {}), '(path_to_spectra)\n', (8845, 8862), False, 'import os\n'), ((6268, 6288), 'numpy.array', 'numpy.array', (['my_data'], {}), '(my_data)\n', (6279, 6288), False, 'import numpy\n'), ((6311, 6331), 'numpy.shape', 'numpy.shape', (['my_data'], {}), '(my_data)\n', (6322, 6331), False, 'import numpy\n'), ((29226, 29279), 'pickle.dump', 'pickle.dump', (['database_eis', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(database_eis, f, pickle.HIGHEST_PROTOCOL)\n', (29237, 29279), False, 'import pickle\n'), ((42810, 42859), 'pickle.dump', 'pickle.dump', (['database', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(database, f, pickle.HIGHEST_PROTOCOL)\n', (42821, 42859), False, 'import pickle\n'), ((7277, 7301), 'copy.deepcopy', 'copy.deepcopy', (['my_header'], {}), '(my_header)\n', (7290, 7301), False, 'import copy\n'), ((7310, 7332), 'copy.deepcopy', 'copy.deepcopy', (['my_tags'], {}), '(my_tags)\n', (7323, 7332), False, 'import copy\n'), ((7341, 7369), 'copy.deepcopy', 'copy.deepcopy', (['my_split_data'], {}), '(my_split_data)\n', (7354, 7369), False, 'import copy\n'), ((7378, 7400), 'copy.deepcopy', 'copy.deepcopy', (['my_junk'], {}), '(my_junk)\n', (7391, 7400), False, 'import copy\n'), ((29151, 29204), 'os.path.join', 'os.path.join', (['"""."""', 'args.data_dir', '"""database_eis.file"""'], {}), "('.', args.data_dir, 'database_eis.file')\n", (29163, 29204), False, 'import os\n'), ((29722, 29756), 'numpy.array', 'numpy.array', (["dats['data']['Re[Z]']"], {}), "(dats['data']['Re[Z]'])\n", (29733, 29756), False, 'import numpy\n'), 
((29776, 29810), 'numpy.array', 'numpy.array', (["dats['data']['Im[Z]']"], {}), "(dats['data']['Im[Z]'])\n", (29787, 29810), False, 'import numpy\n'), ((42739, 42788), 'os.path.join', 'os.path.join', (['"""."""', 'args.data_dir', '"""database.file"""'], {}), "('.', args.data_dir, 'database.file')\n", (42751, 42788), False, 'import os\n'), ((6159, 6185), 'copy.deepcopy', 'copy.deepcopy', (['[this_line]'], {}), '([this_line])\n', (6172, 6185), False, 'import copy\n'), ((29881, 29910), 'numpy.flip', 'numpy.flip', (['log_freq_'], {'axis': '(0)'}), '(log_freq_, axis=0)\n', (29891, 29910), False, 'import numpy\n'), ((29932, 29957), 'numpy.flip', 'numpy.flip', (['re_z_'], {'axis': '(0)'}), '(re_z_, axis=0)\n', (29942, 29957), False, 'import numpy\n'), ((29979, 30004), 'numpy.flip', 'numpy.flip', (['im_z_'], {'axis': '(0)'}), '(im_z_, axis=0)\n', (29989, 30004), False, 'import numpy\n'), ((4054, 4080), 'copy.deepcopy', 'copy.deepcopy', (['parsed_date'], {}), '(parsed_date)\n', (4067, 4080), False, 'import copy\n'), ((6024, 6048), 'copy.deepcopy', 'copy.deepcopy', (['some_data'], {}), '(some_data)\n', (6037, 6048), False, 'import copy\n'), ((8968, 8992), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8980, 8992), False, 'import os\n'), ((9095, 9119), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (9107, 9119), False, 'import os\n'), ((20877, 20907), 'numpy.array', 'numpy.array', (["data['Re(Z)/Ohm']"], {}), "(data['Re(Z)/Ohm'])\n", (20888, 20907), False, 'import numpy\n'), ((29668, 29701), 'numpy.array', 'numpy.array', (["dats['data']['FREQ']"], {}), "(dats['data']['FREQ'])\n", (29679, 29701), False, 'import numpy\n'), ((33533, 33549), 'numpy.mean', 'numpy.mean', (['re_z'], {}), '(re_z)\n', (33543, 33549), False, 'import numpy\n'), ((33581, 33597), 'numpy.mean', 'numpy.mean', (['im_z'], {}), '(im_z)\n', (33591, 33597), False, 'import numpy\n'), ((33628, 33670), 'math.sqrt', 'math.sqrt', (['(mean_re_z ** 2 + mean_im_z ** 
2)'], {}), '(mean_re_z ** 2 + mean_im_z ** 2)\n', (33637, 33670), False, 'import math\n'), ((37535, 37640), 'datetime.datetime', 'datetime.datetime', (['int_year', 'int_month', 'int_day'], {'hour': 'int_hour', 'minute': 'int_minute', 'second': 'int_second'}), '(int_year, int_month, int_day, hour=int_hour, minute=\n int_minute, second=int_second)\n', (37552, 37640), False, 'import datetime\n'), ((4470, 4496), 'copy.deepcopy', 'copy.deepcopy', (['parsed_time'], {}), '(parsed_time)\n', (4483, 4496), False, 'import copy\n'), ((20937, 20968), 'numpy.array', 'numpy.array', (["data['-Im(Z)/Ohm']"], {}), "(data['-Im(Z)/Ohm'])\n", (20948, 20968), False, 'import numpy\n'), ((30751, 30783), 're.match', 're.match', (['"""(FRA)"""', 'decomposed[1]'], {}), "('(FRA)', decomposed[1])\n", (30759, 30783), False, 'import re\n'), ((30957, 30995), 're.match', 're.match', (['"""0(\\\\d{5,5})"""', 'decomposed[2]'], {}), "('0(\\\\d{5,5})', decomposed[2])\n", (30965, 30995), False, 'import re\n'), ((31076, 31113), 're.match', 're.match', (['"""(\\\\d{5,5})"""', 'decomposed[2]'], {}), "('(\\\\d{5,5})', decomposed[2])\n", (31084, 31113), False, 'import re\n'), ((31194, 31236), 're.match', 're.match', (['"""(\\\\d{2,5}[A-Z])"""', 'decomposed[2]'], {}), "('(\\\\d{2,5}[A-Z])', decomposed[2])\n", (31202, 31236), False, 'import re\n'), ((31701, 31736), 're.match', 're.match', (['"""(NEWARE)"""', 'decomposed[3]'], {}), "('(NEWARE)', decomposed[3])\n", (31709, 31736), False, 'import re\n'), ((31773, 31804), 're.match', 're.match', (['"""(Nw)"""', 'decomposed[3]'], {}), "('(Nw)', decomposed[3])\n", (31781, 31804), False, 'import re\n'), ((32007, 32055), 're.match', 're.match', (['"""c(\\\\d{1,7})"""', 'decomposed[cycle_index]'], {}), "('c(\\\\d{1,7})', decomposed[cycle_index])\n", (32015, 32055), False, 'import re\n'), ((32835, 32884), 're.match', 're.match', (['"""(.*)-EIS(\\\\d{4,4}).FRA"""', 'last_filename'], {}), "('(.*)-EIS(\\\\d{4,4}).FRA', last_filename)\n", (32843, 32884), False, 'import 
re\n'), ((33996, 34012), 'numpy.mean', 'numpy.mean', (['re_z'], {}), '(re_z)\n', (34006, 34012), False, 'import numpy\n'), ((34052, 34068), 'numpy.mean', 'numpy.mean', (['im_z'], {}), '(im_z)\n', (34062, 34068), False, 'import numpy\n'), ((34111, 34156), 'numpy.sqrt', 'numpy.sqrt', (['(mean_re_z_ ** 2 + mean_im_z_ ** 2)'], {}), '(mean_re_z_ ** 2 + mean_im_z_ ** 2)\n', (34121, 34156), False, 'import numpy\n'), ((37982, 38002), 'numpy.mean', 'numpy.mean', (['voltages'], {}), '(voltages)\n', (37992, 38002), False, 'import numpy\n'), ((4811, 4840), 'copy.deepcopy', 'copy.deepcopy', (['parsed_voltage'], {}), '(parsed_voltage)\n', (4824, 4840), False, 'import copy\n'), ((20819, 20847), 'numpy.array', 'numpy.array', (["data['freq/Hz']"], {}), "(data['freq/Hz'])\n", (20830, 20847), False, 'import numpy\n'), ((5158, 5185), 'copy.deepcopy', 'copy.deepcopy', (['parsed_cycle'], {}), '(parsed_cycle)\n', (5171, 5185), False, 'import copy\n'), ((11715, 11764), 're.match', 're.match', (['"""Nb header lines : (\\\\d{1,})"""', 'new_line'], {}), "('Nb header lines : (\\\\d{1,})', new_line)\n", (11723, 11764), False, 'import re\n'), ((15488, 15501), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (15498, 15501), False, 'import csv\n'), ((33460, 33475), 'numpy.abs', 'numpy.abs', (['re_z'], {}), '(re_z)\n', (33469, 33475), False, 'import numpy\n'), ((33478, 33493), 'numpy.abs', 'numpy.abs', (['im_z'], {}), '(im_z)\n', (33487, 33493), False, 'import numpy\n'), ((33800, 33869), 'numpy.sqrt', 'numpy.sqrt', (['((re_z[1:] - re_z[:-1]) ** 2 + (im_z[1:] - im_z[:-1]) ** 2)'], {}), '((re_z[1:] - re_z[:-1]) ** 2 + (im_z[1:] - im_z[:-1]) ** 2)\n', (33810, 33869), False, 'import numpy\n'), ((35814, 35858), 're.match', 're.match', (['""".*-EIS(\\\\d{4,4})\\\\.FRA"""', 'filename'], {}), "('.*-EIS(\\\\d{4,4})\\\\.FRA', filename)\n", (35822, 35858), False, 'import re\n'), ((35949, 36000), 're.match', 're.match', (['""".*EIS(\\\\d{4,4})\\\\.FRA"""', 'already_recorded'], {}), 
"('.*EIS(\\\\d{4,4})\\\\.FRA', already_recorded)\n", (35957, 36000), False, 'import re\n'), ((13170, 13219), 're.match', 're.match', (['"""Number of loops : (\\\\d{1,})"""', 'new_line'], {}), "('Number of loops : (\\\\d{1,})', new_line)\n", (13178, 13219), False, 'import re\n'), ((22203, 22232), 'numpy.flip', 'numpy.flip', (['log_freq_'], {'axis': '(0)'}), '(log_freq_, axis=0)\n', (22213, 22232), False, 'import numpy\n'), ((22280, 22305), 'numpy.flip', 'numpy.flip', (['re_z_'], {'axis': '(0)'}), '(re_z_, axis=0)\n', (22290, 22305), False, 'import numpy\n'), ((22353, 22378), 'numpy.flip', 'numpy.flip', (['im_z_'], {'axis': '(0)'}), '(im_z_, axis=0)\n', (22363, 22378), False, 'import numpy\n'), ((24117, 24137), 'numpy.mean', 'numpy.mean', (['voltages'], {}), '(voltages)\n', (24127, 24137), False, 'import numpy\n'), ((24292, 24308), 'numpy.mean', 'numpy.mean', (['re_z'], {}), '(re_z)\n', (24302, 24308), False, 'import numpy\n'), ((24361, 24377), 'numpy.mean', 'numpy.mean', (['im_z'], {}), '(im_z)\n', (24371, 24377), False, 'import numpy\n'), ((24429, 24471), 'math.sqrt', 'math.sqrt', (['(mean_re_z ** 2 + mean_im_z ** 2)'], {}), '(mean_re_z ** 2 + mean_im_z ** 2)\n', (24438, 24471), False, 'import math\n'), ((24924, 24940), 'numpy.mean', 'numpy.mean', (['re_z'], {}), '(re_z)\n', (24934, 24940), False, 'import numpy\n'), ((25001, 25017), 'numpy.mean', 'numpy.mean', (['im_z'], {}), '(im_z)\n', (25011, 25017), False, 'import numpy\n'), ((25081, 25126), 'numpy.sqrt', 'numpy.sqrt', (['(mean_re_z_ ** 2 + mean_im_z_ ** 2)'], {}), '(mean_re_z_ ** 2 + mean_im_z_ ** 2)\n', (25091, 25126), False, 'import numpy\n'), ((23896, 23924), 'numpy.array', 'numpy.array', (["data['<Ewe>/V']"], {}), "(data['<Ewe>/V'])\n", (23907, 23924), False, 'import numpy\n'), ((24198, 24213), 'numpy.abs', 'numpy.abs', (['re_z'], {}), '(re_z)\n', (24207, 24213), False, 'import numpy\n'), ((24216, 24231), 'numpy.abs', 'numpy.abs', (['im_z'], {}), '(im_z)\n', (24225, 24231), False, 'import numpy\n'), 
((24664, 24733), 'numpy.sqrt', 'numpy.sqrt', (['((re_z[1:] - re_z[:-1]) ** 2 + (im_z[1:] - im_z[:-1]) ** 2)'], {}), '((re_z[1:] - re_z[:-1]) ** 2 + (im_z[1:] - im_z[:-1]) ** 2)\n', (24674, 24733), False, 'import numpy\n'), ((14404, 14469), 're.match', 're.match', (['"""(\\\\d{1,2}):(\\\\d{1,2})\\\\.(\\\\d{1,4})"""', 'split_line[index]'], {}), "('(\\\\d{1,2}):(\\\\d{1,2})\\\\.(\\\\d{1,4})', split_line[index])\n", (14412, 14469), False, 'import re\n'), ((19531, 19596), 're.match', 're.match', (['"""(\\\\d{1,2}):(\\\\d{1,2})\\\\.(\\\\d{1,4})"""', 'split_line[index]'], {}), "('(\\\\d{1,2}):(\\\\d{1,2})\\\\.(\\\\d{1,4})', split_line[index])\n", (19539, 19596), False, 'import re\n')] |
"""BART based chatbot implementation."""
from typing import Dict
import numpy as np
import scipy.special as scp
import onnxruntime as rt
from npc_engine.services.text_generation.text_generation_base import TextGenerationAPI
from tokenizers import Tokenizer
import os
import json
class BartChatbot(TextGenerationAPI):
    """BART based chatbot implementation class.

    This model class requires two ONNX models `encoder_bart.onnx` and `decoder_bart.onnx`
    that correspond to encoder and decoder from transformers
    [EncoderDecoderModel](https://huggingface.co/transformers/model_doc/encoderdecoder.html)
    and a tokenizer.json with huggingface tokenizers definition.

    encoder_bart.onnx spec:
        - inputs:
            `input_ids`
        - outputs:
            `encoder_hidden_state`

    decoder_bart.onnx spec:
        - inputs:
            `encoder_hidden_state`
            `decoder_input_ids`
        - outputs:
            `logits`
    """

    def __init__(
        self,
        model_path,
        max_steps=100,
        min_length=2,
        repetition_penalty=1,
        bos_token_id=0,
        eos_token_id=2,
        pad_token_id=1,
        sep_token_id=None,
        *args,
        **kwargs,
    ):
        """Create the chatbot from config args and kwargs.

        Args:
            model_path: path to scan for model files (weights and configs)
            max_steps: stop generation at this number of tokens
            min_length: model can't stop generating text before it's atleast
                this long in tokens
            repetition_penalty: probability coef for same tokens to appear multiple times
            bos_token_id: beginning of sequence token id
            eos_token_id: end of sequence token id
            pad_token_id: padding token id
            sep_token_id: token id for separating sequence into multiple parts
        """
        super().__init__(*args, **kwargs)
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Fall back to EOS as the separator when no explicit one is configured.
        self.sep_token_id = eos_token_id if sep_token_id is None else sep_token_id
        self.pad_token_id = pad_token_id
        sess_options = rt.SessionOptions()
        sess_options.graph_optimization_level = (
            rt.GraphOptimizationLevel.ORT_DISABLE_ALL
        )
        self.encoder_model = rt.InferenceSession(
            os.path.join(model_path, "encoder_bart.onnx"),
            providers=self.get_providers(),
            sess_options=sess_options,
        )
        self.decoder_model = rt.InferenceSession(
            os.path.join(model_path, "decoder_bart.onnx"),
            providers=self.get_providers(),
            sess_options=sess_options,
        )
        self.tokenizer = Tokenizer.from_file(os.path.join(model_path, "tokenizer.json"))
        added_tokens_path = os.path.join(model_path, "added_tokens.txt")
        # BUGFIX: default to an empty list so the special-token table below does
        # not raise NameError when added_tokens.txt is absent.
        added_tokens = []
        if os.path.exists(added_tokens_path):
            with open(added_tokens_path) as f:
                # The file is expected to hold a JSON dict token -> vocab id.
                added_tokens = json.load(f)
            # Register tokens sorted by their vocabulary id.
            added_tokens = [
                key for key, _ in sorted(list(added_tokens.items()), key=lambda x: x[1])
            ]
            self.tokenizer.add_tokens(added_tokens)
        self.special_tokens = {
            "bos_token": self.tokenizer.decode(
                [bos_token_id], skip_special_tokens=False
            ),
            "eos_token": self.tokenizer.decode(
                [eos_token_id], skip_special_tokens=False
            ),
            "sep_token": self.tokenizer.decode(
                [self.sep_token_id], skip_special_tokens=False
            ),
            "pad_token": self.tokenizer.decode(
                [pad_token_id], skip_special_tokens=False
            ),
            **{
                f"added_token{self.tokenizer.token_to_id(token)}": token
                for token in added_tokens
            },
        }
        self.max_steps = max_steps
        self.min_length = min_length
        # NOTE(review): repetition_penalty is stored but never applied in run()
        # — confirm whether penalized sampling was intended.
        self.repetition_penalty = repetition_penalty

    def run(self, prompt: str, temperature: float = 1.0, topk: int = None) -> str:
        """Run text generation from given prompt and parameters.

        Args:
            prompt: Fromatted prompt.
            temperature: Temperature parameter for sampling.
                Controls how random model output is: more temperature - more randomness
            topk: If not none selects top n of predictions to sample from during generation.

        Returns:
            Generated text
        """
        tokens = self.tokenizer.encode(prompt)
        total = np.asarray(tokens.ids, dtype=np.int64).reshape([1, -1])
        # Encode the prompt once; the decoder is run autoregressively below.
        total_enc = self.encoder_model.run(None, {"input_ids": total})[0]
        utterance = np.asarray([self.eos_token_id], dtype=np.int64).reshape([1, 1])
        for i in range(self.max_steps):
            o = self.decoder_model.run(
                None,
                {"encoder_hidden_state": total_enc, "decoder_input_ids": utterance},
            )
            # Logits for the last generated position only.
            logits = o[0][0, -1, :]
            if i < self.min_length:
                # Forbid EOS until the minimum length is reached.
                logits[self.eos_token_id] = float("-inf")
            if topk is not None:
                # Zero out everything outside the top-k logits before sampling.
                ind = np.argpartition(logits, -topk)[-topk:]
                new_logits = np.zeros(logits.shape)
                new_logits[ind] = logits[ind]
                logits = new_logits
            probs = scp.softmax(logits / temperature, axis=0)
            token = np.random.choice(np.arange(probs.shape[0]), p=probs)
            token = token.reshape([1, 1])
            utterance = np.concatenate([utterance, token], axis=1)
            if token[0, 0] == self.eos_token_id:
                break
        return self.tokenizer.decode(utterance[0, :].tolist(), skip_special_tokens=True)

    def get_special_tokens(self) -> Dict[str, str]:
        """Retrun dict of special tokens to be renderable from template."""
        return self.special_tokens
| [
"json.load",
"os.path.join",
"numpy.asarray",
"os.path.exists",
"numpy.zeros",
"numpy.argpartition",
"numpy.arange",
"scipy.special.softmax",
"onnxruntime.SessionOptions",
"numpy.concatenate"
] | [((2232, 2251), 'onnxruntime.SessionOptions', 'rt.SessionOptions', ([], {}), '()\n', (2249, 2251), True, 'import onnxruntime as rt\n'), ((2902, 2946), 'os.path.join', 'os.path.join', (['model_path', '"""added_tokens.txt"""'], {}), "(model_path, 'added_tokens.txt')\n", (2914, 2946), False, 'import os\n'), ((2959, 2992), 'os.path.exists', 'os.path.exists', (['added_tokens_path'], {}), '(added_tokens_path)\n', (2973, 2992), False, 'import os\n'), ((2433, 2478), 'os.path.join', 'os.path.join', (['model_path', '"""encoder_bart.onnx"""'], {}), "(model_path, 'encoder_bart.onnx')\n", (2445, 2478), False, 'import os\n'), ((2640, 2685), 'os.path.join', 'os.path.join', (['model_path', '"""decoder_bart.onnx"""'], {}), "(model_path, 'decoder_bart.onnx')\n", (2652, 2685), False, 'import os\n'), ((2829, 2871), 'os.path.join', 'os.path.join', (['model_path', '"""tokenizer.json"""'], {}), "(model_path, 'tokenizer.json')\n", (2841, 2871), False, 'import os\n'), ((5492, 5533), 'scipy.special.softmax', 'scp.softmax', (['(logits / temperature)'], {'axis': '(0)'}), '(logits / temperature, axis=0)\n', (5503, 5533), True, 'import scipy.special as scp\n'), ((5678, 5720), 'numpy.concatenate', 'np.concatenate', (['[utterance, token]'], {'axis': '(1)'}), '([utterance, token], axis=1)\n', (5692, 5720), True, 'import numpy as np\n'), ((3074, 3086), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3083, 3086), False, 'import json\n'), ((4675, 4713), 'numpy.asarray', 'np.asarray', (['tokens.ids'], {'dtype': 'np.int64'}), '(tokens.ids, dtype=np.int64)\n', (4685, 4713), True, 'import numpy as np\n'), ((4829, 4876), 'numpy.asarray', 'np.asarray', (['[self.eos_token_id]'], {'dtype': 'np.int64'}), '([self.eos_token_id], dtype=np.int64)\n', (4839, 4876), True, 'import numpy as np\n'), ((5362, 5384), 'numpy.zeros', 'np.zeros', (['logits.shape'], {}), '(logits.shape)\n', (5370, 5384), True, 'import numpy as np\n'), ((5574, 5599), 'numpy.arange', 'np.arange', (['probs.shape[0]'], {}), 
'(probs.shape[0])\n', (5583, 5599), True, 'import numpy as np\n'), ((5293, 5323), 'numpy.argpartition', 'np.argpartition', (['logits', '(-topk)'], {}), '(logits, -topk)\n', (5308, 5323), True, 'import numpy as np\n')] |
import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import torch
import cv2
import numpy as np
import copy
from ..utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from ..utils.common import xyxy2xywh
from ..transform.train_transform import letterbox, hsv_augment, random_affine
from ..transform.train_transform import CropAndPaste
# 在检测\跟踪等测试时都会用到这个加载图像数据集 测试时会用到
class LoadImages:
    """Image-folder dataset used at test time (detection / tracking).

    Yields tuples `(img_path, img, img0)` where `img` is the resized,
    RGB-normalized network input and `img0` the raw BGR image.
    """

    # Accepted image file extensions (lower-case).
    IMAGE_FORMATS = ('.jpg', '.jpeg', '.png', '.tif')

    def __init__(
        self,
        path,
        img_size,
        transform,
    ):
        """
        Args:
            path: directory of images, or a single image file
            img_size: network input size (width, height)
            transform: normalization transform applied to each image
        """
        # BUGFIX: start from an empty list so an invalid path trips the clear
        # 'No images found' assert instead of an AttributeError.
        self.files = []
        if os.path.isdir(path):
            candidates = sorted(glob.glob('%s/*.*' % path))
            self.files = [
                f for f in candidates
                if os.path.splitext(f)[1].lower() in self.IMAGE_FORMATS
            ]
        elif os.path.isfile(path):
            self.files = [path]
        self.nF = len(self.files)  # number of image files
        self.width = img_size[0]
        self.height = img_size[1]
        self.count = 0
        self.transform = transform
        assert self.nF > 0, 'No images found in ' + path

    def _load(self, img_path):
        """Read one image and prepare it for the network.

        Returns:
            (img_path, img, img0): possibly-rewritten path, normalized RGB
            tensor (as numpy array) and the raw BGR image.
        """
        # 4K augmentation: prefer the 4k twin of a 720p frame when the path
        # encodes the resolution.
        if img_path.find("720p") != -1:
            img_path = img_path.replace("720p", "4k")
        img0 = cv2.imread(img_path)  # BGR
        assert img0 is not None, 'Failed to load ' + img_path
        # Plain resize (letterbox deliberately disabled here).
        img = cv2.resize(img0, (self.width, self.height), cv2.INTER_CUBIC)
        # BGR -> RGB, contiguous for the downstream transform.
        img = np.ascontiguousarray(img[:, :, ::-1])
        img = self.transform(img)
        img = img.numpy()
        return img_path, img, img0

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if self.count == self.nF:
            raise StopIteration
        return self._load(self.files[self.count])

    def __getitem__(self, idx):
        return self._load(self.files[idx % self.nF])

    def __len__(self):
        return self.nF  # number of files
# 在视频demo的时候需要用到这个数据集 测试时会用到
class LoadVideo:
    """Frame iterator over a video file, used by the video demo at test time.

    Yields `(frame_index, net_input, raw_frame)` where `net_input` is the
    resized, RGB-normalized tensor and `raw_frame` the BGR frame resized to
    a fixed 1280x720 display size.
    """

    def __init__(
        self,
        path,
        img_size,
        transform,
    ):
        self.cap = cv2.VideoCapture(path)
        # Probe stream properties once up front.
        self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
        self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.width, self.height = img_size
        self.count = 0
        self.transform = transform
        # Fixed size for the raw frame returned alongside the tensor.
        self.w, self.h = 1280, 720
        print('Lenth of the video: {:d} frames'.format(self.vn))

    def get_size(self, vw, vh, dw, dh):
        """Scale (vw, vh) to fit inside (dw, dh), preserving aspect ratio."""
        scale = min(float(dw) / vw, float(dh) / vh)
        return int(vw * scale), int(vh * scale)

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if self.count == len(self):
            raise StopIteration
        # Read the next BGR frame from the stream.
        res, frame = self.cap.read()
        assert frame is not None, 'Failed to load frame {:d}'.format(self.count)
        frame = cv2.resize(frame, (self.w, self.h))
        # Resize to network input, flip BGR -> RGB, then normalize.
        net_input = cv2.resize(frame, (self.width, self.height), cv2.INTER_CUBIC)
        net_input = np.ascontiguousarray(net_input[:, :, ::-1])
        net_input = self.transform(net_input)
        return self.count, net_input, frame

    def __len__(self):
        return self.vn  # number of frames
# 训练时使用到的最根本的数据加载器 用来加载图片和标签 是其它数据集加载的一个基类
class LoadImagesAndLabels:
    """Base train-time loader pairing every image with its annotation file.

    The annotation for `<...>/images/x.jpg` lives at
    `<...>/labels_with_ids/x.txt`, one row per object:
    `cls id xc/img_w yc/img_h w/img_w h/img_h` (normalized xywh).
    """

    def __init__(
        self,
        path,
        img_size,
        augment=False,
        transforms=None
    ):
        """
        Args:
            path: text file listing one image path per line (train/val split)
            img_size: network input size (width, height)
            augment: whether to apply data augmentation
            transforms: normalization transform applied to each image
        """
        with open(path, 'r') as file:
            # Read all image paths into a list, dropping blank lines.
            self.img_files = file.readlines()
            self.img_files = [x.replace('\n', '') for x in self.img_files]
            self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
        # One annotation file per image; counts always match.
        self.label_files = [
            x.replace('images', 'labels_with_ids')
            .replace('.png', '.txt')
            .replace('.jpg', '.txt')
            for x in self.img_files
        ]
        # BUGFIX: zip() is a one-shot iterator in Python 3; it must be
        # materialized before random.shuffle() and index access can work.
        self.pairs = list(zip(self.img_files, self.label_files))
        random.shuffle(self.pairs)
        self.nF = len(self.pairs)
        # Input size configured at train start; images are resized to it.
        self.width = img_size[0]
        self.height = img_size[1]
        self.augment = augment
        self.transforms = transforms

    def __getitem__(self, files_index):
        img_path = self.pairs[files_index][0]
        label_path = self.pairs[files_index][1]
        # Load the image together with its labels.
        return self.get_data(img_path, label_path)

    def get_data(self, img_path, label_path, use_letter_box = False, ball_augment = None, hr_flip = None):
        """Load one image, apply augmentation and format its labels.

        Args:
            img_path: path of a single image
            label_path: path of its annotation file
            use_letter_box: whether to use the letterbox resize (and affine
                augmentation) instead of a plain resize
            ball_augment: optional crop-and-paste ball augmentation callable
            hr_flip: force horizontal flip on/off; None picks randomly

        Returns:
            img: normalized RGB tensor for this image
            labels: per-object rows [cls, id, xc, yc, w, h] normalized to the
                network input size (empty array when no annotation exists)
            img_path: image path
            (h, w): original image height and width
        """
        height = self.height
        width = self.width
        img = cv2.imread(img_path)  # BGR
        if img is None:
            raise ValueError('File corrupt {}'.format(img_path))
        augment_hsv = True
        h, w, _ = img.shape
        if use_letter_box:
            if self.augment and augment_hsv:
                hsv_augment(img)
            # Letterbox keeps aspect ratio by padding. Ball augmentation is
            # not applied on this path (targets become too small).
            img, ratio, padw, padh = letterbox(img, height=height, width=width)
            # Load labels
            if os.path.isfile(label_path):
                # Rows: cls, id, xc/img_w, yc/img_h, w/img_w, h/img_h.
                labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
                labels = labels0.copy()
                # Map normalized xywh back to pixel xyxy on the letterboxed
                # image: scale by `ratio`, then add the padding offsets.
                labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw  # x1
                labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh  # y1
                labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw  # x2
                labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh  # y2
            else:
                # No annotation file: empty label set.
                labels = np.array([])
        else:
            # 1. Optional paste augmentation on the original image.
            labels0 = None
            if os.path.isfile(label_path):
                labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
            if ball_augment is not None:
                # NOTE: self.root is set by subclasses that pass ball_augment.
                img, labels0 = ball_augment(img, labels0, img_path.replace(self.root, ""))
            # 2. HSV color-space augmentation.
            if self.augment and augment_hsv:
                hsv_augment(img)
            # 3. Plain resize to the network input size.
            img = cv2.resize(img, [self.width, self.height], interpolation=cv2.INTER_AREA)
            if labels0 is not None:
                # Normalized xywh -> pixel xyxy on the resized image.
                labels = labels0.copy()
                labels[:, 2] = self.width * (labels0[:, 2] - labels0[:, 4] / 2)
                labels[:, 3] = self.height * (labels0[:, 3] - labels0[:, 5] / 2)
                labels[:, 4] = self.width * (labels0[:, 2] + labels0[:, 4] / 2)
                labels[:, 5] = self.height * (labels0[:, 3] + labels0[:, 5] / 2)
            else:
                labels = np.array([])
        # Affine augmentation only on the letterbox path.
        if self.augment and use_letter_box:
            img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))
        nL = len(labels)
        if nL > 0:
            # Convert pixel xyxy back to normalized xywh.
            labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy())  # / height
            labels[:, 2] /= width
            labels[:, 3] /= height
            labels[:, 4] /= width
            labels[:, 5] /= height
        hr_flip = hr_flip if hr_flip is not None else (random.random() > 0.5)
        if self.augment:
            # Random left-right flip; x-centers mirror accordingly.
            if hr_flip:
                img = np.fliplr(img)
                if nL > 0:
                    labels[:, 2] = 1 - labels[:, 2]
        # BGR -> RGB before the normalization transform; make contiguous.
        img = np.ascontiguousarray(img[:, :, ::-1])
        # Normalize (0~1 plus mean / std).
        if self.transforms is not None:
            img = self.transforms(img)
        return img, labels, img_path, (h, w)

    def __len__(self):
        return self.nF  # number of image/label pairs
# 通用的联合数据集
class JointDataset(LoadImagesAndLabels):
    """Multi-dataset loader for joint detection + re-ID training.

    Builds CenterNet-style targets (heatmap, size, offset, id) for the
    player class (class id 0) only.
    """

    def __init__(
        self,
        opt,
        root,
        paths,
        img_size,
        augment=False,
        transforms=None
    ):
        """Collect and index every sub-dataset of the joint dataset.

        Args:
            opt: global configuration (num_classes, K, down_ratio, ...)
            root: root directory holding images and labels
            paths: dict of dataset name -> pre-generated train/val list file
            img_size: unified network input size (width, height)
            augment: whether to apply data augmentation
            transforms: normalization transform applied to each image
        """
        self.opt = opt
        # Ordered dicts keep the sub-datasets in insertion order.
        self.img_files = OrderedDict()
        self.label_files = OrderedDict()
        self.tid_num = OrderedDict()          # max track id per sub-dataset
        self.tid_start_index = OrderedDict()  # id offset of each sub-dataset in the global id space
        self.num_classes = opt.num_classes    # total classes (default 1: players only)
        # `paths` may hold several datasets for joint training:
        # dataset name -> list file of image paths.
        for ds, path in paths.items():
            with open(path, 'r') as file:
                self.img_files[ds] = file.readlines()
                self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
                self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
            # Matching annotation file for every image.
            self.label_files[ds] = [
                x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                for x in self.img_files[ds]
            ]
            print('Total {} image files in {} dataset.'.format(len(self.label_files[ds]), ds))
        # Scan annotations for the largest track id of each sub-dataset.
        for ds, label_paths in self.label_files.items():
            max_index = -1
            for lp in label_paths:
                lb = np.loadtxt(lp)
                if len(lb) < 1:
                    continue
                if len(lb.shape) < 2:
                    # Single annotation row.
                    cur_img_max_id = lb[1]
                else:
                    cur_img_max_id = np.max(lb[:, 1])
                if cur_img_max_id > max_index:
                    # Track the running maximum id within this sub-dataset.
                    max_index = cur_img_max_id
            self.tid_num[ds] = max_index
        # Stack the per-dataset id ranges into one global id space.
        last_index = 0
        for i, (k, v) in enumerate(self.tid_num.items()):
            self.tid_start_index[k] = last_index
            last_index += v
        # +1 leaves a safety slot above the highest id.
        self.nID = int(last_index + 1)
        self.nds = [len(x) for x in self.img_files.values()]
        self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
        self.nF = sum(self.nds)
        self.width = img_size[0]
        self.height = img_size[1]
        self.max_objs = opt.K
        self.augment = augment
        self.transforms = transforms
        print('=' * 120)
        print('dataset summary')
        print(self.tid_num)
        print('total # identities:', self.nID)
        print('start index')
        print(self.tid_start_index)
        print('=' * 120)

    def __getitem__(self, files_index):
        # Locate the sub-dataset this global index falls into.
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        # Letterboxed (and possibly affine-augmented) image plus labels.
        imgs, labels, img_path, (origin_h, origin_w) = self.get_data(img_path, label_path, use_letter_box = True)
        # Shift track ids into the global id space; id == -1 means the object
        # is excluded from re-ID training.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        # Network output resolution after down-sampling.
        output_h = imgs.shape[1] // self.opt.down_ratio
        output_w = imgs.shape[2] // self.opt.down_ratio
        num_classes = self.num_classes
        # ==========================================================================
        # Target tensors
        # ==========================================================================
        hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)  # per-class center heatmap
        if self.opt.ltrb:  # regress left/top/right/bottom distances
            wh = np.zeros((self.max_objs, 4), dtype=np.float32)
        else:              # regress width/height only
            wh = np.zeros((self.max_objs, 2), dtype=np.float32)
        center_offset = np.zeros((self.max_objs, 2), dtype=np.float32)       # sub-pixel center offset
        center_1d_index = np.zeros((self.max_objs, ), dtype=np.int64)        # flattened center position
        center_offset_reg_mask = np.zeros((self.max_objs, ), dtype=np.uint8) # which slots carry regression targets
        ids = np.zeros((self.max_objs, ), dtype=np.int64)                    # re-ID class per object
        bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)           # pixel xyxy boxes
        # Keep players only (class id 0).
        # BUGFIX: get_data returns a 1-D empty array when no annotation file
        # exists; boolean-mask indexing would raise IndexError on it.
        if labels.size:
            labels = labels[labels[:, 0] == 0]
        else:
            labels = labels.reshape(0, 6)
        num_objs = labels.shape[0]
        # Gaussian splat used on the heatmap.
        draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
        for k in range(min(num_objs, self.max_objs)):
            label = labels[k]  # row k: [cls, id, xc, yc, w, h] normalized
            bbox = label[2:]
            cls_id = int(label[0])
            # Normalized xywh -> output-resolution pixels.
            bbox[[0, 2]] = bbox[[0, 2]] * output_w
            bbox[[1, 3]] = bbox[[1, 3]] * output_h
            # Unclipped xyxy box.
            bbox_amodal = copy.deepcopy(bbox)
            bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
            bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
            bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
            bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
            # Clamp the center into the output map.
            bbox[0] = np.clip(bbox[0], 0, output_w - 1)
            bbox[1] = np.clip(bbox[1], 0, output_h - 1)
            h = bbox[3]
            w = bbox[2]
            # xyxy box with the clamped center.
            bbox_xy = copy.deepcopy(bbox)
            bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
            bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
            bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
            bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
            if h > 0 and w > 0:
                # Gaussian radius scaled to the object size.
                radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                radius = max(0, int(radius))
                radius = 6 if self.opt.mse_loss else radius
                ct = np.array([bbox[0], bbox[1]], dtype=np.float32)  # heatmap center (float)
                ct_int = ct.astype(np.int32)                          # integer center cell
                # Splat a gaussian around the center on the class heatmap.
                draw_gaussian(hm[cls_id], ct_int, radius)
                # Size regression target: either ltrb distances from the
                # center, or plain width/height.
                if self.opt.ltrb:
                    wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
                            bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
                else:
                    wh[k] = 1. * w, 1. * h
                center_1d_index[k] = ct_int[1] * output_w + ct_int[0]  # flattened center cell
                center_offset[k] = ct - ct_int                          # sub-pixel remainder lost to down-sampling
                center_offset_reg_mask[k] = 1                           # only the first K slots regress
                ids[k] = label[1]                                       # re-ID label (already offset)
                bbox_xys[k] = bbox_xy                                   # pixel xyxy box
        ret = {
            'input': imgs,                        # input image tensor
            'hm': hm,                             # gaussian-weighted center heatmap
            'reg_mask': center_offset_reg_mask,   # regression mask
            'ind': center_1d_index,               # flattened center positions
            'wh': wh,                             # size targets
            'reg': center_offset,                 # center offsets
            'ids': ids,                           # re-ID classes
            'bbox': bbox_xys                      # pixel boxes
        }
        return ret
# 仅简单数据处理用于足球检测
class BallDataset(JointDataset):
    """Joint dataset specialized for simple football (ball) detection.

    Builds CenterNet-style targets only for labels of class id 1 (the ball);
    up to MAX_BALL_NUM balls per image. Optionally applies a crop-and-paste
    ball augmentation.
    """
    def __init__(
        self,
        opt,
        root,
        paths,
        img_size,
        augment=False,
        transforms=None,
        ball_info_dict = None
    ):
        """Collect and index the joint dataset.

        Args:
            opt: global configuration options
            root: root directory holding images and labels
            paths: dict of dataset name -> pre-generated train/val list file
            img_size: unified network input size (can be tuned via opt)
            ball_info_dict: if given, enables CropAndPaste ball augmentation
        Returns:
        """
        super(BallDataset, self).__init__(opt, root, paths, img_size, augment, transforms)
        self.root = root
        self.ball_augment = CropAndPaste(opt.data_dir, ball_info_dict) if ball_info_dict is not None else None
    def __getitem__(self, files_index):
        # Locate the sub-dataset this global index falls into.
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        # Plain-resized image (no letterbox) plus labels, with optional
        # crop-and-paste ball augmentation.
        imgs, labels, img_path, (origin_h, origin_w) = self.get_data(img_path, label_path, ball_augment = self.ball_augment)
        # Shift track ids into the global id space; id == -1 means the object
        # is excluded from re-ID training.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        # Network output resolution after down-sampling (imgs is a C*H*W tensor).
        output_h = imgs.shape[1] // self.opt.down_ratio
        output_w = imgs.shape[2] // self.opt.down_ratio
        # =======================================================
        # Ball-specific target construction
        # =======================================================
        # Gaussian splat used on the heatmap.
        draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
        def set_targets(selected_labels, maxK, hm, wh, center1dind, centeroffset, centeroffsetregmask, reidcls, bboxxys):
            # Fill the target arrays in place for up to maxK objects.
            for k in range(min(maxK, selected_labels.shape[0])):
                label = selected_labels[k]  # row k: [cls, id, xc, yc, w, h] normalized
                bbox = label[2:]
                # Normalized xywh -> output-resolution pixels.
                bbox[[0, 2]] = bbox[[0, 2]] * output_w
                bbox[[1, 3]] = bbox[[1, 3]] * output_h
                # Unclipped xyxy box.
                bbox_amodal = copy.deepcopy(bbox)
                bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
                bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
                bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
                bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
                # Clamp the center into the output map.
                bbox[0] = np.clip(bbox[0], 0, output_w - 1)
                bbox[1] = np.clip(bbox[1], 0, output_h - 1)
                h = bbox[3]
                w = bbox[2]
                # xyxy box with the clamped center (only difference to bbox_amodal).
                bbox_xy = copy.deepcopy(bbox)
                bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
                bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
                bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
                bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
                if h > 0 and w > 0:
                    # Gaussian radius scaled to the object size.
                    radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                    radius = max(0, int(radius))
                    radius = 6 if self.opt.mse_loss else radius
                    #radius = max(1, int(radius)) if self.opt.mse_loss else radius
                    ct = np.array([bbox[0], bbox[1]], dtype=np.float32)  # heatmap center (float)
                    ct_int = ct.astype(np.int32)  # integer center cell
                    # Splat a gaussian around the center on the heatmap.
                    draw_gaussian(hm, ct_int, radius)
                    # Size regression: ltrb distances from the center, or w/h.
                    if self.opt.ltrb:
                        wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
                                bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
                    else:
                        wh[k] = 1. * w, 1. * h
                    center1dind[k] = ct_int[1] * output_w + ct_int[0]  # flattened center cell
                    centeroffset[k] = ct - ct_int  # sub-pixel remainder lost to down-sampling
                    centeroffsetregmask[k] = 1  # only the first K slots regress
                    reidcls[k] = label[1]  # re-ID label (already offset)
                    bboxxys[k] = bbox_xy  # pixel xyxy box
        # At most this many balls per image.
        MAX_BALL_NUM = 5
        hm = np.zeros((1, output_h, output_w), dtype=np.float32)
        wh = np.zeros((MAX_BALL_NUM, 4), dtype=np.float32) if self.opt.ltrb else np.zeros((MAX_BALL_NUM, 2), dtype=np.float32)
        center_offset = np.zeros((MAX_BALL_NUM, 2), dtype=np.float32)
        center_1d_ind = np.zeros((MAX_BALL_NUM, ), dtype=np.int64)
        center_offset_reg_mask = np.zeros((MAX_BALL_NUM, ), dtype=np.uint8)
        reid_cls = np.zeros((MAX_BALL_NUM, ), dtype=np.int64)
        bbox_xys = np.zeros((MAX_BALL_NUM, 4), dtype=np.float32)
        # Keep only ball labels (class id 1).
        # NOTE(review): if the annotation file is missing, get_data returns a
        # 1-D empty array and this boolean-mask indexing raises IndexError —
        # confirm every listed image has a label file.
        labels = labels[labels[:, 0].astype(np.int32) == 1]
        set_targets(labels, MAX_BALL_NUM, hm[0], wh, center_1d_ind, center_offset, center_offset_reg_mask, reid_cls, bbox_xys)
        # Debug visualization of the heatmap (kept for reference):
        # import matplotlib.pyplot as plt; import seaborn as sns
        # fig = plt.figure(); sns.heatmap(hm[0]); fig.savefig("heatmap.png")
        ret = {
            'input': imgs,                        # input image tensor
            'hm': hm,                             # gaussian-weighted center heatmap
            'reg_mask': center_offset_reg_mask,   # regression mask
            'ind': center_1d_ind,                 # flattened center positions
            'wh': wh,                             # size targets
            'reg': center_offset,                 # center offsets
            'ids': reid_cls,                      # re-ID classes
            'bbox': bbox_xys                      # pixel boxes
        }
        return ret
# 高清分辨率Patch
class HRDataset(JointDataset):
    """Joint dataset that additionally returns high-resolution object patches.

    For feature enhancement: patches are cropped from the original image and
    resized to patch_size x patch_size. For the MCC4K dataset patches can be
    cut from the 4K frames; for MOT datasets they come from the image itself.
    """
    def __init__(
        self,
        opt,
        root,
        paths,
        img_size,
        patch_size,
        augment=False,
        transforms=None,
    ):
        super(HRDataset, self).__init__(opt, root, paths, img_size, augment, transforms)
        self.opt = opt
        self.root = root
        self.paths = paths
        self.img_size = img_size
        self.augment = augment
        self.transforms = transforms
        self.patch_size = patch_size  # side length of each HR patch
    def __getitem__(self, files_index):
        # Locate the sub-dataset this global index falls into.
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        # Load the resized training image and labels; disable the horizontal
        # flip so the HR crops below stay aligned with the targets.
        random_hr_flip = False
        imgs, labels, img_path, (origin_h, origin_w) = self.get_data(img_path, label_path, hr_flip=random_hr_flip)
        # Shift track ids into the global id space; id == -1 means the object
        # is excluded from re-ID training.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        # Network output resolution after down-sampling.
        output_h = imgs.shape[1] // self.opt.down_ratio
        output_w = imgs.shape[2] // self.opt.down_ratio
        num_classes = self.num_classes
        hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)  # per-class center heatmap
        if self.opt.ltrb:  # regress left/top/right/bottom distances
            wh = np.zeros((self.max_objs, 4), dtype=np.float32)
        else:              # regress width/height only
            wh = np.zeros((self.max_objs, 2), dtype=np.float32)
        center_offset = np.zeros((self.max_objs, 2), dtype=np.float32)       # sub-pixel center offset
        center_1d_index = np.zeros((self.max_objs, ), dtype=np.int64)        # flattened center position
        center_offset_reg_mask = np.zeros((self.max_objs, ), dtype=np.uint8) # which slots carry regression targets
        ids = np.zeros((self.max_objs, ), dtype=np.int64)                    # re-ID class per object
        bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)           # pixel xyxy boxes
        # Keep players only (class id 0).
        labels = labels[labels[:, 0] == 0].copy()
        num_objs = labels.shape[0]
        # Step 1: standard low-resolution target construction.
        # Gaussian splat used on the heatmap.
        draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
        for k in range(min(num_objs, self.max_objs)):
            label = labels[k].copy()  # row k: [cls, id, xc, yc, w, h] normalized
            bbox = label[2:]
            cls_id = int(label[0])
            # Normalized xywh -> output-resolution pixels.
            bbox[[0, 2]] = bbox[[0, 2]] * output_w
            bbox[[1, 3]] = bbox[[1, 3]] * output_h
            # Unclipped xyxy box.
            bbox_amodal = copy.deepcopy(bbox)
            bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
            bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
            bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
            bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
            # Clamp the center into the output map.
            bbox[0] = np.clip(bbox[0], 0, output_w - 1)
            bbox[1] = np.clip(bbox[1], 0, output_h - 1)
            h = bbox[3]
            w = bbox[2]
            # xyxy box with the clamped center (only difference to bbox_amodal).
            bbox_xy = copy.deepcopy(bbox)
            bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
            bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
            bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
            bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
            if h > 0 and w > 0:
                # Gaussian radius scaled to the object size.
                radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                radius = max(0, int(radius))
                radius = 6 if self.opt.mse_loss else radius
                # radius = max(1, int(radius)) if self.opt.mse_loss else radius
                ct = np.array([bbox[0], bbox[1]], dtype=np.float32)  # heatmap center (float)
                ct_int = ct.astype(np.int32)  # integer center cell
                # Splat a gaussian around the center on the class heatmap.
                draw_gaussian(hm[cls_id], ct_int, radius)
                # Size regression: ltrb distances from the center, or w/h.
                if self.opt.ltrb:
                    wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
                            bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
                else:
                    wh[k] = 1. * w, 1. * h
                center_1d_index[k] = ct_int[1] * output_w + ct_int[0]  # flattened center cell
                center_offset[k] = ct - ct_int  # sub-pixel remainder lost to down-sampling
                center_offset_reg_mask[k] = 1  # only the first K slots regress
                ids[k] = label[1]  # re-ID label (already offset)
                bbox_xys[k] = bbox_xy  # pixel xyxy box
        # Step 2: crop per-object patches from the high-resolution image.
        # Prefer the 4k twin of a 720p frame when the path encodes it.
        if img_path.find("/720p/") != -1:
            hd_img_path = img_path.replace("720p", "4k")
        else:
            hd_img_path = img_path
        hd_img = cv2.imread(hd_img_path)
        if random_hr_flip:
            hd_img = np.fliplr(hd_img)
        # BGR -> RGB
        hd_img = np.ascontiguousarray(hd_img[:, :, ::-1])
        hd_height, hd_width, _ = hd_img.shape
        hd_patches = np.zeros((self.max_objs, 3, self.patch_size, self.patch_size), dtype=np.float32)
        for k in range(min(num_objs, self.max_objs)):
            label = labels[k].copy()
            bbox = label[2:]
            # Normalized xywh -> HR-image pixels.
            xcenter = bbox[0] * hd_width
            width = bbox[2] * hd_width
            ycenter = bbox[1] * hd_height
            height = bbox[3] * hd_height
            if height > 0 and width > 0:
                # Clamp the crop window to the image bounds.
                start_y = max(0, int(ycenter - height / 2))
                end_y = min(int(start_y + height), hd_height)
                start_x = max(0, int(xcenter - width / 2))
                end_x = min(int(start_x + width), hd_width)
                patch = hd_img[start_y:end_y, start_x:end_x].copy()
                # Safety fallback for degenerate (empty) crops.
                if patch.shape[0] * patch.shape[1] <= 0:
                    patch = cv2.resize(hd_img, (self.patch_size, self.patch_size), cv2.INTER_CUBIC)
                else:
                    patch = cv2.resize(patch, (self.patch_size, self.patch_size), cv2.INTER_CUBIC)
                # cv2.imwrite("./patches/patch_%d_.jpg" % (k,), patch)
                patch = self.transforms(patch)
                hd_patches[k] = patch
        ret = {
            'input': imgs,                        # input image tensor
            'hm': hm,                             # gaussian-weighted center heatmap
            'reg_mask': center_offset_reg_mask,   # regression mask
            'ind': center_1d_index,               # flattened center positions
            'wh': wh,                             # size targets
            'reg': center_offset,                 # center offsets
            'ids': ids,                           # re-ID classes
            "hd_patches": hd_patches,             # HR patch per object [K, 3, patch_size, patch_size]
        }
        return ret
# Soccer-ball detection dataset built on a tiling (patch-sampling) mechanism.
class TilingBallDataset(JointDataset):
    def __init__(
        self,
        opt,
        root,
        paths,
        img_size,
        augment=False,
        transforms=None,
    ):
        """
        Initialization mainly organizes the underlying joint dataset.

        Args:
            opt: basic configuration options.
            root: root directory where images and labels are stored.
            paths: pre-generated train/val list files placed under ``data``.
            img_size: input image size; also the minimum crop size.
        Returns:
        """
        super(TilingBallDataset, self).__init__(opt, root, paths, img_size, augment, transforms)
        assert img_size[0] == img_size[1]
        # Total number of patches sampled per image (positives + negatives).
        self.samples = 3
        # Downscale ratio applied to each cropped patch.
        self.resize_ratio = 1
        self.patch_size = img_size[0] * self.resize_ratio
        self.input_size = img_size[0]
        self.vis_num = 0  # counter used only by the commented-out visualization code

    def crop_patch(self, img, center, object_size, patch_size):
        """
        Crop a ``patch_size`` x ``patch_size`` patch from ``img`` around the
        given center point, choosing the patch origin at random within the
        range that keeps the object fully inside the crop.

        Returns the (resized) patch and a normalized label row
        ``[[1, 0, xc, yc, w, h]]`` expressed relative to the patch.
        """
        xcenter, ycenter = center
        width, height = object_size
        # Probe the valid range for the patch's top-left corner.
        nearest_y = int(max(ycenter - height, 0))
        farest_y = int(max(ycenter - patch_size + height, 0))
        farest_y = int(min(nearest_y, farest_y))
        rand_start_y = random.randint(farest_y, nearest_y)
        nearest_x = int(max(xcenter - width, 0))
        farest_x = int(max(xcenter - patch_size + width, 0))
        farest_x = int(min(nearest_x, farest_x))
        rand_start_x = random.randint(farest_x, nearest_x)
        # Fix the origin if the patch would run past the image border.
        y_diff = rand_start_y + patch_size - img.shape[0]
        if y_diff > 0:
            rand_start_y -= y_diff
        x_diff = rand_start_x + patch_size - img.shape[1]
        if x_diff > 0:
            rand_start_x -= x_diff
        crop_img = img[rand_start_y:rand_start_y+patch_size, rand_start_x:rand_start_x+patch_size].copy()
        # Resize according to the configured resize ratio.
        crop_size = patch_size // self.resize_ratio
        crop_img = cv2.resize(crop_img, (crop_size, crop_size), cv2.INTER_LINEAR)
        new_label = np.array(
            [[1, 0, (xcenter - rand_start_x) / patch_size, (ycenter - rand_start_y) / patch_size, width / patch_size, height / patch_size]]
        )
        return crop_img, new_label

    def get_data(self, img_path, label_path):
        """
        Convert and augment a single image; format its labels.

        Args:
            img_path: path to a single image.
            label_path: path to its label file.
        Returns:
            patch tensor (first ``self.samples`` patches concatenated),
            the per-patch labels, the positive/negative flags, and the
            image path.
        """
        img = cv2.imread(img_path)  # BGR
        if img is None:
            raise ValueError('File corrupt {}'.format(img_path))
        # Get the real image size, then rescale if it is too small.
        augment_hsv = True
        h, w, _ = img.shape
        # Undersized source images are scaled up preserving the aspect ratio.
        ratio = w / h
        if ratio < 1:
            # Portrait image.
            if w < self.patch_size:
                img = cv2.resize(img, (self.patch_size, int(self.patch_size / ratio)), cv2.INTER_LINEAR)
        else:
            if h < self.patch_size:
                img = cv2.resize(img, (int(self.patch_size * ratio), self.patch_size), cv2.INTER_LINEAR)
        img_new_shape = img.shape
        # 1. HSV color-space augmentation.
        if self.augment and augment_hsv:
            hsv_augment(img)
        # 2. Load the image labels.
        origin_labels0 = None
        if os.path.isfile(label_path):
            origin_labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
        if origin_labels0 is not None:
            labels = origin_labels0.copy()
            labels = labels[labels[:, 0] == 1]  # keep only class 1 (the ball)
        else:
            labels = np.array([])
        # 3. Sampling.
        # Patches sampled from one image: positives first, then negatives.
        patch_imgs = []
        patch_labels = []
        pos_samples = []
        # Default center (used by the negative-distance test below when the
        # image contains no positive object).
        ycenter = img_new_shape[0] // 2
        xcenter = img_new_shape[1] // 2
        # Positive samples (at most one: the loop breaks after the first object).
        for i in range(labels.shape[0]):
            label = labels[i].copy()
            xcenter = label[2] * img_new_shape[1]
            ycenter = label[3] * img_new_shape[0]
            bwidth = label[4] * img_new_shape[1]
            bheight = label[5] * img_new_shape[0]
            crop_img, new_label = self.crop_patch(img, (xcenter, ycenter), (bwidth, bheight), self.patch_size)
            patch_imgs.append(crop_img)
            patch_labels.append(new_label)
            pos_samples.append(1)
            break
        # Negative samples.
        for i in range(self.samples):
            probe_times = 0
            while True:
                probe_times += 1
                if probe_times == 100:  # guard: if no negative can be sampled, emit a degenerate sample instead
                    n_ycenter = img_new_shape[0]
                    n_xcenter = img_new_shape[1]
                    break
                n_ycenter = random.randint(0, img_new_shape[0])
                n_xcenter = random.randint(0, img_new_shape[1])
                if abs(n_ycenter - ycenter) >= self.patch_size and abs(n_xcenter - xcenter) >= self.patch_size:
                    break
            crop_img, new_label = self.crop_patch(img, (n_xcenter, n_ycenter), (0, 0), self.patch_size)
            patch_imgs.append(crop_img)
            patch_labels.append(new_label)
            pos_samples.append(0)
        for i in range(len(patch_imgs)):
            # Random horizontal flip (note: `&` works here because both sides are booleans).
            if self.augment & (random.random() > 0.5):
                patch_imgs[i] = np.fliplr(patch_imgs[i])
                if pos_samples[i] == 1:
                    patch_labels[i][:, 2] = 1 - patch_labels[i][:, 2]
            # BGR -> RGB, contiguous memory.
            patch_imgs[i] = np.ascontiguousarray(patch_imgs[i][:, :, ::-1])
            # # visualizes
            # if pos_samples[i] == 1:
            #     xcenter = patch_labels[i][0, 2] * self.input_size
            #     ycenter = patch_labels[i][0, 3] * self.input_size
            #     width = patch_labels[i][0, 4] * self.input_size
            #     height = patch_labels[i][0, 5] * self.input_size
            #     print(xcenter, ycenter, width, height)
            #     cv2.rectangle(patch_imgs[i], (int(xcenter - width / 2), int(ycenter - height / 2)), (int(xcenter + width / 2), int(ycenter + height / 2)), color=(123,213,231), thickness=1)
            #     cv2.imwrite("tmp_%d_.jpg" % (self.vis_num, ), patch_imgs[i])
            # # else:
            # #     cv2.imwrite("tmp_neg_%d_.jpg" % (self.num, ), patch_imgs[i])
            if self.transforms is not None:
                patch_imgs[i] = self.transforms(patch_imgs[i]).unsqueeze(0)
        return torch.cat(patch_imgs[:self.samples], dim = 0), patch_labels[:self.samples], pos_samples[:self.samples], img_path

    def __getitem__(self, files_index):
        # Compute which sub-dataset the global index falls into
        # (self.cds holds the cumulative start index of each dataset).
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        patch_imgs, patch_labels, pos_samples, img_path = self.get_data(img_path, label_path)
        # Compute the final network output size, i.e. after downsampling.
        # c * h * w tensor
        assert self.width == self.height
        output_h = self.width // self.opt.down_ratio
        output_w = self.width // self.opt.down_ratio
        draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian

        def set_targets(selected_labels, maxK, hm, wh, center1dind, centeroffset, centeroffsetregmask):
            # Fill the per-patch target arrays in place for up to maxK objects.
            for k in range(min(maxK, selected_labels.shape[0])):
                label = selected_labels[k].copy()  # label info for the k-th object
                bbox = label[2:]
                # bbox in xywh form
                bbox[[0, 2]] = bbox[[0, 2]] * output_w  # pixel coordinates at output resolution
                bbox[[1, 3]] = bbox[[1, 3]] * output_h  # still xywh at this point
                # bbox in xyxy form
                bbox_amodal = copy.deepcopy(bbox)
                bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.  # convert to xyxy
                bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
                bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
                bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
                bbox[0] = np.clip(bbox[0], 0, output_w - 1)  # clamp the center inside the output map
                bbox[1] = np.clip(bbox[1], 0, output_h - 1)
                h = bbox[3]
                w = bbox[2]
                bbox_xy = copy.deepcopy(bbox)  # xyxy bbox; differs from bbox_amodal only by the clamped x/y
                bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
                bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
                bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
                bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
                # Compute the gaussian-smoothed training targets.
                if h > 0 and w > 0:
                    # Gaussian radius.
                    radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                    radius = max(0, int(radius))
                    radius = 6 if self.opt.mse_loss else radius
                    #radius = max(1, int(radius)) if self.opt.mse_loss else radius
                    ct = np.array([bbox[0], bbox[1]], dtype=np.float32)  # heatmap center (pixels)
                    ct_int = ct.astype(np.int32)  # integer center coordinates
                    # Draw a gaussian of the computed radius at the center on the heatmap.
                    draw_gaussian(hm, ct_int, radius)
                    # wh regression target: either ltrb offsets from the center to
                    # the box corners, or plain (w, h).
                    if self.opt.ltrb:
                        wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
                                bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
                    else:
                        wh[k] = 1. * w, 1. * h
                    center1dind[k] = ct_int[1] * output_w + ct_int[0]  # flattened index of the center in the output map
                    centeroffset[k] = ct - ct_int  # sub-pixel offset lost by downsampling
                    centeroffsetregmask[k] = 1  # only the first K entries contribute to the regression loss
        # Target arrays for all samples.
        MAX_BALL_NUM = 1
        hm = np.zeros((self.samples, 1, output_h, output_w), dtype=np.float32)
        wh = np.zeros((self.samples, MAX_BALL_NUM, 4), dtype=np.float32) if self.opt.ltrb else np.zeros((self.samples, MAX_BALL_NUM, 2), dtype=np.float32)
        center_offset = np.zeros((self.samples, MAX_BALL_NUM, 2), dtype=np.float32)
        center_1d_ind = np.zeros((self.samples, MAX_BALL_NUM, ), dtype=np.int64)
        center_offset_reg_mask = np.zeros((self.samples, MAX_BALL_NUM, ), dtype=np.uint8)
        for i in range(self.samples):
            if pos_samples[i] == 1:
                labels = patch_labels[i]
                set_targets(labels, MAX_BALL_NUM, hm[i, 0], wh[i], center_1d_ind[i], center_offset[i], center_offset_reg_mask[i])
        # Save the heatmap for debugging.
        # save_hm = True
        # import matplotlib.pyplot as plt
        # import seaborn as sns
        # sns.set
        # fig = plt.figure()
        # sns_plot = sns.heatmap(hm[0, 0])
        # fig.savefig("heatmap_%d_.png" % (self.vis_num, ))
        # plt.show()
        # self.vis_num += 1
        # print(patch_imgs.size())
        # print(hm.shape)
        # print(center_offset_reg_mask.shape)
        # print(center_1d_ind.shape)
        # print(wh.shape)
        # print(center_offset.shape)
        # Build the result.
        ret = {
            'input': patch_imgs, # samples * 3 * patch_size * patch_size
            'hm': hm, # samples * 1 * output_h * output_w
            'reg_mask': center_offset_reg_mask, # samples * maxK * 1
            'ind': center_1d_ind, # samples * maxK * 1
            'wh': wh, # samples * maxK * 4 / 2
            'reg': center_offset, # samples * maxK * 2
        }
        return ret
# Detection validation dataset.
class DetDataset(LoadImagesAndLabels):
    """Detection evaluation dataset.

    Returns the preprocessed image together with the *raw* (un-remapped)
    labels from disk, the image path, and the original image size.
    """

    def __init__(
        self,
        root,
        paths,
        img_size,
        augment=False,
        transforms=None
    ):
        """
        Args:
            root: directory that the image paths in the list files are relative to.
            paths: dict mapping dataset name -> list file of image paths.
            img_size: (width, height) of the network input.
            augment: whether augmentation is enabled.
            transforms: normalization / tensor transforms applied to images.
        """
        self.img_files = OrderedDict()
        self.label_files = OrderedDict()
        self.tid_num = OrderedDict()
        self.tid_start_index = OrderedDict()
        for ds, path in paths.items():
            with open(path, 'r') as file:
                self.img_files[ds] = file.readlines()
                self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
                self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
            self.label_files[ds] = [
                x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                for x in self.img_files[ds]]
        # Count the number of distinct track ids per sub-dataset
        # (column 1 of each label row is the track id).
        for ds, label_paths in self.label_files.items():
            max_index = -1
            for lp in label_paths:
                lb = np.loadtxt(lp)
                if len(lb) < 1:
                    continue
                if len(lb.shape) < 2:
                    img_max = lb[1]
                else:
                    img_max = np.max(lb[:, 1])
                if img_max > max_index:
                    max_index = img_max
            self.tid_num[ds] = max_index + 1
        # Assign each sub-dataset a disjoint id range via a running offset.
        last_index = 0
        for i, (k, v) in enumerate(self.tid_num.items()):
            self.tid_start_index[k] = last_index
            last_index += v
        self.nID = int(last_index + 1)
        self.nds = [len(x) for x in self.img_files.values()]
        self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
        self.nF = sum(self.nds)
        self.width = img_size[0]
        self.height = img_size[1]
        self.augment = augment
        self.transforms = transforms
        print('=' * 80)
        print('dataset summary')
        print(self.tid_num)
        print('total # identities:', self.nID)
        print('start index')
        print(self.tid_start_index)
        print('=' * 80)

    def __getitem__(self, files_index):
        """Return (imgs, raw_labels, img_path, (h, w)) for evaluation."""
        # Locate which sub-dataset the global index falls into
        # (self.cds holds the cumulative start index of each dataset).
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        # Fix: labels0 used to be assigned only when the label file existed,
        # so a missing file raised NameError at the return statement below.
        # Default to an empty (0, 6) array instead.
        labels0 = np.zeros((0, 6), dtype=np.float32)
        if os.path.isfile(label_path):
            labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
        imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
        # Remap per-dataset track ids into the global id space.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        return imgs, labels0, img_path, (h, w)
# Evaluate ball-detection performance for the tiling mechanism
# (images are returned at their original size).
class OriginDetDataset(LoadImagesAndLabels):
    """Evaluation dataset that yields full-resolution images.

    Unlike :class:`DetDataset`, the image is not resized; only BGR->RGB
    conversion and the optional normalization transform are applied.
    """

    def __init__(
        self,
        root,
        paths,
        augment=False,
        transforms=None
    ):
        """
        Args:
            root: directory that the image paths in the list files are relative to.
            paths: dict mapping dataset name -> list file of image paths.
            augment: whether augmentation is enabled (unused by get_data here).
            transforms: normalization / tensor transforms applied to images.
        """
        self.img_files = OrderedDict()
        self.label_files = OrderedDict()
        self.tid_num = OrderedDict()
        self.tid_start_index = OrderedDict()
        for ds, path in paths.items():
            with open(path, 'r') as file:
                self.img_files[ds] = file.readlines()
                self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
                self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
            self.label_files[ds] = [
                x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                for x in self.img_files[ds]]
        # Count the number of distinct track ids per sub-dataset
        # (column 1 of each label row is the track id).
        for ds, label_paths in self.label_files.items():
            max_index = -1
            for lp in label_paths:
                lb = np.loadtxt(lp)
                if len(lb) < 1:
                    continue
                if len(lb.shape) < 2:
                    img_max = lb[1]
                else:
                    img_max = np.max(lb[:, 1])
                if img_max > max_index:
                    max_index = img_max
            self.tid_num[ds] = max_index + 1
        # Assign each sub-dataset a disjoint id range via a running offset.
        last_index = 0
        for i, (k, v) in enumerate(self.tid_num.items()):
            self.tid_start_index[k] = last_index
            last_index += v
        self.nID = int(last_index + 1)
        self.nds = [len(x) for x in self.img_files.values()]
        self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
        self.nF = sum(self.nds)
        self.augment = augment
        self.transforms = transforms
        print('=' * 80)
        print('dataset summary')
        print(self.tid_num)
        print('total # identities:', self.nID)
        print('start index')
        print(self.tid_start_index)
        print('=' * 80)

    def get_data(self, img_path, label_path):
        """
        Read the image without any resizing and return it with its labels.
        """
        img = cv2.imread(img_path)  # BGR
        if img is None:
            raise ValueError('File corrupt {}'.format(img_path))
        h, w, _ = img.shape
        if os.path.isfile(label_path):
            labels = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
        else:
            labels = np.array([])
        # BGR to RGB before the normalization transform; make memory contiguous.
        img = np.ascontiguousarray(img[:, :, ::-1])
        # Normalize (0~1 normalize and mean / std).
        if self.transforms is not None:
            img = self.transforms(img)
        return img, labels, img_path, (h, w)

    def __getitem__(self, files_index):
        """Return (imgs, raw_labels, img_path, (h, w)) at original resolution."""
        # Locate which sub-dataset the global index falls into.
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        # Fix: labels0 used to be assigned only when the label file existed,
        # so a missing file raised NameError at the return statement below.
        # Default to an empty (0, 6) array instead.
        labels0 = np.zeros((0, 6), dtype=np.float32)
        if os.path.isfile(label_path):
            labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
        imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
        # Remap per-dataset track ids into the global id space.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        return imgs, labels0, img_path, (h, w)
| [
"random.shuffle",
"torch.cat",
"numpy.clip",
"os.path.isfile",
"glob.glob",
"random.randint",
"numpy.max",
"numpy.loadtxt",
"cv2.resize",
"copy.deepcopy",
"math.ceil",
"numpy.fliplr",
"random.random",
"os.path.isdir",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.imread",
"numpy.array",
... | [((742, 761), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (755, 761), False, 'import os\n'), ((1707, 1727), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1717, 1727), False, 'import cv2\n'), ((1945, 2005), 'cv2.resize', 'cv2.resize', (['img0', '(self.width, self.height)', 'cv2.INTER_CUBIC'], {}), '(img0, (self.width, self.height), cv2.INTER_CUBIC)\n', (1955, 2005), False, 'import cv2\n'), ((2100, 2137), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (2120, 2137), True, 'import numpy as np\n'), ((2635, 2655), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2645, 2655), False, 'import cv2\n'), ((2873, 2933), 'cv2.resize', 'cv2.resize', (['img0', '(self.width, self.height)', 'cv2.INTER_CUBIC'], {}), '(img0, (self.width, self.height), cv2.INTER_CUBIC)\n', (2883, 2933), False, 'import cv2\n'), ((3028, 3065), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (3048, 3065), True, 'import numpy as np\n'), ((3432, 3454), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (3448, 3454), False, 'import cv2\n'), ((4489, 4523), 'cv2.resize', 'cv2.resize', (['img0', '(self.w, self.h)'], {}), '(img0, (self.w, self.h))\n', (4499, 4523), False, 'import cv2\n'), ((4646, 4706), 'cv2.resize', 'cv2.resize', (['img0', '(self.width, self.height)', 'cv2.INTER_CUBIC'], {}), '(img0, (self.width, self.height), cv2.INTER_CUBIC)\n', (4656, 4706), False, 'import cv2\n'), ((4862, 4899), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (4882, 4899), True, 'import numpy as np\n'), ((6227, 6253), 'random.shuffle', 'random.shuffle', (['self.pairs'], {}), '(self.pairs)\n', (6241, 6253), False, 'import random\n'), ((7296, 7316), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7306, 7316), False, 'import cv2\n'), ((11105, 11142), 'numpy.ascontiguousarray', 
'np.ascontiguousarray', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (11125, 11142), True, 'import numpy as np\n'), ((11939, 11952), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11950, 11952), False, 'from collections import OrderedDict\n'), ((11981, 11994), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11992, 11994), False, 'from collections import OrderedDict\n'), ((12019, 12032), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12030, 12032), False, 'from collections import OrderedDict\n'), ((12104, 12117), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12115, 12117), False, 'from collections import OrderedDict\n'), ((15796, 15857), 'numpy.zeros', 'np.zeros', (['(num_classes, output_h, output_w)'], {'dtype': 'np.float32'}), '((num_classes, output_h, output_w), dtype=np.float32)\n', (15804, 15857), True, 'import numpy as np\n'), ((16216, 16262), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (16224, 16262), True, 'import numpy as np\n'), ((16325, 16367), 'numpy.zeros', 'np.zeros', (['(self.max_objs,)'], {'dtype': 'np.int64'}), '((self.max_objs,), dtype=np.int64)\n', (16333, 16367), True, 'import numpy as np\n'), ((16434, 16476), 'numpy.zeros', 'np.zeros', (['(self.max_objs,)'], {'dtype': 'np.uint8'}), '((self.max_objs,), dtype=np.uint8)\n', (16442, 16476), True, 'import numpy as np\n'), ((16517, 16559), 'numpy.zeros', 'np.zeros', (['(self.max_objs,)'], {'dtype': 'np.int64'}), '((self.max_objs,), dtype=np.int64)\n', (16525, 16559), True, 'import numpy as np\n'), ((16626, 16672), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 4)'], {'dtype': 'np.float32'}), '((self.max_objs, 4), dtype=np.float32)\n', (16634, 16672), True, 'import numpy as np\n'), ((25487, 25538), 'numpy.zeros', 'np.zeros', (['(1, output_h, output_w)'], {'dtype': 'np.float32'}), '((1, output_h, output_w), dtype=np.float32)\n', (25495, 25538), True, 'import numpy as 
np\n'), ((25692, 25737), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM, 2)'], {'dtype': 'np.float32'}), '((MAX_BALL_NUM, 2), dtype=np.float32)\n', (25700, 25737), True, 'import numpy as np\n'), ((25763, 25804), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM,)'], {'dtype': 'np.int64'}), '((MAX_BALL_NUM,), dtype=np.int64)\n', (25771, 25804), True, 'import numpy as np\n'), ((25840, 25881), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM,)'], {'dtype': 'np.uint8'}), '((MAX_BALL_NUM,), dtype=np.uint8)\n', (25848, 25881), True, 'import numpy as np\n'), ((25903, 25944), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM,)'], {'dtype': 'np.int64'}), '((MAX_BALL_NUM,), dtype=np.int64)\n', (25911, 25944), True, 'import numpy as np\n'), ((25966, 26011), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM, 4)'], {'dtype': 'np.float32'}), '((MAX_BALL_NUM, 4), dtype=np.float32)\n', (25974, 26011), True, 'import numpy as np\n'), ((28671, 28732), 'numpy.zeros', 'np.zeros', (['(num_classes, output_h, output_w)'], {'dtype': 'np.float32'}), '((num_classes, output_h, output_w), dtype=np.float32)\n', (28679, 28732), True, 'import numpy as np\n'), ((29091, 29137), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (29099, 29137), True, 'import numpy as np\n'), ((29200, 29242), 'numpy.zeros', 'np.zeros', (['(self.max_objs,)'], {'dtype': 'np.int64'}), '((self.max_objs,), dtype=np.int64)\n', (29208, 29242), True, 'import numpy as np\n'), ((29309, 29351), 'numpy.zeros', 'np.zeros', (['(self.max_objs,)'], {'dtype': 'np.uint8'}), '((self.max_objs,), dtype=np.uint8)\n', (29317, 29351), True, 'import numpy as np\n'), ((29392, 29434), 'numpy.zeros', 'np.zeros', (['(self.max_objs,)'], {'dtype': 'np.int64'}), '((self.max_objs,), dtype=np.int64)\n', (29400, 29434), True, 'import numpy as np\n'), ((29501, 29547), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 4)'], {'dtype': 'np.float32'}), '((self.max_objs, 4), dtype=np.float32)\n', (29509, 29547), 
True, 'import numpy as np\n'), ((32908, 32931), 'cv2.imread', 'cv2.imread', (['hd_img_path'], {}), '(hd_img_path)\n', (32918, 32931), False, 'import cv2\n'), ((33044, 33084), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['hd_img[:, :, ::-1]'], {}), '(hd_img[:, :, ::-1])\n', (33064, 33084), True, 'import numpy as np\n'), ((33156, 33241), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 3, self.patch_size, self.patch_size)'], {'dtype': 'np.float32'}), '((self.max_objs, 3, self.patch_size, self.patch_size), dtype=np.float32\n )\n', (33164, 33241), True, 'import numpy as np\n'), ((36345, 36380), 'random.randint', 'random.randint', (['farest_y', 'nearest_y'], {}), '(farest_y, nearest_y)\n', (36359, 36380), False, 'import random\n'), ((36567, 36602), 'random.randint', 'random.randint', (['farest_x', 'nearest_x'], {}), '(farest_x, nearest_x)\n', (36581, 36602), False, 'import random\n'), ((37054, 37116), 'cv2.resize', 'cv2.resize', (['crop_img', '(crop_size, crop_size)', 'cv2.INTER_LINEAR'], {}), '(crop_img, (crop_size, crop_size), cv2.INTER_LINEAR)\n', (37064, 37116), False, 'import cv2\n'), ((37140, 37281), 'numpy.array', 'np.array', (['[[1, 0, (xcenter - rand_start_x) / patch_size, (ycenter - rand_start_y) /\n patch_size, width / patch_size, height / patch_size]]'], {}), '([[1, 0, (xcenter - rand_start_x) / patch_size, (ycenter -\n rand_start_y) / patch_size, width / patch_size, height / patch_size]])\n', (37148, 37281), True, 'import numpy as np\n'), ((37700, 37720), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (37710, 37720), False, 'import cv2\n'), ((38521, 38547), 'os.path.isfile', 'os.path.isfile', (['label_path'], {}), '(label_path)\n', (38535, 38547), False, 'import os\n'), ((45603, 45668), 'numpy.zeros', 'np.zeros', (['(self.samples, 1, output_h, output_w)'], {'dtype': 'np.float32'}), '((self.samples, 1, output_h, output_w), dtype=np.float32)\n', (45611, 45668), True, 'import numpy as np\n'), ((45850, 45909), 'numpy.zeros', 'np.zeros', 
(['(self.samples, MAX_BALL_NUM, 2)'], {'dtype': 'np.float32'}), '((self.samples, MAX_BALL_NUM, 2), dtype=np.float32)\n', (45858, 45909), True, 'import numpy as np\n'), ((45935, 45989), 'numpy.zeros', 'np.zeros', (['(self.samples, MAX_BALL_NUM)'], {'dtype': 'np.int64'}), '((self.samples, MAX_BALL_NUM), dtype=np.int64)\n', (45943, 45989), True, 'import numpy as np\n'), ((46026, 46080), 'numpy.zeros', 'np.zeros', (['(self.samples, MAX_BALL_NUM)'], {'dtype': 'np.uint8'}), '((self.samples, MAX_BALL_NUM), dtype=np.uint8)\n', (46034, 46080), True, 'import numpy as np\n'), ((47775, 47788), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (47786, 47788), False, 'from collections import OrderedDict\n'), ((47817, 47830), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (47828, 47830), False, 'from collections import OrderedDict\n'), ((47855, 47868), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (47866, 47868), False, 'from collections import OrderedDict\n'), ((47901, 47914), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (47912, 47914), False, 'from collections import OrderedDict\n'), ((50026, 50052), 'os.path.isfile', 'os.path.isfile', (['label_path'], {}), '(label_path)\n', (50040, 50052), False, 'import os\n'), ((50653, 50666), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (50664, 50666), False, 'from collections import OrderedDict\n'), ((50695, 50708), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (50706, 50708), False, 'from collections import OrderedDict\n'), ((50733, 50746), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (50744, 50746), False, 'from collections import OrderedDict\n'), ((50779, 50792), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (50790, 50792), False, 'from collections import OrderedDict\n'), ((52596, 52616), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (52606, 52616), False, 'import cv2\n'), ((52758, 52784), 'os.path.isfile', 
'os.path.isfile', (['label_path'], {}), '(label_path)\n', (52772, 52784), False, 'import os\n'), ((53005, 53042), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (53025, 53042), True, 'import numpy as np\n'), ((53581, 53607), 'os.path.isfile', 'os.path.isfile', (['label_path'], {}), '(label_path)\n', (53595, 53607), False, 'import os\n'), ((1009, 1029), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1023, 1029), False, 'import os\n'), ((7860, 7886), 'os.path.isfile', 'os.path.isfile', (['label_path'], {}), '(label_path)\n', (7874, 7886), False, 'import os\n'), ((8874, 8900), 'os.path.isfile', 'os.path.isfile', (['label_path'], {}), '(label_path)\n', (8888, 8900), False, 'import os\n'), ((9286, 9358), 'cv2.resize', 'cv2.resize', (['img', '[self.width, self.height]'], {'interpolation': 'cv2.INTER_AREA'}), '(img, [self.width, self.height], interpolation=cv2.INTER_AREA)\n', (9296, 9358), False, 'import cv2\n'), ((16024, 16070), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 4)'], {'dtype': 'np.float32'}), '((self.max_objs, 4), dtype=np.float32)\n', (16032, 16070), True, 'import numpy as np\n'), ((16144, 16190), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (16152, 16190), True, 'import numpy as np\n'), ((17427, 17446), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (17440, 17446), False, 'import copy\n'), ((17760, 17793), 'numpy.clip', 'np.clip', (['bbox[0]', '(0)', '(output_w - 1)'], {}), '(bbox[0], 0, output_w - 1)\n', (17767, 17793), True, 'import numpy as np\n'), ((17857, 17890), 'numpy.clip', 'np.clip', (['bbox[1]', '(0)', '(output_h - 1)'], {}), '(bbox[1], 0, output_h - 1)\n', (17864, 17890), True, 'import numpy as np\n'), ((17966, 17985), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (17979, 17985), False, 'import copy\n'), ((25553, 25598), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM, 
4)'], {'dtype': 'np.float32'}), '((MAX_BALL_NUM, 4), dtype=np.float32)\n', (25561, 25598), True, 'import numpy as np\n'), ((25621, 25666), 'numpy.zeros', 'np.zeros', (['(MAX_BALL_NUM, 2)'], {'dtype': 'np.float32'}), '((MAX_BALL_NUM, 2), dtype=np.float32)\n', (25629, 25666), True, 'import numpy as np\n'), ((28899, 28945), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 4)'], {'dtype': 'np.float32'}), '((self.max_objs, 4), dtype=np.float32)\n', (28907, 28945), True, 'import numpy as np\n'), ((29019, 29065), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (29027, 29065), True, 'import numpy as np\n'), ((30330, 30349), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (30343, 30349), False, 'import copy\n'), ((30663, 30696), 'numpy.clip', 'np.clip', (['bbox[0]', '(0)', '(output_w - 1)'], {}), '(bbox[0], 0, output_w - 1)\n', (30670, 30696), True, 'import numpy as np\n'), ((30760, 30793), 'numpy.clip', 'np.clip', (['bbox[1]', '(0)', '(output_h - 1)'], {}), '(bbox[1], 0, output_h - 1)\n', (30767, 30793), True, 'import numpy as np\n'), ((30869, 30888), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (30882, 30888), False, 'import copy\n'), ((32984, 33001), 'numpy.fliplr', 'np.fliplr', (['hd_img'], {}), '(hd_img)\n', (32993, 33001), True, 'import numpy as np\n'), ((38809, 38821), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (38817, 38821), True, 'import numpy as np\n'), ((40766, 40813), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['patch_imgs[i][:, :, ::-1]'], {}), '(patch_imgs[i][:, :, ::-1])\n', (40786, 40813), True, 'import numpy as np\n'), ((41741, 41784), 'torch.cat', 'torch.cat', (['patch_imgs[:self.samples]'], {'dim': '(0)'}), '(patch_imgs[:self.samples], dim=0)\n', (41750, 41784), False, 'import torch\n'), ((45683, 45742), 'numpy.zeros', 'np.zeros', (['(self.samples, MAX_BALL_NUM, 4)'], {'dtype': 'np.float32'}), '((self.samples, MAX_BALL_NUM, 4), 
dtype=np.float32)\n', (45691, 45742), True, 'import numpy as np\n'), ((45765, 45824), 'numpy.zeros', 'np.zeros', (['(self.samples, MAX_BALL_NUM, 2)'], {'dtype': 'np.float32'}), '((self.samples, MAX_BALL_NUM, 2), dtype=np.float32)\n', (45773, 45824), True, 'import numpy as np\n'), ((52904, 52916), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (52912, 52916), True, 'import numpy as np\n'), ((858, 884), 'glob.glob', 'glob.glob', (["('%s/*.*' % path)"], {}), "('%s/*.*' % path)\n", (867, 884), False, 'import glob\n'), ((8769, 8781), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8777, 8781), True, 'import numpy as np\n'), ((9808, 9820), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9816, 9820), True, 'import numpy as np\n'), ((10786, 10801), 'random.random', 'random.random', ([], {}), '()\n', (10799, 10801), False, 'import random\n'), ((10921, 10935), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (10930, 10935), True, 'import numpy as np\n'), ((13239, 13253), 'numpy.loadtxt', 'np.loadtxt', (['lp'], {}), '(lp)\n', (13249, 13253), True, 'import numpy as np\n'), ((18637, 18683), 'numpy.array', 'np.array', (['[bbox[0], bbox[1]]'], {'dtype': 'np.float32'}), '([bbox[0], bbox[1]], dtype=np.float32)\n', (18645, 18683), True, 'import numpy as np\n'), ((22919, 22938), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (22932, 22938), False, 'import copy\n'), ((23276, 23309), 'numpy.clip', 'np.clip', (['bbox[0]', '(0)', '(output_w - 1)'], {}), '(bbox[0], 0, output_w - 1)\n', (23283, 23309), True, 'import numpy as np\n'), ((23377, 23410), 'numpy.clip', 'np.clip', (['bbox[1]', '(0)', '(output_h - 1)'], {}), '(bbox[1], 0, output_h - 1)\n', (23384, 23410), True, 'import numpy as np\n'), ((23498, 23517), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (23511, 23517), False, 'import copy\n'), ((31541, 31587), 'numpy.array', 'np.array', (['[bbox[0], bbox[1]]'], {'dtype': 'np.float32'}), '([bbox[0], bbox[1]], dtype=np.float32)\n', (31549, 
31587), True, 'import numpy as np\n'), ((40001, 40036), 'random.randint', 'random.randint', (['(0)', 'img_new_shape[0]'], {}), '(0, img_new_shape[0])\n', (40015, 40036), False, 'import random\n'), ((40066, 40101), 'random.randint', 'random.randint', (['(0)', 'img_new_shape[1]'], {}), '(0, img_new_shape[1])\n', (40080, 40101), False, 'import random\n'), ((40600, 40624), 'numpy.fliplr', 'np.fliplr', (['patch_imgs[i]'], {}), '(patch_imgs[i])\n', (40609, 40624), True, 'import numpy as np\n'), ((43231, 43250), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (43244, 43250), False, 'import copy\n'), ((43588, 43621), 'numpy.clip', 'np.clip', (['bbox[0]', '(0)', '(output_w - 1)'], {}), '(bbox[0], 0, output_w - 1)\n', (43595, 43621), True, 'import numpy as np\n'), ((43689, 43722), 'numpy.clip', 'np.clip', (['bbox[1]', '(0)', '(output_h - 1)'], {}), '(bbox[1], 0, output_h - 1)\n', (43696, 43722), True, 'import numpy as np\n'), ((43810, 43829), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (43823, 43829), False, 'import copy\n'), ((48576, 48590), 'numpy.loadtxt', 'np.loadtxt', (['lp'], {}), '(lp)\n', (48586, 48590), True, 'import numpy as np\n'), ((51454, 51468), 'numpy.loadtxt', 'np.loadtxt', (['lp'], {}), '(lp)\n', (51464, 51468), True, 'import numpy as np\n'), ((13494, 13510), 'numpy.max', 'np.max', (['lb[:, 1]'], {}), '(lb[:, 1])\n', (13500, 13510), True, 'import numpy as np\n'), ((24217, 24263), 'numpy.array', 'np.array', (['[bbox[0], bbox[1]]'], {'dtype': 'np.float32'}), '([bbox[0], bbox[1]], dtype=np.float32)\n', (24225, 24263), True, 'import numpy as np\n'), ((34059, 34130), 'cv2.resize', 'cv2.resize', (['hd_img', '(self.patch_size, self.patch_size)', 'cv2.INTER_CUBIC'], {}), '(hd_img, (self.patch_size, self.patch_size), cv2.INTER_CUBIC)\n', (34069, 34130), False, 'import cv2\n'), ((34183, 34253), 'cv2.resize', 'cv2.resize', (['patch', '(self.patch_size, self.patch_size)', 'cv2.INTER_CUBIC'], {}), '(patch, (self.patch_size, 
self.patch_size), cv2.INTER_CUBIC)\n', (34193, 34253), False, 'import cv2\n'), ((38582, 38622), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (38592, 38622), True, 'import numpy as np\n'), ((40543, 40558), 'random.random', 'random.random', ([], {}), '()\n', (40556, 40558), False, 'import random\n'), ((44529, 44575), 'numpy.array', 'np.array', (['[bbox[0], bbox[1]]'], {'dtype': 'np.float32'}), '([bbox[0], bbox[1]], dtype=np.float32)\n', (44537, 44575), True, 'import numpy as np\n'), ((48784, 48800), 'numpy.max', 'np.max', (['lb[:, 1]'], {}), '(lb[:, 1])\n', (48790, 48800), True, 'import numpy as np\n'), ((50077, 50117), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (50087, 50117), True, 'import numpy as np\n'), ((51662, 51678), 'numpy.max', 'np.max', (['lb[:, 1]'], {}), '(lb[:, 1])\n', (51668, 51678), True, 'import numpy as np\n'), ((52811, 52851), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (52821, 52851), True, 'import numpy as np\n'), ((53632, 53672), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (53642, 53672), True, 'import numpy as np\n'), ((8041, 8081), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (8051, 8081), True, 'import numpy as np\n'), ((8932, 8972), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (8942, 8972), True, 'import numpy as np\n'), ((18397, 18409), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (18406, 18409), False, 'import math\n'), ((18411, 18423), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (18420, 18423), False, 'import math\n'), ((31300, 31312), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (31309, 31312), False, 'import math\n'), ((31314, 31326), 
'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (31323, 31326), False, 'import math\n'), ((23961, 23973), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (23970, 23973), False, 'import math\n'), ((23975, 23987), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (23984, 23987), False, 'import math\n'), ((44273, 44285), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (44282, 44285), False, 'import math\n'), ((44287, 44299), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (44296, 44299), False, 'import math\n'), ((934, 953), 'os.path.splitext', 'os.path.splitext', (['x'], {}), '(x)\n', (950, 953), False, 'import os\n')] |
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
import contextlib
import numpy as np
import pathlib
if TYPE_CHECKING:
import collections.abc
__all__ = [
"expand",
"temporary_seed",
]
def expand(path: str, dir: Optional[str] = None) -> str:
    """Expand relative path or path with user shortcut.

    Parameters
    ----------
    path : str
        The path.
    dir : Optional[str], optional
        The directory containing the path, by default None.

    Returns
    -------
    str
        The explicit absolute path.
    """
    expanded = pathlib.Path(path).expanduser()
    if dir is not None:
        # prepend the containing directory before resolving
        expanded = pathlib.Path(dir) / expanded
    return str(expanded.expanduser().resolve())
@contextlib.contextmanager
def temporary_seed(seed: int) -> collections.abc.Generator[None, None, None]:
    """Temporarily set numpy random seed.

    Parameters
    ----------
    seed : int
        Random seed.
    """
    # adapted from https://stackoverflow.com/a/49557127
    saved_state = np.random.get_state()
    if seed is not None:
        np.random.seed(seed)
    try:
        yield
    finally:
        # always restore the global RNG state, even on exceptions
        np.random.set_state(saved_state)
| [
"numpy.random.get_state",
"pathlib.Path",
"numpy.random.seed",
"numpy.random.set_state"
] | [((1017, 1038), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1036, 1038), True, 'import numpy as np\n'), ((1072, 1092), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1086, 1092), True, 'import numpy as np\n'), ((1137, 1163), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (1156, 1163), True, 'import numpy as np\n'), ((586, 604), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (598, 604), False, 'import pathlib\n'), ((654, 671), 'pathlib.Path', 'pathlib.Path', (['dir'], {}), '(dir)\n', (666, 671), False, 'import pathlib\n')] |
"""
Provides analysis tools for wind data.
"""
import matplotlib.pyplot as plt
import pandas
from pandas import DataFrame, Grouper
from windrose import WindroseAxes
from scipy import stats
import numpy as np
from .classes import WindTurbine
def boxplot(data, fields=None, labels=None, **box_kwargs):
    """
    Draws boxplots of wind speeds.

    .. image:: ../docs/boxplot.jpg

    Args:
        data (DataFrame): wind data
        fields (:obj:`list` of :obj:`str`, optional): a list of columns to include from the
            given `data`. If none are provided, these will be inferred using any columns in
            `data` with the prefix `'windspeed_'`.
        labels (:obj:`list` of :obj:`str`, optional): a list of labels to use. If none are
            provided, they default to `fields` when `fields` was given explicitly, or to
            the suffix after `'windspeed_'` when the fields were inferred.
            e.g. `'windspeed_90m'` -> `'90m'`
        box_kwargs (dict, optional): additional parameters for `matplotlib.pyplot.boxplot`

    Returns:
        tuple: A tuple (fig, ax) consisting of a `matplotlib.figure.Figure` and
        `matplotlib.axes.Axes`.
    """
    assert isinstance(data, DataFrame), '"data" must be a DataFrame'
    inferred = False
    if fields:
        assert isinstance(fields, list), '"fields" must be a list or None'
        msg = '"fields" elements must be strings'
        assert all([isinstance(f, str) for f in fields]), msg
    else:
        # BUGFIX: fields were previously inferred only when *both* fields and
        # labels were missing, so calling boxplot(data, labels=[...]) crashed
        # with fields still None.
        fields = list(filter(lambda x: 'windspeed' in x, data.columns[:]))
        inferred = True
    if labels:
        assert isinstance(labels, list), '"labels" must be a list or None'
        msg = '"labels" elements must be strings'
        assert all([isinstance(label, str) for label in labels]), msg
    elif inferred:
        # inferred columns: label by the elevation suffix
        labels = [field.split('_')[1] for field in fields]
    else:
        labels = fields
    x = [list(data[field]) for field in fields]
    fig, ax = plt.subplots()
    ax.boxplot(
        x,
        labels=labels,
        flierprops=dict(marker='_', markeredgecolor='red'),
        boxprops=dict(color='blue'),
        medianprops=dict(color='red'), **box_kwargs)
    ax.set_ylabel('Wind Speed (m/s)', fontsize='large')
    ax.set_xlabel('Elevation (m)', fontsize='large')
    return fig, ax
def plot_windrose(data, speed=None, direction=None, **wr_kwargs):
    """
    Generates a windrose plot from the given data.

    .. image:: ../docs/windrose.png

    Args:
        data (DataFrame): Wind data
        speed (str, optional): Wind speed column name. If not provided, it will be inferred
            from `data` (the first column containing the string 'windspeed').
        direction (str, optional): Wind direction column name. If not provided, it will be
            inferred from `data` (the first column containing the string 'winddirection').
        wr_kwargs (dict, optional): Additional windrose parameters. See
            https://windrose.readthedocs.io for more info.

    Returns:
        WindroseAxes: A `WindroseAxes` instance.
    """
    assert isinstance(data, DataFrame), '"data" must be a DataFrame'
    if speed:
        assert isinstance(speed, str), '"speed" must be a string'
        assert speed in data, "column not found: %s" % speed
    else:
        candidates = list(filter(lambda x: 'windspeed' in x, data.columns[:]))
        assert len(candidates) > 0, 'unable to infer wind speed data column'
        speed = candidates[0]
    ws = list(data[speed])
    if direction:
        assert isinstance(direction, str), '"direction" must be a string'
        assert direction in data, 'column not found: %s' % direction
    else:
        candidates = list(filter(lambda x: 'winddirection' in x, data.columns[:]))
        assert len(candidates) > 0, 'unable to infer wind direction data column'
        direction = candidates[0]
    wd = list(data[direction])
    # NOTE: this is a workaround for a current bug in the `windrose` package
    ax = WindroseAxes.from_ax(theta_labels=["E", "N-E", "N", "N-W", "W", "S-W", "S", "S-E"])
    ax.bar(wd, ws, normed=True, opening=0.8, edgecolor='white', **wr_kwargs)
    ax.set_legend()
    return ax
def pdf(data, speed=None, hist_kwargs=None, plot_kwargs=None):
    """
    Generates a Weibull probability density plot from the given data.

    .. image:: ../docs/pdf.jpg

    Args:
        data (DataFrame): Wind data
        speed (str, optional): Wind speed column name. If not provided, it will be inferred
            from `data` (the first column containing the string 'windspeed').
        hist_kwargs (dict, optional): Additional histogram parameters.
        plot_kwargs (dict, optional): Additional plot parameters.

    Returns:
        tuple: (fig, ax, params) consisting of a `matplotlib.figure.Figure`,
        `matplotlib.axes.Axes`, and 4-element tuple of floats/ints representing
        shape (2), location, and scale.
    """
    assert isinstance(data, DataFrame), '"data" must be a DataFrame'
    plot_kwargs = plot_kwargs or {}
    hist_kwargs = hist_kwargs or {}
    assert isinstance(plot_kwargs, dict), '"plot_kwargs" must be a dict'
    assert isinstance(hist_kwargs, dict), '"hist_kwargs" must be a dict'
    if speed:
        assert isinstance(speed, str), '"speed" must be a string'
        assert speed in data, "column not found: %s" % speed
    else:
        candidates = list(filter(lambda x: 'windspeed' in x, data.columns[:]))
        assert len(candidates) > 0, 'unable to infer wind speed data column'
        speed = candidates[0]
    ws = list(data[speed])
    # Fit Weibull function (exponweib with the first shape fixed to 1, loc fixed to 0)
    params = stats.exponweib.fit(ws, floc=0, f0=1)
    # Plotting
    fig, ax = plt.subplots()
    # Histogram; bin count scales with the observed maximum speed
    n_bins = round(max(ws)) + 5
    values, bin_edges, hist = ax.hist(ws, bins=n_bins, density=True, lw=1, ec='black', **hist_kwargs)
    centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
    # Overlay the fitted density evaluated at the bin centers
    ax.plot(
        centers,
        stats.exponweib.pdf(centers, *params),
        lw=2, label='Weibull', color='r', **plot_kwargs)
    ax.set_xlabel('Wind Speed (m/s)', fontsize='large')
    ax.set_ylabel('Probability Density', fontsize='large')
    ax.legend()
    return fig, ax, params
def get_diurnal_stats(data, speed=None):
    """
    Returns basic relevant diurnal wind speed statistics for the given data.

    Args:
        data (DataFrame): Wind data with a datetime-like index.
        speed (str, optional): Wind speed column name. If not provided, it will be inferred
            from `data` (the first column containing the string 'windspeed').

    Returns:
        DataFrame: A DataFrame indexed by hour of day (0-23) with columns
        'Mean', 'Mean+Std', 'Mean-Std', '10th Percentile', 'Median',
        '90th Percentile'.
    """
    assert isinstance(data, DataFrame), '"data" must be a DataFrame'
    if speed:
        assert isinstance(speed, str), '"speed" must be a string'
        assert speed in data, "column not found: %s" % speed
        ws = data[speed]
    else:
        fields = list(filter(lambda x: 'windspeed' in x, data.columns[:]))
        assert len(fields) > 0, 'unable to infer wind speed data column'
        ws = data[fields[0]]
    # Group by hour once instead of re-grouping for every statistic.
    hourly = ws.groupby(data.index.hour)
    mean = hourly.mean()
    std = hourly.std()
    df = pandas.concat(
        [mean, mean + std, mean - std,
         hourly.quantile(q=.1), hourly.median(), hourly.quantile(q=.9)],
        axis=1)
    df.columns = ['Mean', 'Mean+Std', 'Mean-Std', '10th Percentile', 'Median', '90th Percentile']
    return df
def plot_diurnal_stats(data, speed=None):
    """
    Plots basic relevant diurnal wind speed statistics for the given data.

    .. image:: ../docs/diurnal.jpg

    Args:
        data (DataFrame): Wind data
        speed (str, optional): Wind speed column name. If not provided, it will be inferred
            from `data` (the first column containing the string 'windspeed').

    Returns:
        tuple: A tuple (fig, ax, df) consisting of a `matplotlib.figure.Figure`,
        `matplotlib.axes.Axes`, and a `pandas.DataFrame` (same data as `get_diurnal_stats`).
    """
    # data/field validation is performed inside get_diurnal_stats
    stats_df = get_diurnal_stats(data, speed)
    markers = ('+', '*', '.', '2', 'x', '')
    fig, ax = plt.subplots()
    # one line per statistic, each with its own marker
    for marker, column in zip(markers, stats_df):
        ax.plot(stats_df[column], label=column, marker=marker)
    ax.set_xlabel('Hour', fontsize='large')
    ax.set_ylabel('Wind Speed (m/s)', fontsize='large')
    ax.set_xticks(np.arange(0, 23, 2))
    ax.legend()
    return fig, ax, stats_df
def turbulence_std(data, turbine, speed=None, b=5.6):
    """
    Calculates the turbulence standard deviation.

    Args:
        data (Union[float, DataFrame]): Wind speed velocity (m/s) at hub height,
            either a single value or a time-indexed DataFrame.
        turbine (WindTurbine): A `WindTurbine` instance (its `i_ref` is used).
        speed (str, optional): Wind speed column name (DataFrame input only). If not
            provided, it will be inferred (first column containing 'windspeed').
        b (float, optional): Additional adjustment parameter (m/s)

    Returns:
        float or DataFrame: Turbulence standard deviation — a float for float
        input, otherwise a single-column DataFrame of 10-minute values.
    """
    assert isinstance(data, (float, DataFrame)), '"data" must be a float or DataFrame'
    assert isinstance(turbine, WindTurbine), '"turbine" must be a WindTurbine'
    if isinstance(data, float):
        return turbine.i_ref*(0.75*data + b)
    if speed:
        assert isinstance(speed, str), '"speed" must be a string'
        assert speed in data, "column not found: %s" % speed
        ws = data[speed]
    else:
        candidates = list(filter(lambda x: 'windspeed' in x, data.columns[:]))
        assert len(candidates) > 0, 'unable to infer wind speed data column'
        ws = data[candidates[0]]
    # 10-minute block averages (works for any input resolution), then the
    # std formula i_ref * (0.75*u + b) applied per row.
    ten_min_mean = ws.groupby(Grouper(freq='10min')).mean()
    df = DataFrame(ten_min_mean)
    df.columns = ['turbulence_std']
    return df.apply(lambda d: turbine.i_ref*(0.75*d + b))
| [
"pandas.DataFrame",
"windrose.WindroseAxes.from_ax",
"scipy.stats.exponweib.fit",
"scipy.stats.exponweib.pdf",
"numpy.arange",
"pandas.Grouper",
"matplotlib.pyplot.subplots",
"pandas.concat"
] | [((1969, 1983), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1981, 1983), True, 'import matplotlib.pyplot as plt\n'), ((4077, 4164), 'windrose.WindroseAxes.from_ax', 'WindroseAxes.from_ax', ([], {'theta_labels': "['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S', 'S-E']"}), "(theta_labels=['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S',\n 'S-E'])\n", (4097, 4164), False, 'from windrose import WindroseAxes\n'), ((5739, 5776), 'scipy.stats.exponweib.fit', 'stats.exponweib.fit', (['ws'], {'floc': '(0)', 'f0': '(1)'}), '(ws, floc=0, f0=1)\n', (5758, 5776), False, 'from scipy import stats\n'), ((5808, 5822), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5820, 5822), True, 'import matplotlib.pyplot as plt\n'), ((7652, 7722), 'pandas.concat', 'pandas.concat', (['[mean, plus_std, minus_std, p_10, median, p_90]'], {'axis': '(1)'}), '([mean, plus_std, minus_std, p_10, median, p_90], axis=1)\n', (7665, 7722), False, 'import pandas\n'), ((8575, 8589), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8587, 8589), True, 'import matplotlib.pyplot as plt\n'), ((10037, 10054), 'pandas.DataFrame', 'DataFrame', (['ws_avg'], {}), '(ws_avg)\n', (10046, 10054), False, 'from pandas import DataFrame, Grouper\n'), ((6088, 6124), 'scipy.stats.exponweib.pdf', 'stats.exponweib.pdf', (['center', '*params'], {}), '(center, *params)\n', (6107, 6124), False, 'from scipy import stats\n'), ((8816, 8835), 'numpy.arange', 'np.arange', (['(0)', '(23)', '(2)'], {}), '(0, 23, 2)\n', (8825, 8835), True, 'import numpy as np\n'), ((9998, 10019), 'pandas.Grouper', 'Grouper', ([], {'freq': '"""10min"""'}), "(freq='10min')\n", (10005, 10019), False, 'from pandas import DataFrame, Grouper\n')] |
print("Importing libraries...")
import cv2
import numpy as np
import os
import random
import h5py
data_directory = "./data" #insert the directory you'll be working with
img_size = 128  # images are resized to img_size x img_size before training
categories = ["Positive", "Negative"]  # class folder names; index in this list is the label
training_data = []  # filled by create_training_data() with [image, class_index] pairs
def create_training_data():
    """Populate the global ``training_data`` list from ``data_directory``.

    Every file under ``data_directory/<category>`` is read as a grayscale
    image, resized to ``img_size`` x ``img_size`` and appended to
    ``training_data`` as ``[image, class_index]``, where the class index is
    the category's position in ``categories``.
    """
    for category in categories:
        path = os.path.join(data_directory, category)
        class_num = categories.index(category)
        # read and resize the images and append to training_data a list with the image itself and its class number
        for img in os.listdir(path):
            img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
            if img_array is None:
                # BUGFIX: cv2.imread returns None for unreadable/non-image
                # files (e.g. hidden files); skip instead of crashing in resize
                continue
            new_array = cv2.resize(img_array, (img_size, img_size))
            training_data.append([new_array, class_num])
print("Creating training data...")
create_training_data()
print("Training data successfully created!!")
print("Shuffling training data...")
# shuffle so that the two classes are interleaved rather than in folder order
random.shuffle(training_data)
print("Training data successfully shuffled!!")
X_data = []
y = []
# create X with the features (the images) and y with the targets (labels)
for features, label in training_data:
    X_data.append(features)
    y.append(label)
print("X and y data successfully created!!")
# reshape the image to be on the correct format for tensorflow (nº images, width, height, channels)
print("Reshaping X data...")
X = np.array(X_data).reshape(len(X_data), img_size, img_size, 1)
print("X data successfully reshaped!!")
print("Saving the data...")
# write both arrays to a single compressed HDF5 file
hf = h5py.File("./concrete_crack_image_data.h5", "w") #Replace the three dots with the directory you want to save your dataset in
hf.create_dataset("X_concrete", data = X, compression = "gzip")
hf.create_dataset("y_concrete", data = y, compression = "gzip")
hf.close()
print("Data successfully saved!!")
| [
"h5py.File",
"random.shuffle",
"numpy.array",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((944, 973), 'random.shuffle', 'random.shuffle', (['training_data'], {}), '(training_data)\n', (958, 973), False, 'import random\n'), ((1536, 1584), 'h5py.File', 'h5py.File', (['"""./concrete_crack_image_data.h5"""', '"""w"""'], {}), "('./concrete_crack_image_data.h5', 'w')\n", (1545, 1584), False, 'import h5py\n'), ((334, 372), 'os.path.join', 'os.path.join', (['data_directory', 'category'], {}), '(data_directory, category)\n', (346, 372), False, 'import os\n'), ((567, 583), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (577, 583), False, 'import os\n'), ((1397, 1413), 'numpy.array', 'np.array', (['X_data'], {}), '(X_data)\n', (1405, 1413), True, 'import numpy as np\n'), ((693, 736), 'cv2.resize', 'cv2.resize', (['img_array', '(img_size, img_size)'], {}), '(img_array, (img_size, img_size))\n', (703, 736), False, 'import cv2\n'), ((621, 644), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (633, 644), False, 'import os\n')] |
import numpy as np
import timeit
embeddings = np.genfromtxt("embeddings.txt", delimiter=',')
def test():
    """Return the largest squared Euclidean distance between the first
    embedding and the next 10000 embeddings, after L2-normalizing both sides.
    """
    query = embeddings[0:1]
    gallery = embeddings[1:10001]
    # normalize each row to unit length
    query = query/np.linalg.norm(query, axis=1, keepdims=True)
    gallery = gallery/np.linalg.norm(gallery, axis=1, keepdims=True)
    delta = np.subtract(query, gallery)
    sq_dist = np.sum(np.square(delta), 1)
    # print(np.argmax(sq_dist));
    return np.max(sq_dist)
# print(test())
print("started")
# Time 1000 calls of test(), repeated 3 times; gc is re-enabled in setup so
# the measurement reflects realistic allocation behavior.
print(timeit.repeat("test()", setup="from __main__ import test; gc.enable();", number=1000, repeat=3))
| [
"numpy.subtract",
"timeit.repeat",
"numpy.square",
"numpy.genfromtxt",
"numpy.max",
"numpy.linalg.norm"
] | [((47, 93), 'numpy.genfromtxt', 'np.genfromtxt', (['"""embeddings.txt"""'], {'delimiter': '""","""'}), "('embeddings.txt', delimiter=',')\n", (60, 93), True, 'import numpy as np\n'), ((354, 391), 'numpy.subtract', 'np.subtract', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (365, 391), True, 'import numpy as np\n'), ((471, 483), 'numpy.max', 'np.max', (['dist'], {}), '(dist)\n', (477, 483), True, 'import numpy as np\n'), ((525, 625), 'timeit.repeat', 'timeit.repeat', (['"""test()"""'], {'setup': '"""from __main__ import test; gc.enable();"""', 'number': '(1000)', 'repeat': '(3)'}), "('test()', setup='from __main__ import test; gc.enable();',\n number=1000, repeat=3)\n", (538, 625), False, 'import timeit\n'), ((211, 261), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings1'], {'axis': '(1)', 'keepdims': '(True)'}), '(embeddings1, axis=1, keepdims=True)\n', (225, 261), True, 'import numpy as np\n'), ((292, 342), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings2'], {'axis': '(1)', 'keepdims': '(True)'}), '(embeddings2, axis=1, keepdims=True)\n', (306, 342), True, 'import numpy as np\n'), ((410, 425), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (419, 425), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import librosa
import random
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
def f0_to_2d(f0, sr, n_fft, f0_max):
    """Convert an f0 contour into a one-hot 2-D float32 map.

    The row index of each frame's "1" is the FFT bin of its f0 value
    (``f0 * n_fft / sr``, truncated); the column index is the frame.
    The map has at least 100 rows (the historical default) and enough
    rows to represent ``f0_max``.
    """
    contour = f0[0]  # f0 arrives with a leading singleton dimension
    n_rows = max(int(f0_max * n_fft / sr + 1), 100)
    onehot = np.zeros((n_rows, len(contour)), dtype=np.float32)
    bin_idx = (contour * n_fft / sr).astype(int)
    onehot[bin_idx, np.arange(len(contour))] = 1
    return onehot
class AIShell(Dataset):
    """PyTorch dataset over precomputed AIShell speech features.

    For each row of the CSV ``table`` (filtered to ``subset``), the sample
    directory ``folder + path`` is expected to contain ``sp.npy`` (spectral
    envelope), ``f0.npy`` (pitch contour) and ``speech.wav`` (raw audio).
    """
    def __init__(
            self,
            folder,
            table,
            subset='train',
            frames=430,
            hop_length=512,
            shifting=200,
            f0_type='2d',
            augment=True
    ):
        # folder: root directory prefixed to each 'path' entry of the table
        # table: CSV file with at least 'path' and 'subset' columns
        # subset: which split to load ('train' by default)
        # frames: fixed number of feature frames per sample (zero-padded if shorter)
        # hop_length: audio samples per feature frame
        # shifting: extra rows appended to sp for a vertical shift (see __getitem__)
        # f0_type: '2d' converts f0 into a one-hot map via f0_to_2d
        # augment: if True, the vertical shift amount is randomized per sample
        self.folder = folder
        df = pd.read_csv(table)
        df = df[df['subset'] == subset]
        self.df = df
        self.frames = frames
        self.shifting = shifting
        self.hop_length = hop_length
        self.f0_type = f0_type
        self.augment = augment
    def __getitem__(self, i):
        """Return (sp, f0, audio) float32 arrays for sample ``i``."""
        path = self.df.iloc[i]['path']
        sp = np.load(self.folder+path+'sp.npy')
        f0 = np.load(self.folder+path+'f0.npy')
        # NOTE(review): the f0_to_2d call below hardcodes sr=22050 —
        # confirm librosa.load's effective sampling rate matches.
        audio, fs = librosa.load(self.folder+path+'speech.wav', dtype=np.float32)
        audio = audio[np.newaxis, :]
        if self.frames:
            # zero-pad features and audio up to the fixed frame count
            if sp.shape[-1] < self.frames:
                sp = np.append(sp, np.zeros((sp.shape[0], self.frames-sp.shape[1]),
                                             dtype=np.float32), axis=-1)
                f0 = np.append(f0, np.zeros((f0.shape[0], self.frames-f0.shape[1]),
                                             dtype=np.float32), axis=-1)
            if audio.shape[1] < self.frames*self.hop_length:
                audio = np.append(audio, np.zeros((audio.shape[0],
                                                   self.frames*self.hop_length-audio.shape[1]),
                                                  dtype=np.float32), axis=-1)
        if self.shifting:
            # copy sp into a taller array at a (possibly random) row offset
            new_sp = np.zeros((sp.shape[0]+self.shifting, sp.shape[1]), dtype=np.float32)
            if self.augment:
                shift_num = random.randint(0, self.shifting-1)
            else:
                shift_num = 0
            new_sp[shift_num:sp.shape[0]+shift_num, :] = sp
            sp = new_sp
        # output: crop every tensor to the fixed length
        sp = np.array(sp, dtype=np.float32)[:, :self.frames]
        f0 = np.array(f0, dtype=np.float32)[:, :self.frames]
        audio = np.array(audio, dtype=np.float32)[:, :self.frames*self.hop_length]
        if self.f0_type == '2d':
            f0 = f0_to_2d(f0, 22050, 2048, 1000)
        return sp, f0, audio
    def __len__(self):
        """Number of samples in the selected subset."""
        return len(self.df)
| [
"numpy.load",
"random.randint",
"pandas.read_csv",
"numpy.zeros",
"numpy.array",
"librosa.load"
] | [((500, 533), 'numpy.array', 'np.array', (['f0_2d'], {'dtype': 'np.float32'}), '(f0_2d, dtype=np.float32)\n', (508, 533), True, 'import numpy as np\n'), ((875, 893), 'pandas.read_csv', 'pd.read_csv', (['table'], {}), '(table)\n', (886, 893), True, 'import pandas as pd\n'), ((1212, 1250), 'numpy.load', 'np.load', (["(self.folder + path + 'sp.npy')"], {}), "(self.folder + path + 'sp.npy')\n", (1219, 1250), True, 'import numpy as np\n'), ((1261, 1299), 'numpy.load', 'np.load', (["(self.folder + path + 'f0.npy')"], {}), "(self.folder + path + 'f0.npy')\n", (1268, 1299), True, 'import numpy as np\n'), ((1317, 1382), 'librosa.load', 'librosa.load', (["(self.folder + path + 'speech.wav')"], {'dtype': 'np.float32'}), "(self.folder + path + 'speech.wav', dtype=np.float32)\n", (1329, 1382), False, 'import librosa\n'), ((2147, 2217), 'numpy.zeros', 'np.zeros', (['(sp.shape[0] + self.shifting, sp.shape[1])'], {'dtype': 'np.float32'}), '((sp.shape[0] + self.shifting, sp.shape[1]), dtype=np.float32)\n', (2155, 2217), True, 'import numpy as np\n'), ((2480, 2510), 'numpy.array', 'np.array', (['sp'], {'dtype': 'np.float32'}), '(sp, dtype=np.float32)\n', (2488, 2510), True, 'import numpy as np\n'), ((2542, 2572), 'numpy.array', 'np.array', (['f0'], {'dtype': 'np.float32'}), '(f0, dtype=np.float32)\n', (2550, 2572), True, 'import numpy as np\n'), ((2607, 2640), 'numpy.array', 'np.array', (['audio'], {'dtype': 'np.float32'}), '(audio, dtype=np.float32)\n', (2615, 2640), True, 'import numpy as np\n'), ((1902, 1998), 'numpy.zeros', 'np.zeros', (['(audio.shape[0], self.frames * self.hop_length - audio.shape[1])'], {'dtype': 'np.float32'}), '((audio.shape[0], self.frames * self.hop_length - audio.shape[1]),\n dtype=np.float32)\n', (1910, 1998), True, 'import numpy as np\n'), ((2275, 2311), 'random.randint', 'random.randint', (['(0)', '(self.shifting - 1)'], {}), '(0, self.shifting - 1)\n', (2289, 2311), False, 'import random\n'), ((1524, 1592), 'numpy.zeros', 'np.zeros', 
(['(sp.shape[0], self.frames - sp.shape[1])'], {'dtype': 'np.float32'}), '((sp.shape[0], self.frames - sp.shape[1]), dtype=np.float32)\n', (1532, 1592), True, 'import numpy as np\n'), ((1682, 1750), 'numpy.zeros', 'np.zeros', (['(f0.shape[0], self.frames - f0.shape[1])'], {'dtype': 'np.float32'}), '((f0.shape[0], self.frames - f0.shape[1]), dtype=np.float32)\n', (1690, 1750), True, 'import numpy as np\n')] |
import glob
import os.path
import cv2 as cv
import skimage.io
import numpy as np
import pandas as pd
import itertools as it
from skimage import measure
from copy import copy, deepcopy
from AffineCa2p.cellpose import models, utils
from AffineCa2p.FAIM.asift import affine_detect
from multiprocessing.pool import ThreadPool
from skimage.segmentation import find_boundaries
from AffineCa2p.FAIM.find_obj import init_feature, filter_matches, explore_match
def AlignIm(path_to_FOV, path_to_masks=[], preprocess=False, diameter=None, templateID=0, iterNum=100, method='sift'):
    """ perform fully affine invariant method on calcium imaging field-of-view (FOV) images.
    The function saves the transformation matrices and the registered FOV images under the input folder.

    Parameters
    -------------
    path_to_FOV: str
        the path of the folder containing FOV images
    path_to_masks: str (default [ ])
        the path of the folder containing ROI masks. If the value is empty, the code will
        automatically extract ROI masks using cellpose. (The default is compared with
        ``== []`` and never mutated, so the mutable default is harmless here.)
    preprocess: bool (default False)
        whether or not to apply contrast adjustment for the original FOV images
    diameter: int (default None)
        neuron diameter. If None, the diameter will be estimated by Cellpose.
    templateID: int (default 0)
        choose which FOV image is the template for alignment
    iterNum: int (default 100)
        the number of iterations for the fully affine invariant method
    method: str (default 'sift')
        one of 'akaze', 'sift', 'surf', 'brisk', 'orb' (prefixed with 'A' in output names)

    Returns
    ----------------
    Tmatrices: list of the transformation matrices
    regImages: list of the registered FOV images
    regROIs: list of the registered ROI masks
    (None for an unrecognized method, matching the original behavior)
    """
    files = get_file_names(path_to_FOV)
    generate_summary(templateID, files)
    if preprocess == True:
        imgs = Image_enhance_contrast(files)
    else:
        imgs = [skimage.io.imread(f) for f in files]
    nimg = len(imgs)
    if path_to_masks == []:
        # no masks supplied: segment every FOV with cellpose
        model = models.Cellpose(gpu=False, model_type='cyto')
        channels = [[0, 0] for _ in range(nimg)]
        # diameter=None lets cellpose estimate it, so one call covers both cases
        masks, flows, styles, diams = model.eval(imgs, diameter=diameter, channels=channels)
        ROIs_mask = generate_ROIs_mask(masks, imgs)
    else:
        ROI_files = get_file_names(path_to_masks)
        ROIs_mask = [skimage.io.imread(f) for f in ROI_files]
    # persist the (possibly generated) ROI masks next to the FOV images
    if not (os.path.exists(path_to_FOV+'/ROIs_mask/')):
        os.makedirs(path_to_FOV+'/ROIs_mask/')
    for i in range(len(files)):
        skimage.io.imsave(path_to_FOV+'/ROIs_mask/' + os.path.split(files[i])[-1], ROIs_mask[i])
    Template = imgs[templateID]  # FOV_template
    Template = cv.normalize(Template, Template, 0, 255, cv.NORM_MINMAX)
    Template_ROI = ROIs_mask[templateID]
    # The five method branches were byte-identical copy-paste except for the
    # method name; collapse them into a single validated path.
    if method not in ('akaze', 'sift', 'surf', 'brisk', 'orb'):
        return  # unrecognized method: return None, as the original did
    Tmatrices = []
    regImages = []
    regROIs = []
    print('A' + method.upper() + ' is running')
    for j in range(len(imgs)):
        if j != templateID:
            print('registering ' + os.path.split(files[j])[-1])
            Regimage = imgs[j]
            Regimage = cv.normalize(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)
            Regimage_ROI = ROIs_mask[j]
            T_matrix, regIm, regROI = Apply_affine_methods(Template, Template_ROI, Regimage, Regimage_ROI, iterNum, method)
            Tmatrices.append(T_matrix)
            regImages.append(regIm)
            regROIs.append(regROI)
    output_results(path_to_FOV, files, templateID, Template, Template_ROI, Tmatrices, regImages, regROIs, method)
    return Tmatrices, regImages, regROIs
def get_file_names(folder):
    """Collect image paths (png/jpg/jpeg/tif/tiff) directly inside *folder*.

    Returns the list of paths, or None (after printing a message) when the
    folder holds no images or only a single one.
    """
    patterns = ('/*.png', '/*.jpg', '/*.jpeg', '/*.tif', '/*.tiff')
    image_names = []
    for pattern in patterns:
        image_names.extend(glob.glob(folder + pattern))
    if image_names == []:
        print('Load image failed: please check the path')
    elif len(image_names) == 1:
        print('Error: the folder needs to contain at least two images')
    else:
        return image_names
def generate_summary(ID, files):
    """Print the template file name and the list of files to be registered."""
    print('Template image:' + os.path.split(files[ID])[-1])
    # every file except the template is a registration target
    regfiles = [os.path.split(f)[-1] for j, f in enumerate(files) if j != ID]
    print('Registered images:')
    print(regfiles)
def Image_enhance_contrast(image_names):
    """Load each image in *image_names* and stretch its contrast to uint8 range.

    Images with zero dynamic range (``np.ptp == 0``) are appended unchanged.
    Returns the list of images in the input order.
    """
    images=[]
    for n in range(len(image_names)):
        img = skimage.io.imread(image_names[n])
        if np.ptp(img)>0:
            # normalize with cellpose's normalize99 (presumably percentile-based
            # scaling — see cellpose docs), clip to [0, 1], rescale to uint8
            img = utils.normalize99(img)
            img = np.clip(img, 0, 1)
            img *= 255
            img = np.uint8(img)
        images.append(img)
    return images
def generate_ROIs_mask(masks, imgs):
    """Convert per-pixel label masks into binary (0/255) uint8 ROI masks.

    Only labels covering more than 60 pixels are kept; smaller detections
    are dropped as noise. One mask is produced per image in *imgs*.
    """
    ROIs_mask = []
    for idx in range(len(imgs)):
        raw_mask = np.zeros((imgs[idx].shape[0], imgs[idx].shape[1]), np.uint8)
        maski = masks[idx]
        for label in range(1, int(maski.max()) + 1):
            ys, xs = (maski == label).nonzero()
            if len(ys) > 60:
                raw_mask[ys, xs] = 255
        ROIs_mask.append(raw_mask)
    return ROIs_mask
def Apply_affine_methods(img2, img2_ROI, img1, img1_ROI, iterNum, method):
    """Register img1 onto the template img2 with an affine-invariant feature method.

    Runs ``iterNum`` rounds of descriptor matching + RANSAC homography and keeps
    the transform whose warped ROI best matches the template ROI (L1 error),
    subject to a sanity check that most significant ROI contours survive the warp.

    Args:
        img2: template FOV image (grayscale, normalized).
        img2_ROI: template binary ROI mask (0/255).
        img1: image to register (grayscale, normalized).
        img1_ROI: binary ROI mask (0/255) of the image to register.
        iterNum: number of matching/RANSAC iterations to try.
        method: feature detector name ('akaze', 'sift', 'surf', 'brisk', 'orb').

    Returns:
        tuple: (T, img1_wrap, img1_ROI_wrap) — 3x3 transform, warped image and
        warped ROI mask. All-zero outputs when no acceptable transform is found.
    """
    w = np.size(img2, 1)  # image width  (columns)
    h = np.size(img2, 0)  # image height (rows)
    feature_name = method + '-flann'
    detector, matcher = init_feature(feature_name)
    # Count significant contours in the raw ROI; used to reject warps that
    # destroy most of the ROIs.
    Img1_contours = measure.find_contours(img1_ROI, 128)
    Img1_Cmax = 0
    Img1_K = 0
    for contour in Img1_contours:
        if len(contour[:, 1]) > Img1_Cmax:
            Img1_Cmax = len(contour[:, 1])
    for contour in Img1_contours:
        if len(contour[:, 1]) > (Img1_Cmax*(2/3)):
            Img1_K += 1
    # detect affine-invariant features on both images
    r_err = w*h*255  # upper bound on the per-pixel L1 ROI error
    pool = ThreadPool(processes=cv.getNumberOfCPUs())
    try:
        kp1, desc1 = affine_detect(detector, img1, pool=pool)
        kp2, desc2 = affine_detect(detector, img2, pool=pool)
    finally:
        # BUGFIX: the pool was previously never released (thread leak)
        pool.close()
        pool.join()
    # choose best H
    H = np.zeros((3, 3))
    inliers = 0
    matched = 0
    for i in range(iterNum):
        ROI_temp = deepcopy(img2_ROI)
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            # NOTE(review): 150000 lands on findHomography's `mask` parameter
            # positionally — likely intended as maxIters; kept as-is, confirm.
            temp_H, status = cv.findHomography(p1, p2, cv.RANSAC, 3.0, 150000)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
            temp_inliers = np.sum(status)
            temp_matched = len(status)
            # BUGFIX: cv.warpPerspective takes dsize as (width, height); the
            # original passed (h, w), which is only correct for square images.
            img1_ROIwrap = cv.warpPerspective(img1_ROI, temp_H, (w, h))
            img1_ROIwrap[img1_ROIwrap < 255] = 0
            # count significant contours that survived the warp
            Img1wrap_contours = measure.find_contours(img1_ROIwrap, 128)
            Img1wrap_K = 0
            for contour in Img1wrap_contours:
                if len(contour[:, 1]) > (Img1_Cmax*(2/3)) and len(contour[:, 1]) < Img1_Cmax:
                    Img1wrap_K += 1
            if Img1wrap_K < (Img1_K/2):
                continue
            # L1-Norm between the template ROI and the warped ROI.
            # BUGFIX: subtracting uint8 arrays wraps modulo 256, corrupting
            # the error; cast to a signed type first.
            temp_err = np.abs(ROI_temp.astype(np.int32) - img1_ROIwrap.astype(np.int32))
            err = np.sum(temp_err)
            if err < r_err:
                r_err = err
                H = temp_H
                inliers = temp_inliers
                matched = temp_matched
        else:
            # BUGFIX: the original reset H to None here, discarding the best
            # transform found so far and crashing warpPerspective below when a
            # late iteration had too few matches; only report, keep the best H.
            print('%d matches found, not enough for homography estimation' % len(p1))
    if matched > 0:
        img1_wrap = cv.warpPerspective(img1, H, (w, h))
        img1_ROI_wrap = cv.warpPerspective(img1_ROI, H, (w, h))
        T = H
    else:
        img1_wrap = np.zeros([h, w], np.uint8)
        img1_ROI_wrap = np.zeros([h, w], np.uint8)
        T = np.zeros([3, 3])
    return T, img1_wrap, img1_ROI_wrap
def output_results(path, files, ID, Template, Template_ROI, Tmatrices, regImages, regROIs, method):
    """Save per-image registration results: transform CSVs and overlay figures.

    For every input image except the template (index ``ID``), writes
    ``<path>/A<METHOD>/<name>.csv`` containing the transformation matrix and
    a side-by-side figure (template | registered image | ROI overlay) to
    ``<path>/A<METHOD>/results_<name>``.

    Parameters
    ----------
    path : str
        Output root directory.
    files : list of str
        Paths of all input images; ``files[ID]`` is the template.
    ID : int
        Index of the template image within ``files``.
    Template, Template_ROI : numpy.ndarray
        Template image and its ROI mask.
    Tmatrices, regImages, regROIs : sequences
        Transformation matrices, registered images and registered ROIs,
        one entry per non-template image (in ``files`` order).
    method : str
        Registration method name, used in the output folder name.
    """
    out_dir = path + '/A' + method.upper() + '/'
    # exist_ok avoids the race-prone exists()/makedirs() pair
    os.makedirs(out_dir, exist_ok=True)
    k = 0
    for i in range(len(files)):
        if i != ID:
            # save transformation matrix alongside source/template file names
            raw_data = {'Registered_file': [os.path.split(files[i])[1]],
                        'Template_file': [os.path.split(files[ID])[1]],
                        'Transformation_matrix': [Tmatrices[k]]}
            df = pd.DataFrame(raw_data, columns=['Registered_file', 'Template_file', 'Transformation_matrix'])
            # splitext handles any extension length (the original sliced off a
            # fixed 4 characters, which broke for '.jpeg'/'.tiff' files)
            base_name = os.path.splitext(os.path.split(files[i])[1])[0]
            df.to_csv(out_dir + base_name + '.csv')
            # Overlay canvas: rows must match Template's height for cv.hconcat.
            # The original used axis 1 (width) for both dimensions, which only
            # worked for square images.
            output_Im = np.zeros([np.size(Template, 0), np.size(Template, 1), 3], np.uint8)
            # np.bool was removed in NumPy 1.24; the builtin bool is equivalent
            outlines1 = np.zeros(Template_ROI.shape, bool)
            outlines1[find_boundaries(Template_ROI, mode='inner')] = 1
            outX1, outY1 = np.nonzero(outlines1)
            output_Im[outX1, outY1] = np.array([255, 0, 0])  # template ROI outline in red
            outlines2 = np.zeros(regROIs[k].shape, bool)
            outlines2[find_boundaries(regROIs[k], mode='inner')] = 1
            outX2, outY2 = np.nonzero(outlines2)
            output_Im[outX2, outY2] = np.array([255, 255, 22])  # registered ROI outline in yellow
            img = cv.hconcat([cv.cvtColor(Template, cv.COLOR_GRAY2BGR), cv.cvtColor(regImages[k], cv.COLOR_GRAY2BGR), output_Im])
            skimage.io.imsave(out_dir + 'results_' + os.path.split(files[i])[1], img)
            k = k + 1
| [
"AffineCa2p.cellpose.utils.normalize99",
"numpy.sum",
"numpy.abs",
"numpy.clip",
"skimage.measure.find_contours",
"glob.glob",
"cv2.normalize",
"AffineCa2p.FAIM.find_obj.filter_matches",
"AffineCa2p.FAIM.find_obj.init_feature",
"pandas.DataFrame",
"skimage.segmentation.find_boundaries",
"cv2.w... | [((3374, 3430), 'cv2.normalize', 'cv.normalize', (['Template', 'Template', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(Template, Template, 0, 255, cv.NORM_MINMAX)\n', (3386, 3430), True, 'import cv2 as cv\n'), ((9276, 9292), 'numpy.size', 'np.size', (['img2', '(1)'], {}), '(img2, 1)\n', (9283, 9292), True, 'import numpy as np\n'), ((9301, 9317), 'numpy.size', 'np.size', (['img2', '(0)'], {}), '(img2, 0)\n', (9308, 9317), True, 'import numpy as np\n'), ((9382, 9408), 'AffineCa2p.FAIM.find_obj.init_feature', 'init_feature', (['feature_name'], {}), '(feature_name)\n', (9394, 9408), False, 'from AffineCa2p.FAIM.find_obj import init_feature, filter_matches, explore_match\n'), ((9469, 9505), 'skimage.measure.find_contours', 'measure.find_contours', (['img1_ROI', '(128)'], {}), '(img1_ROI, 128)\n', (9490, 9505), False, 'from skimage import measure\n'), ((9910, 9950), 'AffineCa2p.FAIM.asift.affine_detect', 'affine_detect', (['detector', 'img1'], {'pool': 'pool'}), '(detector, img1, pool=pool)\n', (9923, 9950), False, 'from AffineCa2p.FAIM.asift import affine_detect\n'), ((9969, 10009), 'AffineCa2p.FAIM.asift.affine_detect', 'affine_detect', (['detector', 'img2'], {'pool': 'pool'}), '(detector, img2, pool=pool)\n', (9982, 10009), False, 'from AffineCa2p.FAIM.asift import affine_detect\n'), ((10040, 10056), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (10048, 10056), True, 'import numpy as np\n'), ((2515, 2560), 'AffineCa2p.cellpose.models.Cellpose', 'models.Cellpose', ([], {'gpu': '(False)', 'model_type': '"""cyto"""'}), "(gpu=False, model_type='cyto')\n", (2530, 2560), False, 'from AffineCa2p.cellpose import models, utils\n'), ((7631, 7659), 'glob.glob', 'glob.glob', (["(folder + '/*.png')"], {}), "(folder + '/*.png')\n", (7640, 7659), False, 'import glob\n'), ((7685, 7713), 'glob.glob', 'glob.glob', (["(folder + '/*.jpg')"], {}), "(folder + '/*.jpg')\n", (7694, 7713), False, 'import glob\n'), ((7739, 7768), 'glob.glob', 'glob.glob', (["(folder + 
'/*.jpeg')"], {}), "(folder + '/*.jpeg')\n", (7748, 7768), False, 'import glob\n'), ((7794, 7822), 'glob.glob', 'glob.glob', (["(folder + '/*.tif')"], {}), "(folder + '/*.tif')\n", (7803, 7822), False, 'import glob\n'), ((7848, 7877), 'glob.glob', 'glob.glob', (["(folder + '/*.tiff')"], {}), "(folder + '/*.tiff')\n", (7857, 7877), False, 'import glob\n'), ((8679, 8692), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (8687, 8692), True, 'import numpy as np\n'), ((8870, 8930), 'numpy.zeros', 'np.zeros', (['(imgs[idx].shape[0], imgs[idx].shape[1])', 'np.uint8'], {}), '((imgs[idx].shape[0], imgs[idx].shape[1]), np.uint8)\n', (8878, 8930), True, 'import numpy as np\n'), ((10140, 10158), 'copy.deepcopy', 'deepcopy', (['img2_ROI'], {}), '(img2_ROI)\n', (10148, 10158), False, 'from copy import copy, deepcopy\n'), ((10270, 10307), 'AffineCa2p.FAIM.find_obj.filter_matches', 'filter_matches', (['kp1', 'kp2', 'raw_matches'], {}), '(kp1, kp2, raw_matches)\n', (10284, 10307), False, 'from AffineCa2p.FAIM.find_obj import init_feature, filter_matches, explore_match\n'), ((11677, 11712), 'cv2.warpPerspective', 'cv.warpPerspective', (['img1', 'H', '(h, w)'], {}), '(img1, H, (h, w))\n', (11695, 11712), True, 'import cv2 as cv\n'), ((11738, 11777), 'cv2.warpPerspective', 'cv.warpPerspective', (['img1_ROI', 'H', '(h, w)'], {}), '(img1_ROI, H, (h, w))\n', (11756, 11777), True, 'import cv2 as cv\n'), ((11821, 11847), 'numpy.zeros', 'np.zeros', (['[h, w]', 'np.uint8'], {}), '([h, w], np.uint8)\n', (11829, 11847), True, 'import numpy as np\n'), ((11873, 11899), 'numpy.zeros', 'np.zeros', (['[h, w]', 'np.uint8'], {}), '([h, w], np.uint8)\n', (11881, 11899), True, 'import numpy as np\n'), ((11912, 11928), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (11920, 11928), True, 'import numpy as np\n'), ((8549, 8560), 'numpy.ptp', 'np.ptp', (['img'], {}), '(img)\n', (8555, 8560), True, 'import numpy as np\n'), ((8583, 8605), 'AffineCa2p.cellpose.utils.normalize99', 
'utils.normalize99', (['img'], {}), '(img)\n', (8600, 8605), False, 'from AffineCa2p.cellpose import models, utils\n'), ((8625, 8643), 'numpy.clip', 'np.clip', (['img', '(0)', '(1)'], {}), '(img, 0, 1)\n', (8632, 8643), True, 'import numpy as np\n'), ((9870, 9890), 'cv2.getNumberOfCPUs', 'cv.getNumberOfCPUs', ([], {}), '()\n', (9888, 9890), True, 'import cv2 as cv\n'), ((10364, 10413), 'cv2.findHomography', 'cv.findHomography', (['p1', 'p2', 'cv.RANSAC', '(3.0)', '(150000)'], {}), '(p1, p2, cv.RANSAC, 3.0, 150000)\n', (10381, 10413), True, 'import cv2 as cv\n'), ((10517, 10531), 'numpy.sum', 'np.sum', (['status'], {}), '(status)\n', (10523, 10531), True, 'import numpy as np\n'), ((10598, 10642), 'cv2.warpPerspective', 'cv.warpPerspective', (['img1_ROI', 'temp_H', '(h, w)'], {}), '(img1_ROI, temp_H, (h, w))\n', (10616, 10642), True, 'import cv2 as cv\n'), ((10778, 10818), 'skimage.measure.find_contours', 'measure.find_contours', (['img1_ROIwrap', '(128)'], {}), '(img1_ROIwrap, 128)\n', (10799, 10818), False, 'from skimage import measure\n'), ((11207, 11223), 'numpy.sum', 'np.sum', (['temp_err'], {}), '(temp_err)\n', (11213, 11223), True, 'import numpy as np\n'), ((12520, 12617), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data'], {'columns': "['Registered_file', 'Template_file', 'Transformation_matrix']"}), "(raw_data, columns=['Registered_file', 'Template_file',\n 'Transformation_matrix'])\n", (12532, 12617), True, 'import pandas as pd\n'), ((12856, 12893), 'numpy.zeros', 'np.zeros', (['Template_ROI.shape', 'np.bool'], {}), '(Template_ROI.shape, np.bool)\n', (12864, 12893), True, 'import numpy as np\n'), ((12994, 13015), 'numpy.nonzero', 'np.nonzero', (['outlines1'], {}), '(outlines1)\n', (13004, 13015), True, 'import numpy as np\n'), ((13055, 13076), 'numpy.array', 'np.array', (['[255, 0, 0]'], {}), '([255, 0, 0])\n', (13063, 13076), True, 'import numpy as np\n'), ((13104, 13139), 'numpy.zeros', 'np.zeros', (['regROIs[k].shape', 'np.bool'], {}), '(regROIs[k].shape, 
np.bool)\n', (13112, 13139), True, 'import numpy as np\n'), ((13238, 13259), 'numpy.nonzero', 'np.nonzero', (['outlines2'], {}), '(outlines2)\n', (13248, 13259), True, 'import numpy as np\n'), ((13299, 13323), 'numpy.array', 'np.array', (['[255, 255, 22]'], {}), '([255, 255, 22])\n', (13307, 13323), True, 'import numpy as np\n'), ((3808, 3864), 'cv2.normalize', 'cv.normalize', (['Regimage', 'Regimage', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\n', (3820, 3864), True, 'import cv2 as cv\n'), ((11159, 11190), 'numpy.abs', 'np.abs', (['(ROI_temp - img1_ROIwrap)'], {}), '(ROI_temp - img1_ROIwrap)\n', (11165, 11190), True, 'import numpy as np\n'), ((12917, 12960), 'skimage.segmentation.find_boundaries', 'find_boundaries', (['Template_ROI'], {'mode': '"""inner"""'}), "(Template_ROI, mode='inner')\n", (12932, 12960), False, 'from skimage.segmentation import find_boundaries\n'), ((13163, 13204), 'skimage.segmentation.find_boundaries', 'find_boundaries', (['regROIs[k]'], {'mode': '"""inner"""'}), "(regROIs[k], mode='inner')\n", (13178, 13204), False, 'from skimage.segmentation import find_boundaries\n'), ((4615, 4671), 'cv2.normalize', 'cv.normalize', (['Regimage', 'Regimage', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\n', (4627, 4671), True, 'import cv2 as cv\n'), ((12775, 12795), 'numpy.size', 'np.size', (['Template', '(1)'], {}), '(Template, 1)\n', (12782, 12795), True, 'import numpy as np\n'), ((12796, 12816), 'numpy.size', 'np.size', (['Template', '(1)'], {}), '(Template, 1)\n', (12803, 12816), True, 'import numpy as np\n'), ((13355, 13395), 'cv2.cvtColor', 'cv.cvtColor', (['Template', 'cv.COLOR_GRAY2BGR'], {}), '(Template, cv.COLOR_GRAY2BGR)\n', (13366, 13395), True, 'import cv2 as cv\n'), ((13396, 13440), 'cv2.cvtColor', 'cv.cvtColor', (['regImages[k]', 'cv.COLOR_GRAY2BGR'], {}), '(regImages[k], cv.COLOR_GRAY2BGR)\n', (13407, 13440), True, 'import cv2 as cv\n'), ((5420, 5476), 
'cv2.normalize', 'cv.normalize', (['Regimage', 'Regimage', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\n', (5432, 5476), True, 'import cv2 as cv\n'), ((6226, 6282), 'cv2.normalize', 'cv.normalize', (['Regimage', 'Regimage', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\n', (6238, 6282), True, 'import cv2 as cv\n'), ((7032, 7088), 'cv2.normalize', 'cv.normalize', (['Regimage', 'Regimage', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\n', (7044, 7088), True, 'import cv2 as cv\n')] |
from PIL import Image
import numpy as np
def get_image_array(image_path):
    """Load the image at ``image_path`` and return it as a NumPy array."""
    loaded = Image.open(image_path)
    return np.array(loaded)
def coalesce_into_column(multidir_image_array):
    """Flatten a 3-D image array into a single flat list of pixel values.

    Replaces the original triple Python loop — which iterated every pixel
    and channel in the interpreter, O(H*W*C) — with NumPy's C-level
    ``ravel``; the element order (row-major) is identical.

    Parameters
    ----------
    multidir_image_array : numpy.ndarray
        A 3-D array, e.g. (height, width, channels).

    Returns
    -------
    tuple
        ``(flat_values, length)`` where ``flat_values`` is a plain list and
        ``length`` is its element count.
    """
    # keep the original's 3-D contract: non-3-D input still raises here
    a, b, c = multidir_image_array.shape
    single_array = multidir_image_array.ravel().tolist()
    return single_array, a * b * c
def get_corr(array_of_image_arrays, slice_):
    """Correlation matrix of the image vectors, truncated to a common length.

    Every vector is cut down to ``slice_`` elements so arrays coming from
    differently sized images can still be compared row-wise.
    """
    truncated = []
    for vector in array_of_image_arrays:
        truncated.append(vector[:slice_])
    return np.corrcoef(truncated)
def corr_of_multiple_images(list_of_paths):
    """Correlate several images pixel-wise.

    Each image is flattened to a 1-D list; all lists are then truncated to
    the length of the smallest image (so differently sized images remain
    comparable) and the full correlation matrix is returned.
    """
    flattened = []
    shortest = float("+inf")
    for image_path in list_of_paths:
        pixels, count = coalesce_into_column(
            get_image_array(image_path)
        )
        # track the smallest image so every vector can be sliced down to a
        # common length before correlating
        shortest = min(shortest, count)
        flattened.append(pixels)
    # Slicing to `shortest` is a rough fix for unequal image sizes; it
    # would be cleaner if all inputs shared one shape.
    return get_corr(flattened, shortest)
# Demo entry point: pass a list of image filenames (any length) to
# corr_of_multiple_images(); swap in image names that actually exist in
# your folder. The printed matrix is the pixel-wise correlation between
# every pair of images.
print(corr_of_multiple_images(["samp1.jpeg","samp2.jpeg","samp3.jpeg","samp4.jpeg"]))
| [
"numpy.corrcoef",
"numpy.array",
"PIL.Image.open"
] | [((95, 117), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (105, 117), False, 'from PIL import Image\n'), ((139, 154), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (147, 154), True, 'import numpy as np\n'), ((756, 820), 'numpy.corrcoef', 'np.corrcoef', (['[array[:slice_] for array in array_of_image_arrays]'], {}), '([array[:slice_] for array in array_of_image_arrays])\n', (767, 820), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 08:59:19 2020
@author: Timothe
"""
import re
import numpy as np
def QuickRegexp(Line, regex, **kwargs):
    """Return the text of the first regex match in ``Line``, or False.

    Parameters
    ----------
    Line : str
        Input string to be processed.
    regex : str
        Input regex, can be easily designed at : https://regex101.com/
    **kwargs :
        case : bool, optional (default False)
            When True, matching is done with ``re.IGNORECASE`` in addition
            to ``re.MULTILINE``; when False only ``re.MULTILINE`` is used.
            NOTE(review): the historical docstring called this "case
            sensitive matching", which is the opposite of what the flag
            actually did; the original flag mapping is preserved here so
            existing callers keep their behaviour — confirm intent.

    Returns
    -------
    str or bool
        The matched text, or False when nothing matches.
    """
    case = kwargs.get("case", False)
    flags = re.MULTILINE | re.IGNORECASE if case else re.MULTILINE
    # re.search returns the first match, same as the original's
    # "return inside the first finditer iteration" idiom
    match = re.search(regex, Line, flags)
    return match.group() if match else False
def ProgressBarImage(Fraction):
    """Render a 10x100 RGB progress bar for a completion fraction in [0, 1].

    The completed portion is blue (channel 2 set to 1) and, for partial
    fractions, the remainder is white. A fraction of exactly 0 yields an
    all-black image (matching the historical behaviour).
    """
    bar = np.zeros((10, 100, 3))
    if Fraction == 0:
        return bar
    bar[:, :, 2] = np.ones((10, 100))
    if Fraction == 1:
        return bar
    filled = int(Fraction * 100)
    # whiten everything past the filled columns
    bar[:, filled:, :] = np.ones((10, 100 - filled, 3))
    return bar
def AlphaNum_Sort(List):
    """Sort strings in natural (human) order, e.g. 'a2' before 'a10'."""
    def natural_key(item):
        # split into alternating text/number chunks; numbers compare numerically
        pieces = re.split('([0-9]+)', item)
        return [int(piece) if piece.isdigit() else piece for piece in pieces]
    return sorted(List, key=natural_key)
| [
"re.finditer",
"numpy.zeros",
"numpy.ones",
"re.split"
] | [((509, 563), 're.finditer', 're.finditer', (['regex', 'Line', '(re.MULTILINE | re.IGNORECASE)'], {}), '(regex, Line, re.MULTILINE | re.IGNORECASE)\n', (520, 563), False, 'import re\n'), ((591, 629), 're.finditer', 're.finditer', (['regex', 'Line', 're.MULTILINE'], {}), '(regex, Line, re.MULTILINE)\n', (602, 629), False, 'import re\n'), ((842, 864), 'numpy.zeros', 'np.zeros', (['(10, 100, 3)'], {}), '((10, 100, 3))\n', (850, 864), True, 'import numpy as np\n'), ((885, 903), 'numpy.ones', 'np.ones', (['(10, 100)'], {}), '((10, 100))\n', (892, 903), True, 'import numpy as np\n'), ((963, 985), 'numpy.zeros', 'np.zeros', (['(10, 100, 3)'], {}), '((10, 100, 3))\n', (971, 985), True, 'import numpy as np\n'), ((1030, 1052), 'numpy.zeros', 'np.zeros', (['(10, 100, 3)'], {}), '((10, 100, 3))\n', (1038, 1052), True, 'import numpy as np\n'), ((1073, 1091), 'numpy.ones', 'np.ones', (['(10, 100)'], {}), '((10, 100))\n', (1080, 1091), True, 'import numpy as np\n'), ((1341, 1366), 're.split', 're.split', (['"""([0-9]+)"""', 'key'], {}), "('([0-9]+)', key)\n", (1349, 1366), False, 'import re\n')] |
import os
import logging
import collections
import yaml
import numpy as np
# from matplotlib import pyplot as plt
import graphviz as gv
import LCTM.metrics
from mathtools import utils, metrics
from blocks.core import labels as labels_lib
from blocks.core import blockassembly
logger = logging.getLogger(__name__)
def eval_metrics(pred_seq, true_seq, name_suffix='', append_to=None):
    """Compute frame-level and segment-level metrics for a label sequence.

    Parameters
    ----------
    pred_seq, true_seq : numpy.ndarray
        Predicted and ground-truth label sequences of equal length.
    name_suffix : str, optional
        Suffix appended to every metric name (e.g. ' (fold 1)').
    append_to : dict, optional
        Dictionary updated in place with the new metrics. A fresh dict is
        created when omitted. (The original used a mutable default
        argument ``append_to={}``, so results from one call silently leaked
        into the next.)

    Returns
    -------
    dict
        ``append_to`` with Accuracy, Precision, Recall, F1, Edit Score and
        Overlap Score entries added.
    """
    if append_to is None:
        append_to = {}

    tp = metrics.truePositives(pred_seq, true_seq)
    # tn = metrics.trueNegatives(pred_seq, true_seq)
    fp = metrics.falsePositives(pred_seq, true_seq)
    fn = metrics.falseNegatives(pred_seq, true_seq)

    prc = utils.safeDivide(tp, tp + fp)
    rec = utils.safeDivide(tp, tp + fn)
    f1 = utils.safeDivide(2 * prc * rec, prc + rec)

    acc = (pred_seq == true_seq).astype(float).mean()

    metric_dict = {
        'Accuracy' + name_suffix: acc,
        'Precision' + name_suffix: prc,
        'Recall' + name_suffix: rec,
        'F1' + name_suffix: f1,
        # LCTM scores are percentages; normalize to [0, 1] like the others
        'Edit Score' + name_suffix: LCTM.metrics.edit_score(pred_seq, true_seq) / 100,
        'Overlap Score' + name_suffix: LCTM.metrics.overlap_score(pred_seq, true_seq) / 100
    }

    append_to.update(metric_dict)
    return append_to
def eval_metrics_part(pred_seq, true_seq, name_suffix='', append_to=None):
    """Compute frame-level metrics for part-label sequences.

    Same as ``eval_metrics`` but without the segment-level (edit/overlap)
    scores.

    Parameters
    ----------
    pred_seq, true_seq : numpy.ndarray
        Predicted and ground-truth part-label arrays of equal shape.
    name_suffix : str, optional
        Suffix appended to every metric name.
    append_to : dict, optional
        Dictionary updated in place with the new metrics. A fresh dict is
        created when omitted. (The original used a mutable default
        argument ``append_to={}``, so results from one call silently leaked
        into the next.)

    Returns
    -------
    dict
        ``append_to`` with Accuracy, Precision, Recall and F1 entries added.
    """
    if append_to is None:
        append_to = {}

    tp = metrics.truePositives(pred_seq, true_seq)
    # tn = metrics.trueNegatives(pred_seq, true_seq)
    fp = metrics.falsePositives(pred_seq, true_seq)
    fn = metrics.falseNegatives(pred_seq, true_seq)

    prc = utils.safeDivide(tp, tp + fp)
    rec = utils.safeDivide(tp, tp + fn)
    f1 = utils.safeDivide(2 * prc * rec, prc + rec)

    acc = (pred_seq == true_seq).astype(float).mean()

    metric_dict = {
        'Accuracy' + name_suffix: acc,
        'Precision' + name_suffix: prc,
        'Recall' + name_suffix: rec,
        'F1' + name_suffix: f1,
    }

    append_to.update(metric_dict)
    return append_to
def drawLabels(paths, fig_fn, base_path, state_img_dir, path_labels=None, img_ext='png'):
    """Draw sequences of `BlockAssembly` states using graphviz.

    Parameters
    ----------
    paths : iterable( iterable(int) )
        Each path is a list of state indices, rendered as one
        left-to-right chain of state images.
    fig_fn : str
        Filename of the figure.
    base_path : str
        Path to the directory where the figure will be saved.
    state_img_dir : str
        Directory containing the source images of the states; filenames
        are assumed to have the format ``state<state_index>.<img_ext>``.
    path_labels : array-like, optional
        Per-node text labels, indexed as ``path_labels[path_idx, step_idx]``.
    img_ext : str, optional
        Extension specifying the image file type ('svg', 'png', etc.).
    """
    graph = gv.Digraph(
        name=fig_fn, format=img_ext, directory=base_path,
        graph_attr={'rankdir': 'LR'},
        node_attr={'shape': 'plaintext'}
    )
    for path_idx, path in enumerate(paths):
        for step_idx, state_idx in enumerate(path):
            img_path = os.path.join(state_img_dir, f"state{state_idx}.{img_ext}")
            node_label = '' if path_labels is None else f"{path_labels[path_idx, step_idx]}"
            node_name = f"{path_idx}, {step_idx}"
            graph.node(
                node_name, image=img_path,
                fixedsize='true', width='1', height='0.5', imagescale='true',
                pad='1', fontsize='12', label=node_label
            )
            # chain consecutive states within the same path
            if step_idx > 0:
                graph.edge(f"{path_idx}, {step_idx - 1}", node_name, fontsize='12')
    graph.render(filename=fig_fn, directory=base_path, cleanup=True)
def makeEdges(vocab, is_action=False):
    """Build per-vocab-item edge attributes from part differences.

    When ``is_action`` is True, each item's sign is appended as an extra
    column, with -1 remapped to 2 so every attribute value is a
    non-negative class index.
    """
    _, edge_diffs = labels_lib.make_parts_vocab(
        vocab, lower_tri_only=True, append_to_vocab=False
    )
    if not is_action:
        return edge_diffs
    sign_col = np.array([item.sign for item in vocab], dtype=int)
    sign_col[sign_col == -1] = 2
    return np.concatenate((edge_diffs, sign_col[:, None]), axis=1)
def main(
        out_dir=None, data_dir=None, scores_dir=None,
        vocab_from_scores_dir=None, only_fold=None,
        plot_io=None, draw_labels=None, vocab_fig_dir=None,
        prefix='seq=', is_event=False,
        results_file=None, sweep_param_name=None, model_params={}, cv_params={}):
    """Evaluate predicted label sequences against ground truth, fold by fold.

    Loads per-sequence score/pred/true label variables from ``scores_dir``,
    computes frame- and segment-level metrics (plus part-level metrics
    derived from the vocab's edge attributes), appends results to a CSV,
    and optionally renders per-sequence plots and label figures.

    Parameters
    ----------
    out_dir : str
        Output root; the log file, figures and saved variables go here.
    data_dir : str
        Directory holding the dataset 'vocab' (used unless
        ``vocab_from_scores_dir`` is truthy).
    scores_dir : str
        Directory holding '<prefix><id>_score-seq', '..._pred-label-seq'
        and '..._true-label-seq' variables.
    vocab_from_scores_dir : bool
        If truthy, load 'vocab' from ``scores_dir`` instead of ``data_dir``.
    only_fold : int, optional
        When given, evaluate only this cross-validation fold.
    plot_io : bool
        Render score/label plots for each test sequence.
    draw_labels : bool
        Render predicted/true segment figures via ``drawLabels``.
    vocab_fig_dir : str
        Directory with pre-rendered state images used by ``drawLabels``.
    prefix : str
        Filename prefix identifying sequence variables (default 'seq=').
    is_event : bool
        Treat vocab entries as assembly actions: prepends the empty action
        and adds a sign column to the edge attributes.
    results_file : str, optional
        CSV path for results; defaults to ``<out_dir>/results.csv``.
    sweep_param_name : str, optional
        Forwarded to ``utils.writeResults`` for parameter sweeps.
    model_params, cv_params : dict
        Model metadata forwarded to ``utils.writeResults`` and options for
        ``utils.makeDataSplits``. NOTE(review): mutable default arguments —
        safe only while no caller mutates them.
    """
    data_dir = os.path.expanduser(data_dir)
    scores_dir = os.path.expanduser(scores_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # NOTE: shadows the module-level `logger` inside this function
    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)
    # Output layout: figures/{model-io_images, model-io_plots} and data/
    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
    io_dir_images = os.path.join(fig_dir, 'model-io_images')
    if not os.path.exists(io_dir_images):
        os.makedirs(io_dir_images)
    io_dir_plots = os.path.join(fig_dir, 'model-io_plots')
    if not os.path.exists(io_dir_plots):
        os.makedirs(io_dir_plots)
    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)
    # Discover the sequence ids present in the scores directory
    seq_ids = utils.getUniqueIds(
        scores_dir, prefix=prefix, suffix='pred-label-seq.*',
        to_array=True
    )
    logger.info(f"Loaded scores for {len(seq_ids)} sequences from {scores_dir}")
    if vocab_from_scores_dir:
        vocab = utils.loadVariable('vocab', scores_dir)
    else:
        vocab = utils.loadVariable('vocab', data_dir)
    logger.info(f"Loaded vocab with {len(vocab)} items")
    if is_event:
        # ensure the empty action is index 0, and collapse array-valued
        # signs to a scalar sign
        if vocab[0] != blockassembly.AssemblyAction():
            vocab = [blockassembly.AssemblyAction()] + vocab
        for i in range(len(vocab)):
            sign = vocab[i].sign
            if isinstance(sign, np.ndarray):
                vocab[i].sign = np.sign(sign.sum())
    # per-class part attributes used to derive part-level label sequences
    edge_attrs = makeEdges(vocab, is_action=is_event)
    all_metrics = collections.defaultdict(list)
    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)
    all_pred_seqs = []
    all_true_seqs = []
    for cv_index, cv_fold in enumerate(cv_folds):
        if only_fold is not None and cv_index != only_fold:
            continue
        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )
        # Evaluate every held-out test sequence of this fold
        for i in test_indices:
            seq_id = seq_ids[i]
            logger.info(f"  Processing sequence {seq_id}...")
            trial_prefix = f"{prefix}{seq_id}"
            score_seq = utils.loadVariable(f"{trial_prefix}_score-seq", scores_dir)
            pred_seq = utils.loadVariable(f"{trial_prefix}_pred-label-seq", scores_dir)
            true_seq = utils.loadVariable(f"{trial_prefix}_true-label-seq", scores_dir)
            # map class labels to their part attributes for part-level eval
            pred_parts_seq = edge_attrs[pred_seq]
            true_parts_seq = edge_attrs[true_seq]
            metric_dict = eval_metrics(pred_seq, true_seq)
            part_metric_dict = eval_metrics_part(pred_parts_seq, true_parts_seq)
            for key, value in part_metric_dict.items():
                metric_dict[f'Part {key}'] = value
            for name, value in metric_dict.items():
                logger.info(f"  {name}: {value * 100:.2f}%")
                all_metrics[name].append(value)
            utils.writeResults(results_file, metric_dict, sweep_param_name, model_params)
            all_pred_seqs.append(pred_seq)
            all_true_seqs.append(true_seq)
            if plot_io:
                perf_str = '  '.join(
                    f'{name}: {metric_dict[name] * 100:.2f}%'
                    for name in ('Accuracy', 'F1', 'Edit Score')
                )
                title = f'{trial_prefix}  {perf_str}'
                utils.plot_array(
                    score_seq.T, (true_seq.T, pred_seq.T), ('true', 'pred'),
                    fn=os.path.join(io_dir_plots, f"seq={seq_id:03d}.png"),
                    title=title
                )
                utils.plot_array(
                    score_seq.T, (true_parts_seq.T, pred_parts_seq.T), ('true', 'pred'),
                    fn=os.path.join(io_dir_plots, f"seq={seq_id:03d}_parts.png"),
                    title=title
                )
            if draw_labels:
                drawLabels(
                    [
                        utils.computeSegments(pred_seq)[0],
                        utils.computeSegments(true_seq)[0]
                    ],
                    f"seq={seq_id:03d}", io_dir_images,
                    vocab_fig_dir
                )
    # Dead code: aggregate confusion-matrix analysis, intentionally disabled
    if False:
        confusions = metrics.confusionMatrix(all_pred_seqs, all_true_seqs, len(vocab))
        utils.saveVariable(confusions, "confusions", out_data_dir)
        per_class_acc, class_counts = metrics.perClassAcc(confusions, return_counts=True)
        class_preds = confusions.sum(axis=1)
        logger.info(f"MACRO ACC: {np.nanmean(per_class_acc) * 100:.2f}%")
        metrics.plotConfusions(os.path.join(fig_dir, 'confusions.png'), confusions, vocab)
        metrics.plotPerClassAcc(
            os.path.join(fig_dir, 'per-class-results.png'),
            vocab, per_class_acc, class_preds, class_counts
        )
if __name__ == "__main__":
    # Parse command-line args and config file
    cl_args = utils.parse_args(main)
    config, config_fn = utils.parse_config(cl_args, script_name=__file__)
    # Create output directory, instantiate log file and write config options
    out_dir = os.path.expanduser(config['out_dir'])
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Snapshot the resolved config and this script next to the results so
    # the run can be reproduced later
    with open(os.path.join(out_dir, config_fn), 'w') as outfile:
        yaml.dump(config, outfile)
    utils.copyFile(__file__, out_dir)
    main(**config)
| [
"yaml.dump",
"collections.defaultdict",
"mathtools.metrics.falsePositives",
"mathtools.utils.saveVariable",
"mathtools.utils.computeSegments",
"os.path.join",
"mathtools.metrics.falseNegatives",
"blocks.core.blockassembly.AssemblyAction",
"numpy.nanmean",
"mathtools.utils.copyFile",
"os.path.exi... | [((289, 316), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (306, 316), False, 'import logging\n'), ((396, 437), 'mathtools.metrics.truePositives', 'metrics.truePositives', (['pred_seq', 'true_seq'], {}), '(pred_seq, true_seq)\n', (417, 437), False, 'from mathtools import utils, metrics\n'), ((500, 542), 'mathtools.metrics.falsePositives', 'metrics.falsePositives', (['pred_seq', 'true_seq'], {}), '(pred_seq, true_seq)\n', (522, 542), False, 'from mathtools import utils, metrics\n'), ((552, 594), 'mathtools.metrics.falseNegatives', 'metrics.falseNegatives', (['pred_seq', 'true_seq'], {}), '(pred_seq, true_seq)\n', (574, 594), False, 'from mathtools import utils, metrics\n'), ((606, 635), 'mathtools.utils.safeDivide', 'utils.safeDivide', (['tp', '(tp + fp)'], {}), '(tp, tp + fp)\n', (622, 635), False, 'from mathtools import utils, metrics\n'), ((646, 675), 'mathtools.utils.safeDivide', 'utils.safeDivide', (['tp', '(tp + fn)'], {}), '(tp, tp + fn)\n', (662, 675), False, 'from mathtools import utils, metrics\n'), ((685, 727), 'mathtools.utils.safeDivide', 'utils.safeDivide', (['(2 * prc * rec)', '(prc + rec)'], {}), '(2 * prc * rec, prc + rec)\n', (701, 727), False, 'from mathtools import utils, metrics\n'), ((1276, 1317), 'mathtools.metrics.truePositives', 'metrics.truePositives', (['pred_seq', 'true_seq'], {}), '(pred_seq, true_seq)\n', (1297, 1317), False, 'from mathtools import utils, metrics\n'), ((1380, 1422), 'mathtools.metrics.falsePositives', 'metrics.falsePositives', (['pred_seq', 'true_seq'], {}), '(pred_seq, true_seq)\n', (1402, 1422), False, 'from mathtools import utils, metrics\n'), ((1432, 1474), 'mathtools.metrics.falseNegatives', 'metrics.falseNegatives', (['pred_seq', 'true_seq'], {}), '(pred_seq, true_seq)\n', (1454, 1474), False, 'from mathtools import utils, metrics\n'), ((1486, 1515), 'mathtools.utils.safeDivide', 'utils.safeDivide', (['tp', '(tp + fp)'], {}), '(tp, tp + fp)\n', (1502, 1515), 
False, 'from mathtools import utils, metrics\n'), ((1526, 1555), 'mathtools.utils.safeDivide', 'utils.safeDivide', (['tp', '(tp + fn)'], {}), '(tp, tp + fn)\n', (1542, 1555), False, 'from mathtools import utils, metrics\n'), ((1565, 1607), 'mathtools.utils.safeDivide', 'utils.safeDivide', (['(2 * prc * rec)', '(prc + rec)'], {}), '(2 * prc * rec, prc + rec)\n', (1581, 1607), False, 'from mathtools import utils, metrics\n'), ((2618, 2747), 'graphviz.Digraph', 'gv.Digraph', ([], {'name': 'fig_fn', 'format': 'img_ext', 'directory': 'base_path', 'graph_attr': "{'rankdir': 'LR'}", 'node_attr': "{'shape': 'plaintext'}"}), "(name=fig_fn, format=img_ext, directory=base_path, graph_attr={\n 'rankdir': 'LR'}, node_attr={'shape': 'plaintext'})\n", (2628, 2747), True, 'import graphviz as gv\n'), ((3585, 3663), 'blocks.core.labels.make_parts_vocab', 'labels_lib.make_parts_vocab', (['vocab'], {'lower_tri_only': '(True)', 'append_to_vocab': '(False)'}), '(vocab, lower_tri_only=True, append_to_vocab=False)\n', (3612, 3663), True, 'from blocks.core import labels as labels_lib\n'), ((4201, 4229), 'os.path.expanduser', 'os.path.expanduser', (['data_dir'], {}), '(data_dir)\n', (4219, 4229), False, 'import os\n'), ((4247, 4277), 'os.path.expanduser', 'os.path.expanduser', (['scores_dir'], {}), '(scores_dir)\n', (4265, 4277), False, 'import os\n'), ((4292, 4319), 'os.path.expanduser', 'os.path.expanduser', (['out_dir'], {}), '(out_dir)\n', (4310, 4319), False, 'import os\n'), ((4635, 4667), 'os.path.join', 'os.path.join', (['out_dir', '"""figures"""'], {}), "(out_dir, 'figures')\n", (4647, 4667), False, 'import os\n'), ((4754, 4794), 'os.path.join', 'os.path.join', (['fig_dir', '"""model-io_images"""'], {}), "(fig_dir, 'model-io_images')\n", (4766, 4794), False, 'import os\n'), ((4892, 4931), 'os.path.join', 'os.path.join', (['fig_dir', '"""model-io_plots"""'], {}), "(fig_dir, 'model-io_plots')\n", (4904, 4931), False, 'import os\n'), ((5027, 5056), 'os.path.join', 'os.path.join', 
(['out_dir', '"""data"""'], {}), "(out_dir, 'data')\n", (5039, 5056), False, 'import os\n'), ((5147, 5238), 'mathtools.utils.getUniqueIds', 'utils.getUniqueIds', (['scores_dir'], {'prefix': 'prefix', 'suffix': '"""pred-label-seq.*"""', 'to_array': '(True)'}), "(scores_dir, prefix=prefix, suffix='pred-label-seq.*',\n to_array=True)\n", (5165, 5238), False, 'from mathtools import utils, metrics\n'), ((5923, 5952), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5946, 5952), False, 'import collections\n'), ((6057, 6111), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['cv_folds', '"""cv-folds"""', 'out_data_dir'], {}), "(cv_folds, 'cv-folds', out_data_dir)\n", (6075, 6111), False, 'from mathtools import utils, metrics\n'), ((9463, 9485), 'mathtools.utils.parse_args', 'utils.parse_args', (['main'], {}), '(main)\n', (9479, 9485), False, 'from mathtools import utils, metrics\n'), ((9510, 9559), 'mathtools.utils.parse_config', 'utils.parse_config', (['cl_args'], {'script_name': '__file__'}), '(cl_args, script_name=__file__)\n', (9528, 9559), False, 'from mathtools import utils, metrics\n'), ((9652, 9689), 'os.path.expanduser', 'os.path.expanduser', (["config['out_dir']"], {}), "(config['out_dir'])\n", (9670, 9689), False, 'import os\n'), ((9859, 9892), 'mathtools.utils.copyFile', 'utils.copyFile', (['__file__', 'out_dir'], {}), '(__file__, out_dir)\n', (9873, 9892), False, 'from mathtools import utils, metrics\n'), ((3713, 3757), 'numpy.array', 'np.array', (['[a.sign for a in vocab]'], {'dtype': 'int'}), '([a.sign for a in vocab], dtype=int)\n', (3721, 3757), True, 'import numpy as np\n'), ((3810, 3862), 'numpy.concatenate', 'np.concatenate', (['(edge_diffs, signs[:, None])'], {'axis': '(1)'}), '((edge_diffs, signs[:, None]), axis=1)\n', (3824, 3862), True, 'import numpy as np\n'), ((4331, 4354), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (4345, 4354), False, 'import os\n'), ((4364, 4384), 
'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4375, 4384), False, 'import os\n'), ((4517, 4553), 'os.path.join', 'os.path.join', (['out_dir', '"""results.csv"""'], {}), "(out_dir, 'results.csv')\n", (4529, 4553), False, 'import os\n'), ((4587, 4619), 'os.path.expanduser', 'os.path.expanduser', (['results_file'], {}), '(results_file)\n', (4605, 4619), False, 'import os\n'), ((4679, 4702), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (4693, 4702), False, 'import os\n'), ((4712, 4732), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (4723, 4732), False, 'import os\n'), ((4806, 4835), 'os.path.exists', 'os.path.exists', (['io_dir_images'], {}), '(io_dir_images)\n', (4820, 4835), False, 'import os\n'), ((4845, 4871), 'os.makedirs', 'os.makedirs', (['io_dir_images'], {}), '(io_dir_images)\n', (4856, 4871), False, 'import os\n'), ((4943, 4971), 'os.path.exists', 'os.path.exists', (['io_dir_plots'], {}), '(io_dir_plots)\n', (4957, 4971), False, 'import os\n'), ((4981, 5006), 'os.makedirs', 'os.makedirs', (['io_dir_plots'], {}), '(io_dir_plots)\n', (4992, 5006), False, 'import os\n'), ((5068, 5096), 'os.path.exists', 'os.path.exists', (['out_data_dir'], {}), '(out_data_dir)\n', (5082, 5096), False, 'import os\n'), ((5106, 5131), 'os.makedirs', 'os.makedirs', (['out_data_dir'], {}), '(out_data_dir)\n', (5117, 5131), False, 'import os\n'), ((5386, 5425), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['"""vocab"""', 'scores_dir'], {}), "('vocab', scores_dir)\n", (5404, 5425), False, 'from mathtools import utils, metrics\n'), ((5452, 5489), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['"""vocab"""', 'data_dir'], {}), "('vocab', data_dir)\n", (5470, 5489), False, 'from mathtools import utils, metrics\n'), ((8850, 8908), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['confusions', '"""confusions"""', 'out_data_dir'], {}), "(confusions, 'confusions', out_data_dir)\n", (8868, 8908), False, 
'from mathtools import utils, metrics\n'), ((8948, 8999), 'mathtools.metrics.perClassAcc', 'metrics.perClassAcc', (['confusions'], {'return_counts': '(True)'}), '(confusions, return_counts=True)\n', (8967, 8999), False, 'from mathtools import utils, metrics\n'), ((9701, 9724), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (9715, 9724), False, 'import os\n'), ((9734, 9754), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (9745, 9754), False, 'import os\n'), ((9828, 9854), 'yaml.dump', 'yaml.dump', (['config', 'outfile'], {}), '(config, outfile)\n', (9837, 9854), False, 'import yaml\n'), ((2948, 2985), 'os.path.join', 'os.path.join', (['state_img_dir', 'image_fn'], {}), '(state_img_dir, image_fn)\n', (2960, 2985), False, 'import os\n'), ((4430, 4462), 'os.path.join', 'os.path.join', (['out_dir', '"""log.txt"""'], {}), "(out_dir, 'log.txt')\n", (4442, 4462), False, 'import os\n'), ((5589, 5619), 'blocks.core.blockassembly.AssemblyAction', 'blockassembly.AssemblyAction', ([], {}), '()\n', (5617, 5619), False, 'from blocks.core import blockassembly\n'), ((6730, 6789), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['f"""{trial_prefix}_score-seq"""', 'scores_dir'], {}), "(f'{trial_prefix}_score-seq', scores_dir)\n", (6748, 6789), False, 'from mathtools import utils, metrics\n'), ((6813, 6877), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['f"""{trial_prefix}_pred-label-seq"""', 'scores_dir'], {}), "(f'{trial_prefix}_pred-label-seq', scores_dir)\n", (6831, 6877), False, 'from mathtools import utils, metrics\n'), ((6901, 6965), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['f"""{trial_prefix}_true-label-seq"""', 'scores_dir'], {}), "(f'{trial_prefix}_true-label-seq', scores_dir)\n", (6919, 6965), False, 'from mathtools import utils, metrics\n'), ((7492, 7569), 'mathtools.utils.writeResults', 'utils.writeResults', (['results_file', 'metric_dict', 'sweep_param_name', 'model_params'], {}), '(results_file, 
metric_dict, sweep_param_name, model_params)\n', (7510, 7569), False, 'from mathtools import utils, metrics\n'), ((9151, 9190), 'os.path.join', 'os.path.join', (['fig_dir', '"""confusions.png"""'], {}), "(fig_dir, 'confusions.png')\n", (9163, 9190), False, 'import os\n'), ((9256, 9302), 'os.path.join', 'os.path.join', (['fig_dir', '"""per-class-results.png"""'], {}), "(fig_dir, 'per-class-results.png')\n", (9268, 9302), False, 'import os\n'), ((9769, 9801), 'os.path.join', 'os.path.join', (['out_dir', 'config_fn'], {}), '(out_dir, config_fn)\n', (9781, 9801), False, 'import os\n'), ((5642, 5672), 'blocks.core.blockassembly.AssemblyAction', 'blockassembly.AssemblyAction', ([], {}), '()\n', (5670, 5672), False, 'from blocks.core import blockassembly\n'), ((8053, 8104), 'os.path.join', 'os.path.join', (['io_dir_plots', 'f"""seq={seq_id:03d}.png"""'], {}), "(io_dir_plots, f'seq={seq_id:03d}.png')\n", (8065, 8104), False, 'import os\n'), ((8302, 8359), 'os.path.join', 'os.path.join', (['io_dir_plots', 'f"""seq={seq_id:03d}_parts.png"""'], {}), "(io_dir_plots, f'seq={seq_id:03d}_parts.png')\n", (8314, 8359), False, 'import os\n'), ((9079, 9104), 'numpy.nanmean', 'np.nanmean', (['per_class_acc'], {}), '(per_class_acc)\n', (9089, 9104), True, 'import numpy as np\n'), ((8514, 8545), 'mathtools.utils.computeSegments', 'utils.computeSegments', (['pred_seq'], {}), '(pred_seq)\n', (8535, 8545), False, 'from mathtools import utils, metrics\n'), ((8574, 8605), 'mathtools.utils.computeSegments', 'utils.computeSegments', (['true_seq'], {}), '(true_seq)\n', (8595, 8605), False, 'from mathtools import utils, metrics\n')] |
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""
Generating training instance
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
import json
import pickle
import random
from random import randint
import tensorflow as tf
import cv2
from .config import cfg
# for merge COCO and HICO dataset
MAX_COCO_ID = 650000
MAX_HICO_ID = 40000
def bbox_trans(human_box_ori, object_box_ori, ratio, size=64):
    """Map a human/object box pair into a size x size reference frame.

    The union of the two boxes is shifted to the origin and scaled so that
    its longer side spans `size` pixels; the pair is then re-centred along
    the shorter axis.  Returns (human_box, object_box) after np.round.

    NOTE(review): the `ratio` parameter is ignored -- it is immediately
    overwritten below from the union box's own aspect ratio, so the
    'height'/'width' strings callers pass have no effect.
    NOTE(review): the `+=` shift below assumes numpy-array inputs
    (element-wise add); with plain lists `+=` would append items instead --
    confirm callers always pass arrays.
    """
    human_box = human_box_ori.copy()
    object_box = object_box_ori.copy()
    # Union ("interaction pattern") box of the pair, inclusive coordinates.
    InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
                          max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
    height = InteractionPattern[3] - InteractionPattern[1] + 1
    width = InteractionPattern[2] - InteractionPattern[0] + 1
    # Re-derive which axis is the long one (overrides the argument).
    if height > width:
        ratio = 'height'
    else:
        ratio = 'width'
    # shift the top-left corner to (0,0)
    human_box[0] -= InteractionPattern[0]
    human_box[2] -= InteractionPattern[0]
    human_box[1] -= InteractionPattern[1]
    human_box[3] -= InteractionPattern[1]
    object_box[0] -= InteractionPattern[0]
    object_box[2] -= InteractionPattern[0]
    object_box[1] -= InteractionPattern[1]
    object_box[3] -= InteractionPattern[1]
    if ratio == 'height':  # height is larger than width
        # Scale so the union's height spans exactly `size` pixels.
        human_box[0] = 0 + size * human_box[0] / height
        human_box[1] = 0 + size * human_box[1] / height
        human_box[2] = (size * width / height - 1) - size * (width - 1 - human_box[2]) / height
        human_box[3] = (size - 1) - size * (height - 1 - human_box[3]) / height
        object_box[0] = 0 + size * object_box[0] / height
        object_box[1] = 0 + size * object_box[1] / height
        object_box[2] = (size * width / height - 1) - size * (width - 1 - object_box[2]) / height
        object_box[3] = (size - 1) - size * (height - 1 - object_box[3]) / height
        # Need to shift horizontally
        InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
                              max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
        # assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[3] == 63) & (InteractionPattern[2] <= 63)
        # Pin the taller box to the bottom edge to absorb rounding slack.
        if human_box[3] > object_box[3]:
            human_box[3] = size - 1
        else:
            object_box[3] = size - 1
        # Centre the pair along the (shorter) horizontal axis.
        shift = size / 2 - (InteractionPattern[2] + 1) / 2
        human_box += [shift, 0, shift, 0]
        object_box += [shift, 0, shift, 0]
    else:  # width is larger than height
        # Scale so the union's width spans exactly `size` pixels.
        human_box[0] = 0 + size * human_box[0] / width
        human_box[1] = 0 + size * human_box[1] / width
        human_box[2] = (size - 1) - size * (width - 1 - human_box[2]) / width
        human_box[3] = (size * height / width - 1) - size * (height - 1 - human_box[3]) / width
        object_box[0] = 0 + size * object_box[0] / width
        object_box[1] = 0 + size * object_box[1] / width
        object_box[2] = (size - 1) - size * (width - 1 - object_box[2]) / width
        object_box[3] = (size * height / width - 1) - size * (height - 1 - object_box[3]) / width
        # Need to shift vertically
        InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
                              max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
        # assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[2] == 63) & (InteractionPattern[3] <= 63)
        # Pin the wider box to the right edge to absorb rounding slack.
        if human_box[2] > object_box[2]:
            human_box[2] = size - 1
        else:
            object_box[2] = size - 1
        # Centre the pair along the (shorter) vertical axis.
        shift = size / 2 - (InteractionPattern[3] + 1) / 2
        human_box = human_box + [0, shift, 0, shift]
        object_box = object_box + [0, shift, 0, shift]
    return np.round(human_box), np.round(object_box)
def Get_next_sp(human_box, object_box):
    """Build the 64x64x2 binary spatial pattern for a human/object box pair.

    Channel 0 marks the rescaled human box, channel 1 the object box, after
    both are mapped into the 64x64 frame by bbox_trans.
    """
    x_min = min(human_box[0], object_box[0])
    y_min = min(human_box[1], object_box[1])
    x_max = max(human_box[2], object_box[2])
    y_max = max(human_box[3], object_box[3])
    # Pick the longer side of the joint box as the normalisation axis
    # (bbox_trans re-derives this itself; the string is informational).
    axis = 'height' if (y_max - y_min) > (x_max - x_min) else 'width'
    H, O = bbox_trans(human_box, object_box, axis)
    pattern = np.zeros((64, 64, 2))
    for channel, box in enumerate((H, O)):
        pattern[int(box[1]):int(box[3]) + 1, int(box[0]):int(box[2]) + 1, channel] = 1
    return pattern
#
# def Get_next_sp_with_pose(human_box, object_box, human_pose, num_joints=17):
# InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
# max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
# height = InteractionPattern[3] - InteractionPattern[1] + 1
# width = InteractionPattern[2] - InteractionPattern[0] + 1
# if height > width:
# H, O = bbox_trans(human_box, object_box, 'height')
# else:
# H, O = bbox_trans(human_box, object_box, 'width')
#
# Pattern = np.zeros((64, 64, 2), dtype='float32')
# Pattern[int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1, 0] = 1
# Pattern[int(O[1]):int(O[3]) + 1, int(O[0]):int(O[2]) + 1, 1] = 1
#
# if human_pose != None and len(human_pose) == 51:
# skeleton = get_skeleton(human_box, human_pose, H, num_joints)
# else:
# skeleton = np.zeros((64, 64, 1), dtype='float32')
# skeleton[int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1, 0] = 0.05
#
# Pattern = np.concatenate((Pattern, skeleton), axis=2)
#
# return Pattern
def get_skeleton(human_box, human_pose, human_pattern, num_joints=17, size=64):
    """Map COCO keypoints from image coordinates into the pattern frame.

    `human_pose` is a flat [x, y, score] * num_joints sequence.  Each joint
    is re-expressed relative to `human_box`, scaled into `human_pattern`,
    and clamped to the size x size canvas.  One extra virtual joint -- the
    midpoint of joints 5 and 6 -- is appended before rendering the limb
    skeleton with draw_relation.
    """
    box_w = human_box[2] - human_box[0] + 1
    box_h = human_box[3] - human_box[1] + 1
    pat_w = human_pattern[2] - human_pattern[0] + 1
    pat_h = human_pattern[3] - human_pattern[1] + 1
    joints = np.zeros((num_joints + 1, 2), dtype='int32')
    for j in range(num_joints):
        jx = human_pose[3 * j]
        jy = human_pose[3 * j + 1]
        # Normalise to the human box, then rescale into the pattern box.
        px = round((jx - human_box[0]) / float(box_w) * pat_w + human_pattern[0])
        py = round((jy - human_box[1]) / float(box_h) * pat_h + human_pattern[1])
        joints[j] = (min(size - 1, int(px)), min(size - 1, int(py)))
    # Virtual "neck" joint between the two shoulders (COCO joints 5 and 6).
    joints[num_joints] = (joints[5] + joints[6]) / 2
    return draw_relation(human_pattern, joints)
def draw_relation(human_pattern, joints, size=64):
    """Render the 17-limb skeleton into a (size, size, 1) float32 map.

    Each limb is drawn as a line whose grey level identifies the limb
    (0.15 .. 0.95 in 0.05 steps).  Joint index 17 is the virtual neck
    appended by get_skeleton.
    """
    limbs = [[1, 3], [2, 4], [0, 1], [0, 2], [0, 17], [5, 17], [6, 17], [5, 7], [6, 8], [7, 9], [8, 10],
             [11, 17], [12, 17], [11, 13], [12, 14], [13, 15], [14, 16]]
    shades = [0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
    canvas = np.zeros((size, size, 1), dtype="float32")
    for (a, b), shade in zip(limbs, shades):
        cv2.line(canvas, tuple(joints[a]), tuple(joints[b]), shade)
    # cv2.rectangle(skeleton, (int(human_pattern[0]), int(human_pattern[1])), (int(human_pattern[2]), int(human_pattern[3])), (255))
    # cv2.imshow("Joints", skeleton)
    # cv2.waitKey(0)
    return canvas
def bb_IOU(boxA, boxB):
    """Return the intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices, so each side length
    gets a +1 (a degenerate box x1 == x2 still has width 1).
    """
    # Intersection rectangle, clamped to zero when the boxes are disjoint.
    inter_w = np.maximum(np.minimum(boxA[2], boxB[2]) - np.maximum(boxA[0], boxB[0]) + 1., 0.)
    inter_h = np.maximum(np.minimum(boxA[3], boxB[3]) - np.maximum(boxA[1], boxB[1]) + 1., 0.)
    inter_area = inter_w * inter_h
    area_a = (boxA[2] - boxA[0] + 1.) * (boxA[3] - boxA[1] + 1.)
    area_b = (boxB[2] - boxB[0] + 1.) * (boxB[3] - boxB[1] + 1.)
    return inter_area / (area_a + area_b - inter_area)
def Augmented_box(bbox, shape, image_id, augment=15):
    """Return `bbox` plus up to `augment` random jittered copies of it.

    Each output row is [0, x1, y1, x2, y2] (leading 0 is the batch index).
    A jittered candidate is kept only if its IOU with the original box stays
    above 0.7; sampling gives up after 150 attempts, so fewer than `augment`
    copies may come back.  Degenerate boxes (x1 >= x2 or y1 >= y2) are
    returned as-is with no augmentation.

    Parameters:
        bbox: [x1, y1, x2, y2] box.
        shape: image shape (H, W, ...) used to clip candidates.
        image_id: unused here; kept for interface compatibility.
        augment: number of jittered copies requested.
    """
    thres_ = 0.7
    box = np.array([0, bbox[0], bbox[1], bbox[2], bbox[3]]).reshape(1, 5)
    box = box.astype(np.float64)
    if bbox[0] >= bbox[2] or bbox[1] >= bbox[3]:
        return box
    count = 0
    time_count = 0
    while count < augment:
        time_count += 1
        height = bbox[3] - bbox[1]
        width = bbox[2] - bbox[0]
        height_cen = (bbox[3] + bbox[1]) / 2
        width_cen = (bbox[2] + bbox[0]) / 2
        # Random scale in [0.9, 1.1] plus a centre shift of up to 10% of
        # the box size.
        ratio = 1 + randint(-10, 10) * 0.01
        # BUGFIX: random.randint requires integer bounds; np.floor returns a
        # float64, which modern Python (3.12+) rejects with an error.  The
        # explicit int() keeps the sampled values identical.
        height_shift = randint(-int(np.floor(height)), int(np.floor(height))) * 0.1
        width_shift = randint(-int(np.floor(width)), int(np.floor(width))) * 0.1
        # Clip the jittered box to the image bounds.
        H_0 = max(0, width_cen + width_shift - ratio * width / 2)
        H_2 = min(shape[1] - 1, width_cen + width_shift + ratio * width / 2)
        H_1 = max(0, height_cen + height_shift - ratio * height / 2)
        H_3 = min(shape[0] - 1, height_cen + height_shift + ratio * height / 2)
        # Keep only jitters that still cover the original box well.
        if bb_IOU(bbox, np.array([H_0, H_1, H_2, H_3])) > thres_:
            box_ = np.array([0, H_0, H_1, H_2, H_3]).reshape(1, 5)
            box = np.concatenate((box, box_), axis=0)
            count += 1
        if time_count > 150:
            return box
    return box
def Generate_action(action_list, nums=29):
    """Encode a list of action indices as a 1 x `nums` multi-hot row vector."""
    encoded = np.zeros((1, nums))
    for idx in action_list:
        encoded[0, idx] = 1
    return encoded
def Get_Next_Instance_HO_Neg(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Assemble one V-COCO training blob for the HO branch.

    Picks GT entry `iter % Data_length`, loads the matching COCO train2014
    image, subtracts the dataset pixel means, and builds the augmented
    positive/negative boxes, labels, masks and spatial patterns via
    Augmented_HO_Neg.

    Returns a dict with keys: image, H_boxes_solo, H_boxes, O_boxes,
    gt_class_HO, gt_class_H, Mask_HO, Mask_H, sp, H_num.

    Raises:
        FileNotFoundError: if the image file is missing or unreadable.
    """
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg'
    im = cv2.imread(im_file)
    if im is None:
        # BUGFIX: cv2.imread returns None for a missing/unreadable file; the
        # previous code only printed "not existing" and then crashed on
        # im.astype with an opaque AttributeError.  Fail loudly with the path.
        raise FileNotFoundError('image not found or unreadable: ' + im_file)
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
    Pattern, Human_augmented, Human_augmented_solo, Object_augmented, action_HO, action_H, mask_HO, mask_H = Augmented_HO_Neg(
        GT, Trainval_Neg, im_shape, Pos_augment, Neg_select)
    blobs = {}
    blobs['image'] = im_orig
    blobs['H_boxes_solo'] = Human_augmented_solo
    blobs['H_boxes'] = Human_augmented
    blobs['O_boxes'] = Object_augmented
    blobs['gt_class_HO'] = action_HO
    blobs['gt_class_H'] = action_H
    blobs['Mask_HO'] = mask_HO
    blobs['Mask_H'] = mask_H
    blobs['sp'] = Pattern
    blobs['H_num'] = len(action_H)
    return blobs
def Augmented_HO_Neg(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Build augmented positive pairs plus sampled negatives for one GT entry.

    GT layout (as indexed here): GT[0]=image id, GT[1]=HO action ids,
    GT[2]=human box, GT[3]=object box, GT[4]=human-only action ids.
    Positives are jittered with Augmented_box; up to Neg_select negative
    pairs are drawn from Trainval_Neg[image_id] (Neg[2]=human box,
    Neg[3]=object box).  Negatives are appended only to the box tensors and
    receive all-zero action rows.

    Returns (Pattern, Human_augmented, Human_augmented_solo,
    Object_augmented, action_HO, action_H, mask_HO, mask_H); the first
    num_pos rows of each tensor correspond to positives.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    action_HO_ = Generate_action(GT[1])
    action_H_ = Generate_action(GT[4])
    # Fixed 29-way verb masks; zero entries are excluded from the HO loss
    # (NOTE(review): presumably object-less verbs -- confirm against the
    # V-COCO verb list).  The human-only mask supervises all 29.
    mask_HO_ = np.asarray(
        [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]).reshape(1, 29)
    mask_H_ = np.asarray(
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).reshape(1, 29)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    Human_augmented_solo = Human_augmented.copy()
    # Augmentation can return fewer rows than requested; keep the pair lists
    # the same length.  NOTE(review): Human_augmented_solo keeps the
    # pre-truncation length, yet is reshaped to num_pos below -- confirm the
    # two counts always agree in practice.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Sample Neg_select negatives without replacement.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    # Replicate the single-GT label/mask rows to match the box counts:
    # action_HO/mask_HO get one row per positive+negative, action_H/mask_H
    # one row per positive only.
    action_HO = action_HO_
    action_H = action_H_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    for i in range(num_pos - 1):
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
    for i in range(num_pos_neg - num_pos):
        # Negatives carry an all-zero action vector.
        action_HO = np.concatenate((action_HO, np.zeros(29).reshape(1, 29)), axis=0)
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Human_augmented_solo = Human_augmented_solo.reshape(num_pos, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 29)
    action_H = action_H.reshape(num_pos, 29)
    mask_HO = mask_HO.reshape(num_pos_neg, 29)
    mask_H = mask_H.reshape(num_pos, 29)
    return Pattern, Human_augmented, Human_augmented_solo, Object_augmented, action_HO, action_H, mask_HO, mask_H
def Augmented_HO_spNeg(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Build augmented positives plus sampled negatives for one V-COCO GT
    entry, including verb-object composition labels.

    GT layout (as indexed here): GT[0]=image id, GT[1]=HO action ids,
    GT[2]=human box, GT[3]=object box, GT[4]=human-only action ids,
    GT[-1]=object class.  `set_list` enumerates the valid (verb, object)
    pairs; `action_compose` is the multi-hot encoding over that list.

    Returns (Pattern, Human_augmented_sp, Human_augmented, Object_augmented,
    action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H,
    action_compose).  The sp/compose tensors cover positives + negatives
    (num_pos_neg rows); the HO/H tensors cover positives only.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # Valid (verb id, object class) pairs for the composition head.
    set_list = [(0, 38), (1, 31), (1, 32), (2, 43), (2, 44), (2, 77), (4, 1), (4, 19),
                (4, 28), (4, 46), (4, 47), (4, 48), (4, 49), (4, 51), (4, 52), (4, 54),
                (4, 55), (4, 56), (5, 2), (5, 3), (5, 4), (5, 6), (5, 7), (5, 8), (5, 9),
                (5, 18), (5, 21), (6, 68), (7, 33), (8, 64), (9, 47), (9, 48), (9, 49),
                (9, 50), (9, 51), (9, 52), (9, 53), (9, 54), (9, 55), (9, 56), (10, 2),
                (10, 4), (10, 14), (10, 18), (10, 21), (10, 25), (10, 27), (10, 29),
                (10, 57), (10, 58), (10, 60), (10, 61), (10, 62), (10, 64), (11, 31),
                (11, 32), (11, 37), (11, 38), (12, 14), (12, 57), (12, 58), (12, 60),
                (12, 61), (13, 40), (13, 41), (13, 42), (13, 46), (14, 1), (14, 25),
                (14, 26), (14, 27), (14, 29), (14, 30), (14, 31), (14, 32), (14, 33),
                (14, 34), (14, 35), (14, 37), (14, 38), (14, 39), (14, 40), (14, 41),
                (14, 42), (14, 47), (14, 50), (14, 68), (14, 74), (14, 75), (14, 78),
                (15, 30), (15, 33), (16, 43), (16, 44), (16, 45), (18, 1), (18, 2),
                (18, 3), (18, 4), (18, 5), (18, 6), (18, 7), (18, 8), (18, 11),
                (18, 14), (18, 15), (18, 16), (18, 17), (18, 18), (18, 19), (18, 20),
                (18, 21), (18, 24), (18, 25), (18, 26), (18, 27), (18, 28), (18, 29),
                (18, 30), (18, 31), (18, 32), (18, 33), (18, 34), (18, 35), (18, 36),
                (18, 37), (18, 38), (18, 39), (18, 40), (18, 41), (18, 42), (18, 43),
                (18, 44), (18, 45), (18, 46), (18, 47), (18, 48), (18, 49), (18, 51),
                (18, 53), (18, 54), (18, 55), (18, 56), (18, 57), (18, 61), (18, 62),
                (18, 63), (18, 64), (18, 65), (18, 66), (18, 67), (18, 68), (18, 73),
                (18, 74), (18, 75), (18, 77), (19, 35), (19, 39), (20, 33), (21, 31),
                (21, 32), (23, 1), (23, 11), (23, 19), (23, 20), (23, 24), (23, 28),
                (23, 34), (23, 49), (23, 53), (23, 56), (23, 61), (23, 63), (23, 64),
                (23, 67), (23, 68), (23, 73), (24, 74), (25, 1), (25, 2), (25, 4),
                (25, 8), (25, 9), (25, 14), (25, 15), (25, 16), (25, 17), (25, 18),
                (25, 19), (25, 21), (25, 25), (25, 26), (25, 27), (25, 28), (25, 29),
                (25, 30), (25, 31), (25, 32), (25, 33), (25, 34), (25, 35), (25, 36),
                (25, 37), (25, 38), (25, 39), (25, 40), (25, 41), (25, 42), (25, 43),
                (25, 44), (25, 45), (25, 46), (25, 47), (25, 48), (25, 49), (25, 50),
                (25, 51), (25, 52), (25, 53), (25, 54), (25, 55), (25, 56), (25, 57),
                (25, 64), (25, 65), (25, 66), (25, 67), (25, 68), (25, 73), (25, 74),
                (25, 77), (25, 78), (25, 79), (25, 80), (26, 32), (26, 37), (28, 30),
                (28, 33)]
    action_sp_ = Generate_action(GT[1])
    action_HO_ = Generate_action(GT[1])
    obj_cls = GT[-1]
    action_compose = [set_list.index(item) for item in [(ho, obj_cls[0]) for ho in GT[1]]]
    action_compose_ = Generate_action(action_compose, nums=len(set_list))
    action_H_ = Generate_action(GT[4])
    # Fixed 29-way verb masks; zero entries are excluded from the sp/HO
    # losses (NOTE(review): presumably object-less verbs -- confirm).
    mask_sp_ = np.asarray(
        [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]).reshape(1, 29)
    mask_HO_ = np.asarray(
        [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]).reshape(1, 29)
    mask_H_ = np.asarray(
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).reshape(1, 29)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    if Human[0] == 0 and Human[1] == 0 and Human[2] == 0:
        # All-zero placeholder human box: Augmented_box returns a single row,
        # so pad by repeating the tail until Pos_augment + 1 rows exist.
        while len(Human_augmented) < Pos_augment + 1:
            Human_augmented = np.concatenate(
                [Human_augmented, Human_augmented[-(Pos_augment + 1 - len(Human_augmented)):]], axis=0)
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Sample Neg_select negatives without replacement.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    # Replicate the single-GT rows: sp/compose per positive+negative,
    # HO/H per positive only.
    action_sp = action_sp_
    action_HO = action_HO_
    action_H = action_H_
    action_compose = action_compose_
    mask_sp = mask_sp_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    for i in range(num_pos - 1):
        action_sp = np.concatenate((action_sp, action_sp_), axis=0)
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        action_compose = np.concatenate((action_compose, action_compose_), axis=0)
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
    for i in range(num_pos_neg - num_pos):
        # Negatives carry all-zero action rows.
        action_sp = np.concatenate((action_sp, np.zeros(29).reshape(1, 29)), axis=0)
        action_compose = np.concatenate((action_compose, np.zeros(len(set_list)).reshape(1, len(set_list))), axis=0)
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented_sp = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
    action_sp = action_sp.reshape(num_pos_neg, 29)
    action_HO = action_HO.reshape(num_pos, 29)
    action_H = action_H.reshape(num_pos, 29)
    # BUGFIX: action_compose has num_pos_neg rows (zero rows were appended
    # for the negatives above); reshaping to num_pos raised a ValueError
    # whenever negatives were sampled.  Matches Augmented_HO_spNeg2/3.
    action_compose = action_compose.reshape(num_pos_neg, len(set_list))
    mask_sp = mask_sp.reshape(num_pos_neg, 29)
    mask_HO = mask_HO.reshape(num_pos, 29)
    mask_H = mask_H.reshape(num_pos, 29)
    return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose
def Augmented_HO_spNeg2(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Variant of Augmented_HO_spNeg using a 24-verb label space.

    Same GT layout and return tuple as Augmented_HO_spNeg, but actions are
    encoded over 24 verbs with all-ones int32 masks, and `set_list` is the
    24-verb (verb, object) pair table.  The sp/compose tensors cover
    positives + negatives; HO/H tensors cover positives only.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # Valid (verb id, object class) pairs for the composition head.
    set_list = [(0, 38), (1, 31), (1, 32), (2, 43), (2, 44), (2, 77), (3, 1), (3, 19), (3, 28), (3, 46), (3, 47),
                (3, 48), (3, 49), (3, 51), (3, 52), (3, 54), (3, 55), (3, 56), (4, 2), (4, 3), (4, 4), (4, 6), (4, 7),
                (4, 8), (4, 9), (4, 18), (4, 21), (5, 68), (6, 33), (7, 64), (8, 47), (8, 48), (8, 49), (8, 50),
                (8, 51), (8, 52), (8, 53), (8, 54), (8, 55), (8, 56), (9, 2), (9, 4), (9, 14), (9, 18), (9, 21),
                (9, 25), (9, 27), (9, 29), (9, 57), (9, 58), (9, 60), (9, 61), (9, 62), (9, 64), (10, 31), (10, 32),
                (10, 37), (10, 38), (11, 14), (11, 57), (11, 58), (11, 60), (11, 61), (12, 40), (12, 41), (12, 42),
                (12, 46), (13, 1), (13, 25), (13, 26), (13, 27), (13, 29), (13, 30), (13, 31), (13, 32), (13, 33),
                (13, 34), (13, 35), (13, 37), (13, 38), (13, 39), (13, 40), (13, 41), (13, 42), (13, 47), (13, 50),
                (13, 68), (13, 74), (13, 75), (13, 78), (14, 30), (14, 33), (15, 43), (15, 44), (15, 45), (16, 1),
                (16, 2), (16, 3), (16, 4), (16, 5), (16, 6), (16, 7), (16, 8), (16, 11), (16, 14), (16, 15), (16, 16),
                (16, 17), (16, 18), (16, 19), (16, 20), (16, 21), (16, 24), (16, 25), (16, 26), (16, 27), (16, 28),
                (16, 29), (16, 30), (16, 31), (16, 32), (16, 33), (16, 34), (16, 35), (16, 36), (16, 37), (16, 38),
                (16, 39), (16, 40), (16, 41), (16, 42), (16, 43), (16, 44), (16, 45), (16, 46), (16, 47), (16, 48),
                (16, 49), (16, 51), (16, 53), (16, 54), (16, 55), (16, 56), (16, 57), (16, 61), (16, 62), (16, 63),
                (16, 64), (16, 65), (16, 66), (16, 67), (16, 68), (16, 73), (16, 74), (16, 75), (16, 77), (17, 35),
                (17, 39), (18, 33), (19, 31), (19, 32), (20, 74), (21, 1), (21, 2), (21, 4), (21, 8), (21, 9), (21, 14),
                (21, 15), (21, 16), (21, 17), (21, 18), (21, 19), (21, 21), (21, 25), (21, 26), (21, 27), (21, 28),
                (21, 29), (21, 30), (21, 31), (21, 32), (21, 33), (21, 34), (21, 35), (21, 36), (21, 37), (21, 38),
                (21, 39), (21, 40), (21, 41), (21, 42), (21, 43), (21, 44), (21, 45), (21, 46), (21, 47), (21, 48),
                (21, 49), (21, 50), (21, 51), (21, 52), (21, 53), (21, 54), (21, 55), (21, 56), (21, 57), (21, 64),
                (21, 65), (21, 66), (21, 67), (21, 68), (21, 73), (21, 74), (21, 77), (21, 78), (21, 79), (21, 80),
                (22, 32), (22, 37), (23, 30), (23, 33)]
    action_sp_ = Generate_action(GT[1], nums=24)
    action_HO_ = Generate_action(GT[1], nums=24)
    obj_cls = GT[-1]
    action_compose = [set_list.index(item) for item in [(ho, obj_cls[0]) for ho in GT[1]]]
    action_compose_ = Generate_action(action_compose, nums=len(set_list))
    action_H_ = Generate_action(GT[4], nums=24)
    # All 24 verbs are supervised on every branch in this variant.
    mask_sp_ = np.ones([1, 24], np.int32)
    mask_HO_ = np.ones([1, 24], np.int32)
    mask_H_ = np.ones([1, 24], np.int32)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    # Augmentation can return fewer rows than requested; keep pairs aligned.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    # pose_list = [GT[5]] * num_pos
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Sample Neg_select negatives without replacement.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    # Replicate the single-GT rows: sp/compose per positive+negative,
    # HO/H per positive only.
    action_sp = action_sp_
    action_HO = action_HO_
    action_H = action_H_
    action_compose = action_compose_
    mask_sp = mask_sp_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    # NOTE(review): pose_box is built but never used or returned -- leftover
    # from the disabled pose pathway above.
    pose_box = []
    # print('pose infor:', GT[5], pose_list)
    # pose_box = obtain_pose_box(Human_augmented, pose_list, shape)
    for item in Human_augmented:
        pose_box.extend([item] * 17)
    for i in range(num_pos - 1):
        action_sp = np.concatenate((action_sp, action_sp_), axis=0)
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        action_compose = np.concatenate((action_compose, action_compose_), axis=0)
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
    for i in range(num_pos_neg - num_pos):
        # Negatives carry all-zero action rows.
        action_sp = np.concatenate((action_sp, np.zeros(24).reshape(1, 24)), axis=0)
        action_compose = np.concatenate((action_compose, np.zeros(len(set_list)).reshape(1, len(set_list))), axis=0)
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
        # NOTE(review): `mask` is recomputed each iteration and never used --
        # dead code kept with the disabled resize experiment below.
        mask = np.zeros(shape=(1, shape[0] // 16, shape[1] // 16, 1), dtype=np.float32)
        # obj_box = Object_augmented[i][1:].astype(np.int32)
        # print(obj_box)
        # mask[:, obj_box[0]:obj_box[2], obj_box[1]:obj_box[3]] = 1
        # from skimage import transform
        # mask = transform.resize(mask, [1, shape[0] // 16, shape[1] // 16, 1], order=0, preserve_range=True)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented_sp = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
    action_sp = action_sp.reshape(num_pos_neg, 24)
    action_HO = action_HO.reshape(num_pos, 24)
    action_H = action_H.reshape(num_pos, 24)
    action_compose = action_compose.reshape(num_pos_neg, len(set_list))
    mask_sp = mask_sp.reshape(num_pos_neg, 24)
    mask_HO = mask_HO.reshape(num_pos, 24)
    mask_H = mask_H.reshape(num_pos, 24)
    return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose
def Augmented_HO_spNeg3(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Variant of Augmented_HO_spNeg using a 21-verb label space.

    Same GT layout and return tuple as Augmented_HO_spNeg, but actions are
    encoded over 21 verbs with all-ones int32 masks, and `set_list` is the
    21-verb (verb, object) pair table.  The sp/compose tensors cover
    positives + negatives; HO/H tensors cover positives only.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # Valid (verb id, object class) pairs for the composition head.
    set_list = [(0, 38), (1, 31), (1, 32), (2, 1), (2, 19), (2, 28), (2, 43), (2, 44), (2, 46), (2, 47), (2, 48),
                (2, 49),
                (2, 51), (2, 52), (2, 54), (2, 55), (2, 56), (2, 77), (3, 2), (3, 3), (3, 4), (3, 6), (3, 7), (3, 8),
                (3, 9), (3, 18), (3, 21), (4, 68), (5, 33), (6, 64), (7, 43), (7, 44), (7, 45), (7, 47), (7, 48),
                (7, 49),
                (7, 50), (7, 51), (7, 52), (7, 53), (7, 54), (7, 55), (7, 56), (8, 2), (8, 4), (8, 14), (8, 18),
                (8, 21),
                (8, 25), (8, 27), (8, 29), (8, 57), (8, 58), (8, 60), (8, 61), (8, 62), (8, 64), (9, 31), (9, 32),
                (9, 37),
                (9, 38), (10, 14), (10, 57), (10, 58), (10, 60), (10, 61), (11, 40), (11, 41), (11, 42), (11, 46),
                (12, 1),
                (12, 25), (12, 26), (12, 27), (12, 29), (12, 30), (12, 31), (12, 32), (12, 33), (12, 34), (12, 35),
                (12, 37), (12, 38), (12, 39), (12, 40), (12, 41), (12, 42), (12, 47), (12, 50), (12, 68), (12, 74),
                (12, 75), (12, 78), (13, 30), (13, 33), (14, 1), (14, 2), (14, 3), (14, 4), (14, 5), (14, 6), (14, 7),
                (14, 8), (14, 11), (14, 14), (14, 15), (14, 16), (14, 17), (14, 18), (14, 19), (14, 20), (14, 21),
                (14, 24),
                (14, 25), (14, 26), (14, 27), (14, 28), (14, 29), (14, 30), (14, 31), (14, 32), (14, 33), (14, 34),
                (14, 35), (14, 36), (14, 37), (14, 38), (14, 39), (14, 40), (14, 41), (14, 42), (14, 43), (14, 44),
                (14, 45), (14, 46), (14, 47), (14, 48), (14, 49), (14, 51), (14, 53), (14, 54), (14, 55), (14, 56),
                (14, 57), (14, 61), (14, 62), (14, 63), (14, 64), (14, 65), (14, 66), (14, 67), (14, 68), (14, 73),
                (14, 74), (14, 75), (14, 77), (15, 33), (15, 35), (15, 39), (16, 31), (16, 32), (17, 74), (18, 1),
                (18, 2),
                (18, 4), (18, 8), (18, 9), (18, 14), (18, 15), (18, 16), (18, 17), (18, 18), (18, 19), (18, 21),
                (18, 25),
                (18, 26), (18, 27), (18, 28), (18, 29), (18, 30), (18, 31), (18, 32), (18, 33), (18, 34), (18, 35),
                (18, 36), (18, 37), (18, 38), (18, 39), (18, 40), (18, 41), (18, 42), (18, 43), (18, 44), (18, 45),
                (18, 46), (18, 47), (18, 48), (18, 49), (18, 50), (18, 51), (18, 52), (18, 53), (18, 54), (18, 55),
                (18, 56), (18, 57), (18, 64), (18, 65), (18, 66), (18, 67), (18, 68), (18, 73), (18, 74), (18, 77),
                (18, 78), (18, 79), (18, 80), (19, 32), (19, 37), (20, 30), (20, 33)]
    action_sp_ = Generate_action(GT[1], nums=21)
    action_HO_ = Generate_action(GT[1], nums=21)
    obj_cls = GT[-1]
    action_compose = [set_list.index(item) for item in [(ho, obj_cls[0]) for ho in GT[1]]]
    action_compose_ = Generate_action(action_compose, nums=len(set_list))
    action_H_ = Generate_action(GT[4], nums=21)
    # All 21 verbs are supervised on every branch in this variant.
    mask_sp_ = np.ones([1, 21], np.int32)
    mask_HO_ = np.ones([1, 21], np.int32)
    mask_H_ = np.ones([1, 21], np.int32)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    # Augmentation can return fewer rows than requested; keep pairs aligned.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    # pose_list = [GT[5]] * num_pos
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Sample Neg_select negatives without replacement.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    # Replicate the single-GT rows: sp/compose per positive+negative,
    # HO/H per positive only.
    action_sp = action_sp_
    action_HO = action_HO_
    action_H = action_H_
    action_compose = action_compose_
    mask_sp = mask_sp_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    # NOTE(review): pose_box is built but never used or returned -- leftover
    # from the disabled pose pathway above.
    pose_box = []
    # print('pose infor:', GT[5], pose_list)
    # pose_box = obtain_pose_box(Human_augmented, pose_list, shape)
    for item in Human_augmented:
        pose_box.extend([item] * 17)
    for i in range(num_pos - 1):
        action_sp = np.concatenate((action_sp, action_sp_), axis=0)
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        action_compose = np.concatenate((action_compose, action_compose_), axis=0)
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
    for i in range(num_pos_neg - num_pos):
        # Negatives carry all-zero action rows.
        action_sp = np.concatenate((action_sp, np.zeros(21).reshape(1, 21)), axis=0)
        action_compose = np.concatenate((action_compose, np.zeros(len(set_list)).reshape(1, len(set_list))), axis=0)
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape([1, 64, 64, 2])
        # Pattern_ = np.concatenate([Pattern_, np.zeros([1, 64, 64, 1])], axis=-1)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
        # NOTE(review): `mask` is recomputed each iteration and never used.
        mask = np.zeros(shape=(1, shape[0] // 16, shape[1] // 16, 1), dtype=np.float32)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented_sp = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
    action_sp = action_sp.reshape(num_pos_neg, 21)
    action_HO = action_HO.reshape(num_pos, 21)
    action_H = action_H.reshape(num_pos, 21)
    action_compose = action_compose.reshape(num_pos_neg, len(set_list))
    mask_sp = mask_sp.reshape(num_pos_neg, 21)
    mask_HO = mask_HO.reshape(num_pos, 21)
    mask_H = mask_H.reshape(num_pos, 21)
    return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose
def Generate_action_HICO(action_list):
    """Encode HICO action indices as a 1 x 600 multi-hot row vector."""
    encoded = np.zeros((1, 600))
    for idx in action_list:
        encoded[0, idx] = 1
    return encoded
def Get_Next_Instance_HO_Neg_HICO(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Assemble one HICO-DET training blob.

    Picks GT entry `iter % Data_length`, loads the matching train2015 image,
    subtracts the dataset pixel means, and builds augmented boxes, labels
    and spatial patterns via Augmented_HO_Neg_HICO (defined elsewhere in
    this file).

    Returns a dict with keys: image, H_boxes, O_boxes, gt_class_HO, sp,
    H_num.
    """
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (str(image_id)).zfill(
        8) + '.jpg'
    # NOTE(review): cv2.imread returns None for a missing file, making the
    # astype below fail with an AttributeError -- assumes every GT image id
    # exists on disk; confirm.
    im = cv2.imread(im_file)
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
    Pattern, Human_augmented, Object_augmented, action_HO, num_pos = Augmented_HO_Neg_HICO(GT, Trainval_Neg, im_shape,
                                                                                           Pos_augment, Neg_select)
    blobs = {}
    blobs['image'] = im_orig
    blobs['H_boxes'] = Human_augmented
    blobs['O_boxes'] = Object_augmented
    blobs['gt_class_HO'] = action_HO
    blobs['sp'] = Pattern
    blobs['H_num'] = num_pos
    return blobs
def Augmented_neg_box(bbox, shape, image_id, augment=15, bbox_list=()):
    """Sample up to `augment` jittered boxes around `bbox` that do not
    overlap any ground-truth box, for use as no-action negatives.

    Each candidate is randomly scaled (by up to +/-10%) and shifted by at
    least half the box size, clipped to the image, and accepted only when
    its IOU with every box in `bbox_list` is <= 0.25.  Sampling gives up
    after 150 attempts even if fewer than `augment` boxes were accepted.

    :param bbox: reference box (x1, y1, x2, y2)
    :param shape: image shape (height, width, ...) used to clip candidates
    :param image_id: unused here; kept for signature parity with Augmented_box
    :param augment: desired number of negative boxes
    :param bbox_list: boxes the candidates must not overlap.
        BUG FIX: the default was a shared mutable list ([]); it is now an
        immutable tuple, which is iteration-compatible for all callers.
    :return: (k, 5) float64 array of [0, x1, y1, x2, y2] rows.
        NOTE(review): row 0 comes from np.empty and is uninitialized memory
        (see the commented-out initialization below) -- confirm callers skip
        or tolerate row 0 before relying on it.
    """
    thres_ = 0.25
    # box = np.array([0, bbox[0], bbox[1], bbox[2], bbox[3]]).reshape(1, 5)
    # box = box.astype(np.float64)
    box = np.empty([1, 5], np.float64)
    count = 0
    time_count = 0
    while count < augment:
        time_count += 1
        height = bbox[3] - bbox[1]
        width = bbox[2] - bbox[0]
        height_cen = (bbox[3] + bbox[1]) / 2
        width_cen = (bbox[2] + bbox[0]) / 2
        # random scale in [0.90, 1.10]
        ratio = 1 + randint(-10, 10) * 0.01
        # random center shift of at least half the box size per direction
        height_shift = randint(-np.floor(height), np.floor(height))
        height_shift = np.sign(height_shift) * 0.5 * height + height_shift
        width_shift = randint(-np.floor(width), np.floor(width)) * 0.1
        width_shift = np.sign(width_shift) * 0.5 * width + width_shift
        # clip the candidate corners to the image bounds
        H_0 = max(0, width_cen + width_shift - ratio * width / 2)
        H_2 = min(shape[1] - 1, width_cen + width_shift + ratio * width / 2)
        H_1 = max(0, height_cen + height_shift - ratio * height / 2)
        H_3 = min(shape[0] - 1, height_cen + height_shift + ratio * height / 2)
        # reject candidates overlapping any ground-truth box
        valid_neg_box = True
        for bbox1 in bbox_list:
            if bb_IOU(bbox1, np.array([H_0, H_1, H_2, H_3])) > thres_:
                valid_neg_box = False
                break
        if valid_neg_box:
            box_ = np.array([0, H_0, H_1, H_2, H_3]).reshape(1, 5)
            box = np.concatenate((box, box_), axis=0)
            count += 1
        if time_count > 150:
            return box
    return box
def obtain_data2_large(Pos_augment=15, Neg_select=60, augment_type=0, model_name='',
                       pattern_type=False, zero_shot_type=0, isalign=False, bnum=2, neg_type_ratio=0):
    """Build a tf.data input pipeline that stacks `bnum` HICO images per batch.

    Loads the (optionally pose-annotated) ground-truth and negative pickles,
    wraps generator2 so `bnum` per-image samples are merged into one batch
    (all positives first, then all negatives), and returns the one-shot
    iterator's output tensors.

    NOTE(review): a later definition of obtain_data2_large in this module
    shadows this one at import time; this variant additionally forwards
    neg_type_ratio to generator2.

    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
              action_HO, sp, split_idx) tensors from the dataset iterator;
              split_idx is the first image's positive count.
    """
    if pattern_type == 1:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_with_pose.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO_with_pose.pkl', "rb"), encoding='latin1')
    else:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0=image, 1=image_id, 2=num_pos, 3=human boxes,
        # 4=object boxes, 5=action labels, 6=spatial patterns
        buffer = [[] for i in range(7)]
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, \
            action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                         augment_type,
                                         pattern_type, zero_shot_type, isalign, 0, neg_type_ratio):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # column 0 of each box row carries the image's index within the
            # batch (consumed downstream -- TODO confirm by ROI pooling)
            buffer[3][-1][:, 0] = len(buffer[3]) - 1
            buffer[4][-1][:, 0] = len(buffer[3]) - 1
            if len(buffer[0]) >= bnum:
                # per-image positive counts; 'x5new' models also promote 1/8
                # of the negatives to (semi-)positives
                pos_semi_list = []
                if model_name.__contains__('x5new'):
                    for b in range(bnum):
                        pos_semi_list.append(int(buffer[2][b] + (len(buffer[3][b]) - buffer[2][b]) // 8))
                else:
                    for b in range(bnum):
                        pos_semi_list.append(buffer[2][b])
                # reorder boxes/labels/patterns: all positives first, then
                # all negatives, across the bnum images
                for ii in range(3, 7):
                    pos_h_boxes = np.concatenate([buffer[ii][pi][:pos2] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    neg_h_boxes = np.concatenate([buffer[ii][pi][pos2:] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
                # zero-pad every image to the batch's max height/width
                width = max([buffer[0][b].shape[1] for b in range(bnum)])
                height = max([buffer[0][b].shape[2] for b in range(bnum)])
                im_list = []
                for b in range(bnum):
                    im_list.append(np.pad(buffer[0][b], [(0, 0), (0, max(0, width - buffer[0][b].shape[1])),
                                                         (0, max(0, height - buffer[0][b].shape[2])), (0, 0)],
                                          mode='constant'))
                # BUG FIX: the original recomputed width/height from
                # buffer[7] here, which does not exist (buffer has 7 slots)
                # and raised IndexError; the values were never used.
                yield np.concatenate(im_list, axis=0), buffer[1], sum(pos_semi_list), \
                      buffer[3], buffer[4], buffer[5], buffer[6], pos_semi_list[0]
                # BUG FIX: reset with 7 slots (was 8), matching the initial
                # allocation and the indices actually used
                buffer = [[] for i in range(7)]
    if pattern_type == 1:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([bnum, None, None, 3]),
            tf.TensorShape([bnum, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def Augmented_HO_Neg_HICO(GT, Trainval_Neg, shape, Pos_augment, Neg_select, pattern_type=False, isalign=False,
                          box_list=[],
                          real_neg_ratio=0):
    """
    Augment one HICO ground-truth human/object pair and append mined negatives.

    :param GT: GT record: [image_id, hoi_idx_list, human_box, object_box, ...]
    :param Trainval_Neg: dict mapping image_id -> list of negative records
    :param shape: image shape, used when sampling/clipping augmented boxes
    :param Pos_augment: number of jittered copies requested per GT box
    :param Neg_select: max number of mined negatives appended for this image
    :param pattern_type: accepted but unused here -- the spatial pattern is
        always built with 2 channels below (NOTE(review): confirm intended)
    :param isalign: if True, pad human/object box lists (by repeating the
        tail) up to the larger of the two counts
    :param box_list: GT boxes used to reject randomly sampled no-action boxes
    :param real_neg_ratio: This is for no action HOI (all zeros)
    :return: (Pattern, Human_augmented, Object_augmented, action_HO, num_pos)
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # (1, 600) multi-hot label for this pair's HOI classes
    action_HO_ = Generate_action_HICO(GT[1])
    action_HO = action_HO_
    # jittered copies of the GT boxes -- presumably the first row is the GT
    # box itself; see Augmented_box (TODO confirm)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    max_augmented_nums = max(len(Human_augmented), len(Object_augmented))
    # pad each box list to the common maximum by repeating its tail
    if isalign:
        while len(Human_augmented) < max_augmented_nums:
            Human_augmented = np.concatenate(
                [Human_augmented, Human_augmented[-(max_augmented_nums - len(Human_augmented)):]], axis=0)
    if isalign:
        while len(Object_augmented) < max_augmented_nums:
            Object_augmented = np.concatenate(
                [Object_augmented, Object_augmented[-(max_augmented_nums - len(Object_augmented)):]], axis=0)
    # print("shape:", Human_augmented.shape, Object_augmented.shape)
    # truncate both lists to the same length and tile the label per pair
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    action_HO = np.tile(action_HO, [len(Human_augmented), 1])
    # optionally add sampled "no interaction" negatives: random object boxes
    # that avoid all GT boxes, paired with the first human box and an
    # all-zero 600-way label
    if len(box_list) > 0 and real_neg_ratio > 0:
        aug_neg_objs = Augmented_neg_box(Object, shape, image_id, int(Pos_augment * real_neg_ratio), bbox_list=box_list)
        if len(aug_neg_objs) > 0:
            aug_neg_humans = np.tile([Human_augmented[0]], [len(aug_neg_objs), 1])
            aug_neg_actions = np.zeros([len(aug_neg_objs), 600], )
            # print(aug_neg_objs.shape, Object_augmented.shape, Human_augmented.shape, aug_neg_humans.shape)
            Human_augmented = np.concatenate([Human_augmented, aug_neg_humans])
            Object_augmented = np.concatenate([Object_augmented, aug_neg_objs])
            action_HO = np.concatenate([action_HO, aug_neg_actions])
    num_pos = len(Human_augmented)
    # NOTE(review): pose_list is never filled or returned here -- vestigial
    pose_list = []
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # fewer mined negatives than requested: take them all
            for Neg in Trainval_Neg[image_id]:
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
                action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
        else:
            # sample Neg_select negatives without replacement
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
                action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
    num_pos_neg = len(Human_augmented)
    pattern_channel = 2
    # build the (num_pos_neg, 64, 64, 2) interaction-pattern stack, one
    # pattern per human/object pair
    Pattern = np.empty((0, 64, 64, pattern_channel), dtype=np.float32)
    for i in range(num_pos_neg):
        # Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        # there are poses for the negative sample
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:])
        Pattern_ = Pattern_.reshape(1, 64, 64, pattern_channel)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, pattern_channel)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 600)
    # print("shape1:", Human_augmented.shape, Object_augmented.shape, num_pos, Neg_select)
    return Pattern, Human_augmented, Object_augmented, action_HO, num_pos
def obtain_data2(Pos_augment=15, Neg_select=60, augment_type=0, model_name='', pattern_type=False,
                 zero_shot_type=0, isalign=False, neg_type_ratio=0):
    """Build a tf.data pipeline that batches two HICO images per step.

    Wraps generator2 so that pairs of per-image samples are merged into one
    batch: the image with more human boxes is moved first, each tensor is
    reordered so all positives precede all negatives, and the two images are
    zero-padded to a common size and stacked.

    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
              action_HO, sp, split_idx) tensors from the one-shot iterator;
              split_idx is the first image's positive count.
    """
    b_num = 2
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
    g_func = generator2
    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0=image, 1=image_id, 2=num_pos, 3=human boxes,
        # 4=object boxes, 5=action labels, 6=spatial patterns
        buffer = [[] for i in range(7)]
        import time
        # timing leftovers -- only used by the commented-out profiling below
        st = time.time()
        count_time = 0
        avg_time = 0
        for im_orig, image_id, num_pos, Human_augmented, \
                Object_augmented, action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment,
                                                               Neg_select,
                                                               augment_type, pattern_type,
                                                               zero_shot_type, isalign,
                                                               0):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # buffer[8].append(pose_list)
            # print(im_orig.shape, image_id, num_pos,
            if len(buffer[0]) >= b_num:
                # print("inner:", buffer[0][0].shape, buffer[0][1].shape, buffer[1], buffer[2], buffer[3].shape, buffer[4].shape, buffer[5].shape, buffer[6].shape)
                # print("inner:", buffer[1], buffer[2][0], buffer[2][1], buffer[3][0].shape, buffer[3][1].shape, buffer[5][0].shape, buffer[5][1].shape)
                # yield buffer[0][0], buffer[0][1], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6]
                # swap the pair so the image with more human boxes is first
                if len(buffer[3][0]) < len(buffer[3][1]):
                    # make sure the second batch is less.
                    for i in range(len(buffer)):
                        tmp = buffer[i][0]
                        buffer[i][0] = buffer[i][1]
                        buffer[i][1] = tmp
                # column 0 of each box row is the in-batch image index
                buffer[3][1][:, 0] = 1
                buffer[4][1][:, 0] = 1
                # print("inner hint:", buffer[1], 'num_pos:', buffer[2], 'len of h boxes:',len(buffer[3][0]), len(buffer[3][1]),
                #       len(buffer[4][0]), len(buffer[4][1]), len(buffer[5][0]), len(buffer[5][1]), len(buffer[6][0]), len(buffer[6][1]))
                # per-image positive counts; 'x5new' models also promote 1/8
                # of the negatives to (semi-)positives
                if model_name.__contains__('x5new'):
                    pos1 = int(buffer[2][0] + (len(buffer[3][0]) - buffer[2][0]) // 8)
                    pos2 = int(buffer[2][1] + (len(buffer[3][1]) - buffer[2][1]) // 8)
                else:
                    pos1 = buffer[2][0]
                    pos2 = buffer[2][1]
                # reorder each tensor: all positives first, then all negatives
                for ii in list(range(3, 7)):
                    pos_h_boxes = np.concatenate([buffer[ii][0][:pos1], buffer[ii][1][:pos2]], axis=0)
                    neg_h_boxes = np.concatenate([buffer[ii][0][pos1:], buffer[ii][1][pos2:]], axis=0)
                    buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
                    # buffer[ii] = np.concatenate([buffer[ii][0], buffer[ii][1]], axis=0)
                # NOTE(review): no-op -- slicing off and re-appending the
                # last element leaves buffer unchanged
                buffer = buffer[:-1] + buffer[-1:]
                # zero-pad both images to the common height/width and stack
                im_shape1 = buffer[0][0].shape
                im_shape2 = buffer[0][1].shape
                width = max(im_shape1[1], im_shape2[1])
                height = max(im_shape1[2], im_shape2[2])
                im1 = np.pad(buffer[0][0],
                             [(0, 0), (0, max(0, width - im_shape1[1])), (0, max(0, height - im_shape1[2])), (0, 0)],
                             mode='constant')
                im2 = np.pad(buffer[0][1],
                             [(0, 0), (0, max(0, width - im_shape2[1])), (0, max(0, height - im_shape2[2])), (0, 0)],
                             mode='constant')
                split_idx = pos1
                yield np.concatenate([im1, im2], axis=0), buffer[1], pos1 + pos2, buffer[3], buffer[4], buffer[5], \
                      buffer[6], split_idx
                buffer = [[] for i in range(7 )]
                # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
                # count_time += 1
                # print('generate batch:', time.time() - st, "average;", avg_time)
                # st = time.time()
    if pattern_type == 1:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([2, None, None, 3]),
            tf.TensorShape([2, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
            # tf.TensorShape([2, None, None, None, 1])
        )
    )
    # dataset = tf.data.Dataset.from_generator(gen, output_types=(tf.float32, tf.int32),
    #                                          output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([])))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def get_new_Trainval_GT(Trainval_GT, is_zero_shot, unseen_idx):
    """Drop GT entries that contain any unseen HOI class (zero-shot setup).

    :param Trainval_GT: list of GT records, item[1] holding the HOI indices
    :param is_zero_shot: filtering is applied only when > 0
    :param unseen_idx: iterable of HOI class indices held out as unseen
    :return: the filtered list (or the input list unchanged when not zero-shot)
    """
    unseen = set(unseen_idx)
    if is_zero_shot <= 0:
        return Trainval_GT
    return [item for item in Trainval_GT if not unseen.intersection(item[1])]
def extract_semi_data(semi_type, model_name):
    """Load the semi-supervised GT list selected by `semi_type`.

    Single-source types ('default', 'coco', 'coco1', 'coco2', 'rehico',
    'vcoco') load one pickle; 'both*'/'coco3' merge two sources; '*zs'
    variants keep only items containing unseen (zero-shot) classes.
    Image ids from non-HICO sources are shifted by MAX_HICO_ID (and
    MAX_COCO_ID for obj365) -- presumably to avoid id collisions between
    datasets; TODO confirm.

    :param semi_type: which semi-supervised source(s) to load
    :param model_name: used to derive the zero-shot split for '*zs'/'_zs11'
    :return: list of GT records [image_id, hoi_idx_list, ...]
    """
    print(semi_type, '===========')
    # default path; overridden per semi_type below
    semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl'
    if semi_type == 'default':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl'
    elif semi_type == 'coco':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi.pkl'
    elif semi_type == 'coco2':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi_coco2.pkl'
    elif semi_type == 'coco1':  # train2017
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi1.pkl'
    elif semi_type == 'rehico':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl'
    elif semi_type == 'vcoco':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_vcoco_semi.pkl'
    if semi_type == 'both':
        # COCO semi data (ids shifted) merged with the HICO GT
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi.pkl', "rb"), encoding='latin1')
        Trainval_semi1 = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        # Trainval_semi = Trainval_semi[:5000]
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
        Trainval_semi.extend(Trainval_semi1)
    elif semi_type == 'both1':
        # V-COCO semi data (ids shifted) merged with the HICO GT
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_vcoco_semi.pkl', "rb"),
                                    encoding='latin1')
        Trainval_semi1 = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
        Trainval_semi.extend(Trainval_semi1)
        pass
    elif semi_type == 'bothzs':
        # like 'both', but keep only semi items that contain unseen classes
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi.pkl', "rb"), encoding='latin1')
        Trainval_semi1 = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        # ids1 = [item[0] for item in Trainval_semi]
        # ids2 = [item[0] for item in Trainval_semi1]
        # ids = set(ids1).intersection(set(ids2))
        # Trainval_semi = [item for item in Trainval_semi if item[0] not in ids]
        zero_shot_type = get_zero_shot_type(model_name)
        unseen_idx = get_unseen_index(zero_shot_type)
        print(unseen_idx)
        new_semi = []
        print(len(Trainval_semi))  # 604907
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
            # print(item)
            if len(item[1]) > 0 and len(list(set(item[1]).intersection(set(unseen_idx)))) > 0:
                new_semi.append(item)
        print(len(new_semi), 'bothzs semi')  # 524239 bothzs semi zs3 517008 bothzs semi zs4
        print(type(Trainval_semi))
        Trainval_semi = new_semi
        Trainval_semi.extend(Trainval_semi1)
    elif semi_type == 'cocozs':
        # COCO semi data filtered down to items containing unseen classes
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi1.pkl', "rb"), encoding='latin1')
        # ids1 = [item[0] for item in Trainval_semi]
        # ids2 = [item[0] for item in Trainval_semi1]
        # ids = set(ids1).intersection(set(ids2))
        # Trainval_semi = [item for item in Trainval_semi if item[0] not in ids]
        zero_shot_type = get_zero_shot_type(model_name)
        unseen_idx = get_unseen_index(zero_shot_type)
        # Trainval_semi1 = [item for item in Trainval_semi1 if len(list(set(item[1]).intersection(set(unseen_idx)))) == 0] # remove unseen objects.
        print(unseen_idx)
        new_semi = []
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
            # print(item)
            if len(item[1]) > 0 and len(list(set(item[1]).intersection(set(unseen_idx)))) > 0:
                new_semi.append(item)
        print(type(Trainval_semi))
        Trainval_semi = new_semi
    elif semi_type == 'coco3':
        # COCO semi data plus objects365 data, each with its own id offset
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi1.pkl', "rb"), encoding='latin1')
        Trainval_semi1 = pickle.load(
            open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_obj365_coco_semi_obj365_coco.pkl', "rb"), encoding='latin1')
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
        for item in Trainval_semi1:
            item[0] += MAX_COCO_ID
        Trainval_semi.extend(Trainval_semi1)
    else:
        # single-source types resolved via semi_pkl_path above
        with open(semi_pkl_path, "rb") as f:
            Trainval_semi = pickle.load(f, encoding='latin1')
        if semi_type == 'coco' or semi_type == 'coco2' or semi_type == 'coco1' or semi_type == 'vcoco':
            for item in Trainval_semi:
                item[0] += MAX_HICO_ID
        if semi_type == 'rehico' and model_name.__contains__('_zs11'):
            # for _zs11 models, drop items containing unseen classes
            zero_shot_type = get_zero_shot_type(model_name)
            unseen_idx = get_unseen_index(zero_shot_type)
            Trainval_semi = get_new_Trainval_GT(Trainval_semi, zero_shot_type, unseen_idx)
            # Trainval_semi = [item for item in Trainval_semi if
            #                  len(list(set(item[1]).intersection(set(unseen_idx)))) == 0] # remove unseen objects.
            pass
    return Trainval_semi
def obtain_data2_large(Pos_augment=15, Neg_select=60, augment_type=0, model_name='',
                       pattern_type=False, zero_shot_type=0, isalign=False, bnum=2, neg_type_ratio=0):
    """Build a tf.data pipeline that stacks `bnum` HICO images per batch.

    NOTE(review): this re-definition shadows the earlier obtain_data2_large
    in this module; unlike that one, it does NOT forward neg_type_ratio to
    generator2 (the parameter is accepted but unused).

    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
              action_HO, sp, split_idx) tensors from the one-shot iterator;
              split_idx is the first image's positive count.
    """
    # bnum = 2
    if pattern_type == 1:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_with_pose.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO_with_pose.pkl', "rb"), encoding='latin1')
    else:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
    g_func = generator2
    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0=image, 1=image_id, 2=num_pos, 3=human boxes,
        # 4=object boxes, 5=action labels, 6=spatial patterns
        # (8 slots allocated; slot 7 is never used)
        buffer = [[] for i in range(8)]
        import time
        # timing leftovers -- only used by the commented-out profiling below
        st = time.time()
        count_time = 0
        avg_time = 0
        # np.random.seed(0)
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, \
                action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                             augment_type,
                                             pattern_type, zero_shot_type, isalign, 0):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # column 0 of each box row is the image's index within the batch
            buffer[3][-1][:, 0] = len(buffer[3]) - 1
            buffer[4][-1][:, 0] = len(buffer[3]) - 1
            if len(buffer[0]) >= bnum:
                # if len(buffer[3][0]) < len(buffer[3][1]):
                #     # make sure the second batch is less.
                #     for i in range(len(buffer)):
                #         tmp = buffer[i][0]
                #         buffer[i][0] = buffer[i][1]
                #         buffer[i][1] = tmp
                # print("inner:", buffer[0][0].shape, buffer[0][1].shape, buffer[1], buffer[2], buffer[3].shape, buffer[4].shape, buffer[5].shape, buffer[6].shape)
                # print("inner:", buffer[1], buffer[2][0], buffer[2][1], buffer[3][0].shape, buffer[3][1].shape, buffer[5][0].shape, buffer[5][1].shape)
                # yield buffer[0][0], buffer[0][1], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6]
                # print("inner hint:", buffer[1], 'num_pos:', buffer[2], 'len of h boxes:',len(buffer[3][0]), len(buffer[3][1]),
                #       len(buffer[4][0]), len(buffer[4][1]), len(buffer[5][0]), len(buffer[5][1]), len(buffer[6][0]), len(buffer[6][1]))
                # per-image positive counts; 'x5new' models also promote 1/8
                # of the negatives to (semi-)positives
                pos_semi_list = []
                if model_name.__contains__('x5new'):
                    for b in range(bnum):
                        pos_semi_list.append(int(buffer[2][b] + (len(buffer[3][b]) - buffer[2][b]) // 8))
                else:
                    for b in range(bnum):
                        pos_semi_list.append(buffer[2][b])
                # reorder each tensor: all positives first, then all negatives
                for ii in range(3, 7):
                    pos_h_boxes = np.concatenate([buffer[ii][pi][:pos2] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    neg_h_boxes = np.concatenate([buffer[ii][pi][pos2:] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
                # zero-pad every image to the batch's max height/width
                width = max([buffer[0][b].shape[1] for b in range(bnum)])
                height = max([buffer[0][b].shape[2] for b in range(bnum)])
                im_list = []
                for b in range(bnum):
                    im_list.append(np.pad(buffer[0][b], [(0, 0), (0, max(0, width - buffer[0][b].shape[1])),
                                                         (0, max(0, height - buffer[0][b].shape[2])), (0, 0)],
                                          mode='constant'))
                yield np.concatenate(im_list, axis=0), buffer[1], sum(pos_semi_list), \
                      buffer[3], buffer[4], buffer[5], buffer[6], pos_semi_list[0]
                buffer = [[] for i in range(8)]
                # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
                # count_time += 1
                # print('generate batch:', time.time() - st, "average;", avg_time)
                # st = time.time()
    if pattern_type == 1:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([bnum, None, None, 3]),
            tf.TensorShape([bnum, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    # dataset = tf.data.Dataset.from_generator(gen, output_types=(tf.float32, tf.int32),
    #                                          output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([])))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def obtain_batch_data_semi1(Pos_augment=15, Neg_select=60, augment_type=0, model_name='', pattern_type=0,
                            zero_shot_type=0, isalign=False, epoch=0, semi_type='default', bnum=2, neg_type_ratio=0):
    """Build a tf.data pipeline mixing one supervised HICO image with `bnum`
    semi-supervised images per batch (bnum + 1 images total).

    The supervised stream comes from generator2 over the HICO GT/negative
    pickles; the semi stream from generator2 over extract_semi_data's result
    with an empty negative dict (positives only). Boxes/labels/patterns are
    reordered so all positives precede all negatives in the merged batch.

    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
              action_HO, sp, split_idx) tensors; split_idx is the number of
              supervised (first-image) positives.
    """
    assert len(model_name) > 1, model_name
    with open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb") as f:
        Trainval_GT = pickle.load(f, encoding='latin1')
    Trainval_semi = extract_semi_data(semi_type, model_name)
    with open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb") as f:
        Trainval_N = pickle.load(f, encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0=image, 1=image_id, 2=num_pos, 3=human boxes,
        # 4=object boxes, 5=action labels, 6=spatial patterns
        buffer = [[] for i in range(7)]
        # semi-supervised stream: no mined negatives (empty dict)
        semi_g = generator2(Trainval_semi, {}, Pos_augment, Neg_select, augment_type, False, zero_shot_type, isalign,
                            epoch, )
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, \
            action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                         augment_type,
                                         pattern_type, zero_shot_type, False, epoch,
                                         ):
            # slot index 0: the supervised image
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # slots 1..bnum: the semi-supervised images
            for b in range(bnum):
                im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern, = next(semi_g)
                buffer[0].append(im_orig)
                buffer[1].append(image_id)
                buffer[2].append(num_pos)
                buffer[3].append(Human_augmented)
                buffer[4].append(Object_augmented)
                buffer[5].append(action_HO)
                buffer[6].append(Pattern)
                # column 0 of each box row is the image's index in the batch
                buffer[3][b + 1][:, 0] = b + 1
                buffer[4][b + 1][:, 0] = b + 1
                # semi images contribute positives only
                assert num_pos == len(Human_augmented)
            # positive counts: pos1 for the supervised image, one entry per
            # semi image; 'x5new' models also promote 1/8 of the negatives
            pos_semi_list = []
            if model_name.__contains__('x5new'):
                pos1 = int(buffer[2][0] + (len(buffer[3][0]) - buffer[2][0]) // 8)
                assert len(buffer[3][1]) == buffer[2][1], (len(buffer[3][1]), buffer[2][1],)
                for b in range(bnum):
                    pos_semi_list.append(int(buffer[2][b + 1] + (len(buffer[3][b + 1]) - buffer[2][b + 1]) // 8))
            else:
                pos1 = buffer[2][0]
                for b in range(bnum):
                    pos_semi_list.append(buffer[2][b + 1])
            # reorder each tensor: all positives first, then all negatives
            for ii in range(3, 7):
                pos_h_boxes = np.concatenate(
                    [buffer[ii][0][:pos1]] + [buffer[ii][pi + 1][:pos2] for pi, pos2 in enumerate(pos_semi_list)],
                    axis=0)
                neg_h_boxes = np.concatenate(
                    [buffer[ii][0][pos1:]] + [buffer[ii][pi + 1][pos2:] for pi, pos2 in enumerate(pos_semi_list)],
                    axis=0)
                buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
            # zero-pad all bnum + 1 images to the common height/width
            width = max([buffer[0][b].shape[1] for b in range(bnum + 1)])
            height = max([buffer[0][b].shape[2] for b in range(bnum + 1)])
            im_list = []
            for b in range(bnum + 1):
                im_list.append(np.pad(buffer[0][b], [(0, 0), (0, max(0, width - buffer[0][b].shape[1])),
                                                     (0, max(0, height - buffer[0][b].shape[2])), (0, 0)],
                                      mode='constant'))
            # BUG FIX: the original recomputed width/height here from
            # buffer[7], which does not exist (buffer has 7 slots) and
            # raised IndexError; the values were never used afterwards.
            split_idx = pos1
            yield np.concatenate(im_list, axis=0), buffer[1], pos1 + sum(pos_semi_list), \
                  buffer[3], buffer[4], buffer[5], buffer[6], split_idx
            buffer = [[] for i in range(7)]
    pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([bnum + 1, None, None, 3]),
            tf.TensorShape([bnum + 1, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def Augmented_HO_Neg_HICO2(GT, Trainval_Neg, shape, Pos_augment, Neg_select, pose_type=0, isalign=False):
    """Augment one HICO GT pair to exactly Pos_augment + 1 positives (when
    isalign) and append up to Neg_select mined negatives.

    :param GT: GT record: [image_id, hoi_idx_list, human_box, object_box, ...]
    :param Trainval_Neg: dict mapping image_id -> list of negative records
    :param shape: image shape used when jittering/clipping augmented boxes
    :param Pos_augment: number of jittered positive copies requested
    :param Neg_select: max negatives drawn for this image
    :param pose_type: > 0 selects a 3-channel spatial pattern (pose channel)
    :param isalign: if True, repeat boxes until exactly Pos_augment + 1 remain
    :return: (Pattern, Human_augmented, Object_augmented, action_HO, num_pos)
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # (1, 600) multi-hot label for this pair's HOI classes
    action_HO_ = Generate_action_HICO(GT[1])
    action_HO = action_HO_
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    # pad each list to Pos_augment + 1 by repeating its tail
    if isalign:
        while len(Human_augmented) < Pos_augment + 1:
            Human_augmented = np.concatenate(
                [Human_augmented, Human_augmented[-(Pos_augment + 1 - len(Human_augmented)):]], axis=0)
    if isalign:
        while len(Object_augmented) < Pos_augment + 1:
            # BUG FIX: the original sliced by len(Human_augmented) here
            # (copy-paste from the human branch above), repeating the wrong
            # number of object boxes; use len(Object_augmented), matching
            # Augmented_HO_Neg_HICO.
            Object_augmented = np.concatenate(
                [Object_augmented, Object_augmented[-(Pos_augment + 1 - len(Object_augmented)):]], axis=0)
    # print("shape:", Human_augmented.shape, Object_augmented.shape)
    # truncate both lists to the same length
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    if isalign:
        assert len(Human_augmented) == Pos_augment + 1, (len(Human_augmented), Pos_augment)
    num_pos = len(Human_augmented)
    # NOTE(review): pose_list is collected but never returned -- vestigial
    if pose_type > 0: pose_list = [GT[5]] * num_pos
    # tile the positive label once per augmented pair
    for i in range(num_pos - 1):
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # fewer mined negatives than requested: take them all
            for Neg in Trainval_Neg[image_id]:
                if pose_type > 0: pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
                action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
        else:
            # sample Neg_select negatives without replacement
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                if pose_type > 0: pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
                action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
    num_pos_neg = len(Human_augmented)
    if pose_type > 0:
        pattern_channel = 3
    else:
        pattern_channel = 2
    Pattern = np.empty((0, 64, 64, pattern_channel), dtype=np.float32)
    for i in range(num_pos_neg):
        # NOTE(review): assumes Get_next_sp emits pattern_channel channels
        # when pose_type > 0 -- confirm against its definition
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:])
        Pattern_ = Pattern_.reshape(1, 64, 64, pattern_channel)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, pattern_channel)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 600)
    return Pattern, Human_augmented, Object_augmented, action_HO, num_pos
def coco_generator1(Pos_augment=15, Neg_select=30, augment_type=0, with_pose=False, is_zero_shot=0):
    """Image-grouped V-COCO training generator.

    Groups GT interactions by image id and, per image visit, yields the image
    plus a ``blobs`` dict concatenating the augmented boxes/labels/masks of up
    to ``inters_per_img`` interactions of that image.

    :param Pos_augment: requested positive augmentations per GT interaction.
    :param Neg_select: requested negatives per GT interaction.
    :param augment_type: scheme code; resolved through ``get_aug_params``.
    :param with_pose: unused in this variant — TODO confirm intentional.
    :param is_zero_shot: unused in this variant — TODO confirm intentional.
    :yield: ``(im_orig, image_id, num_pos, blobs)`` where ``num_pos`` is
        ``len(blobs['gt_class_H'])``.
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO.pkl', "rb"), encoding='latin1')
    Neg_select1, Pos_augment1, inters_per_img = get_aug_params(Neg_select, Pos_augment, augment_type)
    index_list = list(range(0, len(Trainval_GT)))
    print("generator1", inters_per_img, Pos_augment1, 'Neg_select:', Neg_select1, augment_type)
    import math
    # Map image id -> indices of its GT interactions in Trainval_GT.
    img_id_index_map = {}
    for i, gt in enumerate(Trainval_GT):
        img_id = gt[0]
        if img_id in img_id_index_map:
            img_id_index_map[img_id].append(i)
        else:
            img_id_index_map[img_id] = [i]
    img_id_list = list(img_id_index_map.keys())
    # Repeat an image id once per extra chunk of ``inters_per_img`` interactions,
    # so every interaction is visited roughly once per epoch.
    for k, v in img_id_index_map.items():
        for i in range(math.ceil(len(v) * 1.0 / inters_per_img) - 1):
            img_id_list.append(k)
    import copy
    while True:
        # Fresh per-epoch copy: gt ids are consumed from running_map as visited.
        running_map = copy.deepcopy(img_id_index_map)
        # print('Step: ', i)
        np.random.shuffle(index_list)
        for k in running_map.keys():
            np.random.shuffle(running_map[k])
        for img_id_tmp in img_id_list:
            # Take the next slice of interactions for this image.
            gt_ids = running_map[img_id_tmp][:inters_per_img]
            running_map[img_id_tmp] = running_map[img_id_tmp][inters_per_img:]
            image_id = img_id_tmp
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            import os
            if not os.path.exists(im_file):
                # NOTE(review): missing file is only logged; cv2.imread below
                # would then return None and crash on .astype — confirm all
                # listed images exist on disk.
                print('not exist', im_file)
            import cv2
            im = cv2.imread(im_file)
            # Mean-subtracted float32 image (HWC; batch dim added by consumer).
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im.shape
            # Accumulators for all interactions of this image visit.
            blobs = {}
            blobs['H_boxes'] = np.empty([0, 5], dtype=np.float32)
            blobs['Hsp_boxes'] = np.empty([0, 5], dtype=np.float32)
            blobs['O_boxes'] = np.empty([0, 5], dtype=np.float32)
            blobs['gt_class_sp'] = np.empty([0, 29], dtype=np.float32)
            blobs['gt_class_HO'] = np.empty([0, 29], dtype=np.float32)
            blobs['gt_class_H'] = np.empty([0, 29], dtype=np.float32)
            blobs['gt_class_C'] = np.empty([0, 238], dtype=np.float32)
            blobs['Mask_sp'] = np.empty([0, 29], dtype=np.float32)
            blobs['Mask_HO'] = np.empty([0, 29], dtype=np.float32)
            blobs['Mask_H'] = np.empty([0, 29], dtype=np.float32)
            blobs['sp'] = np.empty([0, 64, 64, 2], dtype=np.float32)
            for i in gt_ids:
                GT = Trainval_GT[i]
                assert GT[0] == image_id
                # im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
                cur_neg_select = Neg_select1
                cur_pos_augment = Pos_augment1
                if augment_type > 1:
                    # Concentrate all negatives on the last interaction so the
                    # total per image stays constant.
                    if i == gt_ids[-1]:
                        cur_neg_select = Neg_select1 * len(gt_ids)
                    else:
                        cur_neg_select = 0
                else:
                    cur_neg_select = Neg_select1
                Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
                action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose = Augmented_HO_spNeg(GT,
                                                                                                              Trainval_N,
                                                                                                              im_shape,
                                                                                                              Pos_augment=cur_pos_augment,
                                                                                                              Neg_select=cur_neg_select)
                # blobs['image'] = im_orig
                blobs['H_boxes'] = np.concatenate((blobs['H_boxes'], Human_augmented), axis=0)
                blobs['Hsp_boxes'] = np.concatenate((blobs['Hsp_boxes'], Human_augmented_sp), axis=0)
                blobs['O_boxes'] = np.concatenate((blobs['O_boxes'], Object_augmented), axis=0)
                blobs['gt_class_sp'] = np.concatenate((blobs['gt_class_sp'], action_sp), axis=0)
                blobs['gt_class_HO'] = np.concatenate((blobs['gt_class_HO'], action_HO), axis=0)
                blobs['gt_class_H'] = np.concatenate((blobs['gt_class_H'], action_H), axis=0)
                blobs['gt_class_C'] = np.concatenate((blobs['gt_class_C'], action_compose), axis=0)
                blobs['Mask_sp'] = np.concatenate((blobs['Mask_sp'], mask_sp), axis=0)
                blobs['Mask_HO'] = np.concatenate((blobs['Mask_HO'], mask_HO), axis=0)
                blobs['Mask_H'] = np.concatenate((blobs['Mask_H'], mask_H), axis=0)
                blobs['sp'] = np.concatenate((blobs['sp'], Pattern), axis=0)
            yield (im_orig, image_id, len(blobs['gt_class_H']), blobs)
def coco_generator(Pos_augment=15, Neg_select=30, augment_type=0, with_pose=False, is_zero_shot=0):
    """V-COCO training generator: one GT interaction per yielded sample.

    Loads the pose/object-augmented V-COCO ground truth and negative pools
    once, then loops forever over a reshuffled index list, yielding for each
    GT interaction the mean-subtracted image tensor plus a ``blobs`` dict of
    augmented human/object boxes, spatial patterns, action labels and masks.

    :param Pos_augment: number of positive augmentations per GT interaction.
    :param Neg_select: number of negative pairs sampled per GT interaction.
    :param augment_type: unused in this variant (kept for signature parity
        with ``coco_generator1``).
    :param with_pose: unused in this variant (kept for signature parity).
    :param is_zero_shot: unused in this variant (kept for signature parity).
    :yield: ``(im_orig, image_id, num_pos, blobs)`` with
        ``num_pos == len(action_H)``.
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_with_pose_obj.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO_with_pose_obj.pkl', "rb"), encoding='latin1')
    index_list = list(range(0, len(Trainval_GT)))
    # NOTE: a large ``set_list`` literal and an ``i = 0`` counter that were
    # never referenced by this function have been removed.
    while True:
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            im = cv2.imread(im_file)
            # Mean-subtract in float32 and add the batch dimension (NHWC).
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
            action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = Augmented_HO_spNeg(GT, Trainval_N,
                                                                                                      im_shape,
                                                                                                      Pos_augment,
                                                                                                      Neg_select)
            blobs = {}
            blobs['H_boxes'] = Human_augmented
            blobs['Hsp_boxes'] = Human_augmented_sp
            blobs['O_boxes'] = Object_augmented
            blobs['gt_class_sp'] = action_sp
            blobs['gt_class_HO'] = action_HO
            blobs['gt_class_H'] = action_H
            blobs['gt_class_C'] = gt_compose
            blobs['Mask_sp'] = mask_sp
            blobs['Mask_HO'] = mask_HO
            blobs['Mask_H'] = mask_H
            blobs['sp'] = Pattern
            yield (im_orig, image_id, len(action_H), blobs)
def obtain_coco_data(Pos_augment=15, Neg_select=30, augment_type=0):
    """Build the TF1 input pipeline for V-COCO training (single sample per step).

    Wraps ``coco_generator`` (augment_type 0) or ``coco_generator1`` (otherwise)
    in a ``tf.data.Dataset`` with explicit type/shape specs and returns the
    one-shot-iterator output tensors.

    :return: ``(image, image_id, num_pos, blobs)`` graph tensors.
    """
    if augment_type == 0:
        g = coco_generator
    else:
        g = coco_generator1
    # generator()
    dataset = tf.data.Dataset.from_generator(partial(g, Pos_augment, Neg_select, augment_type),
                                             output_types=(tf.float32, tf.int32, tf.int32, {
                                                 'H_boxes': tf.float32,
                                                 'Hsp_boxes': tf.float32,
                                                 'O_boxes': tf.float32,
                                                 'gt_class_sp': tf.float32,
                                                 'gt_class_HO': tf.float32,
                                                 'gt_class_H': tf.float32,
                                                 'gt_class_C': tf.float32,
                                                 'Mask_sp': tf.float32,
                                                 'Mask_HO': tf.float32,
                                                 'Mask_H': tf.float32,
                                                 'sp': tf.float32,
                                             }), output_shapes=(
        tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
        {
            'H_boxes': tf.TensorShape([None, 5]),
            'Hsp_boxes': tf.TensorShape([None, 5]),
            'O_boxes': tf.TensorShape([None, 5]),
            'gt_class_sp': tf.TensorShape([None, 29]),
            'gt_class_HO': tf.TensorShape([None, 29]),
            'gt_class_H': tf.TensorShape([None, 29]),
            'gt_class_C': tf.TensorShape([None, 238]),
            'Mask_sp': tf.TensorShape([None, 29]),
            'Mask_HO': tf.TensorShape([None, 29]),
            'Mask_H': tf.TensorShape([None, 29]),
            # NOTE(review): 'sp' is declared with 3 channels here, while other
            # V-COCO pipelines in this file declare 2 — confirm the generator's
            # pattern actually has 3 channels.
            'sp': tf.TensorShape([None, 64, 64, 3]),
        }))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs = iterator.get_next()
    return image, image_id, num_pos, blobs
    # image, num_pos = iterator.get_next()
    # return image, num_pos
def obtain_coco_data1(Pos_augment=15, Neg_select=30, augment_type=0, with_pose=False, is_zero_shot=0):
    """Build a paired-batch TF1 input pipeline for V-COCO (29-verb labels).

    Wraps the chosen single-sample generator in ``generator3``, which groups
    every two consecutive samples into one yielded tuple (the pair ordered so
    the first sample has at least as many positives), then exposes the pair
    through a one-shot iterator.

    :return: ``([image, image1], [image_id, image_id1], [num_pos, num_pos1],
        [blobs, blobs1])`` graph tensors.
    """
    if augment_type == 0:
        g_func = coco_generator
    else:
        g_func = coco_generator1

    def generator3(Pos_augment, Neg_select, augment_type, with_pose, is_zero_shot):
        # Pair consecutive samples; swap so the first has >= positives.
        buffer = [[] for i in range(4)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, with_pose, is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            if len(buffer[0]) > 1:
                if buffer[2][0] < buffer[2][1]:
                    # make sure the first batch is less.
                    for i in range(len(buffer)):
                        tmp = buffer[i][0]
                        buffer[i][0] = buffer[i][1]
                        buffer[i][1] = tmp
                yield buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0], buffer[0][1], buffer[1][1], buffer[2][1], \
                      buffer[3][1],
                buffer = [[] for i in range(4)]
                # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
                # count_time += 1
                # print('generate batch:', time.time() - st, "average;", avg_time)
                # st = time.time()

    # Two copies of the per-sample type/shape spec: one for each element of
    # the yielded pair.
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, with_pose, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }, tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }), output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, 29]),
                               'gt_class_HO': tf.TensorShape([None, 29]),
                               'gt_class_H': tf.TensorShape([None, 29]),
                               'gt_class_C': tf.TensorShape([None, 238]),
                               'Mask_sp': tf.TensorShape([None, 29]),
                               'Mask_HO': tf.TensorShape([None, 29]),
                               'Mask_H': tf.TensorShape([None, 29]),
                               'sp': tf.TensorShape([None, 64, 64, 3]),
                           }, tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, 29]),
                               'gt_class_HO': tf.TensorShape([None, 29]),
                               'gt_class_H': tf.TensorShape([None, 29]),
                               'gt_class_C': tf.TensorShape([None, 238]),
                               'Mask_sp': tf.TensorShape([None, 29]),
                               'Mask_HO': tf.TensorShape([None, 29]),
                               'Mask_H': tf.TensorShape([None, 29]),
                               'sp': tf.TensorShape([None, 64, 64, 3]),
                           }))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def obtain_coco_data_hoicoco_24(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0):
    """Paired-batch TF1 input pipeline for V-COCO with the HOI-COCO verb sets.

    :param type: 0 -> 24-verb labels via ``coco_generator2``;
        1 -> 21-verb labels via ``coco_generator3``.
        Any other value raises ``ValueError``.
    :return: ``([image, image1], [image_id, image_id1], [num_pos, num_pos1],
        [blobs, blobs1])`` graph tensors.
    :raises ValueError: if ``type`` is not 0 or 1.
    """
    if type == 0:
        verb_num = 24
        g_func = coco_generator2
    elif type == 1:
        verb_num = 21
        g_func = coco_generator3
    else:
        # Fail fast: previously an unrecognized ``type`` left verb_num/g_func
        # unbound and crashed later with a confusing NameError.
        raise ValueError('unsupported type: {!r} (expected 0 or 1)'.format(type))

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Pair consecutive samples so each yielded tuple carries two images,
        # ordered so the first element has at least as many positives.
        buffer = [[] for _ in range(4)]
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            if len(buffer[0]) > 1:
                if buffer[2][0] < buffer[2][1]:
                    # Swap so the first sample has no fewer positives.
                    for i in range(len(buffer)):
                        buffer[i][0], buffer[i][1] = buffer[i][1], buffer[i][0]
                yield buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0], \
                      buffer[0][1], buffer[1][1], buffer[2][1], buffer[3][1]
                buffer = [[] for _ in range(4)]

    # NOTE(review): the type/shape spec declares a 'pose_box' entry, but the
    # blobs built by coco_generator2/coco_generator3 do not set 'pose_box' —
    # confirm the structures actually match at runtime.
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'pose_box': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }, tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'pose_box': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }), output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'pose_box': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }, tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'pose_box': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def get_new_Trainval_N(Trainval_N, is_zero_shot, unseen_idx):
    """Filter the negative pool for zero-shot training.

    For zero-shot runs, drop every negative whose HOI category id (``item[1]``)
    belongs to ``unseen_idx``; otherwise return the pool unchanged.
    (The original upstream code wrongly reused ``k`` as 4 — fixed upstream.)

    :param Trainval_N: dict mapping image id -> list of negative samples.
    :param is_zero_shot: > 0 enables filtering.
    :param unseen_idx: collection of held-out HOI category ids.
    :return: the filtered dict (a new one) or the original ``Trainval_N``.
    """
    if is_zero_shot <= 0:
        # Not a zero-shot run: keep the negative pool untouched.
        return Trainval_N
    return {
        img_id: [neg for neg in negatives if neg[1] not in unseen_idx]
        for img_id, negatives in Trainval_N.items()
    }
def get_zero_shot_type(model_name):
    """Map a model-name tag to its zero-shot configuration code.

    Tags are checked in priority order (first match wins), since several tags
    overlap as substrings. Returns 0 when the name carries no zero-shot tag.

    :param model_name: model identifier string, e.g. ``'iCAN_R_union_zs3_...'``.
    :return: int zero-shot code (0, 3, 4, 7 or 11).
    """
    # Idiomatic ``in`` replaces the original ``str.__contains__`` calls.
    if '_zs_' in model_name:
        # open long-tailed HOI detection
        return 7
    if 'zsnrare' in model_name:
        return 4
    if '_zsrare_' in model_name:
        return 3
    if '_zsuo_' in model_name:
        # unseen-object setting
        return 11
    if '_zs3_' in model_name:
        # VCL model (rare-first split)
        return 3
    if '_zs4_' in model_name:
        return 4
    return 0
def get_epoch_iters(model_name):
    """Return iterations per epoch for the training split implied by the name.

    Zero-shot splits train on a reduced set, hence fewer iterations; the
    default 43273 corresponds to the full HICO-DET training set.

    :param model_name: model identifier string.
    :return: int number of iterations per epoch.
    """
    # Idiomatic ``in`` checks; order matters ('zsnrare' before 'zsrare').
    if 'zsnrare' in model_name:
        return 20000
    if 'zs_' in model_name:
        return 20000
    if '_zs4_' in model_name:
        return 20000
    if 'zsrare' in model_name:
        return 40000
    return 43273
def get_augment_type(model_name):
    """Return the augmentation scheme code parsed from the model name.

    :param model_name: model identifier string.
    :return: 4 for '_aug5', 5 for '_aug6', else 0 (default scheme).
    """
    # Idiomatic ``in`` replaces the original ``str.__contains__`` calls.
    if '_aug5' in model_name:
        return 4
    if '_aug6' in model_name:
        return 5
    # Unrecognized/absent tag: fall back to the default scheme.
    return 0
def get_unseen_index(zero_shot_type):
    """Return the held-out HOI category ids for a zero-shot setting.

    :param zero_shot_type: 3 = rare-first split, 4 = non-rare-first split,
        11 = unseen-object split, 7 = 24-rare merge of the 3/4 splits.
    :return: list of HOI ids, or None for any other ``zero_shot_type``.
    """
    rare_first = [
        509, 279, 280, 402, 504, 286, 499, 498, 289, 485, 303, 311, 325, 439, 351, 358, 66, 427, 379, 418,
        70, 416, 389, 90, 395, 76, 397, 84, 135, 262, 401, 592, 560, 586, 548, 593, 526, 181, 257, 539,
        535, 260, 596, 345, 189, 205, 206, 429, 179, 350, 405, 522, 449, 261, 255, 546, 547, 44, 22, 334,
        599, 239, 315, 317, 229, 158, 195, 238, 364, 222, 281, 149, 399, 83, 127, 254, 398, 403, 555, 552,
        520, 531, 440, 436, 482, 274, 8, 188, 216, 597, 77, 407, 556, 469, 474, 107, 390, 410, 27, 381,
        463, 99, 184, 100, 292, 517, 80, 333, 62, 354, 104, 55, 50, 198, 168, 391, 192, 595, 136, 581,
    ]
    non_rare_first = [
        38, 41, 20, 18, 245, 11, 19, 154, 459, 42, 155, 139, 60, 461, 577, 153, 582, 89, 141, 576,
        75, 212, 472, 61, 457, 146, 208, 94, 471, 131, 248, 544, 515, 566, 370, 481, 226, 250, 470, 323,
        169, 480, 479, 230, 385, 73, 159, 190, 377, 176, 249, 371, 284, 48, 583, 53, 162, 140, 185, 106,
        294, 56, 320, 152, 374, 338, 29, 594, 346, 456, 589, 45, 23, 67, 478, 223, 493, 228, 240, 215,
        91, 115, 337, 559, 7, 218, 518, 297, 191, 266, 304, 6, 572, 529, 312, 9, 308, 417, 197, 193,
        163, 455, 25, 54, 575, 446, 387, 483, 534, 340, 508, 110, 329, 246, 173, 506, 383, 93, 516, 64,
    ]
    # Unseen-object split; misses verbs [5, 6, 28, 56, 88]:
    # 006 break, 007 brush_with, 029 flip, 057 move, 089 slide.
    unseen_object = [
        111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
        126, 127, 128, 224, 225, 226, 227, 228, 229, 230, 231, 290, 291, 292, 293,
        294, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 336, 337,
        338, 339, 340, 341, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
        429, 430, 431, 432, 433, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462,
        463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 533, 534, 535, 536,
        537, 558, 559, 560, 561, 595, 596, 597, 598, 599,
    ]
    # 24-rare merge of the rare-first and non-rare-first splits.
    merged_24_rare = [
        509, 279, 280, 402, 504, 286, 499, 498, 289, 485, 303, 311, 325, 439, 351, 358, 66, 427, 379, 418, 70, 416, 389,
        90, 38, 41, 20, 18, 245, 11, 19, 154, 459, 42, 155, 139, 60, 461, 577, 153, 582, 89, 141, 576, 75, 212, 472, 61,
        457, 146, 208, 94, 471, 131, 248, 544, 515, 566, 370, 481, 226, 250, 470, 323, 169, 480, 479, 230, 385, 73, 159,
        190, 377, 176, 249, 371, 284, 48, 583, 53, 162, 140, 185, 106, 294, 56, 320, 152, 374, 338, 29, 594, 346, 456, 589,
        45, 23, 67, 478, 223, 493, 228, 240, 215, 91, 115, 337, 559, 7, 218, 518, 297, 191, 266, 304, 6, 572, 529, 312,
        9,
    ]
    split_table = {
        3: rare_first,
        4: non_rare_first,
        11: unseen_object,
        7: merged_24_rare,
    }
    return split_table.get(zero_shot_type)
def generator2(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type, pattern_type, zero_shot_type, isalign,
               epoch=0):
    """Image-grouped HICO-DET training generator with zero-shot filtering.

    Groups GT interactions by image, visits ``inters_per_img`` interactions per
    image step, skips interactions whose labels are held out for zero-shot
    training, and yields concatenated augmented boxes/labels/patterns per image.

    :param Trainval_GT: list of GT records; each record starts with image id.
    :param Trainval_N: dict image id -> negative samples (filtered here).
    :param Pos_augment: requested positive augmentations per GT interaction.
    :param Neg_select: requested negatives per GT interaction.
    :param augment_type: scheme code (resolved via get_aug_params; < 0 stops
        after a single epoch).
    :param pattern_type: spatial-pattern variant, forwarded to the augmenter.
    :param zero_shot_type: zero-shot split code (see get_zero_shot_type).
    :param isalign: forwarded to Augmented_HO_Neg_HICO.
    :param epoch: unused here — TODO confirm intentional.
    :yield: (im_orig, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, Pattern)
    """
    # import skimage
    # assert skimage.__version__ == '0.14.2', "The version of skimage might affect the speed largely. I use 0.14.2"
    Neg_select1, Pos_augment1, inters_per_img = get_aug_params(Neg_select, Pos_augment, augment_type)
    unseen_idx = get_unseen_index(zero_shot_type)
    Trainval_N = get_new_Trainval_N(Trainval_N, zero_shot_type, unseen_idx)
    print("generator2", inters_per_img, Pos_augment1, 'Neg_select:', Neg_select1, augment_type, 'zero shot:',
          zero_shot_type)
    import math
    # Map image id -> indices of its GT interactions.
    img_id_index_map = {}
    for i, gt in enumerate(Trainval_GT):
        img_id = gt[0]
        if img_id in img_id_index_map:
            img_id_index_map[img_id].append(i)
        else:
            img_id_index_map[img_id] = [i]
    img_id_list = list(img_id_index_map.keys())
    # Repeat each image id once per extra chunk of ``inters_per_img``
    # interactions so every interaction is visited roughly once per epoch.
    for k, v in img_id_index_map.items():
        for i in range(math.ceil(len(v) * 1.0 / inters_per_img) - 1):
            img_id_list.append(k)
    import copy
    import time
    st = time.time()
    count_time = 0
    avg_time = 0
    while True:
        # Fresh per-epoch copy; gt ids are consumed as images are visited.
        running_map = copy.deepcopy(img_id_index_map)
        # print('Step: ', i)
        np.random.shuffle(img_id_list)
        for k in running_map.keys():
            np.random.shuffle(running_map[k])
        for img_id_tmp in img_id_list:
            gt_ids = running_map[img_id_tmp][:inters_per_img]
            running_map[img_id_tmp] = running_map[img_id_tmp][inters_per_img:]
            Pattern_list = []
            Human_augmented_list = []
            Object_augmented_list = []
            action_HO_list = []
            num_pos_list = 0
            mask_all_list = []
            image_id = img_id_tmp
            if image_id in [528, 791, 1453, 2783, 3489, 3946, 3946, 11747, 11978, 12677, 16946, 17833, 19218, 19218,
                            22347, 27293, 27584, 28514, 33683, 35399]:
                # This is a list contain multiple objects within the same object box. It seems like wrong annotations.
                # We remove those images. This do not affect the performance in our experiment.
                continue
            im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (
                str(image_id)).zfill(
                8) + '.jpg'
            # id, gt, h, o
            # print(gt_ids, gt_ids[0], Trainval_GT[gt_ids[0]])
            import cv2
            import os
            if not os.path.exists(im_file):
                print('not exist', im_file)
                continue
            im = cv2.imread(im_file)
            if im is None:
                print('node', im_file)
                continue
            # Mean-subtracted float32 image; batch dim added just before yield.
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im.shape
            import os
            # print('generate batch read image:', time.time() - st, "average;", avg_time)
            for i in gt_ids:
                GT = Trainval_GT[i]
                # Skip interactions carrying any held-out (unseen) label.
                if zero_shot_type > 0:
                    has_rare = False
                    for label in GT[1]:
                        if label in unseen_idx:
                            has_rare = True
                    if has_rare:
                        continue
                assert GT[0] == image_id
                # im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
                cur_pos_augment = Pos_augment1
                if augment_type > 1:
                    # Concentrate all negatives on the last interaction so the
                    # per-image total stays constant.
                    if i == gt_ids[-1]:  # This must be -1
                        cur_neg_select = Neg_select1 * len(gt_ids)
                    else:
                        cur_neg_select = 0
                else:
                    cur_neg_select = Neg_select1
                # st1 = time.time()
                Pattern, Human_augmented, Object_augmented, action_HO, num_pos = Augmented_HO_Neg_HICO(
                    GT,
                    Trainval_N,
                    im_shape,
                    Pos_augment=cur_pos_augment,
                    Neg_select=cur_neg_select,
                    pattern_type=pattern_type,
                    isalign=isalign)
                # maintain same number of augmentation,
                # print('generate batch read image:', i, time.time() - st1, cur_neg_select, len(Trainval_N[image_id]) if image_id in Trainval_N else 0)
                Pattern_list.append(Pattern)
                Human_augmented_list.append(Human_augmented)
                Object_augmented_list.append(Object_augmented)
                action_HO_list.append(action_HO)
                num_pos_list += num_pos
                # print('item:', Pattern.shape, num_pos)
            # All interactions of this image were filtered out: nothing to yield.
            if len(Pattern_list) <= 0:
                continue
            Pattern = np.concatenate(Pattern_list, axis=0)
            Human_augmented = np.concatenate(Human_augmented_list, axis=0)
            Object_augmented = np.concatenate(Object_augmented_list, axis=0)
            action_HO = np.concatenate(action_HO_list, axis=0)
            num_pos = num_pos_list
            im_orig = np.expand_dims(im_orig, axis=0)
            yield (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
        # Negative augment_type means a single pass (used for evaluation runs).
        if augment_type < 0:
            break
def get_aug_params(Neg_select, Pos_augment, augment_type):
    """Resolve the effective HICO augmentation parameters.

    Known ``augment_type`` codes override the requested counts; any other
    code keeps the caller's values and visits two interactions per image.

    :param Neg_select: requested negatives (fallback value).
    :param Pos_augment: requested positive augmentations (fallback value).
    :param augment_type: scheme code (0, 4 or 5 have fixed presets).
    :return: tuple ``(Neg_select1, Pos_augment1, inters_per_img)``.
    """
    # augment_type -> (inters_per_img, Pos_augment1, Neg_select1)
    presets = {
        0: (1, 15, 60),
        4: (5, 6, 24),
        5: (7, 10, 40),
    }
    inters_per_img, Pos_augment1, Neg_select1 = presets.get(
        augment_type, (2, Pos_augment, Neg_select))
    return Neg_select1, Pos_augment1, inters_per_img
def get_vcoco_aug_params(Neg_select, Pos_augment, augment_type):
    """Resolve the effective V-COCO augmentation parameters.

    Known ``augment_type`` codes override the requested counts; any other
    code keeps the caller's values and visits two interactions per image.
    Code -1 disables augmentation entirely (evaluation-style pass).

    :param Neg_select: requested negatives (fallback value).
    :param Pos_augment: requested positive augmentations (fallback value).
    :param augment_type: scheme code (-1, 0, 1 or 2 have fixed presets).
    :return: tuple ``(Neg_select1, Pos_augment1, inters_per_img)``.
    """
    # augment_type -> (inters_per_img, Pos_augment1, Neg_select1)
    presets = {
        0: (1, 15, 30),
        1: (2, 15, 30),
        2: (3, 15, 30),
        -1: (1, 0, 0),
    }
    inters_per_img, Pos_augment1, Neg_select1 = presets.get(
        augment_type, (2, Pos_augment, Neg_select))
    return Neg_select1, Pos_augment1, inters_per_img
def obtain_data(Pos_augment=15, Neg_select=60, augment_type=0, pattern_type=0, zero_shot_type=0, isalign=False,
                epoch=0, coco=False, neg_type=0):
    """Build the TF1 input pipeline for HICO-DET training via ``generator2``.

    Ground truth is chosen by ``coco``: falsy -> plain HICO GT; 2/3 -> extra
    COCO-derived pickles (3 also merges an extra negative pool); any other
    truthy value -> the merged HICO+COCO GT.

    :param neg_type: unused here — TODO confirm intentional.
    :return: graph tensors ``(image, image_id, num_pos, Human_augmented,
        Object_augmented, action_HO, sp)``.
    """
    with open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb") as f:
        Trainval_N = pickle.load(f, encoding='latin1')
    if not coco:
        with open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb") as f:
            Trainval_GT = pickle.load(f, encoding='latin1')
    elif coco == 2:
        # 115904
        with open(cfg.DATA_DIR + '/' + 'new_list_pickle_2.pkl', "rb") as f:
            Trainval_GT = pickle.load(f, encoding='latin1')
    elif coco == 3:
        # 115904
        with open(cfg.DATA_DIR + '/' + 'new_list_pickle_3.pkl', "rb") as f:
            Trainval_GT = pickle.load(f, encoding='latin1')
        # Merge the extra negative pool into the HICO one, per image id.
        with open(cfg.DATA_DIR + '/' + 'new_neg_dict.pkl', "rb") as f:
            Trainval_N1 = pickle.load(f, encoding='latin1')
        for k in Trainval_N:
            if k in Trainval_N1:
                Trainval_N[k].extend(Trainval_N1[k])
    else:
        # Any other truthy ``coco`` (e.g. True or 1) loads the merged GT.
        print('Trainval_GT_HICO_COCO')
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_COCO.pkl', "rb"), encoding='latin1')
    dataset = tf.data.Dataset.from_generator(partial(generator2, Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                                     augment_type, pattern_type, zero_shot_type, isalign, epoch,
                                                     ), output_types=(
        tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32),
                                             output_shapes=(
                                                 tf.TensorShape([1, None, None, 3]), tf.TensorShape([]),
                                                 tf.TensorShape([]),
                                                 tf.TensorShape([None, 5]), tf.TensorShape([None, 5]),
                                                 tf.TensorShape([None, 600]),
                                                 tf.TensorShape([None, 64, 64, 2])))
    # (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
    # dataset = tf.data.Dataset.from_generator(gen, output_types=(tf.float32, tf.int32),
    #                                          output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([])))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp
def obtain_test_data(Pos_augment=15, Neg_select=60, augment_type=0, with_pose=False, large_neg_for_ho=False,
                     isalign=False):
    """Build a TF1 pipeline over the HICO test split using ``generator2``.

    Both GT and "negative" pools are loaded from the same Test_GT pickle.
    NOTE(review): ``output_types`` declares 8 tensors but ``generator2``
    yields 7 — confirm this function is actually exercised.

    :param large_neg_for_ho: unused here — TODO confirm intentional.
    :return: graph tensors ``(image, image_id, num_pos, Human_augmented,
        Object_augmented, action_HO, sp)``.
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Test_GT_HICO.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Test_GT_HICO.pkl', "rb"), encoding='latin1')
    g = generator2
    dataset = tf.data.Dataset.from_generator(
        partial(g, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type, with_pose, 0, isalign),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32),
        output_shapes=(
            tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
            tf.TensorShape([None, 5]), tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, 2]),
        ))
    # (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
    # dataset = tf.data.Dataset.from_generator(gen, output_types=(tf.float32, tf.int32),
    #                                          output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([])))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp
def obtain_coco_data_hoicoco(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0):
    """Paired-batch TF1 input pipeline for V-COCO with 21-verb HOI-COCO labels.

    Only ``type == 1`` (21-verb ``coco_generator3``) is supported here; the
    24-verb variant lives in ``obtain_coco_data_hoicoco_24``.

    :return: ``([image, image1], [image_id, image_id1], [num_pos, num_pos1],
        [blobs, blobs1])`` graph tensors.
    :raises ValueError: if ``type`` is not 1.
    """
    if type == 1:
        verb_num = 21
        g_func = coco_generator3
    else:
        # Fail fast: previously any other ``type`` (including the default 0!)
        # left verb_num/g_func unbound and crashed later with a NameError.
        raise ValueError('unsupported type: {!r} (only 1 is supported here; '
                         'use obtain_coco_data_hoicoco_24 for type 0)'.format(type))

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Pair consecutive samples so each yielded tuple carries two images,
        # ordered so the first element has at least as many positives.
        buffer = [[] for _ in range(4)]
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            if len(buffer[0]) > 1:
                if buffer[2][0] < buffer[2][1]:
                    # Swap so the first sample has no fewer positives.
                    for i in range(len(buffer)):
                        buffer[i][0], buffer[i][1] = buffer[i][1], buffer[i][0]
                yield buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0], \
                      buffer[0][1], buffer[1][1], buffer[2][1], buffer[3][1]
                buffer = [[] for _ in range(4)]

    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }, tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }), output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }, tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def coco_generator2(Pos_augment = 15, Neg_select=30, augment_type = 0, pattern_type=False, is_zero_shot=0):
    """V-COCO training generator for the 24-verb object-label variant.

    One GT interaction per yielded sample, augmented via
    ``Augmented_HO_spNeg2``; loops forever over a reshuffled index list.

    :param augment_type: unused in this variant — TODO confirm intentional.
    :param pattern_type: unused in this variant — TODO confirm intentional.
    :param is_zero_shot: unused in this variant — TODO confirm intentional.
    :yield: ``(im_orig, image_id, num_pos, blobs)`` with
        ``num_pos == len(action_H)``.
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_24.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO_obj_24.pkl', "rb"), encoding='latin1')
    i = 0
    index_list = list(range(0, len(Trainval_GT)))
    while True:
        # print('Step: ', i)
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            im = cv2.imread(im_file)
            # Mean-subtract in float32 and add the batch dimension (NHWC).
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
            action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = Augmented_HO_spNeg2(GT, Trainval_N, im_shape, Pos_augment, Neg_select)
            blobs = {}
            # blobs['image'] = im_orig
            blobs['H_boxes'] = Human_augmented
            blobs['Hsp_boxes'] = Human_augmented_sp
            blobs['O_boxes'] = Object_augmented
            blobs['gt_class_sp'] = action_sp
            blobs['gt_class_HO'] = action_HO
            blobs['gt_class_H'] = action_H
            blobs['gt_class_C'] = gt_compose
            blobs['Mask_sp'] = mask_sp
            blobs['Mask_HO'] = mask_HO
            blobs['Mask_H'] = mask_H
            blobs['sp'] = Pattern
            # blobs['H_num'] = len(action_H)
            # print(image_id, len(action_H))
            yield (im_orig, image_id, len(action_H), blobs)
            # print(i, image_id, len(Trainval_GT))
            # i += 1
            # i = i % len(Trainval_GT)
def coco_generator3(Pos_augment = 15, Neg_select=30, augment_type = 0, pattern_type=False, is_zero_shot=0):
    """Yield (image, image_id, num_pos, blobs) training samples for V-COCO (21-verb layout).

    Behaves like coco_generator2 but reads the *_obj_21 pickles and augments
    with Augmented_HO_spNeg3.  A negative `augment_type` stops the stream
    after one full pass; otherwise the generator loops forever, reshuffling
    each epoch.
    """
    gt_records = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_21.pkl', "rb"), encoding='latin1')
    neg_records = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO_obj_21.pkl', "rb"), encoding='latin1')
    order = list(range(0, len(gt_records)))
    print(len(order))
    while True:
        np.random.shuffle(order)
        for idx in order:
            gt = gt_records[idx]
            image_id = gt[0]
            im_file = (cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_'
                       + (str(image_id)).zfill(12) + '.jpg')
            raw = cv2.imread(im_file)
            # mean-subtract and add a leading batch dimension of 1
            im_orig = raw.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            (Pattern, Human_augmented_sp, Human_augmented, Object_augmented,
             action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H,
             gt_compose) = Augmented_HO_spNeg3(gt, neg_records, im_shape, Pos_augment, Neg_select)
            blobs = {
                'H_boxes': Human_augmented,
                'Hsp_boxes': Human_augmented_sp,
                'O_boxes': Object_augmented,
                'gt_class_sp': action_sp,
                'gt_class_HO': action_HO,
                'gt_class_H': action_H,
                'gt_class_C': gt_compose,
                'Mask_sp': mask_sp,
                'Mask_HO': mask_HO,
                'Mask_H': mask_H,
                'sp': Pattern,
            }
            yield (im_orig, image_id, len(action_H), blobs)
        # negative augment_type means "one epoch only"
        if augment_type < 0:
            break
def coco_generator_atl(Pos_augment = 15, Neg_select=0, augment_type = 0, pattern_type=False, is_zero_shot=0, type =0, vcoco_type = 21):
    """
    Here, the name semi means atl. For objects, we do not have verb labels. Thus, we can only provide object id.

    Endless generator of (im_orig, image_id, num_pos, blobs) tuples built from
    object-only ("semi"/ATL) annotations.  `type` selects both the pickle file
    and the image corpus used to resolve image_id to a file path:
      0 -> COCO 2014 (570834 entries), 2 -> HICO train2015 (68389 entries),
      3 -> COCO + HICO combined (COCO ids offset by MAX_HICO_ID),
      4/5 -> V-COCO variants, anything else -> COCO 2014 default pickle
      (types 6 and 7 only change the image-path resolution below).
    `vcoco_type` (24 or 21) picks the augmentation routine.  Negatives are
    always empty here: without verb labels there is nothing to mine.
    """
    print(type)
    if type == 0:
        # coco 2014 570834 length
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_semi.pkl', "rb"), encoding='latin1')
    elif type == 2:
        # hico 68389 length
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_hico_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
    elif type == 3:
        # both
        Trainval_GT_hico = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_hico_obj_semi_21.pkl', "rb"),
                                       encoding='latin1')
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
        # shift COCO ids above the HICO id range so the two sets do not collide
        for item in Trainval_GT:
            item[0] += MAX_HICO_ID
        Trainval_GT.extend(Trainval_GT_hico)
    elif type == 4:
        # --- 42631
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_vcoco_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
    elif type == 5:
        # vcoco
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_vcoco1_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
    else:
        # coco 2014 train 570834
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_semi_21.pkl', "rb"), encoding='latin1')
    i = 0
    index_list = list(range(0, len(Trainval_GT)))
    # vcoco_type picks the 24-verb or 21-verb augmentation routine
    if vcoco_type == 24:
        g_func = Augmented_HO_spNeg2
    else:
        g_func = Augmented_HO_spNeg3
    while True:
        # print('Step: ', i)
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            # resolve image_id to an image file; the directory layout depends on `type`
            if type == 2:
                im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (
                    str(image_id)).zfill(
                    8) + '.jpg'
            elif type == 3:
                if image_id < MAX_HICO_ID:
                    # ids below the offset come from HICO train2015
                    tmp_id = image_id
                    im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (
                        str(image_id)).zfill(
                        8) + '.jpg'
                    pass
                else:
                    # ids above the offset are COCO images; undo the offset
                    tmp_id = image_id - MAX_HICO_ID
                    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(tmp_id)).zfill(
                        12) + '.jpg'
                    import os
                    # fall back to the val2014 folder when the train file is absent
                    if not os.path.exists(im_file):
                        im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (
                            str(tmp_id)).zfill(12) + '.jpg'
                        if not os.path.exists(im_file):
                            print(im_file)
                import os
                # final sanity check; a printed path means the image is missing
                if not os.path.exists(im_file):
                    print(im_file)
            elif type == 6:
                im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                    12) + '.jpg'
                import os
                if not os.path.exists(im_file):
                    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (
                        str(image_id)).zfill(12) + '.jpg'
                    if not os.path.exists(im_file):
                        print(im_file)
            elif type == 7:
                if image_id >= MAX_COCO_ID:
                    # obj365
                    tmp_id = image_id - MAX_COCO_ID
                    im_file = cfg.LOCAL_DATA + '/dataset/Objects365/Images/train/train/obj365_train_' + (str(tmp_id)).zfill(
                        12) + '.jpg'
                    pass
                else:
                    tmp_id = image_id
                    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(tmp_id)).zfill(
                        12) + '.jpg'
                    import os
                    if not os.path.exists(im_file):
                        im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (
                            str(tmp_id)).zfill(12) + '.jpg'
                        if not os.path.exists(im_file):
                            print(im_file)
                import os
                if not os.path.exists(im_file):
                    print(im_file)
            else:
                im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                    12) + '.jpg'
            im = cv2.imread(im_file)
            # mean-subtract and add a leading batch dimension of 1
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            # negatives dict is empty ({}): ATL data has no verb labels to mine from
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
            action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = g_func(GT, {}, im_shape, Pos_augment, Neg_select)
            blobs = {}
            # blobs['image'] = im_orig
            blobs['H_boxes'] = Human_augmented
            blobs['Hsp_boxes'] = Human_augmented_sp
            blobs['O_boxes'] = Object_augmented
            blobs['gt_class_sp'] = action_sp
            blobs['gt_class_HO'] = action_HO
            blobs['gt_class_H'] = action_H
            blobs['gt_class_C'] = gt_compose
            blobs['Mask_sp'] = mask_sp
            blobs['Mask_HO'] = mask_HO
            blobs['Mask_H'] = mask_H
            blobs['sp'] = Pattern
            # blobs['H_num'] = len(action_H)
            # print(image_id, len(action_H))
            yield (im_orig, image_id, len(action_H), blobs)
            # print(i, image_id, len(Trainval_GT))
            # i += 1
            # i = i % len(Trainval_GT)
def obtain_coco_data2(Pos_augment = 15, Neg_select=30, augment_type = 0, type =0 ):
    """Build a one-shot tf.data iterator over V-COCO training samples.

    Args:
        Pos_augment: number of positive-box augmentations per GT pair.
        Neg_select: number of negative pairs sampled per image.
        augment_type: forwarded to the underlying python generator.
        type: label layout selector: 0 -> coco_generator2 (24 verbs),
            1 -> coco_generator3 (21 verbs), 2 -> coco_generator1 (29 verbs).

    Returns:
        (image, image_id, num_pos, blobs) tensors from a one-shot iterator.

    Raises:
        ValueError: if `type` is not 0, 1 or 2.  (Previously an unknown
            `type` left g_func/verb_num unbound and crashed with a
            confusing NameError further down.)
    """
    if type == 0:
        compose_classes = 222
        verb_num = 24
        g_func = coco_generator2
    elif type == 1:
        compose_classes = 222
        verb_num = 21
        g_func = coco_generator3
    elif type == 2:
        compose_classes = 238
        verb_num = 29
        g_func = coco_generator1
    else:
        raise ValueError('obtain_coco_data2: unsupported type %r (expected 0, 1 or 2)' % (type,))
    # Per-element dtypes/shapes of the blobs dict yielded by the generator.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, verb_num]),
        'gt_class_HO': tf.TensorShape([None, verb_num]),
        'gt_class_H': tf.TensorShape([None, verb_num]),
        'gt_class_C': tf.TensorShape([None, compose_classes]),
        'Mask_sp': tf.TensorShape([None, verb_num]),
        'Mask_HO': tf.TensorShape([None, verb_num]),
        'Mask_H': tf.TensorShape([None, verb_num]),
        'sp': tf.TensorShape([None, 64, 64, 2]),
    }
    dataset = tf.data.Dataset.from_generator(
        partial(g_func, Pos_augment, Neg_select, augment_type),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types),
        output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]),
                       tf.TensorShape([]), blob_shapes))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs = iterator.get_next()
    return image, image_id, num_pos, blobs
def obtain_coco_data_atl(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0, vcoco_type=21):
    """Build a one-shot tf.data iterator that pairs supervised and ATL samples.

    Each dataset element is an 8-tuple: one fully supervised V-COCO sample
    (image, image_id, num_pos, blobs) followed by one object-only ("ATL")
    sample with the same structure.

    Args:
        Pos_augment / Neg_select / augment_type / pattern_type / is_zero_shot:
            forwarded to both underlying python generators.
        type: ATL corpus selector, forwarded to coco_generator_atl.
        vcoco_type: 21 or 24 verbs; selects the supervised generator.

    Returns:
        [image, image1], [image_id, image_id1], [num_pos, num_pos1],
        [blobs, blobs1] tensors from a one-shot iterator.
    """
    if vcoco_type == 21:
        verb_num = 21
        g_func = coco_generator3
    elif vcoco_type == 24:
        verb_num = 24
        g_func = coco_generator2
    else:
        # default to the 21-verb layout
        verb_num = 21
        g_func = coco_generator3

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Zip the supervised stream with the ATL stream, one sample from each
        # per step.  (The old version buffered each pair into lists and timing
        # counters it never used; the values are simply yielded directly.)
        semi_func = coco_generator_atl(Pos_augment, Neg_select, augment_type, pattern_type,
                                       is_zero_shot, type, vcoco_type=vcoco_type)
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            im_orig1, image_id1, num_pos1, blobs1 = next(semi_func)
            yield im_orig, image_id, num_pos, blobs, im_orig1, image_id1, num_pos1, blobs1

    # Both halves of the element share one blob dtype/shape signature.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, verb_num]),
        'gt_class_HO': tf.TensorShape([None, verb_num]),
        'gt_class_H': tf.TensorShape([None, verb_num]),
        'gt_class_C': tf.TensorShape([None, 222]),
        'Mask_sp': tf.TensorShape([None, verb_num]),
        'Mask_HO': tf.TensorShape([None, verb_num]),
        'Mask_H': tf.TensorShape([None, verb_num]),
        'sp': tf.TensorShape([None, 64, 64, 2]),
    }
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types,
                      tf.float32, tf.int32, tf.int32, blob_types),
        output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]), blob_shapes,
                       tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]), blob_shapes))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def obtain_coco_data_hoicoco_24_atl(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0):
    """Paired supervised + ATL pipeline fixed to the 24-verb V-COCO setting.

    Like obtain_coco_data_atl but always uses coco_generator2 (24 verbs) and
    calls coco_generator_atl with its default vcoco_type.

    Returns:
        [image, image1], [image_id, image_id1], [num_pos, num_pos1],
        [blobs, blobs1] tensors from a one-shot iterator.
    """
    verb_num = 24
    g_func = coco_generator2

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Zip the supervised stream with the ATL stream, one sample from each
        # per step.  (The old version buffered each pair into lists and timing
        # counters it never used; the values are simply yielded directly.)
        semi_func = coco_generator_atl(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot, type)
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            im_orig1, image_id1, num_pos1, blobs1 = next(semi_func)
            yield im_orig, image_id, num_pos, blobs, im_orig1, image_id1, num_pos1, blobs1

    # Both halves of the element share one blob dtype/shape signature.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, verb_num]),
        'gt_class_HO': tf.TensorShape([None, verb_num]),
        'gt_class_H': tf.TensorShape([None, verb_num]),
        'gt_class_C': tf.TensorShape([None, 222]),
        'Mask_sp': tf.TensorShape([None, verb_num]),
        'Mask_HO': tf.TensorShape([None, verb_num]),
        'Mask_H': tf.TensorShape([None, verb_num]),
        'sp': tf.TensorShape([None, 64, 64, 2]),
    }
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types,
                      tf.float32, tf.int32, tf.int32, blob_types),
        output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]), blob_shapes,
                       tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]), blob_shapes))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def get_epoch_iters(model_name):
    """Return the number of iterations in one training epoch for `model_name`.

    Zero-shot variants ('zsnrare', 'zs_') train on a reduced split (20000
    iters), 'zsrare' uses 40000, and everything else uses the full 43273
    iterations.  Checks keep the original precedence order.
    """
    # Use the idiomatic `in` operator instead of calling __contains__ directly.
    if 'zsnrare' in model_name or 'zs_' in model_name:
        return 20000
    if 'zsrare' in model_name:
        return 40000
    return 43273
def obtain_data_vcl_hico(Pos_augment=15, Neg_select=60, augment_type=0, with_pose=False, zero_shot_type=0, isalign=False,
                         epoch=0):
    """Build the VCL paired-image HICO training pipeline.

    Each tf.data element packs TWO consecutive generator2 samples: the two
    images stay separate tensors, while their boxes, 600-way labels and
    spatial patterns are concatenated along axis 0.  `split_idx` marks where
    the first sample's rows end so the caller can slice the halves apart.

    Returns:
        [image, image2], image_id (shape [2]), num_pos (shape [2]),
        [H1, H2], [O1, O2], [action1, action2], [sp1, sp2]
        — the last four are pairs produced by slicing at split_idx.
    """
    # we do not use pose, thus we remove it.
    with open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb") as f:
        Trainval_GT = pickle.load(f, encoding='latin1')
    with open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb") as f:
        Trainval_N = pickle.load(f, encoding='latin1')
    g_func = generator2
    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # Accumulate two samples in `buffer`, then emit them as one fused element.
        buffer = [[] for i in range(7)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern in g_func(Trainval_GT,
                                                                                                        Trainval_N,
                                                                                                        Pos_augment,
                                                                                                        Neg_select,
                                                                                                        augment_type,
                                                                                                        with_pose,
                                                                                                        zero_shot_type,
                                                                                                        isalign, epoch):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            if len(buffer[0]) > 1:
                # print("inner:", buffer[0][0].shape, buffer[0][1].shape, buffer[1], buffer[2], buffer[3].shape, buffer[4].shape, buffer[5].shape, buffer[6].shape)
                # print("inner:", buffer[1], buffer[2][0], buffer[2][1], buffer[3][0].shape, buffer[3][1].shape, buffer[5][0].shape, buffer[5][1].shape)
                # yield buffer[0][0], buffer[0][1], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6]
                # Order the pair so the first sample has at least as many
                # human boxes as the second (split_idx is then the bigger half).
                if len(buffer[3][0]) < len(buffer[3][1]):
                    # make sure the second batch is less.
                    for i in range(len(buffer)):
                        tmp = buffer[i][0]
                        buffer[i][0] = buffer[i][1]
                        buffer[i][1] = tmp
                split_idx = len(buffer[5][0])
                # Concatenate boxes/labels/patterns of both samples along axis 0.
                # NOTE(review): the trailing `buffer[-1:]` re-appends the raw
                # Pattern list as an 8th entry that is never yielded — vestigial.
                buffer = buffer[:3] + [np.concatenate(item, axis=0) for item in buffer[3:]] + buffer[-1:]
                yield buffer[0][0], buffer[0][1], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[
                    6], split_idx
                buffer = [[] for i in range(7)]
                # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
                # count_time += 1
                # print('generate batch:', time.time() - st, "average;", avg_time)
                # st = time.time()
    if with_pose:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([1, None, None, 3]),
            tf.TensorShape([1, None, None, 3]),
            tf.TensorShape([2, ]),
            tf.TensorShape([2, ]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image2, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return [image, image2], image_id, num_pos, [Human_augmented[:split_idx], Human_augmented[split_idx:]], \
           [Object_augmented[:split_idx], Object_augmented[split_idx:]], \
           [action_HO[:split_idx], action_HO[split_idx:]], \
           [sp[:split_idx], sp[split_idx:]]
def Augmented_HO_Neg_HICO_inner(GT, negs, shape, Pos_augment, Neg_select, with_pose):
    """Produce augmented positive pairs plus sampled negatives for one HICO GT entry.

    Returns (Pattern, Human_augmented, Object_augmented, action_HO, num_pos):
    (N, 64, 64, C) spatial patterns, (N, 5) box arrays, an (N, 600)
    multi-label matrix and the number of positive rows.
    """
    image_id = GT[0]
    human_box = GT[2]
    object_box = GT[3]
    pose_list = []
    if Pos_augment < 0:
        # Negative-only mode: no positive rows at all.
        action_HO = np.empty([0, 600])
        Human_augmented = np.empty([0, 5])
        Object_augmented = np.empty([0, 5])
        num_pos = 0
    else:
        base_action = Generate_action_HICO(GT[1])
        action_HO = base_action
        Human_augmented = Augmented_box(human_box, shape, image_id, Pos_augment)
        Object_augmented = Augmented_box(object_box, shape, image_id, Pos_augment)
        # Only as many rows as both augmented box sets can pair up.
        pair_count = min(len(Human_augmented), len(Object_augmented))
        Human_augmented = Human_augmented[:pair_count]
        Object_augmented = Object_augmented[:pair_count]
        num_pos = len(Human_augmented)
        # Replicate the label row once per augmented pair (first row exists).
        for _ in range(num_pos - 1):
            action_HO = np.concatenate((action_HO, base_action), axis=0)
        if with_pose:
            pose_list = [GT[5]] * num_pos
    num_pos_neg = len(Human_augmented)
    pattern_channel = 3 if with_pose else 2
    Pattern = get_pattern(Human_augmented, Object_augmented, num_pos_neg, pose_list, shape, with_pose)
    if negs is not None and Neg_select > 0:
        if len(negs) < Neg_select:
            # Fewer negatives than requested: take them all, in order.
            Neg_select = len(negs)
            List = range(Neg_select)
        else:
            List = random.sample(range(len(negs)), Neg_select)
        _Human_augmented, _Object_augmented, _action_HO, _Pattern = get_neg_items(List, negs, shape, with_pose)
        Human_augmented = np.concatenate([Human_augmented, _Human_augmented], axis=0)
        Object_augmented = np.concatenate([Object_augmented, _Object_augmented], axis=0)
        action_HO = np.concatenate([action_HO, _action_HO], axis=0)
        Pattern = np.concatenate([Pattern, _Pattern], axis=0)
        num_pos_neg = len(Human_augmented)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, pattern_channel)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 600)
    return Pattern, Human_augmented, Object_augmented, action_HO, num_pos
def get_pattern(Human_augmented, Object_augmented, num_pos_neg, pose_list, shape, with_pose):
    """Stack the 64x64 two-channel spatial-pattern map for each box pair.

    Note: `pose_list`, `shape` and `with_pose` are currently unused here
    (the channel count is fixed at 2); they are kept for interface
    compatibility with the callers.

    Returns an array of shape (num_pos_neg, 64, 64, 2).
    """
    pattern_channel = 2
    # Collect per-pair maps and concatenate ONCE at the end: the previous
    # per-iteration np.concatenate re-copied the accumulator, i.e. O(n^2).
    parts = [np.empty((0, 64, 64, pattern_channel), dtype=np.float32)]
    for i in range(num_pos_neg):
        # boxes are stored as [image_idx, x1, y1, x2, y2]; drop the index column
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:])
        parts.append(Pattern_.reshape(1, 64, 64, pattern_channel))
    return np.concatenate(parts, axis=0)
def get_neg_items(neg_select_list, negs, shape, with_pose):
    """Assemble boxes, labels and spatial patterns for selected negatives.

    Args:
        neg_select_list: indices into `negs` to use.
        negs: negative records; Neg[2]/Neg[3] are human/object boxes,
            Neg[1] is the HOI class id, Neg[7] the pose when with_pose.
        shape: image shape, forwarded to get_pattern.
        with_pose: if True, also collect each negative's pose.

    Returns:
        (Human_augmented, Object_augmented, action_HO, Pattern).
    """
    pose_list = []
    # Accumulate rows in Python lists and concatenate ONCE: the previous
    # per-item np.concatenate grew quadratically with the sample count.
    human_rows = [np.empty([0, 5])]
    object_rows = [np.empty([0, 5])]
    action_rows = [np.empty([0, 600])]
    for idx in neg_select_list:
        Neg = negs[idx]
        if with_pose:
            pose_list.append(Neg[7])
        # box rows are [image_idx=0, x1, y1, x2, y2]
        human_rows.append(np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5))
        object_rows.append(np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5))
        action_rows.append(Generate_action_HICO([Neg[1]]))
    Human_augmented = np.concatenate(human_rows, axis=0)
    Object_augmented = np.concatenate(object_rows, axis=0)
    action_HO = np.concatenate(action_rows, axis=0)
    num_pos_neg = len(Human_augmented)
    Pattern = get_pattern(Human_augmented, Object_augmented, num_pos_neg, pose_list, shape, with_pose)
    return Human_augmented, Object_augmented, action_HO, Pattern
| [
"numpy.maximum",
"numpy.empty",
"numpy.floor",
"numpy.ones",
"pickle.load",
"numpy.round",
"random.randint",
"os.path.exists",
"tensorflow.TensorShape",
"numpy.random.shuffle",
"functools.partial",
"copy.deepcopy",
"numpy.minimum",
"numpy.asarray",
"numpy.concatenate",
"numpy.zeros",
... | [((4698, 4719), 'numpy.zeros', 'np.zeros', (['(64, 64, 2)'], {}), '((64, 64, 2))\n', (4706, 4719), True, 'import numpy as np\n'), ((6309, 6353), 'numpy.zeros', 'np.zeros', (['(num_joints + 1, 2)'], {'dtype': '"""int32"""'}), "((num_joints + 1, 2), dtype='int32')\n", (6317, 6353), True, 'import numpy as np\n'), ((7235, 7277), 'numpy.zeros', 'np.zeros', (['(size, size, 1)'], {'dtype': '"""float32"""'}), "((size, size, 1), dtype='float32')\n", (7243, 7277), True, 'import numpy as np\n'), ((7713, 7741), 'numpy.maximum', 'np.maximum', (['boxA[0]', 'boxB[0]'], {}), '(boxA[0], boxB[0])\n', (7723, 7741), True, 'import numpy as np\n'), ((7754, 7782), 'numpy.maximum', 'np.maximum', (['boxA[1]', 'boxB[1]'], {}), '(boxA[1], boxB[1])\n', (7764, 7782), True, 'import numpy as np\n'), ((7795, 7823), 'numpy.minimum', 'np.minimum', (['boxA[2]', 'boxB[2]'], {}), '(boxA[2], boxB[2])\n', (7805, 7823), True, 'import numpy as np\n'), ((7836, 7864), 'numpy.minimum', 'np.minimum', (['boxA[3]', 'boxB[3]'], {}), '(boxA[3], boxB[3])\n', (7846, 7864), True, 'import numpy as np\n'), ((7874, 7910), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin + 1.0)', '(0.0)'], {}), '(ixmax - ixmin + 1.0, 0.0)\n', (7884, 7910), True, 'import numpy as np\n'), ((7918, 7954), 'numpy.maximum', 'np.maximum', (['(iymax - iymin + 1.0)', '(0.0)'], {}), '(iymax - iymin + 1.0, 0.0)\n', (7928, 7954), True, 'import numpy as np\n'), ((9500, 9514), 'numpy.zeros', 'np.zeros', (['nums'], {}), '(nums)\n', (9508, 9514), True, 'import numpy as np\n'), ((10015, 10034), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (10025, 10034), False, 'import cv2\n'), ((12858, 12900), 'numpy.empty', 'np.empty', (['(0, 64, 64, 2)'], {'dtype': 'np.float32'}), '((0, 64, 64, 2), dtype=np.float32)\n', (12866, 12900), True, 'import numpy as np\n'), ((19707, 19749), 'numpy.empty', 'np.empty', (['(0, 64, 64, 2)'], {'dtype': 'np.float32'}), '((0, 64, 64, 2), dtype=np.float32)\n', (19715, 19749), True, 'import numpy as np\n'), 
((24391, 24417), 'numpy.ones', 'np.ones', (['[1, 24]', 'np.int32'], {}), '([1, 24], np.int32)\n', (24398, 24417), True, 'import numpy as np\n'), ((24433, 24459), 'numpy.ones', 'np.ones', (['[1, 24]', 'np.int32'], {}), '([1, 24], np.int32)\n', (24440, 24459), True, 'import numpy as np\n'), ((24474, 24500), 'numpy.ones', 'np.ones', (['[1, 24]', 'np.int32'], {}), '([1, 24], np.int32)\n', (24481, 24500), True, 'import numpy as np\n'), ((26251, 26293), 'numpy.empty', 'np.empty', (['(0, 64, 64, 2)'], {'dtype': 'np.float32'}), '((0, 64, 64, 2), dtype=np.float32)\n', (26259, 26293), True, 'import numpy as np\n'), ((31637, 31663), 'numpy.ones', 'np.ones', (['[1, 21]', 'np.int32'], {}), '([1, 21], np.int32)\n', (31644, 31663), True, 'import numpy as np\n'), ((31679, 31705), 'numpy.ones', 'np.ones', (['[1, 21]', 'np.int32'], {}), '([1, 21], np.int32)\n', (31686, 31705), True, 'import numpy as np\n'), ((31720, 31746), 'numpy.ones', 'np.ones', (['[1, 21]', 'np.int32'], {}), '([1, 21], np.int32)\n', (31727, 31746), True, 'import numpy as np\n'), ((33497, 33539), 'numpy.empty', 'np.empty', (['(0, 64, 64, 2)'], {'dtype': 'np.float32'}), '((0, 64, 64, 2), dtype=np.float32)\n', (33505, 33539), True, 'import numpy as np\n'), ((35638, 35651), 'numpy.zeros', 'np.zeros', (['(600)'], {}), '(600)\n', (35646, 35651), True, 'import numpy as np\n'), ((36080, 36099), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (36090, 36099), False, 'import cv2\n'), ((36953, 36981), 'numpy.empty', 'np.empty', (['[1, 5]', 'np.float64'], {}), '([1, 5], np.float64)\n', (36961, 36981), True, 'import numpy as np\n'), ((48085, 48141), 'numpy.empty', 'np.empty', (['(0, 64, 64, pattern_channel)'], {'dtype': 'np.float32'}), '((0, 64, 64, pattern_channel), dtype=np.float32)\n', (48093, 48141), True, 'import numpy as np\n'), ((75672, 75728), 'numpy.empty', 'np.empty', (['(0, 64, 64, pattern_channel)'], {'dtype': 'np.float32'}), '((0, 64, 64, pattern_channel), dtype=np.float32)\n', (75680, 75728), True, 
'import numpy as np\n'), ((105504, 105515), 'time.time', 'time.time', ([], {}), '()\n', (105513, 105515), False, 'import time\n'), ((151970, 152026), 'numpy.empty', 'np.empty', (['(0, 64, 64, pattern_channel)'], {'dtype': 'np.float32'}), '((0, 64, 64, pattern_channel), dtype=np.float32)\n', (151978, 152026), True, 'import numpy as np\n'), ((152517, 152535), 'numpy.empty', 'np.empty', (['[0, 600]'], {}), '([0, 600])\n', (152525, 152535), True, 'import numpy as np\n'), ((152558, 152574), 'numpy.empty', 'np.empty', (['[0, 5]'], {}), '([0, 5])\n', (152566, 152574), True, 'import numpy as np\n'), ((152598, 152614), 'numpy.empty', 'np.empty', (['[0, 5]'], {}), '([0, 5])\n', (152606, 152614), True, 'import numpy as np\n'), ((4136, 4155), 'numpy.round', 'np.round', (['human_box'], {}), '(human_box)\n', (4144, 4155), True, 'import numpy as np\n'), ((4157, 4177), 'numpy.round', 'np.round', (['object_box'], {}), '(object_box)\n', (4165, 4177), True, 'import numpy as np\n'), ((9941, 9964), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (9955, 9964), False, 'import os\n'), ((12955, 13002), 'numpy.concatenate', 'np.concatenate', (['(action_HO, action_HO_)'], {'axis': '(0)'}), '((action_HO, action_HO_), axis=0)\n', (12969, 13002), True, 'import numpy as np\n'), ((13022, 13067), 'numpy.concatenate', 'np.concatenate', (['(action_H, action_H_)'], {'axis': '(0)'}), '((action_H, action_H_), axis=0)\n', (13036, 13067), True, 'import numpy as np\n'), ((13085, 13126), 'numpy.concatenate', 'np.concatenate', (['(mask_H, mask_H_)'], {'axis': '(0)'}), '((mask_H, mask_H_), axis=0)\n', (13099, 13126), True, 'import numpy as np\n'), ((13183, 13226), 'numpy.concatenate', 'np.concatenate', (['(mask_HO, mask_HO_)'], {'axis': '(0)'}), '((mask_HO, mask_HO_), axis=0)\n', (13197, 13226), True, 'import numpy as np\n'), ((13510, 13553), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': '(0)'}), '((Pattern, Pattern_), axis=0)\n', (13524, 13553), True, 
'import numpy as np\n'), ((19804, 19851), 'numpy.concatenate', 'np.concatenate', (['(action_sp, action_sp_)'], {'axis': '(0)'}), '((action_sp, action_sp_), axis=0)\n', (19818, 19851), True, 'import numpy as np\n'), ((19872, 19919), 'numpy.concatenate', 'np.concatenate', (['(action_HO, action_HO_)'], {'axis': '(0)'}), '((action_HO, action_HO_), axis=0)\n', (19886, 19919), True, 'import numpy as np\n'), ((19939, 19984), 'numpy.concatenate', 'np.concatenate', (['(action_H, action_H_)'], {'axis': '(0)'}), '((action_H, action_H_), axis=0)\n', (19953, 19984), True, 'import numpy as np\n'), ((20010, 20067), 'numpy.concatenate', 'np.concatenate', (['(action_compose, action_compose_)'], {'axis': '(0)'}), '((action_compose, action_compose_), axis=0)\n', (20024, 20067), True, 'import numpy as np\n'), ((20086, 20129), 'numpy.concatenate', 'np.concatenate', (['(mask_HO, mask_HO_)'], {'axis': '(0)'}), '((mask_HO, mask_HO_), axis=0)\n', (20100, 20129), True, 'import numpy as np\n'), ((20147, 20188), 'numpy.concatenate', 'np.concatenate', (['(mask_H, mask_H_)'], {'axis': '(0)'}), '((mask_H, mask_H_), axis=0)\n', (20161, 20188), True, 'import numpy as np\n'), ((20245, 20288), 'numpy.concatenate', 'np.concatenate', (['(mask_sp, mask_sp_)'], {'axis': '(0)'}), '((mask_sp, mask_sp_), axis=0)\n', (20259, 20288), True, 'import numpy as np\n'), ((20689, 20732), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': '(0)'}), '((Pattern, Pattern_), axis=0)\n', (20703, 20732), True, 'import numpy as np\n'), ((26549, 26596), 'numpy.concatenate', 'np.concatenate', (['(action_sp, action_sp_)'], {'axis': '(0)'}), '((action_sp, action_sp_), axis=0)\n', (26563, 26596), True, 'import numpy as np\n'), ((26617, 26664), 'numpy.concatenate', 'np.concatenate', (['(action_HO, action_HO_)'], {'axis': '(0)'}), '((action_HO, action_HO_), axis=0)\n', (26631, 26664), True, 'import numpy as np\n'), ((26684, 26729), 'numpy.concatenate', 'np.concatenate', (['(action_H, action_H_)'], {'axis': 
'(0)'}), '((action_H, action_H_), axis=0)\n', (26698, 26729), True, 'import numpy as np\n'), ((26755, 26812), 'numpy.concatenate', 'np.concatenate', (['(action_compose, action_compose_)'], {'axis': '(0)'}), '((action_compose, action_compose_), axis=0)\n', (26769, 26812), True, 'import numpy as np\n'), ((26831, 26874), 'numpy.concatenate', 'np.concatenate', (['(mask_HO, mask_HO_)'], {'axis': '(0)'}), '((mask_HO, mask_HO_), axis=0)\n', (26845, 26874), True, 'import numpy as np\n'), ((26892, 26933), 'numpy.concatenate', 'np.concatenate', (['(mask_H, mask_H_)'], {'axis': '(0)'}), '((mask_H, mask_H_), axis=0)\n', (26906, 26933), True, 'import numpy as np\n'), ((26990, 27033), 'numpy.concatenate', 'np.concatenate', (['(mask_sp, mask_sp_)'], {'axis': '(0)'}), '((mask_sp, mask_sp_), axis=0)\n', (27004, 27033), True, 'import numpy as np\n'), ((27434, 27477), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': '(0)'}), '((Pattern, Pattern_), axis=0)\n', (27448, 27477), True, 'import numpy as np\n'), ((27494, 27566), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, shape[0] // 16, shape[1] // 16, 1)', 'dtype': 'np.float32'}), '(shape=(1, shape[0] // 16, shape[1] // 16, 1), dtype=np.float32)\n', (27502, 27566), True, 'import numpy as np\n'), ((33795, 33842), 'numpy.concatenate', 'np.concatenate', (['(action_sp, action_sp_)'], {'axis': '(0)'}), '((action_sp, action_sp_), axis=0)\n', (33809, 33842), True, 'import numpy as np\n'), ((33863, 33910), 'numpy.concatenate', 'np.concatenate', (['(action_HO, action_HO_)'], {'axis': '(0)'}), '((action_HO, action_HO_), axis=0)\n', (33877, 33910), True, 'import numpy as np\n'), ((33930, 33975), 'numpy.concatenate', 'np.concatenate', (['(action_H, action_H_)'], {'axis': '(0)'}), '((action_H, action_H_), axis=0)\n', (33944, 33975), True, 'import numpy as np\n'), ((34001, 34058), 'numpy.concatenate', 'np.concatenate', (['(action_compose, action_compose_)'], {'axis': '(0)'}), '((action_compose, action_compose_), axis=0)\n', 
(34015, 34058), True, 'import numpy as np\n'), ((34077, 34120), 'numpy.concatenate', 'np.concatenate', (['(mask_HO, mask_HO_)'], {'axis': '(0)'}), '((mask_HO, mask_HO_), axis=0)\n', (34091, 34120), True, 'import numpy as np\n'), ((34138, 34179), 'numpy.concatenate', 'np.concatenate', (['(mask_H, mask_H_)'], {'axis': '(0)'}), '((mask_H, mask_H_), axis=0)\n', (34152, 34179), True, 'import numpy as np\n'), ((34236, 34279), 'numpy.concatenate', 'np.concatenate', (['(mask_sp, mask_sp_)'], {'axis': '(0)'}), '((mask_sp, mask_sp_), axis=0)\n', (34250, 34279), True, 'import numpy as np\n'), ((34765, 34808), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': '(0)'}), '((Pattern, Pattern_), axis=0)\n', (34779, 34808), True, 'import numpy as np\n'), ((34825, 34897), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, shape[0] // 16, shape[1] // 16, 1)', 'dtype': 'np.float32'}), '(shape=(1, shape[0] // 16, shape[1] // 16, 1), dtype=np.float32)\n', (34833, 34897), True, 'import numpy as np\n'), ((39166, 39177), 'time.time', 'time.time', ([], {}), '()\n', (39175, 39177), False, 'import time\n'), ((43155, 43242), 'functools.partial', 'partial', (['generator3', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type'], {}), '(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select,\n augment_type)\n', (43162, 43242), False, 'from functools import partial\n'), ((48492, 48535), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': '(0)'}), '((Pattern, Pattern_), axis=0)\n', (48506, 48535), True, 'import numpy as np\n'), ((49528, 49539), 'time.time', 'time.time', ([], {}), '()\n', (49537, 49539), False, 'import time\n'), ((53706, 53793), 'functools.partial', 'partial', (['generator3', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type'], {}), '(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select,\n augment_type)\n', (53713, 53793), False, 'from functools import partial\n'), ((61320, 61331), 
'time.time', 'time.time', ([], {}), '()\n', (61329, 61331), False, 'import time\n'), ((65144, 65231), 'functools.partial', 'partial', (['generator3', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type'], {}), '(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select,\n augment_type)\n', (65151, 65231), False, 'from functools import partial\n'), ((66752, 66785), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (66763, 66785), False, 'import pickle\n'), ((66940, 66973), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (66951, 66973), False, 'import pickle\n'), ((67157, 67168), 'time.time', 'time.time', ([], {}), '()\n', (67166, 67168), False, 'import time\n'), ((71508, 71595), 'functools.partial', 'partial', (['generator3', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type'], {}), '(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select,\n augment_type)\n', (71515, 71595), False, 'from functools import partial\n'), ((74147, 74194), 'numpy.concatenate', 'np.concatenate', (['(action_HO, action_HO_)'], {'axis': '(0)'}), '((action_HO, action_HO_), axis=0)\n', (74161, 74194), True, 'import numpy as np\n'), ((76079, 76122), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': '(0)'}), '((Pattern, Pattern_), axis=0)\n', (76093, 76122), True, 'import numpy as np\n'), ((77598, 77629), 'copy.deepcopy', 'copy.deepcopy', (['img_id_index_map'], {}), '(img_id_index_map)\n', (77611, 77629), False, 'import copy\n'), ((77667, 77696), 'numpy.random.shuffle', 'np.random.shuffle', (['index_list'], {}), '(index_list)\n', (77684, 77696), True, 'import numpy as np\n'), ((84774, 84803), 'numpy.random.shuffle', 'np.random.shuffle', (['index_list'], {}), '(index_list)\n', (84791, 84803), True, 'import numpy as np\n'), ((86637, 86686), 'functools.partial', 'partial', (['g', 'Pos_augment', 'Neg_select', 'augment_type'], {}), 
'(g, Pos_augment, Neg_select, augment_type)\n', (86644, 86686), False, 'from functools import partial\n'), ((89108, 89119), 'time.time', 'time.time', ([], {}), '()\n', (89117, 89119), False, 'import time\n'), ((90292, 90379), 'functools.partial', 'partial', (['generator3', 'Pos_augment', 'Neg_select', 'augment_type', 'with_pose', 'is_zero_shot'], {}), '(generator3, Pos_augment, Neg_select, augment_type, with_pose,\n is_zero_shot)\n', (90299, 90379), False, 'from functools import partial\n'), ((94532, 94543), 'time.time', 'time.time', ([], {}), '()\n', (94541, 94543), False, 'import time\n'), ((95720, 95810), 'functools.partial', 'partial', (['generator3', 'Pos_augment', 'Neg_select', 'augment_type', 'pattern_type', 'is_zero_shot'], {}), '(generator3, Pos_augment, Neg_select, augment_type, pattern_type,\n is_zero_shot)\n', (95727, 95810), False, 'from functools import partial\n'), ((105590, 105621), 'copy.deepcopy', 'copy.deepcopy', (['img_id_index_map'], {}), '(img_id_index_map)\n', (105603, 105621), False, 'import copy\n'), ((105659, 105689), 'numpy.random.shuffle', 'np.random.shuffle', (['img_id_list'], {}), '(img_id_list)\n', (105676, 105689), True, 'import numpy as np\n'), ((111133, 111166), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (111144, 111166), False, 'import pickle\n'), ((112121, 112254), 'functools.partial', 'partial', (['generator2', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type', 'pattern_type', 'zero_shot_type', 'isalign', 'epoch'], {}), '(generator2, Trainval_GT, Trainval_N, Pos_augment, Neg_select,\n augment_type, pattern_type, zero_shot_type, isalign, epoch)\n', (112128, 112254), False, 'from functools import partial\n'), ((114116, 114217), 'functools.partial', 'partial', (['g', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type', 'with_pose', '(0)', 'isalign'], {}), '(g, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type,\n with_pose, 
0, isalign)\n', (114123, 114217), False, 'from functools import partial\n'), ((115703, 115714), 'time.time', 'time.time', ([], {}), '()\n', (115712, 115714), False, 'import time\n'), ((117000, 117090), 'functools.partial', 'partial', (['generator3', 'Pos_augment', 'Neg_select', 'augment_type', 'pattern_type', 'is_zero_shot'], {}), '(generator3, Pos_augment, Neg_select, augment_type, pattern_type,\n is_zero_shot)\n', (117007, 117090), False, 'from functools import partial\n'), ((121328, 121357), 'numpy.random.shuffle', 'np.random.shuffle', (['index_list'], {}), '(index_list)\n', (121345, 121357), True, 'import numpy as np\n'), ((123344, 123373), 'numpy.random.shuffle', 'np.random.shuffle', (['index_list'], {}), '(index_list)\n', (123361, 123373), True, 'import numpy as np\n'), ((126656, 126685), 'numpy.random.shuffle', 'np.random.shuffle', (['index_list'], {}), '(index_list)\n', (126673, 126685), True, 'import numpy as np\n'), ((131622, 131676), 'functools.partial', 'partial', (['g_func', 'Pos_augment', 'Neg_select', 'augment_type'], {}), '(g_func, Pos_augment, Neg_select, augment_type)\n', (131629, 131676), False, 'from functools import partial\n'), ((133971, 133982), 'time.time', 'time.time', ([], {}), '()\n', (133980, 133982), False, 'import time\n'), ((135312, 135402), 'functools.partial', 'partial', (['generator3', 'Pos_augment', 'Neg_select', 'augment_type', 'pattern_type', 'is_zero_shot'], {}), '(generator3, Pos_augment, Neg_select, augment_type, pattern_type,\n is_zero_shot)\n', (135319, 135402), False, 'from functools import partial\n'), ((139539, 139550), 'time.time', 'time.time', ([], {}), '()\n', (139548, 139550), False, 'import time\n'), ((140855, 140945), 'functools.partial', 'partial', (['generator3', 'Pos_augment', 'Neg_select', 'augment_type', 'pattern_type', 'is_zero_shot'], {}), '(generator3, Pos_augment, Neg_select, augment_type, pattern_type,\n is_zero_shot)\n', (140862, 140945), False, 'from functools import partial\n'), ((145360, 145393), 
'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (145371, 145393), False, 'import pickle\n'), ((145487, 145520), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (145498, 145520), False, 'import pickle\n'), ((145704, 145715), 'time.time', 'time.time', ([], {}), '()\n', (145713, 145715), False, 'import time\n'), ((148493, 148580), 'functools.partial', 'partial', (['generator3', 'Trainval_GT', 'Trainval_N', 'Pos_augment', 'Neg_select', 'augment_type'], {}), '(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select,\n augment_type)\n', (148500, 148580), False, 'from functools import partial\n'), ((149839, 149857), 'numpy.empty', 'np.empty', (['[0, 600]'], {}), '([0, 600])\n', (149847, 149857), True, 'import numpy as np\n'), ((149884, 149900), 'numpy.empty', 'np.empty', (['[0, 5]'], {}), '([0, 5])\n', (149892, 149900), True, 'import numpy as np\n'), ((149928, 149944), 'numpy.empty', 'np.empty', (['[0, 5]'], {}), '([0, 5])\n', (149936, 149944), True, 'import numpy as np\n'), ((151196, 151255), 'numpy.concatenate', 'np.concatenate', (['[Human_augmented, _Human_augmented]'], {'axis': '(0)'}), '([Human_augmented, _Human_augmented], axis=0)\n', (151210, 151255), True, 'import numpy as np\n'), ((151283, 151344), 'numpy.concatenate', 'np.concatenate', (['[Object_augmented, _Object_augmented]'], {'axis': '(0)'}), '([Object_augmented, _Object_augmented], axis=0)\n', (151297, 151344), True, 'import numpy as np\n'), ((151365, 151412), 'numpy.concatenate', 'np.concatenate', (['[action_HO, _action_HO]'], {'axis': '(0)'}), '([action_HO, _action_HO], axis=0)\n', (151379, 151412), True, 'import numpy as np\n'), ((151431, 151474), 'numpy.concatenate', 'np.concatenate', (['[Pattern, _Pattern]'], {'axis': '(0)'}), '([Pattern, _Pattern], axis=0)\n', (151445, 151474), True, 'import numpy as np\n'), ((152376, 152419), 'numpy.concatenate', 'np.concatenate', (['(Pattern, Pattern_)'], {'axis': 
'(0)'}), '((Pattern, Pattern_), axis=0)\n', (152390, 152419), True, 'import numpy as np\n'), ((8269, 8318), 'numpy.array', 'np.array', (['[0, bbox[0], bbox[1], bbox[2], bbox[3]]'], {}), '([0, bbox[0], bbox[1], bbox[2], bbox[3]])\n', (8277, 8318), True, 'import numpy as np\n'), ((9314, 9349), 'numpy.concatenate', 'np.concatenate', (['(box, box_)'], {'axis': '(0)'}), '((box, box_), axis=0)\n', (9328, 9349), True, 'import numpy as np\n'), ((11004, 11108), 'numpy.asarray', 'np.asarray', (['[1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n 1, 1, 0, 1]'], {}), '([1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, \n 1, 0, 1, 1, 1, 1, 0, 1])\n', (11014, 11108), True, 'import numpy as np\n'), ((11142, 11246), 'numpy.asarray', 'np.asarray', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1])\n', (11152, 11246), True, 'import numpy as np\n'), ((17416, 17520), 'numpy.asarray', 'np.asarray', (['[1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n 1, 1, 0, 1]'], {}), '([1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, \n 1, 0, 1, 1, 1, 1, 0, 1])\n', (17426, 17520), True, 'import numpy as np\n'), ((17555, 17659), 'numpy.asarray', 'np.asarray', (['[1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n 1, 1, 0, 1]'], {}), '([1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, \n 1, 0, 1, 1, 1, 1, 0, 1])\n', (17565, 17659), True, 'import numpy as np\n'), ((17693, 17797), 'numpy.asarray', 'np.asarray', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1])\n', (17703, 17797), True, 'import numpy as np\n'), ((37323, 37339), 'numpy.floor', 'np.floor', (['height'], {}), '(height)\n', (37331, 
37339), True, 'import numpy as np\n'), ((38155, 38190), 'numpy.concatenate', 'np.concatenate', (['(box, box_)'], {'axis': '(0)'}), '((box, box_), axis=0)\n', (38169, 38190), True, 'import numpy as np\n'), ((46536, 46585), 'numpy.concatenate', 'np.concatenate', (['[Human_augmented, aug_neg_humans]'], {}), '([Human_augmented, aug_neg_humans])\n', (46550, 46585), True, 'import numpy as np\n'), ((46617, 46665), 'numpy.concatenate', 'np.concatenate', (['[Object_augmented, aug_neg_objs]'], {}), '([Object_augmented, aug_neg_objs])\n', (46631, 46665), True, 'import numpy as np\n'), ((46690, 46734), 'numpy.concatenate', 'np.concatenate', (['[action_HO, aug_neg_actions]'], {}), '([action_HO, aug_neg_actions])\n', (46704, 46734), True, 'import numpy as np\n'), ((77746, 77779), 'numpy.random.shuffle', 'np.random.shuffle', (['running_map[k]'], {}), '(running_map[k])\n', (77763, 77779), True, 'import numpy as np\n'), ((78290, 78309), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (78300, 78309), False, 'import cv2\n'), ((78491, 78525), 'numpy.empty', 'np.empty', (['[0, 5]'], {'dtype': 'np.float32'}), '([0, 5], dtype=np.float32)\n', (78499, 78525), True, 'import numpy as np\n'), ((78559, 78593), 'numpy.empty', 'np.empty', (['[0, 5]'], {'dtype': 'np.float32'}), '([0, 5], dtype=np.float32)\n', (78567, 78593), True, 'import numpy as np\n'), ((78625, 78659), 'numpy.empty', 'np.empty', (['[0, 5]'], {'dtype': 'np.float32'}), '([0, 5], dtype=np.float32)\n', (78633, 78659), True, 'import numpy as np\n'), ((78695, 78730), 'numpy.empty', 'np.empty', (['[0, 29]'], {'dtype': 'np.float32'}), '([0, 29], dtype=np.float32)\n', (78703, 78730), True, 'import numpy as np\n'), ((78766, 78801), 'numpy.empty', 'np.empty', (['[0, 29]'], {'dtype': 'np.float32'}), '([0, 29], dtype=np.float32)\n', (78774, 78801), True, 'import numpy as np\n'), ((78836, 78871), 'numpy.empty', 'np.empty', (['[0, 29]'], {'dtype': 'np.float32'}), '([0, 29], dtype=np.float32)\n', (78844, 78871), True, 'import 
numpy as np\n'), ((78906, 78942), 'numpy.empty', 'np.empty', (['[0, 238]'], {'dtype': 'np.float32'}), '([0, 238], dtype=np.float32)\n', (78914, 78942), True, 'import numpy as np\n'), ((78974, 79009), 'numpy.empty', 'np.empty', (['[0, 29]'], {'dtype': 'np.float32'}), '([0, 29], dtype=np.float32)\n', (78982, 79009), True, 'import numpy as np\n'), ((79041, 79076), 'numpy.empty', 'np.empty', (['[0, 29]'], {'dtype': 'np.float32'}), '([0, 29], dtype=np.float32)\n', (79049, 79076), True, 'import numpy as np\n'), ((79107, 79142), 'numpy.empty', 'np.empty', (['[0, 29]'], {'dtype': 'np.float32'}), '([0, 29], dtype=np.float32)\n', (79115, 79142), True, 'import numpy as np\n'), ((79169, 79211), 'numpy.empty', 'np.empty', (['[0, 64, 64, 2]'], {'dtype': 'np.float32'}), '([0, 64, 64, 2], dtype=np.float32)\n', (79177, 79211), True, 'import numpy as np\n'), ((85055, 85074), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (85065, 85074), False, 'import cv2\n'), ((105739, 105772), 'numpy.random.shuffle', 'np.random.shuffle', (['running_map[k]'], {}), '(running_map[k])\n', (105756, 105772), True, 'import numpy as np\n'), ((107047, 107066), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (107057, 107066), False, 'import cv2\n'), ((109250, 109286), 'numpy.concatenate', 'np.concatenate', (['Pattern_list'], {'axis': '(0)'}), '(Pattern_list, axis=0)\n', (109264, 109286), True, 'import numpy as np\n'), ((109317, 109361), 'numpy.concatenate', 'np.concatenate', (['Human_augmented_list'], {'axis': '(0)'}), '(Human_augmented_list, axis=0)\n', (109331, 109361), True, 'import numpy as np\n'), ((109393, 109438), 'numpy.concatenate', 'np.concatenate', (['Object_augmented_list'], {'axis': '(0)'}), '(Object_augmented_list, axis=0)\n', (109407, 109438), True, 'import numpy as np\n'), ((109463, 109501), 'numpy.concatenate', 'np.concatenate', (['action_HO_list'], {'axis': '(0)'}), '(action_HO_list, axis=0)\n', (109477, 109501), True, 'import numpy as np\n'), ((109559, 
109590), 'numpy.expand_dims', 'np.expand_dims', (['im_orig'], {'axis': '(0)'}), '(im_orig, axis=0)\n', (109573, 109590), True, 'import numpy as np\n'), ((111285, 111318), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (111296, 111318), False, 'import pickle\n'), ((121610, 121629), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (121620, 121629), False, 'import cv2\n'), ((123626, 123645), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (123636, 123645), False, 'import cv2\n'), ((129709, 129728), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (129719, 129728), False, 'import cv2\n'), ((150502, 150549), 'numpy.concatenate', 'np.concatenate', (['(action_HO, action_HO_)'], {'axis': '(0)'}), '((action_HO, action_HO_), axis=0)\n', (150516, 150549), True, 'import numpy as np\n'), ((8699, 8715), 'random.randint', 'randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (8706, 8715), False, 'from random import randint\n'), ((8774, 8790), 'numpy.floor', 'np.floor', (['height'], {}), '(height)\n', (8782, 8790), True, 'import numpy as np\n'), ((8846, 8861), 'numpy.floor', 'np.floor', (['width'], {}), '(width)\n', (8854, 8861), True, 'import numpy as np\n'), ((9187, 9217), 'numpy.array', 'np.array', (['[H_0, H_1, H_2, H_3]'], {}), '([H_0, H_1, H_2, H_3])\n', (9195, 9217), True, 'import numpy as np\n'), ((37248, 37264), 'random.randint', 'randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (37255, 37264), False, 'from random import randint\n'), ((37305, 37321), 'numpy.floor', 'np.floor', (['height'], {}), '(height)\n', (37313, 37321), True, 'import numpy as np\n'), ((37464, 37479), 'numpy.floor', 'np.floor', (['width'], {}), '(width)\n', (37472, 37479), True, 'import numpy as np\n'), ((43402, 43439), 'tensorflow.TensorShape', 'tf.TensorShape', (['[bnum, None, None, 3]'], {}), '([bnum, None, None, 3])\n', (43416, 43439), True, 'import tensorflow as tf\n'), ((43453, 43475), 
'tensorflow.TensorShape', 'tf.TensorShape', (['[bnum]'], {}), '([bnum])\n', (43467, 43475), True, 'import tensorflow as tf\n'), ((43491, 43509), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (43505, 43509), True, 'import tensorflow as tf\n'), ((43523, 43548), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (43537, 43548), True, 'import tensorflow as tf\n'), ((43562, 43587), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (43576, 43587), True, 'import tensorflow as tf\n'), ((43601, 43628), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (43615, 43628), True, 'import tensorflow as tf\n'), ((43642, 43689), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, pattern_channel]'], {}), '([None, 64, 64, pattern_channel])\n', (43656, 43689), True, 'import tensorflow as tf\n'), ((43703, 43721), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (43717, 43721), True, 'import tensorflow as tf\n'), ((53953, 53987), 'tensorflow.TensorShape', 'tf.TensorShape', (['[2, None, None, 3]'], {}), '([2, None, None, 3])\n', (53967, 53987), True, 'import tensorflow as tf\n'), ((54001, 54020), 'tensorflow.TensorShape', 'tf.TensorShape', (['[2]'], {}), '([2])\n', (54015, 54020), True, 'import tensorflow as tf\n'), ((54036, 54054), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (54050, 54054), True, 'import tensorflow as tf\n'), ((54068, 54093), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (54082, 54093), True, 'import tensorflow as tf\n'), ((54107, 54132), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (54121, 54132), True, 'import tensorflow as tf\n'), ((54146, 54173), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (54160, 54173), True, 'import tensorflow as tf\n'), ((54187, 54234), 'tensorflow.TensorShape', 
'tf.TensorShape', (['[None, 64, 64, pattern_channel]'], {}), '([None, 64, 64, pattern_channel])\n', (54201, 54234), True, 'import tensorflow as tf\n'), ((54248, 54266), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (54262, 54266), True, 'import tensorflow as tf\n'), ((65391, 65428), 'tensorflow.TensorShape', 'tf.TensorShape', (['[bnum, None, None, 3]'], {}), '([bnum, None, None, 3])\n', (65405, 65428), True, 'import tensorflow as tf\n'), ((65442, 65464), 'tensorflow.TensorShape', 'tf.TensorShape', (['[bnum]'], {}), '([bnum])\n', (65456, 65464), True, 'import tensorflow as tf\n'), ((65480, 65498), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (65494, 65498), True, 'import tensorflow as tf\n'), ((65512, 65537), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (65526, 65537), True, 'import tensorflow as tf\n'), ((65551, 65576), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (65565, 65576), True, 'import tensorflow as tf\n'), ((65590, 65617), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (65604, 65617), True, 'import tensorflow as tf\n'), ((65631, 65678), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, pattern_channel]'], {}), '([None, 64, 64, pattern_channel])\n', (65645, 65678), True, 'import tensorflow as tf\n'), ((65692, 65710), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (65706, 65710), True, 'import tensorflow as tf\n'), ((70153, 70203), 'numpy.concatenate', 'np.concatenate', (['[pos_h_boxes, neg_h_boxes]'], {'axis': '(0)'}), '([pos_h_boxes, neg_h_boxes], axis=0)\n', (70167, 70203), True, 'import numpy as np\n'), ((71755, 71796), 'tensorflow.TensorShape', 'tf.TensorShape', (['[bnum + 1, None, None, 3]'], {}), '([bnum + 1, None, None, 3])\n', (71769, 71796), True, 'import tensorflow as tf\n'), ((71810, 71836), 'tensorflow.TensorShape', 'tf.TensorShape', (['[bnum + 1]'], {}), 
'([bnum + 1])\n', (71824, 71836), True, 'import tensorflow as tf\n'), ((71852, 71870), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (71866, 71870), True, 'import tensorflow as tf\n'), ((71884, 71909), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (71898, 71909), True, 'import tensorflow as tf\n'), ((71923, 71948), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (71937, 71948), True, 'import tensorflow as tf\n'), ((71962, 71989), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (71976, 71989), True, 'import tensorflow as tf\n'), ((72003, 72050), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, pattern_channel]'], {}), '([None, 64, 64, pattern_channel])\n', (72017, 72050), True, 'import tensorflow as tf\n'), ((72064, 72082), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (72078, 72082), True, 'import tensorflow as tf\n'), ((78181, 78204), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (78195, 78204), False, 'import os\n'), ((80565, 80624), 'numpy.concatenate', 'np.concatenate', (["(blobs['H_boxes'], Human_augmented)"], {'axis': '(0)'}), "((blobs['H_boxes'], Human_augmented), axis=0)\n", (80579, 80624), True, 'import numpy as np\n'), ((80662, 80726), 'numpy.concatenate', 'np.concatenate', (["(blobs['Hsp_boxes'], Human_augmented_sp)"], {'axis': '(0)'}), "((blobs['Hsp_boxes'], Human_augmented_sp), axis=0)\n", (80676, 80726), True, 'import numpy as np\n'), ((80762, 80822), 'numpy.concatenate', 'np.concatenate', (["(blobs['O_boxes'], Object_augmented)"], {'axis': '(0)'}), "((blobs['O_boxes'], Object_augmented), axis=0)\n", (80776, 80822), True, 'import numpy as np\n'), ((80862, 80919), 'numpy.concatenate', 'np.concatenate', (["(blobs['gt_class_sp'], action_sp)"], {'axis': '(0)'}), "((blobs['gt_class_sp'], action_sp), axis=0)\n", (80876, 80919), True, 'import numpy as np\n'), ((80959, 
81016), 'numpy.concatenate', 'np.concatenate', (["(blobs['gt_class_HO'], action_HO)"], {'axis': '(0)'}), "((blobs['gt_class_HO'], action_HO), axis=0)\n", (80973, 81016), True, 'import numpy as np\n'), ((81055, 81110), 'numpy.concatenate', 'np.concatenate', (["(blobs['gt_class_H'], action_H)"], {'axis': '(0)'}), "((blobs['gt_class_H'], action_H), axis=0)\n", (81069, 81110), True, 'import numpy as np\n'), ((81149, 81210), 'numpy.concatenate', 'np.concatenate', (["(blobs['gt_class_C'], action_compose)"], {'axis': '(0)'}), "((blobs['gt_class_C'], action_compose), axis=0)\n", (81163, 81210), True, 'import numpy as np\n'), ((81246, 81297), 'numpy.concatenate', 'np.concatenate', (["(blobs['Mask_sp'], mask_sp)"], {'axis': '(0)'}), "((blobs['Mask_sp'], mask_sp), axis=0)\n", (81260, 81297), True, 'import numpy as np\n'), ((81333, 81384), 'numpy.concatenate', 'np.concatenate', (["(blobs['Mask_HO'], mask_HO)"], {'axis': '(0)'}), "((blobs['Mask_HO'], mask_HO), axis=0)\n", (81347, 81384), True, 'import numpy as np\n'), ((81419, 81468), 'numpy.concatenate', 'np.concatenate', (["(blobs['Mask_H'], mask_H)"], {'axis': '(0)'}), "((blobs['Mask_H'], mask_H), axis=0)\n", (81433, 81468), True, 'import numpy as np\n'), ((81499, 81545), 'numpy.concatenate', 'np.concatenate', (["(blobs['sp'], Pattern)"], {'axis': '(0)'}), "((blobs['sp'], Pattern), axis=0)\n", (81513, 81545), True, 'import numpy as np\n'), ((87656, 87690), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (87670, 87690), True, 'import tensorflow as tf\n'), ((87692, 87710), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (87706, 87710), True, 'import tensorflow as tf\n'), ((87712, 87730), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (87726, 87730), True, 'import tensorflow as tf\n'), ((91463, 91497), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (91477, 91497), True, 'import 
tensorflow as tf\n'), ((91499, 91517), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (91513, 91517), True, 'import tensorflow as tf\n'), ((91519, 91537), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (91533, 91537), True, 'import tensorflow as tf\n'), ((92565, 92599), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (92579, 92599), True, 'import tensorflow as tf\n'), ((92601, 92619), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (92615, 92619), True, 'import tensorflow as tf\n'), ((92621, 92639), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (92635, 92639), True, 'import tensorflow as tf\n'), ((96729, 96763), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (96743, 96763), True, 'import tensorflow as tf\n'), ((96765, 96783), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (96779, 96783), True, 'import tensorflow as tf\n'), ((96785, 96803), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (96799, 96803), True, 'import tensorflow as tf\n'), ((97698, 97732), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (97712, 97732), True, 'import tensorflow as tf\n'), ((97734, 97752), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (97748, 97752), True, 'import tensorflow as tf\n'), ((97754, 97772), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (97768, 97772), True, 'import tensorflow as tf\n'), ((106936, 106959), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (106950, 106959), False, 'import os\n'), ((111458, 111491), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (111469, 111491), False, 'import pickle\n'), ((112574, 112608), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 
3]'], {}), '([1, None, None, 3])\n', (112588, 112608), True, 'import tensorflow as tf\n'), ((112610, 112628), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (112624, 112628), True, 'import tensorflow as tf\n'), ((112679, 112697), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (112693, 112697), True, 'import tensorflow as tf\n'), ((112748, 112773), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (112762, 112773), True, 'import tensorflow as tf\n'), ((112775, 112800), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (112789, 112800), True, 'import tensorflow as tf\n'), ((112851, 112878), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (112865, 112878), True, 'import tensorflow as tf\n'), ((112929, 112962), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (112943, 112962), True, 'import tensorflow as tf\n'), ((114379, 114413), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (114393, 114413), True, 'import tensorflow as tf\n'), ((114415, 114433), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (114429, 114433), True, 'import tensorflow as tf\n'), ((114435, 114453), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (114449, 114453), True, 'import tensorflow as tf\n'), ((114467, 114492), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (114481, 114492), True, 'import tensorflow as tf\n'), ((114494, 114519), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (114508, 114519), True, 'import tensorflow as tf\n'), ((114533, 114560), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (114547, 114560), True, 'import tensorflow as tf\n'), ((114574, 114607), 'tensorflow.TensorShape', 'tf.TensorShape', 
(['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (114588, 114607), True, 'import tensorflow as tf\n'), ((118174, 118208), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (118188, 118208), True, 'import tensorflow as tf\n'), ((118210, 118228), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (118224, 118228), True, 'import tensorflow as tf\n'), ((118230, 118248), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (118244, 118248), True, 'import tensorflow as tf\n'), ((119312, 119346), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (119326, 119346), True, 'import tensorflow as tf\n'), ((119348, 119366), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (119362, 119366), True, 'import tensorflow as tf\n'), ((119368, 119386), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (119382, 119386), True, 'import tensorflow as tf\n'), ((132100, 132134), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (132114, 132134), True, 'import tensorflow as tf\n'), ((132136, 132154), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (132150, 132154), True, 'import tensorflow as tf\n'), ((132156, 132174), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (132170, 132174), True, 'import tensorflow as tf\n'), ((136486, 136520), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (136500, 136520), True, 'import tensorflow as tf\n'), ((136522, 136540), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (136536, 136540), True, 'import tensorflow as tf\n'), ((136542, 136560), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (136556, 136560), True, 'import tensorflow as tf\n'), ((137624, 137658), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, 
None, None, 3]'], {}), '([1, None, None, 3])\n', (137638, 137658), True, 'import tensorflow as tf\n'), ((137660, 137678), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (137674, 137678), True, 'import tensorflow as tf\n'), ((137680, 137698), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (137694, 137698), True, 'import tensorflow as tf\n'), ((142029, 142063), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (142043, 142063), True, 'import tensorflow as tf\n'), ((142065, 142083), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (142079, 142083), True, 'import tensorflow as tf\n'), ((142085, 142103), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (142099, 142103), True, 'import tensorflow as tf\n'), ((143167, 143201), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (143181, 143201), True, 'import tensorflow as tf\n'), ((143203, 143221), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (143217, 143221), True, 'import tensorflow as tf\n'), ((143223, 143241), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (143237, 143241), True, 'import tensorflow as tf\n'), ((148752, 148786), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (148766, 148786), True, 'import tensorflow as tf\n'), ((148800, 148834), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None, None, 3]'], {}), '([1, None, None, 3])\n', (148814, 148834), True, 'import tensorflow as tf\n'), ((148848, 148867), 'tensorflow.TensorShape', 'tf.TensorShape', (['[2]'], {}), '([2])\n', (148862, 148867), True, 'import tensorflow as tf\n'), ((148883, 148902), 'tensorflow.TensorShape', 'tf.TensorShape', (['[2]'], {}), '([2])\n', (148897, 148902), True, 'import tensorflow as tf\n'), ((148918, 148943), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 
5]'], {}), '([None, 5])\n', (148932, 148943), True, 'import tensorflow as tf\n'), ((148957, 148982), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (148971, 148982), True, 'import tensorflow as tf\n'), ((148996, 149023), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 600]'], {}), '([None, 600])\n', (149010, 149023), True, 'import tensorflow as tf\n'), ((149037, 149084), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, pattern_channel]'], {}), '([None, 64, 64, pattern_channel])\n', (149051, 149084), True, 'import tensorflow as tf\n'), ((149098, 149116), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (149112, 149116), True, 'import tensorflow as tf\n'), ((8756, 8772), 'numpy.floor', 'np.floor', (['height'], {}), '(height)\n', (8764, 8772), True, 'import numpy as np\n'), ((8829, 8844), 'numpy.floor', 'np.floor', (['width'], {}), '(width)\n', (8837, 8844), True, 'import numpy as np\n'), ((9248, 9281), 'numpy.array', 'np.array', (['[0, H_0, H_1, H_2, H_3]'], {}), '([0, H_0, H_1, H_2, H_3])\n', (9256, 9281), True, 'import numpy as np\n'), ((37364, 37385), 'numpy.sign', 'np.sign', (['height_shift'], {}), '(height_shift)\n', (37371, 37385), True, 'import numpy as np\n'), ((37447, 37462), 'numpy.floor', 'np.floor', (['width'], {}), '(width)\n', (37455, 37462), True, 'import numpy as np\n'), ((37509, 37529), 'numpy.sign', 'np.sign', (['width_shift'], {}), '(width_shift)\n', (37516, 37529), True, 'import numpy as np\n'), ((37942, 37972), 'numpy.array', 'np.array', (['[H_0, H_1, H_2, H_3]'], {}), '([H_0, H_1, H_2, H_3])\n', (37950, 37972), True, 'import numpy as np\n'), ((38089, 38122), 'numpy.array', 'np.array', (['[0, H_0, H_1, H_2, H_3]'], {}), '([0, H_0, H_1, H_2, H_3])\n', (38097, 38122), True, 'import numpy as np\n'), ((41842, 41892), 'numpy.concatenate', 'np.concatenate', (['[pos_h_boxes, neg_h_boxes]'], {'axis': '(0)'}), '([pos_h_boxes, neg_h_boxes], axis=0)\n', (41856, 41892), True, 'import 
numpy as np\n'), ((52045, 52113), 'numpy.concatenate', 'np.concatenate', (['[buffer[ii][0][:pos1], buffer[ii][1][:pos2]]'], {'axis': '(0)'}), '([buffer[ii][0][:pos1], buffer[ii][1][:pos2]], axis=0)\n', (52059, 52113), True, 'import numpy as np\n'), ((52148, 52216), 'numpy.concatenate', 'np.concatenate', (['[buffer[ii][0][pos1:], buffer[ii][1][pos2:]]'], {'axis': '(0)'}), '([buffer[ii][0][pos1:], buffer[ii][1][pos2:]], axis=0)\n', (52162, 52216), True, 'import numpy as np\n'), ((52251, 52301), 'numpy.concatenate', 'np.concatenate', (['[pos_h_boxes, neg_h_boxes]'], {'axis': '(0)'}), '([pos_h_boxes, neg_h_boxes], axis=0)\n', (52265, 52301), True, 'import numpy as np\n'), ((63980, 64030), 'numpy.concatenate', 'np.concatenate', (['[pos_h_boxes, neg_h_boxes]'], {'axis': '(0)'}), '([pos_h_boxes, neg_h_boxes], axis=0)\n', (63994, 64030), True, 'import numpy as np\n'), ((71009, 71040), 'numpy.concatenate', 'np.concatenate', (['im_list'], {'axis': '(0)'}), '(im_list, axis=0)\n', (71023, 71040), True, 'import numpy as np\n'), ((87765, 87790), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (87779, 87790), True, 'import tensorflow as tf\n'), ((87817, 87842), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (87831, 87842), True, 'import tensorflow as tf\n'), ((87867, 87892), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (87881, 87892), True, 'import tensorflow as tf\n'), ((87921, 87947), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (87935, 87947), True, 'import tensorflow as tf\n'), ((87976, 88002), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (87990, 88002), True, 'import tensorflow as tf\n'), ((88030, 88056), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (88044, 88056), True, 'import tensorflow as tf\n'), ((88084, 88111), 'tensorflow.TensorShape', 
'tf.TensorShape', (['[None, 238]'], {}), '([None, 238])\n', (88098, 88111), True, 'import tensorflow as tf\n'), ((88136, 88162), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (88150, 88162), True, 'import tensorflow as tf\n'), ((88187, 88213), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (88201, 88213), True, 'import tensorflow as tf\n'), ((88237, 88263), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (88251, 88263), True, 'import tensorflow as tf\n'), ((88283, 88316), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 3]'], {}), '([None, 64, 64, 3])\n', (88297, 88316), True, 'import tensorflow as tf\n'), ((91638, 91663), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (91652, 91663), True, 'import tensorflow as tf\n'), ((91723, 91748), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (91737, 91748), True, 'import tensorflow as tf\n'), ((91806, 91831), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (91820, 91831), True, 'import tensorflow as tf\n'), ((91893, 91919), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (91907, 91919), True, 'import tensorflow as tf\n'), ((91981, 92007), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (91995, 92007), True, 'import tensorflow as tf\n'), ((92068, 92094), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (92082, 92094), True, 'import tensorflow as tf\n'), ((92155, 92182), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 238]'], {}), '([None, 238])\n', (92169, 92182), True, 'import tensorflow as tf\n'), ((92240, 92266), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (92254, 92266), True, 'import tensorflow as tf\n'), ((92324, 92350), 
'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (92338, 92350), True, 'import tensorflow as tf\n'), ((92407, 92433), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (92421, 92433), True, 'import tensorflow as tf\n'), ((92486, 92519), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 3]'], {}), '([None, 64, 64, 3])\n', (92500, 92519), True, 'import tensorflow as tf\n'), ((92740, 92765), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (92754, 92765), True, 'import tensorflow as tf\n'), ((92825, 92850), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (92839, 92850), True, 'import tensorflow as tf\n'), ((92908, 92933), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (92922, 92933), True, 'import tensorflow as tf\n'), ((92995, 93021), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (93009, 93021), True, 'import tensorflow as tf\n'), ((93083, 93109), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (93097, 93109), True, 'import tensorflow as tf\n'), ((93170, 93196), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (93184, 93196), True, 'import tensorflow as tf\n'), ((93257, 93284), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 238]'], {}), '([None, 238])\n', (93271, 93284), True, 'import tensorflow as tf\n'), ((93342, 93368), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (93356, 93368), True, 'import tensorflow as tf\n'), ((93426, 93452), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (93440, 93452), True, 'import tensorflow as tf\n'), ((93509, 93535), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 29]'], {}), '([None, 29])\n', (93523, 93535), True, 'import tensorflow as tf\n'), ((93588, 93621), 
'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 3]'], {}), '([None, 64, 64, 3])\n', (93602, 93621), True, 'import tensorflow as tf\n'), ((96868, 96893), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (96882, 96893), True, 'import tensorflow as tf\n'), ((96935, 96960), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (96949, 96960), True, 'import tensorflow as tf\n'), ((97001, 97026), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (97015, 97026), True, 'import tensorflow as tf\n'), ((97066, 97091), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (97080, 97091), True, 'import tensorflow as tf\n'), ((97135, 97167), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (97149, 97167), True, 'import tensorflow as tf\n'), ((97211, 97243), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (97225, 97243), True, 'import tensorflow as tf\n'), ((97286, 97318), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (97300, 97318), True, 'import tensorflow as tf\n'), ((97361, 97388), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (97375, 97388), True, 'import tensorflow as tf\n'), ((97428, 97460), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (97442, 97460), True, 'import tensorflow as tf\n'), ((97500, 97532), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (97514, 97532), True, 'import tensorflow as tf\n'), ((97571, 97603), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (97585, 97603), True, 'import tensorflow as tf\n'), ((97638, 97671), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 
64, 64, 2])\n', (97652, 97671), True, 'import tensorflow as tf\n'), ((97837, 97862), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (97851, 97862), True, 'import tensorflow as tf\n'), ((97904, 97929), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (97918, 97929), True, 'import tensorflow as tf\n'), ((97970, 97995), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (97984, 97995), True, 'import tensorflow as tf\n'), ((98035, 98060), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (98049, 98060), True, 'import tensorflow as tf\n'), ((98104, 98136), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (98118, 98136), True, 'import tensorflow as tf\n'), ((98180, 98212), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (98194, 98212), True, 'import tensorflow as tf\n'), ((98255, 98287), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (98269, 98287), True, 'import tensorflow as tf\n'), ((98330, 98357), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (98344, 98357), True, 'import tensorflow as tf\n'), ((98397, 98429), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (98411, 98429), True, 'import tensorflow as tf\n'), ((98469, 98501), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (98483, 98501), True, 'import tensorflow as tf\n'), ((98540, 98572), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (98554, 98572), True, 'import tensorflow as tf\n'), ((98607, 98640), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (98621, 98640), True, 'import tensorflow as tf\n'), ((111631, 
111664), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (111642, 111664), False, 'import pickle\n'), ((111762, 111795), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (111773, 111795), False, 'import pickle\n'), ((118349, 118374), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (118363, 118374), True, 'import tensorflow as tf\n'), ((118434, 118459), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (118448, 118459), True, 'import tensorflow as tf\n'), ((118517, 118542), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (118531, 118542), True, 'import tensorflow as tf\n'), ((118604, 118636), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (118618, 118636), True, 'import tensorflow as tf\n'), ((118698, 118730), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (118712, 118730), True, 'import tensorflow as tf\n'), ((118791, 118823), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (118805, 118823), True, 'import tensorflow as tf\n'), ((118884, 118911), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (118898, 118911), True, 'import tensorflow as tf\n'), ((118969, 119001), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (118983, 119001), True, 'import tensorflow as tf\n'), ((119059, 119091), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (119073, 119091), True, 'import tensorflow as tf\n'), ((119148, 119180), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (119162, 119180), True, 'import tensorflow as tf\n'), ((119233, 119266), 'tensorflow.TensorShape', 
'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (119247, 119266), True, 'import tensorflow as tf\n'), ((119487, 119512), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (119501, 119512), True, 'import tensorflow as tf\n'), ((119572, 119597), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (119586, 119597), True, 'import tensorflow as tf\n'), ((119655, 119680), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (119669, 119680), True, 'import tensorflow as tf\n'), ((119742, 119774), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (119756, 119774), True, 'import tensorflow as tf\n'), ((119836, 119868), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (119850, 119868), True, 'import tensorflow as tf\n'), ((119929, 119961), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (119943, 119961), True, 'import tensorflow as tf\n'), ((120022, 120049), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (120036, 120049), True, 'import tensorflow as tf\n'), ((120107, 120139), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (120121, 120139), True, 'import tensorflow as tf\n'), ((120197, 120229), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (120211, 120229), True, 'import tensorflow as tf\n'), ((120286, 120318), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (120300, 120318), True, 'import tensorflow as tf\n'), ((120371, 120404), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (120385, 120404), True, 'import tensorflow as tf\n'), ((132239, 132264), 'tensorflow.TensorShape', 'tf.TensorShape', 
(['[None, 5]'], {}), '([None, 5])\n', (132253, 132264), True, 'import tensorflow as tf\n'), ((132306, 132331), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (132320, 132331), True, 'import tensorflow as tf\n'), ((132371, 132396), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (132385, 132396), True, 'import tensorflow as tf\n'), ((132440, 132472), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (132454, 132472), True, 'import tensorflow as tf\n'), ((132516, 132548), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (132530, 132548), True, 'import tensorflow as tf\n'), ((132591, 132623), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (132605, 132623), True, 'import tensorflow as tf\n'), ((132666, 132705), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, compose_classes]'], {}), '([None, compose_classes])\n', (132680, 132705), True, 'import tensorflow as tf\n'), ((132745, 132777), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (132759, 132777), True, 'import tensorflow as tf\n'), ((132817, 132849), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (132831, 132849), True, 'import tensorflow as tf\n'), ((132888, 132920), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (132902, 132920), True, 'import tensorflow as tf\n'), ((132955, 132988), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (132969, 132988), True, 'import tensorflow as tf\n'), ((136661, 136686), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (136675, 136686), True, 'import tensorflow as tf\n'), ((136746, 136771), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 
5]'], {}), '([None, 5])\n', (136760, 136771), True, 'import tensorflow as tf\n'), ((136829, 136854), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (136843, 136854), True, 'import tensorflow as tf\n'), ((136916, 136948), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (136930, 136948), True, 'import tensorflow as tf\n'), ((137010, 137042), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (137024, 137042), True, 'import tensorflow as tf\n'), ((137103, 137135), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (137117, 137135), True, 'import tensorflow as tf\n'), ((137196, 137223), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (137210, 137223), True, 'import tensorflow as tf\n'), ((137281, 137313), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (137295, 137313), True, 'import tensorflow as tf\n'), ((137371, 137403), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (137385, 137403), True, 'import tensorflow as tf\n'), ((137460, 137492), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (137474, 137492), True, 'import tensorflow as tf\n'), ((137545, 137578), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (137559, 137578), True, 'import tensorflow as tf\n'), ((137799, 137824), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (137813, 137824), True, 'import tensorflow as tf\n'), ((137884, 137909), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (137898, 137909), True, 'import tensorflow as tf\n'), ((137967, 137992), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', 
(137981, 137992), True, 'import tensorflow as tf\n'), ((138054, 138086), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (138068, 138086), True, 'import tensorflow as tf\n'), ((138148, 138180), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (138162, 138180), True, 'import tensorflow as tf\n'), ((138241, 138273), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (138255, 138273), True, 'import tensorflow as tf\n'), ((138334, 138361), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (138348, 138361), True, 'import tensorflow as tf\n'), ((138419, 138451), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (138433, 138451), True, 'import tensorflow as tf\n'), ((138509, 138541), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (138523, 138541), True, 'import tensorflow as tf\n'), ((138598, 138630), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (138612, 138630), True, 'import tensorflow as tf\n'), ((138683, 138716), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (138697, 138716), True, 'import tensorflow as tf\n'), ((142204, 142229), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (142218, 142229), True, 'import tensorflow as tf\n'), ((142289, 142314), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (142303, 142314), True, 'import tensorflow as tf\n'), ((142372, 142397), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (142386, 142397), True, 'import tensorflow as tf\n'), ((142459, 142491), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (142473, 142491), 
True, 'import tensorflow as tf\n'), ((142553, 142585), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (142567, 142585), True, 'import tensorflow as tf\n'), ((142646, 142678), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (142660, 142678), True, 'import tensorflow as tf\n'), ((142739, 142766), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (142753, 142766), True, 'import tensorflow as tf\n'), ((142824, 142856), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (142838, 142856), True, 'import tensorflow as tf\n'), ((142914, 142946), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (142928, 142946), True, 'import tensorflow as tf\n'), ((143003, 143035), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (143017, 143035), True, 'import tensorflow as tf\n'), ((143088, 143121), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (143102, 143121), True, 'import tensorflow as tf\n'), ((143342, 143367), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (143356, 143367), True, 'import tensorflow as tf\n'), ((143427, 143452), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (143441, 143452), True, 'import tensorflow as tf\n'), ((143510, 143535), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 5]'], {}), '([None, 5])\n', (143524, 143535), True, 'import tensorflow as tf\n'), ((143597, 143629), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (143611, 143629), True, 'import tensorflow as tf\n'), ((143691, 143723), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (143705, 143723), True, 'import 
tensorflow as tf\n'), ((143784, 143816), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (143798, 143816), True, 'import tensorflow as tf\n'), ((143877, 143904), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 222]'], {}), '([None, 222])\n', (143891, 143904), True, 'import tensorflow as tf\n'), ((143962, 143994), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (143976, 143994), True, 'import tensorflow as tf\n'), ((144052, 144084), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (144066, 144084), True, 'import tensorflow as tf\n'), ((144141, 144173), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, verb_num]'], {}), '([None, verb_num])\n', (144155, 144173), True, 'import tensorflow as tf\n'), ((144226, 144259), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 64, 64, 2]'], {}), '([None, 64, 64, 2])\n', (144240, 144259), True, 'import tensorflow as tf\n'), ((13318, 13330), 'numpy.zeros', 'np.zeros', (['(29)'], {}), '(29)\n', (13326, 13330), True, 'import numpy as np\n'), ((20380, 20392), 'numpy.zeros', 'np.zeros', (['(29)'], {}), '(29)\n', (20388, 20392), True, 'import numpy as np\n'), ((27125, 27137), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (27133, 27137), True, 'import numpy as np\n'), ((34371, 34383), 'numpy.zeros', 'np.zeros', (['(21)'], {}), '(21)\n', (34379, 34383), True, 'import numpy as np\n'), ((42564, 42595), 'numpy.concatenate', 'np.concatenate', (['im_list'], {'axis': '(0)'}), '(im_list, axis=0)\n', (42578, 42595), True, 'import numpy as np\n'), ((53123, 53157), 'numpy.concatenate', 'np.concatenate', (['[im1, im2]'], {'axis': '(0)'}), '([im1, im2], axis=0)\n', (53137, 53157), True, 'import numpy as np\n'), ((64553, 64584), 'numpy.concatenate', 'np.concatenate', (['im_list'], {'axis': '(0)'}), '(im_list, axis=0)\n', (64567, 64584), True, 'import numpy as np\n'), ((127958, 127981), 
'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (127972, 127981), False, 'import os\n'), ((152834, 152891), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (152842, 152891), True, 'import numpy as np\n'), ((152990, 153047), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (152998, 153047), True, 'import numpy as np\n'), ((59713, 59746), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (59724, 59746), False, 'import pickle\n'), ((127623, 127646), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (127637, 127646), False, 'import os\n'), ((128248, 128271), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (128262, 128271), False, 'import os\n'), ((128452, 128475), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (128466, 128475), False, 'import os\n'), ((147844, 147872), 'numpy.concatenate', 'np.concatenate', (['item'], {'axis': '(0)'}), '(item, axis=0)\n', (147858, 147872), True, 'import numpy as np\n'), ((11905, 11962), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (11913, 11962), True, 'import numpy as np\n'), ((12077, 12134), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (12085, 12134), True, 'import numpy as np\n'), ((12453, 12510), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (12461, 12510), True, 'import numpy as np\n'), ((12625, 12682), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], 
Neg[3][3]])\n', (12633, 12682), True, 'import numpy as np\n'), ((18667, 18724), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (18675, 18724), True, 'import numpy as np\n'), ((18839, 18896), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (18847, 18896), True, 'import numpy as np\n'), ((19215, 19272), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (19223, 19272), True, 'import numpy as np\n'), ((19387, 19444), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (19395, 19444), True, 'import numpy as np\n'), ((25168, 25225), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (25176, 25225), True, 'import numpy as np\n'), ((25340, 25397), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (25348, 25397), True, 'import numpy as np\n'), ((25759, 25816), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (25767, 25816), True, 'import numpy as np\n'), ((25931, 25988), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (25939, 25988), True, 'import numpy as np\n'), ((32414, 32471), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (32422, 32471), True, 'import numpy as np\n'), ((32586, 32643), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], 
Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (32594, 32643), True, 'import numpy as np\n'), ((33005, 33062), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (33013, 33062), True, 'import numpy as np\n'), ((33177, 33234), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (33185, 33234), True, 'import numpy as np\n'), ((47013, 47070), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (47021, 47070), True, 'import numpy as np\n'), ((47185, 47242), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (47193, 47242), True, 'import numpy as np\n'), ((47657, 47714), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (47665, 47714), True, 'import numpy as np\n'), ((47829, 47886), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (47837, 47886), True, 'import numpy as np\n'), ((74477, 74534), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (74485, 74534), True, 'import numpy as np\n'), ((74649, 74706), 'numpy.array', 'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (74657, 74706), True, 'import numpy as np\n'), ((75180, 75237), 'numpy.array', 'np.array', (['[0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]'], {}), '([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]])\n', (75188, 75237), True, 'import numpy as np\n'), ((75352, 75409), 'numpy.array', 
'np.array', (['[0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]'], {}), '([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]])\n', (75360, 75409), True, 'import numpy as np\n'), ((127841, 127864), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (127855, 127864), False, 'import os\n'), ((129462, 129485), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (129476, 129485), False, 'import os\n'), ((129127, 129150), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (129141, 129150), False, 'import os\n'), ((129345, 129368), 'os.path.exists', 'os.path.exists', (['im_file'], {}), '(im_file)\n', (129359, 129368), False, 'import os\n')] |
import warnings
import os
import math
import numpy as np
import PIL
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from ofa.imagenet_classification.data_providers.base_provider import DataProvider
from ofa.utils.my_dataloader.my_random_resize_crop import MyRandomResizedCrop
from ofa.utils.my_dataloader.my_distributed_sampler import MyDistributedSampler
class Flowers102DataProvider(DataProvider):
    def __init__(self, save_path=None, train_batch_size=32, test_batch_size=512, valid_size=None, n_worker=32,
                 resize_scale=0.08, distort_color=None, image_size=224,
                 num_replicas=None, rank=None):
        """Build the Flowers-102 data provider: transforms, datasets, and loaders.

        Args:
            save_path: root directory of the dataset on disk (stored, consumed
                by the dataset-building methods defined elsewhere in this class).
            train_batch_size: batch size for the training loader.
            test_batch_size: batch size for the test loader.
            valid_size: must be None; a held-out validation split is not
                implemented and any other value raises NotImplementedError.
            n_worker: number of DataLoader worker processes.
            resize_scale: lower bound of the random-resized-crop scale (stored;
                used by build_train_transform, defined outside this view).
            distort_color: color-jitter mode flag (stored for the transform
                builders).
            image_size: either a single int, or a list of ints to enable
                elastic (multi-size) training via MyDataLoader.
            num_replicas: accepted for interface compatibility but unused in
                this constructor.
            rank: accepted for interface compatibility but unused in this
                constructor.

        Side effects: when image_size is a list, mutates the class-level
        MyRandomResizedCrop.IMAGE_SIZE_LIST / ACTIVE_SIZE globals, which
        affects every consumer of that class in the process.
        """
        # warnings.filterwarnings('ignore')
        self._save_path = save_path
        self.image_size = image_size  # int or list of int
        self.distort_color = distort_color
        self.resize_scale = resize_scale
        # One validation transform per candidate resolution (filled below for
        # the multi-size case; left empty for a single fixed size).
        self._valid_transform_dict = {}
        if not isinstance(self.image_size, int):
            # Elastic-resolution mode: image_size is a list of candidate sizes.
            assert isinstance(self.image_size, list)
            # Local import: MyDataLoader is only needed in this branch.
            from ofa.imagenet_codebase.data_providers.my_data_loader import MyDataLoader
            self.image_size.sort()  # e.g., 160 -> 224
            # Configure the global crop helper with the full size list and
            # default to the largest size as the active one.
            MyRandomResizedCrop.IMAGE_SIZE_LIST = self.image_size.copy()
            MyRandomResizedCrop.ACTIVE_SIZE = max(self.image_size)
            for img_size in self.image_size:
                self._valid_transform_dict[img_size] = self.build_valid_transform(img_size)
            self.active_img_size = max(self.image_size)
            valid_transforms = self._valid_transform_dict[self.active_img_size]
            train_loader_class = MyDataLoader  # randomly sample image size for each batch of training image
        else:
            # Fixed-resolution mode: plain PyTorch DataLoader suffices.
            self.active_img_size = self.image_size
            valid_transforms = self.build_valid_transform()
            train_loader_class = torch.utils.data.DataLoader
        train_transforms = self.build_train_transform()
        train_dataset = self.train_dataset(train_transforms)
        # Class-balanced sampling: weight each sample inversely to its class
        # frequency (helper defined elsewhere in this class), then draw with
        # replacement via WeightedRandomSampler.
        weights = self.make_weights_for_balanced_classes(
            train_dataset.imgs, self.n_classes)
        weights = torch.DoubleTensor(weights)
        train_sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
        if valid_size is not None:
            # A separate validation split is intentionally unsupported.
            raise NotImplementedError("validation dataset not yet implemented")
            # valid_dataset = self.valid_dataset(valid_transforms)
            # self.train = train_loader_class(
            #     train_dataset, batch_size=train_batch_size, sampler=train_sampler,
            #     num_workers=n_worker, pin_memory=True)
            # self.valid = torch.utils.data.DataLoader(
            #     valid_dataset, batch_size=test_batch_size,
            #     num_workers=n_worker, pin_memory=True)
        else:
            self.train = train_loader_class(
                train_dataset, batch_size=train_batch_size, sampler=train_sampler,
                num_workers=n_worker, pin_memory=True,
            )
            self.valid = None
        test_dataset = self.test_dataset(valid_transforms)
        # NOTE(review): shuffle=True on the *test* loader is unusual for
        # evaluation; kept as-is since downstream code may rely on it.
        self.test = torch.utils.data.DataLoader(
            test_dataset, batch_size=test_batch_size, shuffle=True, num_workers=n_worker, pin_memory=True,
        )
        if self.valid is None:
            # No validation split: fall back to evaluating on the test loader.
            self.valid = self.test
@staticmethod
def name():
return 'flowers102'
@property
def data_shape(self):
return 3, self.active_img_size, self.active_img_size # C, H, W
@property
def n_classes(self):
return 102
@property
def save_path(self):
if self._save_path is None:
# self._save_path = '/mnt/datastore/Oxford102Flowers' # home server
self._save_path = '/mnt/datastore/Flowers102' # home server
if not os.path.exists(self._save_path):
# self._save_path = '/mnt/datastore/Oxford102Flowers' # home server
self._save_path = '/mnt/datastore/Flowers102' # home server
return self._save_path
@property
def data_url(self):
raise ValueError('unable to download %s' % self.name())
def train_dataset(self, _transforms):
dataset = datasets.ImageFolder(self.train_path, _transforms)
return dataset
# def valid_dataset(self, _transforms):
# dataset = datasets.ImageFolder(self.valid_path, _transforms)
# return dataset
def test_dataset(self, _transforms):
dataset = datasets.ImageFolder(self.test_path, _transforms)
return dataset
@property
def train_path(self):
return os.path.join(self.save_path, 'train')
# @property
# def valid_path(self):
# return os.path.join(self.save_path, 'train')
@property
def test_path(self):
return os.path.join(self.save_path, 'test')
@property
def normalize(self):
return transforms.Normalize(
mean=[0.5178361839861569, 0.4106749456881299, 0.32864167836880803],
std=[0.2972239085211309, 0.24976049135203868, 0.28533308036347665])
@staticmethod
def make_weights_for_balanced_classes(images, nclasses):
count = [0] * nclasses
# Counts per label
for item in images:
count[item[1]] += 1
weight_per_class = [0.] * nclasses
# Total number of images.
N = float(sum(count))
# super-sample the smaller classes.
for i in range(nclasses):
weight_per_class[i] = N / float(count[i])
weight = [0] * len(images)
# Calculate a weight per image.
for idx, val in enumerate(images):
weight[idx] = weight_per_class[val[1]]
return weight
def build_train_transform(self, image_size=None, print_log=True):
if image_size is None:
image_size = self.image_size
if print_log:
print('Color jitter: %s, resize_scale: %s, img_size: %s' %
(self.distort_color, self.resize_scale, image_size))
if self.distort_color == 'torch':
color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
elif self.distort_color == 'tf':
color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
else:
color_transform = None
if isinstance(image_size, list):
resize_transform_class = MyRandomResizedCrop
print('Use MyRandomResizedCrop: %s, \t %s' % MyRandomResizedCrop.get_candidate_image_size(),
'sync=%s, continuous=%s' % (MyRandomResizedCrop.SYNC_DISTRIBUTED, MyRandomResizedCrop.CONTINUOUS))
else:
resize_transform_class = transforms.RandomResizedCrop
train_transforms = [
transforms.RandomAffine(
45, translate=(0.4, 0.4), scale=(0.75, 1.5), shear=None, resample=PIL.Image.BILINEAR, fillcolor=0),
resize_transform_class(image_size, scale=(self.resize_scale, 1.0)),
# transforms.RandomHorizontalFlip(),
]
if color_transform is not None:
train_transforms.append(color_transform)
train_transforms += [
transforms.ToTensor(),
self.normalize,
]
train_transforms = transforms.Compose(train_transforms)
return train_transforms
def build_valid_transform(self, image_size=None):
if image_size is None:
image_size = self.active_img_size
return transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
self.normalize,
])
def assign_active_img_size(self, new_img_size):
self.active_img_size = new_img_size
if self.active_img_size not in self._valid_transform_dict:
self._valid_transform_dict[self.active_img_size] = self.build_valid_transform()
# change the transform of the valid and test set
self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]
def build_sub_train_loader(self, n_images, batch_size, num_worker=None, num_replicas=None, rank=None):
# used for resetting running statistics
if self.__dict__.get('sub_train_%d' % self.active_img_size, None) is None:
if num_worker is None:
num_worker = self.train.num_workers
n_samples = len(self.train.dataset.samples)
g = torch.Generator()
g.manual_seed(DataProvider.SUB_SEED)
rand_indexes = torch.randperm(n_samples, generator=g).tolist()
new_train_dataset = self.train_dataset(
self.build_train_transform(image_size=self.active_img_size, print_log=False))
chosen_indexes = rand_indexes[:n_images]
if num_replicas is not None:
sub_sampler = MyDistributedSampler(new_train_dataset, num_replicas, rank, np.array(chosen_indexes))
else:
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
sub_data_loader = torch.utils.data.DataLoader(
new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
num_workers=num_worker, pin_memory=True,
)
self.__dict__['sub_train_%d' % self.active_img_size] = []
for images, labels in sub_data_loader:
self.__dict__['sub_train_%d' % self.active_img_size].append((images, labels))
return self.__dict__['sub_train_%d' % self.active_img_size]
| [
"torchvision.transforms.ColorJitter",
"torchvision.transforms.RandomAffine",
"ofa.utils.my_dataloader.my_random_resize_crop.MyRandomResizedCrop.get_candidate_image_size",
"math.ceil",
"os.path.exists",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.Compose",
"numpy.array",
"torchvision.... | [((4314, 4364), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['self.train_path', '_transforms'], {}), '(self.train_path, _transforms)\n', (4334, 4364), True, 'import torchvision.datasets as datasets\n'), ((4589, 4638), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['self.test_path', '_transforms'], {}), '(self.test_path, _transforms)\n', (4609, 4638), True, 'import torchvision.datasets as datasets\n'), ((4722, 4759), 'os.path.join', 'os.path.join', (['self.save_path', '"""train"""'], {}), "(self.save_path, 'train')\n", (4734, 4759), False, 'import os\n'), ((4919, 4955), 'os.path.join', 'os.path.join', (['self.save_path', '"""test"""'], {}), "(self.save_path, 'test')\n", (4931, 4955), False, 'import os\n'), ((5015, 5181), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5178361839861569, 0.4106749456881299, 0.32864167836880803]', 'std': '[0.2972239085211309, 0.24976049135203868, 0.28533308036347665]'}), '(mean=[0.5178361839861569, 0.4106749456881299, \n 0.32864167836880803], std=[0.2972239085211309, 0.24976049135203868, \n 0.28533308036347665])\n', (5035, 5181), True, 'import torchvision.transforms as transforms\n'), ((7427, 7463), 'torchvision.transforms.Compose', 'transforms.Compose', (['train_transforms'], {}), '(train_transforms)\n', (7445, 7463), True, 'import torchvision.transforms as transforms\n'), ((6212, 6289), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)', 'hue': '(0.1)'}), '(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)\n', (6234, 6289), True, 'import torchvision.transforms as transforms\n'), ((6923, 7050), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['(45)'], {'translate': '(0.4, 0.4)', 'scale': '(0.75, 1.5)', 'shear': 'None', 'resample': 'PIL.Image.BILINEAR', 'fillcolor': '(0)'}), '(45, translate=(0.4, 0.4), scale=(0.75, 1.5), shear=\n None, 
resample=PIL.Image.BILINEAR, fillcolor=0)\n', (6946, 7050), True, 'import torchvision.transforms as transforms\n'), ((7338, 7359), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7357, 7359), True, 'import torchvision.transforms as transforms\n'), ((3916, 3947), 'os.path.exists', 'os.path.exists', (['self._save_path'], {}), '(self._save_path)\n', (3930, 3947), False, 'import os\n'), ((6361, 6424), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(32.0 / 255.0)', 'saturation': '(0.5)'}), '(brightness=32.0 / 255.0, saturation=0.5)\n', (6383, 6424), True, 'import torchvision.transforms as transforms\n'), ((7743, 7776), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (7764, 7776), True, 'import torchvision.transforms as transforms\n'), ((7790, 7811), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7809, 7811), True, 'import torchvision.transforms as transforms\n'), ((6636, 6682), 'ofa.utils.my_dataloader.my_random_resize_crop.MyRandomResizedCrop.get_candidate_image_size', 'MyRandomResizedCrop.get_candidate_image_size', ([], {}), '()\n', (6680, 6682), False, 'from ofa.utils.my_dataloader.my_random_resize_crop import MyRandomResizedCrop\n'), ((9240, 9264), 'numpy.array', 'np.array', (['chosen_indexes'], {}), '(chosen_indexes)\n', (9248, 9264), True, 'import numpy as np\n'), ((7698, 7727), 'math.ceil', 'math.ceil', (['(image_size / 0.875)'], {}), '(image_size / 0.875)\n', (7707, 7727), False, 'import math\n')] |
"""simulate.py: Contains Cantilever class."""
# pylint: disable=E1101,R0902,C0103
__copyright__ = "Copyright 2020, Ginger Lab"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import numpy as np
from math import pi
from scipy.integrate import odeint
import ffta
# Set constant 2 * pi.
PI2 = 2 * pi


class Cantilever:
    """Damped Driven Harmonic Oscillator Simulator for AFM Cantilevers.

    Simulates a DDHO with given parameters.

    This class contains the functions needed to simulate. To create a class that
    simulates a subset, it needs to overload the following functions:

        force(self, t)
        omega(self, t)
        dZdt(self, t) if the given ODE form will not work

    Attributes
    ----------
    amp : float
        Amplitude of the cantilever in meters.
    beta : float
        Damping factor of the cantilever in rad/s.
    delta : float
        Initial phase of the cantilever in radians.
    delta_freq : float
        Frequency shift of the cantilever under excitation.
    mass : float
        Mass of the cantilever in kilograms.
    Z : ndarray
        ODE integration result, sampled at sampling_rate. Default integration
        is at 100 MHz.
    t_Z : ndarray
        Time axis based on the provided total time and sampling rate
    f_Z : ndarray
        Frequency axis based on the provided sampling rate

    Method
    ------
    simulate(trigger_phase=180)
        Simulates the cantilever motion with excitation happening
        at the given phase.

    See Also
    --------
    pixel: Pixel processing for FF-trEFM data.

    Examples
    --------
    >>> from ffta.simulation import cantilever
    >>> from ffta.simulation.utils import load
    >>>
    >>> params_file = '../examples/sim_params.cfg'
    >>> params = load.simulation_configuration(params_file)
    >>>
    >>> c = cantilever.Cantilever(*params)
    >>> Z, infodict = c.simulate()
    >>> c.analyze()
    >>> c.analyze(roi=0.004) # can change the parameters as desired

    :param can_params: Parameters for cantilever properties. The dictionary contains:
        amp_invols = float (in m/V)
        def_invols = float (in m/V)
        soft_amp = float (in V)
        drive_freq = float (in Hz)
        res_freq = float (in Hz)
        k = float (in N/m)
        q_factor = float
    :type can_params: dict

    :param force_params: Parameters for forces. The dictionary contains:
        es_force = float (in N)
        delta_freq = float (in Hz)
        tau = float (in seconds)
        v_dc = float (in Volts)
        v_ac = float (in Volts)
        v_cpd = float (in Volts)
        dCdz = float (in F/m)
    :type force_params: dict

    :param sim_params: Parameters for simulation. The dictionary contains:
        trigger = float (in seconds)
        total_time = float (in seconds)
        sampling_rate = int (in Hz)
    :type sim_params: dict
    """

    def __init__(self, can_params, force_params, sim_params):
        # Initialize cantilever parameters and calculate some others.
        for key, value in can_params.items():
            setattr(self, key, value)
        self.w0 = PI2 * self.res_freq  # Radial resonance frequency.
        self.wd = PI2 * self.drive_freq  # Radial drive frequency.
        if not np.allclose(self.w0, self.wd):
            print('Resonance and Drive not equal. Make sure simulation is long enough!')
        self.beta = self.w0 / (2 * self.q_factor)  # Damping factor.
        self.mass = self.k / (self.w0 ** 2)  # Mass of the cantilever in kg.
        self.amp = self.soft_amp * self.amp_invols  # Amplitude in meters.
        # Calculate reduced driving force and phase in equilibrium.
        np.seterr(divide='ignore')  # suppress divide-by-0 warning in arctan
        self.f0 = self.amp * np.sqrt((self.w0 ** 2 - self.wd ** 2) ** 2 +
                                      4 * self.beta ** 2 * self.wd ** 2)
        self.delta = np.abs(np.arctan(np.divide(2 * self.wd * self.beta,
                                               self.w0 ** 2 - self.wd ** 2)))
        # Initialize force parameters and calculate some others.
        for key, value in force_params.items():
            setattr(self, key, value)
        self.delta_w = PI2 * self.delta_freq  # Frequency shift in radians.
        self.fe = self.es_force / self.mass  # Reduced electrostatic force.
        # Initialize simulation parameters.
        for key, value in sim_params.items():
            setattr(self, key, value)
        # Calculate time axis for simulated tip motion without extra cycles.
        num_pts = int(self.total_time * self.sampling_rate)
        self.t_Z = np.linspace(0, self.total_time, num=num_pts)
        # Calculate frequency axis for simulated tip motion without extra cycles.
        self.freq_Z = np.linspace(0, int(self.sampling_rate / 2), num=int(num_pts / 2 + 1))
        # Create a Pixel class-compatible params file.
        self.fit_params = {}
        self.parameters = force_params
        self.parameters.update(**sim_params)
        self.can_params = can_params
        self.create_parameters(self.parameters, self.can_params)
        return

    def set_conditions(self, trigger_phase=180):
        """
        Sets initial conditions and other simulation parameters.

        :param trigger_phase: Trigger phase is in degrees and wrt cosine. Default value is 180.
        :type trigger_phase: float, optional
        """
        self.trigger_phase = np.mod(np.pi * trigger_phase / 180, PI2)
        self.n_points = int(self.total_time * self.sampling_rate)
        # Add extra cycles to the simulation to find correct phase at trigger.
        cycle_points = int(2 * self.sampling_rate / self.res_freq)
        self.n_points_sim = cycle_points + self.n_points
        # Create time vector and find the trigger wrt phase.
        self.t = np.arange(self.n_points_sim) / self.sampling_rate
        # Current phase at trigger.
        current_phase = np.mod(self.wd * self.trigger - self.delta, PI2)
        phase_diff = np.mod(self.trigger_phase - current_phase, PI2)
        self.t0 = self.trigger + phase_diff / self.wd  # modified trigger point
        # Set the initial conditions at t=0 (steady-state DDHO solution).
        z0 = self.amp * np.sin(-self.delta)
        v0 = self.amp * self.wd * np.cos(-self.delta)
        self.Z0 = np.array([z0, v0])
        return

    def force(self, t, t0=0, tau=0):
        """
        Force on the cantilever at a given time.

        :param t: time in seconds
        :type t: float
        :param t0: time of excitation trigger (unused in the base class)
        :type t0: float
        :param tau: time constant of the excitation (unused in the base class)
        :type tau: float
        :returns: Force on the cantilever at a given time, in N/kg.
        :rtype: float
        """
        driving_force = self.f0 * np.sin(self.wd * t)
        return driving_force

    def omega(self, t, t0=0, tau=0):
        """
        Resonance frequency behavior.

        :param t: time in seconds
        :type t: float
        :param t0: time of excitation trigger (unused in the base class)
        :type t0: float
        :param tau: time constant of the excitation (unused in the base class)
        :type tau: float
        :returns: Resonance frequency of the cantilever at a given time, in rad/s.
        :rtype: float
        """
        return self.w0

    def dZ_dt(self, Z, t=0):
        """
        Takes the derivative of the given Z with respect to time.

        :param Z: Z[0] is the cantilever position, and Z[1] is the cantilever
            velocity.
        :type Z: (2, ) array_like
        :param t: time
        :type t: float
        :returns: Zdot[0] is the cantilever velocity, and Zdot[1] is the
            cantilever acceleration.
        :rtype: (2, ) array_like
        """
        t0 = self.t0
        tau = self.tau
        v = Z[1]
        # DDHO equation of motion: a = F/m - (w/Q) v - w^2 z
        vdot = (self.force(t, t0, tau) -
                self.omega(t, t0, tau) * Z[1] / self.q_factor -
                self.omega(t, t0, tau) ** 2 * Z[0])
        return np.array([v, vdot])

    def simulate(self, trigger_phase=180, Z0=None):
        """
        Simulates the cantilever motion.

        :param trigger_phase: Trigger phase is in degrees and wrt cosine. Default value is 180.
        :type trigger_phase: float, optional
        :param Z0: Z0 = [z0, v0], the initial position and velocity.
            If not specified, is calculated from the analytical solution to DDHO
            (using "set_conditions")
        :type Z0: list or ndarray, optional
        :returns: tuple (Z, infodict)
            WHERE
            array_like Z is Cantilever position in Volts, in format (n_points, 1)
            dict infodict is information about the ODE solver.
        """
        # BUGFIX: was `if Z0:`, which raises "truth value is ambiguous" for
        # ndarray inputs even though ndarrays are explicitly accepted below.
        if Z0 is not None:
            if not isinstance(Z0, (np.ndarray, list)):
                raise TypeError('Must be 2-size array or list')
            if len(Z0) != 2:
                raise ValueError('Must specify exactly [z0, v0]')
            self.n_points = int(self.total_time * self.sampling_rate)
            self.t = np.arange(self.n_points) / self.sampling_rate
            self.t0 = self.trigger
            self.Z0 = Z0
        else:
            self.set_conditions(trigger_phase)
        Z, infodict = odeint(self.dZ_dt, self.Z0, self.t, full_output=True)
        # Discard the warm-up cycles so the output starts at the trigger phase.
        t0_idx = int(self.t0 * self.sampling_rate)
        tidx = int(self.trigger * self.sampling_rate)
        Z_cut = Z[(t0_idx - tidx):(t0_idx + self.n_points - tidx), 0]
        self.infodict = infodict
        self.Z = Z_cut
        return self.Z, self.infodict

    def downsample(self, target_rate=1e7):
        """
        Downsamples the cantilever output. Used primarily to match experiments
        or for lower computational load.

        This will overwrite the existing output with the downsampled version.

        :param target_rate: The sampling rate for the signal to be converted to. 1e7 = 10 MHz
        :type target_rate: int
        """
        if target_rate > self.sampling_rate:
            raise ValueError('Target should be less than the initial sampling rate')
        step = int(self.sampling_rate / target_rate)
        n_points = int(self.total_time * target_rate)
        # Convert from meters back to Volts via the deflection inverse OLS.
        self.Z = self.Z[0::step].reshape(n_points, 1) / self.def_invols
        return

    def create_parameters(self, params=None, can_params=None, fit_params=None):
        """
        Creates a Pixel class-compatible parameters and cantilever parameters Dict.

        :param params: Contains analysis parameters for the Pixel class
        :type params: dict, optional
        :param can_params: Contains cantilever parameters for the Pixel class. These data are
            optional for the analysis.
        :type can_params: dict, optional
        :param fit_params: Contains various parameters for fitting and analysis. See Pixel class.
        :type fit_params: dict, optional
        """
        # BUGFIX: the defaults used to be mutable dicts (`params={}` ...),
        # which are shared between calls and between Cantilever instances, so
        # values from one call silently leaked into the next.
        params = {} if params is None else params
        can_params = {} if can_params is None else can_params
        fit_params = {} if fit_params is None else fit_params
        # default seeding of parameters
        _parameters = {'bandpass_filter': 1.0,
                       'drive_freq': 277261,
                       'filter_bandwidth': 10000.0,
                       'n_taps': 799,
                       'roi': 0.0003,
                       'sampling_rate': 1e7,
                       'total_time': 0.002,
                       'trigger': 0.0005,
                       'window': 'blackman',
                       'wavelet_analysis': 0,
                       'fft_time_res': 2e-5}
        _can_params = {'amp_invols': 5.52e-08,
                       'def_invols': 5.06e-08,
                       'k': 26.2,
                       'q_factor': 432}
        _fit_params = {'filter_amplitude': True,
                       'method': 'hilbert',
                       'fit': True,
                       'fit_form': 'product'
                       }
        # For each missing key, prefer an existing instance attribute,
        # otherwise fall back to the default above.
        for key, val in _parameters.items():
            if key not in params:
                if hasattr(self, key):
                    params[key] = self.__dict__[key]
                else:
                    params[key] = val
        for key, val in _can_params.items():
            if key not in can_params:
                if hasattr(self, key):
                    can_params[key] = self.__dict__[key]
                else:
                    can_params[key] = val
        for key, val in _fit_params.items():
            if key not in fit_params:
                if hasattr(self, key):
                    fit_params[key] = self.__dict__[key]
                else:
                    fit_params[key] = val
        # then write to the Class
        self.parameters.update(**params)
        self.can_params.update(**can_params)
        self.fit_params.update(**fit_params)
        return

    def analyze(self, plot=True, **kwargs):
        """
        Converts output to a Pixel class and analyzes.

        :param plot: If True, calls Pixel.plot() to display the results
        :type plot: bool, optional
        :param kwargs: Overrides for analysis/cantilever/fit parameters;
            keys are routed to the matching parameter dict below.
        :returns: the analyzed Pixel
        :rtype: Pixel object
        """
        param_keys = ['bandpass_filter', 'drive_freq', 'filter_bandwidth', 'n_taps',
                      'roi', 'sampling_rate', 'total_time', 'trigger', 'window', 'wavelet_analysis',
                      'fft_time_res']
        can_param_keys = ['amp_invols', 'def_invols', 'k', 'q_factor']
        fit_param_keys = ['filter_amplitude', 'method', 'fit', 'fit_form']
        params = {}
        can_params = {}
        fit_params = {}
        # Route each keyword override into the dict it belongs to.
        for k, v in kwargs.items():
            if k in param_keys:
                params[k] = v
            elif k in can_param_keys:
                can_params[k] = v
            elif k in fit_param_keys:
                fit_params[k] = v
        self.create_parameters(params, can_params, fit_params)
        pix = ffta.pixel.Pixel(self.Z, self.parameters, self.can_params, **self.fit_params)
        pix.analyze()
        if plot:
            pix.plot()
        return pix
| [
"numpy.divide",
"numpy.seterr",
"scipy.integrate.odeint",
"numpy.allclose",
"numpy.mod",
"ffta.pixel.Pixel",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"numpy.sqrt"
] | [((3697, 3723), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (3706, 3723), True, 'import numpy as np\n'), ((4654, 4698), 'numpy.linspace', 'np.linspace', (['(0)', 'self.total_time'], {'num': 'num_pts'}), '(0, self.total_time, num=num_pts)\n', (4665, 4698), True, 'import numpy as np\n'), ((5483, 5523), 'numpy.mod', 'np.mod', (['(np.pi * trigger_phase / 180)', 'PI2'], {}), '(np.pi * trigger_phase / 180, PI2)\n', (5489, 5523), True, 'import numpy as np\n'), ((5984, 6032), 'numpy.mod', 'np.mod', (['(self.wd * self.trigger - self.delta)', 'PI2'], {}), '(self.wd * self.trigger - self.delta, PI2)\n', (5990, 6032), True, 'import numpy as np\n'), ((6054, 6101), 'numpy.mod', 'np.mod', (['(self.trigger_phase - current_phase)', 'PI2'], {}), '(self.trigger_phase - current_phase, PI2)\n', (6060, 6101), True, 'import numpy as np\n'), ((6346, 6364), 'numpy.array', 'np.array', (['[z0, v0]'], {}), '([z0, v0])\n', (6354, 6364), True, 'import numpy as np\n'), ((7924, 7943), 'numpy.array', 'np.array', (['[v, vdot]'], {}), '([v, vdot])\n', (7932, 7943), True, 'import numpy as np\n'), ((9167, 9220), 'scipy.integrate.odeint', 'odeint', (['self.dZ_dt', 'self.Z0', 'self.t'], {'full_output': '(True)'}), '(self.dZ_dt, self.Z0, self.t, full_output=True)\n', (9173, 9220), False, 'from scipy.integrate import odeint\n'), ((13773, 13850), 'ffta.pixel.Pixel', 'ffta.pixel.Pixel', (['self.Z', 'self.parameters', 'self.can_params'], {}), '(self.Z, self.parameters, self.can_params, **self.fit_params)\n', (13789, 13850), False, 'import ffta\n'), ((3278, 3307), 'numpy.allclose', 'np.allclose', (['self.w0', 'self.wd'], {}), '(self.w0, self.wd)\n', (3289, 3307), True, 'import numpy as np\n'), ((3795, 3874), 'numpy.sqrt', 'np.sqrt', (['((self.w0 ** 2 - self.wd ** 2) ** 2 + 4 * self.beta ** 2 * self.wd ** 2)'], {}), '((self.w0 ** 2 - self.wd ** 2) ** 2 + 4 * self.beta ** 2 * self.wd ** 2)\n', (3802, 3874), True, 'import numpy as np\n'), ((5873, 5901), 
'numpy.arange', 'np.arange', (['self.n_points_sim'], {}), '(self.n_points_sim)\n', (5882, 5901), True, 'import numpy as np\n'), ((6253, 6272), 'numpy.sin', 'np.sin', (['(-self.delta)'], {}), '(-self.delta)\n', (6259, 6272), True, 'import numpy as np\n'), ((6307, 6326), 'numpy.cos', 'np.cos', (['(-self.delta)'], {}), '(-self.delta)\n', (6313, 6326), True, 'import numpy as np\n'), ((6770, 6789), 'numpy.sin', 'np.sin', (['(self.wd * t)'], {}), '(self.wd * t)\n', (6776, 6789), True, 'import numpy as np\n'), ((3950, 4013), 'numpy.divide', 'np.divide', (['(2 * self.wd * self.beta)', '(self.w0 ** 2 - self.wd ** 2)'], {}), '(2 * self.wd * self.beta, self.w0 ** 2 - self.wd ** 2)\n', (3959, 4013), True, 'import numpy as np\n'), ((8976, 9000), 'numpy.arange', 'np.arange', (['self.n_points'], {}), '(self.n_points)\n', (8985, 9000), True, 'import numpy as np\n')] |
"""
Dataset stored as NPY files in directory or
as NPZ dictionary.
"""
import os
import numpy as np
import torch
from ..tools import np_of_torchdefaultdtype
from .database import Database
from .restarter import Restartable
class DirectoryDatabase(Database, Restartable):
    """
    Database whose arrays live as individual NPY files in a directory.

    :param directory: directory path where the files are stored
    :param name: prefix for the arrays.

    Each variable db_name appearing in inputs or targets is loaded from a
    file named f"{name}{db_name}.npy" (with a legacy f"data-{name}..."
    prefix tried first for backward compatibility).

    Other arguments: See ``Database``.

    .. Note::
        This database loader does not support the ``allow_unfound`` setting in the base ``Database``. The
        variables to load must be set explicitly in the inputs and targets.
    """

    def __init__(self, directory, name, inputs, targets, *args, quiet=False, allow_unfound=False, **kwargs):
        if allow_unfound:
            raise ValueError("DirectoryDatabase class does not support allow_unfound argument.")
        arrays = self.load_arrays(directory, name, inputs, targets, quiet=quiet)
        super().__init__(arrays, inputs, targets, *args, **kwargs, quiet=quiet)
        # Record how to rebuild this database when restarting a session.
        self.restarter = self.make_restarter(directory, name, inputs, targets, *args, **kwargs, quiet=quiet)

    def get_file_dict(self, directory, prefix):
        """Map each array label to its NPY filename for files matching *prefix*."""
        try:
            entries = os.listdir(directory)
        except FileNotFoundError as fee:
            message = (
                "ERROR: Couldn't find directory {} containing files."
                'A solution is to explicitly specify "path" in database_params '
            ).format(directory)
            raise FileNotFoundError(message) from fee
        data_labels = {}
        for fname in entries:
            if fname.startswith(prefix) and fname.endswith(".npy"):
                # Label is the filename minus the prefix and '.npy' suffix.
                data_labels[fname[len(prefix):-4]] = fname
        # Make sure we actually found some files
        if not data_labels:
            raise FileNotFoundError(
                "No files found at {} .".format(directory) + "for database prefix {}".format(prefix)
            )
        return data_labels

    def load_arrays(self, directory, name, inputs, targets, quiet=False, allow_unfound=False):
        """Load the NPY arrays for every requested variable into a dict."""
        requested = inputs + targets
        try:
            # Backward compatibility: older datasets used a 'data-' prefix.
            data_labels = self.get_file_dict(directory, prefix="data-" + name)
        except FileNotFoundError:
            data_labels = self.get_file_dict(directory, prefix=name)
        if not quiet:
            print("Arrays found: ", data_labels)
        arr_dict = {}
        for label, fname in data_labels.items():
            if allow_unfound or label in requested:
                arr_dict[label] = np.load(os.path.join(directory, fname))
        # Put float64 data in pytorch default dtype.
        floatX = np_of_torchdefaultdtype()
        for label, arr in arr_dict.items():
            if arr.dtype == "float64":
                arr_dict[label] = arr.astype(floatX)
        if not quiet:
            print("Data types:")
            print({k: v.dtype for k, v in arr_dict.items()})
        return arr_dict
class NPZDatabase(Database, Restartable):
    """
    Database stored as a single NPZ dictionary file.

    :param file: path to the ``.npz`` archive.
    :param inputs: input variable names to load.
    :param targets: target variable names to load.
    :param allow_unfound: if True, load every array in the archive, not just
        the ones named in inputs/targets.
    Other arguments: See ``Database``.
    """

    def __init__(self, file, inputs, targets, *args, allow_unfound=False, quiet=False, **kwargs):
        arr_dict = self.load_arrays(file, inputs, targets, quiet=quiet, allow_unfound=allow_unfound)
        super().__init__(arr_dict, inputs, targets, *args, **kwargs, quiet=quiet)
        # Record how to rebuild this database when restarting a session.
        self.restarter = self.make_restarter(
            file, inputs, targets, *args, **kwargs, quiet=quiet, allow_unfound=allow_unfound
        )

    def load_arrays(self, file, inputs, targets, allow_unfound=False, quiet=False):
        """Load the arrays from the NPZ archive into a plain dict."""
        npz = np.load(file)
        if not quiet:
            print("Arrays found: ", list(npz.keys()))
        # BUGFIX: always materialize into a plain dict. np.load on an .npz
        # returns a lazy NpzFile, which does not support item assignment, so
        # the dtype-conversion loop below crashed when allow_unfound=True.
        if allow_unfound:
            arr_dict = {k: npz[k] for k in npz.files}
        else:
            var_list = inputs + targets
            arr_dict = {k: npz[k] for k in npz.files if k in var_list}
        # Put float64 data in pytorch default dtype
        floatX = np_of_torchdefaultdtype()
        for k, v in arr_dict.items():
            if v.dtype == "float64":
                arr_dict[k] = v.astype(floatX)
        if not quiet:
            print("Data types:")
            print({k: v.dtype for k, v in arr_dict.items()})
        return arr_dict
| [
"numpy.load",
"os.path.join",
"os.listdir"
] | [((3824, 3837), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (3831, 3837), True, 'import numpy as np\n'), ((1487, 1508), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1497, 1508), False, 'import os\n'), ((2741, 2770), 'os.path.join', 'os.path.join', (['directory', 'file'], {}), '(directory, file)\n', (2753, 2770), False, 'import os\n')] |
import ray
import torch
import os
import time
import numpy as np
import numpy.random as rd
from collections import deque
import datetime
from copy import deepcopy
from ray_elegantrl.buffer import ReplayBuffer, ReplayBufferMP
from ray_elegantrl.evaluate import RecordEpisode, RecordEvaluate, Evaluator
from ray_elegantrl.config import default_config
"""
Modify [ElegantRL](https://github.com/AI4Finance-LLC/ElegantRL)
by https://github.com/GyChou
"""
class Arguments:
    """Experiment hyper-parameter container built from a configs dict
    (see ray_elegantrl.config.default_config)."""
    def __init__(self, configs=default_config):
        # NOTE(review): mutable default argument -- self.agent below aliases
        # configs['agent'] and is mutated, so constructing Arguments() twice
        # with the shared default_config accumulates state; confirm intended.
        self.gpu_id = configs['gpu_id']  # choose the GPU for running. gpu_id is None means set it automatically
        # current work directory. cwd is None means set it automatically
        self.cwd = configs['cwd'] if 'cwd' in configs.keys() else None
        # current work directory with time.
        # NOTE(review): this guards on 'cwd' rather than 'if_cwd_time' --
        # looks like a copy-paste typo; verify against the config schema.
        self.if_cwd_time = configs['if_cwd_time'] if 'cwd' in configs.keys() else False
        # initialize random seed in self.init_before_training()
        self.random_seed = 0
        # id state_dim action_dim reward_dim target_return horizon_step
        self.env = configs['env']
        # Deep Reinforcement Learning algorithm
        self.agent = configs['agent']
        self.agent['agent_name'] = self.agent['class_name']().__class__.__name__
        self.trainer = configs['trainer']
        self.interactor = configs['interactor']
        self.buffer = configs['buffer']
        self.evaluator = configs['evaluator']
        # Deep copy kept so it can be dumped to YAML untouched later.
        self.config = deepcopy(configs)
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        '''if_per_explore'''
        if self.buffer['if_on_policy']:
            # On-policy buffers never use random pre-exploration.
            self.if_per_explore = False
        else:
            self.if_per_explore = configs['interactor']['random_explore'] if 'random_explore' in configs[
                'interactor'].keys() else False
        # Mirror agent/env settings into the replay-buffer config.
        self.buffer['if_rnn'] = self.agent['if_rnn']
        if self.agent['if_rnn']:
            self.buffer['hidden_state_dim'] = self.agent['hidden_state_dim']
        self.buffer['action_type'] = self.env['action_type']
        self.buffer['state_dim'] = self.env['state_dim']
        self.buffer['action_dim'] = self.env['action_dim']
        self.buffer['reward_dim'] = self.env['reward_dim']
        self.buffer['rollout_num'] = self.interactor['rollout_num']
    def init_before_training(self, if_main=True):
        """Resolve gpu_id and cwd, (on the main process) clean/create the
        working directory and dump the parameters to YAML, then seed
        torch/numpy and pin CUDA_VISIBLE_DEVICES."""
        '''set gpu_id automatically'''
        if self.gpu_id is None:  # set gpu_id automatically
            import sys
            # NOTE(review): takes the 4th-from-last character of the last CLI
            # argument -- assumes an argv convention like '...cuda:0'; verify.
            self.gpu_id = sys.argv[-1][-4]
        else:
            self.gpu_id = str(self.gpu_id)
        if not self.gpu_id.isdigit():  # set gpu_id as '0' in default
            self.gpu_id = '0'
        '''set cwd automatically'''
        if self.cwd is None:
            if self.if_cwd_time:
                curr_time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
            else:
                curr_time = 'current'
            if 'carla' in self.env["id"]:
                # Flatten a (possibly hybrid/list) action_dim into a string tag.
                a = ''
                if isinstance(self.env["action_dim"], list):
                    for e in self.env["action_dim"]:
                        a += str(e) + '_'
                else:
                    a = str(self.env["action_dim"]) + '_'
                self.cwd = f'./veh_control_logs/{self.env["id"]}' \
                           f'_{self.env["params_name"]["params"]["town"]}' \
                           f'_{self.env["params_name"]["params"]["task_mode"]}' \
                           f'_s{self.env["state_dim"]}_a{a}r{self.env["reward_dim"]}' \
                           f'_tr{self.env["target_return"]}_ms{self.env["max_step"]}' \
                           f'_{self.env["params_name"]["params"]["if_dest_end"]}/' \
                           f'{self.agent["agent_name"]}_{self.agent["policy_type"]}_{self.agent["objective_type"]}/' \
                           f'exp_{curr_time}_cuda:{self.gpu_id}'
            else:
                self.cwd = f'./veh_control_logs/{self.env["id"]}_s{self.env["state_dim"]}_' \
                           f'a{self.env["action_dim"]}_r{self.env["reward_dim"]}' \
                           f'_tr{self.env["target_return"]}_ms{self.env["max_step"]}/' \
                           f'{self.agent["agent_name"]}_{self.agent["policy_type"]}_{self.agent["objective_type"]}/' \
                           f'exp_{curr_time}_cuda:{self.gpu_id}'
        if if_main:
            print(f'| GPU id: {self.gpu_id}, cwd: {self.cwd}')
            import shutil  # remove history according to bool(if_remove)
            if self.if_remove is None:
                self.if_remove = bool(input("PRESS 'y' to REMOVE: {}? ".format(self.cwd)) == 'y')
            if self.if_remove:
                shutil.rmtree(self.cwd, ignore_errors=True)
                print("| Remove history")
            os.makedirs(self.cwd, exist_ok=True)
            '''save exp parameters'''
            from ruamel.yaml.main import YAML
            yaml = YAML()
            # Strip non-serializable / transient entries before dumping.
            del self.config['agent']['class_name']
            del self.config['if_cwd_time']
            self.config['cwd'] = self.cwd
            with open(self.cwd + '/parameters.yaml', 'w', encoding="utf-8") as f:
                yaml.dump(self.config, f)
            # The snapshot is no longer needed once it is on disk.
            del self.config
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
        torch.set_default_dtype(torch.float32)
        torch.manual_seed(self.random_seed)
        np.random.seed(self.random_seed)
def make_env(env_dict, id=None, seed=0):
    """Instantiate and seed one gym environment described by ``env_dict``.

    :param env_dict: env config with at least 'id'; optional 'params_name'
        holds kwargs forwarded to ``gym.make`` (carla envs carry a nested
        'params' dict with per-worker 'port'/'label').
    :param id: rollout-worker index, used to offset the carla server port.
    :param seed: base random seed; the env is seeded with ``id + seed``.
    :return: the created (and seeded) gym environment.
    """
    import gym
    import gym.envs
    import gym_carla_feature
    if 'params_name' in env_dict:
        # NOTE(review): env_dict['params_name'] is used below as a kwargs dict
        # (and indexed with ['params']), so this equality test against the
        # string 'params' looks always-False -- confirm the intended condition.
        if env_dict['params_name'] == 'params':
            # give every rollout worker its own carla server port and label
            env_dict['params_name']['params']['port'] = env_dict['params_name']['params']['port'] + id * 4
            env_dict['params_name']['params']['label'] = id
        env = gym.make(env_dict['id'], **env_dict['params_name'])
    else:
        env = gym.make(env_dict['id'])
    # NOTE(review): raises TypeError when called with the default id=None.
    env.seed(seed=(id + seed))
    return env
@ray.remote
class InterActor(object):
    """Ray actor that owns one environment instance plus a local ReplayBuffer.

    Each worker collects transitions (``explore_env`` / ``random_explore_env``)
    or evaluates a policy deterministically (``exploit_env``) and returns the
    raw buffer slices so the driver can merge them into a ReplayBufferMP.
    """

    def __init__(self, id, args):
        """Create the env, the action post-processor and the local buffer.

        :param id: rollout-worker index (also used for env port/seed offsets).
        :param args: Arguments object carrying env/agent/buffer/interactor dicts.
        """
        self.id = id
        args.init_before_training(if_main=False)
        self.env = make_env(args.env, self.id, seed=args.random_seed)
        self.env_max_step = args.env['max_step']
        # per-episode interaction horizon; defaults to the env's max step count
        self.env_horizon = args.interactor[
            'env_horizon'] if 'env_horizon' in args.interactor.keys() else self.env_max_step
        self.reward_scale = np.array(args.interactor['reward_scale'])
        # steps this worker contributes per explore call (total split evenly)
        self._horizon_step = args.interactor['horizon_step'] // args.interactor['rollout_num']
        # broadcast a scalar gamma across all reward dimensions when needed
        self.gamma = np.array(args.interactor['gamma']).reshape(-1) if type(
            args.interactor['gamma']) is np.ndarray else np.ones(
            args.env['reward_dim']) * args.interactor['gamma']
        self.action_dim = args.env['action_dim']
        # -1 discrete action space | 1 continuous action space | 0 hybrid action space
        self.action_type = args.env['action_type']
        self.agent_config = args.agent
        if self.agent_config['if_rnn']:
            self.hidden_state_dim = args.buffer['hidden_state_dim']
        # Select how raw policy outputs are mapped into the env's action space.
        if args.agent['agent_name'] in ['AgentPPO2CMA',
                                        'AgentPPO2RS',  # BUGFIX: a missing comma here used to fuse
                                        'AgentMPO',     # 'AgentPPO2RS'+'AgentMPO' into one string,
                                        'AgentPPOMO',   # so neither name ever matched.
                                        'AgentPPOMO2',
                                        ] and (args.agent['policy_type'] not in ['beta', 'beta2']):
            self.modify_action = lambda x: np.tanh(x)
        elif args.agent['agent_name'] in ['AgentHybridPPO']:
            def modify_action(action):
                # squash continuous dims; keep the trailing discrete index raw
                action[:-1] = np.tanh(action[:-1])
                return action

            self.modify_action = modify_action
        elif args.agent['agent_name'] in ['AgentHybridPPO2', 'AgentHierarchicalPPO2']:
            if args.agent['discrete_degree'] == 3:
                def modify_action(action):
                    # action[-1] is a discrete index in [0, 9) decoded base-3
                    # into clipping modes for the two continuous dims
                    def mapping(da_dim, x):
                        if da_dim == 0:
                            return np.clip(x, -1, 0)
                        elif da_dim == 2:
                            return np.clip(x, 0, 1)
                        else:
                            return 0.

                    da_idx = int(action[-1])
                    mod_a = np.zeros(action[:-1].shape)
                    mod_a[0] = mapping(da_idx // 3, action[0])
                    mod_a[1] = mapping(da_idx % 3, action[1])
                    return mod_a
            elif args.agent['discrete_degree'] == 2:
                def modify_action(action):
                    # base-2 decode: a dimension is either passed through or zeroed
                    def mapping(da_dim, x):
                        if da_dim == 1:
                            return x
                        else:
                            return 0.

                    da_idx = int(action[-1])
                    mod_a = np.zeros(action[:-1].shape)
                    mod_a[0] = mapping(da_idx // 2, action[0])
                    mod_a[1] = mapping(da_idx % 2, action[1])
                    return mod_a

            self.modify_action = modify_action
        elif args.agent['agent_name'] in ['AgentSafePPO2']:
            def modify_action(origin_action):
                # last output is a learned safety scale applied to the steer dim
                safe_action = origin_action[-1]
                action = np.tanh(origin_action[:-1])
                if abs(action[1]) > safe_action:
                    action[1] = action[1] * safe_action
                return action

            self.modify_action = modify_action
        else:
            self.modify_action = lambda x: x
        # Local per-worker buffer; sized for this worker's share plus one episode.
        if self.agent_config['if_rnn']:
            self.buffer = ReplayBuffer(
                max_len=args.buffer['max_buf'] // args.interactor['rollout_num'] + args.env['max_step'],
                if_on_policy=args.buffer['if_on_policy'],
                state_dim=args.env['state_dim'],
                action_dim=1 if args.env['action_type'] == -1 else (
                    args.env['action_dim'][0] + 1 if args.env['action_type'] == 0 else args.env['action_dim']),
                reward_dim=args.env['reward_dim'],
                if_per=args.buffer['if_per'],
                if_discrete_action=(args.buffer[
                    'action_type'] == -1) if 'action_type' in args.buffer.keys() else False,
                if_rnn=self.agent_config['if_rnn'],
                hidden_state_dim=args.buffer['hidden_state_dim'],
                if_gpu=False)
        else:
            self.buffer = ReplayBuffer(
                max_len=args.buffer['max_buf'] // args.interactor['rollout_num'] + args.env['max_step'],
                if_on_policy=args.buffer['if_on_policy'],
                state_dim=args.env['state_dim'],
                action_dim=1 if args.env['action_type'] == -1 else (
                    args.env['action_dim'][0] + 1 if args.env['action_type'] == 0 else args.env['action_dim']),
                reward_dim=args.env['reward_dim'],
                if_per=args.buffer['if_per'],
                if_discrete_action=(args.buffer[
                    'action_type'] == -1) if 'action_type' in args.buffer.keys() else False,
                if_gpu=False)
        self.record_episode = RecordEpisode(args.env)

    @ray.method(num_returns=1)
    def explore_env(self, select_action, policy):
        """Collect ~``self._horizon_step`` on-policy transitions with ``policy``.

        :param select_action: agent sampling function (stochastic).
        :param policy: policy object (usually a ray object-store handle).
        :return: (actual_step, state, action, reward, gamma[, hidden, cell])
            slices of the local buffer up to ``now_len``.
        """
        self.buffer.empty_buffer_before_explore()
        actual_step = 0
        terminal = True
        while actual_step < self._horizon_step:
            state_list = []
            action_list = []
            reward_list = []
            gamma_list = []
            if terminal:
                state = self.env.reset()
                terminal = False
            if self.agent_config['if_rnn']:
                hidden_state = None
                cell_state = None
                hidden_state_list = []
                cell_state_list = []
                if self.agent_config['infer_by_sequence']:
                    sq_state = state.reshape(1, -1)
            for i in range(self.env_horizon):
                if self.agent_config['if_rnn']:
                    if self.agent_config['infer_by_sequence']:
                        # feed the hidden state from rnn_timestep steps back,
                        # together with the accumulated state sequence
                        idx = len(hidden_state_list) if len(hidden_state_list) < self.agent_config['rnn_timestep'] else \
                            self.agent_config['rnn_timestep']
                        hidden_state_input = hidden_state_list[-idx] if len(hidden_state_list) > 0 else None
                        cell_state_input = cell_state_list[-idx] if len(cell_state_list) > 0 else None
                        action, hidden_state, cell_state = select_action(sq_state,
                                                                         policy,
                                                                         hidden_state_input,
                                                                         cell_state_input,
                                                                         explore_rate=self.agent_config[
                                                                             'explore_rate'] if 'explore_rate' in self.agent_config.keys() else 1.,
                                                                         infer_by_sequence=self.agent_config[
                                                                             'infer_by_sequence'])
                    else:
                        action, hidden_state, cell_state = select_action(state,
                                                                         policy,
                                                                         hidden_state,
                                                                         cell_state,
                                                                         explore_rate=1.)
                else:
                    action = select_action(state,
                                           policy,
                                           explore_rate=self.agent_config[
                                               'explore_rate'] if 'explore_rate' in self.agent_config.keys() else 1., )
                next_s, reward, terminal, _ = self.env.step(self.modify_action(action))
                # force episode end at the horizon boundary
                done = True if i == (self.env_horizon - 1) else terminal
                state_list.append(state)
                action_list.append(action)
                reward_list.append(reward * self.reward_scale)
                # gamma==0 marks terminal transitions for the bootstrap target
                gamma_list.append(np.zeros(self.gamma.shape) if done else self.gamma)
                if self.agent_config['if_rnn']:
                    hidden_state_list.append(hidden_state)
                    cell_state_list.append(cell_state)
                actual_step += 1
                if self.agent_config['if_rnn'] and self.agent_config['infer_by_sequence']:
                    idx = max(sq_state.shape[0] - self.agent_config['rnn_timestep'] + 1, 0)
                    # NOTE(review): sq_state[idx, :] keeps a single row, so the
                    # window never grows past 2 rows -- sq_state[idx:, :] may
                    # have been intended; confirm before changing.
                    sq_state = np.vstack((sq_state[idx, :], next_s))
                state = next_s
                if done:
                    if self.agent_config['if_rnn']:
                        self.buffer.extend_buffer(np.array(state_list),
                                                  np.array(action_list),
                                                  np.array(reward_list),
                                                  np.array(gamma_list),
                                                  np.array(hidden_state_list),
                                                  np.array(cell_state_list))
                    else:
                        self.buffer.extend_buffer(np.array(state_list),
                                                  np.array(action_list),
                                                  np.array(reward_list),
                                                  np.array(gamma_list))
                    break
        self.buffer.update_now_len_before_sample()
        if self.agent_config['if_rnn']:
            return actual_step, \
                   self.buffer.buf_state[:self.buffer.now_len], \
                   self.buffer.buf_action[:self.buffer.now_len], \
                   self.buffer.buf_reward[:self.buffer.now_len], \
                   self.buffer.buf_gamma[:self.buffer.now_len], \
                   self.buffer.buf_hidden_state[:self.buffer.now_len], \
                   self.buffer.buf_cell_state[:self.buffer.now_len]
        else:
            return actual_step, \
                   self.buffer.buf_state[:self.buffer.now_len], \
                   self.buffer.buf_action[:self.buffer.now_len], \
                   self.buffer.buf_reward[:self.buffer.now_len], \
                   self.buffer.buf_gamma[:self.buffer.now_len]

    @ray.method(num_returns=1)
    def random_explore_env(self, r_horizon_step=None):
        """Fill the buffer with uniformly random actions (warm-up data).

        :param r_horizon_step: optional step budget; clamped between
            ``self._horizon_step`` and the buffer capacity.
        :return: same tuple layout as :meth:`explore_env`.
        """
        self.buffer.empty_buffer_before_explore()
        if r_horizon_step is None:
            r_horizon_step = self._horizon_step
        else:
            r_horizon_step = max(min(r_horizon_step, self.buffer.max_len - 1), self._horizon_step)
        actual_step = 0
        while actual_step < r_horizon_step:
            state_list = []
            action_list = []
            reward_list = []
            gamma_list = []
            if self.agent_config['if_rnn']:
                hidden_state_list = []
                cell_state_list = []
            state = self.env.reset()
            for _ in range(self.env_max_step):
                if self.action_type == 0:
                    # hybrid space: uniform continuous dims + random discrete index
                    action = np.hstack((rd.uniform(-1, 1, self.action_dim[0]), rd.randint(self.action_dim[1])))
                else:
                    action = self.env.action_space.sample()
                next_s, reward, done, _ = self.env.step(action)
                state_list.append(state)
                action_list.append(action)
                reward_list.append(reward * self.reward_scale)
                gamma_list.append(np.zeros(self.gamma.shape) if done else self.gamma)
                if self.agent_config['if_rnn']:
                    # random rollouts carry no recurrent context: store zeros
                    hidden_state = np.zeros((1, self.hidden_state_dim), dtype=np.float32)
                    cell_state = np.zeros((1, self.hidden_state_dim), dtype=np.float32)
                    hidden_state_list.append(hidden_state)
                    cell_state_list.append(cell_state)
                actual_step += 1
                if done:
                    if self.agent_config['if_rnn']:
                        self.buffer.extend_buffer(np.array(state_list),
                                                  np.array(action_list),
                                                  np.array(reward_list),
                                                  np.array(gamma_list),
                                                  np.array(hidden_state_list),
                                                  np.array(cell_state_list))
                    else:
                        self.buffer.extend_buffer(np.array(state_list),
                                                  np.array(action_list),
                                                  np.array(reward_list),
                                                  np.array(gamma_list))
                    break
                state = next_s
        self.buffer.update_now_len_before_sample()
        if self.agent_config['if_rnn']:
            return actual_step, \
                   self.buffer.buf_state[:self.buffer.now_len], \
                   self.buffer.buf_action[:self.buffer.now_len], \
                   self.buffer.buf_reward[:self.buffer.now_len], \
                   self.buffer.buf_gamma[:self.buffer.now_len], \
                   self.buffer.buf_hidden_state[:self.buffer.now_len], \
                   self.buffer.buf_cell_state[:self.buffer.now_len]
        else:
            return actual_step, \
                   self.buffer.buf_state[:self.buffer.now_len], \
                   self.buffer.buf_action[:self.buffer.now_len], \
                   self.buffer.buf_reward[:self.buffer.now_len], \
                   self.buffer.buf_gamma[:self.buffer.now_len]

    def exploit_env(self, select_action, policy, eval_times):
        """Run ``eval_times`` deterministic episodes (explore_rate=0).

        :return: aggregated per-episode metrics from RecordEvaluate.
        """
        self.record_episode.clear()
        eval_record = RecordEvaluate()
        for _ in range(eval_times):
            state = self.env.reset()
            if self.agent_config['if_rnn']:
                hidden_state = None
                cell_state = None
                hidden_state_list = []
                cell_state_list = []
                if self.agent_config['infer_by_sequence']:
                    sq_state = state.reshape(1, -1)
            for _ in range(self.env_max_step):
                if self.agent_config['if_rnn']:
                    if self.agent_config['infer_by_sequence']:
                        idx = len(hidden_state_list) if len(hidden_state_list) < self.agent_config['rnn_timestep'] else \
                            self.agent_config['rnn_timestep']
                        hidden_state_input = hidden_state_list[-idx] if len(hidden_state_list) > 0 else None
                        cell_state_input = cell_state_list[-idx] if len(cell_state_list) > 0 else None
                        action, hidden_state, cell_state = select_action(sq_state,
                                                                         policy,
                                                                         hidden_state_input,
                                                                         cell_state_input,
                                                                         explore_rate=0.,
                                                                         infer_by_sequence=self.agent_config[
                                                                             'infer_by_sequence'])
                    else:
                        action, hidden_state, cell_state = select_action(state,
                                                                         policy,
                                                                         hidden_state,
                                                                         cell_state,
                                                                         explore_rate=0.)
                else:
                    action = select_action(state,
                                           policy,
                                           explore_rate=0.)
                next_s, reward, done, info = self.env.step(self.modify_action(action))
                self.record_episode.add_record(reward, info)
                if self.agent_config['if_rnn']:
                    hidden_state_list.append(hidden_state)
                    cell_state_list.append(cell_state)
                if done:
                    break
                if self.agent_config['if_rnn'] and self.agent_config['infer_by_sequence']:
                    idx = max(sq_state.shape[0] - self.agent_config['rnn_timestep'] + 1, 0)
                    # BUGFIX: this vstack update used to appear twice in a row,
                    # stacking next_s into the sequence twice per step (unlike
                    # explore_env, which applies it once).
                    sq_state = np.vstack((sq_state[idx, :], next_s))
                state = next_s
            cost_threshold = self.agent_config[
                'cost_threshold'] if 'cost_threshold' in self.agent_config.keys() else None
            eval_record.add(self.record_episode.get_result(cost_threshold))
            self.record_episode.clear()
        return eval_record.results
class Trainer(object):
    """Runs one optimization phase of the agent against the shared buffer."""

    def __init__(self, args_trainer, agent, buffer):
        """Store the agent/buffer handles and unpack training hyper-parameters.

        :param args_trainer: dict with 'sample_step', 'batch_size', 'policy_reuse'.
        :param agent: learning agent exposing to_device() and update_net().
        :param buffer: replay buffer the agent trains from.
        """
        self.agent = agent
        self.buffer = buffer
        # Unpack the three training hyper-parameters onto the instance.
        for key in ('sample_step', 'batch_size', 'policy_reuse'):
            setattr(self, key, args_trainer[key])

    def train(self):
        """Run one update pass and return the agent's training record."""
        self.agent.to_device()
        record = self.agent.update_net(
            self.buffer, self.sample_step, self.batch_size, self.policy_reuse)
        # On-policy data is stale after a single update pass: discard it.
        if self.buffer.if_on_policy:
            self.buffer.empty_buffer_before_explore()
        return record
def beginer(config, params=None):
    """Main training loop: spawn rollout workers, gather experience, train,
    and periodically evaluate/save the agent.

    :param config: experiment configuration consumed by Arguments.
    :param params: unused here -- NOTE(review): confirm whether callers rely on it.
    """
    args = Arguments(config)
    args.init_before_training()
    # put the (seeded, resolved) Arguments object once into the ray object store
    args_id = ray.put(args)
    #######Init######
    agent = args.agent['class_name'](args=args.agent)
    agent.init(args.agent['net_dim'],
               args.env['state_dim'],
               args.env['action_dim'],
               args.env['reward_dim'],
               args.buffer['if_per'])
    interactors = []
    for i in range(args.interactor['rollout_num']):
        # stagger worker start-up (each worker spins up its own env/server)
        time.sleep(1)
        interactors.append(InterActor.remote(i, args_id))
    # Central buffer sized for all workers' data plus one episode per worker.
    if args.buffer['if_rnn']:
        buffer_mp = ReplayBufferMP(
            max_len=args.buffer['max_buf'] + args.env['max_step'] * args.interactor['rollout_num'],
            state_dim=args.env['state_dim'],
            action_dim=1 if args.env['action_type'] == -1 else (
                args.env['action_dim'][0] + 1 if args.env['action_type'] == 0 else args.env['action_dim']),
            reward_dim=args.env['reward_dim'],
            if_on_policy=args.buffer['if_on_policy'],
            if_per=args.buffer['if_per'],
            if_discrete_action=(args.buffer[
                'action_type'] == -1) if 'action_type' in args.buffer.keys() else False,
            rollout_num=args.interactor['rollout_num'],
            if_rnn=args.buffer['if_rnn'],
            hidden_state_dim=args.buffer['hidden_state_dim'],
            if_gpu=args.buffer['if_gpu']
        )
    else:
        buffer_mp = ReplayBufferMP(
            max_len=args.buffer['max_buf'] + args.env['max_step'] * args.interactor['rollout_num'],
            state_dim=args.env['state_dim'],
            action_dim=1 if args.env['action_type'] == -1 else (
                args.env['action_dim'][0] + 1 if args.env['action_type'] == 0 else args.env['action_dim']),
            reward_dim=args.env['reward_dim'],
            if_on_policy=args.buffer['if_on_policy'],
            if_per=args.buffer['if_per'],
            if_discrete_action=(args.buffer[
                'action_type'] == -1) if 'action_type' in args.buffer.keys() else False,
            rollout_num=args.interactor['rollout_num'],
            if_gpu=args.buffer['if_gpu']
        )
    trainer = Trainer(args.trainer, agent, buffer_mp)
    evaluator = Evaluator(args)
    rollout_num = args.interactor['rollout_num']
    #######Random Explore Before Interacting#######
    if args.if_per_explore:
        episodes_ids = [interactors[i].random_explore_env.remote() for i in range(rollout_num)]
        assert len(episodes_ids) > 0
        for i in range(len(episodes_ids)):
            # consume worker results in completion order
            done_id, episodes_ids = ray.wait(episodes_ids)
            if args.buffer['if_rnn']:
                actual_step, buf_state, buf_action, buf_reward, buf_gamma, buf_hidden_state, buf_cell_state = ray.get(
                    done_id[0])
                buffer_mp.extend_buffer(buf_state, buf_action, buf_reward, buf_gamma, i, buf_hidden_state,
                                        buf_cell_state)
            else:
                actual_step, buf_state, buf_action, buf_reward, buf_gamma = ray.get(done_id[0])
                buffer_mp.extend_buffer(buf_state, buf_action, buf_reward, buf_gamma, i)
    #######Interacting Beginning#######
    start_time = time.time()
    eval_step = 0
    # NOTE(review): 'or record_satisfy_return' keeps looping once the target
    # return is reached -- 'and not ...' may have been intended; confirm.
    while (evaluator.record_totalstep < evaluator.break_step) or (evaluator.record_satisfy_return):
        agent.to_cpu()
        # ship the current policy weights to workers via the object store
        policy_id = ray.put(agent.policy)
        #######Explore Environment#######
        episodes_ids = [interactors[i].explore_env.remote(agent.select_action, policy_id) for i in
                        range(rollout_num)]
        sample_step = 0
        for i in range(len(episodes_ids)):
            done_id, episodes_ids = ray.wait(episodes_ids)
            if args.buffer['if_rnn']:
                actual_step, buf_state, buf_action, buf_reward, buf_gamma, buf_hidden_state, buf_cell_state = ray.get(
                    done_id[0])
                sample_step += actual_step
                buffer_mp.extend_buffer(buf_state, buf_action, buf_reward, buf_gamma, i, buf_hidden_state,
                                        buf_cell_state)
            else:
                actual_step, buf_state, buf_action, buf_reward, buf_gamma = ray.get(done_id[0])
                sample_step += actual_step
                buffer_mp.extend_buffer(buf_state, buf_action, buf_reward, buf_gamma, i)
        evaluator.update_totalstep(sample_step)
        #######Training#######
        train_record = trainer.train()
        eval_step += sample_step
        #######Evaluate#######
        if evaluator.eval_gap < eval_step:
            evaluator.tb_train(train_record)
            eval_step = 0
            agent.to_cpu()
            policy_id = ray.put(agent.policy)
            evalRecorder = RecordEvaluate()
            if_eval = True
            #######pre-eval#######
            # cheap screening pass: skip the full eval if even the best
            # pre-eval episode falls short of the target return
            if evaluator.pre_eval_times > 0:
                eval_results = ray.get(
                    [interactors[i].exploit_env.remote(agent.select_action,
                                                       policy_id,
                                                       eval_times=evaluator.pre_eval_times)
                     for i in range(rollout_num)])
                for eval_result in eval_results:
                    evalRecorder.add_many(eval_result)
                eval_record = evalRecorder.eval_result()
                if eval_record['return'][0]['max'] < evaluator.target_return:
                    if_eval = False
                    evaluator.tb_eval(eval_record)
            #######eval#######
            if if_eval:
                eval_results = ray.get(
                    [interactors[i].exploit_env.remote(agent.select_action,
                                                       policy_id,
                                                       eval_times=evaluator.eval_times)
                     for i in range(rollout_num)])
                for eval_result in eval_results:
                    evalRecorder.add_many(eval_result)
                eval_record = evalRecorder.eval_result()
                evaluator.tb_eval(eval_record)
                #######Save Model#######
                evaluator.analyze_result(eval_record)
                evaluator.iter_print(train_record, eval_record, use_time=(time.time() - start_time))
                evaluator.save_model(agent)
                start_time = time.time()
    print(f'#######Experiment Finished!\t TotalTime:{evaluator.total_time:8.0f}s #######')
| [
"numpy.random.seed",
"numpy.ones",
"numpy.clip",
"torch.set_default_dtype",
"ray.put",
"numpy.random.randint",
"shutil.rmtree",
"ray_elegantrl.evaluate.RecordEvaluate",
"ray_elegantrl.evaluate.RecordEpisode",
"datetime.datetime.now",
"copy.deepcopy",
"ray.method",
"numpy.tanh",
"torch.manu... | [((11571, 11596), 'ray.method', 'ray.method', ([], {'num_returns': '(1)'}), '(num_returns=1)\n', (11581, 11596), False, 'import ray\n'), ((17025, 17050), 'ray.method', 'ray.method', ([], {'num_returns': '(1)'}), '(num_returns=1)\n', (17035, 17050), False, 'import ray\n'), ((26763, 26776), 'ray.put', 'ray.put', (['args'], {}), '(args)\n', (26770, 26776), False, 'import ray\n'), ((28928, 28943), 'ray_elegantrl.evaluate.Evaluator', 'Evaluator', (['args'], {}), '(args)\n', (28937, 28943), False, 'from ray_elegantrl.evaluate import RecordEpisode, RecordEvaluate, Evaluator\n'), ((29921, 29932), 'time.time', 'time.time', ([], {}), '()\n', (29930, 29932), False, 'import time\n'), ((1472, 1489), 'copy.deepcopy', 'deepcopy', (['configs'], {}), '(configs)\n', (1480, 1489), False, 'from copy import deepcopy\n'), ((5317, 5355), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float32'], {}), '(torch.float32)\n', (5340, 5355), False, 'import torch\n'), ((5364, 5399), 'torch.manual_seed', 'torch.manual_seed', (['self.random_seed'], {}), '(self.random_seed)\n', (5381, 5399), False, 'import torch\n'), ((5408, 5440), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (5422, 5440), True, 'import numpy as np\n'), ((5811, 5862), 'gym.make', 'gym.make', (["env_dict['id']"], {}), "(env_dict['id'], **env_dict['params_name'])\n", (5819, 5862), False, 'import gym\n'), ((5887, 5911), 'gym.make', 'gym.make', (["env_dict['id']"], {}), "(env_dict['id'])\n", (5895, 5911), False, 'import gym\n'), ((6387, 6428), 'numpy.array', 'np.array', (["args.interactor['reward_scale']"], {}), "(args.interactor['reward_scale'])\n", (6395, 6428), True, 'import numpy as np\n'), ((11541, 11564), 'ray_elegantrl.evaluate.RecordEpisode', 'RecordEpisode', (['args.env'], {}), '(args.env)\n', (11554, 11564), False, 'from ray_elegantrl.evaluate import RecordEpisode, RecordEvaluate, Evaluator\n'), ((20606, 20622), 
'ray_elegantrl.evaluate.RecordEvaluate', 'RecordEvaluate', ([], {}), '()\n', (20620, 20622), False, 'from ray_elegantrl.evaluate import RecordEpisode, RecordEvaluate, Evaluator\n'), ((27126, 27139), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (27136, 27139), False, 'import time\n'), ((30094, 30115), 'ray.put', 'ray.put', (['agent.policy'], {}), '(agent.policy)\n', (30101, 30115), False, 'import ray\n'), ((4810, 4846), 'os.makedirs', 'os.makedirs', (['self.cwd'], {'exist_ok': '(True)'}), '(self.cwd, exist_ok=True)\n', (4821, 4846), False, 'import os\n'), ((4951, 4957), 'ruamel.yaml.main.YAML', 'YAML', ([], {}), '()\n', (4955, 4957), False, 'from ruamel.yaml.main import YAML\n'), ((29286, 29308), 'ray.wait', 'ray.wait', (['episodes_ids'], {}), '(episodes_ids)\n', (29294, 29308), False, 'import ray\n'), ((30404, 30426), 'ray.wait', 'ray.wait', (['episodes_ids'], {}), '(episodes_ids)\n', (30412, 30426), False, 'import ray\n'), ((31416, 31437), 'ray.put', 'ray.put', (['agent.policy'], {}), '(agent.policy)\n', (31423, 31437), False, 'import ray\n'), ((31465, 31481), 'ray_elegantrl.evaluate.RecordEvaluate', 'RecordEvaluate', ([], {}), '()\n', (31479, 31481), False, 'from ray_elegantrl.evaluate import RecordEpisode, RecordEvaluate, Evaluator\n'), ((33074, 33085), 'time.time', 'time.time', ([], {}), '()\n', (33083, 33085), False, 'import time\n'), ((4712, 4755), 'shutil.rmtree', 'shutil.rmtree', (['self.cwd'], {'ignore_errors': '(True)'}), '(self.cwd, ignore_errors=True)\n', (4725, 4755), False, 'import shutil\n'), ((6658, 6689), 'numpy.ones', 'np.ones', (["args.env['reward_dim']"], {}), "(args.env['reward_dim'])\n", (6665, 6689), True, 'import numpy as np\n'), ((7398, 7408), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (7405, 7408), True, 'import numpy as np\n'), ((29457, 29476), 'ray.get', 'ray.get', (['done_id[0]'], {}), '(done_id[0])\n', (29464, 29476), False, 'import ray\n'), ((29755, 29774), 'ray.get', 'ray.get', (['done_id[0]'], {}), '(done_id[0])\n', 
(29762, 29774), False, 'import ray\n'), ((30575, 30594), 'ray.get', 'ray.get', (['done_id[0]'], {}), '(done_id[0])\n', (30582, 30594), False, 'import ray\n'), ((30916, 30935), 'ray.get', 'ray.get', (['done_id[0]'], {}), '(done_id[0])\n', (30923, 30935), False, 'import ray\n'), ((6545, 6579), 'numpy.array', 'np.array', (["args.interactor['gamma']"], {}), "(args.interactor['gamma'])\n", (6553, 6579), True, 'import numpy as np\n'), ((7539, 7559), 'numpy.tanh', 'np.tanh', (['action[:-1]'], {}), '(action[:-1])\n', (7546, 7559), True, 'import numpy as np\n'), ((15241, 15278), 'numpy.vstack', 'np.vstack', (['(sq_state[idx, :], next_s)'], {}), '((sq_state[idx, :], next_s))\n', (15250, 15278), True, 'import numpy as np\n'), ((18456, 18510), 'numpy.zeros', 'np.zeros', (['(1, self.hidden_state_dim)'], {'dtype': 'np.float32'}), '((1, self.hidden_state_dim), dtype=np.float32)\n', (18464, 18510), True, 'import numpy as np\n'), ((18544, 18598), 'numpy.zeros', 'np.zeros', (['(1, self.hidden_state_dim)'], {'dtype': 'np.float32'}), '((1, self.hidden_state_dim), dtype=np.float32)\n', (18552, 18598), True, 'import numpy as np\n'), ((23406, 23443), 'numpy.vstack', 'np.vstack', (['(sq_state[idx, :], next_s)'], {}), '((sq_state[idx, :], next_s))\n', (23415, 23443), True, 'import numpy as np\n'), ((23475, 23512), 'numpy.vstack', 'np.vstack', (['(sq_state[idx, :], next_s)'], {}), '((sq_state[idx, :], next_s))\n', (23484, 23512), True, 'import numpy as np\n'), ((2819, 2842), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2840, 2842), False, 'import datetime\n'), ((14779, 14805), 'numpy.zeros', 'np.zeros', (['self.gamma.shape'], {}), '(self.gamma.shape)\n', (14787, 14805), True, 'import numpy as np\n'), ((18321, 18347), 'numpy.zeros', 'np.zeros', (['self.gamma.shape'], {}), '(self.gamma.shape)\n', (18329, 18347), True, 'import numpy as np\n'), ((32982, 32993), 'time.time', 'time.time', ([], {}), '()\n', (32991, 32993), False, 'import time\n'), ((8192, 8219), 
'numpy.zeros', 'np.zeros', (['action[:-1].shape'], {}), '(action[:-1].shape)\n', (8200, 8219), True, 'import numpy as np\n'), ((9190, 9217), 'numpy.tanh', 'np.tanh', (['origin_action[:-1]'], {}), '(origin_action[:-1])\n', (9197, 9217), True, 'import numpy as np\n'), ((15437, 15457), 'numpy.array', 'np.array', (['state_list'], {}), '(state_list)\n', (15445, 15457), True, 'import numpy as np\n'), ((15509, 15530), 'numpy.array', 'np.array', (['action_list'], {}), '(action_list)\n', (15517, 15530), True, 'import numpy as np\n'), ((15582, 15603), 'numpy.array', 'np.array', (['reward_list'], {}), '(reward_list)\n', (15590, 15603), True, 'import numpy as np\n'), ((15655, 15675), 'numpy.array', 'np.array', (['gamma_list'], {}), '(gamma_list)\n', (15663, 15675), True, 'import numpy as np\n'), ((15727, 15754), 'numpy.array', 'np.array', (['hidden_state_list'], {}), '(hidden_state_list)\n', (15735, 15754), True, 'import numpy as np\n'), ((15806, 15831), 'numpy.array', 'np.array', (['cell_state_list'], {}), '(cell_state_list)\n', (15814, 15831), True, 'import numpy as np\n'), ((15909, 15929), 'numpy.array', 'np.array', (['state_list'], {}), '(state_list)\n', (15917, 15929), True, 'import numpy as np\n'), ((15981, 16002), 'numpy.array', 'np.array', (['action_list'], {}), '(action_list)\n', (15989, 16002), True, 'import numpy as np\n'), ((16054, 16075), 'numpy.array', 'np.array', (['reward_list'], {}), '(reward_list)\n', (16062, 16075), True, 'import numpy as np\n'), ((16127, 16147), 'numpy.array', 'np.array', (['gamma_list'], {}), '(gamma_list)\n', (16135, 16147), True, 'import numpy as np\n'), ((17922, 17959), 'numpy.random.uniform', 'rd.uniform', (['(-1)', '(1)', 'self.action_dim[0]'], {}), '(-1, 1, self.action_dim[0])\n', (17932, 17959), True, 'import numpy.random as rd\n'), ((17961, 17991), 'numpy.random.randint', 'rd.randint', (['self.action_dim[1]'], {}), '(self.action_dim[1])\n', (17971, 17991), True, 'import numpy.random as rd\n'), ((18873, 18893), 'numpy.array', 
'np.array', (['state_list'], {}), '(state_list)\n', (18881, 18893), True, 'import numpy as np\n'), ((18945, 18966), 'numpy.array', 'np.array', (['action_list'], {}), '(action_list)\n', (18953, 18966), True, 'import numpy as np\n'), ((19018, 19039), 'numpy.array', 'np.array', (['reward_list'], {}), '(reward_list)\n', (19026, 19039), True, 'import numpy as np\n'), ((19091, 19111), 'numpy.array', 'np.array', (['gamma_list'], {}), '(gamma_list)\n', (19099, 19111), True, 'import numpy as np\n'), ((19163, 19190), 'numpy.array', 'np.array', (['hidden_state_list'], {}), '(hidden_state_list)\n', (19171, 19190), True, 'import numpy as np\n'), ((19242, 19267), 'numpy.array', 'np.array', (['cell_state_list'], {}), '(cell_state_list)\n', (19250, 19267), True, 'import numpy as np\n'), ((19345, 19365), 'numpy.array', 'np.array', (['state_list'], {}), '(state_list)\n', (19353, 19365), True, 'import numpy as np\n'), ((19417, 19438), 'numpy.array', 'np.array', (['action_list'], {}), '(action_list)\n', (19425, 19438), True, 'import numpy as np\n'), ((19490, 19511), 'numpy.array', 'np.array', (['reward_list'], {}), '(reward_list)\n', (19498, 19511), True, 'import numpy as np\n'), ((19563, 19583), 'numpy.array', 'np.array', (['gamma_list'], {}), '(gamma_list)\n', (19571, 19583), True, 'import numpy as np\n'), ((8778, 8805), 'numpy.zeros', 'np.zeros', (['action[:-1].shape'], {}), '(action[:-1].shape)\n', (8786, 8805), True, 'import numpy as np\n'), ((7938, 7955), 'numpy.clip', 'np.clip', (['x', '(-1)', '(0)'], {}), '(x, -1, 0)\n', (7945, 7955), True, 'import numpy as np\n'), ((8033, 8049), 'numpy.clip', 'np.clip', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (8040, 8049), True, 'import numpy as np\n')] |
#!/usr/bin/python3
#-*- coding: UTF-8 -*-
import collections
import numpy as np
import tensorflow as tf
'''
author: log16
Data: 2017/5/4
'''
#------------------------------- data preprocessing ---------------------------#
poetry_file =r'C:\Users\huaru\PycharmProjects\LSTM_CNN\data\poetry.txt'
# collected poems: one bracket-delimited content string per poem
poetrys = []
with open(poetry_file, "rb") as f:
    for line in f:
        try:
            line = line.decode('UTF-8')
            line = line.strip(u'\n')
            # each line is "title:content" (full-width colon separator)
            title, content = line.strip(u' ').split(u':')
            content = content.replace(u' ',u'')
            # drop poems containing annotation markers or bracket characters
            if u'_' in content or u'(' in content or u'(' in content or u'《' in content or u'[' in content:
                continue
            # keep only poems of a workable length
            if len(content) < 5 or len(content) > 79:
                continue
            # '[' and ']' serve as start-/end-of-poem tokens
            content = u'[' + content + u']'
            poetrys.append(content)
        except Exception as e:
            # skip undecodable or malformed lines silently
            pass
# sort poems by character count so batches have similar lengths
poetrys = sorted(poetrys,key=lambda line: len(line))
print('唐诗总数: ', len(poetrys))
# count how often every character occurs
all_words = []
for poetry in poetrys:
    all_words += [word for word in poetry]
counter = collections.Counter(all_words)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
words, _ = zip(*count_pairs)
# keep the most frequent characters (here: all of them) plus ' ' as padding
words = words[:len(words)] + (' ',)
# map each character to an integer id (frequent characters get small ids)
word_num_map = dict(zip(words, range(len(words))))
# convert each poem into a vector of character ids
to_num = lambda word: word_num_map.get(word, len(words))
poetrys_vector = [ list(map(to_num, poetry)) for poetry in poetrys]
#[[314, 3199, 367, 1556, 26, 179, 680, 0, 3199, 41, 506, 40, 151, 4, 98, 1],
#[339, 3, 133, 31, 302, 653, 512, 0, 37, 148, 294, 25, 54, 833, 3, 1, 965, 1315, 377, 1700, 562, 21, 37, 0, 2, 1253, 21, 36, 264, 877, 809, 1]
#....]
# train on 64 poems per batch
batch_size = 64
n_chunk = len(poetrys_vector) // batch_size
class DataSet(object):
    """Shuffled mini-batch provider over the module-level ``poetrys_vector``."""
    def __init__(self,data_size):
        # number of poems available
        self._data_size = data_size
        # completed passes over the whole data set
        self._epochs_completed = 0
        # cursor inside the current epoch
        self._index_in_epoch = 0
        # shuffle table mapping batch position -> poem index
        self._data_index = np.arange(data_size)
    def next_batch(self,batch_size):
        """Return (features, labels) for the next batch, reshuffling at epoch end."""
        start = self._index_in_epoch
        if start + batch_size > self._data_size:
            # not enough samples left in this epoch: reshuffle and restart at 0
            np.random.shuffle(self._data_index)
            self._epochs_completed = self._epochs_completed + 1
            self._index_in_epoch = batch_size
            full_batch_features ,full_batch_labels = self.data_batch(0,batch_size)
            return full_batch_features ,full_batch_labels
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            full_batch_features ,full_batch_labels = self.data_batch(start,end)
            if self._index_in_epoch == self._data_size:
                # epoch ended exactly on a batch boundary
                self._index_in_epoch = 0
                self._epochs_completed = self._epochs_completed + 1
                np.random.shuffle(self._data_index)
            return full_batch_features,full_batch_labels
    def data_batch(self,start,end):
        """Build padded (x, y) id matrices for shuffle positions ``start:end``."""
        batches = []
        for i in range(start,end):
            batches.append(poetrys_vector[self._data_index[i]])
        # pad every poem with the space id up to the longest poem in the batch
        length = max(map(len,batches))
        xdata = np.full((end - start,length), word_num_map[' '], np.int32)
        for row in range(end - start):
            xdata[row,:len(batches[row])] = batches[row]
        ydata = np.copy(xdata)
        # target sequence = input sequence shifted left by one character
        ydata[:,:-1] = xdata[:,1:]
        return xdata,ydata
#---------------------------------------RNN--------------------------------------#
# Placeholders for one batch of input character ids and the shifted targets.
input_data = tf.placeholder(tf.int32, [batch_size, None])
output_targets = tf.placeholder(tf.int32, [batch_size, None])
# Build the RNN language model.
def neural_network(model='lstm', rnn_size=128, num_layers=2):
    """Build the character-level RNN language-model graph.

    Args:
        model: one of 'rnn', 'gru' or 'lstm' selecting the cell type.
        rnn_size: hidden units per cell (also used as the embedding size).
        num_layers: number of stacked RNN layers.

    Returns:
        Tuple ``(logits, last_state, probs, cell, initial_state)``.

    Raises:
        ValueError: if ``model`` is not a supported cell type.
    """
    if model == 'rnn':
        cell_fun = tf.nn.rnn_cell.BasicRNNCell
    elif model == 'gru':
        cell_fun = tf.nn.rnn_cell.GRUCell
    elif model == 'lstm':
        cell_fun = tf.nn.rnn_cell.BasicLSTMCell
    else:
        # Previously an unknown model name left cell_fun unbound and crashed
        # later with a NameError; fail fast with a clear message instead.
        raise ValueError("unknown model type: %r (expected 'rnn', 'gru' or 'lstm')" % (model,))
    cell = cell_fun(rnn_size, state_is_tuple=True)
    cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers, state_is_tuple=True)
    initial_state = cell.zero_state(batch_size, tf.float32)
    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable("softmax_w", [rnn_size, len(words)])
        softmax_b = tf.get_variable("softmax_b", [len(words)])
        # Keep the (large) embedding table on the CPU.
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [len(words), rnn_size])
            inputs = tf.nn.embedding_lookup(embedding, input_data)
    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
    # Flatten to (batch*time, rnn_size) before the softmax projection.
    output = tf.reshape(outputs, [-1, rnn_size])
    logits = tf.matmul(output, softmax_w) + softmax_b
    probs = tf.nn.softmax(logits)
    return logits, last_state, probs, cell, initial_state
def load_model(sess, saver, ckpt_path):
    """Restore the newest checkpoint under ckpt_path, or initialize fresh.

    Returns the epoch number encoded after the last '-' in the checkpoint
    filename, or -1 when no checkpoint exists and variables were initialized.
    """
    latest_ckpt = tf.train.latest_checkpoint(ckpt_path)
    if not latest_ckpt:
        print ('building model from scratch')
        sess.run(tf.global_variables_initializer())
        return -1
    print ('resume from', latest_ckpt)
    saver.restore(sess, latest_ckpt)
    # The epoch number is the numeric suffix of the checkpoint filename.
    return int(latest_ckpt[latest_ckpt.rindex('-') + 1:])
# Training entry point.
def train_neural_network():
    """Train the character-level RNN on the vectorized poems.

    Resumes from the newest checkpoint under 'model/' when one exists and
    saves a checkpoint after every epoch.
    """
    logits, last_state, _, _, _ = neural_network()
    targets = tf.reshape(output_targets, [-1])
    # Per-timestep cross entropy with uniform weights.
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
        [logits], [targets], [tf.ones_like(targets, dtype=tf.float32)], len(words))
    cost = tf.reduce_mean(loss)
    learning_rate = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    # Clip gradients to a global norm of 5 to keep training stable.
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(zip(grads, tvars))
    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True
    trainds = DataSet(len(poetrys_vector))
    with tf.Session(config=session_config) as sess:
        with tf.device('/gpu:2'):
            sess.run(tf.initialize_all_variables())
            saver = tf.train.Saver(tf.all_variables())
            last_epoch = load_model(sess, saver, 'model/')
            for epoch in range(last_epoch + 1, 100):
                # Exponentially decay the learning rate each epoch.
                sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** epoch)))
                all_loss = 0.0
                for batche in range(n_chunk):
                    x, y = trainds.next_batch(batch_size)
                    train_loss, _, _ = sess.run(
                        [cost, last_state, train_op],
                        feed_dict={input_data: x, output_targets: y})
                    all_loss += train_loss
                    if batche % 50 == 1:
                        print(epoch, batche, 0.002 * (0.97 ** epoch), train_loss)
                saver.save(sess, 'model/poetry.module', global_step=epoch)
                print(epoch, ' Loss: ', all_loss * 1.0 / n_chunk)
train_neural_network()
| [
"tensorflow.trainable_variables",
"tensorflow.reshape",
"tensorflow.ConfigProto",
"tensorflow.matmul",
"tensorflow.train.latest_checkpoint",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.assign",
"numpy.full",
"tensorflow.nn.softmax",
"numpy.copy",
"tensorflow.variable_scope",
"tensorfl... | [((1151, 1181), 'collections.Counter', 'collections.Counter', (['all_words'], {}), '(all_words)\n', (1170, 1181), False, 'import collections\n'), ((3538, 3582), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (3552, 3582), True, 'import tensorflow as tf\n'), ((3602, 3646), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (3616, 3646), True, 'import tensorflow as tf\n'), ((4014, 4083), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([cell] * num_layers)'], {'state_is_tuple': '(True)'}), '([cell] * num_layers, state_is_tuple=True)\n', (4041, 4083), True, 'import tensorflow as tf\n'), ((4549, 4624), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'inputs'], {'initial_state': 'initial_state', 'scope': '"""rnnlm"""'}), "(cell, inputs, initial_state=initial_state, scope='rnnlm')\n", (4566, 4624), True, 'import tensorflow as tf\n'), ((4640, 4675), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, rnn_size]'], {}), '(outputs, [-1, rnn_size])\n', (4650, 4675), True, 'import tensorflow as tf\n'), ((4749, 4770), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (4762, 4770), True, 'import tensorflow as tf\n'), ((4890, 4927), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_path'], {}), '(ckpt_path)\n', (4916, 4927), True, 'import tensorflow as tf\n'), ((5324, 5356), 'tensorflow.reshape', 'tf.reshape', (['output_targets', '[-1]'], {}), '(output_targets, [-1])\n', (5334, 5356), True, 'import tensorflow as tf\n'), ((5634, 5654), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (5648, 5654), True, 'import tensorflow as tf\n'), ((5677, 5710), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (5688, 5710), True, 'import tensorflow as tf\n'), ((5725, 
5749), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5747, 5749), True, 'import tensorflow as tf\n'), ((5904, 5941), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (5926, 5941), True, 'import tensorflow as tf\n'), ((6029, 6070), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (6043, 6070), True, 'import tensorflow as tf\n'), ((2064, 2084), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (2073, 2084), True, 'import numpy as np\n'), ((3177, 3236), 'numpy.full', 'np.full', (['(end - start, length)', "word_num_map[' ']", 'np.int32'], {}), "((end - start, length), word_num_map[' '], np.int32)\n", (3184, 3236), True, 'import numpy as np\n'), ((3354, 3368), 'numpy.copy', 'np.copy', (['xdata'], {}), '(xdata)\n', (3361, 3368), True, 'import numpy as np\n'), ((4165, 4191), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnnlm"""'], {}), "('rnnlm')\n", (4182, 4191), True, 'import tensorflow as tf\n'), ((4694, 4722), 'tensorflow.matmul', 'tf.matmul', (['output', 'softmax_w'], {}), '(output, softmax_w)\n', (4703, 4722), True, 'import tensorflow as tf\n'), ((5790, 5815), 'tensorflow.gradients', 'tf.gradients', (['cost', 'tvars'], {}), '(cost, tvars)\n', (5802, 5815), True, 'import tensorflow as tf\n'), ((6181, 6214), 'tensorflow.Session', 'tf.Session', ([], {'config': 'Session_config'}), '(config=Session_config)\n', (6191, 6214), True, 'import tensorflow as tf\n'), ((2221, 2256), 'numpy.random.shuffle', 'np.random.shuffle', (['self._data_index'], {}), '(self._data_index)\n', (2238, 2256), True, 'import numpy as np\n'), ((4348, 4367), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (4357, 4367), True, 'import tensorflow as tf\n'), ((4471, 4516), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'input_data'], {}), '(embedding, input_data)\n', 
(4493, 4516), True, 'import tensorflow as tf\n'), ((5167, 5200), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5198, 5200), True, 'import tensorflow as tf\n'), ((5441, 5480), 'tensorflow.ones_like', 'tf.ones_like', (['targets'], {'dtype': 'tf.float32'}), '(targets, dtype=tf.float32)\n', (5453, 5480), True, 'import tensorflow as tf\n'), ((6237, 6256), 'tensorflow.device', 'tf.device', (['"""/gpu:2"""'], {}), "('/gpu:2')\n", (6246, 6256), True, 'import tensorflow as tf\n'), ((2870, 2905), 'numpy.random.shuffle', 'np.random.shuffle', (['self._data_index'], {}), '(self._data_index)\n', (2887, 2905), True, 'import numpy as np\n'), ((6281, 6310), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (6308, 6310), True, 'import tensorflow as tf\n'), ((6353, 6371), 'tensorflow.all_variables', 'tf.all_variables', ([], {}), '()\n', (6369, 6371), True, 'import tensorflow as tf\n'), ((6510, 6557), 'tensorflow.assign', 'tf.assign', (['learning_rate', '(0.002 * 0.97 ** epoch)'], {}), '(learning_rate, 0.002 * 0.97 ** epoch)\n', (6519, 6557), True, 'import tensorflow as tf\n')] |
"""This module provides a complicated algorithm for making voxels out of regularly
gridded points. Considering that this algorithm is rather complex, we are keeping
it in its own module until we can simplify it, clean up the code, and make it
capable of handling non-uniformly gridded points
"""
__all__ = [
'VoxelizePoints',
]
__displayname__ = 'Voxelize'
import numpy as np
import vtk
from vtk.util import keys
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.util import numpy_support as nps
from ..base import FilterBase
from .. import _helpers
from ..version import checkNumpy
from .xyz import RotationTool
from .. import interface
###############################################################################
class VoxelizePoints(FilterBase):
    """This makes a ``vtkUnstructuredGrid`` of scattered points given voxel
    sizes as input arrays. This assumes that the data is at least 2-Dimensional
    on the XY Plane.
    """
    __displayname__ = 'Voxelize Points'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        FilterBase.__init__(self,
            nInputPorts=1, inputType='vtkPolyData',
            nOutputPorts=1, outputType='vtkUnstructuredGrid')
        # Cell spacings: scalars or per-point arrays; None until set/estimated.
        self.__dx = kwargs.get('dx', None)
        self.__dy = kwargs.get('dy', None)
        self.__dz = kwargs.get('dz', None)
        # When True, spacing and rotation are estimated from the input points.
        self.__estimateGrid = kwargs.get('estimate', True)
        # Fallback voxel size for axes whose spacing cannot be determined.
        self.__safe = kwargs.get('safe', 10.0)
        # Not controlled by user: recovered rotation angle (radians).
        self.__angle = 0.0

    def AddFieldData(self, grid):
        """An internal helper to add the recovered angle and cell sizes to the
        output grid as field data.
        """
        # Recovered rotation angle (stored in degrees for readability)
        a = vtk.vtkDoubleArray()
        a.SetName('Recovered Angle (Deg.)')
        a.SetNumberOfValues(1)
        a.SetValue(0, np.rad2deg(self.__angle))
        grid.GetFieldData().AddArray(a)
        # Recovered cell size on each axis
        s = vtk.vtkDoubleArray()
        s.SetName('Recovered Cell Sizes')
        s.SetNumberOfComponents(3)
        s.InsertNextTuple3(self.__dx, self.__dy, self.__dz)
        grid.GetFieldData().AddArray(s)
        return grid

    @staticmethod
    def AddCellData(grid, arr, name):
        """Add a NumPy array as cell data to the given grid input."""
        c = interface.convertArray(arr, name=name)
        grid.GetCellData().AddArray(c)
        return grid

    def EstimateUniformSpacing(self, x, y, z):
        """Estimate cell spacings (and rotation) assuming the input points make
        up some sort of uniformly spaced grid on at least an XY plane.

        Side effects: sets the internal dx/dy/dz spacings and recovered angle.

        Returns:
            tuple: the x, y, z coordinate arrays (rotated into grid alignment
            when more than one point is given).
        """
        # TODO: implement ability to rotate around Z axis (think PoroTomo vs UTM)
        # TODO: implement way to estimate rotation
        assert(len(x) == len(y) == len(z))
        num = len(x)
        if num == 1:
            # Only one point: no spacing can be estimated, so fall back to the
            # safe cell size on every axis.
            # BUG FIX: this used to return a 7-tuple while every caller unpacks
            # exactly three values, which raised a ValueError.
            self.__dx = self.__dy = self.__dz = self.__safe
            self.__angle = 0.0
            return x, y, z
        r = RotationTool()
        xr, yr, zr, dx, dy, angle = r.EstimateAndRotate(x, y, z)
        self.__angle = angle
        # Z spacing: average gap between unique Z levels (safe size when flat).
        uz = np.diff(np.unique(z))
        if len(uz) > 0:
            dz = np.average(uz)
        else:
            dz = self.__safe
        self.__dx = dx
        self.__dy = dy
        self.__dz = dz
        return xr, yr, zr

    def PointsToGrid(self, xo, yo, zo, dx, dy, dz, grid=None):
        """Convert XYZ points to a ``vtkUnstructuredGrid`` of voxel cells."""
        if not checkNumpy(alert='warn'):
            return grid
        if grid is None:
            grid = vtk.vtkUnstructuredGrid()
        # TODO: Check dtypes on all arrays. Need to be floats
        if self.__estimateGrid:
            x, y, z = self.EstimateUniformSpacing(xo, yo, zo)
            # BUG FIX: use the spacings just recovered by the estimator; the
            # passed-in dx/dy/dz may still be None in estimation mode.
            dx, dy, dz = self.__dx, self.__dy, self.__dz
        else:
            x, y, z = xo, yo, zo
            dx, dy, dz = self.__dx, self.__dy, self.__dz
        # Per-point spacing arrays must define a spacing for every point.
        if isinstance(dx, np.ndarray) and len(dx) != len(x):
            raise _helpers.PVGeoError('X-Cell spacings are not properly defined for all points.')
        if isinstance(dy, np.ndarray) and len(dy) != len(y):
            # BUG FIX: message previously said 'X-Cell'
            raise _helpers.PVGeoError('Y-Cell spacings are not properly defined for all points.')
        if isinstance(dz, np.ndarray) and len(dz) != len(z):
            # BUG FIX: message previously said 'X-Cell'
            raise _helpers.PVGeoError('Z-Cell spacings are not properly defined for all points.')
        numCells = len(x)
        # Generate the eight corner nodes of the voxel around every point
        #- Bottom
        c_n1 = np.stack(((x - dx/2), (y - dy/2), (z - dz/2)), axis=1)
        c_n2 = np.stack(((x + dx/2), (y - dy/2), (z - dz/2)), axis=1)
        c_n3 = np.stack(((x - dx/2), (y + dy/2), (z - dz/2)), axis=1)
        c_n4 = np.stack(((x + dx/2), (y + dy/2), (z - dz/2)), axis=1)
        #- Top
        c_n5 = np.stack(((x - dx/2), (y - dy/2), (z + dz/2)), axis=1)
        c_n6 = np.stack(((x + dx/2), (y - dy/2), (z + dz/2)), axis=1)
        c_n7 = np.stack(((x - dx/2), (y + dy/2), (z + dz/2)), axis=1)
        c_n8 = np.stack(((x + dx/2), (y + dy/2), (z + dz/2)), axis=1)
        #- Concatenate
        all_nodes = np.concatenate((
            c_n1, c_n2, c_n3, c_n4,
            c_n5, c_n6, c_n7, c_n8), axis=0)
        # Search for unique nodes and use the min cell size as the tolerance
        TOLERANCE = np.min([dx, dy]) / 2.0
        # Round the XY plane by the tolerance so nearly-equal corners merge
        txy = np.around(all_nodes[:, 0:2] / TOLERANCE)
        all_nodes[:, 0:2] = txy
        unique_nodes, ind_nodes = np.unique(all_nodes, return_inverse=True, axis=0)
        unique_nodes[:, 0:2] *= TOLERANCE
        # Make the cells
        pts = vtk.vtkPoints()
        if self.__estimateGrid:
            # Rotate the shared nodes back to the original orientation
            unique_nodes[:, 0:2] = RotationTool.Rotate(unique_nodes[:, 0:2], -self.__angle)
            self.AddFieldData(grid)
        # Add unique nodes as points in output
        pts.SetData(interface.convertArray(unique_nodes))
        # Add cell vertices: 8 node indices per cell, in VTK_VOXEL order
        j = np.multiply(np.tile(np.arange(0, 8, 1), numCells), numCells)
        arridx = np.add(j, np.repeat(np.arange(0, numCells, 1, dtype=int), 8))
        ids = ind_nodes[arridx].reshape((numCells, 8))
        # Prepend the per-cell node count (8) required by vtkCellArray
        cellsMat = np.concatenate(
            (np.ones((ids.shape[0], 1), dtype=np.int64) * ids.shape[1], ids), axis=1).ravel()
        cells = vtk.vtkCellArray()
        cells.SetNumberOfCells(numCells)
        cells.SetCells(numCells, nps.numpy_to_vtkIdTypeArray(cellsMat, deep=True))
        # Set the output
        grid.SetPoints(pts)
        grid.SetCells(vtk.VTK_VOXEL, cells)
        return grid

    def _CopyArrays(self, pdi, pdo):
        """Internal helper to copy the input's point-data arrays onto the
        voxels as cell data (one input point becomes one cell)."""
        for i in range(pdi.GetPointData().GetNumberOfArrays()):
            arr = pdi.GetPointData().GetArray(i)
            _helpers.addArray(pdo, 1, arr)  # adds to CELL data
        return pdo

    def RequestData(self, request, inInfoVec, outInfoVec):
        """Used by pipeline to generate output."""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfoVec, 0, 0)
        pdo = self.GetOutputData(outInfoVec, 0)
        # Perform task
        wpdi = dsa.WrapDataObject(pdi)
        pts = wpdi.Points
        x, y, z = pts[:, 0], pts[:, 1], pts[:, 2]
        self.PointsToGrid(x, y, z,
                          self.__dx, self.__dy, self.__dz, grid=pdo)
        # Now append data to grid
        self._CopyArrays(pdi, pdo)
        return 1

    #### Seters and Geters ####

    def SetSafeSize(self, safe):
        """A voxel size to use if a spacing cannot be determined for an axis."""
        if self.__safe != safe:
            self.__safe = safe
            self.Modified()

    def SetDeltaX(self, dx):
        """Set the X cells spacing

        Args:
            dx (float or np.array(floats)): the spacing(s) for the cells in
                the X-direction
        """
        self.__dx = dx
        self.Modified()

    def SetDeltaY(self, dy):
        """Set the Y cells spacing

        Args:
            dy (float or np.array(floats)): the spacing(s) for the cells in
                the Y-direction
        """
        self.__dy = dy
        self.Modified()

    def SetDeltaZ(self, dz):
        """Set the Z cells spacing

        Args:
            dz (float or np.array(floats)): the spacing(s) for the cells in
                the Z-direction
        """
        self.__dz = dz
        self.SetSafeSize(np.min(dz))
        self.Modified()

    def SetDeltas(self, dx, dy, dz):
        """Set the cell spacings for each axial direction

        Args:
            dx (float or np.array(floats)): the spacing(s) for the cells in
                the X-direction
            dy (float or np.array(floats)): the spacing(s) for the cells in
                the Y-direction
            dz (float or np.array(floats)): the spacing(s) for the cells in
                the Z-direction
        """
        self.SetDeltaX(dx)
        self.SetDeltaY(dy)
        self.SetDeltaZ(dz)

    def SetEstimateGrid(self, flag):
        """Set a flag on whether or not to estimate the grid spacing/rotation."""
        if self.__estimateGrid != flag:
            self.__estimateGrid = flag
            self.Modified()

    def GetRecoveredAngle(self, degrees=True):
        """Returns the recovered angle if set to recover the input grid. If the
        input points are rotated, then this angle will reflect a close
        approximation of that rotation.

        Args:
            degrees (bool): A flag on to return decimal degrees or radians.
        """
        if degrees:
            return np.rad2deg(self.__angle)
        return self.__angle

    def GetSpacing(self):
        """Get the cell spacings as a (dx, dy, dz) tuple."""
        return (self.__dx, self.__dy, self.__dz)
###############################################################################
| [
"numpy.stack",
"vtk.vtkUnstructuredGrid",
"numpy.average",
"numpy.concatenate",
"vtk.vtkPoints",
"vtk.vtkDoubleArray",
"numpy.ones",
"numpy.rad2deg",
"numpy.around",
"numpy.min",
"vtk.util.numpy_support.numpy_to_vtkIdTypeArray",
"vtk.numpy_interface.dataset_adapter.WrapDataObject",
"numpy.ar... | [((1667, 1687), 'vtk.vtkDoubleArray', 'vtk.vtkDoubleArray', ([], {}), '()\n', (1685, 1687), False, 'import vtk\n'), ((1888, 1908), 'vtk.vtkDoubleArray', 'vtk.vtkDoubleArray', ([], {}), '()\n', (1906, 1908), False, 'import vtk\n'), ((4306, 4360), 'numpy.stack', 'np.stack', (['(x - dx / 2, y - dy / 2, z - dz / 2)'], {'axis': '(1)'}), '((x - dx / 2, y - dy / 2, z - dz / 2), axis=1)\n', (4314, 4360), True, 'import numpy as np\n'), ((4379, 4433), 'numpy.stack', 'np.stack', (['(x + dx / 2, y - dy / 2, z - dz / 2)'], {'axis': '(1)'}), '((x + dx / 2, y - dy / 2, z - dz / 2), axis=1)\n', (4387, 4433), True, 'import numpy as np\n'), ((4452, 4506), 'numpy.stack', 'np.stack', (['(x - dx / 2, y + dy / 2, z - dz / 2)'], {'axis': '(1)'}), '((x - dx / 2, y + dy / 2, z - dz / 2), axis=1)\n', (4460, 4506), True, 'import numpy as np\n'), ((4525, 4579), 'numpy.stack', 'np.stack', (['(x + dx / 2, y + dy / 2, z - dz / 2)'], {'axis': '(1)'}), '((x + dx / 2, y + dy / 2, z - dz / 2), axis=1)\n', (4533, 4579), True, 'import numpy as np\n'), ((4613, 4667), 'numpy.stack', 'np.stack', (['(x - dx / 2, y - dy / 2, z + dz / 2)'], {'axis': '(1)'}), '((x - dx / 2, y - dy / 2, z + dz / 2), axis=1)\n', (4621, 4667), True, 'import numpy as np\n'), ((4686, 4740), 'numpy.stack', 'np.stack', (['(x + dx / 2, y - dy / 2, z + dz / 2)'], {'axis': '(1)'}), '((x + dx / 2, y - dy / 2, z + dz / 2), axis=1)\n', (4694, 4740), True, 'import numpy as np\n'), ((4759, 4813), 'numpy.stack', 'np.stack', (['(x - dx / 2, y + dy / 2, z + dz / 2)'], {'axis': '(1)'}), '((x - dx / 2, y + dy / 2, z + dz / 2), axis=1)\n', (4767, 4813), True, 'import numpy as np\n'), ((4832, 4886), 'numpy.stack', 'np.stack', (['(x + dx / 2, y + dy / 2, z + dz / 2)'], {'axis': '(1)'}), '((x + dx / 2, y + dy / 2, z + dz / 2), axis=1)\n', (4840, 4886), True, 'import numpy as np\n'), ((4934, 5006), 'numpy.concatenate', 'np.concatenate', (['(c_n1, c_n2, c_n3, c_n4, c_n5, c_n6, c_n7, c_n8)'], {'axis': '(0)'}), '((c_n1, c_n2, c_n3, c_n4, 
c_n5, c_n6, c_n7, c_n8), axis=0)\n', (4948, 5006), True, 'import numpy as np\n'), ((5281, 5321), 'numpy.around', 'np.around', (['(all_nodes[:, 0:2] / TOLERANCE)'], {}), '(all_nodes[:, 0:2] / TOLERANCE)\n', (5290, 5321), True, 'import numpy as np\n'), ((5384, 5433), 'numpy.unique', 'np.unique', (['all_nodes'], {'return_inverse': '(True)', 'axis': '(0)'}), '(all_nodes, return_inverse=True, axis=0)\n', (5393, 5433), True, 'import numpy as np\n'), ((5550, 5565), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (5563, 5565), False, 'import vtk\n'), ((5582, 5600), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (5598, 5600), False, 'import vtk\n'), ((6274, 6292), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (6290, 6292), False, 'import vtk\n'), ((7156, 7179), 'vtk.numpy_interface.dataset_adapter.WrapDataObject', 'dsa.WrapDataObject', (['pdi'], {}), '(pdi)\n', (7174, 7179), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((1785, 1809), 'numpy.rad2deg', 'np.rad2deg', (['self.__angle'], {}), '(self.__angle)\n', (1795, 1809), True, 'import numpy as np\n'), ((3006, 3018), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (3015, 3018), True, 'import numpy as np\n'), ((3049, 3063), 'numpy.average', 'np.average', (['uz'], {}), '(uz)\n', (3059, 3063), True, 'import numpy as np\n'), ((3432, 3457), 'vtk.vtkUnstructuredGrid', 'vtk.vtkUnstructuredGrid', ([], {}), '()\n', (3455, 3457), False, 'import vtk\n'), ((5202, 5218), 'numpy.min', 'np.min', (['[dx, dy]'], {}), '([dx, dy])\n', (5208, 5218), True, 'import numpy as np\n'), ((6367, 6415), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'nps.numpy_to_vtkIdTypeArray', (['cellsMat'], {'deep': '(True)'}), '(cellsMat, deep=True)\n', (6394, 6415), True, 'from vtk.util import numpy_support as nps\n'), ((8420, 8430), 'numpy.min', 'np.min', (['dz'], {}), '(dz)\n', (8426, 8430), True, 'import numpy as np\n'), ((9589, 9613), 'numpy.rad2deg', 'np.rad2deg', (['self.__angle'], {}), 
'(self.__angle)\n', (9599, 9613), True, 'import numpy as np\n'), ((5967, 5985), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(1)'], {}), '(0, 8, 1)\n', (5976, 5985), True, 'import numpy as np\n'), ((6045, 6081), 'numpy.arange', 'np.arange', (['(0)', 'numCells', '(1)'], {'dtype': 'int'}), '(0, numCells, 1, dtype=int)\n', (6054, 6081), True, 'import numpy as np\n'), ((6178, 6220), 'numpy.ones', 'np.ones', (['(ids.shape[0], 1)'], {'dtype': 'np.int64'}), '((ids.shape[0], 1), dtype=np.int64)\n', (6185, 6220), True, 'import numpy as np\n')] |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grid_qubit."""
import pickle
import numpy as np
import pytest
import cirq
def test_init():
    """GridQubit and GridQid store their constructor arguments."""
    qubit = cirq.GridQubit(3, 4)
    assert (qubit.row, qubit.col) == (3, 4)
    qid = cirq.GridQid(1, 2, dimension=3)
    assert (qid.row, qid.col, qid.dimension) == (1, 2, 3)
def test_eq():
    """Grid qubits/qids are equal iff row, col, and dimension all match."""
    eq = cirq.testing.EqualsTester()
    # A qubit and a dimension-2 qid at the same coordinates are equal.
    for row, col in [(0, 0), (1, 0), (0, 1)]:
        eq.make_equality_group(
            lambda row=row, col=col: cirq.GridQubit(row, col),
            lambda row=row, col=col: cirq.GridQid(row, col, dimension=2),
        )
    eq.make_equality_group(lambda: cirq.GridQid(0, 0, dimension=3))
def test_pickled_hash():
    """Unpickling recomputes the cached hash instead of trusting the stored one."""
    q = cirq.GridQubit(3, 4)
    q_bad = cirq.GridQubit(3, 4)
    q_bad._hash += 1  # corrupt the cached hash
    assert q_bad == q
    assert hash(q_bad) != hash(q)
    q_ok = pickle.loads(pickle.dumps(q_bad))
    assert q_ok == q
    assert hash(q_ok) == hash(q)
def test_str():
    """str() shows coordinates, plus the dimension for qids."""
    assert f'{cirq.GridQubit(5, 2)}' == 'q(5, 2)'
    assert f'{cirq.GridQid(5, 2, dimension=3)}' == 'q(5, 2) (d=3)'
def test_circuit_info():
    """Diagram wire symbols match the qubit/qid string forms."""
    expected_qubit_info = cirq.CircuitDiagramInfo(wire_symbols=('(5, 2)',))
    assert cirq.circuit_diagram_info(cirq.GridQubit(5, 2)) == expected_qubit_info
    expected_qid_info = cirq.CircuitDiagramInfo(wire_symbols=('(5, 2) (d=3)',))
    assert cirq.circuit_diagram_info(cirq.GridQid(5, 2, dimension=3)) == expected_qid_info
def test_repr():
    """repr() of qubits and qids round-trips through eval."""
    for q in (cirq.GridQubit(5, 2), cirq.GridQid(5, 2, dimension=3)):
        cirq.testing.assert_equivalent_repr(q)
def test_cmp():
    """Ordering is lexicographic on (row, col, dimension)."""
    order = cirq.testing.OrderTester()
    order.add_ascending_equivalence_group(cirq.GridQubit(0, 0), cirq.GridQid(0, 0, dimension=2))
    ascending = [cirq.GridQid(0, 0, dimension=3)]
    for row, col in [(0, 1), (1, 0), (1, 1)]:
        # At each coordinate: dimension 1 < dimension 2 (qubit) < dimension 3.
        ascending.append(cirq.GridQid(row, col, dimension=1))
        ascending.append(cirq.GridQubit(row, col))
        ascending.append(cirq.GridQid(row, col, dimension=3))
    order.add_ascending(*ascending)
def test_cmp_failure():
    """Comparing a grid qubit/qid against an int raises TypeError."""
    for q in (cirq.GridQubit(0, 0), cirq.GridQid(1, 1, dimension=3)):
        with pytest.raises(TypeError, match='not supported between instances'):
            _ = 0 < q
        with pytest.raises(TypeError, match='not supported between instances'):
            _ = q < 0
def test_is_adjacent():
    """Adjacency means Manhattan distance exactly 1."""
    origin = cirq.GridQubit(0, 0)
    # Distance-1 neighbors are adjacent.
    for row, col in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
        assert origin.is_adjacent(cirq.GridQubit(row, col))
    # Diagonals and distance-2 moves are not adjacent.
    for row, col in [(1, -1), (1, 1), (-1, -1), (-1, 1), (2, 0)]:
        assert not origin.is_adjacent(cirq.GridQubit(row, col))
    # Also holds far from the origin.
    assert cirq.GridQubit(500, 999).is_adjacent(cirq.GridQubit(501, 999))
    assert not cirq.GridQubit(500, 999).is_adjacent(cirq.GridQubit(5034, 999))
def test_neighbors():
    """neighbors() yields the four adjacent qubits, optionally restricted."""
    q = cirq.GridQubit(1, 1)
    expected = {
        cirq.GridQubit(1, 2),
        cirq.GridQubit(2, 1),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 0),
    }
    assert q.neighbors() == expected
    # Restricting to a qubit list filters the result.
    restricted_qubits = [cirq.GridQubit(2, 1), cirq.GridQubit(2, 2)]
    assert q.neighbors(restricted_qubits) == {cirq.GridQubit(2, 1)}
def test_square():
    """square() lists a row-major square of qubits/qids at a given corner."""
    def qubits_at(coords):
        return [cirq.GridQubit(r, c) for r, c in coords]

    def qids_at(coords):
        return [cirq.GridQid(r, c, dimension=3) for r, c in coords]

    offset_coords = [(1, 1), (1, 2), (2, 1), (2, 2)]
    origin_coords = [(0, 0), (0, 1), (1, 0), (1, 1)]
    assert cirq.GridQubit.square(2, top=1, left=1) == qubits_at(offset_coords)
    assert cirq.GridQubit.square(2) == qubits_at(origin_coords)
    assert cirq.GridQid.square(2, top=1, left=1, dimension=3) == qids_at(offset_coords)
    assert cirq.GridQid.square(2, dimension=3) == qids_at(origin_coords)
def test_rect():
    """rect() lists a row-major rows x cols rectangle of qubits/qids."""
    assert cirq.GridQubit.rect(1, 2, top=5, left=6) == [cirq.GridQubit(5, 6), cirq.GridQubit(5, 7)]
    assert cirq.GridQubit.rect(2, 2) == [
        cirq.GridQubit(r, c) for r in range(2) for c in range(2)
    ]
    assert cirq.GridQid.rect(1, 2, top=5, left=6, dimension=3) == [
        cirq.GridQid(5, 6, dimension=3),
        cirq.GridQid(5, 7, dimension=3),
    ]
    assert cirq.GridQid.rect(2, 2, dimension=3) == [
        cirq.GridQid(r, c, dimension=3) for r in range(2) for c in range(2)
    ]
def test_diagram():
    """from_diagram places a qubit/qid at every non-'-' character of an ASCII map."""
    s = """
-----AB-----
----ABCD----
---ABCDEF---
--ABCDEFGH--
-ABCDEFGHIJ-
ABCDEFGHIJKL
-CDEFGHIJKL-
--EFGHIJKL--
---GHIJKL---
----IJKL----
-----KL-----
"""
    # The diamond above marks 72 positions.
    assert len(cirq.GridQubit.from_diagram(s)) == 72
    assert len(cirq.GridQid.from_diagram(s, dimension=3)) == 72
    # Leading whitespace common to all lines is stripped before parsing.
    s2 = """
    AB
    BA"""
    assert cirq.GridQubit.from_diagram(s2) == [
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 0),
        cirq.GridQubit(1, 1),
    ]
    assert cirq.GridQid.from_diagram(s2, dimension=3) == [
        cirq.GridQid(0, 0, dimension=3),
        cirq.GridQid(0, 1, dimension=3),
        cirq.GridQid(1, 0, dimension=3),
        cirq.GridQid(1, 1, dimension=3),
    ]
    # Characters outside the allowed set are rejected.
    with pytest.raises(ValueError, match="Input string has invalid character"):
        cirq.GridQubit.from_diagram('@')
def test_addition_subtraction():
    """Coordinate-wise + and - with tuples and with other grid qubits/qids."""
    q = cirq.GridQubit

    def qid(row, col):
        return cirq.GridQid(row, col, dimension=3)

    # (delta, expected sum, expected difference) applied to the point (1, 2).
    cases = [((2, 5), (3, 7), (-1, -3)), ((0, 0), (1, 2), (1, 2)), ((-1, 0), (0, 2), (2, 2))]
    for delta, plus, minus in cases:
        assert q(1, 2) + delta == q(*plus)
        assert q(1, 2) - delta == q(*minus)
        assert qid(1, 2) + delta == qid(*plus)
        assert qid(1, 2) - delta == qid(*minus)
    # Tuples also work on the left-hand side.
    assert (2, 5) + q(1, 2) == q(3, 7)
    assert (2, 5) - q(1, 2) == q(1, 3)
    assert (2, 5) + qid(1, 2) == qid(3, 7)
    assert (2, 5) - qid(1, 2) == qid(1, 3)
    # Qubit-with-qubit and qid-with-qid arithmetic.
    assert q(1, 2) + q(3, 5) == q(4, 7)
    assert q(3, 5) - q(2, 1) == q(1, 4)
    assert q(1, -2) + q(3, 5) == q(4, 3)
    assert qid(1, 2) + qid(3, 5) == qid(4, 7)
    assert qid(3, 5) - qid(2, 1) == qid(1, 4)
    assert qid(1, -2) + qid(3, 5) == qid(4, 3)
@pytest.mark.parametrize('dtype', (np.int8, np.int16, np.int32, np.int64, int))
def test_addition_subtraction_numpy_array(dtype):
    """Integer numpy arrays behave like tuple offsets for + and -."""
    plus_cases = [((1, 2), (2, 4)), ((0, 0), (1, 2)), ((-1, 0), (0, 2))]
    minus_cases = [((1, 2), (0, 0)), ((0, 0), (1, 2)), ((-1, 0), (2, 2))]
    for (d_row, d_col), (row, col) in plus_cases:
        delta = np.array([d_row, d_col], dtype=dtype)
        assert cirq.GridQubit(1, 2) + delta == cirq.GridQubit(row, col)
        assert cirq.GridQid(1, 2, dimension=3) + delta == cirq.GridQid(row, col, dimension=3)
    for (d_row, d_col), (row, col) in minus_cases:
        delta = np.array([d_row, d_col], dtype=dtype)
        assert cirq.GridQid(1, 2, dimension=3) - delta == cirq.GridQid(row, col, dimension=3)
    for (d_row, d_col), (row, col) in minus_cases[:2]:
        delta = np.array([d_row, d_col], dtype=dtype)
        assert cirq.GridQubit(1, 2) - delta == cirq.GridQubit(row, col)
def test_unsupported_add():
    """Arithmetic with non-coordinate operands raises TypeError."""
    bad_operands = [
        (1, '1'),
        ((1,), '(1,)'),
        ((1, 2, 3), '(1, 2, 3)'),
        ((1, 2.0), '(1, 2.0)'),
    ]
    for operand, pattern in bad_operands:
        with pytest.raises(TypeError, match=pattern):
            _ = cirq.GridQubit(1, 1) + operand
    with pytest.raises(TypeError, match='1'):
        _ = cirq.GridQubit(1, 1) - 1
    # Float or wrong-length numpy arrays are rejected too.
    with pytest.raises(TypeError, match='[1., 2.]'):
        _ = cirq.GridQubit(1, 1) + np.array([1.0, 2.0])
    with pytest.raises(TypeError, match='[1, 2, 3]'):
        _ = cirq.GridQubit(1, 1) + np.array([1, 2, 3], dtype=int)
def test_addition_subtraction_type_error():
    """Strings and dimension-mismatched qids are rejected with TypeError."""
    for q in (cirq.GridQubit(5, 3), cirq.GridQid(5, 3, dimension=3)):
        with pytest.raises(TypeError, match="bort"):
            _ = q + "bort"
        with pytest.raises(TypeError, match="bort"):
            _ = q - "bort"
    with pytest.raises(TypeError, match="Can only add GridQids with identical dimension."):
        _ = cirq.GridQid(5, 3, dimension=3) + cirq.GridQid(3, 5, dimension=4)
    with pytest.raises(TypeError, match="Can only subtract GridQids with identical dimension."):
        _ = cirq.GridQid(5, 3, dimension=3) - cirq.GridQid(3, 5, dimension=4)
def test_neg():
    """Unary minus negates both coordinates; GridQid keeps its dimension."""
    qubit = cirq.GridQubit(1, 2)
    assert -qubit == cirq.GridQubit(-1, -2)
    qid = cirq.GridQid(1, 2, dimension=3)
    assert -qid == cirq.GridQid(-1, -2, dimension=3)
def test_to_json():
    """_json_dict_ exposes row/col, plus dimension for GridQid."""
    qubit_dict = cirq.GridQubit(5, 6)._json_dict_()
    assert qubit_dict == {'row': 5, 'col': 6}
    qid_dict = cirq.GridQid(5, 6, dimension=3)._json_dict_()
    assert qid_dict == {'row': 5, 'col': 6, 'dimension': 3}
def test_immutable():
with pytest.raises(AttributeError, match="can't set attribute"):
q = cirq.GridQubit(1, 2)
q.col = 3
with pytest.raises(AttributeError, match="can't set attribute"):
q = cirq.GridQubit(1, 2)
q.row = 3
with pytest.raises(AttributeError, match="can't set attribute"):
q = cirq.GridQid(1, 2, dimension=3)
q.col = 3
with pytest.raises(AttributeError, match="can't set attribute"):
q = cirq.GridQid(1, 2, dimension=3)
q.row = 3
with pytest.raises(AttributeError, match="can't set attribute"):
q = cirq.GridQid(1, 2, dimension=3)
q.dimension = 3
def test_complex():
    """complex() maps a GridQubit(row, col) to col + row*1j."""
    value = complex(cirq.GridQubit(row=1, col=2))
    assert value == 2 + 1j
    assert isinstance(value, complex)
| [
"pickle.loads",
"cirq.testing.EqualsTester",
"cirq.GridQid.square",
"cirq.GridQubit.rect",
"cirq.GridQubit.square",
"cirq.CircuitDiagramInfo",
"cirq.GridQid.from_diagram",
"cirq.GridQubit",
"cirq.testing.OrderTester",
"pytest.raises",
"numpy.array",
"cirq.GridQid.rect",
"pytest.mark.parametr... | [((8679, 8757), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '(np.int8, np.int16, np.int32, np.int64, int)'], {}), "('dtype', (np.int8, np.int16, np.int32, np.int64, int))\n", (8702, 8757), False, 'import pytest\n'), ((701, 721), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(4)'], {}), '(3, 4)\n', (715, 721), False, 'import cirq\n'), ((775, 806), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (787, 806), False, 'import cirq\n'), ((905, 932), 'cirq.testing.EqualsTester', 'cirq.testing.EqualsTester', ([], {}), '()\n', (930, 932), False, 'import cirq\n'), ((1330, 1350), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(4)'], {}), '(3, 4)\n', (1344, 1350), False, 'import cirq\n'), ((1363, 1383), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(4)'], {}), '(3, 4)\n', (1377, 1383), False, 'import cirq\n'), ((1472, 1491), 'pickle.dumps', 'pickle.dumps', (['q_bad'], {}), '(q_bad)\n', (1484, 1491), False, 'import pickle\n'), ((1503, 1521), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (1515, 1521), False, 'import pickle\n'), ((2191, 2217), 'cirq.testing.OrderTester', 'cirq.testing.OrderTester', ([], {}), '()\n', (2215, 2217), False, 'import cirq\n'), ((1800, 1849), 'cirq.CircuitDiagramInfo', 'cirq.CircuitDiagramInfo', ([], {'wire_symbols': "('(5, 2)',)"}), "(wire_symbols=('(5, 2)',))\n", (1823, 1849), False, 'import cirq\n'), ((1937, 1992), 'cirq.CircuitDiagramInfo', 'cirq.CircuitDiagramInfo', ([], {'wire_symbols': "('(5, 2) (d=3)',)"}), "(wire_symbols=('(5, 2) (d=3)',))\n", (1960, 1992), False, 'import cirq\n'), ((2066, 2086), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(2)'], {}), '(5, 2)\n', (2080, 2086), False, 'import cirq\n'), ((2128, 2159), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(2)'], {'dimension': '(3)'}), '(5, 2, dimension=3)\n', (2140, 2159), False, 'import cirq\n'), ((2260, 2280), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 
0)\n', (2274, 2280), False, 'import cirq\n'), ((2282, 2313), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(2)'}), '(0, 0, dimension=2)\n', (2294, 2313), False, 'import cirq\n'), ((2348, 2379), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(3)'}), '(0, 0, dimension=3)\n', (2360, 2379), False, 'import cirq\n'), ((2389, 2420), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(1)'], {'dimension': '(1)'}), '(0, 1, dimension=1)\n', (2401, 2420), False, 'import cirq\n'), ((2430, 2450), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (2444, 2450), False, 'import cirq\n'), ((2460, 2491), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(1)'], {'dimension': '(3)'}), '(0, 1, dimension=3)\n', (2472, 2491), False, 'import cirq\n'), ((2501, 2532), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(0)'], {'dimension': '(1)'}), '(1, 0, dimension=1)\n', (2513, 2532), False, 'import cirq\n'), ((2542, 2562), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (2556, 2562), False, 'import cirq\n'), ((2572, 2603), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(0)'], {'dimension': '(3)'}), '(1, 0, dimension=3)\n', (2584, 2603), False, 'import cirq\n'), ((2613, 2644), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(1)'}), '(1, 1, dimension=1)\n', (2625, 2644), False, 'import cirq\n'), ((2654, 2674), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (2668, 2674), False, 'import cirq\n'), ((2684, 2715), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (2696, 2715), False, 'import cirq\n'), ((2758, 2823), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""not supported between instances"""'}), "(TypeError, match='not supported between instances')\n", (2771, 2823), False, 'import pytest\n'), ((2871, 2936), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""not supported between instances"""'}), "(TypeError, match='not supported 
between instances')\n", (2884, 2936), False, 'import pytest\n'), ((2984, 3049), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""not supported between instances"""'}), "(TypeError, match='not supported between instances')\n", (2997, 3049), False, 'import pytest\n'), ((3108, 3173), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""not supported between instances"""'}), "(TypeError, match='not supported between instances')\n", (3121, 3173), False, 'import pytest\n'), ((3293, 3313), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (3307, 3313), False, 'import cirq\n'), ((3359, 3380), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(-1)'], {}), '(0, -1)\n', (3373, 3380), False, 'import cirq\n'), ((3426, 3446), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (3440, 3446), False, 'import cirq\n'), ((3492, 3513), 'cirq.GridQubit', 'cirq.GridQubit', (['(-1)', '(0)'], {}), '(-1, 0)\n', (3506, 3513), False, 'import cirq\n'), ((3924, 3948), 'cirq.GridQubit', 'cirq.GridQubit', (['(501)', '(999)'], {}), '(501, 999)\n', (3938, 3948), False, 'import cirq\n'), ((4289, 4309), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(1)'], {}), '(2, 1)\n', (4303, 4309), False, 'import cirq\n'), ((4311, 4331), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(2)'], {}), '(2, 2)\n', (4325, 4331), False, 'import cirq\n'), ((4452, 4491), 'cirq.GridQubit.square', 'cirq.GridQubit.square', (['(2)'], {'top': '(1)', 'left': '(1)'}), '(2, top=1, left=1)\n', (4473, 4491), False, 'import cirq\n'), ((4634, 4658), 'cirq.GridQubit.square', 'cirq.GridQubit.square', (['(2)'], {}), '(2)\n', (4655, 4658), False, 'import cirq\n'), ((4802, 4852), 'cirq.GridQid.square', 'cirq.GridQid.square', (['(2)'], {'top': '(1)', 'left': '(1)', 'dimension': '(3)'}), '(2, top=1, left=1, dimension=3)\n', (4821, 4852), False, 'import cirq\n'), ((5039, 5074), 'cirq.GridQid.square', 'cirq.GridQid.square', (['(2)'], {'dimension': '(3)'}), '(2, dimension=3)\n', (5058, 
5074), False, 'import cirq\n'), ((5280, 5320), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(2)'], {'top': '(5)', 'left': '(6)'}), '(1, 2, top=5, left=6)\n', (5299, 5320), False, 'import cirq\n'), ((5380, 5405), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(2)', '(2)'], {}), '(2, 2)\n', (5399, 5405), False, 'import cirq\n'), ((5549, 5600), 'cirq.GridQid.rect', 'cirq.GridQid.rect', (['(1)', '(2)'], {'top': '(5)', 'left': '(6)', 'dimension': '(3)'}), '(1, 2, top=5, left=6, dimension=3)\n', (5566, 5600), False, 'import cirq\n'), ((5705, 5741), 'cirq.GridQid.rect', 'cirq.GridQid.rect', (['(2)', '(2)'], {'dimension': '(3)'}), '(2, 2, dimension=3)\n', (5722, 5741), False, 'import cirq\n'), ((6249, 6280), 'cirq.GridQubit.from_diagram', 'cirq.GridQubit.from_diagram', (['s2'], {}), '(s2)\n', (6276, 6280), False, 'import cirq\n'), ((6423, 6465), 'cirq.GridQid.from_diagram', 'cirq.GridQid.from_diagram', (['s2'], {'dimension': '(3)'}), '(s2, dimension=3)\n', (6448, 6465), False, 'import cirq\n'), ((6651, 6720), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Input string has invalid character"""'}), "(ValueError, match='Input string has invalid character')\n", (6664, 6720), False, 'import pytest\n'), ((6730, 6762), 'cirq.GridQubit.from_diagram', 'cirq.GridQubit.from_diagram', (['"""@"""'], {}), "('@')\n", (6757, 6762), False, 'import cirq\n'), ((6859, 6879), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(7)'], {}), '(3, 7)\n', (6873, 6879), False, 'import cirq\n'), ((6924, 6944), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (6938, 6944), False, 'import cirq\n'), ((6990, 7010), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(2)'], {}), '(0, 2)\n', (7004, 7010), False, 'import cirq\n'), ((7055, 7077), 'cirq.GridQubit', 'cirq.GridQubit', (['(-1)', '(-3)'], {}), '(-1, -3)\n', (7069, 7077), False, 'import cirq\n'), ((7122, 7142), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7136, 7142), False, 'import 
cirq\n'), ((7188, 7208), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(2)'], {}), '(2, 2)\n', (7202, 7208), False, 'import cirq\n'), ((7254, 7274), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(7)'], {}), '(3, 7)\n', (7268, 7274), False, 'import cirq\n'), ((7319, 7339), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(3)'], {}), '(1, 3)\n', (7333, 7339), False, 'import cirq\n'), ((7399, 7419), 'cirq.GridQubit', 'cirq.GridQubit', (['(4)', '(7)'], {}), '(4, 7)\n', (7413, 7419), False, 'import cirq\n'), ((7478, 7498), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(4)'], {}), '(1, 4)\n', (7492, 7498), False, 'import cirq\n'), ((7558, 7578), 'cirq.GridQubit', 'cirq.GridQubit', (['(4)', '(3)'], {}), '(4, 3)\n', (7572, 7578), False, 'import cirq\n'), ((7650, 7681), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(7)'], {'dimension': '(3)'}), '(3, 7, dimension=3)\n', (7662, 7681), False, 'import cirq\n'), ((7737, 7768), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (7749, 7768), False, 'import cirq\n'), ((7825, 7856), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(2)'], {'dimension': '(3)'}), '(0, 2, dimension=3)\n', (7837, 7856), False, 'import cirq\n'), ((7912, 7945), 'cirq.GridQid', 'cirq.GridQid', (['(-1)', '(-3)'], {'dimension': '(3)'}), '(-1, -3, dimension=3)\n', (7924, 7945), False, 'import cirq\n'), ((8001, 8032), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (8013, 8032), False, 'import cirq\n'), ((8089, 8120), 'cirq.GridQid', 'cirq.GridQid', (['(2)', '(2)'], {'dimension': '(3)'}), '(2, 2, dimension=3)\n', (8101, 8120), False, 'import cirq\n'), ((8177, 8208), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(7)'], {'dimension': '(3)'}), '(3, 7, dimension=3)\n', (8189, 8208), False, 'import cirq\n'), ((8264, 8295), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(3)'], {'dimension': '(3)'}), '(1, 3, dimension=3)\n', (8276, 8295), False, 'import cirq\n'), ((8377, 8408), 'cirq.GridQid', 
'cirq.GridQid', (['(4)', '(7)'], {'dimension': '(3)'}), '(4, 7, dimension=3)\n', (8389, 8408), False, 'import cirq\n'), ((8503, 8534), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(4)'], {'dimension': '(3)'}), '(1, 4, dimension=3)\n', (8515, 8534), False, 'import cirq\n'), ((8630, 8661), 'cirq.GridQid', 'cirq.GridQid', (['(4)', '(3)'], {'dimension': '(3)'}), '(4, 3, dimension=3)\n', (8642, 8661), False, 'import cirq\n'), ((8875, 8895), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(4)'], {}), '(2, 4)\n', (8889, 8895), False, 'import cirq\n'), ((8963, 8983), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (8977, 8983), False, 'import cirq\n'), ((9052, 9072), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(2)'], {}), '(0, 2)\n', (9066, 9072), False, 'import cirq\n'), ((9140, 9160), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (9154, 9160), False, 'import cirq\n'), ((9228, 9248), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (9242, 9248), False, 'import cirq\n'), ((9328, 9359), 'cirq.GridQid', 'cirq.GridQid', (['(2)', '(2)'], {'dimension': '(3)'}), '(2, 2, dimension=3)\n', (9340, 9359), False, 'import cirq\n'), ((9453, 9484), 'cirq.GridQid', 'cirq.GridQid', (['(2)', '(4)'], {'dimension': '(3)'}), '(2, 4, dimension=3)\n', (9465, 9484), False, 'import cirq\n'), ((9577, 9608), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9589, 9608), False, 'import cirq\n'), ((9702, 9733), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(2)'], {'dimension': '(3)'}), '(0, 2, dimension=3)\n', (9714, 9733), False, 'import cirq\n'), ((9826, 9857), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(3)'}), '(0, 0, dimension=3)\n', (9838, 9857), False, 'import cirq\n'), ((9950, 9981), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9962, 9981), False, 'import cirq\n'), ((10075, 10106), 'cirq.GridQid', 'cirq.GridQid', 
(['(2)', '(2)'], {'dimension': '(3)'}), '(2, 2, dimension=3)\n', (10087, 10106), False, 'import cirq\n'), ((10160, 10195), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""1"""'}), "(TypeError, match='1')\n", (10173, 10195), False, 'import pytest\n'), ((10243, 10281), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""(1,)"""'}), "(TypeError, match='(1,)')\n", (10256, 10281), False, 'import pytest\n'), ((10332, 10375), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""(1, 2, 3)"""'}), "(TypeError, match='(1, 2, 3)')\n", (10345, 10375), False, 'import pytest\n'), ((10431, 10473), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""(1, 2.0)"""'}), "(TypeError, match='(1, 2.0)')\n", (10444, 10473), False, 'import pytest\n'), ((10529, 10564), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""1"""'}), "(TypeError, match='1')\n", (10542, 10564), False, 'import pytest\n'), ((10613, 10655), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""[1., 2.]"""'}), "(TypeError, match='[1., 2.]')\n", (10626, 10655), False, 'import pytest\n'), ((10722, 10765), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""[1, 2, 3]"""'}), "(TypeError, match='[1, 2, 3]')\n", (10735, 10765), False, 'import pytest\n'), ((10888, 10926), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""bort"""'}), "(TypeError, match='bort')\n", (10901, 10926), False, 'import pytest\n'), ((10979, 11017), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""bort"""'}), "(TypeError, match='bort')\n", (10992, 11017), False, 'import pytest\n'), ((11071, 11109), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""bort"""'}), "(TypeError, match='bort')\n", (11084, 11109), False, 'import pytest\n'), ((11173, 11211), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""bort"""'}), "(TypeError, match='bort')\n", (11186, 11211), False, 'import pytest\n'), ((11276, 11362), 
'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Can only add GridQids with identical dimension."""'}), "(TypeError, match=\n 'Can only add GridQids with identical dimension.')\n", (11289, 11362), False, 'import pytest\n'), ((11446, 11537), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Can only subtract GridQids with identical dimension."""'}), "(TypeError, match=\n 'Can only subtract GridQids with identical dimension.')\n", (11459, 11537), False, 'import pytest\n'), ((11666, 11688), 'cirq.GridQubit', 'cirq.GridQubit', (['(-1)', '(-2)'], {}), '(-1, -2)\n', (11680, 11688), False, 'import cirq\n'), ((11736, 11769), 'cirq.GridQid', 'cirq.GridQid', (['(-1)', '(-2)'], {'dimension': '(3)'}), '(-1, -2, dimension=3)\n', (11748, 11769), False, 'import cirq\n'), ((11993, 12051), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (12006, 12051), False, 'import pytest\n'), ((12065, 12085), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (12079, 12085), False, 'import cirq\n'), ((12114, 12172), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (12127, 12172), False, 'import pytest\n'), ((12186, 12206), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (12200, 12206), False, 'import cirq\n'), ((12235, 12293), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (12248, 12293), False, 'import pytest\n'), ((12307, 12338), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (12319, 12338), False, 'import cirq\n'), ((12367, 12425), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', 
(12380, 12425), False, 'import pytest\n'), ((12439, 12470), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (12451, 12470), False, 'import cirq\n'), ((12499, 12557), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (12512, 12557), False, 'import pytest\n'), ((12571, 12602), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (12583, 12602), False, 'import cirq\n'), ((968, 988), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (982, 988), False, 'import cirq\n'), ((998, 1029), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(2)'}), '(0, 0, dimension=2)\n', (1010, 1029), False, 'import cirq\n'), ((1066, 1086), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (1080, 1086), False, 'import cirq\n'), ((1096, 1127), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(0)'], {'dimension': '(2)'}), '(1, 0, dimension=2)\n', (1108, 1127), False, 'import cirq\n'), ((1164, 1184), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (1178, 1184), False, 'import cirq\n'), ((1194, 1225), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(1)'], {'dimension': '(2)'}), '(0, 1, dimension=2)\n', (1206, 1225), False, 'import cirq\n'), ((1262, 1293), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(3)'}), '(0, 0, dimension=3)\n', (1274, 1293), False, 'import cirq\n'), ((1609, 1629), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(2)'], {}), '(5, 2)\n', (1623, 1629), False, 'import cirq\n'), ((1659, 1690), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(2)'], {'dimension': '(3)'}), '(5, 2, dimension=3)\n', (1671, 1690), False, 'import cirq\n'), ((1775, 1795), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(2)'], {}), '(5, 2)\n', (1789, 1795), False, 'import cirq\n'), ((1901, 1932), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(2)'], 
{'dimension': '(3)'}), '(5, 2, dimension=3)\n', (1913, 1932), False, 'import cirq\n'), ((2841, 2861), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (2855, 2861), False, 'import cirq\n'), ((2950, 2970), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (2964, 2970), False, 'import cirq\n'), ((3067, 3098), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (3079, 3098), False, 'import cirq\n'), ((3187, 3218), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (3199, 3218), False, 'import cirq\n'), ((3260, 3280), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3274, 3280), False, 'import cirq\n'), ((3326, 3346), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3340, 3346), False, 'import cirq\n'), ((3393, 3413), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3407, 3413), False, 'import cirq\n'), ((3459, 3479), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3473, 3479), False, 'import cirq\n'), ((3564, 3586), 'cirq.GridQubit', 'cirq.GridQubit', (['(+1)', '(-1)'], {}), '(+1, -1)\n', (3578, 3586), False, 'import cirq\n'), ((3636, 3658), 'cirq.GridQubit', 'cirq.GridQubit', (['(+1)', '(+1)'], {}), '(+1, +1)\n', (3650, 3658), False, 'import cirq\n'), ((3708, 3730), 'cirq.GridQubit', 'cirq.GridQubit', (['(-1)', '(-1)'], {}), '(-1, -1)\n', (3722, 3730), False, 'import cirq\n'), ((3780, 3802), 'cirq.GridQubit', 'cirq.GridQubit', (['(-1)', '(+1)'], {}), '(-1, +1)\n', (3794, 3802), False, 'import cirq\n'), ((3853, 3873), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(0)'], {}), '(2, 0)\n', (3867, 3873), False, 'import cirq\n'), ((3887, 3911), 'cirq.GridQubit', 'cirq.GridQubit', (['(500)', '(999)'], {}), '(500, 999)\n', (3901, 3911), False, 'import cirq\n'), ((4002, 4027), 'cirq.GridQubit', 'cirq.GridQubit', (['(5034)', '(999)'], {}), '(5034, 999)\n', 
(4016, 4027), False, 'import cirq\n'), ((4110, 4130), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (4124, 4130), False, 'import cirq\n'), ((4140, 4160), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(1)'], {}), '(2, 1)\n', (4154, 4160), False, 'import cirq\n'), ((4170, 4190), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (4184, 4190), False, 'import cirq\n'), ((4200, 4220), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (4214, 4220), False, 'import cirq\n'), ((4398, 4418), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(1)'], {}), '(2, 1)\n', (4412, 4418), False, 'import cirq\n'), ((4505, 4525), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (4519, 4525), False, 'import cirq\n'), ((4535, 4555), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (4549, 4555), False, 'import cirq\n'), ((4565, 4585), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(1)'], {}), '(2, 1)\n', (4579, 4585), False, 'import cirq\n'), ((4595, 4615), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(2)'], {}), '(2, 2)\n', (4609, 4615), False, 'import cirq\n'), ((4672, 4692), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (4686, 4692), False, 'import cirq\n'), ((4702, 4722), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (4716, 4722), False, 'import cirq\n'), ((4732, 4752), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (4746, 4752), False, 'import cirq\n'), ((4762, 4782), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (4776, 4782), False, 'import cirq\n'), ((4866, 4897), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (4878, 4897), False, 'import cirq\n'), ((4907, 4938), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (4919, 4938), False, 'import cirq\n'), ((4948, 4979), 'cirq.GridQid', 'cirq.GridQid', 
(['(2)', '(1)'], {'dimension': '(3)'}), '(2, 1, dimension=3)\n', (4960, 4979), False, 'import cirq\n'), ((4989, 5020), 'cirq.GridQid', 'cirq.GridQid', (['(2)', '(2)'], {'dimension': '(3)'}), '(2, 2, dimension=3)\n', (5001, 5020), False, 'import cirq\n'), ((5088, 5119), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(3)'}), '(0, 0, dimension=3)\n', (5100, 5119), False, 'import cirq\n'), ((5129, 5160), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(1)'], {'dimension': '(3)'}), '(0, 1, dimension=3)\n', (5141, 5160), False, 'import cirq\n'), ((5170, 5201), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(0)'], {'dimension': '(3)'}), '(1, 0, dimension=3)\n', (5182, 5201), False, 'import cirq\n'), ((5211, 5242), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (5223, 5242), False, 'import cirq\n'), ((5325, 5345), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(6)'], {}), '(5, 6)\n', (5339, 5345), False, 'import cirq\n'), ((5347, 5367), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(7)'], {}), '(5, 7)\n', (5361, 5367), False, 'import cirq\n'), ((5419, 5439), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (5433, 5439), False, 'import cirq\n'), ((5449, 5469), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (5463, 5469), False, 'import cirq\n'), ((5479, 5499), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (5493, 5499), False, 'import cirq\n'), ((5509, 5529), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (5523, 5529), False, 'import cirq\n'), ((5614, 5645), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(6)'], {'dimension': '(3)'}), '(5, 6, dimension=3)\n', (5626, 5645), False, 'import cirq\n'), ((5655, 5686), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(7)'], {'dimension': '(3)'}), '(5, 7, dimension=3)\n', (5667, 5686), False, 'import cirq\n'), ((5755, 5786), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(3)'}), '(0, 0, 
dimension=3)\n', (5767, 5786), False, 'import cirq\n'), ((5796, 5827), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(1)'], {'dimension': '(3)'}), '(0, 1, dimension=3)\n', (5808, 5827), False, 'import cirq\n'), ((5837, 5868), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(0)'], {'dimension': '(3)'}), '(1, 0, dimension=3)\n', (5849, 5868), False, 'import cirq\n'), ((5878, 5909), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (5890, 5909), False, 'import cirq\n'), ((6113, 6143), 'cirq.GridQubit.from_diagram', 'cirq.GridQubit.from_diagram', (['s'], {}), '(s)\n', (6140, 6143), False, 'import cirq\n'), ((6166, 6207), 'cirq.GridQid.from_diagram', 'cirq.GridQid.from_diagram', (['s'], {'dimension': '(3)'}), '(s, dimension=3)\n', (6191, 6207), False, 'import cirq\n'), ((6294, 6314), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (6308, 6314), False, 'import cirq\n'), ((6324, 6344), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (6338, 6344), False, 'import cirq\n'), ((6354, 6374), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (6368, 6374), False, 'import cirq\n'), ((6384, 6404), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (6398, 6404), False, 'import cirq\n'), ((6479, 6510), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(0)'], {'dimension': '(3)'}), '(0, 0, dimension=3)\n', (6491, 6510), False, 'import cirq\n'), ((6520, 6551), 'cirq.GridQid', 'cirq.GridQid', (['(0)', '(1)'], {'dimension': '(3)'}), '(0, 1, dimension=3)\n', (6532, 6551), False, 'import cirq\n'), ((6561, 6592), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(0)'], {'dimension': '(3)'}), '(1, 0, dimension=3)\n', (6573, 6592), False, 'import cirq\n'), ((6602, 6633), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(1)'], {'dimension': '(3)'}), '(1, 1, dimension=3)\n', (6614, 6633), False, 'import cirq\n'), ((6826, 6846), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', 
(6840, 6846), False, 'import cirq\n'), ((6891, 6911), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (6905, 6911), False, 'import cirq\n'), ((6956, 6976), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (6970, 6976), False, 'import cirq\n'), ((7022, 7042), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7036, 7042), False, 'import cirq\n'), ((7089, 7109), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7103, 7109), False, 'import cirq\n'), ((7154, 7174), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7168, 7174), False, 'import cirq\n'), ((7230, 7250), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7244, 7250), False, 'import cirq\n'), ((7295, 7315), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7309, 7315), False, 'import cirq\n'), ((7352, 7372), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (7366, 7372), False, 'import cirq\n'), ((7375, 7395), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(5)'], {}), '(3, 5)\n', (7389, 7395), False, 'import cirq\n'), ((7431, 7451), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(5)'], {}), '(3, 5)\n', (7445, 7451), False, 'import cirq\n'), ((7454, 7474), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(1)'], {}), '(2, 1)\n', (7468, 7474), False, 'import cirq\n'), ((7510, 7531), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(-2)'], {}), '(1, -2)\n', (7524, 7531), False, 'import cirq\n'), ((7534, 7554), 'cirq.GridQubit', 'cirq.GridQubit', (['(3)', '(5)'], {}), '(3, 5)\n', (7548, 7554), False, 'import cirq\n'), ((7606, 7637), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (7618, 7637), False, 'import cirq\n'), ((7693, 7724), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (7705, 7724), False, 'import cirq\n'), ((7780, 7811), 'cirq.GridQid', 
'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (7792, 7811), False, 'import cirq\n'), ((7868, 7899), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (7880, 7899), False, 'import cirq\n'), ((7957, 7988), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (7969, 7988), False, 'import cirq\n'), ((8044, 8075), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (8056, 8075), False, 'import cirq\n'), ((8142, 8173), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (8154, 8173), False, 'import cirq\n'), ((8229, 8260), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (8241, 8260), False, 'import cirq\n'), ((8308, 8339), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (8320, 8339), False, 'import cirq\n'), ((8342, 8373), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(5)'], {'dimension': '(3)'}), '(3, 5, dimension=3)\n', (8354, 8373), False, 'import cirq\n'), ((8434, 8465), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(5)'], {'dimension': '(3)'}), '(3, 5, dimension=3)\n', (8446, 8465), False, 'import cirq\n'), ((8468, 8499), 'cirq.GridQid', 'cirq.GridQid', (['(2)', '(1)'], {'dimension': '(3)'}), '(2, 1, dimension=3)\n', (8480, 8499), False, 'import cirq\n'), ((8560, 8592), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(-2)'], {'dimension': '(3)'}), '(1, -2, dimension=3)\n', (8572, 8592), False, 'import cirq\n'), ((8595, 8626), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(5)'], {'dimension': '(3)'}), '(3, 5, dimension=3)\n', (8607, 8626), False, 'import cirq\n'), ((8819, 8839), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (8833, 8839), False, 'import cirq\n'), ((8842, 8871), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'dtype'}), '([1, 2], 
dtype=dtype)\n', (8850, 8871), True, 'import numpy as np\n'), ((8907, 8927), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (8921, 8927), False, 'import cirq\n'), ((8930, 8959), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'dtype'}), '([0, 0], dtype=dtype)\n', (8938, 8959), True, 'import numpy as np\n'), ((8995, 9015), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (9009, 9015), False, 'import cirq\n'), ((9018, 9048), 'numpy.array', 'np.array', (['[-1, 0]'], {'dtype': 'dtype'}), '([-1, 0], dtype=dtype)\n', (9026, 9048), True, 'import numpy as np\n'), ((9084, 9104), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (9098, 9104), False, 'import cirq\n'), ((9107, 9136), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'dtype'}), '([1, 2], dtype=dtype)\n', (9115, 9136), True, 'import numpy as np\n'), ((9172, 9192), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (9186, 9192), False, 'import cirq\n'), ((9195, 9224), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'dtype'}), '([0, 0], dtype=dtype)\n', (9203, 9224), True, 'import numpy as np\n'), ((9260, 9291), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9272, 9291), False, 'import cirq\n'), ((9294, 9324), 'numpy.array', 'np.array', (['[-1, 0]'], {'dtype': 'dtype'}), '([-1, 0], dtype=dtype)\n', (9302, 9324), True, 'import numpy as np\n'), ((9386, 9417), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9398, 9417), False, 'import cirq\n'), ((9420, 9449), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'dtype'}), '([1, 2], dtype=dtype)\n', (9428, 9449), True, 'import numpy as np\n'), ((9510, 9541), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9522, 9541), False, 'import cirq\n'), ((9544, 9573), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'dtype'}), '([0, 0], 
dtype=dtype)\n', (9552, 9573), True, 'import numpy as np\n'), ((9634, 9665), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9646, 9665), False, 'import cirq\n'), ((9668, 9698), 'numpy.array', 'np.array', (['[-1, 0]'], {'dtype': 'dtype'}), '([-1, 0], dtype=dtype)\n', (9676, 9698), True, 'import numpy as np\n'), ((9759, 9790), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9771, 9790), False, 'import cirq\n'), ((9793, 9822), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'dtype'}), '([1, 2], dtype=dtype)\n', (9801, 9822), True, 'import numpy as np\n'), ((9883, 9914), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (9895, 9914), False, 'import cirq\n'), ((9917, 9946), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'dtype'}), '([0, 0], dtype=dtype)\n', (9925, 9946), True, 'import numpy as np\n'), ((10007, 10038), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (10019, 10038), False, 'import cirq\n'), ((10041, 10071), 'numpy.array', 'np.array', (['[-1, 0]'], {'dtype': 'dtype'}), '([-1, 0], dtype=dtype)\n', (10049, 10071), True, 'import numpy as np\n'), ((10209, 10229), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10223, 10229), False, 'import cirq\n'), ((10295, 10315), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10309, 10315), False, 'import cirq\n'), ((10389, 10409), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10403, 10409), False, 'import cirq\n'), ((10487, 10507), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10501, 10507), False, 'import cirq\n'), ((10578, 10598), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10592, 10598), False, 'import cirq\n'), ((10669, 10689), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', 
(10683, 10689), False, 'import cirq\n'), ((10692, 10712), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (10700, 10712), True, 'import numpy as np\n'), ((10779, 10799), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10793, 10799), False, 'import cirq\n'), ((10802, 10832), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'int'}), '([1, 2, 3], dtype=int)\n', (10810, 10832), True, 'import numpy as np\n'), ((10940, 10960), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(3)'], {}), '(5, 3)\n', (10954, 10960), False, 'import cirq\n'), ((11031, 11051), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(3)'], {}), '(5, 3)\n', (11045, 11051), False, 'import cirq\n'), ((11123, 11154), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(3)'], {'dimension': '(3)'}), '(5, 3, dimension=3)\n', (11135, 11154), False, 'import cirq\n'), ((11225, 11256), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(3)'], {'dimension': '(3)'}), '(5, 3, dimension=3)\n', (11237, 11256), False, 'import cirq\n'), ((11371, 11402), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(3)'], {'dimension': '(3)'}), '(5, 3, dimension=3)\n', (11383, 11402), False, 'import cirq\n'), ((11405, 11436), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(5)'], {'dimension': '(4)'}), '(3, 5, dimension=4)\n', (11417, 11436), False, 'import cirq\n'), ((11546, 11577), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(3)'], {'dimension': '(3)'}), '(5, 3, dimension=3)\n', (11558, 11577), False, 'import cirq\n'), ((11580, 11611), 'cirq.GridQid', 'cirq.GridQid', (['(3)', '(5)'], {'dimension': '(4)'}), '(3, 5, dimension=4)\n', (11592, 11611), False, 'import cirq\n'), ((11642, 11662), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (11656, 11662), False, 'import cirq\n'), ((11701, 11732), 'cirq.GridQid', 'cirq.GridQid', (['(1)', '(2)'], {'dimension': '(3)'}), '(1, 2, dimension=3)\n', (11713, 11732), False, 'import cirq\n'), ((12668, 12696), 'cirq.GridQubit', 'cirq.GridQubit', ([], {'row': '(1)', 
'col': '(2)'}), '(row=1, col=2)\n', (12682, 12696), False, 'import cirq\n'), ((12738, 12766), 'cirq.GridQubit', 'cirq.GridQubit', ([], {'row': '(1)', 'col': '(2)'}), '(row=1, col=2)\n', (12752, 12766), False, 'import cirq\n'), ((3531, 3551), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3545, 3551), False, 'import cirq\n'), ((3603, 3623), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3617, 3623), False, 'import cirq\n'), ((3675, 3695), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3689, 3695), False, 'import cirq\n'), ((3747, 3767), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3761, 3767), False, 'import cirq\n'), ((3820, 3840), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3834, 3840), False, 'import cirq\n'), ((3965, 3989), 'cirq.GridQubit', 'cirq.GridQubit', (['(500)', '(999)'], {}), '(500, 999)\n', (3979, 3989), False, 'import cirq\n'), ((4064, 4084), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (4078, 4084), False, 'import cirq\n'), ((4344, 4364), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (4358, 4364), False, 'import cirq\n'), ((11803, 11823), 'cirq.GridQubit', 'cirq.GridQubit', (['(5)', '(6)'], {}), '(5, 6)\n', (11817, 11823), False, 'import cirq\n'), ((11874, 11905), 'cirq.GridQid', 'cirq.GridQid', (['(5)', '(6)'], {'dimension': '(3)'}), '(5, 6, dimension=3)\n', (11886, 11905), False, 'import cirq\n')] |
####################
# George Mason University - ECE612
# <NAME> - Spring 2017
#
# Final Project
# spectrum.py
# Implements a numpy FFT in Python 3.4
# and scales the results to fit on an LED array
####################
import numpy as np
#samplerate: Choose a sample rate of 44.1 KHz, the same sample rate used for audio CDs
# From Nyquist, this samplerate will capture 20 KHz roughly the limit of human hearing
#chunk: The size of each packet read from the microphone and the points of the subsequent FFT
#num_columns: The number of columns of LEDs that will display our spectrum
#
# Use this function to precompute the bin mapping and save the results outside
# of the main audio processing loop.
def find_bin_mapping_np(num_columns, min_freq, max_freq, chunk=4096, samplerate=44100):
#Need to group and assign output bins of the FFT to each column
#Since sound is logarithmic, we will assign equal amounts of log(spectrum)
# to each column which will result in fewer bins for the lower columns
#Audible frequency range is 20Hz - 20KHz
#If we only had one column, it would cover the entire range
bin_mapping = np.array([min_freq, max_freq])
num_cols_mapped = 1
#First, take the log of each entry
bin_mapping = np.log10(bin_mapping)
#As we add bins, insert values into bin_mapping
while num_cols_mapped < num_columns:
new_vals = np.array([])
for i in range(num_cols_mapped):
new_vals = np.append(new_vals, sum(bin_mapping[i:i+2]) / 2.0)
#Interleave these values into bin_mapping
bin_mapping = np.insert(bin_mapping, list(range(1,num_cols_mapped+1)), new_vals)
#Double the number of columns mapped each iteration
num_cols_mapped = num_cols_mapped * 2
#Done mapping, but the bin_mapping list is still in log form
#Use NumPy power() to convert back to frequency in Hz
bin_freqs = np.power(10, bin_mapping)
#Based on the number of points in our FFT, find the closest bin index to each frequency entry
#Only the first half of the bins contain useful information and each bin has width of
# (sampling_rate / chunk)
bin_mapping = [int(round(x / (samplerate / chunk))) for x in bin_freqs]
print("Selected Bin Mapping: ", bin_mapping)
print("Selected Bin Freqs: ", bin_freqs)
#So now, each column will average the FFT bins between each pair of indexes in bin_mapping
return bin_mapping
# Data: Should be a chunk-length array of real samples to compute spectral data for
# bin_mapping: An array of bin indexes. This function will scale and then sum the FFT output
# between each bin_index and append to the output array
# chunk: Size of the FFT and the number of values in data
# scale: Optional argument with a default of 4. Scales fft output by powers of 2
# If set to 4 and the input is full scale 16-bit audio, should produce values between 0 and 8
# Increase this parameter for audio data with low volume, or decrease to drive more than 8 LEDs per column
def get_spectrum(data, bin_mapping, chunk, scale=4):
#Use the rfft function which only computes half of the FFT
# Since our input is all real data, only the one half is useful
y_fft = np.fft.rfft(data)
# FFT returns complex float
# Use abs() to get magnitude and then cast to int
# Eventually mapping to just 8 LEDs, so okay to cast now and lose precision
y_amp = (np.abs(y_fft)).astype(int)
#After the FFT, the amplitudes are large. On the order of 2^15 (Max input from Mic) * chunk
# Dividing by (2^15 * chunk) would scale to between 0 and 1
# But we want to drive LEDs of height 8, so don't divide by quite as much
# Use right_shift to perform a faster divide-by-power-of-two
y_shift = np.right_shift(y_amp, int(np.log2(chunk) + 15 - scale))
bin_amplitudes = np.array([], dtype='i2')
#Iterate through every item pair in bin_mapping using zip
# Returns one item from each range on each iteration
# bin_mapping[:-1] iterates from beginning to last-1 item
# bin_mapping[1:] iterates from second to last item
for x,y in zip(bin_mapping[:-1],bin_mapping[1:]):
#Sum energy between the indexes [x, y) and append to output array
# Python [x:y] indexing does not include the y-th item
amplitude = (np.sum(y_shift[x:y]))
bin_amplitudes = np.append(bin_amplitudes, amplitude)
# Loudness is logarithmic, so take the log2 of the bin powers
bin_amplitudes = np.add(bin_amplitudes, np.ones(len(bin_amplitudes), int))
bin_amplitudes = np.log2(bin_amplitudes).astype(int)
return bin_amplitudes
| [
"numpy.fft.rfft",
"numpy.sum",
"numpy.abs",
"numpy.power",
"numpy.log2",
"numpy.append",
"numpy.array",
"numpy.log10"
] | [((1161, 1191), 'numpy.array', 'np.array', (['[min_freq, max_freq]'], {}), '([min_freq, max_freq])\n', (1169, 1191), True, 'import numpy as np\n'), ((1274, 1295), 'numpy.log10', 'np.log10', (['bin_mapping'], {}), '(bin_mapping)\n', (1282, 1295), True, 'import numpy as np\n'), ((1921, 1946), 'numpy.power', 'np.power', (['(10)', 'bin_mapping'], {}), '(10, bin_mapping)\n', (1929, 1946), True, 'import numpy as np\n'), ((3255, 3272), 'numpy.fft.rfft', 'np.fft.rfft', (['data'], {}), '(data)\n', (3266, 3272), True, 'import numpy as np\n'), ((3869, 3893), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""i2"""'}), "([], dtype='i2')\n", (3877, 3893), True, 'import numpy as np\n'), ((1409, 1421), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1417, 1421), True, 'import numpy as np\n'), ((4335, 4355), 'numpy.sum', 'np.sum', (['y_shift[x:y]'], {}), '(y_shift[x:y])\n', (4341, 4355), True, 'import numpy as np\n'), ((4380, 4416), 'numpy.append', 'np.append', (['bin_amplitudes', 'amplitude'], {}), '(bin_amplitudes, amplitude)\n', (4389, 4416), True, 'import numpy as np\n'), ((3448, 3461), 'numpy.abs', 'np.abs', (['y_fft'], {}), '(y_fft)\n', (3454, 3461), True, 'import numpy as np\n'), ((4581, 4604), 'numpy.log2', 'np.log2', (['bin_amplitudes'], {}), '(bin_amplitudes)\n', (4588, 4604), True, 'import numpy as np\n'), ((3818, 3832), 'numpy.log2', 'np.log2', (['chunk'], {}), '(chunk)\n', (3825, 3832), True, 'import numpy as np\n')] |
# some_file.py
import sys
import time
import json
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/tf/jovyan/work')
import logging
import numpy as np
import os
import tensorflow as tf
import numpy.random as rnd
from sklearn.metrics import f1_score, precision_recall_fscore_support
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from ad_examples.common.utils import read_csv, dataframe_to_matrix
from ad_examples.common.gen_samples import get_synthetic_samples
from ad_examples.common.nn_utils import AutoencoderAnomalyDetector
from ad_examples.aad.aad_support import AadOpts, get_aad_command_args, configure_logger
from ad_examples.aad.forest_description import CompactDescriber, MinimumVolumeCoverDescriber, BayesianRulesetsDescriber, get_region_memberships
from ad_examples.aad.demo_aad import get_debug_args, detect_anomalies_and_describe
from ad_examples.loda.loda import Loda
logger = logging.getLogger(__name__)
def convert_scores_to_classes(scores, anomaly_ratio):
"""
Converts list of scores to flags (0/1) - top anomalies are marked as 1.
"""
anomaly_cnt = int(len(scores) * anomaly_ratio)
anomaly_indices = np.array(scores).argsort()[-anomaly_cnt:][::-1]
y_pred = np.zeros(len(scores))
np.put(y_pred, anomaly_indices, 1)
return y_pred
def load_data(input_file):
print("loading csv...")
# t = "ber"
# size = "simple"
# n = "_normalized_hours"
# data_df = read_csv("../notebooks/data/simple.type123.csv", header=True)
# data_df = read_csv("./data/data_parking/csv/type-ber/simple.type-ber.csv", header=True)
#data_df = read_csv("./data/data_parking/csv" + n + "/type-" + t + "/" + size + ".type-" + t + ".csv", header=True)
data_df = read_csv(input_file, header=True)
# print(data_df)
print("transforming data...")
x, y = dataframe_to_matrix(data_df)
return (x, y)
def slice_data(x, y, idx_from, idx_to):
n = x.shape[0]
return (x[idx_from:idx_to, :], y[idx_from:idx_to])
def run_ad_algorithm(algo_type, x_old, scores_old, x_new, outliers_fraction):
rnd.seed(42)
call_mode_normal=True
ad=None
print(algo_type)
if algo_type == "ifor":
# print("running IFOR...")
ad = IsolationForest(max_samples=256, contamination=outliers_fraction, random_state=None)
elif algo_type == "lof":
# print("running LOF...")
ad = LocalOutlierFactor(n_neighbors=35, contamination=outliers_fraction)
call_mode_normal=False
elif algo_type == "loda":
# print("running LODA...")
ad = Loda(mink=10, maxk=100)
# print("running auto-encoder...")
# input_dims = x_old.shape[1]
# ad = AutoencoderAnomalyDetector(
# n_inputs = input_dims,
# n_neurons = [2 * input_dims, round( input_dims/ 5), 2 * input_dims],
# normalize_scale = True,
# activations=[tf.nn.tanh, tf.nn.tanh, tf.nn.tanh, None]
# )
ad.fit(x_old)
if len(scores_old) == 0:
# print("Calculating inital scores")
if call_mode_normal == True:
scores_old = -ad.decision_function(x_old)
else:
scores_old = -ad._decision_function(x_old)
# print("Evaluating...")
if call_mode_normal == True:
scores = -ad.decision_function(x_new)
else:
scores = -ad._decision_function(x_new)
# print("Combining with historic scores and converting to classes...")
scores_combined = np.concatenate((np.array(scores_old), np.array(scores)), 0)
y_pred_combined = convert_scores_to_classes(scores_combined, outliers_fraction)
y_pred = y_pred_combined[len(scores_old):]
return (scores_combined, y_pred)
#################################################################################
args = sys.argv
print(args)
algo=args[2]
(gt_x, gt_y) = load_data(args[1])
day_rec_cnt = 24 * 12
block_size = 7 * day_rec_cnt
idx_start = 60 * day_rec_cnt
idx_curr_time = idx_start
n = gt_y.shape[0]
scores_all = np.zeros(0)
y_pred = np.zeros(0)
outlier_fraction = 0.01
if len(args) > 3 :
outlier_fraction = float(args[3])
t0 = time.clock()
while idx_curr_time < n :
print(n, idx_curr_time, block_size)
(x1, y1) = slice_data(gt_x, gt_y, 0, idx_curr_time)
(x2, y2) = slice_data(gt_x, gt_y, idx_curr_time, idx_curr_time + block_size)
(scores_all, y_pred_new) = run_ad_algorithm(algo, x1, scores_all, x2, outlier_fraction)
y_pred = np.concatenate((np.array(y_pred), np.array(y_pred_new)), 0)
# print(np.sum(y1), np.sum(y2), np.sum(y_pred))
idx_curr_time = idx_curr_time + block_size
y_tmp = gt_y[idx_start:idx_curr_time]
f1 = f1_score(y_tmp, y_pred, average=None) # average='weighted')
print(f1)
t1 = time.clock()
print("finished with training, analyzing combined output")
y = gt_y[idx_start:]
print("Elapsed time")
print(t1 -t0)
print("Calculating F1 scores...")
f1 = f1_score(y, y_pred, average=None) # average='weighted')
prec2, recall2, f05, _ = precision_recall_fscore_support(y_tmp, y_pred, average=None, beta=0.5)
print(json.dumps({ "time": t1 - t0, "f1": f1[1], "precision": prec2[1], "recall": recall2[1], "f05": f05[1] }))
| [
"numpy.random.seed",
"sklearn.ensemble.IsolationForest",
"numpy.put",
"ad_examples.common.utils.dataframe_to_matrix",
"sklearn.neighbors.LocalOutlierFactor",
"numpy.zeros",
"sys.path.insert",
"time.clock",
"ad_examples.common.utils.read_csv",
"json.dumps",
"sklearn.metrics.f1_score",
"numpy.ar... | [((102, 139), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""/tf/jovyan/work"""'], {}), "(1, '/tf/jovyan/work')\n", (117, 139), False, 'import sys\n'), ((965, 992), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (982, 992), False, 'import logging\n'), ((4018, 4029), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (4026, 4029), True, 'import numpy as np\n'), ((4039, 4050), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (4047, 4050), True, 'import numpy as np\n'), ((4138, 4150), 'time.clock', 'time.clock', ([], {}), '()\n', (4148, 4150), False, 'import time\n'), ((4750, 4762), 'time.clock', 'time.clock', ([], {}), '()\n', (4760, 4762), False, 'import time\n'), ((4921, 4954), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {'average': 'None'}), '(y, y_pred, average=None)\n', (4929, 4954), False, 'from sklearn.metrics import f1_score, precision_recall_fscore_support\n'), ((5002, 5072), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_tmp', 'y_pred'], {'average': 'None', 'beta': '(0.5)'}), '(y_tmp, y_pred, average=None, beta=0.5)\n', (5033, 5072), False, 'from sklearn.metrics import f1_score, precision_recall_fscore_support\n'), ((1300, 1334), 'numpy.put', 'np.put', (['y_pred', 'anomaly_indices', '(1)'], {}), '(y_pred, anomaly_indices, 1)\n', (1306, 1334), True, 'import numpy as np\n'), ((1784, 1817), 'ad_examples.common.utils.read_csv', 'read_csv', (['input_file'], {'header': '(True)'}), '(input_file, header=True)\n', (1792, 1817), False, 'from ad_examples.common.utils import read_csv, dataframe_to_matrix\n'), ((1885, 1913), 'ad_examples.common.utils.dataframe_to_matrix', 'dataframe_to_matrix', (['data_df'], {}), '(data_df)\n', (1904, 1913), False, 'from ad_examples.common.utils import read_csv, dataframe_to_matrix\n'), ((2132, 2144), 'numpy.random.seed', 'rnd.seed', (['(42)'], {}), '(42)\n', (2140, 2144), True, 'import numpy.random as rnd\n'), ((4670, 4707), 
'sklearn.metrics.f1_score', 'f1_score', (['y_tmp', 'y_pred'], {'average': 'None'}), '(y_tmp, y_pred, average=None)\n', (4678, 4707), False, 'from sklearn.metrics import f1_score, precision_recall_fscore_support\n'), ((5080, 5186), 'json.dumps', 'json.dumps', (["{'time': t1 - t0, 'f1': f1[1], 'precision': prec2[1], 'recall': recall2[1],\n 'f05': f05[1]}"], {}), "({'time': t1 - t0, 'f1': f1[1], 'precision': prec2[1], 'recall':\n recall2[1], 'f05': f05[1]})\n", (5090, 5186), False, 'import json\n'), ((2281, 2369), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'max_samples': '(256)', 'contamination': 'outliers_fraction', 'random_state': 'None'}), '(max_samples=256, contamination=outliers_fraction,\n random_state=None)\n', (2296, 2369), False, 'from sklearn.ensemble import IsolationForest\n'), ((2442, 2509), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {'n_neighbors': '(35)', 'contamination': 'outliers_fraction'}), '(n_neighbors=35, contamination=outliers_fraction)\n', (2460, 2509), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((3508, 3528), 'numpy.array', 'np.array', (['scores_old'], {}), '(scores_old)\n', (3516, 3528), True, 'import numpy as np\n'), ((3530, 3546), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3538, 3546), True, 'import numpy as np\n'), ((4476, 4492), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4484, 4492), True, 'import numpy as np\n'), ((4494, 4514), 'numpy.array', 'np.array', (['y_pred_new'], {}), '(y_pred_new)\n', (4502, 4514), True, 'import numpy as np\n'), ((2619, 2642), 'ad_examples.loda.loda.Loda', 'Loda', ([], {'mink': '(10)', 'maxk': '(100)'}), '(mink=10, maxk=100)\n', (2623, 2642), False, 'from ad_examples.loda.loda import Loda\n'), ((1213, 1229), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1221, 1229), True, 'import numpy as np\n')] |
# The script contains definition of different elements to be analyzed
# from spectra obtained from Olympus Delta XRF
# The definition includes element name from periodic table,
# the beam number, which is the most suitable for the element,
# Savitzky-Golay filter window length
# integration limits for peak integration
import numpy as np
class ElementData:
def __init__(self, name, beam, filter_window, int_limits, molar_weight) -> None:
self.name = name # e.g. Au
self.beam = beam # beam number: 0, 1 or 2
self.filter_window = filter_window # odd integer,
# see https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html
self.int_limits = int_limits # keV, and array of two coordinates for start and end of peak
self.molar_weight = molar_weight # g/mol, needed to calculate ppm
def get_elements() -> dict:
return {
'Si': ElementData('Si', 2, 9, np.array([[1.5, 2],]), 28.08550),
'Au': ElementData('Au', 1, 17, np.array([[9.3, 10.2], [10.75, 12.25]]), 196.9665690)
} | [
"numpy.array"
] | [((943, 963), 'numpy.array', 'np.array', (['[[1.5, 2]]'], {}), '([[1.5, 2]])\n', (951, 963), True, 'import numpy as np\n'), ((1016, 1055), 'numpy.array', 'np.array', (['[[9.3, 10.2], [10.75, 12.25]]'], {}), '([[9.3, 10.2], [10.75, 12.25]])\n', (1024, 1055), True, 'import numpy as np\n')] |
import random
import numpy as np
import torch
import torch.nn as nn
class res_MLPBlock(nn.Module):
"""Skippable MLPBlock with relu"""
def __init__(self, width):
super(res_MLPBlock, self).__init__()
self.block = nn.Sequential(nn.Linear(width, width), nn.ReLU(), nn.BatchNorm1d(width)) # nn.LayerNorm(width)) # batch norm doesnt really make sense for MCMC
def forward(self, x):
"""b is sample from binary variable or activation probability (soft forward)"""
return x + self.block(x)
class SGD_regression_homo(nn.Module):
def __init__(self, input_dim, output_dim, width, n_layers, seed=None):
super(SGD_regression_homo, self).__init__()
self.seed = seed
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# self.log_std = nn.Parameter(torch.zeros(self.output_dim))
self.input_dim = input_dim
self.output_dim = output_dim
self.width = width
self.n_layers = n_layers
self.layers = []
self.layers += [nn.Linear(input_dim, width), nn.ReLU(), nn.BatchNorm1d(width)]
for _ in range(self.n_layers - 1):
self.layers.append(res_MLPBlock(width))
self.layers += [nn.Linear(width, output_dim)]
self.layers = nn.Sequential(*self.layers)
def forward(self, x):
mean = self.layers(x)
return mean # , self.log_std.exp()
def forward_predict(self, x, Nsamples=0):
"""This function is different from forward to compactly represent eval functions"""
mu = self.forward(x)
return mu, torch.ones_like(mu) * 0 # TODO: torch.zeros_like?
def get_regulariser(self):
"""MC dropout uses weight decay to approximate the KL divergence"""
return 0
| [
"torch.ones_like",
"torch.nn.ReLU",
"numpy.random.seed",
"torch.nn.Sequential",
"torch.manual_seed",
"torch.nn.BatchNorm1d",
"random.seed",
"torch.nn.Linear"
] | [((1337, 1364), 'torch.nn.Sequential', 'nn.Sequential', (['*self.layers'], {}), '(*self.layers)\n', (1350, 1364), True, 'import torch.nn as nn\n'), ((251, 274), 'torch.nn.Linear', 'nn.Linear', (['width', 'width'], {}), '(width, width)\n', (260, 274), True, 'import torch.nn as nn\n'), ((276, 285), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (283, 285), True, 'import torch.nn as nn\n'), ((287, 308), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['width'], {}), '(width)\n', (301, 308), True, 'import torch.nn as nn\n'), ((764, 781), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (775, 781), False, 'import random\n'), ((794, 814), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (808, 814), True, 'import numpy as np\n'), ((827, 850), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (844, 850), False, 'import torch\n'), ((1103, 1130), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'width'], {}), '(input_dim, width)\n', (1112, 1130), True, 'import torch.nn as nn\n'), ((1132, 1141), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1139, 1141), True, 'import torch.nn as nn\n'), ((1143, 1164), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['width'], {}), '(width)\n', (1157, 1164), True, 'import torch.nn as nn\n'), ((1285, 1313), 'torch.nn.Linear', 'nn.Linear', (['width', 'output_dim'], {}), '(width, output_dim)\n', (1294, 1313), True, 'import torch.nn as nn\n'), ((1652, 1671), 'torch.ones_like', 'torch.ones_like', (['mu'], {}), '(mu)\n', (1667, 1671), False, 'import torch\n')] |
#!/usr/bin/python2.7
import tensorflow as tf
import numpy as np
import os
from scipy.ndimage.filters import gaussian_filter1d
from utils.helper_functions import get_label_length_seq
class ModelCNN:
def __init__(self, nRows, nCols):
self.input_vid = tf.placeholder('float', [None, nRows, nCols, 1], name='input_vid')
self.target = tf.placeholder('float', [None, nRows, nCols, 1], name='target')
self.nRows = nRows
self.nCols = nCols
self.__build()
def __weight_variable(self, shape, myName):
initial = tf.truncated_normal(shape, stddev=0.1, name=myName)
return tf.Variable(initial)
def __bias_variable(self, shape, myName):
initial = tf.constant(0.1, shape=shape, name=myName)
return tf.Variable(initial)
def __conv(self, x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def __max_pool_2x1(self, x):
return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
def __build(self):
w_conv1 = self.__weight_variable([5, 1, 1, 8], 'w_conv1')
b_conv1 = self.__bias_variable([8], 'b_conv1')
w_conv2 = self.__weight_variable([5, 1, 8, 16], 'w_conv2')
b_conv2 = self.__bias_variable([16], 'b_conv2')
W_fc1 = self.__weight_variable([int(4*1*self.nRows*self.nCols), 1024], 'W_fc1')
b_fc1 = self.__bias_variable([1024], 'b_fc1')
W_fc2 = self.__weight_variable([1024, self.nRows*self.nCols], 'W_fc2')
b_fc2 = self.__bias_variable([self.nRows*self.nCols], 'b_fc2')
h_conv1 = tf.nn.relu(self.__conv(self.input_vid, w_conv1) + b_conv1)
h_pool1 = self.__max_pool_2x1(h_conv1)
h_conv2 = tf.nn.relu(self.__conv(h_pool1, w_conv2) + b_conv2)
h_pool2 = self.__max_pool_2x1(h_conv2)
input_vid_flat = tf.reshape(h_pool2, [-1, int(4*1*self.nRows*self.nCols)])
h_fc1 = tf.nn.relu(tf.matmul(input_vid_flat, W_fc1) + b_fc1)
pred_flat = tf.matmul(h_fc1, W_fc2) + b_fc2
prediction_unscaled = tf.reshape(pred_flat, [-1, self.nRows, self.nCols, 1])
## l2 normalization
self.prediction = tf.nn.l2_normalize(prediction_unscaled, dim=2)
self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep=100)
def train(self, sess, model_save_path, batch_gen, nEpochs, save_freq, batch_size):
my_loss = tf.reduce_mean(tf.square( self.target - self.prediction ))
correct_prediction = tf.equal(tf.argmax(self.prediction,2), tf.argmax(self.target,2))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.AdamOptimizer(0.001).minimize(my_loss)
sess.run(tf.global_variables_initializer())
for epoch in range(nEpochs):
epoch_acc = 0
i=0
while(batch_gen.has_next()):
batch_vid, batch_target = batch_gen.next_batch(batch_size)
_, acc = sess.run([optimizer, accuracy], feed_dict={self.input_vid: batch_vid, self.target: batch_target})
i=i+1
epoch_acc += acc
batch_gen.reset()
if epoch%save_freq==0:
print ('Epoch', (epoch+1), 'completed out of',nEpochs,'training Acc: %.2f'%(epoch_acc/i))
path = model_save_path+"/epoch-"+str(epoch+1)
if not os.path.exists(path):
os.makedirs(path)
self.saver.save(sess, path+"/model.ckpt")
def __post_process(self, result, sigma):
new_res = gaussian_filter1d(result, sigma=sigma, axis=0)
return new_res
def predict(self, sess, model_save_path, input_x, sigma, actions_dict):
self.saver.restore(sess, model_save_path)
result = sess.run([self.prediction], feed_dict={self.input_vid: input_x})[0]
result = np.reshape(result,[self.nRows, self.nCols])
result = self.__post_process(result, sigma)
output = []
for i in range(len(result)):
output.append(actions_dict.keys()[actions_dict.values().index(np.argmax(result[i]))])
label_seq, length_seq = get_label_length_seq(output)
return label_seq, length_seq
| [
"utils.helper_functions.get_label_length_seq",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.truncated_normal",
"os.path.exists",
"tensorflow.placehold... | [((268, 334), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, nRows, nCols, 1]'], {'name': '"""input_vid"""'}), "('float', [None, nRows, nCols, 1], name='input_vid')\n", (282, 334), True, 'import tensorflow as tf\n'), ((357, 420), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, nRows, nCols, 1]'], {'name': '"""target"""'}), "('float', [None, nRows, nCols, 1], name='target')\n", (371, 420), True, 'import tensorflow as tf\n'), ((578, 629), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)', 'name': 'myName'}), '(shape, stddev=0.1, name=myName)\n', (597, 629), True, 'import tensorflow as tf\n'), ((645, 665), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (656, 665), True, 'import tensorflow as tf\n'), ((740, 782), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape', 'name': 'myName'}), '(0.1, shape=shape, name=myName)\n', (751, 782), True, 'import tensorflow as tf\n'), ((798, 818), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (809, 818), True, 'import tensorflow as tf\n'), ((872, 928), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (884, 928), True, 'import tensorflow as tf\n'), ((990, 1065), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 1, 1]', 'strides': '[1, 2, 1, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')\n", (1004, 1065), True, 'import tensorflow as tf\n'), ((2140, 2194), 'tensorflow.reshape', 'tf.reshape', (['pred_flat', '[-1, self.nRows, self.nCols, 1]'], {}), '(pred_flat, [-1, self.nRows, self.nCols, 1])\n', (2150, 2194), True, 'import tensorflow as tf\n'), ((2249, 2295), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['prediction_unscaled'], {'dim': '(2)'}), '(prediction_unscaled, dim=2)\n', (2267, 
2295), True, 'import tensorflow as tf\n'), ((2317, 2384), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'write_version': 'tf.train.SaverDef.V2', 'max_to_keep': '(100)'}), '(write_version=tf.train.SaverDef.V2, max_to_keep=100)\n', (2331, 2384), True, 'import tensorflow as tf\n'), ((3719, 3765), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['result'], {'sigma': 'sigma', 'axis': '(0)'}), '(result, sigma=sigma, axis=0)\n', (3736, 3765), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((4041, 4085), 'numpy.reshape', 'np.reshape', (['result', '[self.nRows, self.nCols]'], {}), '(result, [self.nRows, self.nCols])\n', (4051, 4085), True, 'import numpy as np\n'), ((4344, 4372), 'utils.helper_functions.get_label_length_seq', 'get_label_length_seq', (['output'], {}), '(output)\n', (4364, 4372), False, 'from utils.helper_functions import get_label_length_seq\n'), ((2078, 2101), 'tensorflow.matmul', 'tf.matmul', (['h_fc1', 'W_fc2'], {}), '(h_fc1, W_fc2)\n', (2087, 2101), True, 'import tensorflow as tf\n'), ((2507, 2547), 'tensorflow.square', 'tf.square', (['(self.target - self.prediction)'], {}), '(self.target - self.prediction)\n', (2516, 2547), True, 'import tensorflow as tf\n'), ((2589, 2618), 'tensorflow.argmax', 'tf.argmax', (['self.prediction', '(2)'], {}), '(self.prediction, 2)\n', (2598, 2618), True, 'import tensorflow as tf\n'), ((2619, 2644), 'tensorflow.argmax', 'tf.argmax', (['self.target', '(2)'], {}), '(self.target, 2)\n', (2628, 2644), True, 'import tensorflow as tf\n'), ((2679, 2718), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2686, 2718), True, 'import tensorflow as tf\n'), ((2828, 2861), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2859, 2861), True, 'import tensorflow as tf\n'), ((2015, 2047), 'tensorflow.matmul', 'tf.matmul', (['input_vid_flat', 'W_fc1'], {}), '(input_vid_flat, W_fc1)\n', (2024, 
2047), True, 'import tensorflow as tf\n'), ((2745, 2774), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (2767, 2774), True, 'import tensorflow as tf\n'), ((3531, 3551), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3545, 3551), False, 'import os\n'), ((3573, 3590), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3584, 3590), False, 'import os\n'), ((4284, 4304), 'numpy.argmax', 'np.argmax', (['result[i]'], {}), '(result[i])\n', (4293, 4304), True, 'import numpy as np\n')] |
import csv
import os
import time
import uuid
import pickle
from random import randint
import numpy as np
import pandas as pd
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from sklearn.metrics import mean_squared_error as mse
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
from data import fetch_data, connect_db
# Output artifacts: trained models pickle and a tab-separated training log.
MODEL_FILE = os.path.join('models', 'models.pkl')
TRAIN_LOG_FILE = os.path.join('models', 'train.log')
# Connection string for the MongoDB holding the weather/production data.
MONGO_URI = os.environ.get('MONGO_URI')
# Fresh random seed per run; recorded in the training log for reproducibility.
seed = randint(0, 10000)
# Hyperopt search space for the XGBRegressor hyper-parameters.
# quniform values are floats and are cast to int where XGBoost requires it.
space = {'max_depth': hp.quniform('max_depth', 3, 15, 1),
         'gamma': hp.uniform('gamma', 1, 9),
         'reg_alpha': hp.quniform('reg_alpha', 0, 200, 1),
         'reg_lambda': hp.uniform('reg_lambda', 0, 1),
         'colsample_bytree': hp.uniform('colsample_bytree', 0.5, 1),
         'min_child_weight': hp.quniform('min_child_weight', 0, 10, 1),
         'n_estimators': hp.quniform('n_estimators', 100, 200, 25),
         }
def split_data(X, y, seed, test_size=0.1, val_size=0.1):
    """Split (X, y) into train / validation / test partitions.

    The test set is carved off first (``test_size`` fraction), then the
    validation set is taken from the remainder (``val_size`` fraction).
    Both splits reuse ``seed`` so the partitioning is reproducible.
    """
    X_rest, X_test, y_rest, y_test = train_test_split(
        X, y, test_size=test_size, random_state=seed)
    X_train, X_val, y_train, y_val = train_test_split(
        X_rest, y_rest, test_size=val_size, random_state=seed)
    return X_train, X_val, X_test, y_train, y_val, y_test
def transform_data(original_df):
    """Engineer features and return ``(X, y)`` for modelling.

    Adds polynomial wind terms and a cyclic (sin/cos) encoding of the
    wind bearing, then selects the fixed feature-column set. ``y`` is the
    ``actual`` column when present, otherwise ``None``.
    """
    feature_columns = ['cloud_cover', 'dew_point', 'humidity', 'ozone',
                       'precipitation', 'pressure', 'temperature',
                       'uv_index', 'visibility', 'wind_gust', 'wind_speed',
                       'wind_speed_^_2', 'wind_speed_^_3', 'wind_gust_^_2',
                       'wind_gust_^_3', 'sin_wind_bearing', 'cos_wind_bearing']
    frame = original_df.copy(deep=True)
    frame['time'] = pd.to_datetime(frame['time'], utc=True)
    # Polynomial terms capture the non-linear power curve of the turbines.
    for base in ('wind_speed', 'wind_gust'):
        for power in (2, 3):
            frame[f'{base}_^_{power}'] = frame[base] ** power
    # Cyclic encoding keeps 0 degrees and 360 degrees adjacent.
    bearing_rad = frame['wind_bearing'] * np.pi / 180.
    frame['sin_wind_bearing'] = np.sin(bearing_rad)
    frame['cos_wind_bearing'] = np.cos(bearing_rad)
    target = frame.actual if 'actual' in frame.columns else None
    return frame[feature_columns], target
def best_model_from_trials(trials):
    """Return the fitted model of the lowest-loss successful trial."""
    ok_trials = [t for t in trials
                 if STATUS_OK == t['result']['status']]
    # min() returns the first minimum, matching np.argmin on the loss list.
    best = min(ok_trials, key=lambda t: float(t['result']['loss']))
    return best['result']['model']
def optimize_model(farm, max_evals, timeout):
    """Fetch data for *farm*, tune an XGBRegressor with Hyperopt, and
    return the best model found.

    Side effects: reads from MongoDB (MONGO_URI) and appends one
    tab-separated row per run to TRAIN_LOG_FILE.

    farm: identifier passed to fetch_data (also logged as model_name).
    max_evals: maximum number of Hyperopt evaluations.
    timeout: Hyperopt wall-clock budget in seconds.
    """
    time_start = time.time()
    # ingest data
    client = connect_db(MONGO_URI)
    df = fetch_data(client, farm, limit=None)
    df.dropna(inplace=True)
    X, y = transform_data(df)
    X_train, X_val, X_test, y_train, y_val, y_test = split_data(
        X, y, seed=seed)
    # tune paramaters
    def objective(space):
        """Define Hyperopt objectives to minimize MSE."""
        # quniform samples are floats; cast to int for integer params.
        model = XGBRegressor(n_estimators=int(space['n_estimators']),
                             max_depth=int(space['max_depth']),
                             gamma=space['gamma'],
                             reg_alpha=int(space['reg_alpha']),
                             min_child_weight=space['min_child_weight'],
                             colsample_bytree=space['colsample_bytree'],
                             random_state=seed)
        # Early stopping monitors the validation set (last entry in eval_set).
        model.fit(X_train, y_train,
                  eval_set=[(X_train, y_train), (X_val, y_val)],
                  eval_metric='rmse',
                  early_stopping_rounds=5,
                  verbose=False)
        pred = model.predict(X_val)
        return {'loss': mse(y_val, pred), 'status': STATUS_OK, 'model': model}
    trials = Trials()
    best = fmin(fn=objective,
                space=space,
                algo=tpe.suggest,
                max_evals=max_evals,
                trials=trials,
                timeout=timeout)
    model = best_model_from_trials(trials)
    # logging
    m, s = divmod(time.time()-time_start, 60)
    h, m = divmod(m, 60)
    runtime = '%03d:%02d:%02d' % (h, m, s)
    # assumes df is still sorted by time after dropna — TODO confirm
    dt_range = f'{df.time.iloc[0]}~{df.time.iloc[-1]}'
    trial_no = len(trials.trials)
    header = ['unique_id', 'timestamp', 'model_name', 'runtime', 'trials',
              'dt_range_UTC', 'best_param', 'test_rmse', 'seed']
    write_header = False
    if not os.path.exists(TRAIN_LOG_FILE):
        write_header = True
    # NOTE(review): csv files should normally be opened with newline='' —
    # confirm whether extra blank rows on Windows are acceptable here.
    with open(TRAIN_LOG_FILE, 'a') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        if write_header:
            writer.writerow(header)
        to_write = map(str, [
            uuid.uuid4(),
            int(time.time()),
            farm,
            runtime,
            trial_no,
            dt_range,
            best,
            mse(model.predict(X_test), y_test, squared=False),
            seed,
        ])
        writer.writerow(to_write)
    return model
def train_models(train_list, max_evals=50, timeout=300, dump=True):
    """Train one model per farm and return them as a dict.

    Parameters
    ----------
    train_list : iterable of farm identifiers passed to optimize_model.
    max_evals : Hyperopt evaluation budget per farm.
    timeout : Hyperopt wall-clock budget (seconds) per farm.
    dump : when True, pickle the resulting dict to MODEL_FILE.

    Returns
    -------
    dict mapping farm identifier -> fitted model.
    """
    models = {}
    for farm in train_list:
        models[farm] = optimize_model(farm, max_evals=max_evals, timeout=timeout)
    if dump:
        print('Dumping file...', end='', flush=True)
        # Fix: the original passed an anonymous open() to pickle.dump and
        # never closed it; the context manager guarantees flush + close.
        with open(MODEL_FILE, 'wb') as model_file:
            pickle.dump(models, model_file)
        print(' Done!')
    return models
| [
"sklearn.model_selection.train_test_split",
"hyperopt.fmin",
"numpy.argmin",
"data.connect_db",
"numpy.sin",
"hyperopt.hp.quniform",
"os.path.join",
"random.randint",
"data.fetch_data",
"os.path.exists",
"hyperopt.Trials",
"sklearn.metrics.mean_squared_error",
"hyperopt.hp.uniform",
"csv.w... | [((375, 411), 'os.path.join', 'os.path.join', (['"""models"""', '"""models.pkl"""'], {}), "('models', 'models.pkl')\n", (387, 411), False, 'import os\n'), ((429, 464), 'os.path.join', 'os.path.join', (['"""models"""', '"""train.log"""'], {}), "('models', 'train.log')\n", (441, 464), False, 'import os\n'), ((477, 504), 'os.environ.get', 'os.environ.get', (['"""MONGO_URI"""'], {}), "('MONGO_URI')\n", (491, 504), False, 'import os\n'), ((513, 530), 'random.randint', 'randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (520, 530), False, 'from random import randint\n'), ((553, 587), 'hyperopt.hp.quniform', 'hp.quniform', (['"""max_depth"""', '(3)', '(15)', '(1)'], {}), "('max_depth', 3, 15, 1)\n", (564, 587), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((607, 632), 'hyperopt.hp.uniform', 'hp.uniform', (['"""gamma"""', '(1)', '(9)'], {}), "('gamma', 1, 9)\n", (617, 632), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((656, 691), 'hyperopt.hp.quniform', 'hp.quniform', (['"""reg_alpha"""', '(0)', '(200)', '(1)'], {}), "('reg_alpha', 0, 200, 1)\n", (667, 691), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((716, 746), 'hyperopt.hp.uniform', 'hp.uniform', (['"""reg_lambda"""', '(0)', '(1)'], {}), "('reg_lambda', 0, 1)\n", (726, 746), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((777, 815), 'hyperopt.hp.uniform', 'hp.uniform', (['"""colsample_bytree"""', '(0.5)', '(1)'], {}), "('colsample_bytree', 0.5, 1)\n", (787, 815), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((846, 887), 'hyperopt.hp.quniform', 'hp.quniform', (['"""min_child_weight"""', '(0)', '(10)', '(1)'], {}), "('min_child_weight', 0, 10, 1)\n", (857, 887), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((914, 955), 'hyperopt.hp.quniform', 'hp.quniform', (['"""n_estimators"""', '(100)', '(200)', '(25)'], {}), "('n_estimators', 100, 200, 25)\n", (925, 955), False, 'from 
hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((1122, 1184), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'seed'}), '(X, y, test_size=test_size, random_state=seed)\n', (1138, 1184), False, 'from sklearn.model_selection import train_test_split\n'), ((1231, 1306), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train_', 'y_train_'], {'test_size': 'val_size', 'random_state': 'seed'}), '(X_train_, y_train_, test_size=val_size, random_state=seed)\n', (1247, 1306), False, 'from sklearn.model_selection import train_test_split\n'), ((1860, 1896), 'pandas.to_datetime', 'pd.to_datetime', (["df['time']"], {'utc': '(True)'}), "(df['time'], utc=True)\n", (1874, 1896), True, 'import pandas as pd\n'), ((2110, 2152), 'numpy.sin', 'np.sin', (["(df['wind_bearing'] * np.pi / 180.0)"], {}), "(df['wind_bearing'] * np.pi / 180.0)\n", (2116, 2152), True, 'import numpy as np\n'), ((2181, 2223), 'numpy.cos', 'np.cos', (["(df['wind_bearing'] * np.pi / 180.0)"], {}), "(df['wind_bearing'] * np.pi / 180.0)\n", (2187, 2223), True, 'import numpy as np\n'), ((2667, 2684), 'numpy.argmin', 'np.argmin', (['losses'], {}), '(losses)\n', (2676, 2684), True, 'import numpy as np\n'), ((2928, 2939), 'time.time', 'time.time', ([], {}), '()\n', (2937, 2939), False, 'import time\n'), ((2971, 2992), 'data.connect_db', 'connect_db', (['MONGO_URI'], {}), '(MONGO_URI)\n', (2981, 2992), False, 'from data import fetch_data, connect_db\n'), ((3002, 3038), 'data.fetch_data', 'fetch_data', (['client', 'farm'], {'limit': 'None'}), '(client, farm, limit=None)\n', (3012, 3038), False, 'from data import fetch_data, connect_db\n'), ((4083, 4091), 'hyperopt.Trials', 'Trials', ([], {}), '()\n', (4089, 4091), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((4103, 4209), 'hyperopt.fmin', 'fmin', ([], {'fn': 'objective', 'space': 'space', 'algo': 'tpe.suggest', 'max_evals': 'max_evals', 'trials': 
'trials', 'timeout': 'timeout'}), '(fn=objective, space=space, algo=tpe.suggest, max_evals=max_evals,\n trials=trials, timeout=timeout)\n', (4107, 4209), False, 'from hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n'), ((4723, 4753), 'os.path.exists', 'os.path.exists', (['TRAIN_LOG_FILE'], {}), '(TRAIN_LOG_FILE)\n', (4737, 4753), False, 'import os\n'), ((4847, 4882), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (4857, 4882), False, 'import csv\n'), ((4014, 4030), 'sklearn.metrics.mean_squared_error', 'mse', (['y_val', 'pred'], {}), '(y_val, pred)\n', (4017, 4030), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((4362, 4373), 'time.time', 'time.time', ([], {}), '()\n', (4371, 4373), False, 'import time\n'), ((5002, 5014), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5012, 5014), False, 'import uuid\n'), ((5049, 5060), 'time.time', 'time.time', ([], {}), '()\n', (5058, 5060), False, 'import time\n')] |
"""Perturbation-based probabilistic ODE solver."""
from typing import Callable, Optional
import numpy as np
import scipy.integrate
from probnum import problems
from probnum.diffeq import perturbed, stepsize
from probnum.typing import ArrayLike, FloatLike
# Public API of this module.
__all__ = ["perturbsolve_ivp"]
# Map from user-facing method string to the underlying SciPy solver class.
METHODS = {
    "RK45": scipy.integrate.RK45,
    "RK23": scipy.integrate.RK23,
}
"""Implemented Scipy solvers."""
# aliases for (otherwise too-)long lines
lognorm = perturbed.step.PerturbedStepSolver.construct_with_lognormal_perturbation
uniform = perturbed.step.PerturbedStepSolver.construct_with_uniform_perturbation
# Map from user-facing perturbation string to the solver factory.
PERTURBS = {
    "step-lognormal": lognorm,
    "step-uniform": uniform,
}
"""Implemented perturbation-styles."""
# This interface function is allowed to have many input arguments.
# Having many input arguments implies having many local arguments,
# so we need to disable both here.
# pylint: disable="too-many-arguments,too-many-locals"
def perturbsolve_ivp(
    f: Callable,
    t0: FloatLike,
    tmax: FloatLike,
    y0: ArrayLike,
    rng: np.random.Generator,
    method: str = "RK45",
    perturb: str = "step-lognormal",
    noise_scale: FloatLike = 10.0,
    adaptive: bool = True,
    atol: FloatLike = 1e-6,
    rtol: FloatLike = 1e-3,
    step: Optional[FloatLike] = None,
    time_stops: Optional[ArrayLike] = None,
) -> perturbed.step.PerturbedStepSolution:
    """Solve an initial value problem with a perturbation-based ODE solver.
    Parameters
    ----------
    f
        ODE vector field.
    t0
        Initial time point.
    tmax
        Final time point.
    y0
        Initial value.
    rng
        Random number generator.
    method
        Integration method to use.
        The following are available (docs adapted from
        https://docs.scipy.org/doc/scipy/\
        reference/generated/scipy.integrate.solve_ivp.html):
        * `RK45` (default): Explicit Runge-Kutta method of order 5(4) [2]_.
          The error is controlled assuming accuracy of the fourth-order
          method, but steps are taken using the fifth-order accurate
          formula (local extrapolation is done). A quartic interpolation
          polynomial is used for the dense output [3]_. Can be applied in
          the complex domain.
        * `RK23`: Explicit Runge-Kutta method of order 3(2) [4]_. The error
          is controlled assuming accuracy of the second-order method, but
          steps are taken using the third-order accurate formula (local
          extrapolation is done). A cubic Hermite polynomial is used for the
          dense output. Can be applied in the complex domain.
        Other integrators are not supported currently.
    perturb
        Which perturbation style to use.
        Currently, the following methods are supported:
        * `step-lognormal`: Perturbed-step
          (aka random time-step numerical integration) method
          with lognormally distributed perturbation of the step-size [1]_.
        * `step-uniform`: Perturbed-step
          (aka random time-step numerical integration) method
          with uniformly distributed perturbation of the step-size [1]_.
        Other integrators are not supported currently.
    noise_scale
        Scale of the perturbation. Optional.
        Default is 10.0. The magnitude of this parameter
        significantly impacts the width of the posterior.
    adaptive
        Whether to use adaptive steps or not. Default is `True`.
    atol
        Absolute tolerance of the adaptive step-size selection scheme.
        Optional. Default is ``1e-6``.
    rtol
        Relative tolerance of the adaptive step-size selection scheme.
        Optional. Default is ``1e-3``.
    step
        Step size. If atol and rtol are not specified, this step-size
        is used for a fixed-step ODE solver.
        If they are specified, this only affects the first step. Optional.
        Default is None, in which case the first step is chosen
        as prescribed by :meth:`propose_firststep`.
    time_stops
        Time-points through which the solver must step. Optional. Default is None.
    Raises
    ------
    ValueError
        If the 'method' string does not correspond to a supported method.
    ValueError
        If the 'perturb' string does not correspond to
        a supported perturbation style.
    Returns
    -------
    perturbed.step.PerturbedStepSolution
        ODE Solution of the perturbed-step-solver.
    Examples
    --------
    >>> from probnum.diffeq import perturbsolve_ivp
    >>> import numpy as np
    Solve a simple logistic ODE with fixed steps.
    Per default, `perturbsolve_ivp` uses a perturbed-step solver
    with lognormal perturbation.
    >>> rng = np.random.default_rng(seed=2)
    >>>
    >>> def f(t, x):
    ...     return 4*x*(1-x)
    >>>
    >>> y0 = np.array([0.15])
    >>> t0, tmax = 0., 1.5
    >>> solution = perturbsolve_ivp(
    ...     f, t0, tmax, y0, rng=rng, step=0.25, method="RK23", adaptive=False
    ... )
    >>> print(np.round(solution.states.mean, 3))
    [[0.15 ]
     [0.325]
     [0.56 ]
     [0.772]
     [0.893]
     [0.964]
     [0.989]]
    Each solution is the result of a randomly-perturbed call of
    an underlying Runge-Kutta solver.
    Therefore, if you call it again, the output will be different:
    >>> other_solution = perturbsolve_ivp(
    ...     f, t0, tmax, y0, rng=rng, step=0.25, method="RK23", adaptive=False
    ... )
    >>> print(np.round(other_solution.states.mean, 3))
    [[0.15 ]
     [0.319]
     [0.57 ]
     [0.785]
     [0.908]
     [0.968]
     [0.989]]
    Other methods, such as `RK45` (as well as other perturbation styles)
    are easily accessible.
    Let us solve the same equation, with an adaptive RK45 solver
    that uses uniformly perturbed steps.
    >>> solution = perturbsolve_ivp(
    ...     f, t0, tmax, y0, rng=rng, atol=1e-5, rtol=1e-6,
    ...     method="RK45", perturb="step-uniform", adaptive=True
    ... )
    >>> print(np.round(solution.states.mean, 3))
    [[0.15 ]
     [0.152]
     [0.167]
     [0.26 ]
     [0.431]
     [0.646]
     [0.849]
     [0.883]
     [0.915]
     [0.953]
     [0.976]
     [0.986]]
    References
    ----------
    .. [1] <NAME>. and <NAME>..
        Random time step probabilistic methods for uncertainty quantification
        in chaotic and geometric numerical integration.
        Statistics and Computing. 2020.
    .. [2] <NAME>, <NAME>..
        A family of embedded Runge-Kutta formulae.
        Journal of Computational and Applied Mathematics,
        Vol. 6, No. 1, pp. 19-26, 1980.
    .. [3] <NAME>.
        Some Practical Runge-Kutta Formulas.
        Mathematics of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    .. [4] <NAME>, <NAME>.
        A 3(2) Pair of Runge-Kutta Formulas.
        Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    # Fail fast: validate the string arguments before constructing the IVP
    # and step rule (the original validated only after that work was done).
    if method not in METHODS:
        raise ValueError(
            f"Parameter method='{method}' is not supported. "
            f"Possible values are {list(METHODS.keys())}."
        )
    if perturb not in PERTURBS:
        raise ValueError(
            f"Parameter perturb='{perturb}' is not supported. "
            f"Possible values are {list(PERTURBS.keys())}."
        )
    ivp = problems.InitialValueProblem(t0=t0, tmax=tmax, y0=np.asarray(y0), f=f)
    steprule = _create_steprule(adaptive, atol, ivp, rtol, step)
    wrapped_scipy_solver = perturbed.scipy_wrapper.WrappedScipyRungeKutta(
        METHODS[method], steprule=steprule
    )
    perturbed_solver = PERTURBS[perturb](
        rng=rng,
        solver=wrapped_scipy_solver,
        noise_scale=noise_scale,
    )
    return perturbed_solver.solve(ivp=ivp, stop_at=time_stops)
def _create_steprule(adaptive, atol, ivp, rtol, step):
    """Build the step-size rule: adaptive (atol/rtol) or constant steps."""
    # Identity check is deliberate: only the literal True enables adaptivity.
    if adaptive is not True:
        return stepsize.ConstantSteps(step)
    if atol is None or rtol is None:
        raise ValueError(
            "Please provide absolute and relative tolerance for adaptive steps."
        )
    if step is not None:
        firststep = step
    else:
        firststep = stepsize.propose_firststep(ivp)
    return stepsize.AdaptiveSteps(firststep=firststep, atol=atol, rtol=rtol)
| [
"probnum.diffeq.stepsize.ConstantSteps",
"probnum.diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta",
"probnum.diffeq.stepsize.AdaptiveSteps",
"probnum.diffeq.stepsize.propose_firststep",
"numpy.asarray"
] | [((7486, 7573), 'probnum.diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta', 'perturbed.scipy_wrapper.WrappedScipyRungeKutta', (['METHODS[method]'], {'steprule': 'steprule'}), '(METHODS[method], steprule=\n steprule)\n', (7532, 7573), False, 'from probnum.diffeq import perturbed, stepsize\n'), ((8134, 8199), 'probnum.diffeq.stepsize.AdaptiveSteps', 'stepsize.AdaptiveSteps', ([], {'firststep': 'firststep', 'atol': 'atol', 'rtol': 'rtol'}), '(firststep=firststep, atol=atol, rtol=rtol)\n', (8156, 8199), False, 'from probnum.diffeq import perturbed, stepsize\n'), ((8229, 8257), 'probnum.diffeq.stepsize.ConstantSteps', 'stepsize.ConstantSteps', (['step'], {}), '(step)\n', (8251, 8257), False, 'from probnum.diffeq import perturbed, stepsize\n'), ((6992, 7006), 'numpy.asarray', 'np.asarray', (['y0'], {}), '(y0)\n', (7002, 7006), True, 'import numpy as np\n'), ((8083, 8114), 'probnum.diffeq.stepsize.propose_firststep', 'stepsize.propose_firststep', (['ivp'], {}), '(ivp)\n', (8109, 8114), False, 'from probnum.diffeq import perturbed, stepsize\n')] |
"""
==============================
Embla file reader from python
==============================
This script is a python version of : https://github.com/gpiantoni/hgse_private/blob/master/ebmread.m
Version 0.21
Author: <NAME> <EMAIL>
date: 3.20.2021
"""
import numpy as np
from struct import unpack
import datetime
# parameter define
# Magic strings/bytes of the Embla .ebm container format.
EBM_RAWDATA_HEADER = 'Embla data file'
EBM_RESULTS_HEADER = 'Embla results file'
# Record-type IDs (single bytes in the file's block structure).
EBM_R_VERSION = b'\x80'
EBM_R_SUBJECT_INFO = b'\xd0'
EBM_R_RATECORR = b'\x8a'
EBM_R_HEADER = b'\x81'
EBM_R_TIME = b'\x84'
EBM_R_CHANNEL = b'\x85'
EBM_R_SAMPLING_RATE = b'\x86'
EBM_R_UNIT_GAIN = b'\x87'
EBM_R_CHANNEL_NAME = b'\x90'
EBM_R_DATA = b'\x20'
EBM_UNKNOWN_DATASIZE = b'\xff\xff\xff\xff'
# The ASCII signature is terminated by 0x1A and bounded in length.
EBM_END_OF_SIGNATURE = b'\x1A'
EBM_MAX_SIGNATURE_SIZE = 80
# Byte after the signature selects the file's endianness.
EBM_MAC_ENDIAN = b'\xff'
EBM_INTEL_ENDIAN = b'\x00'
ERROR_UNKNOWN_SIGNATURE = 'This is not a Embla data file'
ERROR_FILE_NOT_FOUND = 'Could not open data file'
ERROR_UNKNOWN = 'Failure in reading the file'
ERROR_CANCEL = 'Operation was canceled'
endian = 'big'
# Big endian by default
# Sizes (in bytes) of the primitive types used while parsing.
SIZE_uchar = 1
SIZE_char = 1
SIZE_ulong = 4
SIZE_long = 4
SIZE_int8 = 1
SIZE_int16 = 2
def unpack_one(num_type, buffer, endian):
    """Decode one value of struct format *num_type* from *buffer*.

    ``endian`` selects the byte-order prefix: ">" when "big", "<" otherwise.
    """
    if endian == "big":
        prefix = ">"
    else:
        prefix = "<"
    return unpack(prefix + num_type, buffer)[0]
def cal_stoptime(starttime, deltat):
    """Return *starttime* advanced by *deltat* seconds (fraction allowed)."""
    whole_seconds = int(deltat)
    frac_microseconds = (deltat - whole_seconds) * 1e6
    offset = datetime.timedelta(seconds=whole_seconds,
                                microseconds=frac_microseconds)
    return starttime + offset
def ebmreader(filepath, onlyheader=False):
    """Parse an Embla .ebm file and return ``(data, header)``.

    data: list of numpy arrays, one per data record, already scaled by
    the unit gain (empty when ``onlyheader`` is True).
    header: dict with keys such as 'starttime', 'stoptime', 'length',
    'frequency', 'unitgain', 'channelname', filled as the corresponding
    records are encountered.
    """
    with open(filepath, "rb") as f:
        header = {}
        signature = []
        signature.append(f.read(SIZE_char))
        i = 0
        # Read the ASCII signature byte-by-byte until the 0x1A terminator
        # or the maximum signature length is reached.
        while signature[
                -1] != EBM_END_OF_SIGNATURE and i < EBM_MAX_SIGNATURE_SIZE - 1:
            i = i + 1
            signature.append(f.read(SIZE_char))
        signature = "".join(map(lambda x: x.decode("windows-1252"), signature))
        assert i != EBM_MAX_SIGNATURE_SIZE - 1, ERROR_UNKNOWN_SIGNATURE
        assert EBM_RAWDATA_HEADER in signature, ERROR_UNKNOWN_SIGNATURE
        # The byte after the signature encodes the file's endianness.
        ch = f.read(SIZE_char)
        if ch == EBM_MAC_ENDIAN:
            endian = "big"
        elif ch == EBM_INTEL_ENDIAN:
            endian = "little"
        # NOTE(review): if neither marker matches, `endian` stays unbound
        # here and the first unpack_one call raises UnboundLocalError.
        wideId = 1
        # Store the position of the start of the block structure
        # If this is not a file with 8 bit block IDs then we will change
        # this again.
        firstBlockOffset = f.tell()
        ch = f.read(SIZE_uchar)
        if ch == b"\xff":
            ch = f.read(SIZE_ulong)
            if ch == b"\xff\xff\xff\xff":
                # we have 32 bit block IDs so we skip the rest of the
                # 32 byte header and store the position of the block
                # structure which should start right after.
                firstBlockOffset = firstBlockOffset + 31
                wideId = 1
        f.seek(firstBlockOffset, 0)
        # find the data block
        rec = 0
        recnum = -1
        header["starttime"] = []
        header["stoptime"] = []
        header["length"] = []
        data = []
        # Walk the record chain: each record is an ID, a size, then a payload.
        while True:
            if wideId != 0:
                rec = unpack_one("L", f.read(SIZE_ulong), endian)
            else:
                rec = unpack_one("B", f.read(SIZE_uchar), endian)
            recSize = unpack_one("l", f.read(SIZE_long), endian)
            recPos = f.tell()
            if rec == int.from_bytes(EBM_R_VERSION, endian):
                minor = int.from_bytes(f.read(SIZE_int8), endian)
                major = int.from_bytes(f.read(SIZE_int8), endian)
                header["fileversion"] = major + 0.01 * minor
            if rec == int.from_bytes(EBM_R_SUBJECT_INFO, endian):
                tmp = f.read(SIZE_int8 * recSize)
                header["subjectinfo"] = tmp.decode("windows-1252").rstrip(
                    "\x00")
            if rec == int.from_bytes(EBM_R_HEADER, endian):
                tmp = f.read(SIZE_int8 * recSize)
                header["extra"] = tmp.decode("windows-1252").rstrip("\x00")
            if rec == int.from_bytes(EBM_R_TIME, endian):
                # Timestamp record: year is 16-bit, the rest single bytes;
                # the last byte is hundredths of a second (-> microseconds).
                year = unpack_one("h", f.read(SIZE_int16), endian)
                month = int.from_bytes(f.read(SIZE_int8), endian)
                day = int.from_bytes(f.read(SIZE_int8), endian)
                hour = int.from_bytes(f.read(SIZE_int8), endian)
                minute = int.from_bytes(f.read(SIZE_int8), endian)
                second = int.from_bytes(f.read(SIZE_int8), endian)
                hsec = int.from_bytes(f.read(SIZE_int8), endian) * 10000
                times_data = (year, month, day, hour, minute, second, hsec)
                recnum = recnum + 1
                header["starttime"].append(datetime.datetime(*times_data))
            if rec == int.from_bytes(EBM_R_CHANNEL, endian):
                header["channel"] = unpack_one("h", f.read(SIZE_int16), endian)
            if rec == int.from_bytes(EBM_R_SAMPLING_RATE, endian):
                # stored in mHz -> Hz
                header["frequency"] = unpack_one("L", f.read(SIZE_long),
                                                endian) / 1000
            if rec == int.from_bytes(EBM_R_RATECORR, endian):
                header["sec_error"] = unpack_one("d", f.read(8), endian)
            if rec == int.from_bytes(EBM_R_UNIT_GAIN, endian):
                # stored in nano-units -> units
                header["unitgain"] = unpack_one("l", f.read(SIZE_long),
                                               endian) * 1e-9
            if rec == int.from_bytes(EBM_R_CHANNEL_NAME, endian):
                tmp = f.read(recSize * SIZE_int8)
                header["channelname"] = tmp.decode("windows-1252").strip(
                    "\x00")
            # read data
            if rec == int.from_bytes(EBM_R_DATA, endian):
                if onlyheader is False:
                    # Payload is 16-bit samples scaled by the unit gain.
                    newdata = f.read(recSize)
                    newdata = np.frombuffer(newdata, np.int16)
                    newdata = newdata * header["unitgain"]
                    data.append(newdata)
                else:
                    f.seek(recSize, 1)
                current = header["starttime"][recnum]
                header["stoptime"].append(
                    cal_stoptime(current, recSize / 2 / header["frequency"]))
                header["length"].append(recSize // 2)
            # Peek one byte to detect EOF; otherwise jump to the next record.
            b = f.read(1)
            if not b:
                break
            else:
                f.seek(recPos + recSize, 0)
        if len(header["stoptime"]) > 1:
            # Seconds between consecutive segment boundaries (recording gaps).
            header["interrupt length"] = [
                (stop - start).seconds
                for start, stop in zip(header["starttime"], header["stoptime"])
            ]
    return data, header
if __name__ == "__main__":
    # Ad-hoc smoke test against a local recording; matplotlib is imported
    # for the (currently commented-out) plotting below.
    import matplotlib.pyplot as plt
    data, header = ebmreader(
        r"C:\Users\45805\OneDrive\workspace\my_porject\2nd year\pyembreader\SL012\Plethysmogram.ebm",
        onlyheader=True)
    print(header["starttime"])
    print(header["length"])
    print(data)
    # plt.figure()
    # plt.plot(data[-1][:])
    # plt.show()
| [
"numpy.frombuffer",
"struct.unpack",
"datetime.timedelta",
"datetime.datetime"
] | [((1396, 1454), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds', 'microseconds': 'microsec'}), '(seconds=seconds, microseconds=microsec)\n', (1414, 1454), False, 'import datetime\n'), ((1249, 1278), 'struct.unpack', 'unpack', (['(se + num_type)', 'buffer'], {}), '(se + num_type, buffer)\n', (1255, 1278), False, 'from struct import unpack\n'), ((4709, 4739), 'datetime.datetime', 'datetime.datetime', (['*times_data'], {}), '(*times_data)\n', (4726, 4739), False, 'import datetime\n'), ((5837, 5869), 'numpy.frombuffer', 'np.frombuffer', (['newdata', 'np.int16'], {}), '(newdata, np.int16)\n', (5850, 5869), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" evaluation """
import numpy as np
from shapely.geometry import Polygon
def calculate_eao(dataset_name, all_failures, all_overlaps, gt_traj_length, skipping=5):
    '''
    Compute the Expected Average Overlap (EAO) over a dataset.

    dataset_name: dataset identifier; selects the [low, high] averaging
        interval (only "VOT2016"/"VOT2015" are handled — other names leave
        low/high unset and raise NameError later; TODO confirm intended).
    all_failures: list (one entry per sequence) of failure frame indices.
    all_overlaps: list (one entry per sequence) of per-frame IoU values.
    gt_traj_length: list of ground-truth trajectory lengths per sequence.
    skipping: number of frames skipped after each failure before restart.
    '''
    if dataset_name == "VOT2016":
        low = 108
        high = 371
    elif dataset_name == "VOT2015":
        low = 108
        high = 371
    # One fragment per run between failures (plus the final run).
    fragment_num = sum([len(x)+1 for x in all_failures])
    max_len = max([len(x) for x in all_overlaps])
    tags = [1] * max_len
    seq_weight = 1 / (1 + 1e-10) # division by zero
    eao = {}
    # prepare segments
    fweights = np.ones(fragment_num, dtype=np.float32) * np.nan
    fragments = np.ones((fragment_num, max_len), dtype=np.float32) * np.nan
    seg_counter = 0
    for traj_len, failures, overlaps in zip(gt_traj_length, all_failures, all_overlaps):
        if failures:
            # Fragment boundaries: start of sequence plus each restart point.
            points = [x+skipping for x in failures if
                      x+skipping <= len(overlaps)]
            points.insert(0, 0)
            for i, _ in enumerate(points):
                if i != len(points) - 1:
                    fragment = np.array(overlaps[points[i]:points[i+1]+1], dtype=np.float32)
                    fragments[seg_counter, :] = 0
                else:
                    fragment = np.array(overlaps[points[i]:], dtype=np.float32)
                # NaN overlaps count as zero overlap.
                fragment[np.isnan(fragment)] = 0
                fragments[seg_counter, :len(fragment)] = fragment
                if i != len(points) - 1:
                    tag_value = tags[points[i]:points[i+1]+1]
                    w = sum(tag_value) / (points[i+1] - points[i]+1)
                    fweights[seg_counter] = seq_weight * w
                else:
                    tag_value = tags[points[i]:len(overlaps)]
                    w = sum(tag_value) / (traj_len - points[i]+1e-16)
                    fweights[seg_counter] = seq_weight * w
                seg_counter += 1
        else:
            # no failure
            max_idx = min(len(overlaps), max_len)
            fragments[seg_counter, :max_idx] = overlaps[:max_idx]
            tag_value = tags[0: max_idx]
            w = sum(tag_value) / max_idx
            fweights[seg_counter] = seq_weight * w
            seg_counter += 1
    expected_overlaps = calculate_expected_overlap(fragments, fweights)
    print(len(expected_overlaps))
    # calculate eao
    # Average the expected overlap curve over the dataset interval [low, high].
    weight = np.zeros((len(expected_overlaps)))
    weight[low-1:high-1+1] = 1
    expected_overlaps = np.array(expected_overlaps, dtype=np.float32)
    is_valid = np.logical_not(np.isnan(expected_overlaps))
    eao_ = np.sum(expected_overlaps[is_valid] * weight[is_valid]) / np.sum(weight[is_valid])
    eao = eao_
    return eao
def calculate_expected_overlap(fragments, fweights):
    """Weighted average of per-fragment mean overlaps at every length.

    fragments: (num_fragments, max_len) array, NaN-padded.
    fweights: per-fragment weights; entry i at length L averages columns
    1..L of the fragments still alive (non-NaN) at column L.
    """
    n_cols = fragments.shape[1]
    expected = np.zeros(n_cols, np.float32)
    expected[0] = 1
    for col in range(1, n_cols):
        alive = ~np.isnan(fragments[:, col])
        if np.any(alive):
            chunk = fragments[alive, 1:col + 1]
            per_fragment_mean = np.sum(chunk, 1) / chunk.shape[1]
            expected[col] = (np.sum(per_fragment_mean * fweights[alive])
                             / np.sum(fweights[alive]))
    return expected
def calculate_accuracy_failures(pred_trajectory, gt_trajectory, \
                                bound=None):
    '''
    Compute per-frame overlaps, mean accuracy and failure frames.

    args:
        pred_trajectory: list of bboxes; a length-1 entry is a tracker
            state flag (value 2 marks a failure frame).
        gt_trajectory: list of bboxes, same length as pred_trajectory;
            entries are 8-value polygons or 4-value [x1, y1, x2, y2] boxes.
        bound: optional (w, h) of the image; overlaps are clipped to it.
    return:
        acc: mean IoU over the sequence (NaNs ignored).
        overlaps: per-frame IoU values (NaN on failure frames).
        failures: indices of failure frames.
        num_failures: number of failures.
    '''
    overlaps = []
    failures = []
    for i, pred_traj in enumerate(pred_trajectory):
        if len(pred_traj) == 1:
            # Length-1 entries are state flags, not boxes; flag value 2
            # marks a tracking failure at this frame.
            if pred_trajectory[i][0] == 2:
                failures.append(i)
            overlaps.append(float("nan"))
        else:
            if bound is not None:
                # Image rectangle used to clip both polygons to the frame.
                poly_img = Polygon(np.array([[0, 0],\
                                    [0, bound[1]],\
                                    [bound[0], bound[1]],\
                                    [bound[0], 0]])).convex_hull
            if len(gt_trajectory[i]) == 8:
                # Polygon ground truth: compare convex hulls.
                poly_pred = Polygon(np.array([[pred_trajectory[i][0], pred_trajectory[i][1]], \
                                    [pred_trajectory[i][2], pred_trajectory[i][1]], \
                                    [pred_trajectory[i][2], pred_trajectory[i][3]], \
                                    [pred_trajectory[i][0], pred_trajectory[i][3]] \
                                    ])).convex_hull
                poly_gt = Polygon(np.array(gt_trajectory[i]).reshape(4, 2)).convex_hull
                if bound is not None:
                    gt_inter_img = poly_gt.intersection(poly_img)
                    pred_inter_img = poly_pred.intersection(poly_img)
                    inter_area = gt_inter_img.intersection(pred_inter_img).area
                    overlap = inter_area /(gt_inter_img.area + pred_inter_img.area - inter_area)
                else:
                    inter_area = poly_gt.intersection(poly_pred).area
                    overlap = inter_area / (poly_gt.area + poly_pred.area - inter_area)
                elif len(gt_trajectory[i]) == 4:
                    overlap = iou(np.array(pred_trajectory[i]).reshape(-1, 4), np.array(gt_trajectory[i]).reshape(-1, 4))
            overlaps.append(overlap)
    acc = 0
    num_failures = len(failures)
    if overlaps:
        # nanmean ignores failure frames when averaging.
        acc = np.nanmean(overlaps)
    return acc, overlaps, failures, num_failures
def judge_failures(pred_bbox, gt_bbox, threshold=0):
    """Return True when the predicted box fails (overlap <= threshold)."""
    if len(gt_bbox) == 4:
        # Axis-aligned ground truth: reuse the array-based IoU helper.
        overlap = iou(np.array(pred_bbox).reshape(-1, 4),
                      np.array(gt_bbox).reshape(-1, 4))
    else:
        # Polygon ground truth: intersect the convex hulls.
        corners = np.array([[pred_bbox[0], pred_bbox[1]],
                            [pred_bbox[2], pred_bbox[1]],
                            [pred_bbox[2], pred_bbox[3]],
                            [pred_bbox[0], pred_bbox[3]]])
        hull_pred = Polygon(corners).convex_hull
        hull_gt = Polygon(np.array(gt_bbox).reshape(4, 2)).convex_hull
        inter = hull_gt.intersection(hull_pred).area
        overlap = inter / (hull_gt.area + hull_pred.area - inter)
    if overlap > threshold:
        return False
    return True
def iou(box1, box2):
    """Pairwise IoU between two box sets.

    box1: (N, 4) array of [x1, y1, x2, y2]; box2: (K, 4). Returns an
    (N, K) matrix of intersection-over-union values.
    """
    box1, box2 = box1.copy(), box2.copy()
    n_boxes = box1.shape[0]
    k_boxes = box2.shape[0]
    # Broadcast both sets to (N, K, 4).
    a = np.array(box1.reshape((n_boxes, 1, 4))) + np.zeros((1, k_boxes, 4))
    b = np.array(box2.reshape((1, k_boxes, 4))) + np.zeros((n_boxes, 1, 4))
    # Intersection extents, clamped at zero when boxes do not overlap.
    inter_w = np.min(np.stack((a[:, :, 2], b[:, :, 2]), axis=-1), axis=2) \
        - np.max(np.stack((a[:, :, 0], b[:, :, 0]), axis=-1), axis=2)
    inter_h = np.min(np.stack((a[:, :, 3], b[:, :, 3]), axis=-1), axis=2) \
        - np.max(np.stack((a[:, :, 1], b[:, :, 1]), axis=-1), axis=2)
    inter_w[np.where(inter_w < 0)] = 0
    inter_h[np.where(inter_h < 0)] = 0
    intersection = inter_w * inter_h
    union = (a[:, :, 2] - a[:, :, 0]) * (a[:, :, 3] - a[:, :, 1]) \
        + (b[:, :, 2] - b[:, :, 0]) * (b[:, :, 3] - b[:, :, 1]) - intersection
    return intersection / union
| [
"numpy.stack",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"numpy.any",
"numpy.where",
"numpy.array",
"numpy.nanmean"
] | [((3373, 3418), 'numpy.array', 'np.array', (['expected_overlaps'], {'dtype': 'np.float32'}), '(expected_overlaps, dtype=np.float32)\n', (3381, 3418), True, 'import numpy as np\n'), ((3745, 3774), 'numpy.zeros', 'np.zeros', (['max_len', 'np.float32'], {}), '(max_len, np.float32)\n', (3753, 3774), True, 'import numpy as np\n'), ((1513, 1552), 'numpy.ones', 'np.ones', (['fragment_num'], {'dtype': 'np.float32'}), '(fragment_num, dtype=np.float32)\n', (1520, 1552), True, 'import numpy as np\n'), ((1578, 1628), 'numpy.ones', 'np.ones', (['(fragment_num, max_len)'], {'dtype': 'np.float32'}), '((fragment_num, max_len), dtype=np.float32)\n', (1585, 1628), True, 'import numpy as np\n'), ((3449, 3476), 'numpy.isnan', 'np.isnan', (['expected_overlaps'], {}), '(expected_overlaps)\n', (3457, 3476), True, 'import numpy as np\n'), ((3489, 3543), 'numpy.sum', 'np.sum', (['(expected_overlaps[is_valid] * weight[is_valid])'], {}), '(expected_overlaps[is_valid] * weight[is_valid])\n', (3495, 3543), True, 'import numpy as np\n'), ((3546, 3570), 'numpy.sum', 'np.sum', (['weight[is_valid]'], {}), '(weight[is_valid])\n', (3552, 3570), True, 'import numpy as np\n'), ((3927, 3939), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (3933, 3939), True, 'import numpy as np\n'), ((6550, 6570), 'numpy.nanmean', 'np.nanmean', (['overlaps'], {}), '(overlaps)\n', (6560, 6570), True, 'import numpy as np\n'), ((7663, 7682), 'numpy.zeros', 'np.zeros', (['(1, K, 4)'], {}), '((1, K, 4))\n', (7671, 7682), True, 'import numpy as np\n'), ((7741, 7760), 'numpy.zeros', 'np.zeros', (['(N, 1, 4)'], {}), '((N, 1, 4))\n', (7749, 7760), True, 'import numpy as np\n'), ((7793, 7842), 'numpy.stack', 'np.stack', (['(box1[:, :, 0], box2[:, :, 0])'], {'axis': '(-1)'}), '((box1[:, :, 0], box2[:, :, 0]), axis=-1)\n', (7801, 7842), True, 'import numpy as np\n'), ((7871, 7920), 'numpy.stack', 'np.stack', (['(box1[:, :, 2], box2[:, :, 2])'], {'axis': '(-1)'}), '((box1[:, :, 2], box2[:, :, 2]), axis=-1)\n', (7879, 7920), 
True, 'import numpy as np\n'), ((7949, 7998), 'numpy.stack', 'np.stack', (['(box1[:, :, 1], box2[:, :, 1])'], {'axis': '(-1)'}), '((box1[:, :, 1], box2[:, :, 1]), axis=-1)\n', (7957, 7998), True, 'import numpy as np\n'), ((8027, 8076), 'numpy.stack', 'np.stack', (['(box1[:, :, 3], box2[:, :, 3])'], {'axis': '(-1)'}), '((box1[:, :, 3], box2[:, :, 3]), axis=-1)\n', (8035, 8076), True, 'import numpy as np\n'), ((8135, 8151), 'numpy.where', 'np.where', (['(tb < 0)'], {}), '(tb < 0)\n', (8143, 8151), True, 'import numpy as np\n'), ((8164, 8180), 'numpy.where', 'np.where', (['(lr < 0)'], {}), '(lr < 0)\n', (8172, 8180), True, 'import numpy as np\n'), ((3889, 3914), 'numpy.isnan', 'np.isnan', (['fragments[:, i]'], {}), '(fragments[:, i])\n', (3897, 3914), True, 'import numpy as np\n'), ((4010, 4029), 'numpy.sum', 'np.sum', (['fragment', '(1)'], {}), '(fragment, 1)\n', (4016, 4029), True, 'import numpy as np\n'), ((4085, 4118), 'numpy.sum', 'np.sum', (['(seq_mean * fweights[mask])'], {}), '(seq_mean * fweights[mask])\n', (4091, 4118), True, 'import numpy as np\n'), ((4163, 4185), 'numpy.sum', 'np.sum', (['fweights[mask]'], {}), '(fweights[mask])\n', (4169, 4185), True, 'import numpy as np\n'), ((6903, 7038), 'numpy.array', 'np.array', (['[[pred_bbox[0], pred_bbox[1]], [pred_bbox[2], pred_bbox[1]], [pred_bbox[2],\n pred_bbox[3]], [pred_bbox[0], pred_bbox[3]]]'], {}), '([[pred_bbox[0], pred_bbox[1]], [pred_bbox[2], pred_bbox[1]], [\n pred_bbox[2], pred_bbox[3]], [pred_bbox[0], pred_bbox[3]]])\n', (6911, 7038), True, 'import numpy as np\n'), ((2020, 2085), 'numpy.array', 'np.array', (['overlaps[points[i]:points[i + 1] + 1]'], {'dtype': 'np.float32'}), '(overlaps[points[i]:points[i + 1] + 1], dtype=np.float32)\n', (2028, 2085), True, 'import numpy as np\n'), ((2185, 2233), 'numpy.array', 'np.array', (['overlaps[points[i]:]'], {'dtype': 'np.float32'}), '(overlaps[points[i]:], dtype=np.float32)\n', (2193, 2233), True, 'import numpy as np\n'), ((2259, 2277), 'numpy.isnan', 
'np.isnan', (['fragment'], {}), '(fragment)\n', (2267, 2277), True, 'import numpy as np\n'), ((4968, 5038), 'numpy.array', 'np.array', (['[[0, 0], [0, bound[1]], [bound[0], bound[1]], [bound[0], 0]]'], {}), '([[0, 0], [0, bound[1]], [bound[0], bound[1]], [bound[0], 0]])\n', (4976, 5038), True, 'import numpy as np\n'), ((5245, 5457), 'numpy.array', 'np.array', (['[[pred_trajectory[i][0], pred_trajectory[i][1]], [pred_trajectory[i][2],\n pred_trajectory[i][1]], [pred_trajectory[i][2], pred_trajectory[i][3]],\n [pred_trajectory[i][0], pred_trajectory[i][3]]]'], {}), '([[pred_trajectory[i][0], pred_trajectory[i][1]], [pred_trajectory[\n i][2], pred_trajectory[i][1]], [pred_trajectory[i][2], pred_trajectory[\n i][3]], [pred_trajectory[i][0], pred_trajectory[i][3]]])\n', (5253, 5457), True, 'import numpy as np\n'), ((6757, 6776), 'numpy.array', 'np.array', (['pred_bbox'], {}), '(pred_bbox)\n', (6765, 6776), True, 'import numpy as np\n'), ((6793, 6810), 'numpy.array', 'np.array', (['gt_bbox'], {}), '(gt_bbox)\n', (6801, 6810), True, 'import numpy as np\n'), ((7234, 7251), 'numpy.array', 'np.array', (['gt_bbox'], {}), '(gt_bbox)\n', (7242, 7251), True, 'import numpy as np\n'), ((5688, 5714), 'numpy.array', 'np.array', (['gt_trajectory[i]'], {}), '(gt_trajectory[i])\n', (5696, 5714), True, 'import numpy as np\n'), ((6349, 6377), 'numpy.array', 'np.array', (['pred_trajectory[i]'], {}), '(pred_trajectory[i])\n', (6357, 6377), True, 'import numpy as np\n'), ((6394, 6420), 'numpy.array', 'np.array', (['gt_trajectory[i]'], {}), '(gt_trajectory[i])\n', (6402, 6420), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/14 22:51
# @Author : <NAME>
# @Site : www.jackokie.com
# @File : file_2_1.py
# @Software: PyCharm
# @contact: <EMAIL>
import numpy as np
import matplotlib.pyplot as plt
# matplotlib.rc('font', size=30)
fig = plt.figure(figsize=(8,6), dpi=160)
ax = fig.add_subplot(111)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
x = np.arange(-5, 10, 0.1)
y = (x>0)*x
plt.xlim([-5, 12])
plt.yticks([2, 4, 6, 8, 10], fontsize=16)
plt.xticks(fontsize=16)
fig.add_axes()
plt.plot(x, y, 'r')
plt.xlabel('$z$', fontsize=16)
plt.ylabel("$f(z)$", rotation='horizontal',fontsize=16)
plt.tight_layout()
plt.show()
# fig.savefig('/home/scl/Documents/JackokiePapers/figures/chapter_2/fig_2_2.png')
fig.savefig('E:/JackokiePapers/figures/chapter_2/fig_2_2.png')
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.xlabel"
] | [((282, 317), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 'dpi': '(160)'}), '(figsize=(8, 6), dpi=160)\n', (292, 317), True, 'import matplotlib.pyplot as plt\n'), ((585, 607), 'numpy.arange', 'np.arange', (['(-5)', '(10)', '(0.1)'], {}), '(-5, 10, 0.1)\n', (594, 607), True, 'import numpy as np\n'), ((621, 639), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-5, 12]'], {}), '([-5, 12])\n', (629, 639), True, 'import matplotlib.pyplot as plt\n'), ((640, 681), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[2, 4, 6, 8, 10]'], {'fontsize': '(16)'}), '([2, 4, 6, 8, 10], fontsize=16)\n', (650, 681), True, 'import matplotlib.pyplot as plt\n'), ((682, 705), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (692, 705), True, 'import matplotlib.pyplot as plt\n'), ((721, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""'], {}), "(x, y, 'r')\n", (729, 740), True, 'import matplotlib.pyplot as plt\n'), ((741, 771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {'fontsize': '(16)'}), "('$z$', fontsize=16)\n", (751, 771), True, 'import matplotlib.pyplot as plt\n'), ((772, 828), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$f(z)$"""'], {'rotation': '"""horizontal"""', 'fontsize': '(16)'}), "('$f(z)$', rotation='horizontal', fontsize=16)\n", (782, 828), True, 'import matplotlib.pyplot as plt\n'), ((828, 846), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (844, 846), True, 'import matplotlib.pyplot as plt\n'), ((847, 857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (855, 857), True, 'import matplotlib.pyplot as plt\n')] |
import time
from collections import OrderedDict, defaultdict
from functools import reduce, wraps
from inspect import signature
import matplotlib.pyplot as plt
from rfho import as_list
import tensorflow as tf
import rfho as rf
try:
from IPython.display import IFrame
import IPython
except ImportError:
print('Looks like IPython is not installed...')
IFrame, IPython = None, None
import gzip
import os
import _pickle as pickle
import numpy as np
try:
from tabulate import tabulate
except ImportError:
print('Might want to install library "tabulate" for a better dictionary printing')
tabulate = None
def join_paths(*paths):
return reduce(lambda acc, new_path: os.path.join(acc, new_path), paths)
SAVE_SETTINGS = {
'NOTEBOOK_TITLE': ''
}
_EXP_ROOT_FOLDER = os.getenv('RFHO_EXP_FOLDER')
if _EXP_ROOT_FOLDER is None:
print('Environment variable RFHO_EXP_FOLDER not found. Current directory will be used')
_EXP_ROOT_FOLDER = join_paths(os.getcwd(), 'Experiments')
print('Experiment save directory is ', _EXP_ROOT_FOLDER)
FOLDER_NAMINGS = { # TODO should go into a settings file?
'EXP_ROOT': _EXP_ROOT_FOLDER,
'OBJ_DIR': 'Obj_data',
'PLOTS_DIR': 'Plots',
'MODELS_DIR': 'Models',
'GEPHI_DIR': 'GePhi',
}
def check_or_create_dir(directory, notebook_mode=True, create=True):
if not os.path.exists(directory) and create:
os.mkdir(directory)
print('folder', directory, 'has been created')
if notebook_mode and SAVE_SETTINGS['NOTEBOOK_TITLE']:
directory = join_paths(directory, SAVE_SETTINGS['NOTEBOOK_TITLE']) # += '/' + settings['NOTEBOOK_TITLE']
if not os.path.exists(directory) and create:
os.mkdir(directory)
print('folder ', directory, 'has been created')
return directory
def save_fig(name, root_dir=None, notebook_mode=True, default_overwrite=False, extension='pdf', **savefig_kwargs):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['PLOTS_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.%s' % (name, extension)) # directory + '/%s.pdf' % name
if not default_overwrite and os.path.isfile(filename):
# if IPython is not None:
# IPython.display.display(tuple(IFrame(filename, width=800, height=600))) # FIXME not working!
overwrite = input('A file named %s already exists. Overwrite (Leave string empty for NO!)?' % filename)
if not overwrite:
print('No changes done.')
return
plt.savefig(filename, **savefig_kwargs)
# print('file saved')
def save_obj(obj, name, root_dir=None, notebook_mode=True, default_overwrite=False):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['OBJ_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.pkgz' % name) # directory + '/%s.pkgz' % name
if not default_overwrite and os.path.isfile(filename):
overwrite = input('A file named %s already exists. Overwrite (Leave string empty for NO!)?' % filename)
if not overwrite:
print('No changes done.')
return
print('Overwriting...')
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f)
# print('File saved!')
def save_text(text, name, root_dir=None, notebook_mode=True, default_overwrite=False):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.txt' % name) # directory + '/%s.pkgz' % name
if not default_overwrite and os.path.isfile(filename):
overwrite = input('A file named %s already exists. Overwrite (Leave string empty for NO!)?' % filename)
if not overwrite:
print('No changes done.')
return
print('Overwriting...')
with open(filename, "w") as text_file:
text_file.write(text)
def load_obj(name, root_dir=None, notebook_mode=True):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['OBJ_DIR']),
notebook_mode=notebook_mode, create=False)
filename = join_paths(directory, name if name.endswith('.pkgz') else name + '.pkgz')
with gzip.open(filename, 'rb') as f:
return pickle.load(f)
def save_model(session, model, step, root_dir=None, notebook_mode=True):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['MODELS_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s' % model.name)
model.saver.save(session, filename, global_step=step)
def load_model(session, model, step, root_dir=None, notebook_mode=True):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['MODELS_DIR']),
notebook_mode=notebook_mode, create=False)
filename = join_paths(directory, model.name)
model.saver.restore(session, filename + "-" + str(step))
def save_adjacency_matrix_for_gephi(matrix, name, root_dir=None, notebook_mode=True, class_names=None):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['GEPHI_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.csv' % name)
m, n = np.shape(matrix)
assert m == n, '%s should be a square matrix.' % matrix
if not class_names:
class_names = [str(k) for k in range(n)]
left = np.array([class_names]).T
matrix = np.hstack([left, matrix])
up = np.vstack([[''], left]).T
matrix = np.vstack([up, matrix])
np.savetxt(filename, matrix, delimiter=';', fmt='%s')
def save_setting(local_variables, root_dir=None, excluded=None, default_overwrite=False, collect_data=True,
notebook_mode=True, do_print=True, append_string=''):
dictionary = generate_setting_dict(local_variables, excluded=excluded)
if do_print:
if tabulate:
print(tabulate(dictionary.items(), headers=('settings var names', 'values')))
else:
print('SETTING:')
for k, v in dictionary.items():
print(k, v, sep=': ')
print()
if collect_data: save_obj(dictionary, 'setting' + append_string,
root_dir=root_dir,
default_overwrite=default_overwrite,
notebook_mode=notebook_mode)
def generate_setting_dict(local_variables, excluded=None):
"""
Generates a dictionary of (name, values) of local variables (typically obtained by vars()) that
can be saved at the beginning of the experiment. Furthermore, if an object obj in local_variables implements the
function setting(), it saves the result of obj.setting() as value in the dictionary.
:param local_variables:
:param excluded: (optional, default []) variable or list of variables to be excluded.
:return: A dictionary
"""
excluded = as_list(excluded) or []
setting_dict = {k: v.setting() if hasattr(v, 'setting') else v
for k, v in local_variables.items() if v not in excluded}
import datetime
setting_dict['datetime'] = str(datetime.datetime.now())
return setting_dict
class Timer:
"""
Stopwatch class for timing the experiments. Uses `time` module.
"""
_div_unit = {'ms': 1. / 1000,
'sec': 1.,
'min': 60.,
'hr': 3600.}
def __init__(self, unit='sec', round_off=True):
self._starting_times = []
self._stopping_times = []
self._running = False
self.round_off = round_off
assert unit in Timer._div_unit
self.unit = unit
def reset(self):
self._starting_times = []
self._stopping_times = []
self._running = False
def start(self):
if not self._running:
self._starting_times.append(time.time())
self._running = True
return self
def stop(self):
if self._running:
self._stopping_times.append(time.time())
self._running = False
return self
def raw_elapsed_time_list(self):
def _maybe_add_last():
t2 = self._stopping_times if len(self._starting_times) == len(self._stopping_times) else \
self._stopping_times + [time.time()]
return zip(self._starting_times, t2)
return [t2 - t1 for t1, t2 in _maybe_add_last()]
def elapsed_time(self):
res = sum(self.raw_elapsed_time_list()) / Timer._div_unit[self.unit]
return res if not self.round_off else int(res)
class Saver:
"""
Class for recording experiment data
"""
SKIP = 'SKIP' # skip saving value in save_dict
def __init__(self, experiment_names, *items, append_date_to_name=True,
root_directory=FOLDER_NAMINGS['EXP_ROOT'],
timer=None, do_print=True, collect_data=True, default_overwrite=False):
"""
Initialize a saver to collect data. (Intended to be used together with OnlinePlotStream.)
:param experiment_names: string or list of strings which represent the name of the folder (and sub-folders)
experiment oand
:param items: a list of (from pairs to at most) 5-tuples that represent the things you want to save.
The first arg of each tuple should be a string that will be the key of the save_dict.
Then there can be either a callable with signature (step) -> None
Should pass the various args in ths order:
fetches: tensor or list of tensors to compute;
feeds (optional): to be passed to tf.Session.run. Can be a
callable with signature (step) -> feed_dict
options (optional): to be passed to tf.Session.run
run_metadata (optional): to be passed to tf.Session.run
:param timer: optional timer object. If None creates a new one. If false does not register time.
If None or Timer it adds to the save_dict an entry time that record elapsed_time.
The time required to perform data collection and saving are not counted, since typically
the aim is to record the true algorithm execution time!
:param root_directory: string, name of root directory (default ~HOME/Experiments)
:param do_print: (optional, default True) will print by default `save_dict` each time method `save` is executed
:param collect_data: (optional, default True) will save by default `save_dict` each time
method `save` is executed
"""
experiment_names = as_list(experiment_names)
if append_date_to_name:
from datetime import datetime
experiment_names += [datetime.today().strftime('%d-%m-%y__%Hh%Mm')]
self.experiment_names = list(experiment_names)
if not os.path.isabs(experiment_names[0]):
self.directory = join_paths(root_directory) # otherwise assume no use of root_directory
if collect_data:
check_or_create_dir(root_directory, notebook_mode=False)
else: self.directory = ''
for name in self.experiment_names:
self.directory = join_paths(self.directory, name)
check_or_create_dir(self.directory, notebook_mode=False, create=collect_data)
self.do_print = do_print
self.collect_data = collect_data
self.default_overwrite = default_overwrite
assert isinstance(timer, Timer) or timer is None or timer is False, 'timer param not good...'
if timer is None:
timer = Timer()
self.timer = timer
self.clear_items()
self.add_items(*items)
# noinspection PyAttributeOutsideInit
def clear_items(self):
"""
Removes all previously inserted items
:return:
"""
self._processed_items = []
self._step = -1
@staticmethod
def process_items(*items):
"""
Add items to the save dictionary
:param items: a list of (from pairs to at most) 5-tuples that represent the things you want to save.
The first arg of each tuple should be a string that will be the key of the save_dict.
Then there can be either a callable with signature (step) -> result or () -> result
or tensorflow things... In this second case you should pass the following args in ths order:
fetches: tensor or list of tensors to compute;
feeds (optional): to be passed to tf.Session.run. Can be a
callable with signature (step) -> feed_dict
or () -> feed_dict
options (optional): to be passed to tf.Session.run
run_metadata (optional): to be passed to tf.Session.run
:return: None
"""
assert len(items) == 0 or isinstance(items[0], str), 'Check items! first arg %s. Should be a string.' \
'All args: %s' % (items[0], items)
processed_args = []
k = 0
while k < len(items):
part = [items[k]]
k += 1
while k < len(items) and not isinstance(items[k], str):
part.append(items[k])
k += 1
assert len(part) >= 2, 'Check args! Last part %s' % part
if callable(part[1]): # python stuff
if len(part) == 2: part.append(True) # always true default condition
else: # tensorflow stuff
part += [None] * (6 - len(part)) # representing name, fetches, feeds, options, metadata
if part[3] is None: part[3] = True # default condition
processed_args.append(part)
# self._processed_items += processed_args
# return [pt[0] for pt in processed_args]
return processed_args
def add_items(self, *items):
"""
Adds internally items to this saver
:param items:
:return:
"""
processed_items = Saver.process_items(*items)
self._processed_items += processed_items
return [pt[0] for pt in processed_items]
def save(self, step=None, session=None, append_string="", do_print=None, collect_data=None,
processed_items=None, _res=None):
"""
Builds and save a dictionary with the keys and values specified at construction time or by method
`add_items`
:param processed_items: optional, processed item list (returned by add_items)
if None uses internally stored items
:param session: Optional tensorflow session, otherwise uses default session
:param step: optional step, if None (default) uses internal step
(int preferred, otherwise does not work well with `pack_save_dictionaries`).
:param append_string: (optional str) string to append at the file name to `str(step)`
:param do_print: (default as object field)
:param collect_data: (default as object field)
:param _res: used internally by context managers
:return: the dictionary
"""
from tensorflow import get_default_session
if step is None:
self._step += 1
step = self._step
if not processed_items: processed_items = self._processed_items
if do_print is None: do_print = self.do_print
if collect_data is None: collect_data = self.collect_data
if session:
ss = session
else:
ss = get_default_session()
if ss is None and do_print: print('WARNING, No tensorflow session available')
if self.timer: self.timer.stop()
def _maybe_call(_method):
if not callable(_method): return _method
if len(signature(_method).parameters) == 0:
return _method()
elif len(signature(_method).parameters) == 1:
return _method(step)
else: # complete signature?
return _method(step, _res)
save_dict = OrderedDict([(pt[0], _maybe_call(pt[1]) if callable(pt[1])
else ss.run(pt[1], feed_dict=_maybe_call(pt[2]),
options=pt[4], run_metadata=pt[5])
if _maybe_call(pt[2 if callable(pt[1]) else 3]) else Saver.SKIP)
for pt in processed_items]
)
if self.timer: save_dict['Elapsed time (%s)' % self.timer.unit] = self.timer.elapsed_time()
if do_print:
if tabulate:
print(tabulate(save_dict.items(), headers=('Step %s' % step, 'Values'), floatfmt='.5f'))
else:
print('SAVE DICT:')
for key, v in save_dict.items():
print(key, v, sep=': ')
print()
if collect_data:
self.save_obj(save_dict, str(step) + append_string)
if self.timer: self.timer.start()
return save_dict
def pack_save_dictionaries(self, name='all', append_string='', erase_others=True):
"""
Creates an unique file starting from file created by method `save`.
The file contains a dictionary with keys equal to save_dict keys and values list of values form original files.
:param name:
:param append_string:
:param erase_others:
:return: The generated dictionary
"""
import glob
all_files = sorted(glob.glob(join_paths(
self.directory, FOLDER_NAMINGS['OBJ_DIR'], '[0-9]*%s.pkgz' % append_string)),
key=os.path.getctime) # sort by creation time
if len(all_files) == 0:
print('No file found')
return
objs = [load_obj(path, root_dir='', notebook_mode=False) for path in all_files]
# packed_dict = OrderedDict([(k, []) for k in objs[0]])
# noinspection PyArgumentList
packed_dict = defaultdict(list, OrderedDict())
for obj in objs:
[packed_dict[k].append(v) for k, v in obj.items()]
self.save_obj(packed_dict, name=name + append_string)
if erase_others:
[os.remove(f) for f in all_files]
return packed_dict
def record(self, *what, append_string=''): # TODO this is un initial (maybe bad) idea.
"""
Context manager for saver. saves executions
:param what:
:param append_string:
:return:
"""
return Records.on_hyperiteration(self, *what, append_string=append_string) # FIXME to be finished
def save_text(self, text, name):
return save_text(text=text, name=name, root_dir=self.directory, default_overwrite=self.default_overwrite,
notebook_mode=False)
def save_fig(self, name, extension='pdf', **savefig_kwargs):
"""
Object-oriented version of `save_fig`
:param extension:
:param name: name of the figure (.pdf extension automatically added)
:return:
"""
return save_fig(name, root_dir=self.directory, extension=extension,
default_overwrite=self.default_overwrite, notebook_mode=False,
**savefig_kwargs)
def save_obj(self, obj, name):
"""
Object-oriented version of `save_obj`
:param obj: object to save
:param name: name of the file (.pkgz extension automatically added)
:return:
"""
return save_obj(obj, name, root_dir=self.directory,
default_overwrite=self.default_overwrite, notebook_mode=False)
def save_adjacency_matrix_for_gephi(self, matrix, name, class_names=None):
"""
Object-oriented version of `save_adjacency_matrix_for_gephi`
:param matrix:
:param name:
:param class_names:
:return:
"""
return save_adjacency_matrix_for_gephi(matrix, name, root_dir=self.directory,
notebook_mode=False, class_names=class_names)
def save_setting(self, local_variables, excluded=None, append_string=''):
"""
Object-oriented version of `save_setting`
:param local_variables:
:param excluded:
:param append_string:
:return:
"""
excluded = as_list(excluded or [])
excluded.append(self) # no reason to save itself...
return save_setting(local_variables, root_dir=self.directory, excluded=excluded,
default_overwrite=self.default_overwrite, collect_data=self.collect_data,
notebook_mode=False, do_print=self.do_print, append_string=append_string)
def load_obj(self, name):
"""
Object-oriented version of `load_obj`
:param name: name of the file (.pkgz extension automatically added)
:return: unpacked object
"""
return load_obj(name, root_dir=self.directory, notebook_mode=False)
def save_model(self, session, model, step):
save_model(session, model, step, root_dir=self.directory, notebook_mode=False)
def load_model(self, session, model, step):
load_model(session, model, step, root_dir=self.directory)
# noinspection PyPep8Naming
def Loader(folder_name):
"""
utility method for creating a Saver with loading intentions,
does not create timer nor append time to name. just give the folder name
for the saver
:param folder_name: (string or list of strings)
either absolute or relative, in which case root_directory will be used
:return: a `Saver` object
"""
return Saver(folder_name, append_date_to_name=False, timer=False,
collect_data=False)
class Records:
"""
Contains (for the moment) static convenience methods for recording quantities
"""
class on_hyperiteration:
"""
context for record at each hyperiteration
"""
def __init__(self, saver, *record_what, append_string='', do_print=None, collect_data=None):
self.saver = saver
self.append_string = append_string
if self.append_string: self.append_string = '__' + self.append_string
self.do_print = do_print
self.collect_data = collect_data
self._unwrapped = []
self._record_what = record_what or []
self._processed_items = []
self._step = 0
def __enter__(self):
self._wrap()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.collect_data:
if exc_tb:
self.saver.save_obj((str(exc_type), str(exc_val), str(exc_tb)),
'exception' + self.append_string)
self.saver.pack_save_dictionaries(append_string=self.append_string)
self._unwrap()
# TODO is this a good thing? or should we leave it to do manually
self.saver.clear_items()
if self.saver.timer: self.saver.timer.stop()
def _wrap(self):
self._unwrapped.append(rf.HyperOptimizer.initialize)
rf.HyperOptimizer.initialize = self._initialize_wrapper(rf.HyperOptimizer.initialize)
self._unwrapped.append(rf.HyperOptimizer.run)
rf.HyperOptimizer.run = self._saver_wrapper(rf.HyperOptimizer.run) # mmm...
def _unwrap(self):
rf.HyperOptimizer.initialize = self._unwrapped[0]
rf.HyperOptimizer.run = self._unwrapped[1]
def _saver_wrapper(self, f):
@wraps(f)
def _saver_wrapped(*args, **kwargs):
res = f(*args, **kwargs)
self._execute_save(res, *args, **kwargs)
return res
return _saver_wrapped
def _initialize_wrapper(self, f): # this should be good since
@wraps(f)
def _initialize_wrapped(*args, **kwargs):
first_init = f(*args, **kwargs)
# add savers just at the first initialization
if first_init:
self._processed_items += rf.flatten_list(
[Saver.process_items(*e(*args, **kwargs)) for e in self._record_what])
self._execute_save('INIT', *args, **kwargs)
return first_init
return _initialize_wrapped
# noinspection PyUnusedLocal
def _execute_save(self, res, *args, **kwargs): # maybe args and kwargs could be useful...
self.saver.save(step=self._step, append_string=self.append_string,
processed_items=self._processed_items,
do_print=self.do_print, collect_data=self.collect_data,
_res=res)
self._step += 1
# noinspection PyClassHasNoInit,PyPep8Naming
class on_forward(on_hyperiteration): # context class
"""
Saves at every iteration (before call of method `step_forward`)
"""
def _wrap(self):
self._unwrapped.append(rf.HyperOptimizer.initialize)
rf.HyperOptimizer.initialize = self._initialize_wrapper(rf.HyperOptimizer.initialize)
self._unwrapped.append(rf.ForwardHG.step_forward)
rf.ForwardHG.step_forward = self._saver_wrapper(rf.ForwardHG.step_forward) # mmm...
def _unwrap(self):
rf.HyperOptimizer.initialize = self._unwrapped[0]
rf.ForwardHG.step_forward = self._unwrapped[1]
@staticmethod
def direct(*items):
"""
Everything passed in items is passed directly to `Saver.
:param items:
:return:
"""
# noinspection PyUnusedLocal
def _call(*args, **kwargs):
return items
return _call
@staticmethod
def norms_of_z():
"""
:return:
"""
def _call(*args, **kwargs):
hg = args[0]
if isinstance(hg, rf.HyperOptimizer): hg = hg.hyper_gradients # guess most common case
assert isinstance(hg, rf.ForwardHG)
_rs = Records.tensors(*hg.zs, op=tf.norm)(args, kwargs)
return _rs
return _call
@staticmethod
def norms_of_d_dynamics_d_hypers(fd=None):
"""
In `ForwardHG` records the norm of the partial derivatives of the dynamics w.r.t. the hyperparameters.
:param fd:
:return:
"""
if fd is None: fd = lambda stp, rs: rs
def _call(*args, **kwargs):
hg = args[0]
if isinstance(hg, rf.HyperOptimizer):
hg = hg.hyper_gradients # guess most common case
assert isinstance(hg, rf.ForwardHG)
_rs = Records.tensors(*hg.d_dynamics_d_hypers, op=tf.norm,
fd=fd,
condition=lambda stp, rs: rs != 'INIT')(args, kwargs)
return _rs
return _call
@staticmethod
def hyperparameters():
"""
Simple one! record all hyperparameter values, assuming the usage of `HyperOptimizer`
:return: a function
"""
# noinspection PyUnusedLocal
def _call(*args, **kwargs):
hyper_optimizer = args[0]
assert isinstance(hyper_optimizer, rf.HyperOptimizer)
return rf.flatten_list(
[rf.simple_name(hyp), hyp]
for hyp in hyper_optimizer.hyper_list)
return _call
@staticmethod
def hypergradients():
"""
Record all hypergradient values, assuming the usage of `HyperOptimizer`
:return:
"""
# noinspection PyUnusedLocal
def _call(*args, **kwargs):
hyper_optimizer = args[0]
assert isinstance(hyper_optimizer, rf.HyperOptimizer)
return rf.flatten_list(
['grad::' + rf.simple_name(hyp), hyper_optimizer.hyper_gradients.hyper_gradients_dict[hyp]]
for hyp in hyper_optimizer.hyper_list)
return _call
@staticmethod
def tensors(*tensors, key=None, scope=None, name_contains=None,
rec_name='', op=tf.identity, fd=None,
condition=True):
"""
Little more difficult... attempts to record tensor named name
:param name_contains: record all tensors which name contains this string. Can be a list.
:type condition: bool | function
:param condition: optional condition for triggering the saving of tensors, can have different
signatures
:param tensors: varargs of tensor names
:param scope: optional for collections
:param key: to record collections
:param op: optional operation to apply to each tensor
:param rec_name: optional name to prepend to all tensors recorded by this
:param fd: # given to _process_feed_dicts_for_rec
:return:
"""
if rec_name: rec_name += '::' # maybe find a better way
def _call(*args, **_kwargs):
if tensors:
_tensors = [tf.get_default_graph().get_tensor_by_name(tns + ':0') if isinstance(tns, str)
else tns for tns in tensors]
elif key:
_tensors = tf.get_collection(key, scope=scope)
elif name_contains:
_names = rf.flatten_list([[n.name for n in tf.get_default_graph().as_graph_def().node
if nc in n.name] for nc in as_list(name_contains)])
return Records.tensors(*_names, rec_name=rec_name, op=op, fd=fd, condition=True)(*args, **_kwargs)
else:
raise NotImplemented('One between key and names should be given')
# try with dictionary of form (string (simple name of placeholder), data)
_rs2 = rf.flatten_list([rec_name + rf.simple_name(tns.name),
op(tns),
Records._process_feed_dicts_for_rec(fd, *args, **_kwargs),
condition]
for tns in _tensors)
return _rs2
return _call
@staticmethod
def model(): # TODO discuss with others to see what's best way to save models...
"""
Should save the model(s) in a useful way..
:return:
"""
raise NotImplemented()
@staticmethod
def setting(): # TODO I have no precise idea on how to implement this...
"""
Should save experiment meta-info like params, dataset, beginning/end...
name of experiment function, git version and so on.
:return:
"""
raise NotImplemented()
@staticmethod
def _process_feed_dicts_for_rec(fd, *args, **kwargs):
# TODO add more functionality...
"""
# try with dictionary of form (string (simple name of placeholder), data)
:param fd:
:param args: # might be useful??
:param kwargs:
:return:
"""
if fd is None or callable(fd): return fd
def _std_process_dict(_dict):
return {tf.get_default_graph().get_tensor_by_name(n + ':0'): v for n, v in _dict.items()}
def _fds():
if isinstance(fd, dict):
_rs = _std_process_dict(fd)
elif isinstance(fd, (list, tuple)): # (x, y, dataset)
if len(fd) == 3 and isinstance(fd[2], rf.Dataset): # very common scenario
_rs = {tf.get_default_graph().get_tensor_by_name(fd[0] + ':0'): fd[2].data,
tf.get_default_graph().get_tensor_by_name(fd[1] + ':0'): fd[2].target,
}
else:
raise NotImplemented('not understood')
else:
raise NotImplemented('not understood')
return _rs
return _fds
if __name__ == '__main__':
    # Smoke test: record a random scalar at three save steps, pausing between
    # them so the timer has something to measure, then pack everything into a
    # single save dictionary.
    saver = Saver('tbd', 'random', lambda step: np.random.randn(),
                  default_overwrite=True)
    saver.timer.start()
    for step, pause_s in ((0, 2), (1, 1), (2, 0)):
        saver.save(step)
        if pause_s:
            time.sleep(pause_s)
    saver.pack_save_dictionaries()
| [
"os.mkdir",
"os.remove",
"datetime.datetime.datetime.now",
"tensorflow.get_collection",
"numpy.shape",
"os.path.isfile",
"tensorflow.get_default_graph",
"os.path.join",
"rfho.simple_name",
"_pickle.load",
"numpy.random.randn",
"numpy.savetxt",
"os.path.exists",
"rfho.as_list",
"inspect.s... | [((801, 829), 'os.getenv', 'os.getenv', (['"""RFHO_EXP_FOLDER"""'], {}), "('RFHO_EXP_FOLDER')\n", (810, 829), False, 'import os\n'), ((2631, 2670), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename, **savefig_kwargs)\n', (2642, 2670), True, 'import matplotlib.pyplot as plt\n'), ((5758, 5774), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (5766, 5774), True, 'import numpy as np\n'), ((5959, 5984), 'numpy.hstack', 'np.hstack', (['[left, matrix]'], {}), '([left, matrix])\n', (5968, 5984), True, 'import numpy as np\n'), ((6033, 6056), 'numpy.vstack', 'np.vstack', (['[up, matrix]'], {}), '([up, matrix])\n', (6042, 6056), True, 'import numpy as np\n'), ((6062, 6115), 'numpy.savetxt', 'np.savetxt', (['filename', 'matrix'], {'delimiter': '""";"""', 'fmt': '"""%s"""'}), "(filename, matrix, delimiter=';', fmt='%s')\n", (6072, 6115), True, 'import numpy as np\n'), ((33169, 33182), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (33179, 33182), False, 'import time\n'), ((33204, 33217), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (33214, 33217), False, 'import time\n'), ((985, 996), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (994, 996), False, 'import os\n'), ((1401, 1420), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (1409, 1420), False, 'import os\n'), ((1968, 1979), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1977, 1979), False, 'import os\n'), ((2264, 2288), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2278, 2288), False, 'import os\n'), ((2820, 2831), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2829, 2831), False, 'import os\n'), ((3104, 3128), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3118, 3128), False, 'import os\n'), ((3366, 3391), 'gzip.open', 'gzip.open', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (3375, 3391), False, 'import gzip\n'), ((3406, 3425), '_pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', 
(3417, 3425), True, 'import _pickle as pickle\n'), ((3581, 3592), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3590, 3592), False, 'import os\n'), ((3837, 3861), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3851, 3861), False, 'import os\n'), ((4257, 4268), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4266, 4268), False, 'import os\n'), ((4532, 4557), 'gzip.open', 'gzip.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (4541, 4557), False, 'import gzip\n'), ((4579, 4593), '_pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4590, 4593), True, 'import _pickle as pickle\n'), ((4705, 4716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4714, 4716), False, 'import os\n'), ((5096, 5107), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5105, 5107), False, 'import os\n'), ((5528, 5539), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5537, 5539), False, 'import os\n'), ((5920, 5943), 'numpy.array', 'np.array', (['[class_names]'], {}), '([class_names])\n', (5928, 5943), True, 'import numpy as np\n'), ((5994, 6017), 'numpy.vstack', 'np.vstack', (["[[''], left]"], {}), "([[''], left])\n", (6003, 6017), True, 'import numpy as np\n'), ((7433, 7450), 'rfho.as_list', 'as_list', (['excluded'], {}), '(excluded)\n', (7440, 7450), False, 'from rfho import as_list\n'), ((7657, 7680), 'datetime.datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7678, 7680), False, 'from datetime import datetime\n'), ((11291, 11316), 'rfho.as_list', 'as_list', (['experiment_names'], {}), '(experiment_names)\n', (11298, 11316), False, 'from rfho import as_list\n'), ((21262, 21285), 'rfho.as_list', 'as_list', (['(excluded or [])'], {}), '(excluded or [])\n', (21269, 21285), False, 'from rfho import as_list\n'), ((698, 725), 'os.path.join', 'os.path.join', (['acc', 'new_path'], {}), '(acc, new_path)\n', (710, 725), False, 'import os\n'), ((1355, 1380), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1369, 1380), False, 'import 
os\n'), ((1714, 1733), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (1722, 1733), False, 'import os\n'), ((11542, 11576), 'os.path.isabs', 'os.path.isabs', (['experiment_names[0]'], {}), '(experiment_names[0])\n', (11555, 11576), False, 'import os\n'), ((16398, 16419), 'tensorflow.get_default_session', 'get_default_session', ([], {}), '()\n', (16417, 16419), False, 'from tensorflow import get_default_session\n'), ((18886, 18899), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18897, 18899), False, 'from collections import OrderedDict, defaultdict\n'), ((24559, 24567), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (24564, 24567), False, 'from functools import reduce, wraps\n'), ((24862, 24870), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (24867, 24870), False, 'from functools import reduce, wraps\n'), ((33047, 33064), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (33062, 33064), True, 'import numpy as np\n'), ((1664, 1689), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1678, 1689), False, 'import os\n'), ((8389, 8400), 'time.time', 'time.time', ([], {}), '()\n', (8398, 8400), False, 'import time\n'), ((8542, 8553), 'time.time', 'time.time', ([], {}), '()\n', (8551, 8553), False, 'import time\n'), ((19090, 19102), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (19099, 19102), False, 'import os\n'), ((30287, 30322), 'tensorflow.get_collection', 'tf.get_collection', (['key'], {'scope': 'scope'}), '(key, scope=scope)\n', (30304, 30322), True, 'import tensorflow as tf\n'), ((8821, 8832), 'time.time', 'time.time', ([], {}), '()\n', (8830, 8832), False, 'import time\n'), ((11424, 11440), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (11438, 11440), False, 'from datetime import datetime\n'), ((16655, 16673), 'inspect.signature', 'signature', (['_method'], {}), '(_method)\n', (16664, 16673), False, 'from inspect import signature\n'), ((28375, 28394), 'rfho.simple_name', 
'rf.simple_name', (['hyp'], {}), '(hyp)\n', (28389, 28394), True, 'import rfho as rf\n'), ((32189, 32211), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (32209, 32211), True, 'import tensorflow as tf\n'), ((16746, 16764), 'inspect.signature', 'signature', (['_method'], {}), '(_method)\n', (16755, 16764), False, 'from inspect import signature\n'), ((28887, 28906), 'rfho.simple_name', 'rf.simple_name', (['hyp'], {}), '(hyp)\n', (28901, 28906), True, 'import rfho as rf\n'), ((30900, 30924), 'rfho.simple_name', 'rf.simple_name', (['tns.name'], {}), '(tns.name)\n', (30914, 30924), True, 'import rfho as rf\n'), ((30103, 30125), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (30123, 30125), True, 'import tensorflow as tf\n'), ((30527, 30549), 'rfho.as_list', 'as_list', (['name_contains'], {}), '(name_contains)\n', (30534, 30549), False, 'from rfho import as_list\n'), ((32559, 32581), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (32579, 32581), True, 'import tensorflow as tf\n'), ((32655, 32677), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (32675, 32677), True, 'import tensorflow as tf\n'), ((30414, 30436), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (30434, 30436), True, 'import tensorflow as tf\n')] |
import numpy as np
import mdtraj as md
__all__ = ["COM", "index_atom_name", "atom_name_COM", "shift_COM", "parse_CG_pdb", "parse_AA_pdb"]
def COM(trj, inds):
    """Per-frame center of mass of the atoms selected by ``inds``."""
    selection = trj.atom_slice(inds)
    return md.compute_center_of_mass(selection)
def index_atom_name(trj, name):
    """Indices of every atom in ``trj`` whose name equals ``name``."""
    atom_names = np.array([atom.name for atom in trj.top.atoms])
    return np.where(atom_names == name)[0]
def atom_name_COM(trj, name):
    """Center of mass of all atoms named ``name``, per frame."""
    matching = index_atom_name(trj, name)
    return COM(trj, matching)
def shift_COM(trj, inds, target_com):
    """Translate the selected atoms in place so their COM moves to ``target_com``."""
    offset = target_com - COM(trj, inds)
    trj.xyz[:, inds] += offset
def index_name(bead_array, name):
    """Positions in ``bead_array`` whose value equals ``name``."""
    hits = bead_array == name
    return np.where(hits)[0]
def parse_CG_pdb(CG_pdb_f_name):
    """
    Read coarse-grained bead names from a PDB file.

    :param CG_pdb_f_name: path to the CG PDB file
    :return: numpy array with the last whitespace-separated field of every
             ATOM/HETATM record, in file order
    """
    CG_bead = list()
    with open(CG_pdb_f_name) as f:
        for line in f:
            split_line = line.split()
            # Guard against blank lines: the original indexed split_line[0]
            # unconditionally, raising IndexError on an empty split.
            if split_line and (split_line[0] == "HETATM" or split_line[0] == "ATOM"):
                CG_bead.append(split_line[-1])
    return np.array(CG_bead)
def parse_AA_pdb(AA_pdb_f_name):
    """
    Read all-atom bead names from a PDB file.

    :param AA_pdb_f_name: path to the AA PDB file
    :return: numpy array with the last whitespace-separated field of every
             ATOM/HETATM record, in file order
    """
    AA_bead = list()
    with open(AA_pdb_f_name) as f:
        for line in f:
            split_line = line.split()
            # Guard against blank lines: the original indexed split_line[0]
            # unconditionally, raising IndexError on an empty split.
            if split_line and (split_line[0] == "HETATM" or split_line[0] == "ATOM"):
                AA_bead.append(split_line[-1])
    return np.array(AA_bead)
"numpy.where",
"numpy.array"
] | [((884, 901), 'numpy.array', 'np.array', (['CG_bead'], {}), '(CG_bead)\n', (892, 901), True, 'import numpy as np\n'), ((1185, 1202), 'numpy.array', 'np.array', (['AA_bead'], {}), '(AA_bead)\n', (1193, 1202), True, 'import numpy as np\n'), ((569, 597), 'numpy.where', 'np.where', (['(name == bead_array)'], {}), '(name == bead_array)\n', (577, 597), True, 'import numpy as np\n'), ((282, 329), 'numpy.array', 'np.array', (['[atom.name for atom in trj.top.atoms]'], {}), '([atom.name for atom in trj.top.atoms])\n', (290, 329), True, 'import numpy as np\n')] |
import numpy as np
from .. import utilities
# Static positional weights for an 8x8 Othello/Reversi board.  Corners are
# highly valuable (30); the squares adjacent to a corner (the -12 / -15
# "C" and "X" squares) are penalized because occupying them tends to hand
# the corner to the opponent.  The table is symmetric in both axes.
POSITION_VALUES = np.array([[30, -12, 0, -1, -1, 0, -12, 30],
                            [-12, -15, -3, -3, -3, -3, -15, -12],
                            [0, -3, 0, -1, -1, 0, -3, 0],
                            [-1, -3, -1, -1, -1, -1, -3, -1],
                            [-1, -3, -1, -1, -1, -1, -3, -1],
                            [0, -3, 0, -1, -1, 0, -3, 0],
                            [-12, -15, -3, -3, -3, -3, -15, -12],
                            [30, -12, 0, -1, -1, 0, -12, 30]])
class Evaluator(object):
    """Static board evaluator: mobility difference plus positional weights."""
    def __init__(self):
        pass
    def evaluate(self, board_state, player_color, opponent_color):
        """Score ``board_state`` from ``player_color``'s point of view.

        Combines a small mobility term (0.1 per extra legal move) with the
        weighted sum of occupied squares from POSITION_VALUES, signed so that
        the result is always relative to the evaluating player.
        """
        own_mobility = len(board_state.list_all_valid_moves(player_color))
        rival_mobility = len(board_state.list_all_valid_moves(opponent_color))
        weighted_board = np.multiply(POSITION_VALUES, board_state.as_numpy_matrix())
        positional_score = np.sum(weighted_board) * \
            utilities.color_string_to_number(player_color)
        mobility_score = (own_mobility - rival_mobility) * 0.1
        return mobility_score + positional_score
if __name__ == '__main__':
    # Quick sanity check of the weight table and its indexing.
    print(f"values: {POSITION_VALUES}")
    print(f"values(0, 0): {POSITION_VALUES[(0, 0)]}")
    print(f"values(4, 0): {POSITION_VALUES[(4, 0)]}")
| [
"numpy.multiply",
"numpy.array"
] | [((63, 359), 'numpy.array', 'np.array', (['[[30, -12, 0, -1, -1, 0, -12, 30], [-12, -15, -3, -3, -3, -3, -15, -12], [0,\n -3, 0, -1, -1, 0, -3, 0], [-1, -3, -1, -1, -1, -1, -3, -1], [-1, -3, -1,\n -1, -1, -1, -3, -1], [0, -3, 0, -1, -1, 0, -3, 0], [-12, -15, -3, -3, -\n 3, -3, -15, -12], [30, -12, 0, -1, -1, 0, -12, 30]]'], {}), '([[30, -12, 0, -1, -1, 0, -12, 30], [-12, -15, -3, -3, -3, -3, -15,\n -12], [0, -3, 0, -1, -1, 0, -3, 0], [-1, -3, -1, -1, -1, -1, -3, -1], [\n -1, -3, -1, -1, -1, -1, -3, -1], [0, -3, 0, -1, -1, 0, -3, 0], [-12, -\n 15, -3, -3, -3, -3, -15, -12], [30, -12, 0, -1, -1, 0, -12, 30]])\n', (71, 359), True, 'import numpy as np\n'), ((951, 992), 'numpy.multiply', 'np.multiply', (['POSITION_VALUES', 'matrix_form'], {}), '(POSITION_VALUES, matrix_form)\n', (962, 992), True, 'import numpy as np\n')] |
"""
Модуль работы над инклинометрией скважины
<NAME>. <NAME>. 18.07.2019 г.
"""
import pandas as pd
import numpy as np
import scipy.interpolate as interpolate
# TODO добавить логику для проверки ошибок - "защиту от дурака"
# TODO проверить методы интерполяции - где уместно линейную, где кубическую?
# TODO проверить простую модель, добавить возможность добавление точек для создания профиля любой сложности
# TODO переделать циклы for на numpy
class well_deviation_survey:
    """
    Builds a well-bore trajectory from a deviation survey loaded from an
    Excel file (measured depth, true vertical depth and deviation angle).
    """
    def __init__(self):
        # Raw survey table (pandas DataFrame) and interpolators built from it.
        self.deviation_survey_dataframe = None
        self.h_vert_interpolate_func = None
        self.vert_angle_interpolate_func = None
        self.curvature_rate_interpolate_func = None
        self.column_h_mes_m = None
        self.column_curvature_rate_grad10m = None
        self.h_mes_m = None
        self.vert_angle_grad = None
        self.curvature_rate_grad10m = None
    def __scip_last_row__(self):
        """
        Drops the last row of the DataFrame - it is usually empty.
        :return: DataFrame without the last row
        """
        self.deviation_survey_dataframe = self.deviation_survey_dataframe.iloc[:-1]
    def __change_column_type__(self, column):
        """
        Converts a column from str to float64 (decimal comma -> decimal point).
        :param column: pandas DataFrame column of str values
        :return: pandas DataFrame column of float64 values
        """
        column = column.str.replace(',', '.')
        column = column.astype('float64')
        return column
    def load_deviation_survey(self, path_to_file_str):
        """
        Loads the survey from Excel and drops the (empty) last row.
        :param path_to_file_str: path to the file, str
        :return: None
        """
        self.deviation_survey_dataframe = pd.read_excel(path_to_file_str)
        self.__scip_last_row__()
    def change_str_to_float(self):
        """
        Converts the relevant str columns to float64.
        :return: None
        """
        self.deviation_survey_dataframe['Координата Х (инклинометрия)'] = self.__change_column_type__(
            self.deviation_survey_dataframe['Координата Х (инклинометрия)'])
        self.deviation_survey_dataframe['Координата Y (инклинометрия)'] = self.__change_column_type__(
            self.deviation_survey_dataframe['Координата Y (инклинометрия)'])
        self.deviation_survey_dataframe['Вертикальная отметка'] = self.__change_column_type__(
            self.deviation_survey_dataframe['Вертикальная отметка'])
        self.deviation_survey_dataframe['Глубина конца интервала, м'] = self.__change_column_type__(
            self.deviation_survey_dataframe['Глубина конца интервала, м'])
        self.deviation_survey_dataframe['Угол, гpад'] = self.__change_column_type__(
            self.deviation_survey_dataframe['Угол, гpад'])
    def interpolate_all(self):
        """
        Interpolates true vertical depth and deviation angle over measured depth.
        :return: None
        """
        self.h_vert_interpolate_func = interpolate.interp1d(
            self.deviation_survey_dataframe['Глубина конца интервала, м'],
            self.deviation_survey_dataframe['Вертикальная отметка'], kind='cubic')
        self.vert_angle_interpolate_func = interpolate.interp1d(
            self.deviation_survey_dataframe['Глубина конца интервала, м'],
            self.deviation_survey_dataframe['Угол, гpад'], kind='cubic')
    def get_h_vert_m(self, h_mes_m):
        """
        Returns the interpolated true vertical depth at a given measured depth.
        :param h_mes_m: measured depth along the well bore, m
        :return: absolute (true vertical) depth, m
        """
        self.h_mes_m = self.h_vert_interpolate_func(h_mes_m)
        return self.h_mes_m
    def get_vert_angle_grad(self, h_mes_m):
        """
        Returns the interpolated deviation angle from vertical.
        :param h_mes_m: measured depth along the well bore, m
        :return: deviation angle from vertical, degrees
        """
        self.vert_angle_grad = self.vert_angle_interpolate_func(h_mes_m)
        return self.vert_angle_grad
    def get_curvature_rate_grad10m(self, h_mes_m):
        # Interpolated dog-leg severity (deg / 10 m) at the given measured depth.
        self.curvature_rate_grad10m = self.curvature_rate_interpolate_func(h_mes_m)
        return self.curvature_rate_grad10m
    def calc_curvature(self):
        """
        Computes the dog-leg severity (angle change per 10 m of bore).
        Produces:
            an extra DataFrame column with the computed values,
            a standalone array with the computed values,
            an interpolation function: curvature rate vs measured depth.
        :return: None
        """
        h_mes_m = [0]
        curvature_rate = [0]
        column_h_mes = self.deviation_survey_dataframe['Глубина конца интервала, м']
        borehole_lenth_m = column_h_mes.max() - column_h_mes.min()
        lenth_of_one_part = 10
        amounts_of_parts = int(
            borehole_lenth_m / lenth_of_one_part - borehole_lenth_m % lenth_of_one_part / lenth_of_one_part)
        for i in range(amounts_of_parts):
            current_h_mes_m = h_mes_m[-1] + lenth_of_one_part
            current_angle_grad = self.get_vert_angle_grad(current_h_mes_m)
            last_angle_grad = self.get_vert_angle_grad(h_mes_m[-1])
            current_curvature_rate = abs(current_angle_grad - last_angle_grad) / lenth_of_one_part
            h_mes_m.append(current_h_mes_m)
            curvature_rate.append(current_curvature_rate)
        if h_mes_m[-1] < column_h_mes.max():
            # Handle the final partial interval up to total measured depth.
            current_h_mes_m = column_h_mes.max() - lenth_of_one_part
            current_angle_grad = self.get_vert_angle_grad(current_h_mes_m)
            last_angle_grad = self.get_vert_angle_grad(column_h_mes.max())
            current_curvature_rate = abs(current_angle_grad - last_angle_grad) / lenth_of_one_part
            curvature_rate.append(current_curvature_rate)
            h_mes_m.append(column_h_mes.max())
        h_mes_m = np.asarray(h_mes_m)
        curvature_rate = np.asarray(curvature_rate)
        self.curvature_rate_interpolate_func = interpolate.interp1d(h_mes_m, curvature_rate, kind='cubic')
        self.deviation_survey_dataframe['Интенсивность кривизны, град/10 м'] = self.curvature_rate_interpolate_func(
            column_h_mes
        )
        self.column_curvature_rate_grad10m = self.deviation_survey_dataframe['Интенсивность кривизны, град/10 м']
    def calc_all(self):
        """
        Runs every required computation; afterwards the object is ready to use.
        :return: None
        """
        self.change_str_to_float()
        self.interpolate_all()
        self.calc_curvature()
        self.column_h_mes_m = self.deviation_survey_dataframe['Глубина конца интервала, м']
class simple_well_deviation_survey():
    """
    Builds a simple well profile from a few anchor points.
    """
    def __init__(self):
        # Anchor points: conductor shoe, pump intake and bottom hole, each
        # given as measured depth (mes) and true vertical depth (vert).
        self.h_conductor_mes_m = 500
        self.h_conductor_vert_m = 500
        self.h_pump_mes_m = 1200
        self.h_pump_vert_m = 1000
        self.h_bottomhole_mes_m = 2500
        self.h_bottomhole_vert_m = 1500
        self.lenth_of_one_part = 10
        self.h_mes_init_data_for_interpolation_m = None
        self.h_vert_init_data_for_interpolation_m = None
        self.interpolation_func_slinear_h_vert_by_h_mes = None
        self.interpolation_func_cubic_h_vert_by_h_mes = None
        self.amounts_of_parts = None
        self.h_mes_m = None
        self.h_vert_m = None
        self.angle_to_horizontal_grad = None
        self.x_displacement_m = None
        self.y_displacement_m = None
        self.curvature_rate_grad10m = None
        self.borehole_extension_m = None
        self.interpolation_x_displacement_by_h_mes = None
        self.interpolation_angle_to_horizontal_by_h_mes = None
        self.interpolation_h_vert_by_h_mes = None
        self.interpolation_borehole_extension_by_h_mes = None
        self.interpolation_curvature_rate_by_h_mes = None
    def calc_all(self):
        """
        Builds the well profile by interpolating over the anchor points:
        conductor shoe, pump intake and bottom hole (true vertical depth and
        measured depth along the bore for each).
        :return: None
        """
        self.h_mes_init_data_for_interpolation_m = np.asarray([0, self.h_conductor_mes_m,
                                                                self.h_pump_mes_m, self.h_bottomhole_mes_m])
        self.h_vert_init_data_for_interpolation_m = np.asarray([0, self.h_conductor_vert_m,
                                                                 self.h_pump_vert_m, self.h_bottomhole_vert_m])
        self.interpolation_func_slinear_h_vert_by_h_mes = interpolate.interp1d(self.h_mes_init_data_for_interpolation_m,
                                                                               self.h_vert_init_data_for_interpolation_m,
                                                                               kind='linear')
        self.interpolation_func_cubic_h_vert_by_h_mes = interpolate.interp1d(self.h_mes_init_data_for_interpolation_m,
                                                                             self.h_vert_init_data_for_interpolation_m,
                                                                             kind='cubic')
        self.amounts_of_parts = int(self.h_bottomhole_mes_m / self.lenth_of_one_part)
        h_mes_m = [0]
        h_vert_m = [0]
        angle_to_horizontal_grad = [90]
        x_displacement_m = [0]
        curvature_rate_grad10m = [0]
        borehole_extension = [0]
        # March down the bore in fixed-length segments, deriving geometry of
        # each segment from the interpolated vertical-depth increment.
        for i in range(self.amounts_of_parts):
            current_h_mes_m = h_mes_m[-1] + self.lenth_of_one_part
            current_h_vert_m = float(self.interpolation_func_slinear_h_vert_by_h_mes(current_h_mes_m))
            if current_h_vert_m < current_h_mes_m:
                # Once the bore starts deviating, prefer the smooth cubic fit.
                current_h_vert_m = float(self.interpolation_func_cubic_h_vert_by_h_mes(current_h_mes_m))
            current_borehole_extension = current_h_mes_m - current_h_vert_m
            delta_h_vert_m = current_h_vert_m - h_vert_m[-1]
            delta_x_displacement_m = (self.lenth_of_one_part ** 2 - delta_h_vert_m ** 2) ** (1 / 2)
            if type(delta_x_displacement_m) == complex:
                delta_x_displacement_m = delta_x_displacement_m.real
            current_x_displacement_m = x_displacement_m[-1] + delta_x_displacement_m
            cos_phi_angle_to_horizontal = delta_x_displacement_m / self.lenth_of_one_part
            current_angle_to_horizontal = np.degrees(np.arccos(float(cos_phi_angle_to_horizontal)))
            current_curvature_rate_grad10m = (angle_to_horizontal_grad[
                                                  -1] - current_angle_to_horizontal) / self.lenth_of_one_part
            h_mes_m.append(current_h_mes_m)
            h_vert_m.append(current_h_vert_m)
            borehole_extension.append(current_borehole_extension)
            x_displacement_m.append(current_x_displacement_m)
            angle_to_horizontal_grad.append(current_angle_to_horizontal)
            curvature_rate_grad10m.append(current_curvature_rate_grad10m)
        self.h_mes_m = np.asarray(h_mes_m)
        self.h_vert_m = np.asarray(h_vert_m)
        self.angle_to_horizontal_grad = np.asarray(angle_to_horizontal_grad)
        self.x_displacement_m = np.asarray(x_displacement_m)
        self.y_displacement_m = np.asarray(x_displacement_m) * 0
        self.curvature_rate_grad10m = np.asarray(curvature_rate_grad10m)
        self.borehole_extension_m = np.asarray(borehole_extension)
        self.interpolation_x_displacement_by_h_mes = interpolate.interp1d(self.h_mes_m,
                                                                          self.x_displacement_m,
                                                                          kind='cubic')
        self.interpolation_h_vert_by_h_mes = interpolate.interp1d(self.h_mes_m,
                                                                  self.h_vert_m,
                                                                  kind='cubic')
        self.interpolation_angle_to_horizontal_by_h_mes = interpolate.interp1d(self.h_mes_m,
                                                                               self.angle_to_horizontal_grad,
                                                                               kind='cubic')
        self.interpolation_borehole_extension_by_h_mes = interpolate.interp1d(self.h_mes_m,
                                                                              self.borehole_extension_m,
                                                                              kind='cubic')
        self.interpolation_curvature_rate_by_h_mes = interpolate.interp1d(self.h_mes_m,
                                                                          self.curvature_rate_grad10m,
                                                                          kind='cubic')
    def get_x_displacement_m(self, h_mes_m):
        """
        Horizontal displacement from the wellhead, by interpolation.
        :param h_mes_m: measured depth along the well bore, m
        :return: horizontal displacement from the wellhead, m
        """
        return self.interpolation_x_displacement_by_h_mes(h_mes_m)
    def get_h_vert_m(self, h_mes_m):
        """
        True vertical depth, by interpolation.
        :param h_mes_m: measured depth along the well bore, m
        :return: absolute (true vertical) depth, m
        """
        return self.interpolation_h_vert_by_h_mes(h_mes_m)
    def get_angle_to_horizontal_grad(self, h_mes_m):
        """
        Inclination angle from horizontal, by interpolation.
        :param h_mes_m: measured depth along the well bore, m
        :return: angle from horizontal, degrees
        """
        return self.interpolation_angle_to_horizontal_by_h_mes(h_mes_m)
    def get_borehole_extension_m(self, h_mes_m):
        """
        Bore extension, i.e. the difference between measured depth and true
        vertical depth, by interpolation.
        :param h_mes_m: measured depth along the well bore, m
        :return: bore extension, m
        """
        return self.interpolation_borehole_extension_by_h_mes(h_mes_m)
    def get_curvature_rate_grad10m(self, h_mes_m):
        """
        Dog-leg severity, by interpolation.
        :param h_mes_m: measured depth along the well bore, m
        :return: curvature build rate, deg / 10 m
        """
        return self.interpolation_curvature_rate_by_h_mes(h_mes_m)
# NOTE: executed at import time - the module builds a ready-to-use default
# profile instance named ``check`` (calc_all() is invoked just below).
check = simple_well_deviation_survey()
check.calc_all() | [
"pandas.read_excel",
"scipy.interpolate.interp1d",
"numpy.asarray"
] | [((1890, 1921), 'pandas.read_excel', 'pd.read_excel', (['path_to_file_str'], {}), '(path_to_file_str)\n', (1903, 1921), True, 'import pandas as pd\n'), ((3120, 3284), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["self.deviation_survey_dataframe['Глубина конца интервала, м']", "self.deviation_survey_dataframe['Вертикальная отметка']"], {'kind': '"""cubic"""'}), "(self.deviation_survey_dataframe[\n 'Глубина конца интервала, м'], self.deviation_survey_dataframe[\n 'Вертикальная отметка'], kind='cubic')\n", (3140, 3284), True, 'import scipy.interpolate as interpolate\n'), ((3343, 3497), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["self.deviation_survey_dataframe['Глубина конца интервала, м']", "self.deviation_survey_dataframe['Угол, гpад']"], {'kind': '"""cubic"""'}), "(self.deviation_survey_dataframe[\n 'Глубина конца интервала, м'], self.deviation_survey_dataframe[\n 'Угол, гpад'], kind='cubic')\n", (3363, 3497), True, 'import scipy.interpolate as interpolate\n'), ((6209, 6228), 'numpy.asarray', 'np.asarray', (['h_mes_m'], {}), '(h_mes_m)\n', (6219, 6228), True, 'import numpy as np\n'), ((6254, 6280), 'numpy.asarray', 'np.asarray', (['curvature_rate'], {}), '(curvature_rate)\n', (6264, 6280), True, 'import numpy as np\n'), ((6328, 6387), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['h_mes_m', 'curvature_rate'], {'kind': '"""cubic"""'}), "(h_mes_m, curvature_rate, kind='cubic')\n", (6348, 6387), True, 'import scipy.interpolate as interpolate\n'), ((8610, 8698), 'numpy.asarray', 'np.asarray', (['[0, self.h_conductor_mes_m, self.h_pump_mes_m, self.h_bottomhole_mes_m]'], {}), '([0, self.h_conductor_mes_m, self.h_pump_mes_m, self.\n h_bottomhole_mes_m])\n', (8620, 8698), True, 'import numpy as np\n'), ((8809, 8900), 'numpy.asarray', 'np.asarray', (['[0, self.h_conductor_vert_m, self.h_pump_vert_m, self.h_bottomhole_vert_m]'], {}), '([0, self.h_conductor_vert_m, self.h_pump_vert_m, self.\n h_bottomhole_vert_m])\n', (8819, 8900), True, 
'import numpy as np\n'), ((9018, 9143), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_init_data_for_interpolation_m', 'self.h_vert_init_data_for_interpolation_m'], {'kind': '"""linear"""'}), "(self.h_mes_init_data_for_interpolation_m, self.\n h_vert_init_data_for_interpolation_m, kind='linear')\n", (9038, 9143), True, 'import scipy.interpolate as interpolate\n'), ((9353, 9477), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_init_data_for_interpolation_m', 'self.h_vert_init_data_for_interpolation_m'], {'kind': '"""cubic"""'}), "(self.h_mes_init_data_for_interpolation_m, self.\n h_vert_init_data_for_interpolation_m, kind='cubic')\n", (9373, 9477), True, 'import scipy.interpolate as interpolate\n'), ((11496, 11515), 'numpy.asarray', 'np.asarray', (['h_mes_m'], {}), '(h_mes_m)\n', (11506, 11515), True, 'import numpy as np\n'), ((11540, 11560), 'numpy.asarray', 'np.asarray', (['h_vert_m'], {}), '(h_vert_m)\n', (11550, 11560), True, 'import numpy as np\n'), ((11601, 11637), 'numpy.asarray', 'np.asarray', (['angle_to_horizontal_grad'], {}), '(angle_to_horizontal_grad)\n', (11611, 11637), True, 'import numpy as np\n'), ((11670, 11698), 'numpy.asarray', 'np.asarray', (['x_displacement_m'], {}), '(x_displacement_m)\n', (11680, 11698), True, 'import numpy as np\n'), ((11802, 11836), 'numpy.asarray', 'np.asarray', (['curvature_rate_grad10m'], {}), '(curvature_rate_grad10m)\n', (11812, 11836), True, 'import numpy as np\n'), ((11873, 11903), 'numpy.asarray', 'np.asarray', (['borehole_extension'], {}), '(borehole_extension)\n', (11883, 11903), True, 'import numpy as np\n'), ((11958, 12029), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_m', 'self.x_displacement_m'], {'kind': '"""cubic"""'}), "(self.h_mes_m, self.x_displacement_m, kind='cubic')\n", (11978, 12029), True, 'import scipy.interpolate as interpolate\n'), ((12224, 12287), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_m', 
'self.h_vert_m'], {'kind': '"""cubic"""'}), "(self.h_mes_m, self.h_vert_m, kind='cubic')\n", (12244, 12287), True, 'import scipy.interpolate as interpolate\n'), ((12479, 12558), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_m', 'self.angle_to_horizontal_grad'], {'kind': '"""cubic"""'}), "(self.h_mes_m, self.angle_to_horizontal_grad, kind='cubic')\n", (12499, 12558), True, 'import scipy.interpolate as interpolate\n'), ((12775, 12850), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_m', 'self.borehole_extension_m'], {'kind': '"""cubic"""'}), "(self.h_mes_m, self.borehole_extension_m, kind='cubic')\n", (12795, 12850), True, 'import scipy.interpolate as interpolate\n'), ((13061, 13138), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.h_mes_m', 'self.curvature_rate_grad10m'], {'kind': '"""cubic"""'}), "(self.h_mes_m, self.curvature_rate_grad10m, kind='cubic')\n", (13081, 13138), True, 'import scipy.interpolate as interpolate\n'), ((11731, 11759), 'numpy.asarray', 'np.asarray', (['x_displacement_m'], {}), '(x_displacement_m)\n', (11741, 11759), True, 'import numpy as np\n')] |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# Multivariate linear regression (2 features) via batch gradient descent.
# Usage: python script.py <csv-file-with-3-columns>
if len(sys.argv) < 2:
    print('No data file found ...')
    sys.exit()
data = pd.read_csv(sys.argv[1])
#data=pd.read_csv("data2.txt")
data = data.sample(frac=1)  # shuffle the rows
#nor_data=(data-data.mean())/data.std() #Why? normilazation VVIP
nor_data = np.array(data)
print(nor_data.shape)
x = np.array(nor_data[:, 0:2])
y = np.array(nor_data[:, 2]).reshape(-1, 1)
# Prepend a bias column of ones so theta[0] acts as the intercept.
x = np.hstack((np.ones((np.size(x, 0), 1), int), x))
# Plot the raw data.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(x[:, 1], x[:, 2], y, color='blue')
plt.show()
# Initialize gradient descent.
iterations = 100  # works well with 200 iterations and alpha set to 0.1 also
alpha = 0.001
m = np.size(data, 0)
theta = np.zeros((np.size(x, 1), 1), float)
print(theta)
for i in range(iterations):
    h = np.dot(x, theta)  # predictions, shape (m, 1)
    # BUG FIX: the gradient X^T (h - y) is already summed over the samples;
    # the old code additionally wrapped it in sum(), collapsing the three
    # per-parameter gradients into one scalar and updating every theta
    # component by the same amount.
    theta = theta - alpha * (1 / m) * np.dot(x.T, h - y)
print("the values of theta are " + str(theta))
# Predicted plane on the training points (dead duplicate np.inner line removed).
y_pred = np.dot(x, theta)
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(x[:, 1], x[:, 2], y, color='blue')
ax.scatter3D(x[:, 1], x[:, 2], y_pred, color='black')
plt.show()
"numpy.size",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.axes",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.dot",
"sys.exit"
] | [((191, 215), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (202, 215), True, 'import pandas as pd\n'), ((351, 365), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (359, 365), True, 'import numpy as np\n'), ((421, 447), 'numpy.array', 'np.array', (['nor_data[:, 0:2]'], {}), '(nor_data[:, 0:2])\n', (429, 447), True, 'import numpy as np\n'), ((460, 484), 'numpy.array', 'np.array', (['nor_data[:, 2]'], {}), '(nor_data[:, 2])\n', (468, 484), True, 'import numpy as np\n'), ((587, 609), 'numpy.hstack', 'np.hstack', (['(xstack, x)'], {}), '((xstack, x))\n', (596, 609), True, 'import numpy as np\n'), ((644, 656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (654, 656), True, 'import matplotlib.pyplot as plt\n'), ((660, 685), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (668, 685), True, 'import matplotlib.pyplot as plt\n'), ((730, 740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (738, 740), True, 'import matplotlib.pyplot as plt\n'), ((866, 882), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (873, 882), True, 'import numpy as np\n'), ((1408, 1424), 'numpy.dot', 'np.dot', (['x', 'theta'], {}), '(x, theta)\n', (1414, 1424), True, 'import numpy as np\n'), ((1429, 1441), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1439, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1470), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1453, 1470), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1573), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1571, 1573), True, 'import matplotlib.pyplot as plt\n'), ((172, 182), 'sys.exit', 'sys.exit', ([], {}), '()\n', (180, 182), False, 'import sys\n'), ((520, 533), 'numpy.size', 'np.size', (['x', '(0)'], {}), '(x, 0)\n', (527, 533), True, 'import numpy as np\n'), ((1011, 1027), 'numpy.dot', 'np.dot', (['x', 'theta'], {}), '(x, 
theta)\n', (1017, 1027), True, 'import numpy as np\n'), ((898, 911), 'numpy.size', 'np.size', (['x', '(1)'], {}), '(x, 1)\n', (905, 911), True, 'import numpy as np\n'), ((956, 969), 'numpy.size', 'np.size', (['x', '(1)'], {}), '(x, 1)\n', (963, 969), True, 'import numpy as np\n'), ((1073, 1091), 'numpy.dot', 'np.dot', (['x.T', '(h - y)'], {}), '(x.T, h - y)\n', (1079, 1091), True, 'import numpy as np\n')] |
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
import os
import sys
# script directory
yolo_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0,yolo_dir)
from util import *
import argparse
import os.path as osp
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import random
sys.path.pop(0)
num_classes = 80
class test_net(nn.Module):
    """Tiny throwaway MLP used to sanity-check the torch setup.

    Maps a flattened input of length ``input_size`` through one input
    projection, ``num_layers`` hidden 5->5 layers and a final 5->2 head.
    """

    def __init__(self, num_layers, input_size):
        super(test_net, self).__init__()
        self.num_layers = num_layers
        # Input projection down to the fixed hidden width of 5.
        self.linear_1 = nn.Linear(input_size, 5)
        # Stack of identical 5->5 hidden layers.
        hidden = [nn.Linear(5, 5) for _ in range(num_layers)]
        self.middle = nn.ModuleList(hidden)
        # Two-unit output head.
        self.output = nn.Linear(5, 2)

    def forward(self, x):
        # Flatten, then run the whole pipeline in one pass.
        flat = x.view(-1)
        pipeline = nn.Sequential(self.linear_1, *self.middle, self.output)
        return pipeline(flat)
class args():
    """Namespace of fixed detector settings (stands in for CLI arguments)."""
    # Paths to the YOLOv3 network definition and pretrained weights.
    cfgfile = yolo_dir + '/cfg/yolov3.cfg'
    weightsfile = yolo_dir + '/yolov3.weights'
    # Inference settings: batch size, input resolution and detection scales.
    bs = 1
    reso = '416'
    scales = '1,2,3'
    # Thresholds: minimum objectness score and the NMS IoU cutoff.
    confidence = 0.8
    nms_thresh = 0.4
# Materialise the settings from the ``args`` namespace as module-level
# values so the functions below can use them directly.
scales = args.scales
batch_size = int(args.bs)
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
# Run the network on GPU whenever CUDA is available.
CUDA = torch.cuda.is_available()
def load_model():
    """Build the YOLOv3 detector and prepare it for inference.

    Constructs a Darknet model from ``args.cfgfile``, loads the pretrained
    weights, fixes the network input resolution to ``args.reso``, moves the
    model to the GPU when CUDA is available and switches it to eval mode.

    Returns:
        The ready-to-use Darknet model.
    """
    # (Removed unused locals `start` and `classes`; the latter needlessly
    # re-read the class-name file on every call.)
    print("Loading YOLO network.....")
    model = Darknet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print("Network successfully loaded")
    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    # YOLO downsamples by a factor of 32, so the input side must be a
    # multiple of 32 and larger than a single cell.
    assert inp_dim % 32 == 0
    assert inp_dim > 32
    if CUDA:
        model.cuda()
    model.eval()
    return model
def inference(images, model):
    """Detect humans in an image with a YOLOv3 model.

    Args:
        images: a single image as a numpy array.
        model: the loaded Darknet model returned by :func:`load_model`.

    Returns:
        Tuple ``(human_candidates, scores)``: an (N, 4) array of
        ``[x1, y1, x2, y2]`` boxes in original image coordinates, and a
        (1, N) array of the corresponding confidences.
    """
    # (Removed unused locals `start`, `classes` and `orig_ims`.)
    imlist = [images]
    inp_dim = int(model.net_info["height"])
    # Letterbox each image to the network input size; keep the original
    # dimensions so the detections can be rescaled afterwards.
    batches = list(map(prep_image, imlist, [inp_dim for x in range(len(imlist))]))
    im_batches = [x[0] for x in batches]
    im_dim_list = [x[2] for x in batches]
    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
    if CUDA:
        im_dim_list = im_dim_list.cuda()
    for batch in im_batches:
        #load the image
        if CUDA:
            batch = batch.cuda()
        with torch.no_grad():
            prediction = model(Variable(batch), CUDA)
        # Threshold by objectness and apply non-maximum suppression.
        output = write_results(prediction, confidence, num_classes, nms = True, nms_conf = nms_thesh)
        if CUDA:
            torch.cuda.synchronize()
    try:
        output
    except NameError:
        print("No detections were made")
        exit()
    # Rescale the boxes from the letterboxed network input back to the
    # original image coordinates.
    im_dim_list = torch.index_select(im_dim_list, 0, output[:,0].long())
    scaling_factor = torch.min(inp_dim/im_dim_list,1)[0].view(-1,1)
    output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim_list[:,0].view(-1,1))/2
    output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim_list[:,1].view(-1,1))/2
    output[:,1:5] /= scaling_factor
    # select human and export bbox
    human_candidates = []
    scores = []
    for i in range(len(output)):
        item = output[i]
        im_id = item[-1]
        # Keep only detections whose last column is 0 -- per the comment
        # above this selects humans (class 0 / "person" in coco.names).
        if int(im_id) == 0:
            # x1,y1,x2,y2 = bbox
            bbox = item[1:5].cpu().numpy()
            # convert float32 coords to values rounded to 2 decimals
            bbox = [round(i, 2) for i in list(bbox)]
            score = item[5]
            human_candidates.append(bbox)
            scores.append(score)
    scores = np.expand_dims(np.array(scores), 0)
    human_candidates = np.array(human_candidates)
    return human_candidates, scores
| [
"sys.path.pop",
"torch.cuda.synchronize",
"darknet.Darknet",
"torch.nn.Sequential",
"torch.autograd.Variable",
"os.path.realpath",
"torch.FloatTensor",
"sys.path.insert",
"torch.cuda.is_available",
"numpy.array",
"torch.nn.Linear",
"torch.no_grad",
"torch.min"
] | [((208, 236), 'sys.path.insert', 'sys.path.insert', (['(0)', 'yolo_dir'], {}), '(0, yolo_dir)\n', (223, 236), False, 'import sys\n'), ((384, 399), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (396, 399), False, 'import sys\n'), ((1199, 1224), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1222, 1224), False, 'import torch\n'), ((180, 206), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((1399, 1420), 'darknet.Darknet', 'Darknet', (['args.cfgfile'], {}), '(args.cfgfile)\n', (1406, 1420), False, 'from darknet import Darknet\n'), ((3621, 3647), 'numpy.array', 'np.array', (['human_candidates'], {}), '(human_candidates)\n', (3629, 3647), True, 'import numpy as np\n'), ((595, 619), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(5)'], {}), '(input_size, 5)\n', (604, 619), True, 'import torch.nn as nn\n'), ((723, 738), 'torch.nn.Linear', 'nn.Linear', (['(5)', '(2)'], {}), '(5, 2)\n', (732, 738), True, 'import torch.nn as nn\n'), ((802, 857), 'torch.nn.Sequential', 'nn.Sequential', (['self.linear_1', '*self.middle', 'self.output'], {}), '(self.linear_1, *self.middle, self.output)\n', (815, 857), True, 'import torch.nn as nn\n'), ((3577, 3593), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3585, 3593), True, 'import numpy as np\n'), ((2200, 2230), 'torch.FloatTensor', 'torch.FloatTensor', (['im_dim_list'], {}), '(im_dim_list)\n', (2217, 2230), False, 'import torch\n'), ((2415, 2430), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2428, 2430), False, 'import torch\n'), ((2619, 2643), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2641, 2643), False, 'import torch\n'), ((657, 672), 'torch.nn.Linear', 'nn.Linear', (['(5)', '(5)'], {}), '(5, 5)\n', (666, 672), True, 'import torch.nn as nn\n'), ((2463, 2478), 'torch.autograd.Variable', 'Variable', (['batch'], {}), '(batch)\n', (2471, 2478), False, 'from torch.autograd import 
Variable\n'), ((2843, 2878), 'torch.min', 'torch.min', (['(inp_dim / im_dim_list)', '(1)'], {}), '(inp_dim / im_dim_list, 1)\n', (2852, 2878), False, 'import torch\n')] |
from comodels import penn
import numpy as np
def test_rolling_sum():
    """rolling_sum with window 2 yields the sums of neighbouring pairs."""
    values = np.array([1, 2, 3, 4, 5])
    result = penn.rolling_sum(values, 2)
    assert result.tolist() == [3, 5, 7, 9]
| [
"comodels.penn.rolling_sum",
"numpy.array"
] | [((79, 104), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (87, 104), True, 'import numpy as np\n'), ((159, 186), 'comodels.penn.rolling_sum', 'penn.rolling_sum', (['a', 'window'], {}), '(a, window)\n', (175, 186), False, 'from comodels import penn\n')] |
"""Compile BBox position from meta file into training vector.
The training output `y` is a feature map with 5 features: label, BBox centre
relative to anchor, and BBox absolute width/height.
The label values, ie the entries in y[0, :, :], are non-negative integers. A
label of zero always means background.
"""
import os
import bz2
import sys
import glob
import tqdm
import json
import pickle
import argparse
import multiprocessing
import inspect_feature
import numpy as np
import PIL.Image as Image
import PIL.ImageFilter as ImageFilter
from feature_utils import ft2im, downsampleMatrix
def parseCmdline():
    """Parse the command line arguments."""
    # Build the CLI: an optional data path plus a debug flag.
    cli = argparse.ArgumentParser(description='Compile training data')
    cli.add_argument(
        'path', nargs='?', type=str,
        metavar='path',
        help='File or Path with training images and *-meta.json.bz2')
    cli.add_argument(
        '--debug', action='store_true', default=False,
        help='Create debug plots for instant inspection')
    opts = cli.parse_args()

    # Default to the bundled <data/3dflight> folder next to this script.
    if opts.path is None:
        here = os.path.dirname(os.path.abspath(__file__))
        opts.path = os.path.join(here, 'data', '3dflight')

    # Abort early when the target does not exist at all.
    if not os.path.exists(opts.path):
        print(f'Error: cannot open <{opts.path}>')
        sys.exit(1)

    # A directory means "all JPEG images inside"; a file means just itself.
    if os.path.isdir(opts.path):
        jpegs = glob.glob(os.path.join(opts.path, '*.jpg'))
    else:
        jpegs = [opts.path]
    # Strip the '.jpg' suffix; downstream code appends its own extensions.
    opts.fnames = [fn[:-4] for fn in sorted(jpegs)]
    return opts
def _computeBBoxes(bb_rects, objID_at_pixel_ft, im_dim):
    """Express each object's BBox corners relative to its anchor point.

    For every non-background feature map location the owning object's
    absolute corners are shifted by the anchor position (in image
    coordinates) and stored in a (4, *ft_dim) float32 tensor.
    """
    ft_dim = objID_at_pixel_ft.shape
    rel_bboxes = np.zeros((4, *ft_dim), np.float32)
    # Walk every feature map cell that shows a foreground object.
    for row, col in zip(*np.nonzero(objID_at_pixel_ft)):
        # Anchor position of this cell, expressed in image coordinates.
        anchor_x = ft2im(col, ft_dim[1], im_dim[1])
        anchor_y = ft2im(row, ft_dim[0], im_dim[0])
        x0, y0, x1, y1 = bb_rects[objID_at_pixel_ft[row, col]]
        rel_bboxes[:, row, col] = (
            float(x0 - anchor_x),
            float(y0 - anchor_y),
            float(x1 - anchor_x),
            float(y1 - anchor_y),
        )
    return rel_bboxes
def _maskForeground(objID_at_pixel_ft):
"""Return the "this-is-not-a-background-pixel" mask."""
mask = np.zeros(objID_at_pixel_ft.shape, np.uint8)
# Activate all feature map locations that show anything but background.
fg_idx = np.nonzero(objID_at_pixel_ft)
mask[fg_idx] = 1
return mask
def _maskBBox(objID_at_pixel_ft, obj_pixels_ft):
"""Return the "you-can-estimate-bbox-size-at-this-anchor" mask.
To estimating the BBox it often suffices to see only a small portion of the
object. In this case, all objects that have more than 10% of its pixels
visible are considered "visible enough for BBox estimation". NOTE: just
because it is possible to estimate the BBox size does *not* mean it is also
possible to estimate its label (ie the number on the cube), as considerably
more pixels may have to be visible for that (see `_maskFgLabel`).
"""
# Iterate over each object and determine (mostly guess) if it is possible
# to recognise the object.
mask = np.zeros(objID_at_pixel_ft.shape, np.uint8)
for objID, pixels in obj_pixels_ft.items():
# Skip background locations.
if objID == 0:
continue
# Find out which pixels belong to the current object AND are visible in
# the scene right now.
idx_visible = np.nonzero(objID_at_pixel_ft == objID)
# We decide that BBox estimation is possible if at least 10% of all
# pixels for the object are visible.
num_visible = len(idx_visible[0])
num_tot = np.count_nonzero(pixels)
if num_visible >= 9 and num_visible >= 0.1 * num_tot:
mask[idx_visible] = 1
return mask
def _maskValid(objID_at_pixels_ft):
    """Return the "only-train-on-these-pixels" mask.

    Pixels on a foreground/background boundary are excluded because their
    fg/bg label would be ambiguous during training.
    """
    # Run an edge detector over the object-ID map; non-zero responses mark
    # transitions between different IDs (ie fg/bg boundaries).
    id_img = np.array(objID_at_pixels_ft, np.uint8)
    edges = np.array(Image.fromarray(id_img).filter(ImageFilter.FIND_EDGES))
    # Valid training locations are exactly the edge-free pixels.
    valid = np.zeros(id_img.shape, np.uint8)
    valid[edges == 0] = 1
    return valid
def _maskFgLabel(img, objID_at_pixel_ft, obj_pixels_ft):
"""Return the "it is possible to estimate the label at that pixel" mask.
To estimate the label the object must be
1. large enough to see the number
2. bright enough to see the number
3. unobstructed enough to see the number
4. not too so close to the screen edge that the number is clipped
For Condition 1, objects are large enough if it occupies least 9 pixels in
feature space. 9 pixels (ie a 3x3 patch) corresponds to a 24x24 receptive
field for our default feature and image sizes of 64x64 and 512x512,
respectively.
Objects are bright enough if their average pixel value is at least 40. I
determine this threshold with an empirical study of one image :)
We consider the object unobstructed if at least 50% of its pixels are
actually visible in the scene (Condition 3).
I do not know how to recognise Condition 4 with the given data.
This is not foolproof but works well enough for now. A notable case where
this fails is a cube close to the camera but clipped by the image boundary.
These cubes may occupy a lot of screen real estate, yet its only visible
portion is the red frame and parts of the white surface.
"""
# Compute the average pixel intensity.
img = np.mean(np.array(img, np.int64), axis=2)
assert img.shape == objID_at_pixel_ft.shape
# Iterate over each object and determine (mostly guess) if it is possible
# to recognise the object.
mask = np.zeros(objID_at_pixel_ft.shape, np.uint8)
for objID, pixels in obj_pixels_ft.items():
idx_visible = np.nonzero(objID_at_pixel_ft == objID)
if len(idx_visible[0]) == 0:
continue
# Is it bright enough.
avg_brightness = np.mean(img[idx_visible])
if avg_brightness < 40:
continue
# Are enough pixels visible in the scene.
num_visible = len(idx_visible[0])
num_tot = np.count_nonzero(pixels)
if num_visible >= 9 and num_visible >= 0.5 * num_tot:
mask[idx_visible] = 1
return mask
def generate(fname, img, ft_dims):
    """Compile the training feature maps for one image.

    Reads the ``<fname>-meta.json.bz2`` ground-truth file, builds the
    per-pixel label map and, for every requested feature map size in
    `ft_dims`, produces the anchor-relative BBoxes plus the
    fg/bbox/class/valid masks.

    Args:
        fname: image path without the '.jpg' suffix.
        img: the RGB image as an (H, W, 3) uint8 array.
        ft_dims: iterable of (height, width) feature map sizes.

    Returns:
        Dict with the 'int2name' mapping plus one sub-dict per ft_dim.
    """
    assert img.ndim == 3 and img.shape[2] == 3 and img.dtype == np.uint8
    im_dim = img.shape[:2]
    # Load the True output and verify that all files use the same
    # int->label mapping.
    img_meta = bz2.open(fname + '-meta.json.bz2', 'rb').read()
    img_meta = json.loads(img_meta.decode('utf8'))
    # Undo JSON's int->str conversion for dict keys.
    int2name = {int(k): v for k, v in img_meta['int2name'].items()}
    bb_rects = {int(k): v for k, v in img_meta['bb_rects'].items()}
    obj_pixels = {int(k): v for k, v in img_meta['obj-pixels'].items()}
    objID2label = {int(k): v for k, v in img_meta['objID2label'].items()}
    objID_at_pixel = np.array(img_meta['objID-at-pixel'], np.int32)
    # Free the parsed JSON blob early; it can be large.
    del img_meta
    # The label map *must* contain a None labels -> these are the background
    # pixels.
    assert int2name[0] == 'None'
    name2int = {v: k for k, v in int2name.items()}
    # For each non-zero pixel, map the object ID to its label. This
    # will produce an image where each pixel corresponds to a label
    # that can be looked up with `int2name`.
    label_at_pixel = np.zeros_like(objID_at_pixel)
    for idx in zip(*np.nonzero(objID_at_pixel)):
        label_name = objID2label[objID_at_pixel[idx]]
        assert label_name != 'None'
        label_at_pixel[idx] = name2int[label_name]
    # Add the int2name map to the function output.
    out = {}
    out['int2name'] = int2name
    # Compile dictionary with feature size specific data. This includes the
    # BBox data relative to the anchor point.
    for ft_dim in ft_dims:
        # PIL's resize takes (width, height), hence the swapped ft_dim order.
        img_ft = Image.fromarray(img).resize((ft_dim[1], ft_dim[0]))
        img_ft = np.array(img_ft)
        # Downsample the label/objID maps to the feature size.
        label_at_pixel_ft = downsampleMatrix(label_at_pixel, ft_dim)
        objID_at_pixel_ft = downsampleMatrix(objID_at_pixel, ft_dim)
        obj_pixels_ft = {k: downsampleMatrix(v, ft_dim) for k, v in obj_pixels.items()}
        bboxes = _computeBBoxes(bb_rects, objID_at_pixel_ft, im_dim)
        mask_fg = _maskForeground(objID_at_pixel_ft)
        mask_bbox = _maskBBox(objID_at_pixel_ft, obj_pixels_ft)
        mask_valid = _maskValid(objID_at_pixel_ft)
        mask_cls = _maskFgLabel(img_ft, objID_at_pixel_ft, obj_pixels_ft)
        # Compile all the information into the output dictionary.
        out[ft_dim] = {
            'bboxes': np.array(bboxes, np.float32),
            'objID_at_pixel': objID_at_pixel_ft,
            'label_at_pixel': label_at_pixel_ft,
            'mask_fg': mask_fg,
            'mask_bbox': mask_bbox,
            'mask_cls': mask_cls,
            'mask_valid': mask_valid,
        }
    return out
def compileSingle(args):
    """Compile the features for a single image and pickle them to disk.

    Args:
        args: tuple ``(fname, ft_dims)`` -- the image path without its
            '.jpg' suffix, and the feature map sizes to compile.
    """
    fname, ft_dims = args
    # Load the image as an RGB numpy array.
    img = np.array(Image.open(fname + '.jpg').convert('RGB'))
    features = generate(fname, img, ft_dims)
    # Write via a context manager so the file handle is always closed
    # (the original left the opened file dangling).
    with open(fname + '-compiled.pickle', 'wb') as fd:
        pickle.dump(features, fd)
def main():
    """Entry point: compile features for every image named on the CLI."""
    opts = parseCmdline()
    ft_dims = [(64, 64)]
    jobs = [(fname, ft_dims) for fname in opts.fnames]
    if len(jobs) == 1:
        # A single file is not worth spinning up worker processes for.
        compileSingle(jobs[0])
    else:
        with multiprocessing.Pool() as pool:
            # Fan the jobs out over the pool and drain the iterator through
            # a progress bar so the user can watch compilation advance.
            progress = tqdm.tqdm(
                pool.imap_unordered(compileSingle, jobs),
                total=len(jobs), desc='Compiling Features', leave=False
            )
            for _ in progress:
                pass
    # Show debug plots for the first file in the list.
    if opts.debug:
        inspect_feature.main(opts.fnames[0] + '.jpg')
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.mean",
"bz2.open",
"os.path.join",
"os.path.abspath",
"numpy.zeros_like",
"os.path.exists",
"feature_utils.downsampleMatrix",
"inspect_feature.main",
"multiprocessing.Pool",
"sys.exit",
"numpy.count_nonzero",
"feature_utils.ft2im",
"os.path.isdir",
"nump... | [((717, 777), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compile training data"""'}), "(description='Compile training data')\n", (740, 777), False, 'import argparse\n'), ((1376, 1401), 'os.path.isdir', 'os.path.isdir', (['param.path'], {}), '(param.path)\n', (1389, 1401), False, 'import os\n'), ((1719, 1748), 'numpy.nonzero', 'np.nonzero', (['objID_at_pixel_ft'], {}), '(objID_at_pixel_ft)\n', (1729, 1748), True, 'import numpy as np\n'), ((1926, 1960), 'numpy.zeros', 'np.zeros', (['(4, *ft_dim)', 'np.float32'], {}), '((4, *ft_dim), np.float32)\n', (1934, 1960), True, 'import numpy as np\n'), ((2482, 2525), 'numpy.zeros', 'np.zeros', (['objID_at_pixel_ft.shape', 'np.uint8'], {}), '(objID_at_pixel_ft.shape, np.uint8)\n', (2490, 2525), True, 'import numpy as np\n'), ((2616, 2645), 'numpy.nonzero', 'np.nonzero', (['objID_at_pixel_ft'], {}), '(objID_at_pixel_ft)\n', (2626, 2645), True, 'import numpy as np\n'), ((3393, 3436), 'numpy.zeros', 'np.zeros', (['objID_at_pixel_ft.shape', 'np.uint8'], {}), '(objID_at_pixel_ft.shape, np.uint8)\n', (3401, 3436), True, 'import numpy as np\n'), ((4362, 4400), 'numpy.array', 'np.array', (['objID_at_pixels_ft', 'np.uint8'], {}), '(objID_at_pixels_ft, np.uint8)\n', (4370, 4400), True, 'import numpy as np\n'), ((4485, 4514), 'numpy.zeros', 'np.zeros', (['src.shape', 'np.uint8'], {}), '(src.shape, np.uint8)\n', (4493, 4514), True, 'import numpy as np\n'), ((6104, 6147), 'numpy.zeros', 'np.zeros', (['objID_at_pixel_ft.shape', 'np.uint8'], {}), '(objID_at_pixel_ft.shape, np.uint8)\n', (6112, 6147), True, 'import numpy as np\n'), ((7400, 7446), 'numpy.array', 'np.array', (["img_meta['objID-at-pixel']", 'np.int32'], {}), "(img_meta['objID-at-pixel'], np.int32)\n", (7408, 7446), True, 'import numpy as np\n'), ((7843, 7872), 'numpy.zeros_like', 'np.zeros_like', (['objID_at_pixel'], {}), '(objID_at_pixel)\n', (7856, 7872), True, 'import numpy as np\n'), ((1214, 1255), 'os.path.join', 'os.path.join', 
(['cur_dir', '"""data"""', '"""3dflight"""'], {}), "(cur_dir, 'data', '3dflight')\n", (1226, 1255), False, 'import os\n'), ((1268, 1294), 'os.path.exists', 'os.path.exists', (['param.path'], {}), '(param.path)\n', (1282, 1294), False, 'import os\n'), ((1356, 1367), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1364, 1367), False, 'import sys\n'), ((2050, 2080), 'feature_utils.ft2im', 'ft2im', (['x', 'ft_dim[1]', 'im_dim[1]'], {}), '(x, ft_dim[1], im_dim[1])\n', (2055, 2080), False, 'from feature_utils import ft2im, downsampleMatrix\n'), ((2100, 2130), 'feature_utils.ft2im', 'ft2im', (['y', 'ft_dim[0]', 'im_dim[0]'], {}), '(y, ft_dim[0], im_dim[0])\n', (2105, 2130), False, 'from feature_utils import ft2im, downsampleMatrix\n'), ((3700, 3738), 'numpy.nonzero', 'np.nonzero', (['(objID_at_pixel_ft == objID)'], {}), '(objID_at_pixel_ft == objID)\n', (3710, 3738), True, 'import numpy as np\n'), ((3921, 3945), 'numpy.count_nonzero', 'np.count_nonzero', (['pixels'], {}), '(pixels)\n', (3937, 3945), True, 'import numpy as np\n'), ((4524, 4544), 'numpy.nonzero', 'np.nonzero', (['(out == 0)'], {}), '(out == 0)\n', (4534, 4544), True, 'import numpy as np\n'), ((5902, 5925), 'numpy.array', 'np.array', (['img', 'np.int64'], {}), '(img, np.int64)\n', (5910, 5925), True, 'import numpy as np\n'), ((6218, 6256), 'numpy.nonzero', 'np.nonzero', (['(objID_at_pixel_ft == objID)'], {}), '(objID_at_pixel_ft == objID)\n', (6228, 6256), True, 'import numpy as np\n'), ((6372, 6397), 'numpy.mean', 'np.mean', (['img[idx_visible]'], {}), '(img[idx_visible])\n', (6379, 6397), True, 'import numpy as np\n'), ((6562, 6586), 'numpy.count_nonzero', 'np.count_nonzero', (['pixels'], {}), '(pixels)\n', (6578, 6586), True, 'import numpy as np\n'), ((8395, 8411), 'numpy.array', 'np.array', (['img_ft'], {}), '(img_ft)\n', (8403, 8411), True, 'import numpy as np\n'), ((8504, 8544), 'feature_utils.downsampleMatrix', 'downsampleMatrix', (['label_at_pixel', 'ft_dim'], {}), '(label_at_pixel, ft_dim)\n', 
(8520, 8544), False, 'from feature_utils import ft2im, downsampleMatrix\n'), ((8573, 8613), 'feature_utils.downsampleMatrix', 'downsampleMatrix', (['objID_at_pixel', 'ft_dim'], {}), '(objID_at_pixel, ft_dim)\n', (8589, 8613), False, 'from feature_utils import ft2im, downsampleMatrix\n'), ((10283, 10329), 'inspect_feature.main', 'inspect_feature.main', (["(param.fnames[0] + '.jpg')"], {}), "(param.fnames[0] + '.jpg')\n", (10303, 10329), False, 'import inspect_feature\n'), ((1166, 1191), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1181, 1191), False, 'import os\n'), ((1430, 1463), 'os.path.join', 'os.path.join', (['param.path', '"""*.jpg"""'], {}), "(param.path, '*.jpg')\n", (1442, 1463), False, 'import os\n'), ((6944, 6984), 'bz2.open', 'bz2.open', (["(fname + '-meta.json.bz2')", '"""rb"""'], {}), "(fname + '-meta.json.bz2', 'rb')\n", (6952, 6984), False, 'import bz2\n'), ((7893, 7919), 'numpy.nonzero', 'np.nonzero', (['objID_at_pixel'], {}), '(objID_at_pixel)\n', (7903, 7919), True, 'import numpy as np\n'), ((8642, 8669), 'feature_utils.downsampleMatrix', 'downsampleMatrix', (['v', 'ft_dim'], {}), '(v, ft_dim)\n', (8658, 8669), False, 'from feature_utils import ft2im, downsampleMatrix\n'), ((9127, 9155), 'numpy.array', 'np.array', (['bboxes', 'np.float32'], {}), '(bboxes, np.float32)\n', (9135, 9155), True, 'import numpy as np\n'), ((9839, 9861), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (9859, 9861), False, 'import multiprocessing\n'), ((4420, 4440), 'PIL.Image.fromarray', 'Image.fromarray', (['src'], {}), '(src)\n', (4435, 4440), True, 'import PIL.Image as Image\n'), ((8326, 8346), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (8341, 8346), True, 'import PIL.Image as Image\n'), ((9492, 9518), 'PIL.Image.open', 'Image.open', (["(fname + '.jpg')"], {}), "(fname + '.jpg')\n", (9502, 9518), True, 'import PIL.Image as Image\n')] |
import numpy as np
import taichi as ti
def cook_image_to_bytes(img):
    """
    Takes a NumPy array or Taichi tensor of any type.
    Returns a NumPy array of uint8.
    This is used by ti.imwrite and ti.imdisplay.
    """
    if not isinstance(img, np.ndarray):
        img = img.to_numpy()
    if img.dtype in [np.uint16, np.uint32, np.uint64]:
        # Rescale wide unsigned ints into [0, 255]. The divisor must be
        # (max + 1) // 256: the previous (max // 256) mapped full-intensity
        # pixels to 256+, which wrapped around in the uint8 cast.
        img = (img // ((np.iinfo(img.dtype).max + 1) // 256)).astype(np.uint8)
    elif img.dtype in [np.float32, np.float64]:
        # Floats are interpreted as [0, 1]; round to the nearest byte.
        img = (np.clip(img, 0, 1) * 255.0 + 0.5).astype(np.uint8)
    elif img.dtype != np.uint8:
        raise ValueError(f'Data type {img.dtype} not supported in ti.imwrite')
    assert len(img.shape) in [2,
                              3], "Image must be either RGB/RGBA or greyscale"
    # Greyscale images get an explicit single channel axis.
    if len(img.shape) == 2:
        img = img.reshape(*img.shape, 1)
    assert img.shape[2] in [1, 3,
                            4], "Image must be either RGB/RGBA or greyscale"
    # Convert from the (x, y) tensor layout to the row-major image layout
    # with the vertical axis flipped.
    return img.swapaxes(0, 1)[::-1, :]
def imdisplay(img):
    """
    Try to display image in interactive shell.
    """
    in_jupyter = ti.lang.shell.oinspect.name == ti.lang.shell.ShellType.JUPYTER
    if not in_jupyter:
        # Outside Jupyter, fall back to a regular Taichi GUI window.
        ti.imshow(img)
        return
    # Inside Jupyter, render the image inline as an embedded PNG.
    import PIL.Image
    from io import BytesIO
    import IPython.display
    import numpy as np
    img = cook_image_to_bytes(img)
    with BytesIO() as f:
        PIL.Image.fromarray(img).save(f, 'png')
        IPython.display.display(IPython.display.Image(data=f.getvalue()))
def imwrite(img, filename):
    """
    Save image to a specific file.
    """
    # Normalise to a contiguous uint8 buffer so the raw pointer handed to
    # the native core sees the pixels in memory order.
    cooked = np.ascontiguousarray(cook_image_to_bytes(img))
    resy, resx, comp = cooked.shape
    ti.core.imwrite(filename, cooked.ctypes.data, resx, resy, comp)
def imread(filename, channels=0):
    """
    Load image from a specific file.
    """
    ptr, resx, resy, comp = ti.core.imread(filename, channels)
    # Allocate a destination buffer and copy the decoded pixels into it.
    pixels = np.ndarray(shape=(resy, resx, comp), dtype=np.uint8)
    pixels = np.ascontiguousarray(pixels)
    # TODO(archibate): Figure out how np.ndarray constructor works and replace:
    ti.core.C_memcpy(pixels.ctypes.data, ptr, resx * resy * comp)
    # Discussion: https://github.com/taichi-dev/taichi/issues/802
    # Flip back from the bottom-left-origin (x, y) layout used internally.
    return pixels.swapaxes(0, 1)[:, ::-1, :]
def imshow(img, window_name='Taichi'):
    """
    Show image in a Taichi GUI.
    """
    # Taichi tensors are converted to numpy before display.
    if not isinstance(img, np.ndarray):
        img = img.to_numpy()
    assert len(img.shape) in [2, 3], "Image must be either RGB/RGBA or greyscale"
    with ti.GUI(window_name, res=img.shape[:2]) as gui:
        cooked = gui.cook_image(img)
        # Redraw until the window is closed or ESC is pressed.
        while gui.running:
            if gui.get_event(ti.GUI.ESCAPE):
                gui.running = False
            gui.set_image(cooked)
            gui.show()
| [
"taichi.imshow",
"io.BytesIO",
"taichi.GUI",
"numpy.ascontiguousarray",
"numpy.iinfo",
"numpy.clip",
"taichi.core.imread",
"taichi.core.imwrite",
"taichi.core.C_memcpy",
"numpy.ndarray"
] | [((1611, 1636), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1631, 1636), True, 'import numpy as np\n'), ((1700, 1748), 'taichi.core.imwrite', 'ti.core.imwrite', (['filename', 'ptr', 'resx', 'resy', 'comp'], {}), '(filename, ptr, resx, resy, comp)\n', (1715, 1748), True, 'import taichi as ti\n'), ((1866, 1900), 'taichi.core.imread', 'ti.core.imread', (['filename', 'channels'], {}), '(filename, channels)\n', (1880, 1900), True, 'import taichi as ti\n'), ((1911, 1963), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(resy, resx, comp)', 'dtype': 'np.uint8'}), '(shape=(resy, resx, comp), dtype=np.uint8)\n', (1921, 1963), True, 'import numpy as np\n'), ((1974, 1999), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1994, 1999), True, 'import numpy as np\n'), ((2084, 2142), 'taichi.core.C_memcpy', 'ti.core.C_memcpy', (['img.ctypes.data', 'ptr', '(resx * resy * comp)'], {}), '(img.ctypes.data, ptr, resx * resy * comp)\n', (2100, 2142), True, 'import taichi as ti\n'), ((1470, 1484), 'taichi.imshow', 'ti.imshow', (['img'], {}), '(img)\n', (1479, 1484), True, 'import taichi as ti\n'), ((2531, 2569), 'taichi.GUI', 'ti.GUI', (['window_name'], {'res': 'img.shape[:2]'}), '(window_name, res=img.shape[:2])\n', (2537, 2569), True, 'import taichi as ti\n'), ((1306, 1315), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1313, 1315), False, 'from io import BytesIO\n'), ((374, 393), 'numpy.iinfo', 'np.iinfo', (['img.dtype'], {}), '(img.dtype)\n', (382, 393), True, 'import numpy as np\n'), ((487, 505), 'numpy.clip', 'np.clip', (['img', '(0)', '(1)'], {}), '(img, 0, 1)\n', (494, 505), True, 'import numpy as np\n')] |
from config import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from getpass import getpass
from os import remove
import zipfile
import pandas as pd
import numpy as np
from lxml import etree as et
def _parseBgeXml(f):
    """Parse an ESPI (Green Button) XML file into usage/cost time series.

    Returns a tuple ``(consumed, cost)`` of pandas Series indexed by
    timezone-converted timestamps; both values are scaled down by 1e5 as
    the feed reports them in 1e-5 units.
    """
    timezone = config.get('global','timezone')
    starts, values, costs = [], [], []
    # Stream over every <IntervalReading> without loading the whole file.
    for _, reading in et.iterparse(f, tag='{http://naesb.org/espi}IntervalReading'):
        starts.append(reading.findall('.//{http://naesb.org/espi}start')[0].text)
        values.append(reading.findall('{http://naesb.org/espi}value')[0].text)
        costs.append(reading.findall('{http://naesb.org/espi}cost')[0].text)
    # Epoch seconds -> datetime64 index; readings arrive as UTC.
    index = np.array(starts, dtype=int).astype('datetime64[s]')
    usage = np.array(values, dtype=float) / 1e5
    money = np.array(costs, dtype=float) / 1e5
    consumed = pd.Series(usage, index=index).tz_localize('UTC').tz_convert(timezone)
    cost = pd.Series(money, index=index).tz_localize('UTC').tz_convert(timezone)
    return (consumed, cost)
def getData(daterange):
    """Scrape BGE smart-meter interval data for the given date range.

    Logs into bge.com with Selenium/Firefox, downloads the ESPI zip export
    for ``daterange[0]``..``daterange[-1]``, extracts and parses the gas and
    electric XML files, and returns them merged into one DataFrame. The
    downloaded archive and extracted files are deleted afterwards.
    """
    downloadDir = cache_directory
    customer_id = config.get('bge','customer_id')
    # Credentials come from the config when present, otherwise prompt
    # interactively. NOTE: raw_input is the Python 2 builtin.
    if config.has_option('bge','username') and config.has_option('bge','password'):
        username = config.get('bge','username')
        password = config.get('bge','password')
    else:
        username = raw_input('Username for bge.com:')
        password = getpass()
    begin = daterange[0].strftime('%Y-%m-%d')
    end = daterange[-1].strftime('%Y-%m-%d')
    # Configure Firefox to save the export straight into downloadDir
    # without showing a download dialog.
    profile = webdriver.FirefoxProfile()
    profile.set_preference('browser.download.folderList',2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.dir',downloadDir)
    profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/octet-stream')
    browser = webdriver.Firefox(profile)
    browser.implicitly_wait(60)
    # Log in on the public site.
    browser.get('https://www.bge.com/')
    username_element = browser.find_element_by_id('USER')
    password_element = browser.find_element_by_id('PASSWORD')
    username_element.send_keys(username)
    password_element.send_keys(password)
    password_element.send_keys(Keys.RETURN)
    # Navigate to the usage pages so the session is authorised for the
    # energy-manager subdomain, then request the export directly.
    browser.find_element_by_link_text('My Energy Use').click()
    browser.find_element_by_link_text('My Usage Details').click()
    download_url = 'https://bgesmartenergymanager.com/ei/app/modules/customer/%s/energy/download?exportFormat=ESPI_AMI&xmlFrom=%s&xmlTo=%s'%(customer_id,begin,end)
    browser.get(download_url)
    # Fixed wait for the download to land on disk before quitting.
    sleep(5)
    browser.quit()
    # Unpack the exported archive into the data directory.
    zf = downloadDir + '/bgec_interval_data_%s_to_%s.zip'%(begin,end)
    with zipfile.ZipFile(zf,'r') as z:
        files = [ data_directory + '/' + fn for fn in z.namelist() ]
        z.extractall(data_directory)
    # Merge the gas and electric series into one frame, keyed by the
    # substrings 'gas'/'electric' in the extracted file names.
    df = pd.DataFrame()
    for f in files:
        if 'gas' in f:
            consumed_name = 'gas_consumed'
            cost_name = 'gas_cost'
        if 'electric' in f:
            consumed_name = 'electric_consumed'
            cost_name = 'electric_cost'
        consumed,cost = _parseBgeXml(f)
        df.loc[:,consumed_name] = consumed
        df.loc[:,cost_name] = cost
    # Clean up the archive and the extracted XML files.
    remove(zf)
    for f in files:
        remove(f)
    return df
| [
"pandas.DataFrame",
"os.remove",
"zipfile.ZipFile",
"selenium.webdriver.Firefox",
"selenium.webdriver.FirefoxProfile",
"getpass.getpass",
"time.sleep",
"lxml.etree.iterparse",
"numpy.array",
"pandas.Series"
] | [((399, 460), 'lxml.etree.iterparse', 'et.iterparse', (['f'], {'tag': '"""{http://naesb.org/espi}IntervalReading"""'}), "(f, tag='{http://naesb.org/espi}IntervalReading')\n", (411, 460), True, 'from lxml import etree as et\n'), ((1570, 1596), 'selenium.webdriver.FirefoxProfile', 'webdriver.FirefoxProfile', ([], {}), '()\n', (1594, 1596), False, 'from selenium import webdriver\n'), ((1911, 1937), 'selenium.webdriver.Firefox', 'webdriver.Firefox', (['profile'], {}), '(profile)\n', (1928, 1937), False, 'from selenium import webdriver\n'), ((2590, 2598), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (2595, 2598), False, 'from time import sleep\n'), ((2846, 2860), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2858, 2860), True, 'import pandas as pd\n'), ((3237, 3247), 'os.remove', 'remove', (['zf'], {}), '(zf)\n', (3243, 3247), False, 'from os import remove\n'), ((781, 812), 'numpy.array', 'np.array', (['consumed'], {'dtype': 'float'}), '(consumed, dtype=float)\n', (789, 812), True, 'import numpy as np\n'), ((825, 852), 'numpy.array', 'np.array', (['cost'], {'dtype': 'float'}), '(cost, dtype=float)\n', (833, 852), True, 'import numpy as np\n'), ((1451, 1460), 'getpass.getpass', 'getpass', ([], {}), '()\n', (1458, 1460), False, 'from getpass import getpass\n'), ((2700, 2724), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zf', '"""r"""'], {}), "(zf, 'r')\n", (2715, 2724), False, 'import zipfile\n'), ((3276, 3285), 'os.remove', 'remove', (['f'], {}), '(f)\n', (3282, 3285), False, 'from os import remove\n'), ((714, 744), 'numpy.array', 'np.array', (['timestamp'], {'dtype': 'int'}), '(timestamp, dtype=int)\n', (722, 744), True, 'import numpy as np\n'), ((897, 920), 'pandas.Series', 'pd.Series', (['nc'], {'index': 'nt'}), '(nc, index=nt)\n', (906, 920), True, 'import pandas as pd\n'), ((975, 998), 'pandas.Series', 'pd.Series', (['no'], {'index': 'nt'}), '(no, index=nt)\n', (984, 998), True, 'import pandas as pd\n')] |
import numpy as np
import seaborn as sns
from numpy import genfromtxt
from matplotlib import pyplot as plt
from sklearn.decomposition import FastICA
import pandas as pd
# data = genfromtxt('Z:/nani/experiment/aldoh/dry laugh/dry laugh_2019.06.01_12.26.08.csv', skip_header=1, delimiter=',')
# data = genfromtxt('Z:/nani/experiment/aldoh/funny laugh/funny laugh_2019.06.01_12.33.38.csv', skip_header=1, delimiter=',')
# data = genfromtxt('Z:/nani/experiment/aldoh/short laugh 2/short laugh 2_2019.06.01_11.56.53.csv', skip_header=1, delimiter=',')
# data = genfromtxt('Z:/nani/experiment/cra/funny laugh/funny laugh_2019.06.02_14.22.42.csv', skip_header=1, delimiter=',')
# data = genfromtxt('Z:/nani/experiment/ila/dry laugh/dry laugh_2019.05.22_17.47.10.csv', skip_header=1, delimiter=',')
# loc = 'Z:/nani/experiment/ila/dry laugh/dry laugh_2019.05.22_17.47.10.csv'
# loc = 'Z:/nani/experiment/ovi/funny laugh/forced laugh funny_2019.05.22_12.32.52.csv'
# loc = 'Z:/nani/experiment/ila/funny laugh/forced funny laugh_2019.05.22_17.55.35.csv'
# loc = 'Z:/nani/experiment/aldoh/funny laugh/funny laugh_2019.06.01_12.33.38.csv'
# loc = 'Z:/nani/experiment/aldoh/dry laugh/dry laugh_2019.06.01_12.26.08.csv'
# loc = 'Z:/nani/experiment/cra/funny laugh/funny laugh_2019.06.02_14.22.42.csv'
# loc = 'Z:/nani/experiment/cra/dry laugh/dry laugh_2019.06.02_14.18.15.csv'
# loc = 'Z:/nani/experiment/rijuu/funny laugh/funny laugh_2019.06.07_15.51.39.csv'
# loc = 'Z:/nani/experiment/skot/funny laugh/funny laugh_2019.06.12_17.45.30.csv'
# loc = 'Z:/nani/experiment/skot/dry laugh/dry laugh_2019.06.12_17.40.45.csv'
# loc = 'Z:/nani/experiment/sinlo/funny laugh/funny laugh_2019.06.10_15.15.58.csv'
# loc = 'Z:/nani/experiment/sinlo/dry laugh/dry laugh_2019.06.10_15.12.31.csv'
# loc = 'Z:/nani/experiment/vyn/funny laugh/funny laugh_2019.06.05_13.46.41.csv'
# loc = 'Z:/nani/experiment/vyn/dry laugh/dry laugh_2019.06.05_13.39.19.csv'
# loc = 'Z:/nani/experiment/cips/funny laugh/funny laugh_2019.06.03_15.30.48.csv'
# loc = 'Z:/nani/experiment/cips/dry laugh/dry laugh_2019.06.03_15.26.57.csv'
# loc = 'Z:/nani/experiment/gav/funny laugh/forced funny laugh_2019.05.20_16.34.26.csv'
# loc = 'Z:/nani/experiment/gav/dry laugh/forced dry laugh_2019.05.20_16.29.02.csv'
# loc = 'Z:/nani/experiment/rot/funny laugh/forced laugh_2019.05.21_17.49.21.csv' #troubled
# loc = 'Z:/nani/experiment/rot/dry laugh/forced dry 2_2019.05.21_17.42.58.csv'
# loc = 'Z:/nani/experiment/manai/funny laugh/funny laugh_2019.06.13_17.25.34.csv'
# loc = 'Z:/nani/experiment/manai/dry laugh/dry laugh_2019.06.13_17.20.36.csv'
# loc = 'Z:/nani/experiment/nature/funny laugh/funny laugh_2019.06.14_16.27.00.csv'
# loc = 'Z:/nani/experiment/nature/dry laugh/dry laugh_2019.06.14_16.23.10.csv'
# loc = 'Z:/nani/experiment/hrz/funny laugh/funny laugh_2019.05.27_17.07.44.csv'
# loc = 'Z:/nani/experiment/fira/funny laugh/funny laugh_2019.06.19_14.27.42.csv'
# loc = 'Z:/nani/experiment/fira/dry laugh/dry laugh_2019.06.19_14.25.20.csv'
# loc = 'Z:/nani/experiment/alin/funny laugh/funny laugh_2019.06.21_14.11.54.csv'
# loc = 'Z:/nani/experiment/kirk/funny laugh/funny laugh_2019.06.20_17.33.16.csv'
# loc = 'Z:/nani/experiment/prg/funny laugh/funny laugh_2019.06.21_11.58.33.csv'
# loc = 'Z:/nani/experiment/ovi2/dry laugh/dry laugh_2019.06.25_12.07.10.csv'
# loc = 'Z:/nani/experiment/ovi2/funny laugh/funny laugh_2019.06.25_12.11.08.csv'
# loc = 'Z:/nani/experiment/ovi2/funny laugh/funny laugh_2019.06.25_12.17.13.csv'
# Selected recording for this run; the commented-out paths above are the other
# subjects/sessions this script has been pointed at.
loc = 'Z:/nani/experiment/cdef/funny laugh/funny laugh2_2019.06.25_14.32.48.csv'
name = 'cdef'  # subject id, used to build the output pickle path
type = 'funnylaugh'  # NOTE(review): shadows the builtin `type`; only used in the pickle filename
# type = 'funnylaugh'
# Load the CSV twice: as a raw float array for the samples, and as a DataFrame
# to locate the protocol start marker.
data = genfromtxt(loc, skip_header=1, delimiter=',')
df = pd.read_csv(loc, header=None, skiprows=1)
df.columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O']
# Rows where column 't' carries the marker value "100"; the first one is taken
# as the start of the experiment protocol.
peteek = (df.query('t == "100"').index)
startpoint = (df.query('t == "100"').index[0])
# startpoint = startpoint +128 #adding 1 second after
# startpoint = 0
# Keep columns 2..15 - presumably the 14 EEG channels; TODO confirm layout.
data = data[:,2:16]
print(data.shape)
useica = 0        # set to 1 to unmix channels with FastICA before segmenting
multivalue = 200  # vertical offset between channels for stacked plotting
if useica==1:
    ica = FastICA(n_components=14, max_iter=500)
    data = ica.fit_transform(data)
    multivalue /= 10000  # ICA components are far smaller, so shrink the offset
# Stack the channels by adding a per-channel vertical offset.
for x in range(0,14):
    data[:,x] = data[:,x] + (multivalue*x)
# Build segment boundaries from the start marker. Each of the 5 protocol
# cycles is 5s / 1s / 5s / 3.15s; the 128 factor is presumably the 128 Hz
# sample rate (matches `sf = 128.` further down) - TODO confirm.
mbohtalah = startpoint
ranges = [mbohtalah]
for y in range(0,5):
    mbohtalah = mbohtalah+(5*128)
    ranges.append(mbohtalah)
    mbohtalah = mbohtalah+(1*128)
    ranges.append(mbohtalah)
    mbohtalah = mbohtalah+(5*128)
    ranges.append(mbohtalah)
    mbohtalah = mbohtalah+(3.15*128)
    ranges.append(mbohtalah)
# for y in range(0,5):
#     mbohtalah = mbohtalah+(5*128)
#     ranges.append(mbohtalah)
#     mbohtalah = mbohtalah+(2*128)
#     ranges.append(mbohtalah)
#     mbohtalah = mbohtalah+(5*128)
#     ranges.append(mbohtalah)
#     mbohtalah = mbohtalah+(2.15*128)
#     ranges.append(mbohtalah)
# Slice the signal into the two classes: intervals at positions 0 mod 4 go to
# ds1 (saved as "_yes"), intervals at 2 mod 4 go to ds2 (saved as "_no").
ds1 = []
ds2 = []
for z in range(0,len(ranges)-1):
    if z%4 == 0:
        ds1.append(data[int(round(ranges[z])):int(round(ranges[z+1]))])
    elif z%4 == 2:
        ds2.append(data[int(round(ranges[z])):int(round(ranges[z+1]))])
print(np.array(ds1).shape)
print(np.array(ds2).shape)
import pickle
# Persist both segment lists for downstream analysis/training.
with open('Z:/nani/experiment/'+name+'/'+type+'_yes.pkl', 'wb') as f:  # Python 3: open(..., 'wb')
    pickle.dump([ds1],f)
with open('Z:/nani/experiment/'+name+'/'+type+'_no.pkl', 'wb') as f:  # Python 3: open(..., 'wb')
    pickle.dump([ds2],f)
print('OK')
# NOTE(review): this exit() makes everything below unreachable; the plotting
# and spectral sections only run if it is removed.
exit()
# for mbuoh in range(0,20):
# if mbuoh%4 ==0:
# plt.axvspan(ranges[mbuoh], ranges[mbuoh+1], facecolor='g', alpha=0.5,zorder=1)
# elif mbuoh%4 ==1:
# plt.axvspan(ranges[mbuoh], ranges[mbuoh+1], facecolor='b', alpha=0.5,zorder=1)
# elif mbuoh%4 ==2:
# plt.axvspan(ranges[mbuoh], ranges[mbuoh+1], facecolor='r', alpha=0.5,zorder=1)
# else:
# plt.axvspan(ranges[mbuoh], ranges[mbuoh+1], facecolor='b', alpha=0.5,zorder=1)
# # plt.axvspan(peteek[0], peteek[1], facecolor='y', alpha=0.5,zorder=1)
# plt.axvspan(0, peteek[1]-peteek[0], facecolor='y', alpha=0.5,zorder=1)
# plt.plot(data[peteek[0]:],lw=0.2)
# plt.show()
# plt.plot(np.array(ds1).reshape((3200,14)),lw=0.5, color="k")
# plt.plot(np.array(ds2).reshape((3200,14)),lw=0.5, color="b")
# plt.axvline(x=640, color="r")
# plt.axvline(x=1280, color="r")
# plt.axvline(x=1920, color="r")
# plt.axvline(x=2560, color="r")
# plt.show()
# dsa = np.mean(ds1,axis=0)
# dsb = np.mean(ds2,axis=0)
# plt.plot(dsa, color="k")
# plt.plot(dsb, color="b")
# plt.show()
####################FFT,WELCH########################
import numpy as np
# data = np.array(ds1).reshape((3200,14))[:,9]
# data2 = np.array(ds2).reshape((3200,14))[:,9]
# data = np.array(ds1)[1,:,9]
# data2 = np.array(ds1)[2,:,9]
import matplotlib.pyplot as plt
import seaborn as sns
# --- Time-domain overlay plot (unreachable: exit() above ends the script) ---
sns.set(font_scale=1.2)
# Define sampling frequency and time vector
sf = 128.
# Plot the signal: overlay the 5 segments of each class, channel index 4.
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
for ww in range(0,5):
    data = np.array(ds1)[ww,:,4]
    # data = data * np.hamming(640)
    time = np.arange(data.size) / sf
    plt.plot(time, data, lw=1.5, color='k', alpha=0.5)
# data = np.mean(ds1,axis=0)[:,0]
# data = data*(np.concatenate((np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128)), axis=None))
# NOTE(review): re-plots the last ds1 segment a second time - `data`/`time`
# still hold the final loop iteration's values.
time = np.arange(data.size) / sf
plt.plot(time, data, lw=1.5, color='k', alpha=0.5)
for ww in range(0,5):
    data = np.array(ds2)[ww,:,4]
    # data = data * np.hamming(640)
    time = np.arange(data.size) / sf
    plt.plot(time, data, lw=1.5, color='b', alpha=0.5)
# data = np.mean(ds2,axis=0)[:,0]
# data = data*(np.concatenate((np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128)), axis=None))
time = np.arange(data.size) / sf
plt.plot(time, data, lw=1.5, color='b', alpha=0.5)
plt.xlabel('Time (seconds)')
plt.ylabel('Voltage')
plt.xlim([time.min(), time.max()])
plt.title('N3 sleep EEG data (9)')  # NOTE(review): title looks copied from a sleep-EEG example
sns.despine()
plt.show()
from scipy import signal
# --- Welch PSD comparison (unreachable: exit() above ends the script) ---
# Define window length (4 seconds)
win = 4 * sf
# Plot the power spectrum of channel 4 for every segment of both classes.
sns.set(font_scale=1.2, style='white')
plt.figure(figsize=(8, 4))
for ww in range(0,5):
    data = np.array(ds1)[ww,:,4]
    freqs, psd = signal.welch(data, sf, nperseg=win)
    # psd = psd*psd
    plt.plot(freqs, psd, color='k', lw=1, alpha=0.5)
# data = np.mean(ds1,axis=0)[:,0]
# data = data*(np.concatenate((np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128)), axis=None))
# NOTE(review): recomputes and re-plots the last ds1 segment a second time.
freqs, psd = signal.welch(data, sf, nperseg=win)
plt.plot(freqs, psd, color='k', lw=1, alpha=0.5)
for ww in range(0,5):
    data = np.array(ds2)[ww,:,4]
    freqs, psd = signal.welch(data, sf, nperseg=win)
    # psd = psd*psd
    plt.plot(freqs, psd, color='b', lw=1, alpha=0.5)
# data = np.mean(ds2,axis=0)[:,0]
# data = data*(np.concatenate((np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128), np.hanning(128)), axis=None))
# NOTE(review): this repeated plot uses color='k' although the data comes from
# ds2 (plotted in 'b' above) - likely a copy-paste slip; verify intent.
freqs, psd = signal.welch(data, sf, nperseg=win)
plt.plot(freqs, psd, color='k', lw=1, alpha=0.5)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power spectral density (V^2 / Hz)')
# plt.ylim([0, 2500])
plt.title("Welch's periodogram")
plt.xlim([0, freqs.max()])
sns.despine()
plt.show()
exit()
#####################################################
# --- Channel covariance heatmaps (doubly unreachable: behind two exit()s) ---
# NOTE(review): `dsa` and `dsb` are never defined - their assignments above
# (np.mean of ds1/ds2) are commented out, so this would raise NameError.
# plt.figure(figsize=(64,64))
# plt.xticks(rotation=90)
data = np.cov(np.transpose(dsa))
sns.heatmap(data)
plt.show()
data = np.cov(np.transpose(dsb))
sns.heatmap(data)
plt.show()
exit()
# Per-segment channel covariance heatmaps for the five ds1 segments.
data = np.cov(np.transpose(ds1[0]))
sns.heatmap(data)
plt.show()
data = np.cov(np.transpose(ds1[1]))
sns.heatmap(data)
plt.show()
data = np.cov(np.transpose(ds1[2]))
sns.heatmap(data)
plt.show()
data = np.cov(np.transpose(ds1[3]))
sns.heatmap(data)
plt.show()
data = np.cov(np.transpose(ds1[4]))
sns.heatmap(data)
plt.show()
| [
"matplotlib.pyplot.title",
"sklearn.decomposition.FastICA",
"pickle.dump",
"matplotlib.pyplot.show",
"scipy.signal.welch",
"matplotlib.pyplot.plot",
"seaborn.heatmap",
"pandas.read_csv",
"numpy.transpose",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots",
"seaborn.despine",
"matplotlib.pyplo... | [((3629, 3674), 'numpy.genfromtxt', 'genfromtxt', (['loc'], {'skip_header': '(1)', 'delimiter': '""","""'}), "(loc, skip_header=1, delimiter=',')\n", (3639, 3674), False, 'from numpy import genfromtxt\n'), ((3680, 3721), 'pandas.read_csv', 'pd.read_csv', (['loc'], {'header': 'None', 'skiprows': '(1)'}), '(loc, header=None, skiprows=1)\n', (3691, 3721), True, 'import pandas as pd\n'), ((6871, 6894), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)'}), '(font_scale=1.2)\n', (6878, 6894), True, 'import seaborn as sns\n'), ((6979, 7014), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 4)'}), '(1, 1, figsize=(12, 4))\n', (6991, 7014), True, 'import matplotlib.pyplot as plt\n'), ((7394, 7444), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'data'], {'lw': '(1.5)', 'color': '"""k"""', 'alpha': '(0.5)'}), "(time, data, lw=1.5, color='k', alpha=0.5)\n", (7402, 7444), True, 'import matplotlib.pyplot as plt\n'), ((7824, 7874), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'data'], {'lw': '(1.5)', 'color': '"""b"""', 'alpha': '(0.5)'}), "(time, data, lw=1.5, color='b', alpha=0.5)\n", (7832, 7874), True, 'import matplotlib.pyplot as plt\n'), ((7875, 7903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (seconds)"""'], {}), "('Time (seconds)')\n", (7885, 7903), True, 'import matplotlib.pyplot as plt\n'), ((7904, 7925), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Voltage"""'], {}), "('Voltage')\n", (7914, 7925), True, 'import matplotlib.pyplot as plt\n'), ((7961, 7995), 'matplotlib.pyplot.title', 'plt.title', (['"""N3 sleep EEG data (9)"""'], {}), "('N3 sleep EEG data (9)')\n", (7970, 7995), True, 'import matplotlib.pyplot as plt\n'), ((7996, 8009), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (8007, 8009), True, 'import seaborn as sns\n'), ((8010, 8020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8018, 8020), True, 'import matplotlib.pyplot as plt\n'), ((8123, 8161), 'seaborn.set', 
'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (8130, 8161), True, 'import seaborn as sns\n'), ((8162, 8188), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (8172, 8188), True, 'import matplotlib.pyplot as plt\n'), ((8546, 8581), 'scipy.signal.welch', 'signal.welch', (['data', 'sf'], {'nperseg': 'win'}), '(data, sf, nperseg=win)\n', (8558, 8581), False, 'from scipy import signal\n'), ((8582, 8630), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs', 'psd'], {'color': '"""k"""', 'lw': '(1)', 'alpha': '(0.5)'}), "(freqs, psd, color='k', lw=1, alpha=0.5)\n", (8590, 8630), True, 'import matplotlib.pyplot as plt\n'), ((8988, 9023), 'scipy.signal.welch', 'signal.welch', (['data', 'sf'], {'nperseg': 'win'}), '(data, sf, nperseg=win)\n', (9000, 9023), False, 'from scipy import signal\n'), ((9024, 9072), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs', 'psd'], {'color': '"""k"""', 'lw': '(1)', 'alpha': '(0.5)'}), "(freqs, psd, color='k', lw=1, alpha=0.5)\n", (9032, 9072), True, 'import matplotlib.pyplot as plt\n'), ((9073, 9101), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (9083, 9101), True, 'import matplotlib.pyplot as plt\n'), ((9102, 9149), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power spectral density (V^2 / Hz)"""'], {}), "('Power spectral density (V^2 / Hz)')\n", (9112, 9149), True, 'import matplotlib.pyplot as plt\n'), ((9172, 9204), 'matplotlib.pyplot.title', 'plt.title', (['"""Welch\'s periodogram"""'], {}), '("Welch\'s periodogram")\n', (9181, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9232, 9245), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (9243, 9245), True, 'import seaborn as sns\n'), ((9246, 9256), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9254, 9256), True, 'import matplotlib.pyplot as plt\n'), ((9409, 9426), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), 
'(data)\n', (9420, 9426), True, 'import seaborn as sns\n'), ((9427, 9437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9435, 9437), True, 'import matplotlib.pyplot as plt\n'), ((9471, 9488), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), '(data)\n', (9482, 9488), True, 'import seaborn as sns\n'), ((9489, 9499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9497, 9499), True, 'import matplotlib.pyplot as plt\n'), ((9543, 9560), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), '(data)\n', (9554, 9560), True, 'import seaborn as sns\n'), ((9561, 9571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9569, 9571), True, 'import matplotlib.pyplot as plt\n'), ((9608, 9625), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), '(data)\n', (9619, 9625), True, 'import seaborn as sns\n'), ((9626, 9636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9634, 9636), True, 'import matplotlib.pyplot as plt\n'), ((9673, 9690), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), '(data)\n', (9684, 9690), True, 'import seaborn as sns\n'), ((9691, 9701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9699, 9701), True, 'import matplotlib.pyplot as plt\n'), ((9738, 9755), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), '(data)\n', (9749, 9755), True, 'import seaborn as sns\n'), ((9756, 9766), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9764, 9766), True, 'import matplotlib.pyplot as plt\n'), ((9803, 9820), 'seaborn.heatmap', 'sns.heatmap', (['data'], {}), '(data)\n', (9814, 9820), True, 'import seaborn as sns\n'), ((9821, 9831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9829, 9831), True, 'import matplotlib.pyplot as plt\n'), ((4188, 4226), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': '(14)', 'max_iter': '(500)'}), '(n_components=14, max_iter=500)\n', (4195, 4226), False, 'from sklearn.decomposition import FastICA\n'), ((5367, 5388), 'pickle.dump', 'pickle.dump', (['[ds1]', 'f'], {}), 
'([ds1], f)\n', (5378, 5388), False, 'import pickle\n'), ((5490, 5511), 'pickle.dump', 'pickle.dump', (['[ds2]', 'f'], {}), '([ds2], f)\n', (5501, 5511), False, 'import pickle\n'), ((7147, 7197), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'data'], {'lw': '(1.5)', 'color': '"""k"""', 'alpha': '(0.5)'}), "(time, data, lw=1.5, color='k', alpha=0.5)\n", (7155, 7197), True, 'import matplotlib.pyplot as plt\n'), ((7368, 7388), 'numpy.arange', 'np.arange', (['data.size'], {}), '(data.size)\n', (7377, 7388), True, 'import numpy as np\n'), ((7577, 7627), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'data'], {'lw': '(1.5)', 'color': '"""b"""', 'alpha': '(0.5)'}), "(time, data, lw=1.5, color='b', alpha=0.5)\n", (7585, 7627), True, 'import matplotlib.pyplot as plt\n'), ((7798, 7818), 'numpy.arange', 'np.arange', (['data.size'], {}), '(data.size)\n', (7807, 7818), True, 'import numpy as np\n'), ((8261, 8296), 'scipy.signal.welch', 'signal.welch', (['data', 'sf'], {'nperseg': 'win'}), '(data, sf, nperseg=win)\n', (8273, 8296), False, 'from scipy import signal\n'), ((8321, 8369), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs', 'psd'], {'color': '"""k"""', 'lw': '(1)', 'alpha': '(0.5)'}), "(freqs, psd, color='k', lw=1, alpha=0.5)\n", (8329, 8369), True, 'import matplotlib.pyplot as plt\n'), ((8703, 8738), 'scipy.signal.welch', 'signal.welch', (['data', 'sf'], {'nperseg': 'win'}), '(data, sf, nperseg=win)\n', (8715, 8738), False, 'from scipy import signal\n'), ((8763, 8811), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs', 'psd'], {'color': '"""b"""', 'lw': '(1)', 'alpha': '(0.5)'}), "(freqs, psd, color='b', lw=1, alpha=0.5)\n", (8771, 8811), True, 'import matplotlib.pyplot as plt\n'), ((9390, 9407), 'numpy.transpose', 'np.transpose', (['dsa'], {}), '(dsa)\n', (9402, 9407), True, 'import numpy as np\n'), ((9452, 9469), 'numpy.transpose', 'np.transpose', (['dsb'], {}), '(dsb)\n', (9464, 9469), True, 'import numpy as np\n'), ((9521, 9541), 'numpy.transpose', 'np.transpose', 
(['ds1[0]'], {}), '(ds1[0])\n', (9533, 9541), True, 'import numpy as np\n'), ((9586, 9606), 'numpy.transpose', 'np.transpose', (['ds1[1]'], {}), '(ds1[1])\n', (9598, 9606), True, 'import numpy as np\n'), ((9651, 9671), 'numpy.transpose', 'np.transpose', (['ds1[2]'], {}), '(ds1[2])\n', (9663, 9671), True, 'import numpy as np\n'), ((9716, 9736), 'numpy.transpose', 'np.transpose', (['ds1[3]'], {}), '(ds1[3])\n', (9728, 9736), True, 'import numpy as np\n'), ((9781, 9801), 'numpy.transpose', 'np.transpose', (['ds1[4]'], {}), '(ds1[4])\n', (9793, 9801), True, 'import numpy as np\n'), ((5202, 5215), 'numpy.array', 'np.array', (['ds1'], {}), '(ds1)\n', (5210, 5215), True, 'import numpy as np\n'), ((5229, 5242), 'numpy.array', 'np.array', (['ds2'], {}), '(ds2)\n', (5237, 5242), True, 'import numpy as np\n'), ((7048, 7061), 'numpy.array', 'np.array', (['ds1'], {}), '(ds1)\n', (7056, 7061), True, 'import numpy as np\n'), ((7117, 7137), 'numpy.arange', 'np.arange', (['data.size'], {}), '(data.size)\n', (7126, 7137), True, 'import numpy as np\n'), ((7478, 7491), 'numpy.array', 'np.array', (['ds2'], {}), '(ds2)\n', (7486, 7491), True, 'import numpy as np\n'), ((7547, 7567), 'numpy.arange', 'np.arange', (['data.size'], {}), '(data.size)\n', (7556, 7567), True, 'import numpy as np\n'), ((8222, 8235), 'numpy.array', 'np.array', (['ds1'], {}), '(ds1)\n', (8230, 8235), True, 'import numpy as np\n'), ((8664, 8677), 'numpy.array', 'np.array', (['ds2'], {}), '(ds2)\n', (8672, 8677), True, 'import numpy as np\n')] |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from functools import reduce
import logging
import math
from copy import deepcopy
import numpy as np
from graph.types.rnn import GRUParameters
from quantization.multiplicative.quantizers.rnn_mult_ne16 import (
calculatate_weight_q, limit_input_precision, roundup)
from quantization.multiplicative.scaling_qtypes import MultMulBiasScaleQType
from quantization.new_qrec import QRec
from quantization.qtype import QType
from quantization.quantizer_options import *
from quantization.quantizer_options import (FORCE_EXTERNAL_SIZE_OPTION,
NARROW_STATE_OPTION,
NARROW_WEIGHTS_OPTION,
NE16_WEIGHT_BITS_OPTION,
USE_NE16_OPTION)
from quantization.unified_quantization_handler import (in_qs_constraint,
option_constraint,
options,
out_qs_constraint,
params_type)
from utils.stats_funcs import calc_bits
from .rnn_mult_ne16 import NE16RNNMultQuantizionHandler, calc_bias_offset, calc_weight_q
# Module logger, namespaced under 'nntool' so the application's logging
# configuration governs this quantizer's warnings.
LOG = logging.getLogger('nntool.' + __name__)
def get_maxq_val(stats, scale):
    """Number of quantization steps needed to cover the range's absolute peak.

    Equivalent to ceil(max(|stats['min']|, |stats['max']|) / scale); operates
    elementwise when the stats entries are numpy arrays.
    """
    peak = np.maximum(np.abs(stats['min']), np.abs(stats['max']))
    return np.ceil(peak / scale)
def get_max(stat):
    """Largest magnitude reached anywhere in the stat's [min, max] range."""
    low = np.abs(stat['min'])
    high = np.abs(stat['max'])
    return np.maximum(low, high)
def get_max_or_one(stat):
    """Absolute peak of the stat's range, with zero entries replaced by 1.0.

    The substitution guarantees callers never receive a zero peak.
    """
    peak = np.maximum(np.abs(stat['min']), np.abs(stat['max']))
    return np.where(peak == 0, 1.0, peak)
def combine_stats(stats, *keys):
    """Merge the min/max ranges stored under *keys* into a single envelope.

    Keys absent from *stats* are skipped. Returns None when no requested key
    is present; otherwise a dict with the overall 'min' and 'max'.
    """
    selected = [stats[key] for key in keys if key in stats]
    if not selected:
        return None
    merged = selected[0]
    for item in selected[1:]:
        merged = {'min': min(merged['min'], item['min']),
                  'max': max(merged['max'], item['max'])}
    return merged
def combine_qtype_ranges(qtypes, *indexes):
    """Fold the (min_val, max_val) ranges of the selected qtypes into one range.

    Indexes pointing at None entries are skipped. Returns None when nothing is
    selected; with a single qtype its raw min_val/max_val are returned, and
    further qtypes are folded in via elementwise min/max reduced to scalars.
    """
    chosen = [qtypes[idx] for idx in indexes if qtypes[idx] is not None]
    if not chosen:
        return None
    combined = None
    for qtype in chosen:
        if combined is None:
            combined = {'min': qtype.min_val, 'max': qtype.max_val}
        else:
            combined = {'min': np.min(np.minimum(combined['min'], qtype.min_val)),
                        'max': np.max(np.maximum(combined['max'], qtype.max_val))}
    return combined
@options(
    NE16_WEIGHT_BITS_OPTION,
    FORCE_EXTERNAL_SIZE_OPTION,
    NARROW_WEIGHTS_OPTION,
    USE_NE16_OPTION,
    MAX_PRECISION_LIMIT_OPTION
)
class GRUMultMultNE16Base(NE16RNNMultQuantizionHandler):
    """Shared NE16 scaled-quantization logic for GRU nodes.

    Subclasses pin the external size (8 or 16 bit) via option constraints and
    delegate to :meth:`_quantize_gru` with the matching ``input_bits``.
    """

    @classmethod
    def _quantize_gru(cls, params, in_qs, stats, input_bits, **kwargs):
        """Build a scaled NE16 quantization record for a GRU node.

        Args:
            params: the GRUParameters node being quantized.
            in_qs: candidate input QTypes indexed per
                ``GRUParameters.INPUT_NAMES``; deep-copied before mutation.
            stats: calibration range statistics (``range_*_gate_*`` keys).
            input_bits: 8 or 16 - selects uint8/uint16 input and state format.
            **kwargs: must carry the graph under ``'G'``; quantizer options
                under ``'opts'`` (weight_bits, narrow_weights, ...).

        Returns:
            A ``QRec`` holding all tensor QTypes and per-gate scaling
            factors, or ``None`` when an output qtype is forced or
            ``in_qs`` is missing.
        """
        force_out_qs, out_dtype = cls.get_mult_opts(**kwargs)
        # This handler cannot honour forced output qtypes.
        if force_out_qs and any(fqtype is not None for fqtype in force_out_qs):
            return None
        opts = kwargs.get('opts', {})
        if input_bits == 16:
            in_out_dtype = np.uint16
        else:
            in_out_dtype = np.uint8
        if in_qs is None:
            return None
        in_qs = deepcopy(in_qs)
        G = kwargs['G']
        in_q = in_qs[0]
        cls.check_valid_ranges(params, stats, idx=0, dirs='out')
        in_edges = G.indexed_in_edges(params.name)
        names = {val: idx for idx, val in enumerate(
            GRUParameters.INPUT_NAMES)}
        # State tensor: unsigned Q7/Q15 over [-1, 1] with a mid-range zero point.
        o_q = in_qs[names['h_state']] = QType(
            q=15 if input_bits == 16 else 7,
            zero_point=in_out_dtype(math.pow(2, input_bits-1)),
            min_val=-1,
            max_val=1,
            dtype=in_out_dtype)
        # Set weight qtypes; dimensions are padded to the NE16 internal width.
        int_num_inp = roundup(params.n_inputs, input_bits == 16)
        int_num_states = roundup(params.n_states, input_bits == 16)
        for gate in ['z', 'r', 'h']:
            i_idx = names[f'w_2_{gate}_w']
            r_idx = names[f'r_2_{gate}_w']
            in_qs[i_idx] = calc_weight_q(in_edges[i_idx].from_node, (params.n_states, params.n_inputs),
                                          (params.n_states, int_num_inp),
                                          opts['weight_bits'],
                                          opts.get('narrow_weights'))
            in_qs[r_idx] = calc_weight_q(in_edges[r_idx].from_node, (params.n_states, params.n_states),
                                          (params.n_states, int_num_states),
                                          opts['weight_bits'],
                                          opts.get('narrow_weights'))
        # Check for accumulator overflow; may reduce the input precision.
        in_q = limit_input_precision(
            params,
            input_bits,
            in_q,
            int_num_inp,
            opts['narrow_weights'],
            opts['weight_bits'],
            opts.get('max_precision_limit', MAX_PRECISION_LIMIT_OPTION['default']),
            w_qs=[in_qs[names[f'w_2_{gate}_w']] for gate in ['z', 'r']],
            out_ranges=[stats.get(f'range_{gate}_gate_inp') for gate in ['z', 'r']])
        # The state out is not limited (limit 0) but run the check so that
        # warnings are still printed.
        o_q = limit_input_precision(
            params,
            input_bits,
            o_q,
            int_num_states,
            opts['narrow_weights'],
            opts['weight_bits'],
            0,
            w_qs=[in_qs[names[f'r_2_{gate}_w']] for gate in ['z', 'r', 'h']],
            out_ranges=[stats.get(f'range_{gate}_gate_state') for gate in ['z', 'r', 'h']])
        # Per-gate bias corrections cancelling the input/state zero points.
        woffs = {}
        for gate in ['z', 'r', 'h']:
            i_idx = names[f'w_2_{gate}_w']
            r_idx = names[f'r_2_{gate}_w']
            woffs[gate] = [
                calc_bias_offset(in_edges[i_idx].from_node, in_qs[i_idx], in_q.zero_point),
                calc_bias_offset(in_edges[r_idx].from_node, in_qs[r_idx], o_q.zero_point),
            ]
        # (input weight scale, recurrent weight scale) per gate.
        scale_pairs = {chan: ('w_2_%s_w' % chan, 'r_2_%s_w' % chan)
                       for chan in ['z', 'r', 'h']}
        w_scales = [(in_qs[names[namei]].scale, in_qs[names[namer]].scale)
                    for k, (namei, namer) in scale_pairs.items()]
        # Worst-case accumulator magnitudes from the calibration ranges
        # (all-zero ranges are replaced by 1 to keep log2 finite).
        gate_sum_max = [
            (get_max_or_one(stats[f'range_{gate}_gate_inp']),
             get_max_or_one(stats[f'range_{gate}_gate_state']))
            for gate in ['z', 'r', 'h']
        ]
        gate_sum_max_bits = [
            (np.ceil(np.log2(gsm_i / (in_qs[0].scale * i_w))),
             np.ceil(np.log2(gsm_r / (o_q.scale * r_w))))
            for (gsm_i, gsm_r), (i_w, r_w) in zip(gate_sum_max, w_scales)]
        for gate, (max_i, max_r) in zip(['z', 'r', 'h'], gate_sum_max_bits):
            if np.max(max_i) > 30:
                LOG.warning(
                    'max bits in accumulation input %s gate %s - there may be errors',
                    max_i, gate)
            if np.max(max_r) > 30:
                # BUGFIX: previously logged max_i although the check is on max_r.
                LOG.warning(
                    'max bits in accumulation state %s gate %s - there may be errors',
                    max_r, gate)
        # LUT activations consume Q12 and produce Q15.
        act_in_q = 12
        act_out_q = 15
        int_scale = math.pow(2, -act_in_q)
        out_tanh_sig_scale = math.pow(2, -act_out_q)
        scale_qtypes = {}
        r_pscales = {}
        i_pscales = {}
        scale_qtypes['r_pscales'] = r_pscales
        scale_qtypes['i_pscales'] = i_pscales
        for gate, w_scale in zip(['z', 'r', 'h'], w_scales):
            # TODO - decide whether to scale input/recurrent weights equally
            i_pscales[gate] = w_scale[0] * in_q.scale
            r_pscales[gate] = w_scale[1] * o_q.scale
            # h gate input is added manually to state in Q12; in 16-bit mode
            # every input product is rescaled straight to Q12.
            if input_bits == 16 or gate == 'h':
                scale_qtypes[f"w_2_{gate}_q"] = qscale = MultMulBiasScaleQType(
                    scale=i_pscales[gate] / int_scale
                )
            else:
                # 8-bit z/r gates: input product is rescaled onto the
                # recurrent product's scale instead.
                scale_qtypes[f"w_2_{gate}_q"] = qscale = MultMulBiasScaleQType(
                    scale=i_pscales[gate] / r_pscales[gate]
                )
            if input_bits == 16:
                i_zp_b = woffs[gate][0]
                if gate == "h":
                    in_qs[names['w_h_b']] = QType(
                        dtype=np.int32,
                        scale=i_pscales[gate],
                        offset=i_zp_b,
                        quantized_dimension=0
                    )
            else:
                # Fold the scaling multiplier into the zero-point bias; the
                # (1 << (qnorms - 1)) term is presumably a rounding-half bias
                # for the later shift - confirm against MultMulBiasScaleQType.
                i_zp_b = woffs[gate][0] * qscale.qbiases.astype(
                    np.int32) + (1 << (qscale.qnorms.astype(np.int32) - 1))
                if gate == "h":
                    in_qs[names['w_h_b']] = QType(
                        dtype=np.int32,
                        scale=i_pscales[gate] / qscale.qbiases,
                        offset=i_zp_b,
                        quantized_dimension=0
                    )
            scale_qtypes[f"r_2_{gate}_q"] = qscale = MultMulBiasScaleQType(
                scale=r_pscales[gate] / int_scale
            )
            if gate == 'h':
                bias_name = 'r_h_b'
                interleaved_values = None
            else:
                # z/r biases carry the input-side zero-point correction as an
                # interleaved value.
                bias_name = f'{gate}_b'
                interleaved_values = [i_zp_b]
            if input_bits == 16:
                r_zp_b = woffs[gate][1]
                in_qs[names[bias_name]] = QType(
                    dtype=np.int32,
                    scale=r_pscales[gate],
                    offset=r_zp_b,
                    interleaved_values=interleaved_values,
                    quantized_dimension=0
                )
            else:
                r_zp_b = woffs[gate][1] * qscale.qbiases.astype(
                    np.int32) + (1 << (qscale.qnorms.astype(np.int32) - 1))
                in_qs[names[bias_name]] = QType(
                    dtype=np.int32,
                    scale=r_pscales[gate] / qscale.qbiases,
                    offset=r_zp_b,
                    interleaved_values=interleaved_values,
                    quantized_dimension=0
                )
        # NOTE - for 16 bit pre-normalize the scales to give us room, using the
        # smallest qnorm across all gate scalers capped at 8 so it can't go
        # negative.
        if input_bits == 16:
            gate_prenorm = min(np.min([
                np.min(scale_qtypes[f"{inp}_2_{gate}_q"].qnorms) for gate in ['z', 'r', 'h'] for inp in ['w', 'r']
            ]), 8)
            for gate in ['z', 'r', 'h']:
                for inp in ['w', 'r']:
                    scale_qtypes[f"{inp}_2_{gate}_q"].pre_normalization = gate_prenorm
        else:
            gate_prenorm = 0
        scales = {
            'i': i_pscales,
            'r': r_pscales,
            'state': o_q.scale,
            'in': in_q.scale,
            'act_in': int_scale,
            'act_out': out_tanh_sig_scale,
            'act_in_q': act_in_q,
            'act_out_q': act_out_q
        }
        scale_qtypes['i_qtype'] = QType(q=act_in_q, dtype=np.int32)
        return QRec.scaled(
            in_qs=in_qs,
            out_qs=[o_q],
            ne16=True,
            gate_prenorm=gate_prenorm,
            scales=scales,
            **scale_qtypes,
        )
@params_type(GRUParameters)
@in_qs_constraint({'dtype': np.uint8})
@out_qs_constraint({'dtype': np.uint8})
@option_constraint(force_external_size={8, None}, use_ne16=True)
class GRUMultMultNE16UInt8(GRUMultMultNE16Base):
    """NE16 GRU quantizer for uint8 inputs/outputs (external size 8 or unset)."""
    @classmethod
    def _quantize(cls, params, in_qs, stats, **kwargs):
        # Delegate to the shared GRU logic with the 8-bit input/state format.
        return cls._quantize_gru(params, in_qs, stats, 8, **kwargs)
@params_type(GRUParameters)
@in_qs_constraint({'dtype': np.uint16})
@out_qs_constraint({'dtype': np.uint16})
@option_constraint(force_external_size=16, use_ne16=True)
class GRUMultMultNE16UInt16(GRUMultMultNE16Base):
    """NE16 GRU quantizer for uint16 inputs/outputs (external size forced to 16)."""
    @classmethod
    def _quantize(cls, params, in_qs, stats, **kwargs):
        # Delegate to the shared GRU logic with the 16-bit input/state format.
        return cls._quantize_gru(params, in_qs, stats, 16, **kwargs)
| [
"numpy.abs",
"numpy.maximum",
"quantization.unified_quantization_handler.out_qs_constraint",
"quantization.multiplicative.scaling_qtypes.MultMulBiasScaleQType",
"math.pow",
"numpy.max",
"quantization.qtype.QType",
"quantization.unified_quantization_handler.in_qs_constraint",
"copy.deepcopy",
"nump... | [((1998, 2037), 'logging.getLogger', 'logging.getLogger', (["('nntool.' + __name__)"], {}), "('nntool.' + __name__)\n", (2015, 2037), False, 'import logging\n'), ((3113, 3245), 'quantization.unified_quantization_handler.options', 'options', (['NE16_WEIGHT_BITS_OPTION', 'FORCE_EXTERNAL_SIZE_OPTION', 'NARROW_WEIGHTS_OPTION', 'USE_NE16_OPTION', 'MAX_PRECISION_LIMIT_OPTION'], {}), '(NE16_WEIGHT_BITS_OPTION, FORCE_EXTERNAL_SIZE_OPTION,\n NARROW_WEIGHTS_OPTION, USE_NE16_OPTION, MAX_PRECISION_LIMIT_OPTION)\n', (3120, 3245), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((11979, 12005), 'quantization.unified_quantization_handler.params_type', 'params_type', (['GRUParameters'], {}), '(GRUParameters)\n', (11990, 12005), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12007, 12044), 'quantization.unified_quantization_handler.in_qs_constraint', 'in_qs_constraint', (["{'dtype': np.uint8}"], {}), "({'dtype': np.uint8})\n", (12023, 12044), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12046, 12084), 'quantization.unified_quantization_handler.out_qs_constraint', 'out_qs_constraint', (["{'dtype': np.uint8}"], {}), "({'dtype': np.uint8})\n", (12063, 12084), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12086, 12149), 'quantization.unified_quantization_handler.option_constraint', 'option_constraint', ([], {'force_external_size': '{8, None}', 'use_ne16': '(True)'}), '(force_external_size={8, None}, use_ne16=True)\n', (12103, 12149), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12344, 
12370), 'quantization.unified_quantization_handler.params_type', 'params_type', (['GRUParameters'], {}), '(GRUParameters)\n', (12355, 12370), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12372, 12410), 'quantization.unified_quantization_handler.in_qs_constraint', 'in_qs_constraint', (["{'dtype': np.uint16}"], {}), "({'dtype': np.uint16})\n", (12388, 12410), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12412, 12451), 'quantization.unified_quantization_handler.out_qs_constraint', 'out_qs_constraint', (["{'dtype': np.uint16}"], {}), "({'dtype': np.uint16})\n", (12429, 12451), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((12453, 12509), 'quantization.unified_quantization_handler.option_constraint', 'option_constraint', ([], {'force_external_size': '(16)', 'use_ne16': '(True)'}), '(force_external_size=16, use_ne16=True)\n', (12470, 12509), False, 'from quantization.unified_quantization_handler import in_qs_constraint, option_constraint, options, out_qs_constraint, params_type\n'), ((2347, 2385), 'numpy.where', 'np.where', (['(gate_max == 0)', '(1.0)', 'gate_max'], {}), '(gate_max == 0, 1.0, gate_max)\n', (2355, 2385), True, 'import numpy as np\n'), ((2646, 2670), 'functools.reduce', 'reduce', (['reduction', 'stats'], {}), '(reduction, stats)\n', (2652, 2670), False, 'from functools import reduce\n'), ((3079, 3110), 'functools.reduce', 'reduce', (['reduction', 'qtypes', 'None'], {}), '(reduction, qtypes, None)\n', (3085, 3110), False, 'from functools import reduce\n'), ((2198, 2217), 'numpy.abs', 'np.abs', (["stat['min']"], {}), "(stat['min'])\n", (2204, 2217), True, 'import numpy as np\n'), ((2219, 2238), 'numpy.abs', 'np.abs', (["stat['max']"], {}), "(stat['max'])\n", 
(2225, 2238), True, 'import numpy as np\n'), ((2294, 2313), 'numpy.abs', 'np.abs', (["stat['min']"], {}), "(stat['min'])\n", (2300, 2313), True, 'import numpy as np\n'), ((2315, 2334), 'numpy.abs', 'np.abs', (["stat['max']"], {}), "(stat['max'])\n", (2321, 2334), True, 'import numpy as np\n'), ((3865, 3880), 'copy.deepcopy', 'deepcopy', (['in_qs'], {}), '(in_qs)\n', (3873, 3880), False, 'from copy import deepcopy\n'), ((4629, 4671), 'quantization.multiplicative.quantizers.rnn_mult_ne16.roundup', 'roundup', (['params.n_inputs', '(input_bits == 16)'], {}), '(params.n_inputs, input_bits == 16)\n', (4636, 4671), False, 'from quantization.multiplicative.quantizers.rnn_mult_ne16 import calculatate_weight_q, limit_input_precision, roundup\n'), ((4697, 4739), 'quantization.multiplicative.quantizers.rnn_mult_ne16.roundup', 'roundup', (['params.n_states', '(input_bits == 16)'], {}), '(params.n_states, input_bits == 16)\n', (4704, 4739), False, 'from quantization.multiplicative.quantizers.rnn_mult_ne16 import calculatate_weight_q, limit_input_precision, roundup\n'), ((7940, 7962), 'math.pow', 'math.pow', (['(2)', '(-act_in_q)'], {}), '(2, -act_in_q)\n', (7948, 7962), False, 'import math\n'), ((7992, 8015), 'math.pow', 'math.pow', (['(2)', '(-act_out_q)'], {}), '(2, -act_out_q)\n', (8000, 8015), False, 'import math\n'), ((11735, 11768), 'quantization.qtype.QType', 'QType', ([], {'q': 'act_in_q', 'dtype': 'np.int32'}), '(q=act_in_q, dtype=np.int32)\n', (11740, 11768), False, 'from quantization.qtype import QType\n'), ((11785, 11896), 'quantization.new_qrec.QRec.scaled', 'QRec.scaled', ([], {'in_qs': 'in_qs', 'out_qs': '[o_q]', 'ne16': '(True)', 'gate_prenorm': 'gate_prenorm', 'scales': 'scales'}), '(in_qs=in_qs, out_qs=[o_q], ne16=True, gate_prenorm=gate_prenorm,\n scales=scales, **scale_qtypes)\n', (11796, 11896), False, 'from quantization.new_qrec import QRec\n'), ((9779, 9835), 'quantization.multiplicative.scaling_qtypes.MultMulBiasScaleQType', 'MultMulBiasScaleQType', ([], 
{'scale': '(r_pscales[gate] / int_scale)'}), '(scale=r_pscales[gate] / int_scale)\n', (9800, 9835), False, 'from quantization.multiplicative.scaling_qtypes import MultMulBiasScaleQType\n'), ((2102, 2122), 'numpy.abs', 'np.abs', (["stats['min']"], {}), "(stats['min'])\n", (2108, 2122), True, 'import numpy as np\n'), ((2124, 2144), 'numpy.abs', 'np.abs', (["stats['max']"], {}), "(stats['max'])\n", (2130, 2144), True, 'import numpy as np\n'), ((2972, 3010), 'numpy.minimum', 'np.minimum', (["state['min']", 'item.min_val'], {}), "(state['min'], item.min_val)\n", (2982, 3010), True, 'import numpy as np\n'), ((3027, 3065), 'numpy.maximum', 'np.maximum', (["state['max']", 'item.max_val'], {}), "(state['max'], item.max_val)\n", (3037, 3065), True, 'import numpy as np\n'), ((7484, 7497), 'numpy.max', 'np.max', (['max_i'], {}), '(max_i)\n', (7490, 7497), True, 'import numpy as np\n'), ((7668, 7681), 'numpy.max', 'np.max', (['max_r'], {}), '(max_r)\n', (7674, 7681), True, 'import numpy as np\n'), ((8651, 8707), 'quantization.multiplicative.scaling_qtypes.MultMulBiasScaleQType', 'MultMulBiasScaleQType', ([], {'scale': '(i_pscales[gate] / int_scale)'}), '(scale=i_pscales[gate] / int_scale)\n', (8672, 8707), False, 'from quantization.multiplicative.scaling_qtypes import MultMulBiasScaleQType\n'), ((8821, 8883), 'quantization.multiplicative.scaling_qtypes.MultMulBiasScaleQType', 'MultMulBiasScaleQType', ([], {'scale': '(i_pscales[gate] / r_pscales[gate])'}), '(scale=i_pscales[gate] / r_pscales[gate])\n', (8842, 8883), False, 'from quantization.multiplicative.scaling_qtypes import MultMulBiasScaleQType\n'), ((10192, 10317), 'quantization.qtype.QType', 'QType', ([], {'dtype': 'np.int32', 'scale': 'r_pscales[gate]', 'offset': 'r_zp_b', 'interleaved_values': 'interleaved_values', 'quantized_dimension': '(0)'}), '(dtype=np.int32, scale=r_pscales[gate], offset=r_zp_b,\n interleaved_values=interleaved_values, quantized_dimension=0)\n', (10197, 10317), False, 'from quantization.qtype 
import QType\n'), ((10633, 10775), 'quantization.qtype.QType', 'QType', ([], {'dtype': 'np.int32', 'scale': '(r_pscales[gate] / qscale.qbiases)', 'offset': 'r_zp_b', 'interleaved_values': 'interleaved_values', 'quantized_dimension': '(0)'}), '(dtype=np.int32, scale=r_pscales[gate] / qscale.qbiases, offset=r_zp_b,\n interleaved_values=interleaved_values, quantized_dimension=0)\n', (10638, 10775), False, 'from quantization.qtype import QType\n'), ((4471, 4498), 'math.pow', 'math.pow', (['(2)', '(input_bits - 1)'], {}), '(2, input_bits - 1)\n', (4479, 4498), False, 'import math\n'), ((7213, 7252), 'numpy.log2', 'np.log2', (['(gsm_i / (in_qs[0].scale * i_w))'], {}), '(gsm_i / (in_qs[0].scale * i_w))\n', (7220, 7252), True, 'import numpy as np\n'), ((7279, 7313), 'numpy.log2', 'np.log2', (['(gsm_r / (o_q.scale * r_w))'], {}), '(gsm_r / (o_q.scale * r_w))\n', (7286, 7313), True, 'import numpy as np\n'), ((9071, 9157), 'quantization.qtype.QType', 'QType', ([], {'dtype': 'np.int32', 'scale': 'i_pscales[gate]', 'offset': 'i_zp_b', 'quantized_dimension': '(0)'}), '(dtype=np.int32, scale=i_pscales[gate], offset=i_zp_b,\n quantized_dimension=0)\n', (9076, 9157), False, 'from quantization.qtype import QType\n'), ((9507, 9610), 'quantization.qtype.QType', 'QType', ([], {'dtype': 'np.int32', 'scale': '(i_pscales[gate] / qscale.qbiases)', 'offset': 'i_zp_b', 'quantized_dimension': '(0)'}), '(dtype=np.int32, scale=i_pscales[gate] / qscale.qbiases, offset=i_zp_b,\n quantized_dimension=0)\n', (9512, 9610), False, 'from quantization.qtype import QType\n'), ((11077, 11125), 'numpy.min', 'np.min', (["scale_qtypes[f'{inp}_2_{gate}_q'].qnorms"], {}), "(scale_qtypes[f'{inp}_2_{gate}_q'].qnorms)\n", (11083, 11125), True, 'import numpy as np\n')] |
import datetime as dt
import numpy as np
from sqlalchemy import create_engine, func
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.ext.automap import automap_base
import sqlalchemy
from flask import Flask, jsonify, render_template
app = Flask(__name__)
# Database setup
# check_same_thread=False allows the single module-level connection/session
# to be used from Flask's worker threads.
# NOTE(review): sharing one Session across requests is only safe for
# read-only access — confirm no route writes to the DB.
engine = create_engine("sqlite:///Resources/hawaii.sqlite",
    connect_args={'check_same_thread': False})
conn = engine.connect()
# Reflect the existing sqlite schema into mapped ORM classes.
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
# NOTE(review): this rebinds the `Session` name imported from sqlalchemy.orm
# (shadowing); consider a different local name.
Session = sessionmaker(bind=engine)
s = Session()
# Routes
@app.route("/")
def index():
return render_template("index.html")
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return JSON mapping date -> precipitation for the last year of data.

    The "last year" is hard-coded relative to 2017-08-23 (the final date in
    the bundled hawaii.sqlite dataset).
    """
    prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    result = s.query(Measurement.date, Measurement.prcp)\
        .filter(Measurement.date > prev_year)\
        .order_by(Measurement.date.desc())\
        .all()
    # Renamed from `dict` — the original shadowed the builtin.
    precip = {date: prcp for date, prcp in result}
    return jsonify(precip)
@app.route("/api/v1.0/stations")
def stations():
    """Return JSON with the list of all station identifiers."""
    result = s.query(Station.station).all()
    # Renamed from `dict` — the original shadowed the builtin.
    payload = {'stations': [row[0] for row in result]}
    return jsonify(payload)
@app.route("/api/v1.0/tobs")
def tobs():
    """Return JSON list of temperature observations for one station/year."""
    start_date = "2016-08-23"
    end_date = "2017-08-23"
    station = "USC00519397"
    # BUG fix: the original filtered on Station.station without joining the
    # Station table to Measurement, producing an implicit cross join. Filter
    # on Measurement's own station column instead.
    # NOTE(review): assumes the measurement table has a `station` column —
    # standard for the hawaii.sqlite dataset; confirm against the schema.
    result = s.query(Measurement.tobs)\
        .filter(Measurement.date >= start_date)\
        .filter(Measurement.date <= end_date)\
        .filter(Measurement.station == station)\
        .all()
    result = list(np.ravel(result))
    return jsonify(result)
@app.route("/api/v1.0/temp/<start>/<end>")
@app.route("/api/v1.0/temp/<start>")
def dates(start=None, end=None):
    """Return [min, avg, max] temperature from `start` to optional `end`."""
    aggregates = [func.min(Measurement.tobs),
                  func.avg(Measurement.tobs),
                  func.max(Measurement.tobs)]
    query = s.query(*aggregates).filter(Measurement.date >= start)
    if end:
        # Bounded range when both endpoints were supplied.
        query = query.filter(Measurement.date <= end)
    result = query.all()
    return jsonify(list(np.ravel(result)))
if __name__ == '__main__':
    # Start the Flask development server (default host/port, debug off).
    app.run()
| [
"sqlalchemy.func.avg",
"numpy.ravel",
"flask.Flask",
"datetime.date",
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.orm.Session",
"flask.jsonify",
"sqlalchemy.func.min",
"datetime.timedelta",
"flask.render_template",
"sqlalchemy.create_engine",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy... | [((257, 272), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (262, 272), False, 'from flask import Flask, jsonify, render_template\n'), ((300, 398), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Resources/hawaii.sqlite"""'], {'connect_args': "{'check_same_thread': False}"}), "('sqlite:///Resources/hawaii.sqlite', connect_args={\n 'check_same_thread': False})\n", (313, 398), False, 'from sqlalchemy import create_engine, func\n'), ((448, 462), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (460, 462), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((578, 603), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (590, 603), False, 'from sqlalchemy.orm import Session, sessionmaker\n'), ((608, 617), 'sqlalchemy.orm.Session', 'Session', ([], {}), '()\n', (615, 617), False, 'from sqlalchemy.orm import Session, sessionmaker\n'), ((667, 696), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (682, 696), False, 'from flask import Flask, jsonify, render_template\n'), ((1030, 1043), 'flask.jsonify', 'jsonify', (['dict'], {}), '(dict)\n', (1037, 1043), False, 'from flask import Flask, jsonify, render_template\n'), ((1192, 1205), 'flask.jsonify', 'jsonify', (['dict'], {}), '(dict)\n', (1199, 1205), False, 'from flask import Flask, jsonify, render_template\n'), ((1558, 1573), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (1565, 1573), False, 'from flask import Flask, jsonify, render_template\n'), ((2070, 2085), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (2077, 2085), False, 'from flask import Flask, jsonify, render_template\n'), ((772, 792), 'datetime.date', 'dt.date', (['(2017)', '(8)', '(23)'], {}), '(2017, 8, 23)\n', (779, 792), True, 'import datetime as dt\n'), ((795, 817), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (807, 817), True, 'import 
datetime as dt\n'), ((1531, 1547), 'numpy.ravel', 'np.ravel', (['result'], {}), '(result)\n', (1539, 1547), True, 'import numpy as np\n'), ((1698, 1724), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (1706, 1724), False, 'from sqlalchemy import create_engine, func\n'), ((1726, 1752), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (1734, 1752), False, 'from sqlalchemy import create_engine, func\n'), ((1761, 1787), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (1769, 1787), False, 'from sqlalchemy import create_engine, func\n'), ((2043, 2059), 'numpy.ravel', 'np.ravel', (['result'], {}), '(result)\n', (2051, 2059), True, 'import numpy as np\n')] |
import matplotlib
# Backend must be selected before pyplot is imported.
matplotlib.use('Qt4Agg')
import pylab
import crash_on_ipy  # project-local helper; presumably drops into a debugger on error — verify
import matplotlib.pyplot as plt
from pydmd import MrDMD
from pydmd import DMD
import numpy as np
from past.utils import old_div
def create_sample_data():
    """Build the synthetic (space x time) field used in the MrDMD demo.

    The field is a sum of spatially structured complex-exponential modes,
    two Gaussian-noise terms, one localized bump, and several
    piecewise-constant "events". Returns the transpose, so the result is
    shaped (80 space points, 1600 time points).
    """
    space = np.linspace(-10, 10, 80)
    time = np.linspace(0, 20, 1600)
    Xg, Tg = np.meshgrid(space, time)
    # Base oscillating modes (complex exponentials in time).
    field = np.exp(-np.power(Xg / 2, 2)) * np.exp(0.8j * Tg)
    field = field + np.sin(0.9 * Xg) * np.exp(1j * Tg)
    field = field + np.cos(1.1 * Xg) * np.exp(2j * Tg)
    field = field + 0.6 * np.sin(1.2 * Xg) * np.exp(3j * Tg)
    field = field + 0.6 * np.cos(1.3 * Xg) * np.exp(4j * Tg)
    field = field + 0.2 * np.sin(2.0 * Xg) * np.exp(6j * Tg)
    field = field + 0.2 * np.cos(2.1 * Xg) * np.exp(8j * Tg)
    field = field + 0.1 * np.sin(5.7 * Xg) * np.exp(10j * Tg)
    field = field + 0.1 * np.cos(5.9 * Xg) * np.exp(12j * Tg)
    # Two independent noise draws, kept in the original order so the
    # consumed random stream is identical.
    field = field + 0.1 * np.random.randn(*Xg.shape)
    field = field + 0.03 * np.random.randn(*Xg.shape)
    # Localized Gaussian bump in both space and time.
    field = field + 5 * np.exp(-np.power((Xg + 5) / 5, 2)) * np.exp(-np.power((Tg - 5) / 5, 2))
    # Piecewise-constant events (rows index time, columns index space here).
    field[:800, 40:] += 2
    field[200:600, 50:70] -= 3
    field[800:, :40] -= 2
    field[1000:1400, 10:30] += 3
    field[1000:1080, 50:70] += 2
    field[1160:1240, 50:70] += 2
    field[1320:1400, 50:70] += 2
    return field.T
def make_plot(X, x=None, y=None, title=''):
    """Pseudocolor plot of the real part of X with a colorbar.

    x/y give the coordinate grids for the pcolor axes (space/time).
    """
    plt.title(title)
    real_part = np.real(X)
    mesh = plt.pcolor(x, y, real_part)
    plt.colorbar(mesh)
    plt.xlabel('Space')
    plt.ylabel('Time')
# Build the demo dataset and show it.
sample_data = create_sample_data()
x = np.linspace(-10, 10, 80)
t = np.linspace(0, 20, 1600)
plt.figure(1)
make_plot(sample_data.T, x=x, y=t)
# Plain DMD fit on the full snapshot matrix.
first_dmd = DMD(svd_rank=-1)
first_dmd.fit(X=sample_data) # 80*1600
plt.figure(2)
modes = first_dmd.modes
eig = first_dmd.eigs
# make_plot(first_dmd.recon_fn_data(modes, eig).T, x=x, y=t)
# Sort modes by |log(eig)/(2*pi)| — the magnitude of each mode's frequency.
index = np.argsort(np.abs(old_div(np.log(eig), (2. * np.pi))))
slow_models = index <= 0  # NOTE(review): computed but never used below
s_modes = modes[:, index]
print(np.shape(s_modes))
# Plot each mode's single-mode reconstruction in turn.
# NOTE(review): recon_fn_data is not part of the public pydmd API —
# presumably a local patch to the installed package; verify it exists.
for i in range(0, 80):
    plt.clf()
    make_plot(first_dmd.recon_fn_data(np.resize(modes[i], [80, 1]), eig[i]).T, x=x, y=t)
    print(np.abs(old_div(np.log(eig[i]), (2. * np.pi))))
# Multi-resolution DMD on the same data.
dmd = MrDMD(svd_rank=-1, max_level=7, max_cycles=1)
dmd.fit(X=sample_data)
plt.figure(3)
plt.title('MrDMD')
make_plot(dmd.reconstructed_data.T, x=x, y=t)
#
plt.show()
# print 'The number of eigenvalues is ' + str(dmd.eigs.shape[0])
# dmd.plot_eigs(show_axes=True, show_unit_circle=True, figsize=(8, 8))
#
# dmd.plot_eigs(show_axes=True, show_unit_circle=True, figsize=(8, 8), level=3, node=0)
#
# pmodes = dmd.partial_modes(level=0)
# fig = plt.plot(x, pmodes.real)
#
# pdyna = dmd.partial_dynamics(level=0)
# fig = plt.plot(t, pdyna.real.T)
#
# pdyna = dmd.partial_dynamics(level=1)
# print 'The number of modes in the level number 1 is ' + str(pdyna.shape[0])
# fig = plt.plot(t, pdyna.real.T)
#
# pdata = dmd.partial_reconstructed_data(level=0)
# make_plot(pdata.T, x=x, y=t, title='level 0', figsize=(7.5, 5))
#
# for i in range(1, 7):
#     pdata += dmd.partial_reconstructed_data(level=i)
#     make_plot(pdata.T, x=x, y=t, title='levels 0-' + str(i), figsize=(7.5, 5))
| [
"matplotlib.pyplot.title",
"numpy.resize",
"matplotlib.pyplot.clf",
"pydmd.DMD",
"numpy.shape",
"pydmd.MrDMD",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.exp",
"numpy.meshgrid",
"numpy.random.randn",
"numpy.power",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"numpy.real",
"mat... | [((19, 43), 'matplotlib.use', 'matplotlib.use', (['"""Qt4Agg"""'], {}), "('Qt4Agg')\n", (33, 43), False, 'import matplotlib\n'), ((1456, 1480), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(80)'], {}), '(-10, 10, 80)\n', (1467, 1480), True, 'import numpy as np\n'), ((1486, 1510), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(1600)'], {}), '(0, 20, 1600)\n', (1497, 1510), True, 'import numpy as np\n'), ((1512, 1525), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1522, 1525), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1593), 'pydmd.DMD', 'DMD', ([], {'svd_rank': '(-1)'}), '(svd_rank=-1)\n', (1580, 1593), False, 'from pydmd import DMD\n'), ((1635, 1648), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1645, 1648), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2142), 'pydmd.MrDMD', 'MrDMD', ([], {'svd_rank': '(-1)', 'max_level': '(7)', 'max_cycles': '(1)'}), '(svd_rank=-1, max_level=7, max_cycles=1)\n', (2102, 2142), False, 'from pydmd import MrDMD\n'), ((2168, 2181), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (2178, 2181), True, 'import matplotlib.pyplot as plt\n'), ((2183, 2201), 'matplotlib.pyplot.title', 'plt.title', (['"""MrDMD"""'], {}), "('MrDMD')\n", (2192, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2253, 2263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2261, 2263), True, 'import matplotlib.pyplot as plt\n'), ((252, 276), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(80)'], {}), '(-10, 10, 80)\n', (263, 276), True, 'import numpy as np\n'), ((286, 310), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(1600)'], {}), '(0, 20, 1600)\n', (297, 310), True, 'import numpy as np\n'), ((325, 342), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (336, 342), True, 'import numpy as np\n'), ((1268, 1284), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1277, 1284), True, 'import matplotlib.pyplot as 
plt\n'), ((1294, 1304), 'numpy.real', 'np.real', (['X'], {}), '(X)\n', (1301, 1304), True, 'import numpy as np\n'), ((1315, 1334), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['x', 'y', 'X'], {}), '(x, y, X)\n', (1325, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1363), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['CS'], {}), '(CS)\n', (1359, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1388), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Space"""'], {}), "('Space')\n", (1379, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1412), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time"""'], {}), "('Time')\n", (1404, 1412), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1899), 'numpy.shape', 'np.shape', (['s_modes'], {}), '(s_modes)\n', (1890, 1899), True, 'import numpy as np\n'), ((1930, 1939), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1937, 1939), True, 'import matplotlib.pyplot as plt\n'), ((381, 398), 'numpy.exp', 'np.exp', (['(0.8j * Tm)'], {}), '(0.8j * Tm)\n', (387, 398), True, 'import numpy as np\n'), ((409, 425), 'numpy.sin', 'np.sin', (['(0.9 * Xm)'], {}), '(0.9 * Xm)\n', (415, 425), True, 'import numpy as np\n'), ((428, 445), 'numpy.exp', 'np.exp', (['(1.0j * Tm)'], {}), '(1.0j * Tm)\n', (434, 445), True, 'import numpy as np\n'), ((454, 470), 'numpy.cos', 'np.cos', (['(1.1 * Xm)'], {}), '(1.1 * Xm)\n', (460, 470), True, 'import numpy as np\n'), ((473, 490), 'numpy.exp', 'np.exp', (['(2.0j * Tm)'], {}), '(2.0j * Tm)\n', (479, 490), True, 'import numpy as np\n'), ((524, 541), 'numpy.exp', 'np.exp', (['(3.0j * Tm)'], {}), '(3.0j * Tm)\n', (530, 541), True, 'import numpy as np\n'), ((575, 592), 'numpy.exp', 'np.exp', (['(4.0j * Tm)'], {}), '(4.0j * Tm)\n', (581, 592), True, 'import numpy as np\n'), ((626, 643), 'numpy.exp', 'np.exp', (['(6.0j * Tm)'], {}), '(6.0j * Tm)\n', (632, 643), True, 'import numpy as np\n'), ((677, 694), 'numpy.exp', 'np.exp', (['(8.0j * Tm)'], {}), '(8.0j * Tm)\n', (683, 694), 
True, 'import numpy as np\n'), ((728, 746), 'numpy.exp', 'np.exp', (['(10.0j * Tm)'], {}), '(10.0j * Tm)\n', (734, 746), True, 'import numpy as np\n'), ((780, 798), 'numpy.exp', 'np.exp', (['(12.0j * Tm)'], {}), '(12.0j * Tm)\n', (786, 798), True, 'import numpy as np\n'), ((813, 839), 'numpy.random.randn', 'np.random.randn', (['*Xm.shape'], {}), '(*Xm.shape)\n', (828, 839), True, 'import numpy as np\n'), ((857, 883), 'numpy.random.randn', 'np.random.randn', (['*Xm.shape'], {}), '(*Xm.shape)\n', (872, 883), True, 'import numpy as np\n'), ((505, 521), 'numpy.sin', 'np.sin', (['(1.2 * Xm)'], {}), '(1.2 * Xm)\n', (511, 521), True, 'import numpy as np\n'), ((556, 572), 'numpy.cos', 'np.cos', (['(1.3 * Xm)'], {}), '(1.3 * Xm)\n', (562, 572), True, 'import numpy as np\n'), ((607, 623), 'numpy.sin', 'np.sin', (['(2.0 * Xm)'], {}), '(2.0 * Xm)\n', (613, 623), True, 'import numpy as np\n'), ((658, 674), 'numpy.cos', 'np.cos', (['(2.1 * Xm)'], {}), '(2.1 * Xm)\n', (664, 674), True, 'import numpy as np\n'), ((709, 725), 'numpy.sin', 'np.sin', (['(5.7 * Xm)'], {}), '(5.7 * Xm)\n', (715, 725), True, 'import numpy as np\n'), ((761, 777), 'numpy.cos', 'np.cos', (['(5.9 * Xm)'], {}), '(5.9 * Xm)\n', (767, 777), True, 'import numpy as np\n'), ((1793, 1804), 'numpy.log', 'np.log', (['eig'], {}), '(eig)\n', (1799, 1804), True, 'import numpy as np\n'), ((360, 379), 'numpy.power', 'np.power', (['(Xm / 2)', '(2)'], {}), '(Xm / 2, 2)\n', (368, 379), True, 'import numpy as np\n'), ((939, 964), 'numpy.power', 'np.power', (['((Tm - 5) / 5)', '(2)'], {}), '((Tm - 5) / 5, 2)\n', (947, 964), True, 'import numpy as np\n'), ((1979, 2007), 'numpy.resize', 'np.resize', (['modes[i]', '[80, 1]'], {}), '(modes[i], [80, 1])\n', (1988, 2007), True, 'import numpy as np\n'), ((2056, 2070), 'numpy.log', 'np.log', (['eig[i]'], {}), '(eig[i])\n', (2062, 2070), True, 'import numpy as np\n'), ((906, 931), 'numpy.power', 'np.power', (['((Xm + 5) / 5)', '(2)'], {}), '((Xm + 5) / 5, 2)\n', (914, 931), True, 
'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
import uuid
import scipy.stats as stat
from math import log, gamma, exp, pi, sqrt, erf, atan
from scipy.special import gammainc
from scipy.interpolate import interp1d
import sys
def Exponential_rate(t, rate, alpha):
    """Constant hazard: the exponential distribution is memoryless.

    `t` and `alpha` are accepted only for interface uniformity with the
    other *_rate functions; they are unused.
    """
    return rate
def Weibull_rate(t, rate, alpha):
    """Hazard of a Weibull distribution with mean 1/rate and shape alpha.

    Only valid for alpha >= 1 (the hazard stays finite at t = 0).
    """
    scale_term = rate * gamma((alpha + 1) / alpha)
    beta = alpha * scale_term ** alpha
    return beta * t ** (alpha - 1)
def Gamma_rate(t, rate, alpha):
    """Hazard of a Gamma distribution with mean 1/rate and shape alpha."""
    if alpha < 1 and t == 0:
        # Density diverges at t = 0 for shape < 1; report zero hazard.
        return 0
    beta = alpha * rate  # rate parameter of the Gamma distribution
    density = beta ** alpha * t ** (alpha - 1) * exp(-beta * t) / gamma(alpha)
    cumulative = gammainc(alpha, beta * t)  # regularized lower incomplete gamma
    return density / (1 - cumulative)
def Gaussian_rate(t, rate, alpha):
    """Hazard of a normal distribution with mean 1/rate and std alpha/rate.

    `alpha` is the coefficient of variation (std/mean).
    """
    if alpha < 1 and t == 0:
        return 0
    mu = 1 / rate
    sigma = 1 / rate * alpha
    if t > mu + 7 * sigma:
        # Far tail: the cdf is numerically 1, so use the Mills-ratio
        # asymptotic hazard instead of pdf/(1-cdf).
        return (t - mu) / sigma ** 2
    density = exp(-(1 / 2) * (t - mu) ** 2 / sigma ** 2) / (sigma * sqrt(2 * pi))
    survival = 1 - 1 / 2 * (1 + erf((t - mu) / (sigma * sqrt(2))))
    return density / survival
def Lognormal_rate(t, rate, alpha):
    """Hazard of a lognormal with mean 1/rate and std (alpha/rate)."""
    std0 = 1 / rate * alpha
    mean0 = 1 / rate
    # Convert the target (mean, std) into the (mu, sigma) of the
    # underlying normal distribution.
    mu = log(mean0 ** 2 / sqrt(mean0 ** 2 + std0 ** 2))
    sigma = sqrt(log(1 + std0 ** 2 / mean0 ** 2))
    if t == 0:
        return 0
    density = exp(-(1 / 2) * (log(t) - mu) ** 2 / sigma ** 2) / (t * sigma * sqrt(2 * pi))
    survival = 1 - 1 / 2 * (1 + erf((log(t) - mu) / (sigma * sqrt(2))))
    return density / survival
def Cauchy_rate(t, rate, gam):
    """Hazard of a Cauchy distribution centred at 1/rate with scale gam."""
    center = 1 / rate
    z = (t - center) / gam
    density = 1 / (pi * gam * (1 + z ** 2))
    cumulative = (1 / pi) * atan(z) + 1 / 2
    return density / (1 - cumulative)
class counts:
    # Module-wide counter shared by all Reaction_channel instances; used to
    # auto-number channels when no name is supplied ("Number <k>").
    channel_numbers = 0
class Reaction_channel():
    """One reaction channel of a REGIR (non-Markovian Gillespie) model.

    Holds the fixed channel definition (reactants, products, mean rate,
    inter-event time distribution and its shape parameter) plus the mutable
    rejection-sampling state (rmax bound, accepted/rejected counters and
    the recorded waiting times).
    """

    def __repr__(self):
        return "Reaction_channel()"

    def __str__(self):
        print_str = ''
        print_str += '\n %s' % self.name
        print_str += '\n reactants = %s' % self.reactants
        print_str += '\n products = %s' % self.products
        print_str += '\n rate = %s' % self.rate
        print_str += '\n shape_parameter = %s' % self.shape_param
        print_str += '\n distribution = %s' % self.distribution
        print_str += '\n transfer_identity = %s' % self.transfer_identity
        return print_str

    def __init__(self, param_simulation, rate=1, shape_param=1, distribution='Weibull', name='', reactants=None, products=None, transfer_identity=False):
        """Create a channel and precompute its rejection bound rmax.

        param_simulation : object exposing at least `Tend` (used to bound
            rmax numerically for Lognormal/Cauchy channels).
        rate : mean reaction rate (1 / mean inter-event time).
        shape_param : shape parameter of the chosen distribution (for
            Gaussian/Lognormal it is the coefficient of variation).
        distribution : one of Exponential/Weibull/Gamma/Gaussian/
            Lognormal/Cauchy (several aliases accepted).
        transfer_identity : if True, a reactant that is also a product
            keeps its identity (and clock) through the reaction.
        """
        counts.channel_numbers += 1
        # --- Fixed parameters ---
        # Use None sentinels instead of shared mutable default lists
        # (backward compatible with the old `reactants=[]` defaults).
        self.reactants = [] if reactants is None else reactants  # reactant names (list of str)
        self.products = [] if products is None else products  # product names (list of str)
        self.rate = rate
        self.shape_param = shape_param
        if name == '':
            # BUG fix: the original computed this default and then
            # unconditionally overwrote it with the empty string.
            self.name = "Number %s" % (counts.channel_numbers)
        else:
            self.name = name
        self.distribution = distribution
        self.transfer_identity = transfer_identity
        # --- Variable parameters ---
        self.rmax = rate  # maximum reaction rate for this channel (rejection bound)
        self.wait_times = []  # recorded waiting times, for plotting only
        self.number_of_rejected_reactions = 0
        self.number_of_accepted_reactions = 0
        # --- Distribution-specific setup ---
        if rate < 0:
            print(' Reaction %s:' % name)
            print(' Rate cannot be negative')
            sys.exit()
        elif rate == 0:
            # A zero-rate channel never fires; treat it as Exponential.
            self.distribution = 'Exponential'
            self.rate_function = Exponential_rate
            self.rmax_fixed = True
        elif distribution.lower() in ['exponential', 'exp']:
            self.distribution = 'Exponential'
            self.rate_function = Exponential_rate
            self.rmax_fixed = True
        elif distribution.lower() in ['weibull', 'weib']:
            self.rate_function = Weibull_rate
            self.rmax_fixed = False
            if shape_param < 1:
                print(' Reaction %s:' % name)
                print(' Shape parameter < 1 for Weibull distribution is')
                print(' currently not supported in this implementation')
                print(' (Only non infinite rate at t=0 are supported)')
                sys.exit()
            if shape_param == 1:
                # Weibull with shape 1 is exactly exponential.
                self.distribution = 'Exponential'
                self.rate_function = Exponential_rate
                self.rmax_fixed = True
        elif distribution.lower() in ['gamma', 'gam']:
            self.rate_function = Gamma_rate
            self.rmax_fixed = False
            if shape_param < 1:
                print(' Reaction %s:' % name)
                print(' Shape parameter < 1 for Gamma distribution is')
                print(' currently not supported in this implementation')
                print(' (Only non infinite rate at t=0 are supported)')
                sys.exit()
            if shape_param == 1:
                # Gamma with shape 1 is exactly exponential.
                self.distribution = 'Exponential'
                self.rate_function = Exponential_rate
                self.rmax_fixed = True
        elif distribution.lower() in ['gaussian', 'normal', 'norm']:
            self.rate_function = Gaussian_rate
            self.rmax_fixed = False
            if shape_param <= 0:
                print(' Reaction %s:' % name)
                # Fixed copy-paste error: message said "LogNormal".
                print(' Zero or negative variance for Gaussian is not supported')
                sys.exit()
        elif distribution.lower() in ['lognormal', 'lognorm']:
            self.rate_function = Lognormal_rate
            if shape_param <= 0:
                print(' Reaction %s:' % name)
                print(' Zero or negative variance for Lognormal is not supported')
                sys.exit()
            elif shape_param >= 0.25:
                # Bound rmax numerically by sampling the hazard over [0, Tend].
                self.rmax_fixed = True
                t = np.linspace(0, param_simulation.Tend, num=100)
                rate_t = np.array([Lognormal_rate(ti, self.rate, self.shape_param) for ti in t])
                self.rmax = np.nanmax(rate_t[rate_t != np.inf])
            else:
                # Very small shape parameter: the peak hazard is huge, so
                # approximate the hazard as monotonically increasing instead.
                self.rmax_fixed = False
        elif distribution.lower() in ['cauchy', 'cau']:
            self.rate_function = Cauchy_rate
            self.rmax_fixed = True
            if shape_param <= 0:
                print(' Reaction %s:' % name)
                # Fixed copy-paste error: message said "Gaussian".
                print(' Zero or negative variance for Cauchy is not supported')
                sys.exit()
            else:
                # Bound rmax numerically by sampling the hazard over [0, Tend].
                rate_t = np.array([Cauchy_rate(ti, self.rate, self.shape_param) for ti in np.linspace(0, param_simulation.Tend, num=100)])
                self.rmax = np.max(rate_t)
        else:
            print(' Unsupported distribution: %s' % distribution)
            sys.exit()
        # Temporary rmax, inflated at run time to keep Dt << 1 valid.
        self.temp_rmax = self.rmax
class Reactant():
    """Individual reactant carrying per-entity state (a unique id).

    The id is used as the key under which each entity's last-reaction
    times are stored by the simulation.
    """
    def __init__(self, ID = None):
        if ID is None:
            self.id = self.gen_uuid() #unique cell ID
        else:
            self.id = ID

    def gen_uuid(self):
        """Return a 32-char hex version-4 UUID string.

        Uses the stdlib uuid4 (os.urandom-backed) instead of the original
        hand-rolled UUID built from random.getrandbits, which depended on
        the seedable global `random` state.
        """
        return uuid.uuid4().hex
class Gillespie_simulation():
def __repr__(self):
return "Gillespie_simulation()"
def __str__(self):
print_str = ''
print_str += 'REGIR Gillespie model:'
print_str += '\n'
print_str += '\n Initial population:'
print_str += '\n %s' % self.reactant_population_init
print_str += '\n'
print_str += '\n Reaction Channels:'
for ci in range(len(self.reaction_channel_list)):
print_str += ' %s\n' % self.reaction_channel_list[ci]
return print_str
def __init__(self, N_init, param, min_ratio = 10, print_warnings = False):
self.param = param
self.param.min_ratio = min_ratio
self.param.print_warnings = print_warnings
self.reaction_channel_list = [] #list of reaction chanels
self.reactant_population_init = N_init
    def reinitialize_pop(self):
        """
        Reset the reactant population to its initial state.
        Note that the inter-event times stored on each channel are not reset.
        """
        self.reactant_population = self.reactant_population_init.copy() # dictionary with reactant name as key and population as value
        self.reactant_list, self.reactant_times = self.initialise_reactant_list(self.reactant_population, self.reaction_channel_list) # dictionary with reactant name as key and list of reactant as value
def initialise_reactant_list(self,N_init, reaction_channel_list):
"""
Initialize Reactant list with a dynamic list of reactants
Input:
N_r1, N_r2, .. ,N_rk = N_init
Output:
dict of dict containing reactants for each reactant type:
the dict contains the reactant ID as key and reactant object as value
- reactant_list[r1] = contain list of reactant r1 (dictionary)
- reactant_list[r2] = contain list of reactant r2 (dictionary)
...
- reactant_list[rk] = contain list of reactant rk (dictionary)
"""
reactant_list = dict()
reactant_times = dict()
ci = 0
for ri in N_init:
react_i = []
react_times_i = dict()
for channel in reaction_channel_list:
react_times_i[channel.name] = dict()
if N_init[ri] > 0:
for j in range(N_init[ri]):
new_reactant = Reactant()
react_i.append(new_reactant)
for channel in reaction_channel_list:
react_times_i[channel.name][new_reactant.id] = 0
ci += 1
reactant_list[ri] = react_i
reactant_times[ri] = react_times_i
return reactant_list, reactant_times
    def run_simulations(self, Tend, verbose = True):
        """
        Run N_simulations independent Gillespie runs up to time Tend and
        return the recorded population trajectories as an array of shape
        (N_simulations, timepoints, n_reactant_types + 1); the last column
        is the total population. Also stored on self.population_compiled.
        """
        """Quick check if transfert ID = False for innapropriate reactions"""
        # Reject ambiguous configurations: transfer_identity with duplicated
        # products would create duplicate IDs in the clock dictionaries.
        for channel_i in self.reaction_channel_list:
            if len(set(channel_i.products)) < len(channel_i.products):
                if channel_i.transfer_identity:
                    print(" WARNING: Setting transfer_identity to True for channels where products are")
                    print(" of the same kind is ambigious due to duplicated ID in the dictionary")
                    sys.exit()
        population = np.empty((self.param.N_simulations, self.param.timepoints, len(self.reactant_population_init)+1))
        for ni in range(self.param.N_simulations):
            if verbose: print(" Simulation",ni+1,"/",self.param.N_simulations,"...")
            self.reinitialize_pop() #reset to initial conditions
            self.run_simulation(Tend)
            #G_simul.plot_populations_single()
            #G_simul.plot_inter_event_time_distribution()
            # NOTE(review): get_populations is defined outside this view —
            # presumably returns self.population_t; verify.
            population[ni,:,:] = self.get_populations()
        self.population_compiled = population
        return population
    def run_simulation(self, Tend):
        """
        Run one Gillespie simulation until time Tend.
        The run is short-circuited (time fast-forwarded) when all
        propensities are zero or when the total population exceeds
        1,000,000 reactants. Populations are recorded on a regular grid
        of `timepoints` samples into self.population_t.
        """
        timepoints = self.param.timepoints
        self.t = 0
        ti = 0
        self.Tend = Tend
        # -1 marks not-yet-recorded rows; filled in / interpolated at the end.
        self.population_t = -np.ones((timepoints,len(self.reactant_population)+1))
        while self.t < Tend: #Monte Carlo step
            if self.t >= ti*Tend/timepoints: #record the populations
                self.population_t[ti,:-1] = list(self.reactant_population.values())
                ti = int(self.t/Tend*(timepoints))+1
            propensity = self.compute_propensities()
            a0 = np.sum(propensity)
            if a0 == 0: #if propensities are zero, quickly end the simulation
                self.t += Tend/timepoints/2
            elif sum(self.reactant_population.values()) > 1000000: #if number of reactant is too high, quickly end the simulation to avoid exploding complexity
                self.t += Tend/timepoints/2
            else:
                #2 ----- Generate random time step (exponential distribution)
                r1 = random.random()
                tau = 1/a0*log(1/r1)
                self.t += tau
                #3 ----- Chose the reaction mu that will occurs (depends on propensities)
                r2 = random.random()
                mu = 0
                p_sum = 0.0
                while p_sum < r2*a0:
                    p_sum += propensity[mu]
                    mu += 1
                mu = mu - 1
                #4 ----- Perform the reaction
                self.perform_reaction(mu)
        # NOTE(review): interpolate_minusones is defined outside this view —
        # presumably fills the remaining -1 rows; verify.
        self.population_t = interpolate_minusones(self.population_t)
        # Last column = total population across all reactant types.
        self.population_t[:,-1] = np.sum(self.population_t, axis = 1)
    def compute_propensities(self):
        """
        Compute the propensity of each reaction channel at the current time:
        (product of reactant populations) * rmax. Channels whose rmax
        dominates the total (ratio below param.min_ratio) get their
        temporary rmax inflated so the rejection step stays accurate.
        """
        propensity = []
        # NOTE(review): update_all_rmax is defined outside this view —
        # presumably refreshes each channel's rmax bound; verify.
        self.update_all_rmax()
        N_reactants = []
        for reaction_chanel in self.reaction_channel_list:
            if len(reaction_chanel.reactants) > 0:
                # Mass-action combinatorial factor: product of populations.
                Nreactants = np.product([self.reactant_population[reactant_str] for reactant_str in reaction_chanel.reactants])
            else:
                Nreactants = 1
            N_reactants.append(Nreactants)
            propensity.append(Nreactants * reaction_chanel.rmax)
        for ri,reaction_chanel in enumerate(self.reaction_channel_list):
            if propensity[ri] > 0:
                ratio = np.sum(propensity)/reaction_chanel.rmax
                if np.isnan(ratio) or np.isinf(ratio):
                    print(' Error rmax should not be zero')
                    sys.exit()
                if ratio < self.param.min_ratio and reaction_chanel.distribution != 'Exponential':
                    # Inflate temp_rmax so the expected time step stays
                    # small relative to the hazard's variation.
                    reaction_chanel.temp_rmax = reaction_chanel.rmax*self.param.min_ratio/ratio
                    propensity[ri] = N_reactants[ri] * reaction_chanel.temp_rmax
        return propensity
    # NOTE(review): the string below is an orphaned note (a bare class-level
    # string expression, not attached to any method) — kept as-is.
    """
    Verify if the non-markovian assumption can be applied
    we require Dt to be at least 100 times smaller than sum(propensity)
    yield to a max error of 1%.
    """
def perform_reaction(self,mu):
"""
Perform the selected reaction mu by updating the populations of reactants and
products i.e. Remove reactant and add a product from their corresponding list
mu: index of the reaction
Note 1:
Some customization is necessary for this function if one want
to pass specific reactant properties to a product.
Note 2:
With the current implementation, Weibull rate are supported for channels
with only one reactant. If a chanel has more than one reactant, it is
considered as a Poisson process
"""
reaction_chanel = self.reaction_channel_list[mu]
reaction_rejected = True
if len(reaction_chanel.reactants) != 1: #more than 1 reactant or 0 reactants, cannot record the time before reaction
Dt = 0
if reaction_chanel.rate >= reaction_chanel.temp_rmax*random.random():
reaction_rejected = False
else:
reactant_str = reaction_chanel.reactants[0]
index, reactant = get_random_element(self.reactant_list[reactant_str])
reactant_id = reactant.id
Dt = self.t - self.reactant_times[reactant_str][reaction_chanel.name][reactant_id]
reactant_rate = reaction_chanel.rate_function(Dt, reaction_chanel.rate, reaction_chanel.shape_param)
if np.isnan(reactant_rate) or np.isinf(reactant_rate):
if self.param.print_warnings:
print('Problem: computed rate is', reactant_rate, 'for time %.2e' % Dt, 'and reaction', reaction_chanel.name)
reactant_rate = reaction_chanel.temp_rmax #ie accept the reaction
if reactant_rate >= reaction_chanel.temp_rmax*random.random():
reaction_rejected = False
if reaction_rejected == False: #perform the reaction
reaction_chanel.number_of_accepted_reactions += 1
reaction_chanel.wait_times.append(Dt)
for reactant_str in reaction_chanel.reactants:
if len(reaction_chanel.reactants) != 1:
index, reactant = get_random_element(self.reactant_list[reactant_str])
reactant_id = reactant.id
#Remove the reactant from reactant list and time list
if (reactant_str not in reaction_chanel.products) or reaction_chanel.transfer_identity == False: #dont remove the reactant if we transfert ID
self.reactant_list[reactant_str].pop(index)
for channel_i in self.reaction_channel_list:
self.reactant_times[reactant_str][channel_i.name].pop(reactant_id, None)
#update population value
self.reactant_population[reactant_str] -= 1
for product_str in reaction_chanel.products:
if (product_str in reaction_chanel.reactants) and reaction_chanel.transfer_identity:
product = Reactant(ID = reactant_id) #take the reactant with the same ID as before
else:
product = Reactant() #make a new reactant
product_id = product.id
#Add the reactant to reactant list and update time list
if (product_str not in reaction_chanel.reactants) or reaction_chanel.transfer_identity == False:
self.reactant_list[product_str].append(product)
for channel_i in self.reaction_channel_list: #need
self.reactant_times[product_str][channel_i.name][product_id] = self.t
#update population value
self.reactant_population[product_str] += 1
self.reactant_times[product_str][reaction_chanel.name][product_id] = self.t
else:
reaction_chanel.number_of_rejected_reactions += 1
def update_all_rmax(self):
"""
Update rmax for all chanells
Since the dict of reactant is ordered by t_start values,
the maximum rate is simply the on of first value of the dictionary
(this is only the case for monotically increasing rates)
"""
for reaction_chanel in self.reaction_channel_list:
if len(reaction_chanel.reactants) > 0:
reactant_str = reaction_chanel.reactants[0]
if (len(self.reactant_list[reactant_str]) > 0 and reaction_chanel.rmax_fixed == False):
first_reactant_ID, t_start_min = next(iter(self.reactant_times[reactant_str][reaction_chanel.name].items()))
tau_max = self.t - t_start_min
rmax = reaction_chanel.rate_function(tau_max, reaction_chanel.rate, reaction_chanel.shape_param)
if np.isnan(rmax) or np.isinf(rmax):
if self.param.print_warnings:
print('Problem computed rmax is', rmax, 'for time %.2e' % tau_max, 'and reaction', reaction_chanel.name)
rmax = 5*reaction_chanel.rate #most distribution rarely pass 5*r0
elif rmax > reaction_chanel.rate:
reaction_chanel.rmax = rmax
else:
rmax = reaction_chanel.rate
reaction_chanel.temp_rmax = reaction_chanel.rmax
def get_populations(self):
return self.population_t
def get_reactant_list(self):
return self.reactant_list
def get_channel_waiting_times(self):
return [channel.wait_times for channel in self.reaction_channel_list]
def plot_populations_single(self):
timepoints = self.population_t.shape[0]
time_points = np.linspace(0, self.Tend, timepoints)
plt.figure(figsize = (7,4))
plt.rcParams.update({'font.size': 16})
for ri, reactant in enumerate(self.reactant_population.keys()):
plt.plot(time_points, self.population_t[:,ri], 'k-', lw=2, alpha=1,color=sns.color_palette()[ri], label=reactant)
plt.xlabel('Time [%s]' % self.param.unit)
plt.ylabel('Population')
plt.legend()
plt.show()
def plot_populations(self, reactant_list = None, log_scale = False, color_list = [], figsize = (6.2, 4.2)):
reactant_poplist = list(self.reactant_population_init.keys())
reactant_poplist.append('Total')
if reactant_list is None:
reactant_list = reactant_poplist
if len(color_list) < len(reactant_list):
c_init = len(color_list)
for i in range(len(color_list),len(reactant_list)):
color_list.append(sns.color_palette()[i + 1 - c_init])
population = self.population_compiled
"""ploting the population"""
N_simulations = population.shape[0]
N_reactants = population.shape[2]
timepoints = population.shape[1]
time_points = np.linspace(0, self.Tend, timepoints)
lwm = 3
plt.figure(figsize = figsize)
plt.rcParams.update({'font.size': 16})
rii = 0
for ri in range(N_reactants):
if reactant_poplist[ri] in reactant_list:
for i in range(N_simulations):
plt.plot(time_points, population[i,:,ri], 'k-', lw=0.3, alpha=0.1,color=sns.color_palette()[0])
plt.plot(time_points, population[:,:,ri].mean(axis=0), 'r-', lw=lwm, color=color_list[rii], label=reactant_poplist[ri])
rii += 1
plt.xlabel('Time [%s]' % self.param.unit)
plt.ylabel('Population')
plt.legend()
if log_scale: plt.yscale('log')
plt.show()
def plot_inter_event_time_distribution(self, color_list = None, plot_fitted = True, plot_theory = True, theory_color = 'green', fitted_color = 'red', bins = None, figsize = (6.2, 4.2)):
for ri,reaction_chanel in enumerate(self.reaction_channel_list):
wait_times = np.array(reaction_chanel.wait_times)
print("\n Reaction channel %s:" % reaction_chanel.name)
if len(wait_times) == 0:
print(' This reaction has never occured')
else:
if len(reaction_chanel.reactants) != 1:
print(' This channel reacted %.2f times per simulation on average.' % (len(wait_times)/self.param.N_simulations))
print(' No distribution can be plotted as the definition of time')
print(' before reaction for non unique reactant is ambigious.')
else:
if color_list is None:
colori = sns.color_palette()[(ri+4) % 10]
else:
colori = color_list[ri]
#bins = np.linspace(0,30,30)
if bins is None: bins = 30
plt.figure(figsize = figsize)
plt.hist(wait_times, bins = bins, color = colori, density=True, edgecolor='black')
plt.xlabel("Time before reaction [%s]" % self.param.unit)
plt.ylabel("PDF")
plt.title(reaction_chanel.name)
t = np.linspace(np.max(wait_times)/100000, np.max(wait_times), 10000)
shape_param = reaction_chanel.shape_param
rate = reaction_chanel.rate
distribution = reaction_chanel.distribution
if distribution.lower() in ['exponential', 'exp']:
pdf_true = rate*np.exp(-rate*t)
P = stat.expon.fit(wait_times,floc=0)
pdf = stat.expon.pdf(t, *P)
elif distribution.lower() in ['weibull','weib']:
alpha = shape_param
beta = (alpha) * (rate * gamma((alpha + 1)/(alpha)))**(alpha)
pdf_true = beta*np.power(t,alpha-1)*np.exp(-beta*np.power(t,alpha)/(alpha))
if reaction_chanel.shape_param == 1:
P = stat.expon.fit(wait_times,floc=0)
pdf = stat.expon.pdf(t, *P)
else:
P = stat.weibull_min.fit(wait_times,3, floc=0, scale = 30)
pdf = stat.weibull_min.pdf(t, *P)
elif distribution.lower() in ['gaussian', 'normal', 'norm']:
mu = 1/rate
sigma = mu*shape_param
pdf_true = 1/(sigma*sqrt(2*pi)) * np.exp(-(1/2) * np.power((t-mu)/sigma,2))
P = stat.norm.fit(wait_times)
pdf = stat.norm.pdf(t, *P)
elif distribution.lower() in ['gamma','gam']:
alpha = shape_param
beta = alpha*rate
pdf_true = (beta**alpha)*np.power(t,alpha-1)*np.exp(-beta*t)/gamma(alpha)
P = stat.gamma.fit(wait_times,floc=0)
pdf = stat.gamma.pdf(t, *P)
elif distribution.lower() in ['lognormal','lognorm']:
mu0 = 1/rate
sigma0 = mu0*shape_param
mu = log(mu0**2 / sqrt(mu0**2 + sigma0**2))
sigma = sqrt(log(1+ sigma0**2/mu0**2))
pdf_true = 1/(t*sigma*sqrt(2*pi)) * np.exp(-(1/2) * np.power((np.log(t)-mu)/sigma,2))
P = stat.lognorm.fit(wait_times,floc=0)
pdf = stat.lognorm.pdf(t, *P)
elif distribution.lower() in ['cauchy', 'cau']:
gam = shape_param
mu = 1/rate
pdf_true = 1 / (pi*gam*(1 + ((t-mu)/gam)**2))
P = stat.cauchy.fit(wait_times)
pdf = stat.cauchy.pdf(t, *P)
if plot_fitted: plt.plot(t, pdf, fitted_color,lw=2, label = 'Fitted')
if plot_theory:plt.plot(t, pdf_true, theory_color,lw=2, linestyle = '--', label = 'Theory')
plt.legend()
if isinstance(bins, int):
plt.xlim(0,np.max(wait_times)*1.03)
else:
plt.xlim(0,np.max(bins)*1.03)
plt.show()
print(" Obtained rate is %.3f vs %.3f" % (1/np.mean(wait_times),reaction_chanel.rate))
print(" Corresponding to %.1fh vs %.1fh" % (np.mean(wait_times),1/reaction_chanel.rate))
if reaction_chanel.shape_param != 0:
print(" Fitted params [%.2f,%.2f] vs shape_param %.2f" % (P[0],P[1],reaction_chanel.shape_param))
print(" std is %.1fh" % (np.std(wait_times)))
print(" Number of rejected reactions = %s" % reaction_chanel.number_of_rejected_reactions)
print(" Number of accepted reactions = %s" % reaction_chanel.number_of_accepted_reactions)
ratio = reaction_chanel.number_of_rejected_reactions/reaction_chanel.number_of_accepted_reactions
print(" Accepted/rejected reactions ratio = %.2f" % ratio)
print()
print(' Notes:')
print(' - It is possible that the distribution do not match for reactions')
print(' where the same reactant is involved in more than one reaction,')
print(' or if at least one product of the reaction is a reactant.')
print()
print(' - Aslo, if not in the steady state, a continiously increasing population')
print(' will bias the time to event distribution to lower average time by')
print(' continiously generating new reactants with Dt = 0')
print()
print(' - Other reasons include a too small simulation time (Tend)')
print(' or not enough repetition of the Gillepie simulations.')
def get_model_in_SBML(self):
import simplesbml
"""
install:
pip install simplesbml
pip install tellurium
pip install REGIR
pip/conda install libSBML
See https://simplesbml.readthedocs.io/en/latest/
"""
def Distribution_index(distribution):
if distribution.lower() in ['exponential', 'exp']:
return 0
elif distribution.lower() in ['weibull','weib']:
return 1
elif distribution.lower() in ['gaussian', 'normal', 'norm']:
return 2
elif distribution.lower() in ['gamma','gam']:
return 3
elif distribution.lower() in ['lognormal','lognorm']:
return 4
elif distribution.lower() in ['cauchy', 'cau']:
return 5
N_init = self.reactant_population_init
model = simplesbml.SbmlModel()
model.addCompartment(1, comp_id='comp')
for entity,n_init in N_init.items():
model.addSpecies(entity, n_init, comp='comp')
Channel_list = self.reaction_channel_list
for ri,reaction_channel in enumerate(Channel_list):
rate = reaction_channel.rate
local_params = {}
local_params['shape_param'] = reaction_channel.shape_param
local_params['distribution_index'] = Distribution_index(reaction_channel.distribution)
local_params['transfer_identity'] = reaction_channel.transfer_identity
reactants_list = reaction_channel.reactants
product_list = reaction_channel.products
rate_label = 'r%s'%ri
model.addParameter(rate_label, rate)
rate_law = rate_label
for reactant in reactants_list:
rate_law += ' * %s' % reactant
from string import punctuation
rxn_id = reaction_channel.name.replace(' ','').replace(':','_').replace('->','_').replace('+','and').replace('-','_')
if any(p in rxn_id for p in punctuation):
model.addReaction(reactants_list, product_list, rate_law, local_params = local_params, rxn_id=rxn_id)
else:
rxn_id = 'reaction_%s'% ri
model.addReaction(reactants_list, product_list, rate_law, local_params = local_params, rxn_id=rxn_id)
"""
model.addReaction(reactants, products, expression, local_params={}, rxn_id='')
-> reactants and products are lists of species ids that the user wishes to
define as reactants and products, respectively.
-> Expression is a string that represents the reaction rate expression.
-> local_params is a dictionary where the keys are local parameter ids and the
values are the desired values of the respective parameters.
"""
return model
def get_random_element(a_huge_key_list):
L = len(a_huge_key_list)
i = np.random.randint(0, L)
return i, a_huge_key_list[i]
def interpolate_minusones(y):
"""
Replace -1 in the array by the interpolation between their neighbor non zeros points
y is a [t] x [n] array
"""
x = np.arange(y.shape[0])
ynew = np.zeros(y.shape)
for ni in range(y.shape[1]):
idx = np.where(y[:,ni] != -1)[0]
if len(idx)>1:
last_value = y[idx[-1],ni]
interp = interp1d(x[idx],y[idx,ni], kind='previous',fill_value=(0,last_value),bounds_error = False)
ynew[:,ni] = interp(x)
elif len(idx) == 1:
last_value = y[idx[-1],ni]
ynew[:,ni] = last_value
return ynew
if __name__ == "__main__":
plt.rcParams.update({'font.size': 16})
test = __import__('Examples/__TEST_REGIR_distributions')
test.main()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"numpy.sum",
"numpy.isnan",
"scipy.stats.cauchy.pdf",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.arange",
"numpy.product",
"numpy.exp",
"numpy.mean",
"scipy.interpolate.interp1d",
"scipy.stats.cauchy.fit",
"scipy.stats.... | [((704, 729), 'scipy.special.gammainc', 'gammainc', (['alpha', '(beta * t)'], {}), '(alpha, beta * t)\n', (712, 729), False, 'from scipy.special import gammainc\n'), ((34210, 34233), 'numpy.random.randint', 'np.random.randint', (['(0)', 'L'], {}), '(0, L)\n', (34227, 34233), True, 'import numpy as np\n'), ((34451, 34472), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (34460, 34472), True, 'import numpy as np\n'), ((34484, 34501), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (34492, 34501), True, 'import numpy as np\n'), ((34963, 35001), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (34982, 35001), True, 'import matplotlib.pyplot as plt\n'), ((681, 693), 'math.gamma', 'gamma', (['alpha'], {}), '(alpha)\n', (686, 693), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1436, 1467), 'math.log', 'log', (['(1 + sigma0 ** 2 / mu0 ** 2)'], {}), '(1 + sigma0 ** 2 / mu0 ** 2)\n', (1439, 1467), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((14311, 14344), 'numpy.sum', 'np.sum', (['self.population_t'], {'axis': '(1)'}), '(self.population_t, axis=1)\n', (14317, 14344), True, 'import numpy as np\n'), ((22067, 22104), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Tend', 'timepoints'], {}), '(0, self.Tend, timepoints)\n', (22078, 22104), True, 'import numpy as np\n'), ((22114, 22140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 4)'}), '(figsize=(7, 4))\n', (22124, 22140), True, 'import matplotlib.pyplot as plt\n'), ((22150, 22188), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (22169, 22188), True, 'import matplotlib.pyplot as plt\n'), ((22395, 22436), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Time [%s]' % self.param.unit)"], {}), "('Time [%s]' % self.param.unit)\n", (22405, 22436), True, 'import matplotlib.pyplot as 
plt\n'), ((22445, 22469), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population"""'], {}), "('Population')\n", (22455, 22469), True, 'import matplotlib.pyplot as plt\n'), ((22478, 22490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22488, 22490), True, 'import matplotlib.pyplot as plt\n'), ((22499, 22509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22507, 22509), True, 'import matplotlib.pyplot as plt\n'), ((23323, 23360), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Tend', 'timepoints'], {}), '(0, self.Tend, timepoints)\n', (23334, 23360), True, 'import numpy as np\n'), ((23385, 23412), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (23395, 23412), True, 'import matplotlib.pyplot as plt\n'), ((23423, 23461), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (23442, 23461), True, 'import matplotlib.pyplot as plt\n'), ((23956, 23980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population"""'], {}), "('Population')\n", (23966, 23980), True, 'import matplotlib.pyplot as plt\n'), ((23989, 24001), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23999, 24001), True, 'import matplotlib.pyplot as plt\n'), ((24050, 24060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24058, 24060), True, 'import matplotlib.pyplot as plt\n'), ((31882, 31904), 'simplesbml.SbmlModel', 'simplesbml.SbmlModel', ([], {}), '()\n', (31902, 31904), False, 'import simplesbml\n'), ((668, 682), 'math.exp', 'exp', (['(-beta * t)'], {}), '(-beta * t)\n', (671, 682), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1112, 1154), 'math.exp', 'exp', (['(-(1 / 2) * (t - mu) ** 2 / sigma ** 2)'], {}), '(-(1 / 2) * (t - mu) ** 2 / sigma ** 2)\n', (1115, 1154), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1393, 1421), 'math.sqrt', 'sqrt', (['(mu0 ** 2 + sigma0 ** 2)'], {}), '(mu0 ** 2 + 
sigma0 ** 2)\n', (1397, 1421), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1858, 1878), 'math.atan', 'atan', (['((t - mu) / gam)'], {}), '((t - mu) / gam)\n', (1862, 1878), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((3867, 3877), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3875, 3877), False, 'import sys\n'), ((13139, 13157), 'numpy.sum', 'np.sum', (['propensity'], {}), '(propensity)\n', (13145, 13157), True, 'import numpy as np\n'), ((23906, 23947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Time [%s]' % self.param.unit)"], {}), "('Time [%s]' % self.param.unit)\n", (23916, 23947), True, 'import matplotlib.pyplot as plt\n'), ((24024, 24041), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (24034, 24041), True, 'import matplotlib.pyplot as plt\n'), ((24385, 24421), 'numpy.array', 'np.array', (['reaction_chanel.wait_times'], {}), '(reaction_chanel.wait_times)\n', (24393, 24421), True, 'import numpy as np\n'), ((34549, 34573), 'numpy.where', 'np.where', (['(y[:, ni] != -1)'], {}), '(y[:, ni] != -1)\n', (34557, 34573), True, 'import numpy as np\n'), ((34659, 34756), 'scipy.interpolate.interp1d', 'interp1d', (['x[idx]', 'y[idx, ni]'], {'kind': '"""previous"""', 'fill_value': '(0, last_value)', 'bounds_error': '(False)'}), "(x[idx], y[idx, ni], kind='previous', fill_value=(0, last_value),\n bounds_error=False)\n", (34667, 34756), False, 'from scipy.interpolate import interp1d\n'), ((414, 440), 'math.gamma', 'gamma', (['((alpha + 1) / alpha)'], {}), '((alpha + 1) / alpha)\n', (419, 440), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((14716, 14818), 'numpy.product', 'np.product', (['[self.reactant_population[reactant_str] for reactant_str in reaction_chanel\n .reactants]'], {}), '([self.reactant_population[reactant_str] for reactant_str in\n reaction_chanel.reactants])\n', (14726, 14818), True, 'import numpy as np\n'), ((17341, 17364), 'numpy.isnan', 'np.isnan', 
(['reactant_rate'], {}), '(reactant_rate)\n', (17349, 17364), True, 'import numpy as np\n'), ((17368, 17391), 'numpy.isinf', 'np.isinf', (['reactant_rate'], {}), '(reactant_rate)\n', (17376, 17391), True, 'import numpy as np\n'), ((1098, 1110), 'math.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (1102, 1110), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1529, 1541), 'math.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (1533, 1541), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((8088, 8111), 'random.getrandbits', 'random.getrandbits', (['(128)'], {}), '(128)\n', (8106, 8111), False, 'import random\n'), ((11729, 11739), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11737, 11739), False, 'import sys\n'), ((13645, 13660), 'random.random', 'random.random', ([], {}), '()\n', (13658, 13660), False, 'import random\n'), ((13873, 13888), 'random.random', 'random.random', ([], {}), '()\n', (13886, 13888), False, 'import random\n'), ((15114, 15132), 'numpy.sum', 'np.sum', (['propensity'], {}), '(propensity)\n', (15120, 15132), True, 'import numpy as np\n'), ((15173, 15188), 'numpy.isnan', 'np.isnan', (['ratio'], {}), '(ratio)\n', (15181, 15188), True, 'import numpy as np\n'), ((15192, 15207), 'numpy.isinf', 'np.isinf', (['ratio'], {}), '(ratio)\n', (15200, 15207), True, 'import numpy as np\n'), ((15290, 15300), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15298, 15300), False, 'import sys\n'), ((16843, 16858), 'random.random', 'random.random', ([], {}), '()\n', (16856, 16858), False, 'import random\n'), ((17709, 17724), 'random.random', 'random.random', ([], {}), '()\n', (17722, 17724), False, 'import random\n'), ((25324, 25351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (25334, 25351), True, 'import matplotlib.pyplot as plt\n'), ((25374, 25452), 'matplotlib.pyplot.hist', 'plt.hist', (['wait_times'], {'bins': 'bins', 'color': 'colori', 'density': '(True)', 'edgecolor': 
'"""black"""'}), "(wait_times, bins=bins, color=colori, density=True, edgecolor='black')\n", (25382, 25452), True, 'import matplotlib.pyplot as plt\n'), ((25477, 25534), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Time before reaction [%s]' % self.param.unit)"], {}), "('Time before reaction [%s]' % self.param.unit)\n", (25487, 25534), True, 'import matplotlib.pyplot as plt\n'), ((25555, 25572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PDF"""'], {}), "('PDF')\n", (25565, 25572), True, 'import matplotlib.pyplot as plt\n'), ((25593, 25624), 'matplotlib.pyplot.title', 'plt.title', (['reaction_chanel.name'], {}), '(reaction_chanel.name)\n', (25602, 25624), True, 'import matplotlib.pyplot as plt\n'), ((28794, 28806), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (28804, 28806), True, 'import matplotlib.pyplot as plt\n'), ((29016, 29026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29024, 29026), True, 'import matplotlib.pyplot as plt\n'), ((13688, 13699), 'math.log', 'log', (['(1 / r1)'], {}), '(1 / r1)\n', (13691, 13699), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((21045, 21059), 'numpy.isnan', 'np.isnan', (['rmax'], {}), '(rmax)\n', (21053, 21059), True, 'import numpy as np\n'), ((21063, 21077), 'numpy.isinf', 'np.isinf', (['rmax'], {}), '(rmax)\n', (21071, 21077), True, 'import numpy as np\n'), ((22346, 22365), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (22363, 22365), True, 'import seaborn as sns\n'), ((23045, 23064), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (23062, 23064), True, 'import seaborn as sns\n'), ((25688, 25706), 'numpy.max', 'np.max', (['wait_times'], {}), '(wait_times)\n', (25694, 25706), True, 'import numpy as np\n'), ((26086, 26120), 'scipy.stats.expon.fit', 'stat.expon.fit', (['wait_times'], {'floc': '(0)'}), '(wait_times, floc=0)\n', (26100, 26120), True, 'import scipy.stats as stat\n'), ((26150, 26171), 'scipy.stats.expon.pdf', 'stat.expon.pdf', 
(['t', '*P'], {}), '(t, *P)\n', (26164, 26171), True, 'import scipy.stats as stat\n'), ((28604, 28656), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'pdf', 'fitted_color'], {'lw': '(2)', 'label': '"""Fitted"""'}), "(t, pdf, fitted_color, lw=2, label='Fitted')\n", (28612, 28656), True, 'import matplotlib.pyplot as plt\n'), ((28696, 28769), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'pdf_true', 'theory_color'], {'lw': '(2)', 'linestyle': '"""--"""', 'label': '"""Theory"""'}), "(t, pdf_true, theory_color, lw=2, linestyle='--', label='Theory')\n", (28704, 28769), True, 'import matplotlib.pyplot as plt\n'), ((1186, 1193), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1190, 1193), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1609, 1615), 'math.log', 'log', (['t'], {}), '(t)\n', (1612, 1615), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((1627, 1634), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1631, 1634), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((4717, 4727), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4725, 4727), False, 'import sys\n'), ((25101, 25120), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (25118, 25120), True, 'import seaborn as sns\n'), ((25661, 25679), 'numpy.max', 'np.max', (['wait_times'], {}), '(wait_times)\n', (25667, 25679), True, 'import numpy as np\n'), ((26042, 26059), 'numpy.exp', 'np.exp', (['(-rate * t)'], {}), '(-rate * t)\n', (26048, 26059), True, 'import numpy as np\n'), ((29496, 29514), 'numpy.std', 'np.std', (['wait_times'], {}), '(wait_times)\n', (29502, 29514), True, 'import numpy as np\n'), ((1557, 1563), 'math.log', 'log', (['t'], {}), '(t)\n', (1560, 1563), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((5372, 5382), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5380, 5382), False, 'import sys\n'), ((23709, 23728), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (23726, 23728), True, 'import seaborn as 
sns\n'), ((26593, 26627), 'scipy.stats.expon.fit', 'stat.expon.fit', (['wait_times'], {'floc': '(0)'}), '(wait_times, floc=0)\n', (26607, 26627), True, 'import scipy.stats as stat\n'), ((26661, 26682), 'scipy.stats.expon.pdf', 'stat.expon.pdf', (['t', '*P'], {}), '(t, *P)\n', (26675, 26682), True, 'import scipy.stats as stat\n'), ((26745, 26798), 'scipy.stats.weibull_min.fit', 'stat.weibull_min.fit', (['wait_times', '(3)'], {'floc': '(0)', 'scale': '(30)'}), '(wait_times, 3, floc=0, scale=30)\n', (26765, 26798), True, 'import scipy.stats as stat\n'), ((26834, 26861), 'scipy.stats.weibull_min.pdf', 'stat.weibull_min.pdf', (['t', '*P'], {}), '(t, *P)\n', (26854, 26861), True, 'import scipy.stats as stat\n'), ((27183, 27208), 'scipy.stats.norm.fit', 'stat.norm.fit', (['wait_times'], {}), '(wait_times)\n', (27196, 27208), True, 'import scipy.stats as stat\n'), ((27239, 27259), 'scipy.stats.norm.pdf', 'stat.norm.pdf', (['t', '*P'], {}), '(t, *P)\n', (27252, 27259), True, 'import scipy.stats as stat\n'), ((28889, 28907), 'numpy.max', 'np.max', (['wait_times'], {}), '(wait_times)\n', (28895, 28907), True, 'import numpy as np\n'), ((28976, 28988), 'numpy.max', 'np.max', (['bins'], {}), '(bins)\n', (28982, 28988), True, 'import numpy as np\n'), ((29223, 29242), 'numpy.mean', 'np.mean', (['wait_times'], {}), '(wait_times)\n', (29230, 29242), True, 'import numpy as np\n'), ((5908, 5918), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5916, 5918), False, 'import sys\n'), ((26440, 26462), 'numpy.power', 'np.power', (['t', '(alpha - 1)'], {}), '(t, alpha - 1)\n', (26448, 26462), True, 'import numpy as np\n'), ((27563, 27597), 'scipy.stats.gamma.fit', 'stat.gamma.fit', (['wait_times'], {'floc': '(0)'}), '(wait_times, floc=0)\n', (27577, 27597), True, 'import scipy.stats as stat\n'), ((27627, 27648), 'scipy.stats.gamma.pdf', 'stat.gamma.pdf', (['t', '*P'], {}), '(t, *P)\n', (27641, 27648), True, 'import scipy.stats as stat\n'), ((29114, 29133), 'numpy.mean', 'np.mean', 
(['wait_times'], {}), '(wait_times)\n', (29121, 29133), True, 'import numpy as np\n'), ((6224, 6234), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6232, 6234), False, 'import sys\n'), ((7378, 7388), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7386, 7388), False, 'import sys\n'), ((26363, 26389), 'math.gamma', 'gamma', (['((alpha + 1) / alpha)'], {}), '((alpha + 1) / alpha)\n', (26368, 26389), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((27522, 27534), 'math.gamma', 'gamma', (['alpha'], {}), '(alpha)\n', (27527, 27534), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((28103, 28139), 'scipy.stats.lognorm.fit', 'stat.lognorm.fit', (['wait_times'], {'floc': '(0)'}), '(wait_times, floc=0)\n', (28119, 28139), True, 'import scipy.stats as stat\n'), ((28169, 28192), 'scipy.stats.lognorm.pdf', 'stat.lognorm.pdf', (['t', '*P'], {}), '(t, *P)\n', (28185, 28192), True, 'import scipy.stats as stat\n'), ((6332, 6378), 'numpy.linspace', 'np.linspace', (['(0)', 'param_simulation.Tend'], {'num': '(100)'}), '(0, param_simulation.Tend, num=100)\n', (6343, 6378), True, 'import numpy as np\n'), ((6502, 6537), 'numpy.nanmax', 'np.nanmax', (['rate_t[rate_t != np.inf]'], {}), '(rate_t[rate_t != np.inf])\n', (6511, 6537), True, 'import numpy as np\n'), ((7075, 7085), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7083, 7085), False, 'import sys\n'), ((7269, 7283), 'numpy.max', 'np.max', (['rate_t'], {}), '(rate_t)\n', (7275, 7283), True, 'import numpy as np\n'), ((26473, 26491), 'numpy.power', 'np.power', (['t', 'alpha'], {}), '(t, alpha)\n', (26481, 26491), True, 'import numpy as np\n'), ((27099, 27111), 'math.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (27103, 27111), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((27129, 27158), 'numpy.power', 'np.power', (['((t - mu) / sigma)', '(2)'], {}), '((t - mu) / sigma, 2)\n', (27137, 27158), True, 'import numpy as np\n'), ((27506, 27523), 'numpy.exp', 'np.exp', (['(-beta * t)'], 
{}), '(-beta * t)\n', (27512, 27523), True, 'import numpy as np\n'), ((27939, 27970), 'math.log', 'log', (['(1 + sigma0 ** 2 / mu0 ** 2)'], {}), '(1 + sigma0 ** 2 / mu0 ** 2)\n', (27942, 27970), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((28462, 28489), 'scipy.stats.cauchy.fit', 'stat.cauchy.fit', (['wait_times'], {}), '(wait_times)\n', (28477, 28489), True, 'import scipy.stats as stat\n'), ((28520, 28542), 'scipy.stats.cauchy.pdf', 'stat.cauchy.pdf', (['t', '*P'], {}), '(t, *P)\n', (28535, 28542), True, 'import scipy.stats as stat\n'), ((27486, 27508), 'numpy.power', 'np.power', (['t', '(alpha - 1)'], {}), '(t, alpha - 1)\n', (27494, 27508), True, 'import numpy as np\n'), ((27876, 27904), 'math.sqrt', 'sqrt', (['(mu0 ** 2 + sigma0 ** 2)'], {}), '(mu0 ** 2 + sigma0 ** 2)\n', (27880, 27904), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((28011, 28023), 'math.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (28015, 28023), False, 'from math import log, gamma, exp, pi, sqrt, erf, atan\n'), ((7194, 7240), 'numpy.linspace', 'np.linspace', (['(0)', 'param_simulation.Tend'], {'num': '(100)'}), '(0, param_simulation.Tend, num=100)\n', (7205, 7240), True, 'import numpy as np\n'), ((28051, 28060), 'numpy.log', 'np.log', (['t'], {}), '(t)\n', (28057, 28060), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.