code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os.path as osp
import os
import sys
import argparse
import numpy as np
# add paths
parent_dir = osp.dirname(osp.abspath(__file__))
if parent_dir not in sys.path:
sys.path.append(parent_dir)
from src.autoencoder import Configuration as Conf
from src.point_net_ae import PointNetAutoEncoder
from src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, \
load_all_point_clouds_under_folder, \
load_all_point_clouds_from_filenames
from src.tf_utils import reset_tf_graph
from src.general_utils import plot_3d_point_cloud
from src.evaluation_metrics import minimum_mathing_distance, \
jsd_between_point_cloud_sets, coverage
# command line arguments
# fmt: off
# Command-line interface for the evaluation run.
parser = argparse.ArgumentParser()
parser.add_argument('--class_name', type=str, default='chair', help='Single class name (for example: chair) [default: chair]')
parser.add_argument('--experiment_name', type=str, default='single_class_ae', help='Folder for saving data form the training [default: single_class_ae]')
parser.add_argument('--batch_size', type=int, default=50, help='Batch size [default: 50]')
parser.add_argument('--restore_epoch', type=int, default=500, help='Take the checkpoint from this epoch [default: 500]')
# NOTE(review): with action='store_false' this flag defaults to True (use the
# pre-made splits) and passing --dont_use_splits turns them OFF. The name is
# confusing but the value is consumed consistently below.
parser.add_argument('--dont_use_splits', action='store_false', help='Use pre-split data from data/data_splits folder')
flags = parser.parse_args()
# fmt: on
print(("Evaluation flags:", flags))
# ##### MANUAL FLAGS
# flags.experiment_name = "train_ae_jar"
# flags.batch_size = 10
# flags.class_name = "jar"
## Define Basic Parameters
experiment_name = flags.experiment_name
class_name = flags.class_name
# class_name = raw_input('Give me the class name (e.g. "chair"): ').lower()
## Paths
project_dir = osp.dirname(osp.abspath(__file__))
top_in_dir = osp.join(project_dir, "data", "shape_net_core_uniform_samples_2048")
train_dir = osp.join(project_dir, "log", experiment_name)
# makedirs also creates the intermediate "log" directory on a fresh checkout
# (plain os.mkdir would fail with FileNotFoundError) and exist_ok avoids the
# exists()/mkdir race of the previous two-step check.
os.makedirs(train_dir, exist_ok=True)
eval_dir = osp.join(train_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)
## Load Point-Clouds - Test
if flags.dont_use_splits: # use predefined train/val/test splits
    # test.txt holds one point-cloud path per line; the final '' produced by
    # the trailing newline is dropped with [:-1]
    with open(osp.join(project_dir, "data", "data_splits", "test.txt"), "r") as f_test:
        filenames_test = f_test.read().split('\n')[:-1]
    pc_data_test = load_all_point_clouds_from_filenames(
        file_names=filenames_test, n_threads=8, file_ending=".ply", verbose=True)
else:
    # fall back to loading every .ply of the class straight from the dataset
    syn_id = snc_category_to_synth_id()[class_name]
    class_dir = osp.join(top_in_dir , syn_id)
    pc_data_test = load_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending='.ply', verbose=True)
## Load/restore pretrained model
try:
    conf = Conf.load(train_dir + '/configuration')
except Exception:
    # A bare `except:` would also swallow SystemExit/KeyboardInterrupt.
    # Catch Exception instead, and exit with a non-zero status so callers
    # (shell scripts, CI) can detect the failure; `exit()` is a site-module
    # helper that is not guaranteed to exist, sys.exit always is.
    print("Configuration cannot be loaded, check paths. Exiting...")
    sys.exit(1)
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(conf.train_dir, epoch=flags.restore_epoch)
## Evaluate AE
# evaluate() yields (reconstructions, per-cloud losses, input clouds, _, _)
reconstructions, losses, input_pointcloud, _, _ = ae.evaluate(pc_data_test, conf)
# Compare reconstructions with input data - evaluation metrics
# NOTE(review): "mathing" is the (misspelled) public name exported by
# src.evaluation_metrics; it computes the Minimum Matching Distance.
mmd, matched_dists = minimum_mathing_distance(reconstructions, input_pointcloud, flags.batch_size, normalize=True, use_EMD=False)
cov, matched_ids = coverage(reconstructions, input_pointcloud, flags.batch_size, normalize=True, use_EMD=False)
jsd = jsd_between_point_cloud_sets(reconstructions, input_pointcloud, resolution=28)
## Save outputs
# Reconstructions
file_name_reconstructions = ("_".join(["reconstructions", class_name, experiment_name]) + ".npy")
file_path_reconstructions = osp.join(eval_dir, file_name_reconstructions)
np.save(file_path_reconstructions, reconstructions)
# Losses
file_name_losses = "_".join(["ae_loss", class_name, experiment_name]) + ".npy"
file_path_losses = osp.join(eval_dir, file_name_losses)
np.save(file_path_losses, losses)
# save log file (line-buffered); the context manager guarantees the handle
# is flushed and closed even if one of the writes raises
log_file_name = "_".join(["eval_stats", class_name, experiment_name]) + ".txt"
with open(osp.join(eval_dir, log_file_name), "w", 1) as log_file:
    log_file.write("Mean ae loss: %.9f\n" % losses.mean())
    log_file.write("Minimum Mathing Distance (MMD) score: %.9f\n" % mmd)
    log_file.write("Coverage score: %.9f\n" % cov)
    log_file.write("Jensen-Shannon Divergence (JSD) score: %.9f\n" % jsd)
## Example visualization
# grab a small batch and render one decoded cloud as a visual sanity check
feed_pc, feed_model_names, _ = pc_data_test.next_batch(10)
reconstructions = ae.reconstruct(feed_pc)[0]
latent_codes = ae.transform(feed_pc)
test_id = 4
plot_3d_point_cloud(reconstructions[test_id][:, 0],
                    reconstructions[test_id][:, 1],
                    reconstructions[test_id][:, 2], in_u_sphere=True);
print("------- THE END of EVALUATION ----------")
| [
"sys.path.append",
"os.mkdir",
"src.evaluation_metrics.coverage",
"numpy.save",
"src.evaluation_metrics.minimum_mathing_distance",
"argparse.ArgumentParser",
"os.path.abspath",
"src.in_out.load_all_point_clouds_from_filenames",
"src.in_out.load_all_point_clouds_under_folder",
"src.in_out.snc_categ... | [((806, 831), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (829, 831), False, 'import argparse\n'), ((1886, 1954), 'os.path.join', 'osp.join', (['project_dir', '"""data"""', '"""shape_net_core_uniform_samples_2048"""'], {}), "(project_dir, 'data', 'shape_net_core_uniform_samples_2048')\n", (1894, 1954), True, 'import os.path as osp\n'), ((1967, 2012), 'os.path.join', 'osp.join', (['project_dir', '"""log"""', 'experiment_name'], {}), "(project_dir, 'log', experiment_name)\n", (1975, 2012), True, 'import os.path as osp\n'), ((2074, 2101), 'os.path.join', 'osp.join', (['train_dir', '"""eval"""'], {}), "(train_dir, 'eval')\n", (2082, 2101), True, 'import os.path as osp\n'), ((2933, 2949), 'src.tf_utils.reset_tf_graph', 'reset_tf_graph', ([], {}), '()\n', (2947, 2949), False, 'from src.tf_utils import reset_tf_graph\n'), ((2955, 3002), 'src.point_net_ae.PointNetAutoEncoder', 'PointNetAutoEncoder', (['conf.experiment_name', 'conf'], {}), '(conf.experiment_name, conf)\n', (2974, 3002), False, 'from src.point_net_ae import PointNetAutoEncoder\n'), ((3248, 3361), 'src.evaluation_metrics.minimum_mathing_distance', 'minimum_mathing_distance', (['reconstructions', 'input_pointcloud', 'flags.batch_size'], {'normalize': '(True)', 'use_EMD': '(False)'}), '(reconstructions, input_pointcloud, flags.\n batch_size, normalize=True, use_EMD=False)\n', (3272, 3361), False, 'from src.evaluation_metrics import minimum_mathing_distance, jsd_between_point_cloud_sets, coverage\n'), ((3376, 3473), 'src.evaluation_metrics.coverage', 'coverage', (['reconstructions', 'input_pointcloud', 'flags.batch_size'], {'normalize': '(True)', 'use_EMD': '(False)'}), '(reconstructions, input_pointcloud, flags.batch_size, normalize=\n True, use_EMD=False)\n', (3384, 3473), False, 'from src.evaluation_metrics import minimum_mathing_distance, jsd_between_point_cloud_sets, coverage\n'), ((3475, 3553), 'src.evaluation_metrics.jsd_between_point_cloud_sets', 
'jsd_between_point_cloud_sets', (['reconstructions', 'input_pointcloud'], {'resolution': '(28)'}), '(reconstructions, input_pointcloud, resolution=28)\n', (3503, 3553), False, 'from src.evaluation_metrics import minimum_mathing_distance, jsd_between_point_cloud_sets, coverage\n'), ((3716, 3761), 'os.path.join', 'osp.join', (['eval_dir', 'file_name_reconstructions'], {}), '(eval_dir, file_name_reconstructions)\n', (3724, 3761), True, 'import os.path as osp\n'), ((3762, 3813), 'numpy.save', 'np.save', (['file_path_reconstructions', 'reconstructions'], {}), '(file_path_reconstructions, reconstructions)\n', (3769, 3813), True, 'import numpy as np\n'), ((3922, 3958), 'os.path.join', 'osp.join', (['eval_dir', 'file_name_losses'], {}), '(eval_dir, file_name_losses)\n', (3930, 3958), True, 'import os.path as osp\n'), ((3959, 3992), 'numpy.save', 'np.save', (['file_path_losses', 'losses'], {}), '(file_path_losses, losses)\n', (3966, 3992), True, 'import numpy as np\n'), ((4590, 4728), 'src.general_utils.plot_3d_point_cloud', 'plot_3d_point_cloud', (['reconstructions[test_id][:, 0]', 'reconstructions[test_id][:, 1]', 'reconstructions[test_id][:, 2]'], {'in_u_sphere': '(True)'}), '(reconstructions[test_id][:, 0], reconstructions[test_id\n ][:, 1], reconstructions[test_id][:, 2], in_u_sphere=True)\n', (4609, 4728), False, 'from src.general_utils import plot_3d_point_cloud\n'), ((116, 137), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (127, 137), True, 'import os.path as osp\n'), ((174, 201), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (189, 201), False, 'import sys\n'), ((1850, 1871), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (1861, 1871), True, 'import os.path as osp\n'), ((2020, 2041), 'os.path.exists', 'osp.exists', (['train_dir'], {}), '(train_dir)\n', (2030, 2041), True, 'import os.path as osp\n'), ((2043, 2062), 'os.mkdir', 'os.mkdir', (['train_dir'], {}), '(train_dir)\n', (2051, 
2062), False, 'import os\n'), ((2109, 2129), 'os.path.exists', 'osp.exists', (['eval_dir'], {}), '(eval_dir)\n', (2119, 2129), True, 'import os.path as osp\n'), ((2131, 2149), 'os.mkdir', 'os.mkdir', (['eval_dir'], {}), '(eval_dir)\n', (2139, 2149), False, 'import os\n'), ((2408, 2522), 'src.in_out.load_all_point_clouds_from_filenames', 'load_all_point_clouds_from_filenames', ([], {'file_names': 'filenames_test', 'n_threads': '(8)', 'file_ending': '""".ply"""', 'verbose': '(True)'}), "(file_names=filenames_test, n_threads=8,\n file_ending='.ply', verbose=True)\n", (2444, 2522), False, 'from src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, load_all_point_clouds_under_folder, load_all_point_clouds_from_filenames\n'), ((2603, 2631), 'os.path.join', 'osp.join', (['top_in_dir', 'syn_id'], {}), '(top_in_dir, syn_id)\n', (2611, 2631), True, 'import os.path as osp\n'), ((2652, 2749), 'src.in_out.load_all_point_clouds_under_folder', 'load_all_point_clouds_under_folder', (['class_dir'], {'n_threads': '(8)', 'file_ending': '""".ply"""', 'verbose': '(True)'}), "(class_dir, n_threads=8, file_ending=\n '.ply', verbose=True)\n", (2686, 2749), False, 'from src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, load_all_point_clouds_under_folder, load_all_point_clouds_from_filenames\n'), ((2805, 2844), 'src.autoencoder.Configuration.load', 'Conf.load', (["(train_dir + '/configuration')"], {}), "(train_dir + '/configuration')\n", (2814, 2844), True, 'from src.autoencoder import Configuration as Conf\n'), ((4105, 4138), 'os.path.join', 'osp.join', (['eval_dir', 'log_file_name'], {}), '(eval_dir, log_file_name)\n', (4113, 4138), True, 'import os.path as osp\n'), ((2548, 2574), 'src.in_out.snc_category_to_synth_id', 'snc_category_to_synth_id', ([], {}), '()\n', (2572, 2574), False, 'from src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, load_all_point_clouds_under_folder, load_all_point_clouds_from_filenames\n'), 
((2259, 2315), 'os.path.join', 'osp.join', (['project_dir', '"""data"""', '"""data_splits"""', '"""test.txt"""'], {}), "(project_dir, 'data', 'data_splits', 'test.txt')\n", (2267, 2315), True, 'import os.path as osp\n')] |
import numpy as np

# 3x4 array holding the integers 10..21 (inclusive).
a = np.arange(10, 22).reshape((3, 4))
print("Original array:")
print(a)
print("Each element of the array is:")
# np.nditer walks the array element by element in memory order
for x in np.nditer(a):
    print(x, end=" ")
"numpy.nditer",
"numpy.arange"
] | [((138, 150), 'numpy.nditer', 'np.nditer', (['a'], {}), '(a)\n', (147, 150), True, 'import numpy as np\n'), ((23, 40), 'numpy.arange', 'np.arange', (['(10)', '(22)'], {}), '(10, 22)\n', (32, 40), True, 'import numpy as np\n')] |
"""PCA anomaly detection a la Shyu, Chen, Sarinnaparkorn and Chang.
A function and an scikit-learn style anomaly detector based off of
Shyu, Chen, Sarinnapakorn and Chang's paper 'A Novel Anomaly Detection
Scheme Based on Prinicpal Component Classifier'.
The scheme has three steps:
1. Get a robust representation of the input data X by trimming
extreme observatons in terms of the Mahalanobis distance from the
mean. Done with the `trim` function.
2. Train a classifier off of the robust representative. The classifier
is built using principal components analysis of the robust matrix.
Done with a `PCADetector` object's `fit` method.
3. Categorize data as normal or anomalous. Done with a fitted
`PCADetector` object's `predict` method.
"""
# general imports
import numpy as np
# import for trimming function
from heapq import nsmallest
# imports for ML
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
class MahalanobisTrimmer(BaseEstimator, TransformerMixin):
    """Mahalanobis distance based trimmer.

    Trim the most extreme elements of the data set X in terms of
    Mahalanobis distance to the mean. Gamma determines the proportion
    of data to remove.

    Parameters
    ----------
    gamma : float, optional (default=0.005)
        The proportion of data to be trimmed.
    """

    def __init__(self, gamma=0.005):
        self.gamma = gamma

    def fit(self, X, y=None):
        """Learn the feature means and inverse correlation matrix.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            The data to fit on.
        """
        # number of rows to keep after trimming the gamma-most extreme
        self._n_keep = int(X.shape[0] * (1 - self.gamma))
        # feature correlation matrix, shape (n_features, n_features)
        S = np.corrcoef(X.transpose())
        self._S_inv = np.linalg.inv(S)
        # BUG FIX: the mean must be taken per feature (axis=0). The original
        # axis=1 produced a vector of per-*sample* means whose length
        # (n_samples) does not match a data row, breaking the distance below.
        self.means_ = np.mean(X, axis=0)
        return self

    def transform(self, X, y=None):
        """Return X with the gamma-most extreme rows removed.

        Returns
        -------
        X_trim : array of shape (n_keep, n_features)
            The data with the most extreme observations removed.
        """
        x_bar = self.means_
        S_inv = self._S_inv

        # squared Mahalanobis-style distance of a row from the feature mean
        def dist_man(x): return (x - x_bar) @ S_inv @ (x - x_bar)
        # keep only the n_keep rows closest to the mean
        X_trim = nsmallest(self._n_keep, X, key=dist_man)
        return np.array(X_trim)
class PCADetector(BaseEstimator):
    """Anomaly detection using PCA.

    A scikit-learn style anomaly detector based off of Shyu, Chen,
    Sarinnapakorn and Chang's paper 'A Novel Anomaly Detection Scheme
    Based on Prinicpal Component Classifier'.

    Parameters
    ----------
    alpha : float, optional (default=0.05)
        Acceptable false positive rate.
    n_major : int (optional, default=None)
        Number of principal components to use for major component, i.e.,
        components detecting general trend of the data. If None passed
        then takes the first `n_major` principal components which account
        for 85% of total variance.
    n_minor : int (optional, default=None)
        Number of principal components to use for minor component, i.e.,
        components detecting the general correlations between features.
        If None, then uses all components which individually account for
        less than 20% of the total variance.
    """

    def __init__(self, alpha=0.05, n_major=None, n_minor=None):
        # public attributes
        self.alpha = alpha
        self.n_major = n_major
        self.n_minor = n_minor

    def _get_eigens(self, X_trim):
        """Get the correlation matrix and its eigenvectors/values."""
        self._S = np.corrcoef(X_trim.transpose())
        # np.linalg.eigh returns eigenvalues in ascending order
        self._eigvals, self._eigvects = np.linalg.eigh(self._S)

    def _autoset_n_components(self):
        """Resolve n_major / n_minor from explained variance when unset.

        Must run after `_get_eigens` and before any call to `_get_comps`.
        """
        # fraction of total variance per component, sorted largest first
        var = np.sort(self._eigvals / self._eigvals.sum())[::-1]
        if self.n_major is None:
            # smallest count of components covering 85% of total variance
            self.n_major = 1 + (var.cumsum() < 0.85).sum()
        if self.n_minor is None:
            # components individually explaining < 20% of total variance,
            # capped so the major and minor blocks cannot overlap
            n_minor = (var < 0.2).sum()
            n_minor_max = len(self._eigvals) - self.n_major
            self.n_minor = min(n_minor_max, n_minor)

    def _get_comps(self, z):
        """Project a normalized vector onto the major and minor components.

        Parameters
        ----------
        z : array
            A normalized vector, with mean zero and standard deviation one.

        Returns
        -------
        major : float
            Scaled projection of z onto the major components.
        minor : float
            Scaled projection of z onto the minor components.
        """
        # NOTE(review): this projects with `eigvects @ z`; since eigh stores
        # eigenvectors as columns, `eigvects.T @ z` may have been intended —
        # kept as in the original, confirm against the paper.
        y = self._eigvects @ z
        summands = y ** 2 / self._eigvals
        # BUG FIX: these counts were previously read from non-existent
        # attributes (self.p_major / self._eigenvals), raising AttributeError.
        p_major = self.n_major
        p_minor = len(self._eigvals) - self.n_minor
        major = summands[:p_major].sum()
        minor = summands[p_minor:].sum()
        return major, minor

    def fit(self, X_trim, y=None):
        """Fit the classifier to the trimmed data."""
        # get eigenvalues and vectors
        self._get_eigens(X_trim)
        # BUG FIX: n_major / n_minor must be resolved *before* `_get_comps`
        # runs below; the original set them at the end of fit, so fitting
        # with the default n_major=None / n_minor=None always failed.
        self._autoset_n_components()
        # instantiate scaler for z-scores
        scaler = StandardScaler()
        scaler.fit(X_trim)
        self._scaler = scaler
        Z = scaler.transform(X_trim)
        # get major and minor components of each input vector
        self.majors_ = []
        self.minors_ = []
        for i in range(X_trim.shape[0]):
            major, minor = self._get_comps(Z[i, :])
            self.majors_.append(major)
            self.minors_.append(minor)
        # classification cutoffs at the (1 - alpha) empirical quantile
        # (BUG FIX: read from majors_/minors_, not the misspelled _majors)
        c1 = np.percentile(self.majors_, 100 * (1 - self.alpha))
        c2 = np.percentile(self.minors_, 100 * (1 - self.alpha))
        self.c_ = (c1, c2)
        return self

    def predict(self, X, y=None):
        """Classify as normal or anomalous.

        Returns
        -------
        cl : array
            Whether normal (1) or anomalous (-1).
        """
        Z = self._scaler.transform(X)
        cl = []
        for i in range(Z.shape[0]):
            z_major, z_minor = self._get_comps(Z[i, :])
            if (z_major < self.c_[0]) & (z_minor < self.c_[1]):
                cl.append(1)
            else:
                cl.append(-1)
        return np.array(cl)
| [
"heapq.nsmallest",
"sklearn.preprocessing.StandardScaler",
"numpy.percentile",
"numpy.linalg.eigh",
"numpy.sort",
"numpy.mean",
"numpy.linalg.inv",
"numpy.array"
] | [((1911, 1927), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (1924, 1927), True, 'import numpy as np\n'), ((2010, 2028), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (2017, 2028), True, 'import numpy as np\n'), ((2287, 2327), 'heapq.nsmallest', 'nsmallest', (['self._n_keep', 'X'], {'key': 'dist_man'}), '(self._n_keep, X, key=dist_man)\n', (2296, 2327), False, 'from heapq import nsmallest\n'), ((2343, 2359), 'numpy.array', 'np.array', (['X_trim'], {}), '(X_trim)\n', (2351, 2359), True, 'import numpy as np\n'), ((3817, 3840), 'numpy.linalg.eigh', 'np.linalg.eigh', (['self._S'], {}), '(self._S)\n', (3831, 3840), True, 'import numpy as np\n'), ((4925, 4941), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4939, 4941), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5375, 5426), 'numpy.percentile', 'np.percentile', (['self._majors', '(100 * (1 - self.alpha))'], {}), '(self._majors, 100 * (1 - self.alpha))\n', (5388, 5426), True, 'import numpy as np\n'), ((5440, 5491), 'numpy.percentile', 'np.percentile', (['self._minors', '(100 * (1 - self.alpha))'], {}), '(self._minors, 100 * (1 - self.alpha))\n', (5453, 5491), True, 'import numpy as np\n'), ((6696, 6708), 'numpy.array', 'np.array', (['cl'], {}), '(cl)\n', (6704, 6708), True, 'import numpy as np\n'), ((5711, 5729), 'numpy.sort', 'np.sort', (['variances'], {}), '(variances)\n', (5718, 5729), True, 'import numpy as np\n')] |
from typing import List
import dgl.function as fn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import SumPooling, AvgPooling
from ogb.graphproppred.mol_encoder import AtomEncoder
def aggregate_mean(h):
    """Average neighbour messages over the neighbour axis (dim 1)."""
    return h.mean(dim=1)
def aggregate_max(h):
    """Element-wise maximum over the neighbour axis (dim 1)."""
    return h.max(dim=1).values
def aggregate_min(h):
    """Element-wise minimum over the neighbour axis (dim 1)."""
    return h.min(dim=1).values
def aggregate_sum(h):
    """Sum of neighbour messages over the neighbour axis (dim 1)."""
    return h.sum(dim=1)
def aggregate_var(h):
    """Variance over the neighbour axis: E[h^2] - E[h]^2, clamped at 0."""
    mu = h.mean(dim=1)
    second_moment = (h * h).mean(dim=1)
    # relu guards against tiny negative values from floating-point error
    return torch.relu(second_moment - mu * mu)
def aggregate_std(h):
    """Standard deviation over the neighbour axis (epsilon-stabilised)."""
    # the 1e-5 term keeps sqrt's gradient finite when the variance is zero
    return (aggregate_var(h) + 1e-5).sqrt()
# Name -> callable lookup used by SimplePNAConv to resolve aggregator strings.
AGGREGATORS = {'mean': aggregate_mean, 'sum': aggregate_sum, 'max': aggregate_max,
               'min': aggregate_min, 'std': aggregate_std, 'var': aggregate_var}
def scale_identity(h, D, delta):
    """Identity scaler: leave the aggregated features untouched."""
    return h
def scale_amplification(h, D, delta):
    """Amplify features of high-degree nodes by log(D + 1) / delta."""
    factor = np.log(D + 1) / delta
    return h * factor
def scale_attenuation(h, D, delta):
    """Attenuate features of high-degree nodes by delta / log(D + 1)."""
    factor = delta / np.log(D + 1)
    return h * factor
# Name -> callable lookup used by SimplePNAConv to resolve scaler strings.
SCALERS = {
    'identity': scale_identity,
    'amplification': scale_amplification,
    'attenuation': scale_attenuation
}
class MLP(nn.Module):
    def __init__(self,
                 in_feat_size: int,
                 out_feat_size: int,
                 num_layers: int = 3,
                 decreasing_hidden_size=False):
        """Multilayer Perceptron (MLP).

        With `decreasing_hidden_size`, the hidden width is halved at every
        layer (in, in/2, in/4, ..., out); otherwise every layer after the
        first maps out_feat_size -> out_feat_size. ReLU is applied between
        layers but not after the last one.
        """
        super(MLP, self).__init__()
        # build the chain of (input, output) widths, then one Linear per pair
        if decreasing_hidden_size:
            sizes = [in_feat_size // 2 ** i for i in range(num_layers)] + [out_feat_size]
        else:
            sizes = [in_feat_size] + [out_feat_size] * num_layers
        self.layers = nn.ModuleList(
            nn.Linear(d_in, d_out) for d_in, d_out in zip(sizes[:-1], sizes[1:]))
        self.num_layers = num_layers

    def forward(self, h):
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            h = layer(h)
            # no activation on the output layer
            if idx != last:
                h = F.relu(h)
        return h
class SimplePNAConv(nn.Module):
    r"""A simplified PNAConv variant used in OGB submissions"""

    def __init__(self,
                 feat_size: int,
                 aggregators: List[str],
                 scalers: List[str],
                 delta: float,
                 dropout: float,
                 batch_norm: bool,
                 residual: bool,
                 num_mlp_layers: int):
        super(SimplePNAConv, self).__init__()
        # resolve aggregator / scaler names to their callables up front
        self.aggregators = [AGGREGATORS[name] for name in aggregators]
        self.scalers = [SCALERS[name] for name in scalers]
        self.delta = delta
        # the reduce step concatenates every (aggregator, scaler) combination
        mlp_in = len(aggregators) * len(scalers) * feat_size
        self.mlp = MLP(in_feat_size=mlp_in, out_feat_size=feat_size,
                       num_layers=num_mlp_layers)
        self.dropout = nn.Dropout(dropout)
        self.residual = residual
        self.bn = nn.BatchNorm1d(feat_size) if batch_norm else None

    def reduce(self, nodes):
        """Aggregate mailbox messages with every aggregator, then scale."""
        msgs = nodes.mailbox['m']
        degree = msgs.shape[-2]
        agg = torch.cat([agg_fn(msgs) for agg_fn in self.aggregators], dim=1)
        out = torch.cat([scale_fn(agg, D=degree, delta=self.delta)
                         for scale_fn in self.scalers], dim=1)
        return {'h': out}

    def forward(self, g, h):
        with g.local_scope():
            g.ndata['h'] = h
            g.update_all(fn.copy_u('h', 'm'), self.reduce)
            out = self.mlp(g.ndata['h'])
            if self.bn is not None:
                out = self.bn(out)
            out = F.relu(out)
            if self.residual:
                out = out + h
            return self.dropout(out)
class PNA(nn.Module):
    def __init__(self,
                 data_info: dict,
                 embed_size: int = 80,
                 aggregators: str = 'mean max min std',
                 scalers: str = 'identity amplification attenuation',
                 dropout: float = 0.3,
                 batch_norm: bool = True,
                 residual: bool = True,
                 num_mlp_layers: int = 1,
                 num_layers: int = 4,
                 readout: str = 'mean'):
        """Principal Neighbourhood Aggregation

        Parameters
        ----------
        data_info : dict
            The information about the input dataset.
        embed_size : int
            Embedding size.
        aggregators : str
            Aggregation function names separated by space, can include mean, max, min, std, sum
        scalers : str
            Scaler function names separated by space, can include identity, amplification, and attenuation
        dropout : float
            Dropout rate.
        batch_norm : bool
            Whether to use batch normalization.
        residual : bool
            Whether to use residual connection.
        num_mlp_layers : int
            Number of MLP layers to use after message aggregation in each PNA layer.
        num_layers : int
            Number of PNA layers.
        readout : str
            Readout for computing graph-level representations, can be 'sum' or 'mean'.
        """
        super(PNA, self).__init__()
        self.data_info = data_info
        self.embed_size = embed_size
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.num_mlp_layers = num_mlp_layers
        self.num_layers = num_layers
        self.readout = readout
        # parse the space-separated aggregator / scaler strings into lists
        if aggregators is None:
            aggregators = ['mean', 'max', 'min', 'std']
        else:
            aggregators = [agg.strip() for agg in aggregators.split(' ')]
        # NOTE(review): validation via `assert` is stripped under `python -O`;
        # raising ValueError would be safer, kept as-is to preserve behavior.
        assert set(aggregators).issubset({'mean', 'max', 'min', 'std', 'sum'}), \
            "Expect aggregators to be a subset of ['mean', 'max', 'min', 'std', 'sum'], \
            got {}".format(aggregators)
        if scalers is None:
            scalers = ['identity', 'amplification', 'attenuation']
        else:
            scalers = [scl.strip() for scl in scalers.split(' ')]
        assert set(scalers).issubset({'identity', 'amplification', 'attenuation'}), \
            "Expect scalers to be a subset of ['identity', 'amplification', 'attenuation'], \
            got {}".format(scalers)
        self.aggregators = aggregators
        self.scalers = scalers
        # OGB molecule datasets ship categorical atom features; other
        # datasets are assumed to provide dense node features of known size
        if data_info['name'] in ['ogbg-molhiv', 'ogbg-molpcba']:
            self.node_encoder = AtomEncoder(embed_size)
        else:
            # Handle other datasets
            self.node_encoder = nn.Linear(data_info['node_feat_size'], embed_size)
        self.conv_layers = nn.ModuleList([SimplePNAConv(feat_size=embed_size,
                                                     aggregators=aggregators,
                                                     scalers=scalers,
                                                     delta=data_info['delta'],
                                                     dropout=dropout,
                                                     batch_norm=batch_norm,
                                                     residual=residual,
                                                     num_mlp_layers=num_mlp_layers)
                                       for _ in range(num_layers)])
        if readout == 'sum':
            self.pool = SumPooling()
        elif readout == 'mean':
            self.pool = AvgPooling()
        else:
            raise ValueError("Expect readout to be 'sum' or 'mean', got {}".format(readout))
        self.pred = MLP(embed_size, data_info['out_size'], decreasing_hidden_size=True)

    def forward(self, graph, node_feat, edge_feat=None):
        # encode -> message passing -> graph readout -> prediction head
        hn = self.node_encoder(node_feat)
        for conv in self.conv_layers:
            hn = conv(graph, hn)
        hg = self.pool(graph, hn)
        return self.pred(hg)
| [
"torch.mean",
"torch.nn.Dropout",
"torch.relu",
"numpy.log",
"torch.nn.ModuleList",
"dgl.nn.SumPooling",
"dgl.function.copy_u",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"dgl.nn.AvgPooling",
"torch.max",
"torch.nn.functional.relu",
"torch.sum",
"torch.min",
"ogb.graphproppred.mol_encode... | [((294, 314), 'torch.mean', 'torch.mean', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (304, 314), False, 'import torch\n'), ((541, 560), 'torch.sum', 'torch.sum', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (550, 560), False, 'import torch\n'), ((636, 660), 'torch.mean', 'torch.mean', (['(h * h)'], {'dim': '(1)'}), '(h * h, dim=1)\n', (646, 660), False, 'import torch\n'), ((674, 694), 'torch.mean', 'torch.mean', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (684, 694), False, 'import torch\n'), ((705, 749), 'torch.relu', 'torch.relu', (['(h_mean_squares - h_mean * h_mean)'], {}), '(h_mean_squares - h_mean * h_mean)\n', (715, 749), False, 'import torch\n'), ((375, 394), 'torch.max', 'torch.max', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (384, 394), False, 'import torch\n'), ((458, 477), 'torch.min', 'torch.min', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (467, 477), False, 'import torch\n'), ((1784, 1799), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1797, 1799), True, 'import torch.nn as nn\n'), ((3380, 3399), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3390, 3399), True, 'import torch.nn as nn\n'), ((4121, 4134), 'torch.nn.functional.relu', 'F.relu', (['h_new'], {}), '(h_new)\n', (4127, 4134), True, 'import torch.nn.functional as F\n'), ((1225, 1238), 'numpy.log', 'np.log', (['(D + 1)'], {}), '(D + 1)\n', (1231, 1238), True, 'import numpy as np\n'), ((1339, 1352), 'numpy.log', 'np.log', (['(D + 1)'], {}), '(D + 1)\n', (1345, 1352), True, 'import numpy as np\n'), ((3479, 3504), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['feat_size'], {}), '(feat_size)\n', (3493, 3504), True, 'import torch.nn as nn\n'), ((6997, 7020), 'ogb.graphproppred.mol_encoder.AtomEncoder', 'AtomEncoder', (['embed_size'], {}), '(embed_size)\n', (7008, 7020), False, 'from ogb.graphproppred.mol_encoder import AtomEncoder\n'), ((7103, 7153), 'torch.nn.Linear', 'nn.Linear', (["data_info['node_feat_size']", 'embed_size'], {}), 
"(data_info['node_feat_size'], embed_size)\n", (7112, 7153), True, 'import torch.nn as nn\n'), ((7907, 7919), 'dgl.nn.SumPooling', 'SumPooling', ([], {}), '()\n', (7917, 7919), False, 'from dgl.nn import SumPooling, AvgPooling\n'), ((2055, 2118), 'torch.nn.Linear', 'nn.Linear', (['(in_feat_size // 2 ** (num_layers - 1))', 'out_feat_size'], {}), '(in_feat_size // 2 ** (num_layers - 1), out_feat_size)\n', (2064, 2118), True, 'import torch.nn as nn\n'), ((2206, 2244), 'torch.nn.Linear', 'nn.Linear', (['in_feat_size', 'out_feat_size'], {}), '(in_feat_size, out_feat_size)\n', (2215, 2244), True, 'import torch.nn as nn\n'), ((2564, 2573), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (2570, 2573), True, 'import torch.nn.functional as F\n'), ((3938, 3957), 'dgl.function.copy_u', 'fn.copy_u', (['"""h"""', '"""m"""'], {}), "('h', 'm')\n", (3947, 3957), True, 'import dgl.function as fn\n'), ((7976, 7988), 'dgl.nn.AvgPooling', 'AvgPooling', ([], {}), '()\n', (7986, 7988), False, 'from dgl.nn import SumPooling, AvgPooling\n'), ((1914, 1977), 'torch.nn.Linear', 'nn.Linear', (['(in_feat_size // 2 ** i)', '(in_feat_size // 2 ** (i + 1))'], {}), '(in_feat_size // 2 ** i, in_feat_size // 2 ** (i + 1))\n', (1923, 1977), True, 'import torch.nn as nn\n'), ((2325, 2364), 'torch.nn.Linear', 'nn.Linear', (['out_feat_size', 'out_feat_size'], {}), '(out_feat_size, out_feat_size)\n', (2334, 2364), True, 'import torch.nn as nn\n')] |
import numpy as np
import cv2
import pandas as pd
import os
from time import time
from pytesseract import image_to_string
from re import search
from glob import glob as listdir
from collections import Counter
from argparse import ArgumentParser
from bs4 import BeautifulSoup as bs
from requests import get as geturl
class ShardCounter():
    """Detects champion shards on client screenshots and values them in blue essence."""

    def __init__(self):
        # measured values from own data
        self.N_SCALE = 98
        # fractional (y0, y1, x0, x1) bounds of the shard-count digit box —
        # presumably measured on an N_SCALE-pixel reference square; confirm
        self.N_BOUNDS = np.array([70, 93, 70, 95]) / self.N_SCALE
        # url for table of champions from wiki
        self.CHAMP_URL = "https://leagueoflegends.fandom.com/wiki/List_of_champions"
        # blue essence exchange rates
        self.EXCHANGE = {
            450 : 90,
            1350 : 270,
            3150 : 630,
            4800 : 960,
            6300 : 1260,
            7800 : 1560
        }
        # instead of 256 colours have only 64
        self.IM_QUANT_FACTOR = 4
        # bounds on boundary thresholding
        self.LOWER_B_THRESH = np.array([30, 22, 18])
        self.UPPER_B_THRESH = np.array([80, 72, 48])
        # limit on the area of a champion square
        self.SQ_AREA_LIMIT = 1500
        # confidence level of new champ or recognized
        self.CHAMP_CONFIDENCE = 0.79
        # one day for csv refresh time limit
        self.TIME_LIMIT = 86400
        # config for tesseract
        self.P_CONFIG = ("-l eng --oem 1 --psm 7")
        # pre-load the champion images (note: `listdir` is glob.glob, so keys
        # are full file paths like "champs/champ_000.png")
        self.champ_images = {champ : cv2.imread(champ) for champ in listdir("champs/*")}
        # keeps track of champ table update
        self.last_update = 0
## locate
def assign_champ_to_square(self, imagelist, display=False):
all_champs = dict()
# iterate through images
for base in imagelist:
# quantize the image slightly
smoothed = base // self.IM_QUANT_FACTOR
smoothed = smoothed * self.IM_QUANT_FACTOR
# threshold based on RGB values, more accurate than just BW
rgbthresh = cv2.inRange(smoothed, self.LOWER_B_THRESH,
self.UPPER_B_THRESH)
rgbthresh = cv2.filter2D(rgbthresh, -1, np.ones((2, 2)))
# find contours within the images, essentially shape finding
contours, h = cv2.findContours(rgbthresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# holds bounding boxes and frequency of areas
boxes = dict()
areas = list()
# iterate through contours
for cnt in contours:
# calculate the approximate shape
arclength = 0.1 * cv2.arcLength(cnt, True)
shape = cv2.approxPolyDP(cnt,arclength,True)
# if the shape has four points, i.e. a square
if(len(shape) == 4):
# obtain the bounding rectangle
(x, y, w, h) = cv2.boundingRect(shape)
# calculate the area
ar = w * h
# presume the area is above a limit
if ar > self.SQ_AREA_LIMIT:
# if not present in dictionary, add an entry
if ar not in boxes:
boxes[ar] = list()
# keep track of areas and bounding boxes
areas.append(ar)
boxes[ar].append([y, x, w, h])
# find the most common area
base_ar = Counter(areas).most_common()[0][0]
# also look at similar areas
freqs = {x for x in areas if abs(base_ar - x) < base_ar / 20}
# collect the actual image slices
imboxes = list()
for ar in freqs:
for idx, bbox in enumerate(boxes[ar]):
imboxes.append(base[bbox[0]:bbox[0] + bbox[2],
bbox[1]:bbox[1] + bbox[3]])
# calculate the correlations between each of the champions
corrs = self.list_corr(imboxes)
# look through rates and see if any new champions have been found
for idx, (rate, champ, box) in enumerate(corrs):
if display:
print("Found {} at {}".format(champ, rate))
if rate < self.CHAMP_CONFIDENCE:
print("New Champion Found")
cv2.imwrite("champs/champ_" + str(idx).zfill(3) + ".png", box)
# if the champion is not in the last, add em
if champ not in all_champs:
all_champs[champ] = (rate, champ, box)
# info
print("Found {} champions".format(len(corrs)))
return list(all_champs.values())
## produce a list of correlations of each bbox against known champions
def list_corr(self, boxes):
champ_corrs = list()
# for each bbox, compute relative correlation
for box in boxes:
max_match = 0
c_champ = None
# compare against every known champion
for champ in self.champ_images:
match = self.correlate(box, self.champ_images[champ])
# overwrite if higher confidence
if match > max_match:
max_match = match
c_champ = champ
# append highest match
champ_corrs.append((max_match, c_champ, box))
return champ_corrs
## find correlation of base image against a comparison
def correlate(self, base, comp):
# resize the larger image
if base.shape[0] > comp.shape[0]:
base = cv2.resize(base, (comp.shape[1], comp.shape[0]))
else:
comp = cv2.resize(comp, (base.shape[1], base.shape[0]))
# compute normalized correlation
results = cv2.matchTemplate(base, comp, cv2.TM_CCORR_NORMED)
return results.max()
## get datasheet of current champions and their essence costs
def getChampTable(self):
if (time() - self.last_update) > self.TIME_LIMIT:
# obtain the html
res = geturl(self.CHAMP_URL)
soup = bs(res.content, 'lxml')
# find the table and convert to dataframe
table = soup.find_all("table", {"class" : "sortable"})[0]
self.champ_table = pd.read_html(str(table))[0]
# sift through and extract just the name
for idx, row in self.champ_table.iterrows():
name = row["Champion"].replace(",", "\xa0").split("\xa0")[0]
self.champ_table.at[idx, "Champion"] = name
return self.champ_table
## convert a champion from pathname into proper name and blue essence cost
def convChamp(self, champ, cdata):
# find the basename of the champion
champ = os.path.splitext(os.path.basename(champ))[0]
# find corresponding champion in champ table
row = cdata[cdata["Champion"].str.lower() == champ]
# convert to cost
cost = int(self.EXCHANGE[row["Blue Essence"].values[0]])
name = row["Champion"].values[0]
return cost, name
## get the bounding box of the number
def getNumberBox(self, img):
boundsy = np.int32(self.N_BOUNDS[:2] * img.shape[1])
boundsx = np.int32(self.N_BOUNDS[2:] * img.shape[0])
return img[boundsy[0]:boundsy[1], boundsx[0]:boundsx[1]]
## retrieve the digit as text from the image
def getDigit(self, image):
# convert to gray
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# thresh on anything over 127, inverted for OCR as similar to MNIST - black on white bg
_, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
# get the string from pytesseract
result = image_to_string(image, config=self.P_CONFIG)
# search for digit
groups = search("\d+", result)
# if digit found use it, else assume one
if groups:
result = int(groups.group())
else:
result = 1
return result
## count the shards in the given set of images
def countShardCosts(self, images):
corrs = sc.assign_champ_to_square(images, display=False)
total_cost = 0
total_shards = 0
df = pd.DataFrame()
# update known champ data
self.getChampTable()
# sort the list based on the champion name
corrs.sort(key=lambda x : x[1])
for rate, champ, box in corrs:
# find the champion name and cost
cost, name = self.convChamp(champ, self.champ_table)
# retrieve number of shards via OCR
num_shards = self.getDigit(self.getNumberBox(box))
# calculate cost
n_cost = cost * num_shards
total_shards += num_shards
total_cost += n_cost
# print result
print("{} champion shards of {} is worth {} blue essence".format(
num_shards, name, n_cost))
df = df.append({"Champion" : name, "Cost": cost, "Shards" : num_shards,
"Total Cost" : n_cost}, ignore_index=True)
print("Total cost of all champion shards are {} blue essence".format(
total_cost))
print("{} shards found in total from {} unique champions".format(total_shards,
len(corrs)))
return df
## count shards using a list of paths rather than images
def countShardsPath(self, imagepaths):
images = [cv2.imread(image) for image in imagepaths]
return self.countShardCosts(images)
## command-line entry point: tally shard values for every screenshot given
if __name__ == "__main__":
    cli = ArgumentParser()
    cli.add_argument("base", help="Base Image", nargs="+")
    opts = cli.parse_args()
    # the instance is deliberately bound to the module-level name `sc`,
    # which other code in this module looks up directly
    sc = ShardCounter()
    sc.countShardsPath(opts.base)
| [
"argparse.ArgumentParser",
"cv2.approxPolyDP",
"cv2.arcLength",
"numpy.ones",
"glob.glob",
"cv2.inRange",
"cv2.matchTemplate",
"pandas.DataFrame",
"cv2.cvtColor",
"numpy.int32",
"requests.get",
"collections.Counter",
"cv2.boundingRect",
"re.search",
"cv2.resize",
"os.path.basename",
... | [((9743, 9759), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (9757, 9759), False, 'from argparse import ArgumentParser\n'), ((1000, 1022), 'numpy.array', 'np.array', (['[30, 22, 18]'], {}), '([30, 22, 18])\n', (1008, 1022), True, 'import numpy as np\n'), ((1053, 1075), 'numpy.array', 'np.array', (['[80, 72, 48]'], {}), '([80, 72, 48])\n', (1061, 1075), True, 'import numpy as np\n'), ((5881, 5931), 'cv2.matchTemplate', 'cv2.matchTemplate', (['base', 'comp', 'cv2.TM_CCORR_NORMED'], {}), '(base, comp, cv2.TM_CCORR_NORMED)\n', (5898, 5931), False, 'import cv2\n'), ((7306, 7348), 'numpy.int32', 'np.int32', (['(self.N_BOUNDS[:2] * img.shape[1])'], {}), '(self.N_BOUNDS[:2] * img.shape[1])\n', (7314, 7348), True, 'import numpy as np\n'), ((7367, 7409), 'numpy.int32', 'np.int32', (['(self.N_BOUNDS[2:] * img.shape[0])'], {}), '(self.N_BOUNDS[2:] * img.shape[0])\n', (7375, 7409), True, 'import numpy as np\n'), ((7598, 7637), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (7610, 7637), False, 'import cv2\n'), ((7753, 7806), 'cv2.threshold', 'cv2.threshold', (['image', '(127)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(image, 127, 255, cv2.THRESH_BINARY_INV)\n', (7766, 7806), False, 'import cv2\n'), ((7875, 7919), 'pytesseract.image_to_string', 'image_to_string', (['image'], {'config': 'self.P_CONFIG'}), '(image, config=self.P_CONFIG)\n', (7890, 7919), False, 'from pytesseract import image_to_string\n'), ((7965, 7987), 're.search', 'search', (['"""\\\\d+"""', 'result'], {}), "('\\\\d+', result)\n", (7971, 7987), False, 'from re import search\n'), ((8384, 8398), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8396, 8398), True, 'import pandas as pd\n'), ((454, 480), 'numpy.array', 'np.array', (['[70, 93, 70, 95]'], {}), '([70, 93, 70, 95])\n', (462, 480), True, 'import numpy as np\n'), ((1498, 1515), 'cv2.imread', 'cv2.imread', (['champ'], {}), '(champ)\n', (1508, 1515), False, 'import 
cv2\n'), ((2042, 2105), 'cv2.inRange', 'cv2.inRange', (['smoothed', 'self.LOWER_B_THRESH', 'self.UPPER_B_THRESH'], {}), '(smoothed, self.LOWER_B_THRESH, self.UPPER_B_THRESH)\n', (2053, 2105), False, 'import cv2\n'), ((2292, 2363), 'cv2.findContours', 'cv2.findContours', (['rgbthresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(rgbthresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2308, 2363), False, 'import cv2\n'), ((5690, 5738), 'cv2.resize', 'cv2.resize', (['base', '(comp.shape[1], comp.shape[0])'], {}), '(base, (comp.shape[1], comp.shape[0]))\n', (5700, 5738), False, 'import cv2\n'), ((5772, 5820), 'cv2.resize', 'cv2.resize', (['comp', '(base.shape[1], base.shape[0])'], {}), '(comp, (base.shape[1], base.shape[0]))\n', (5782, 5820), False, 'import cv2\n'), ((6166, 6188), 'requests.get', 'geturl', (['self.CHAMP_URL'], {}), '(self.CHAMP_URL)\n', (6172, 6188), True, 'from requests import get as geturl\n'), ((6212, 6235), 'bs4.BeautifulSoup', 'bs', (['res.content', '"""lxml"""'], {}), "(res.content, 'lxml')\n", (6214, 6235), True, 'from bs4 import BeautifulSoup as bs\n'), ((9615, 9632), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (9625, 9632), False, 'import cv2\n'), ((1529, 1548), 'glob.glob', 'listdir', (['"""champs/*"""'], {}), "('champs/*')\n", (1536, 1548), True, 'from glob import glob as listdir\n'), ((2174, 2189), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (2181, 2189), True, 'import numpy as np\n'), ((2700, 2738), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', 'arclength', '(True)'], {}), '(cnt, arclength, True)\n', (2716, 2738), False, 'import cv2\n'), ((6071, 6077), 'time.time', 'time', ([], {}), '()\n', (6075, 6077), False, 'from time import time\n'), ((6910, 6933), 'os.path.basename', 'os.path.basename', (['champ'], {}), '(champ)\n', (6926, 6933), False, 'import os\n'), ((2651, 2675), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (2664, 2675), False, 'import cv2\n'), ((2925, 
2948), 'cv2.boundingRect', 'cv2.boundingRect', (['shape'], {}), '(shape)\n', (2941, 2948), False, 'import cv2\n'), ((3512, 3526), 'collections.Counter', 'Counter', (['areas'], {}), '(areas)\n', (3519, 3526), False, 'from collections import Counter\n')] |
# -*- coding: UTF-8 -*-
from math import e
import os
from numpy.lib.type_check import _imag_dispatcher
import pandas as pd
import shutil
import numpy as np
import cv2
import random
from tqdm import tqdm
import pyfastcopy
import json,sklearn
from sklearn.model_selection import train_test_split
pos_data_base = 'D:/WWF_Det/WWF_Data/Pos_Data/'
raw_data_base = 'D:/WWF_Det/WWF_Data/Raw_Data/'
annotation_base = 'D:/WWF_Det/WWF_Det/Raw_annoations/'
Final_data_base = 'D:/WWF_Det/WWF_Data/Final_Data/rest-vid-clean/'
# exist_ok replaces the old bare try/except, which silently swallowed
# every error (permissions, bad drive, ...) instead of just "already exists"
os.makedirs(Final_data_base, exist_ok=True)
combine_data_list = ['rest-part1', 'rest-part2']
for dataset in combine_data_list:
    # (source, target) video path pairs collected for this dataset
    vid_pairs = []
    valuableset_dir = pos_data_base + dataset + '/allset/visualizations/'
    # raw-data folders use the short form, e.g. 'rest-p1' not 'rest-part1'
    source_base = (raw_data_base + dataset + '/').replace('part', 'p')
    annotation_dir = annotation_base + dataset + '.csv'
    df = pd.read_csv(annotation_dir)
    # each visualization jpg is named after one annotated question id
    pic_id_list = [i.replace('.jpg', '', 1) for i in os.listdir(valuableset_dir)]
    for pic_id in tqdm(pic_id_list):
        pic_df = df.loc[df['题目ID'] == int(pic_id)]
        timu_str = pic_df['题目数据'].values[0]
        timu_data = json.loads(timu_str)
        video_path_list = timu_data['video_path'].split('/')
        src = video_path_list[-3] + '/' + video_path_list[-2] + '/' + video_path_list[-1]
        dst = video_path_list[-3] + '-' + video_path_list[-1]
        vid_pairs.append((src, dst))
    # BUG FIX: the original deduplicated the source and target lists
    # independently with np.unique, which can silently break the
    # source<->target correspondence when sort orders differ; dedupe
    # the pairs together instead
    for source_vid, target_vid in sorted(set(vid_pairs)):
        shutil.copyfile(source_base + source_vid, Final_data_base + target_vid)
"tqdm.tqdm",
"os.makedirs",
"json.loads",
"pandas.read_csv",
"numpy.array",
"shutil.copyfile",
"os.listdir"
] | [((517, 545), 'os.makedirs', 'os.makedirs', (['Final_data_base'], {}), '(Final_data_base)\n', (528, 545), False, 'import os\n'), ((915, 942), 'pandas.read_csv', 'pd.read_csv', (['annotation_dir'], {}), '(annotation_dir)\n', (926, 942), True, 'import pandas as pd\n'), ((1039, 1056), 'tqdm.tqdm', 'tqdm', (['pic_id_list'], {}), '(pic_id_list)\n', (1043, 1056), False, 'from tqdm import tqdm\n'), ((1179, 1199), 'json.loads', 'json.loads', (['timu_str'], {}), '(timu_str)\n', (1189, 1199), False, 'import json, sklearn\n'), ((1725, 1796), 'shutil.copyfile', 'shutil.copyfile', (['(source_base + source_vid)', '(Final_data_base + target_vid)'], {}), '(source_base + source_vid, Final_data_base + target_vid)\n', (1740, 1796), False, 'import shutil\n'), ((992, 1019), 'os.listdir', 'os.listdir', (['valuableset_dir'], {}), '(valuableset_dir)\n', (1002, 1019), False, 'import os\n'), ((1519, 1548), 'numpy.array', 'np.array', (['source_vid_path_all'], {}), '(source_vid_path_all)\n', (1527, 1548), True, 'import numpy as np\n'), ((1593, 1622), 'numpy.array', 'np.array', (['target_vid_path_all'], {}), '(target_vid_path_all)\n', (1601, 1622), True, 'import numpy as np\n')] |
import numpy as np
from keras import backend as K
from keras import activations, initializers
from keras.initializers import Constant, Initializer
from keras.layers import Layer
from scipy import signal
from scipy import linalg as la
import math
import tensorflow as tf
def transition(measure, N, **measure_args):
    """ A, B transition matrices for different measures.

    Parameters
    ----------
    measure : str
        the type of measure
            legt - Legendre (translated)
            legs - Legendre (scaled)
            glagt - generalized Laguerre (translated)
            lagt, tlagt - previous versions of (tilted) Laguerre with
            slightly different normalization
            lmu - LMU variant (equivalent to LegT up to normalization)
    N : int
        order of the system (state dimension)
    measure_args : optional measure parameters (e.g. alpha/beta for the
        Laguerre family)

    Returns
    -------
    A : (N, N) ndarray, B : (N, 1) ndarray

    Raises
    ------
    ValueError for an unrecognized measure (previously this fell
    through and raised UnboundLocalError on return).
    """
    if measure not in ('lagt', 'tlagt', 'glagt', 'legt', 'lmu', 'legs'):
        raise ValueError("unknown measure: {!r}".format(measure))
    # Laguerre (translated)
    if measure == 'lagt':
        b = measure_args.get('beta', 1.0)
        A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
        B = b * np.ones((N, 1))
    if measure == 'tlagt':
        # beta = 1 corresponds to no tilt
        b = measure_args.get('beta', 1.0)
        A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
        B = b * np.ones((N, 1))
    # Generalized Laguerre
    # alpha 0, beta small is most stable (limits to the 'lagt' measure)
    # alpha 0, beta 1 has transition matrix A = [lower triangular 1]
    if measure == 'glagt':
        # BUG FIX: `ss` (scipy.special) was referenced but never imported
        # at module level, so this branch raised NameError
        from scipy import special as ss
        alpha = measure_args.get('alpha', 0.0)
        beta = measure_args.get('beta', 0.01)
        A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
        B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
        # tilt by the square root of the Laguerre weight normalization
        L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
        A = (1./L[:, None]) * A * L[None, :]
        B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
    # Legendre (translated)
    elif measure == 'legt':
        Q = np.arange(N, dtype=np.float64)
        R = (2*Q + 1) ** .5
        j, i = np.meshgrid(Q, Q)
        A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
        B = R[:, None]
        A = -A
    # LMU: equivalent to LegT up to normalization
    elif measure == 'lmu':
        Q = np.arange(N, dtype=np.float64)
        R = (2*Q + 1)[:, None]  # / theta
        j, i = np.meshgrid(Q, Q)
        A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
        B = (-1.)**Q[:, None] * R
    # Legendre (scaled)
    elif measure == 'legs':
        q = np.arange(N, dtype=np.float64)
        col, row = np.meshgrid(q, q)
        r = 2 * q + 1
        M = -(np.where(row >= col, r, 0) - np.diag(q))
        # symmetrize via the diagonal normalization T
        T = np.sqrt(np.diag(2 * q + 1))
        A = T @ M @ np.linalg.inv(T)
        B = np.diag(T)[:, None]
    return A, B
# Accepted spellings for the `method` argument of the cells below; each
# group selects one discretization scheme for the continuous (A, B)
# system: forward/backward Euler, bilinear (Tustin), or zero-order hold.
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class HippoTCell(Layer):
    """Keras RNN cell with a HiPPO memory over a fixed-length window.

    The recurrent state has two parts: a hidden vector ``h`` of size
    ``units`` and a memory vector ``m`` of size ``memory_order``.  The
    memory evolves as a discretized linear time-invariant system whose
    continuous (A, B) matrices come from :func:`transition`; the
    discretization step ``dt = 1/theta`` is fixed at construction time
    (hence a sliding window of length ``theta``).

    NOTE(review): the 'T' presumably marks the translated/fixed-window
    (LegT/LMU-style) variant, in contrast to :class:`HippoSCell` below,
    which rescales its dynamics at every timestep — confirm against the
    HiPPO paper's naming.
    """

    def __init__(self,
                 units,
                 memory_order,
                 theta,  # window length, relative to dt=1
                 measure='legt',
                 method='zoh',
                 trainable_input_encoders=True,
                 trainable_hidden_encoders=True,
                 trainable_memory_encoders=True,
                 trainable_input_kernel=True,
                 trainable_hidden_kernel=True,
                 trainable_memory_kernel=True,
                 trainable_A=False,
                 trainable_B=False,
                 input_encoders_initializer='lecun_uniform',
                 hidden_encoders_initializer='lecun_uniform',
                 memory_encoders_initializer=Constant(0),  # 'lecun_uniform',
                 input_kernel_initializer='glorot_normal',
                 hidden_kernel_initializer='glorot_normal',
                 memory_kernel_initializer='glorot_normal',
                 hidden_activation='tanh',
                 **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.memory_order = memory_order
        self.theta = theta
        self.method = method
        self.trainable_input_encoders = trainable_input_encoders
        self.trainable_hidden_encoders = trainable_hidden_encoders
        self.trainable_memory_encoders = trainable_memory_encoders
        self.trainable_input_kernel = trainable_input_kernel
        self.trainable_hidden_kernel = trainable_hidden_kernel
        self.trainable_memory_kernel = trainable_memory_kernel
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B
        self.input_encoders_initializer = initializers.get(
            input_encoders_initializer)
        self.hidden_encoders_initializer = initializers.get(
            hidden_encoders_initializer)
        self.memory_encoders_initializer = initializers.get(
            memory_encoders_initializer)
        self.input_kernel_initializer = initializers.get(
            input_kernel_initializer)
        self.hidden_kernel_initializer = initializers.get(
            hidden_kernel_initializer)
        self.memory_kernel_initializer = initializers.get(
            memory_kernel_initializer)
        self.hidden_activation = activations.get(hidden_activation)

        A, B = transition(measure, memory_order)
        # Construct A and B matrices: discretize the SISO continuous
        # system (A, B) once with step 1/theta.  C and D are dummies —
        # cont2discrete requires them, but only dA and dB are kept.
        C = np.ones((1, memory_order))
        D = np.zeros((1,))
        dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=1./theta, method=method)

        self._A = dA - np.eye(memory_order)  # puts into form: x += Ax
        self._B = dB

        # recurrent state: (hidden h, memory m)
        self.state_size = (self.units, self.memory_order)
        self.output_size = self.units

    def build(self, input_shape):
        """Create the encoder/kernel weights and the (fixed) A, B weights."""
        input_dim = input_shape[-1]

        # the three encoders project input/hidden/memory down to the
        # scalar u that drives the memory system
        self.input_encoders = self.add_weight(
            name='input_encoders',
            shape=(input_dim, 1),
            initializer=self.input_encoders_initializer,
            trainable=self.trainable_input_encoders)

        self.hidden_encoders = self.add_weight(
            name='hidden_encoders',
            shape=(self.units, 1),
            initializer=self.hidden_encoders_initializer,
            trainable=self.trainable_hidden_encoders)

        self.memory_encoders = self.add_weight(
            name='memory_encoders',
            shape=(self.memory_order, 1),
            initializer=self.memory_encoders_initializer,
            trainable=self.trainable_memory_encoders)

        # the three kernels feed the hidden-state update
        self.input_kernel = self.add_weight(
            name='input_kernel',
            shape=(input_dim, self.units),
            initializer=self.input_kernel_initializer,
            trainable=self.trainable_input_kernel)

        self.hidden_kernel = self.add_weight(
            name='hidden_kernel',
            shape=(self.units, self.units),
            initializer=self.hidden_kernel_initializer,
            trainable=self.trainable_hidden_kernel)

        self.memory_kernel = self.add_weight(
            name='memory_kernel',
            shape=(self.memory_order, self.units),
            initializer=self.memory_kernel_initializer,
            trainable=self.trainable_memory_kernel)

        # A and B are stored transposed so call() can right-multiply
        # batch-major states; frozen unless trainable_A/B was set
        self.AT = self.add_weight(
            name='AT',
            shape=(self.memory_order, self.memory_order),
            initializer=Constant(self._A.T),  # note: transposed
            trainable=self.trainable_A)

        self.BT = self.add_weight(
            name='BT',
            shape=(1, self.memory_order),  # system is SISO
            initializer=Constant(self._B.T),  # note: transposed
            trainable=self.trainable_B)

        self.built = True

    def call(self, inputs, states):
        """One RNN step; returns (output, [new_h, new_m])."""
        h, m = states

        # u: the scalar input to the memory system — a learned
        # projection of input, hidden state and memory
        u = (K.dot(inputs, self.input_encoders) +
             K.dot(h, self.hidden_encoders) +
             K.dot(m, self.memory_encoders))

        # memory update in incremental form: m += m A^T + u B^T
        m = m + K.dot(m, self.AT) + K.dot(u, self.BT)

        # hidden update reads input, previous hidden state, and the
        # freshly-updated memory
        h = self.hidden_activation(
            K.dot(inputs, self.input_kernel) +
            K.dot(h, self.hidden_kernel) +
            K.dot(m, self.memory_kernel))

        return h, [h, m]
class HippoSCell(Layer):
    """Keras RNN cell with a timestep-rescaled (scale-invariant) HiPPO memory.

    Unlike :class:`HippoTCell`, which discretizes the (A, B) system once
    for a fixed window ``theta``, this cell rescales the continuous
    dynamics by ``1/t`` at every timestep ``t`` so the memory summarizes
    the entire history seen so far.  The discretized matrices A_t, B_t
    are precomputed for t = 1..max_length and indexed at run time by a
    timestep counter carried in the recurrent state.
    """

    def __init__(self,
                 units,
                 memory_order,
                 measure='legt',
                 method='zoh',
                 max_length=256,
                 trainable_input_encoders=True,
                 trainable_hidden_encoders=True,
                 trainable_memory_encoders=True,
                 trainable_input_kernel=True,
                 trainable_hidden_kernel=True,
                 trainable_memory_kernel=True,
                 trainable_A=False,
                 trainable_B=False,
                 input_encoders_initializer='lecun_uniform',
                 hidden_encoders_initializer='lecun_uniform',
                 memory_encoders_initializer=Constant(0),  # 'lecun_uniform',
                 input_kernel_initializer='glorot_normal',
                 hidden_kernel_initializer='glorot_normal',
                 memory_kernel_initializer='glorot_normal',
                 hidden_activation='tanh',
                 gate=False,
                 **kwargs):
        super().__init__(**kwargs)

        self.units = units
        self.memory_order = memory_order
        self.method = method
        self.max_length = max_length
        self.trainable_input_encoders = trainable_input_encoders
        self.trainable_hidden_encoders = trainable_hidden_encoders
        self.trainable_memory_encoders = trainable_memory_encoders
        self.trainable_input_kernel = trainable_input_kernel
        self.trainable_hidden_kernel = trainable_hidden_kernel
        self.trainable_memory_kernel = trainable_memory_kernel
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B
        self.gate = gate

        self.input_encoders_initializer = initializers.get(
            input_encoders_initializer)
        self.hidden_encoders_initializer = initializers.get(
            hidden_encoders_initializer)
        self.memory_encoders_initializer = initializers.get(
            memory_encoders_initializer)
        self.input_kernel_initializer = initializers.get(
            input_kernel_initializer)
        self.hidden_kernel_initializer = initializers.get(
            hidden_kernel_initializer)
        self.memory_kernel_initializer = initializers.get(
            memory_kernel_initializer)
        self.hidden_activation = activations.get(hidden_activation)

        A, B = transition(measure, memory_order)
        # Precompute the discretized (A, B) for every timestep t: the
        # continuous system is scaled by 1/t before discretization.
        A_stacked = np.empty((max_length, memory_order, memory_order), dtype=A.dtype)
        B_stacked = np.empty((max_length, memory_order), dtype=B.dtype)
        B = B[:, 0]
        N = memory_order
        for t in range(1, max_length + 1):
            At = A / t
            Bt = B / t
            if method in forward_aliases:
                A_stacked[t - 1] = np.eye(N) + At
                B_stacked[t - 1] = Bt
            elif method in backward_aliases:
                # NOTE(review): solve_triangular assumes the system matrix
                # is (lower) triangular; that holds for 'legs' but not for
                # every measure (e.g. 'legt' is dense) — verify
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
            elif method in bilinear_aliases:
                A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
                B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
            elif method in zoh_aliases:
                # exact ZOH over the log-scaled interval [log t, log(t+1)]
                A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
                B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
        B_stacked = B_stacked[:, :, None]

        # BUG FIX: the identity was previously subtracted TWICE (once
        # in-place, once again on assignment), leaving self._A = dA - 2I.
        # Subtract it once — matching HippoTCell — to put the update into
        # the incremental form m += A m.
        self._A = A_stacked - np.eye(memory_order)
        self._B = B_stacked

        # recurrent state: (hidden h, memory m, timestep counter t)
        self.state_size = (self.units, self.memory_order, 1)
        self.output_size = self.units

    def build(self, input_shape):
        """Create the encoder/kernel weights, the stacked A/B tables, and
        (optionally) the gate weights."""
        input_dim = input_shape[-1]

        # encoders project input/hidden/memory down to the scalar u that
        # drives the memory system
        self.input_encoders = self.add_weight(
            name='input_encoders',
            shape=(input_dim, 1),
            initializer=self.input_encoders_initializer,
            trainable=self.trainable_input_encoders)

        self.hidden_encoders = self.add_weight(
            name='hidden_encoders',
            shape=(self.units, 1),
            initializer=self.hidden_encoders_initializer,
            trainable=self.trainable_hidden_encoders)

        self.memory_encoders = self.add_weight(
            name='memory_encoders',
            shape=(self.memory_order, 1),
            initializer=self.memory_encoders_initializer,
            trainable=self.trainable_memory_encoders)

        self.input_kernel = self.add_weight(
            name='input_kernel',
            shape=(input_dim, self.units),
            initializer=self.input_kernel_initializer,
            trainable=self.trainable_input_kernel)

        if self.trainable_hidden_kernel:
            self.hidden_kernel = self.add_weight(
                name='hidden_kernel',
                shape=(self.units, self.units),
                initializer=self.hidden_kernel_initializer,
                trainable=self.trainable_hidden_kernel)
        else:
            # frozen at zero: the hidden update then depends only on the
            # input and the memory
            self.hidden_kernel = self.add_weight(
                name='hidden_kernel',
                shape=(self.units, self.units),
                initializer=Constant(0.),
                trainable=False)

        self.memory_kernel = self.add_weight(
            name='memory_kernel',
            shape=(self.memory_order, self.units),
            initializer=self.memory_kernel_initializer,
            trainable=self.trainable_memory_kernel)

        # per-timestep transition tables, frozen unless trainable_A/B set
        self.A = self.add_weight(
            name='A',
            shape=(self.max_length, self.memory_order, self.memory_order),
            initializer=Constant(self._A),
            trainable=self.trainable_A)

        self.B = self.add_weight(
            name='B',
            shape=(self.max_length, self.memory_order, 1),  # system is SISO
            initializer=Constant(self._B),
            trainable=self.trainable_B)

        if self.gate:
            self.W_gate = self.add_weight(
                name='gate',
                shape=(self.units + self.memory_order, self.units),
                initializer=initializers.get('glorot_normal'),
                trainable=True)

        self.built = True

    def call(self, inputs, states):
        """One RNN step; returns (output, [new_h, new_m, t + 1])."""
        h, m, t = states

        # select this timestep's (A, B); the counter is carried as a
        # float tensor, and all batch entries share the same value, so
        # the first element suffices.  Clamp so sequences longer than
        # max_length reuse the last precomputed matrices.
        tt = tf.cast(t, tf.int32)
        tt = tt[0, 0]
        tt = tf.math.minimum(tt, self.max_length - 1)

        u = (K.dot(inputs, self.input_encoders) +
             K.dot(h, self.hidden_encoders) +
             K.dot(m, self.memory_encoders))

        # memory update: m += m A_t^T + u B_t^T
        m = m + K.dot(m, tf.transpose(self.A[tt])) + K.dot(u, tf.transpose(self.B[tt]))

        new_h = self.hidden_activation(
            K.dot(inputs, self.input_kernel) +
            K.dot(h, self.hidden_kernel) +
            K.dot(m, self.memory_kernel))

        if self.gate:
            # convex blend of old and candidate hidden states
            g = tf.sigmoid(K.dot(tf.concat([h, m], axis=-1), self.W_gate))
            h = (1. - g) * h + g * new_h
        else:
            h = new_h

        return h, [h, m, t + 1]
| [
"keras.backend.dot",
"scipy.linalg.solve_triangular",
"numpy.empty",
"numpy.ones",
"numpy.arange",
"numpy.diag",
"numpy.meshgrid",
"keras.activations.get",
"tensorflow.concat",
"tensorflow.cast",
"math.log",
"keras.initializers.get",
"scipy.signal.cont2discrete",
"keras.initializers.Consta... | [((3525, 3536), 'keras.initializers.Constant', 'Constant', (['(0)'], {}), '(0)\n', (3533, 3536), False, 'from keras.initializers import Constant, Initializer\n'), ((4475, 4519), 'keras.initializers.get', 'initializers.get', (['input_encoders_initializer'], {}), '(input_encoders_initializer)\n', (4491, 4519), False, 'from keras import activations, initializers\n'), ((4576, 4621), 'keras.initializers.get', 'initializers.get', (['hidden_encoders_initializer'], {}), '(hidden_encoders_initializer)\n', (4592, 4621), False, 'from keras import activations, initializers\n'), ((4678, 4723), 'keras.initializers.get', 'initializers.get', (['memory_encoders_initializer'], {}), '(memory_encoders_initializer)\n', (4694, 4723), False, 'from keras import activations, initializers\n'), ((4777, 4819), 'keras.initializers.get', 'initializers.get', (['input_kernel_initializer'], {}), '(input_kernel_initializer)\n', (4793, 4819), False, 'from keras import activations, initializers\n'), ((4874, 4917), 'keras.initializers.get', 'initializers.get', (['hidden_kernel_initializer'], {}), '(hidden_kernel_initializer)\n', (4890, 4917), False, 'from keras import activations, initializers\n'), ((4972, 5015), 'keras.initializers.get', 'initializers.get', (['memory_kernel_initializer'], {}), '(memory_kernel_initializer)\n', (4988, 5015), False, 'from keras import activations, initializers\n'), ((5063, 5097), 'keras.activations.get', 'activations.get', (['hidden_activation'], {}), '(hidden_activation)\n', (5078, 5097), False, 'from keras import activations, initializers\n'), ((5197, 5223), 'numpy.ones', 'np.ones', (['(1, memory_order)'], {}), '((1, memory_order))\n', (5204, 5223), True, 'import numpy as np\n'), ((5236, 5250), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (5244, 5250), True, 'import numpy as np\n'), ((5277, 5342), 'scipy.signal.cont2discrete', 'signal.cont2discrete', (['(A, B, C, D)'], {'dt': '(1.0 / theta)', 'method': 'method'}), '((A, B, C, 
D), dt=1.0 / theta, method=method)\n', (5297, 5342), False, 'from scipy import signal\n'), ((8655, 8666), 'keras.initializers.Constant', 'Constant', (['(0)'], {}), '(0)\n', (8663, 8666), False, 'from keras.initializers import Constant, Initializer\n'), ((9793, 9837), 'keras.initializers.get', 'initializers.get', (['input_encoders_initializer'], {}), '(input_encoders_initializer)\n', (9809, 9837), False, 'from keras import activations, initializers\n'), ((9894, 9939), 'keras.initializers.get', 'initializers.get', (['hidden_encoders_initializer'], {}), '(hidden_encoders_initializer)\n', (9910, 9939), False, 'from keras import activations, initializers\n'), ((9996, 10041), 'keras.initializers.get', 'initializers.get', (['memory_encoders_initializer'], {}), '(memory_encoders_initializer)\n', (10012, 10041), False, 'from keras import activations, initializers\n'), ((10095, 10137), 'keras.initializers.get', 'initializers.get', (['input_kernel_initializer'], {}), '(input_kernel_initializer)\n', (10111, 10137), False, 'from keras import activations, initializers\n'), ((10192, 10235), 'keras.initializers.get', 'initializers.get', (['hidden_kernel_initializer'], {}), '(hidden_kernel_initializer)\n', (10208, 10235), False, 'from keras import activations, initializers\n'), ((10290, 10333), 'keras.initializers.get', 'initializers.get', (['memory_kernel_initializer'], {}), '(memory_kernel_initializer)\n', (10306, 10333), False, 'from keras import activations, initializers\n'), ((10381, 10415), 'keras.activations.get', 'activations.get', (['hidden_activation'], {}), '(hidden_activation)\n', (10396, 10415), False, 'from keras import activations, initializers\n'), ((10524, 10589), 'numpy.empty', 'np.empty', (['(max_length, memory_order, memory_order)'], {'dtype': 'A.dtype'}), '((max_length, memory_order, memory_order), dtype=A.dtype)\n', (10532, 10589), True, 'import numpy as np\n'), ((10610, 10661), 'numpy.empty', 'np.empty', (['(max_length, memory_order)'], {'dtype': 'B.dtype'}), 
'((max_length, memory_order), dtype=B.dtype)\n', (10618, 10661), True, 'import numpy as np\n'), ((11781, 11801), 'numpy.eye', 'np.eye', (['memory_order'], {}), '(memory_order)\n', (11787, 11801), True, 'import numpy as np\n'), ((14660, 14680), 'tensorflow.cast', 'tf.cast', (['t', 'tf.int32'], {}), '(t, tf.int32)\n', (14667, 14680), True, 'import tensorflow as tf\n'), ((14716, 14756), 'tensorflow.math.minimum', 'tf.math.minimum', (['tt', '(self.max_length - 1)'], {}), '(tt, self.max_length - 1)\n', (14731, 14756), True, 'import tensorflow as tf\n'), ((792, 807), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (799, 807), True, 'import numpy as np\n'), ((995, 1010), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (1002, 1010), True, 'import numpy as np\n'), ((1729, 1759), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'np.float64'}), '(N, dtype=np.float64)\n', (1738, 1759), True, 'import numpy as np\n'), ((1803, 1820), 'numpy.meshgrid', 'np.meshgrid', (['Q', 'Q'], {}), '(Q, Q)\n', (1814, 1820), True, 'import numpy as np\n'), ((5364, 5384), 'numpy.eye', 'np.eye', (['memory_order'], {}), '(memory_order)\n', (5370, 5384), True, 'import numpy as np\n'), ((7642, 7672), 'keras.backend.dot', 'K.dot', (['m', 'self.memory_encoders'], {}), '(m, self.memory_encoders)\n', (7647, 7672), True, 'from keras import backend as K\n'), ((7711, 7728), 'keras.backend.dot', 'K.dot', (['u', 'self.BT'], {}), '(u, self.BT)\n', (7716, 7728), True, 'from keras import backend as K\n'), ((11859, 11879), 'numpy.eye', 'np.eye', (['memory_order'], {}), '(memory_order)\n', (11865, 11879), True, 'import numpy as np\n'), ((14864, 14894), 'keras.backend.dot', 'K.dot', (['m', 'self.memory_encoders'], {}), '(m, self.memory_encoders)\n', (14869, 14894), True, 'from keras import backend as K\n'), ((735, 744), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (741, 744), True, 'import numpy as np\n'), ((759, 774), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (766, 774), True, 'import 
numpy as np\n'), ((942, 951), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (948, 951), True, 'import numpy as np\n'), ((962, 977), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (969, 977), True, 'import numpy as np\n'), ((1349, 1364), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (1356, 1364), True, 'import numpy as np\n'), ((1413, 1425), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1422, 1425), True, 'import numpy as np\n'), ((2019, 2049), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'np.float64'}), '(N, dtype=np.float64)\n', (2028, 2049), True, 'import numpy as np\n'), ((2106, 2123), 'numpy.meshgrid', 'np.meshgrid', (['Q', 'Q'], {}), '(Q, Q)\n', (2117, 2123), True, 'import numpy as np\n'), ((7141, 7160), 'keras.initializers.Constant', 'Constant', (['self._A.T'], {}), '(self._A.T)\n', (7149, 7160), False, 'from keras.initializers import Constant, Initializer\n'), ((7365, 7384), 'keras.initializers.Constant', 'Constant', (['self._B.T'], {}), '(self._B.T)\n', (7373, 7384), False, 'from keras.initializers import Constant, Initializer\n'), ((7546, 7580), 'keras.backend.dot', 'K.dot', (['inputs', 'self.input_encoders'], {}), '(inputs, self.input_encoders)\n', (7551, 7580), True, 'from keras import backend as K\n'), ((7596, 7626), 'keras.backend.dot', 'K.dot', (['h', 'self.hidden_encoders'], {}), '(h, self.hidden_encoders)\n', (7601, 7626), True, 'from keras import backend as K\n'), ((7691, 7708), 'keras.backend.dot', 'K.dot', (['m', 'self.AT'], {}), '(m, self.AT)\n', (7696, 7708), True, 'from keras import backend as K\n'), ((7871, 7899), 'keras.backend.dot', 'K.dot', (['m', 'self.memory_kernel'], {}), '(m, self.memory_kernel)\n', (7876, 7899), True, 'from keras import backend as K\n'), ((13947, 13964), 'keras.initializers.Constant', 'Constant', (['self._A'], {}), '(self._A)\n', (13955, 13964), False, 'from keras.initializers import Constant, Initializer\n'), ((14184, 14201), 'keras.initializers.Constant', 'Constant', (['self._B'], {}), 
'(self._B)\n', (14192, 14201), False, 'from keras.initializers import Constant, Initializer\n'), ((14768, 14802), 'keras.backend.dot', 'K.dot', (['inputs', 'self.input_encoders'], {}), '(inputs, self.input_encoders)\n', (14773, 14802), True, 'from keras import backend as K\n'), ((14818, 14848), 'keras.backend.dot', 'K.dot', (['h', 'self.hidden_encoders'], {}), '(h, self.hidden_encoders)\n', (14823, 14848), True, 'from keras import backend as K\n'), ((14959, 14983), 'tensorflow.transpose', 'tf.transpose', (['self.B[tt]'], {}), '(self.B[tt])\n', (14971, 14983), True, 'import tensorflow as tf\n'), ((15131, 15159), 'keras.backend.dot', 'K.dot', (['m', 'self.memory_kernel'], {}), '(m, self.memory_kernel)\n', (15136, 15159), True, 'from keras import backend as K\n'), ((1399, 1411), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1408, 1411), True, 'import numpy as np\n'), ((1846, 1883), 'numpy.where', 'np.where', (['(i < j)', '((-1.0) ** (i - j))', '(1)'], {}), '(i < j, (-1.0) ** (i - j), 1)\n', (1854, 1883), True, 'import numpy as np\n'), ((2136, 2178), 'numpy.where', 'np.where', (['(i < j)', '(-1)', '((-1.0) ** (i - j + 1))'], {}), '(i < j, -1, (-1.0) ** (i - j + 1))\n', (2144, 2178), True, 'import numpy as np\n'), ((2274, 2304), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'np.float64'}), '(N, dtype=np.float64)\n', (2283, 2304), True, 'import numpy as np\n'), ((2324, 2341), 'numpy.meshgrid', 'np.meshgrid', (['q', 'q'], {}), '(q, q)\n', (2335, 2341), True, 'import numpy as np\n'), ((7779, 7811), 'keras.backend.dot', 'K.dot', (['inputs', 'self.input_kernel'], {}), '(inputs, self.input_kernel)\n', (7784, 7811), True, 'from keras import backend as K\n'), ((7827, 7855), 'keras.backend.dot', 'K.dot', (['h', 'self.hidden_kernel'], {}), '(h, self.hidden_kernel)\n', (7832, 7855), True, 'from keras import backend as K\n'), ((10924, 10933), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10930, 10933), True, 'import numpy as np\n'), ((13504, 13517), 
'keras.initializers.Constant', 'Constant', (['(0.0)'], {}), '(0.0)\n', (13512, 13517), False, 'from keras.initializers import Constant, Initializer\n'), ((14471, 14504), 'keras.initializers.get', 'initializers.get', (['"""glorot_normal"""'], {}), "('glorot_normal')\n", (14487, 14504), False, 'from keras import activations, initializers\n'), ((14922, 14946), 'tensorflow.transpose', 'tf.transpose', (['self.A[tt]'], {}), '(self.A[tt])\n', (14934, 14946), True, 'import tensorflow as tf\n'), ((15039, 15071), 'keras.backend.dot', 'K.dot', (['inputs', 'self.input_kernel'], {}), '(inputs, self.input_kernel)\n', (15044, 15071), True, 'from keras import backend as K\n'), ((15087, 15115), 'keras.backend.dot', 'K.dot', (['h', 'self.hidden_kernel'], {}), '(h, self.hidden_kernel)\n', (15092, 15115), True, 'from keras import backend as K\n'), ((15216, 15242), 'tensorflow.concat', 'tf.concat', (['[h, m]'], {'axis': '(-1)'}), '([h, m], axis=-1)\n', (15225, 15242), True, 'import tensorflow as tf\n'), ((1312, 1321), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (1318, 1321), True, 'import numpy as np\n'), ((2439, 2457), 'numpy.diag', 'np.diag', (['(2 * q + 1)'], {}), '(2 * q + 1)\n', (2446, 2457), True, 'import numpy as np\n'), ((2479, 2495), 'numpy.linalg.inv', 'np.linalg.inv', (['T'], {}), '(T)\n', (2492, 2495), True, 'import numpy as np\n'), ((2508, 2518), 'numpy.diag', 'np.diag', (['T'], {}), '(T)\n', (2515, 2518), True, 'import numpy as np\n'), ((11148, 11157), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11154, 11157), True, 'import numpy as np\n'), ((1508, 1520), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1517, 1520), True, 'import numpy as np\n'), ((2378, 2404), 'numpy.where', 'np.where', (['(row >= col)', 'r', '(0)'], {}), '(row >= col, r, 0)\n', (2386, 2404), True, 'import numpy as np\n'), ((2407, 2417), 'numpy.diag', 'np.diag', (['q'], {}), '(q)\n', (2414, 2417), True, 'import numpy as np\n'), ((11132, 11141), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11138, 
11141), True, 'import numpy as np\n'), ((11226, 11235), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11232, 11235), True, 'import numpy as np\n'), ((11656, 11716), 'scipy.linalg.solve_triangular', 'la.solve_triangular', (['A', '(A_stacked[t - 1] @ B - B)'], {'lower': '(True)'}), '(A, A_stacked[t - 1] @ B - B, lower=True)\n', (11675, 11716), True, 'from scipy import linalg as la\n'), ((1473, 1485), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1482, 1485), True, 'import numpy as np\n'), ((11358, 11367), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11364, 11367), True, 'import numpy as np\n'), ((11378, 11387), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11384, 11387), True, 'import numpy as np\n'), ((11465, 11474), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11471, 11474), True, 'import numpy as np\n'), ((11589, 11604), 'math.log', 'math.log', (['(t + 1)'], {}), '(t + 1)\n', (11597, 11604), False, 'import math\n'), ((11607, 11618), 'math.log', 'math.log', (['t'], {}), '(t)\n', (11615, 11618), False, 'import math\n')] |
"""
@author: <NAME>
"""
import numpy as np
name_dict = {0:'<NAME>' ,
1:'<NAME>',
2:'<NAME>',
3:'<NAME>',
4:'<NAME>',
5:'RVNK Neeraj',
6:'<NAME>',
7:'<NAME>',
8:'<NAME>',
9:'<NAME>',
}
np.save('name_dict.npy',name_dict) | [
"numpy.save"
] | [((337, 372), 'numpy.save', 'np.save', (['"""name_dict.npy"""', 'name_dict'], {}), "('name_dict.npy', name_dict)\n", (344, 372), True, 'import numpy as np\n')] |
import numpy as np
from scipy.linalg import eigh, inv
def eigen_to_G(evals, evecs, efermi, energy):
    """Assemble the Green's function from an eigendecomposition.

    Computes G(energy) = V diag(1 / (energy + efermi - evals)) V^dagger,
    i.e. the resolvent evaluated at the energy measured from the Fermi level.

    :param evals: 1-d array of eigenvalues
    :param evecs: matrix of eigenvectors (one per column)
    :param efermi: Fermi energy
    :param energy: energy at which to evaluate G
    :returns: Green's function matrix, same shape as the Hamiltonian
    """
    pole_weights = 1.0 / (-evals + (energy + efermi))
    # Scale each eigenvector column by its pole weight, then close with V^dagger;
    # equivalent to V @ diag(pole_weights) @ V^dagger without forming the diagonal matrix.
    return (evecs * pole_weights).dot(evecs.conj().T)
def green_H(H, energy, S=None):
    """Green's function by direct matrix inversion: G(energy) = inv(energy*S - H).

    :param H: (N, N) Hamiltonian matrix
    :param energy: scalar energy (may be complex)
    :param S: (N, N) overlap matrix; defaults to the identity of matching size
    :returns: Green's function matrix, shape (N, N)
    """
    # BUG FIX: the original default was S=np.eye(3) — a mutable default
    # argument that also hard-coded a 3x3 identity, breaking any H that is
    # not 3x3 when S is omitted.  Build the identity from H's size instead.
    if S is None:
        S = np.eye(H.shape[0])
    return inv(S * energy - H)
def green_H_eig(H,energy):
    """Green's function of H at the given energy via full diagonalization.

    Diagonalizes H with scipy's eigh and assembles G(energy) from the
    eigenpairs, taking the Fermi level to be 0.
    """
    w, v = eigh(H)
    return eigen_to_G(w, v, 0.0, energy)
def test_eigh():
    """Print-based smoke test of the Green's-function helpers.

    Builds a random Hermitian H and overlap S, solves the generalized
    eigenproblem eigh(H, S), prints several eigenvector products to inspect
    the (S-)orthonormality relations, and compares two green_H evaluations.
    NOTE(review): this is a manual sanity check, not an automated assertion.
    """
    # random real-symmetric (hence Hermitian) H
    H=np.random.random((3,3))
    H=H+np.random.random((3,3))*0
    evals ,evecs=eigh(H)
    #print(f"VT@V: {evecs.T.conj()@evecs}")
    green_H(H, 1)
    green_H_eig(H, 1)
    print(f"H: {H}")
    # random Hermitian overlap S, shifted by 0.4*I — presumably intended to
    # make S positive definite (required by eigh(H, S)); not guaranteed, TODO confirm
    S=np.random.random((3,3))+np.random.random((3,3))*1j
    S=S+S.T.conj()
    S=np.eye(3)*0.4+S
    # generalized eigenproblem H v = w S v
    evals, evecs=eigh(H, S)
    print(f"VT@V: {evecs.T.conj()@evecs}")
    print(f"VT@S@V: {evecs.T.conj()@S@evecs}") # I
    print(f"V@S@VT: {evecs@S@evecs.T.conj()}") # Not I
    print(f"S@VT@evals@V: {S@evecs.T.conj()@np.diag(evals)@evecs}")
    print(f"V@evals@VT: {evecs@np.diag(evals)@evecs.T.conj()}")
    print(f"VT@evals@V: {evecs.T.conj()@np.diag(evals)@evecs}")
    G1=green_H(H, 0.3, S=S)
    #print("G1=", G1)
    # NOTE(review): the eigen_to_G result below is immediately overwritten by
    # the second green_H call, so only the two green_H results are compared.
    evals, evecs= eigh(H, S)
    G2= eigen_to_G(evals, evecs, 0.3, 0)
    G2=green_H(H, 0.3, S=S)
    print(f"G1-G2={G1-G2}")

# run the smoke test at import/execution time
test_eigh()
| [
"scipy.linalg.inv",
"numpy.random.random",
"scipy.linalg.eigh",
"numpy.eye",
"numpy.diag"
] | [((559, 568), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (565, 568), True, 'import numpy as np\n'), ((582, 601), 'scipy.linalg.inv', 'inv', (['(S * energy - H)'], {}), '(S * energy - H)\n', (585, 601), False, 'from scipy.linalg import eigh, inv\n'), ((647, 654), 'scipy.linalg.eigh', 'eigh', (['H'], {}), '(H)\n', (651, 654), False, 'from scipy.linalg import eigh, inv\n'), ((729, 753), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (745, 753), True, 'import numpy as np\n'), ((789, 796), 'scipy.linalg.eigh', 'eigh', (['H'], {}), '(H)\n', (793, 796), False, 'from scipy.linalg import eigh, inv\n'), ((1018, 1028), 'scipy.linalg.eigh', 'eigh', (['H', 'S'], {}), '(H, S)\n', (1022, 1028), False, 'from scipy.linalg import eigh, inv\n'), ((1447, 1457), 'scipy.linalg.eigh', 'eigh', (['H', 'S'], {}), '(H, S)\n', (1451, 1457), False, 'from scipy.linalg import eigh, inv\n'), ((909, 933), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (925, 933), True, 'import numpy as np\n'), ((933, 957), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (949, 957), True, 'import numpy as np\n'), ((985, 994), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (991, 994), True, 'import numpy as np\n'), ((446, 489), 'numpy.diag', 'np.diag', (['(1.0 / (-evals + (energy + efermi)))'], {}), '(1.0 / (-evals + (energy + efermi)))\n', (453, 489), True, 'import numpy as np\n'), ((1225, 1239), 'numpy.diag', 'np.diag', (['evals'], {}), '(evals)\n', (1232, 1239), True, 'import numpy as np\n'), ((1280, 1294), 'numpy.diag', 'np.diag', (['evals'], {}), '(evals)\n', (1287, 1294), True, 'import numpy as np\n'), ((1353, 1367), 'numpy.diag', 'np.diag', (['evals'], {}), '(evals)\n', (1360, 1367), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sympy import nroots, re, im, symbols, sympify
from types import FunctionType
from collections import defaultdict
from tqdm.auto import tqdm
from .varma import Varma
class LedoitPecheShrinkage:
    """
    Given sample eigenvalues lambda_i, i = 1, ..., N, of an estimator E,
    as well as the number of samples T:
    - estimate the eigenvalue density and Hilbert transform of E
    via the (adaptive) Epanechnikov kernel
    [<NAME>., <NAME>., Analytical nonlinear shrinkage of large-dimensional covariance matrices];
    - calculate the Ledoit-Peche shrunk eigenvalues xi_i, i = 1, ..., N.
    """
    def __init__(self, lambdas, T):
        """
        Instantiated with a 1-dimensional array of eigenvalues, lambdas, and a number of samples, T.
        Calculates:
        - N = number of eigenvalues (length of lambdas);
        - q = N/T;
        - rescaling factor "bandwidth" for kernel estimation; set to T^(-1/3) for each eigenvalue;
        - Epanechnikov kernel estimation of the density and Hilbert transform of lambdas;
        - intermediate and final Ledoit-Peche variables alpha_i, beta_i, u_i, xi_LP_i;
        - Epanechnikov kernel estimation of the density and Hilbert transform of xi_LP_i.
        """
        self.lambdas = np.array(lambdas)
        self.T = T
        self.N = len(self.lambdas)
        self.q = self.N / self.T
        # one bandwidth value T^(-1/3) per eigenvalue
        self.bandwidth = (self.T ** (-1/3)) * np.ones(self.N)
        self.name = 'Ledoit-Peche'
        self._calculate_epanechnikov_estimates(eigval='lambdas')
        self._calculate_LP_variables()
        self._calculate_epanechnikov_estimates(eigval='xi_LP')
        # placeholders filled in later by predict() and by the 'xi' kernel estimation
        self.batch_idx = None
        self.N_batch = None
        self.chi_roots_branch = None
        self.xi_branch = None
        self.M_of_N_branch = None
        self.n_branches = None
        self.xi = None
        self.chi_roots = None
        self.chi_grads = None
        self.xi_grads = None
        self.xi_kernel_density = None
        self.xi_kernel_Hilbert = None
    def _calculate_epanechnikov_estimates(self, eigval='lambdas'):
        """
        Perform Epanechnikov kernel estimation of the density and Hilbert transform
        of a given array "eigval", be it "lambdas", "xi_LP" or "xi".
        """
        if eigval=='lambdas':
            self.lambdas_kernel_density, self.lambdas_kernel_Hilbert = __class__.epanechnikov_estimates(
                x=self.lambdas,
                bandwidth=self.bandwidth
            )
        elif eigval=='xi_LP':
            self.xi_LP_kernel_density, self.xi_LP_kernel_Hilbert = __class__.epanechnikov_estimates(
                x=self.xi_LP,
                bandwidth=self.bandwidth
            )
        elif eigval=='xi':
            # make sure a full-length xi is available before estimating on it
            if self.xi is None or len(self.xi) < self.N:
                self.predict()
            self.xi_kernel_density, self.xi_kernel_Hilbert = __class__.epanechnikov_estimates(
                x=self.xi,
                bandwidth=self.bandwidth
            )
        else:
            raise Exception('The argument "eigval" must be either "lambdas", "xi_LP", or "xi".')
    def _calculate_LP_variables(self):
        """
        Calculate the intermediate variables alpha_i and beta_i,
        plus the complex numbers u_i = alpha_i + i * beta_i,
        of the Ledoit-Peche nonlinear shrinkage xi_i = xi_LP_i, which also compute here,
        of the sample eigenvalues lambda_i.
        """
        self.alpha = self.q * (self.lambdas * self.lambdas_kernel_Hilbert - 1.)
        self.beta = np.pi * self.q * self.lambdas * self.lambdas_kernel_density
        self.u_range = np.array([complex(al, be) for al, be in zip(self.alpha, self.beta)])
        # Ledoit-Peche formula: xi_LP = lambda / |u + 1|^2
        self.xi_LP = self.lambdas / ((self.alpha + 1.) ** 2 + self.beta ** 2)
    def _set_batch_idx(self, batch_idx=None):
        """
        Choose indices i, from 0 to (N - 1), on which to perform the calculation of xi_i.
        By default, all the indices are chosen, i.e. we calculate for every i = 0, ..., N - 1.
        """
        self.batch_idx = list(range(self.N)) if batch_idx is None else batch_idx
        self.N_batch = len(self.batch_idx)
    def predict(self, batch_idx=None):
        """
        Calculate the Ledoit-Peche nonlinear shrinkage xi_i
        of the sample eigenvalues lambda_i.
        Note: We've already calculated xi_LP_i = xi_i, so this is a simple substitution.
        A "predict" method is needed for consistency here and in every child class.
        """
        self._set_batch_idx(batch_idx=batch_idx)
        self.xi = self.xi_LP[self.batch_idx]
        self.xi_branch = [self.xi]
        self.n_branches = len(self.xi_branch)
    def hist(
        self,
        show_lambdas=False,
        show_lambdas_density=False,
        show_lambdas_Hilbert=False,
        show_xi=False,
        show_xi_density=False,
        show_xi_Hilbert=False,
        show_xi_LP=False,
        show_xi_LP_density=False,
        show_xi_LP_Hilbert=False,
        bins=None,
        xlim=None,
        ylim=None,
        legend=True,
        savefig=None
    ):
        """
        Plot three histograms:
        - of the sample eigenvalues lambda_i,
        - the Ledoit-Peche shrunk eigenvalues xi_LP_i,
        - and the shrunk eigenvalues xi_i,
        optionally with their Epanechnikov-kernel-estimated density,
        and/or Hilbert transforms.
        (In other words, any combination of these nine plots.)
        """
        # default bin count scales with the number of eigenvalues
        bns = self.N // 4 if bins is None else bins
        if show_lambdas:
            plt.hist(
                self.lambdas,
                bins=bns,
                alpha=0.5,
                color='tab:orange',
                density=True,
                label='sample eigval'
            )
        if show_lambdas_density:
            plt.plot(
                self.lambdas,
                self.lambdas_kernel_density,
                color='tab:red',
                label='sample eigval density'
            )
        if show_lambdas_Hilbert:
            plt.plot(
                self.lambdas,
                self.lambdas_kernel_Hilbert,
                color='tab:green',
                label='sample eigval Hilbert'
            )
        if show_xi_LP:
            plt.hist(
                self.xi_LP,
                bins=bns,
                alpha=0.5,
                color='tab:pink',
                density=True,
                label=f'Ledoit-Peche shrunk eigval'
            )
        if show_xi_LP_density:
            plt.plot(
                self.xi_LP,
                self.xi_LP_kernel_density,
                color='brown',
                label=f'Ledoit-Peche shrunk eigval density'
            )
        if show_xi_LP_Hilbert:
            plt.plot(
                self.xi_LP,
                self.xi_LP_kernel_Hilbert,
                color='tab:cyan',
                label=f'Ledoit-Peche shrunk eigval Hilbert'
            )
        if show_xi:
            # lazily compute xi if it has not been predicted for the full set yet
            if self.xi is None or len(self.xi) < self.N:
                self.predict()
            plt.hist(
                self.xi,
                bins=bns,
                alpha=0.5,
                color='fuchsia',
                density=True,
                label=f'{self.name} shrunk eigval'
            )
        if show_xi_density:
            if self.xi_kernel_density is None:
                self._calculate_epanechnikov_estimates(eigval='xi')
            plt.plot(
                self.xi,
                self.xi_kernel_density,
                color='purple',
                label=f'{self.name} shrunk eigval density'
            )
        if show_xi_Hilbert:
            if self.xi_kernel_Hilbert is None:
                self._calculate_epanechnikov_estimates(eigval='xi')
            plt.plot(
                self.xi,
                self.xi_kernel_Hilbert,
                color='tab:olive',
                label=f'{self.name} shrunk eigval Hilbert'
            )
        plt.xlim(xlim)
        plt.ylim(ylim)
        if legend:
            plt.legend()
        plt.savefig(fname=savefig) if savefig else plt.show()
    def plot(self, branch=None, xlim=None, ylim=None, legend=True, savefig=None):
        """
        Plot the shrunk eigenvalues xi_i (vertical axis)
        versus the sample eigenvalues lambda_i (horizontal axis).
        Often, there are multiple solutions for xi_i (branches), stored in "xi_branch",
        and you can plot all or some of them here, besides the correct branch xi_i:
        set "branch" to either "all", or a string number of a branch (one-indexed),
        or a list of such string numbers.
        """
        if self.xi is None or len(self.xi) < self.N:
            self.predict()
        if branch is None:
            plt.plot(
                self.lambdas,
                self.xi,
                color='purple',
                label=f'{self.name} shrunk vs. sample eigval'
            )
        else:
            # normalize the branch selector to a list of zero-based indices
            if branch=='all':
                branches_to_plot = range(self.n_branches)
            elif isinstance(branch, list):
                branches_to_plot = [int(br) - 1 for br in branch]
            elif isinstance(branch, str):
                branches_to_plot = [int(branch) - 1]
            for br in branches_to_plot:
                plt.plot(
                    self.lambdas,
                    self.xi_branch[br],
                    color=cm.hot(0.2 + 0.6 * br / self.n_branches),
                    label=f'{self.name} shrunk (branch {br + 1}) vs. sample eigval'
                )
        plt.xlabel('lambda')
        plt.ylabel('xi')
        plt.xlim(xlim)
        plt.ylim(ylim)
        if legend:
            plt.legend()
        plt.savefig(fname=savefig) if savefig else plt.show()
    def plot_with_oracle(self, xi_oracle, show_LP=False, xlim=None, ylim=None, legend=True, savefig=None):
        """
        Plot the shrunk eigenvalues xi_i (vertical axis)
        versus the sample eigenvalues lambda_i (horizontal axis).
        Optionally, make an analogous plot of the Ledoit-Peche xi_LP_i versus lambda_i.
        Moreover, make a scatter plot of the oracle eigenvalues xi_oracle_i versus lambda_i.
        """
        plt.plot(
            self.lambdas,
            self.xi,
            color='purple',
            label=f'{self.name} shrunk vs. sample eigval'
        )
        if show_LP:
            plt.plot(
                self.lambdas,
                self.xi_LP,
                color='brown',
                label=f'Ledoit-Peche shrunk vs. sample eigval'
            )
        plt.scatter(
            self.lambdas,
            xi_oracle,
            marker='^',
            alpha=0.3,
            label='oracle eigval'
        )
        plt.xlabel('lambda')
        plt.ylabel('xi')
        plt.xlim(xlim)
        plt.ylim(ylim)
        if legend:
            plt.legend()
        plt.savefig(fname=savefig) if savefig else plt.show()
    @staticmethod
    def epanechnikov(x):
        """
        Calculate the Epanechnikov kernel and its Hilbert transform
        at the elements of a given array x.
        """
        assert isinstance(x, np.ndarray)
        # kernel on its support [-sqrt(5), sqrt(5)]
        y = (3 / (4 * np.sqrt(5))) * (1 - x ** 2 / 5)
        # the log term of the Hilbert transform; zeroed at the support's
        # endpoints (within 1e-10) to avoid log of zero
        z = np.where(
            np.abs(np.abs(x) - np.sqrt(5)) < 1e-10,
            0.,
            y * np.log(np.abs((np.sqrt(5) - x) / (np.sqrt(5) + x)))
        )
        kernel_density = np.maximum(y, 0)
        kernel_Hilbert = 0.3 * x - z
        return kernel_density, kernel_Hilbert
    @staticmethod
    def epanechnikov_estimates(x, bandwidth):
        """
        Perform Epanechnikov-kernel estimation
        of the density and Hilbert transform of an array x
        (using the "bandwidth" rescaling factor).
        """
        # adaptive per-point kernel widths
        l1 = x * bandwidth
        # pairwise rescaled differences, shape (len(x), len(x))
        l2 = np.array([(x_ - x) / l1 for x_ in x])
        l3, l4 = __class__.epanechnikov(l2)
        # average the kernel contributions over the sample
        kernel_density = (l3 / l1).mean(axis=1)
        kernel_Hilbert = (l4 / l1).mean(axis=1)
        return kernel_density, kernel_Hilbert
class EwmaShrinkage(LedoitPecheShrinkage):
    """
    Given sample eigenvalues lambda_i, i = 1, ..., N, of an estimator E,
    as well as the number of samples T,
    perform nonlinear shrinkage, computing shrunk eigenvalues xi_i,
    in the case of auto-correlations given by the EWMA model.
    """
    def __init__(self, lambdas, T, delta):
        """
        :param lambdas: 1-d array of sample eigenvalues
        :param T: number of samples
        :param delta: EWMA decay parameter of the auto-correlation model
        """
        super().__init__(lambdas=lambdas, T=T)
        self.name = 'EWMA'
        self.delta = delta
    def predict(self):
        """
        Calculate the EWMA nonlinear shrinkage xi_i
        of the sample eigenvalues lambda_i.
        """
        if self.alpha is None or self.beta is None:
            # BUG FIX: the parent's method is named _calculate_LP_variables
            # (with a leading underscore); the original called the non-existent
            # calculate_LP_variables, raising AttributeError whenever this
            # fallback branch was taken.
            super()._calculate_LP_variables()
        ed = np.exp(self.delta)
        eda = np.exp(self.delta * self.alpha)
        # closed-form EWMA shrinkage formula, evaluated element-wise on the
        # Ledoit-Peche variables alpha_i, beta_i
        self.xi = (
            (self.lambdas / self.beta) * eda * (ed - 1.) ** 2 * np.sin(self.delta * self.beta)
            / self.delta
            / ((ed * eda) ** 2 + 1. - 2 * ed * eda * np.cos(self.delta * self.beta))
        )
        self.xi_branch = [self.xi]
        self.n_branches = len(self.xi_branch)
class VarmaShrinkage(LedoitPecheShrinkage, Varma):
    """
    Given sample eigenvalues lambda_i, i = 1, ..., N, of an estimator E,
    as well as the number of samples T,
    perform nonlinear shrinkage, computing shrunk eigenvalues xi_i,
    in the case of auto-correlations given by the VARMA model.
    """
    def __init__(self, lambdas, T):
        """
        "Adapt" the model to the sample eigenvalues lambdas (and to T).
        """
        # Varma.__init__ is deliberately deferred to set_params();
        # only the Ledoit-Peche machinery is initialized here.
        LedoitPecheShrinkage.__init__(self, lambdas=lambdas, T=T)
    def set_params(self, **kwargs):
        """
        Set the model's parameters: either tau or a_list and b_list,
        by initializing the parent class handling the parameters;
        it also calculates the autocorrelation matrix A.
        """
        Varma.__init__(self, T=self.T, get_chi_equations=True, **kwargs)
    def get_params(self):
        """
        Return a dictionary with the model's parameters.
        """
        return {
            'a_list': self.a_list,
            'b_list': self.b_list
        }
    def predict(self, batch_idx=None, calculate_grads=False):
        """
        Calculate the VARMA nonlinear shrinkage xi_i
        of the sample eigenvalues lambda_i,
        for several solvable cases.
        """
        self._solve_chi_equation(
            batch_idx=batch_idx,
            calculate_grads=calculate_grads
        )
    def _solve_chi_equation(self, batch_idx=None, calculate_grads=False):
        """
        For each value of u_i = alpha_i + i * beta_i, for i = 1, ..., N,
        solve an algebraic equation pol = 0 in the variable chi,
        where "pol" will come from a separate file with VARMA polynomials,
        with coefficients depending on u_i, as well as the model's parameters.
        Take the imaginary part of each solution,
        and collect them in branches of the shrunk eigenvalues xi_i.
        Moreover, create a list of the values of M_A(1/chi) for each u_i,
        which should be equal to u_i on the correct branch.
        (Recall, M_A(z) is the M-transform of A,
        and chi = chi_A(z) = 1/N_A(z) is the chi-transform of A.)
        """
        self._set_batch_idx(batch_idx=batch_idx)
        u_batch = self.u_range[self.batch_idx]
        # solve the fundamental polynomial equation in chi at each u in the batch
        # (which will produce n_branches solutions at each u)
        chi = symbols('chi')
        self.chi_roots_branch = []
        chi_roots_im_branch = []
        self.M_of_N_branch = []
        for u in u_batch:
            # self.pol and self.a_list/b_list are provided by Varma (set_params)
            chi_roots = nroots(
                self.pol(sympify(u), chi, *self.a_list, *self.b_list)
            )
            self.chi_roots_branch.append(chi_roots)
            chi_roots_im = [
                float(im(chi_root))
                for chi_root in chi_roots
            ]
            chi_roots_im_branch.append(chi_roots_im)
            # M-transform of A evaluated at z = 1/chi for each candidate root
            M_of_N = [
                self.calculate_M_transform_A(
                    z_re=float(re(1 / chi_root)),
                    z_im=float(im(1 / chi_root)),
                    method='eig'
                )
                for chi_root in chi_roots
            ]
            self.M_of_N_branch.append(M_of_N)
        # reshape to (n_branches, N_batch)
        self.chi_roots_branch = np.array(self.chi_roots_branch).T
        prefactor = self.lambdas[self.batch_idx] / self.beta[self.batch_idx]
        self.xi_branch = prefactor * np.array(chi_roots_im_branch).T
        self.M_of_N_branch = np.array(self.M_of_N_branch).T
        self.n_branches = len(self.xi_branch)
        # sort the branches according to xi, for convenience
        sort_idx = np.argsort(self.xi_branch, axis=0)
        self.chi_roots_branch = np.take_along_axis(self.chi_roots_branch, sort_idx, axis=0)
        self.xi_branch = np.take_along_axis(self.xi_branch, sort_idx, axis=0)
        self.M_of_N_branch = np.take_along_axis(self.M_of_N_branch, sort_idx, axis=0)
        # choose one "good" branch xi, by which we mean the one on which M_A(N_A(u)) = u
        # these are now 1D arrays of length N_batch
        sort_idx = np.argsort(np.abs(self.M_of_N_branch - u_batch), axis=0)
        self.chi_roots = np.take_along_axis(self.chi_roots_branch, sort_idx, axis=0)[0]
        self.xi = np.take_along_axis(self.xi_branch, sort_idx, axis=0)[0]
        # calculate gradients of the solution chi (also the shrunk eigenvalues xi) w.r.t. VARMA parameters
        if calculate_grads:
            self.chi_grads = defaultdict(list)
            for u, chi in zip(u_batch, self.chi_roots):
                # implicit-function theorem: d(chi)/d(param) = -(dpol/dparam)/(dpol/dchi)
                pol_grad_chi = self.pol_grads['chi'](sympify(u), chi, *self.a_list, *self.b_list)
                for param in self.ab:
                    self.chi_grads[param].append(
                        (- self.pol_grads[param](sympify(u), chi, *self.a_list, *self.b_list) / pol_grad_chi).evalf()
                    )
            self.chi_grads = dict(self.chi_grads)
            self.xi_grads = {}
            for param in self.ab:
                self.xi_grads[param] = prefactor * np.array([
                    float(im(chi_grad)) for chi_grad in self.chi_grads[param]
                ])
    def fit(
        self,
        xi_oracle,
        loss='mse',
        loss_grad=None,
        optimizer='brute',
        **kwargs
    ):
        """
        Find the VARMA parameters
        for which an error (specified by the loss function)
        between the shrunk eigenvalues and the given oracle eigenvalues
        is minimal.
        Use one of several provided optimization methods ("optimizer").
        """
        self._set_loss(loss=loss, loss_grad=loss_grad)
        self.loss_list = []
        if optimizer=='brute':
            # exhaustive search over a user-supplied grid of parameter dicts
            self.grid = kwargs.get('grid')
            for params_dict in tqdm(self.grid):
                self.set_params(**params_dict)
                self.predict()
                self.loss_list.append(
                    np.mean(self.loss(xi_oracle, self.xi))
                )
        elif optimizer=='gd':
            # mini-batch gradient descent on the VARMA parameters
            lr = kwargs.get('lr')
            n_epochs = kwargs.get('n_epochs')
            N_batch = kwargs.get('N_batch')
            n_batches = int(self.N // N_batch)
            r1 = kwargs.get('r1', None)
            r2 = kwargs.get('r2', None)
            self._set_random_params(r1, r2)
            self.grid = []
            rng = np.random.default_rng()
            for epoch in range(n_epochs):
                print(f'Epoch {epoch} commencing...')
                for _ in tqdm(range(n_batches)):
                    params_dict = self.get_params()
                    # sample a random batch of eigenvalue indices (with replacement)
                    batch_idx=rng.integers(low=0, high=self.N, size=N_batch) if N_batch < self.N else None
                    self.predict(batch_idx=batch_idx, calculate_grads=True)
                    self.loss_grads = [
                        np.mean(
                            self.loss_grad(xi_oracle[self.batch_idx], self.xi)
                            * self.xi_grads[param]
                        )
                        for param in self.ab
                    ]
                    # gradient step on a_list and b_list
                    # (self.r2 is presumably set by Varma — TODO confirm)
                    self.set_params(
                        a_list=[
                            a_prev - lr * gd
                            for a_prev, gd in zip(params_dict['a_list'], self.loss_grads[:(self.r2 + 1)])
                        ],
                        b_list=[
                            b_prev - lr * gd
                            for b_prev, gd in zip(params_dict['b_list'], self.loss_grads[(self.r2 + 1):])
                        ]
                    )
                print('Making prediction on the whole dataset...')
                self.grid.append(params_dict)
                self.predict()
                self.loss_list.append(
                    np.mean(self.loss(xi_oracle, self.xi))
                )
                print('... done.')
        # keep the best parameters seen and refit on them
        idx_best = np.argmin(self.loss_list)
        self.params_dict_best = self.grid[idx_best]
        self.set_params(**self.params_dict_best)
        self.predict(calculate_grads=True)
        self.loss_best = self.loss_list[idx_best]
        # reference loss of the plain Ledoit-Peche shrinkage, for comparison
        self.loss_LP = np.mean(self.loss(xi_oracle, self.xi_LP))
    def _set_loss(self, loss, loss_grad=None):
        """
        Set the loss function (and its gradient w.r.t. xi_pred),
        being a function of xi_true and xi_pred,
        based on the "loss" argument.
        """
        if type(loss)==FunctionType:
            self.loss = loss
            self.loss_grad = loss_grad
        elif loss=='mse':
            self.loss = lambda xi_true, xi_pred: (xi_true - xi_pred) ** 2
            self.loss_grad = lambda xi_true, xi_pred: -2 * (xi_true - xi_pred)
        else:
            raise Exception('Unknown error function.')
    def _set_random_params(self, r1=None, r2=None):
        """
        Initialize the VARMA parameters randomly: a single random tau when no
        orders are given, otherwise small random a_list/b_list of orders r1, r2.
        """
        rng = np.random.default_rng()
        if r1 is None and r2 is None:
            self.set_params(
                tau=rng.random()
            )
        else:
            # small perturbations around a_list = [1, 0, ...], b_list = [0, ...]
            eps = 0.1
            random_list = list(eps * rng.random(size=(r1 + r2 + 1)))
            self.set_params(
                a_list=[1. - random_list[0]] + random_list[1:(r2 + 1)],
                b_list=random_list[(r2 + 1):]
            )
"numpy.maximum",
"numpy.abs",
"sympy.im",
"numpy.ones",
"numpy.argmin",
"numpy.argsort",
"numpy.random.default_rng",
"collections.defaultdict",
"numpy.sin",
"numpy.exp",
"sympy.re",
"sympy.sympify",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"tqdm.a... | [((1355, 1372), 'numpy.array', 'np.array', (['lambdas'], {}), '(lambdas)\n', (1363, 1372), True, 'import numpy as np\n'), ((8155, 8169), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (8163, 8169), True, 'import matplotlib.pyplot as plt\n'), ((8178, 8192), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (8186, 8192), True, 'import matplotlib.pyplot as plt\n'), ((9768, 9788), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""lambda"""'], {}), "('lambda')\n", (9778, 9788), True, 'import matplotlib.pyplot as plt\n'), ((9797, 9813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""xi"""'], {}), "('xi')\n", (9807, 9813), True, 'import matplotlib.pyplot as plt\n'), ((9822, 9836), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (9830, 9836), True, 'import matplotlib.pyplot as plt\n'), ((9845, 9859), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (9853, 9859), True, 'import matplotlib.pyplot as plt\n'), ((10415, 10514), 'matplotlib.pyplot.plot', 'plt.plot', (['self.lambdas', 'self.xi'], {'color': '"""purple"""', 'label': 'f"""{self.name} shrunk vs. sample eigval"""'}), "(self.lambdas, self.xi, color='purple', label=\n f'{self.name} shrunk vs. 
sample eigval')\n", (10423, 10514), True, 'import matplotlib.pyplot as plt\n'), ((10784, 10871), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.lambdas', 'xi_oracle'], {'marker': '"""^"""', 'alpha': '(0.3)', 'label': '"""oracle eigval"""'}), "(self.lambdas, xi_oracle, marker='^', alpha=0.3, label=\n 'oracle eigval')\n", (10795, 10871), True, 'import matplotlib.pyplot as plt\n'), ((10946, 10966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""lambda"""'], {}), "('lambda')\n", (10956, 10966), True, 'import matplotlib.pyplot as plt\n'), ((10975, 10991), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""xi"""'], {}), "('xi')\n", (10985, 10991), True, 'import matplotlib.pyplot as plt\n'), ((11000, 11014), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (11008, 11014), True, 'import matplotlib.pyplot as plt\n'), ((11023, 11037), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (11031, 11037), True, 'import matplotlib.pyplot as plt\n'), ((11635, 11651), 'numpy.maximum', 'np.maximum', (['y', '(0)'], {}), '(y, 0)\n', (11645, 11651), True, 'import numpy as np\n'), ((12034, 12073), 'numpy.array', 'np.array', (['[((x_ - x) / l1) for x_ in x]'], {}), '([((x_ - x) / l1) for x_ in x])\n', (12042, 12073), True, 'import numpy as np\n'), ((12980, 12998), 'numpy.exp', 'np.exp', (['self.delta'], {}), '(self.delta)\n', (12986, 12998), True, 'import numpy as np\n'), ((13013, 13044), 'numpy.exp', 'np.exp', (['(self.delta * self.alpha)'], {}), '(self.delta * self.alpha)\n', (13019, 13044), True, 'import numpy as np\n'), ((15781, 15795), 'sympy.symbols', 'symbols', (['"""chi"""'], {}), "('chi')\n", (15788, 15795), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((17052, 17086), 'numpy.argsort', 'np.argsort', (['self.xi_branch'], {'axis': '(0)'}), '(self.xi_branch, axis=0)\n', (17062, 17086), True, 'import numpy as np\n'), ((17120, 17179), 'numpy.take_along_axis', 'np.take_along_axis', (['self.chi_roots_branch', 'sort_idx'], {'axis': 
'(0)'}), '(self.chi_roots_branch, sort_idx, axis=0)\n', (17138, 17179), True, 'import numpy as np\n'), ((17205, 17257), 'numpy.take_along_axis', 'np.take_along_axis', (['self.xi_branch', 'sort_idx'], {'axis': '(0)'}), '(self.xi_branch, sort_idx, axis=0)\n', (17223, 17257), True, 'import numpy as np\n'), ((17287, 17343), 'numpy.take_along_axis', 'np.take_along_axis', (['self.M_of_N_branch', 'sort_idx'], {'axis': '(0)'}), '(self.M_of_N_branch, sort_idx, axis=0)\n', (17305, 17343), True, 'import numpy as np\n'), ((21315, 21340), 'numpy.argmin', 'np.argmin', (['self.loss_list'], {}), '(self.loss_list)\n', (21324, 21340), True, 'import numpy as np\n'), ((22254, 22277), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (22275, 22277), True, 'import numpy as np\n'), ((1524, 1539), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (1531, 1539), True, 'import numpy as np\n'), ((5689, 5794), 'matplotlib.pyplot.hist', 'plt.hist', (['self.lambdas'], {'bins': 'bns', 'alpha': '(0.5)', 'color': '"""tab:orange"""', 'density': '(True)', 'label': '"""sample eigval"""'}), "(self.lambdas, bins=bns, alpha=0.5, color='tab:orange', density=\n True, label='sample eigval')\n", (5697, 5794), True, 'import matplotlib.pyplot as plt\n'), ((5954, 6058), 'matplotlib.pyplot.plot', 'plt.plot', (['self.lambdas', 'self.lambdas_kernel_density'], {'color': '"""tab:red"""', 'label': '"""sample eigval density"""'}), "(self.lambdas, self.lambdas_kernel_density, color='tab:red', label=\n 'sample eigval density')\n", (5962, 6058), True, 'import matplotlib.pyplot as plt\n'), ((6186, 6291), 'matplotlib.pyplot.plot', 'plt.plot', (['self.lambdas', 'self.lambdas_kernel_Hilbert'], {'color': '"""tab:green"""', 'label': '"""sample eigval Hilbert"""'}), "(self.lambdas, self.lambdas_kernel_Hilbert, color='tab:green',\n label='sample eigval Hilbert')\n", (6194, 6291), True, 'import matplotlib.pyplot as plt\n'), ((6410, 6524), 'matplotlib.pyplot.hist', 'plt.hist', (['self.xi_LP'], {'bins': 
'bns', 'alpha': '(0.5)', 'color': '"""tab:pink"""', 'density': '(True)', 'label': 'f"""Ledoit-Peche shrunk eigval"""'}), "(self.xi_LP, bins=bns, alpha=0.5, color='tab:pink', density=True,\n label=f'Ledoit-Peche shrunk eigval')\n", (6418, 6524), True, 'import matplotlib.pyplot as plt\n'), ((6683, 6795), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xi_LP', 'self.xi_LP_kernel_density'], {'color': '"""brown"""', 'label': 'f"""Ledoit-Peche shrunk eigval density"""'}), "(self.xi_LP, self.xi_LP_kernel_density, color='brown', label=\n f'Ledoit-Peche shrunk eigval density')\n", (6691, 6795), True, 'import matplotlib.pyplot as plt\n'), ((6921, 7036), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xi_LP', 'self.xi_LP_kernel_Hilbert'], {'color': '"""tab:cyan"""', 'label': 'f"""Ledoit-Peche shrunk eigval Hilbert"""'}), "(self.xi_LP, self.xi_LP_kernel_Hilbert, color='tab:cyan', label=\n f'Ledoit-Peche shrunk eigval Hilbert')\n", (6929, 7036), True, 'import matplotlib.pyplot as plt\n'), ((7239, 7349), 'matplotlib.pyplot.hist', 'plt.hist', (['self.xi'], {'bins': 'bns', 'alpha': '(0.5)', 'color': '"""fuchsia"""', 'density': '(True)', 'label': 'f"""{self.name} shrunk eigval"""'}), "(self.xi, bins=bns, alpha=0.5, color='fuchsia', density=True, label\n =f'{self.name} shrunk eigval')\n", (7247, 7349), True, 'import matplotlib.pyplot as plt\n'), ((7619, 7725), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xi', 'self.xi_kernel_density'], {'color': '"""purple"""', 'label': 'f"""{self.name} shrunk eigval density"""'}), "(self.xi, self.xi_kernel_density, color='purple', label=\n f'{self.name} shrunk eigval density')\n", (7627, 7725), True, 'import matplotlib.pyplot as plt\n'), ((7963, 8072), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xi', 'self.xi_kernel_Hilbert'], {'color': '"""tab:olive"""', 'label': 'f"""{self.name} shrunk eigval Hilbert"""'}), "(self.xi, self.xi_kernel_Hilbert, color='tab:olive', label=\n f'{self.name} shrunk eigval Hilbert')\n", (7971, 8072), True, 'import 
matplotlib.pyplot as plt\n'), ((8224, 8236), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8234, 8236), True, 'import matplotlib.pyplot as plt\n'), ((8245, 8271), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'savefig'}), '(fname=savefig)\n', (8256, 8271), True, 'import matplotlib.pyplot as plt\n'), ((8288, 8298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8296, 8298), True, 'import matplotlib.pyplot as plt\n'), ((8956, 9055), 'matplotlib.pyplot.plot', 'plt.plot', (['self.lambdas', 'self.xi'], {'color': '"""purple"""', 'label': 'f"""{self.name} shrunk vs. sample eigval"""'}), "(self.lambdas, self.xi, color='purple', label=\n f'{self.name} shrunk vs. sample eigval')\n", (8964, 9055), True, 'import matplotlib.pyplot as plt\n'), ((9891, 9903), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9901, 9903), True, 'import matplotlib.pyplot as plt\n'), ((9912, 9938), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'savefig'}), '(fname=savefig)\n', (9923, 9938), True, 'import matplotlib.pyplot as plt\n'), ((9955, 9965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9963, 9965), True, 'import matplotlib.pyplot as plt\n'), ((10600, 10702), 'matplotlib.pyplot.plot', 'plt.plot', (['self.lambdas', 'self.xi_LP'], {'color': '"""brown"""', 'label': 'f"""Ledoit-Peche shrunk vs. sample eigval"""'}), "(self.lambdas, self.xi_LP, color='brown', label=\n f'Ledoit-Peche shrunk vs. 
sample eigval')\n", (10608, 10702), True, 'import matplotlib.pyplot as plt\n'), ((11069, 11081), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11079, 11081), True, 'import matplotlib.pyplot as plt\n'), ((11090, 11116), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'savefig'}), '(fname=savefig)\n', (11101, 11116), True, 'import matplotlib.pyplot as plt\n'), ((11133, 11143), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11141, 11143), True, 'import matplotlib.pyplot as plt\n'), ((16683, 16714), 'numpy.array', 'np.array', (['self.chi_roots_branch'], {}), '(self.chi_roots_branch)\n', (16691, 16714), True, 'import numpy as np\n'), ((16892, 16920), 'numpy.array', 'np.array', (['self.M_of_N_branch'], {}), '(self.M_of_N_branch)\n', (16900, 16920), True, 'import numpy as np\n'), ((17517, 17553), 'numpy.abs', 'np.abs', (['(self.M_of_N_branch - u_batch)'], {}), '(self.M_of_N_branch - u_batch)\n', (17523, 17553), True, 'import numpy as np\n'), ((17589, 17648), 'numpy.take_along_axis', 'np.take_along_axis', (['self.chi_roots_branch', 'sort_idx'], {'axis': '(0)'}), '(self.chi_roots_branch, sort_idx, axis=0)\n', (17607, 17648), True, 'import numpy as np\n'), ((17670, 17722), 'numpy.take_along_axis', 'np.take_along_axis', (['self.xi_branch', 'sort_idx'], {'axis': '(0)'}), '(self.xi_branch, sort_idx, axis=0)\n', (17688, 17722), True, 'import numpy as np\n'), ((17900, 17917), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (17911, 17917), False, 'from collections import defaultdict\n'), ((19203, 19218), 'tqdm.auto.tqdm', 'tqdm', (['self.grid'], {}), '(self.grid)\n', (19207, 19218), False, 'from tqdm.auto import tqdm\n'), ((16831, 16860), 'numpy.array', 'np.array', (['chi_roots_im_branch'], {}), '(chi_roots_im_branch)\n', (16839, 16860), True, 'import numpy as np\n'), ((19796, 19819), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (19817, 19819), True, 'import numpy as np\n'), ((11401, 11411), 
'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11408, 11411), True, 'import numpy as np\n'), ((13138, 13168), 'numpy.sin', 'np.sin', (['(self.delta * self.beta)'], {}), '(self.delta * self.beta)\n', (13144, 13168), True, 'import numpy as np\n'), ((13247, 13277), 'numpy.cos', 'np.cos', (['(self.delta * self.beta)'], {}), '(self.delta * self.beta)\n', (13253, 13277), True, 'import numpy as np\n'), ((15981, 15991), 'sympy.sympify', 'sympify', (['u'], {}), '(u)\n', (15988, 15991), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((16144, 16156), 'sympy.im', 'im', (['chi_root'], {}), '(chi_root)\n', (16146, 16156), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((18027, 18037), 'sympy.sympify', 'sympify', (['u'], {}), '(u)\n', (18034, 18037), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((9615, 9655), 'matplotlib.cm.hot', 'cm.hot', (['(0.2 + 0.6 * br / self.n_branches)'], {}), '(0.2 + 0.6 * br / self.n_branches)\n', (9621, 9655), False, 'from matplotlib import cm\n'), ((11474, 11483), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (11480, 11483), True, 'import numpy as np\n'), ((11486, 11496), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11493, 11496), True, 'import numpy as np\n'), ((16368, 16384), 'sympy.re', 're', (['(1 / chi_root)'], {}), '(1 / chi_root)\n', (16370, 16384), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((16418, 16434), 'sympy.im', 'im', (['(1 / chi_root)'], {}), '(1 / chi_root)\n', (16420, 16434), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((11554, 11564), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11561, 11564), True, 'import numpy as np\n'), ((11573, 11583), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11580, 11583), True, 'import numpy as np\n'), ((18504, 18516), 'sympy.im', 'im', (['chi_grad'], {}), '(chi_grad)\n', (18506, 18516), False, 'from sympy import nroots, re, im, symbols, sympify\n'), ((18209, 18219), 'sympy.sympify', 'sympify', 
(['u'], {}), '(u)\n', (18216, 18219), False, 'from sympy import nroots, re, im, symbols, sympify\n')] |
import numpy as np
# Adam optimizer
class OptimizerADAM:
    """
    Adam (Adaptive Moment Estimation) optimizer.

    Maintains per-parameter first-moment (momentum) and second-moment
    (squared-gradient) running averages with bias correction, following
    Kingma & Ba's update rule.
    """
    def __init__(
            self,
            learning_rate=0.001,
            decay=0.,
            epsilon=1e-7,
            beta_1=0.9,
            beta_2=0.999
    ):
        """
        Set up the optimizer hyper-parameters.
        :param learning_rate: Base step size.
        :param decay: Learning-rate decay factor (0 disables decay).
        :param epsilon: Small constant preventing division by zero.
        :param beta_1: Exponential decay rate of the first moment.
        :param beta_2: Exponential decay rate of the second moment.
        """
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon
        self.beta_1 = beta_1
        self.beta_2 = beta_2

    def pre_update_params(self):
        """
        Refresh the effective learning rate before any layer update.
        Call once per training step, ahead of all update_params() calls.
        """
        if self.decay:
            decay_factor = 1. / (1. + self.decay * self.iterations)
            self.current_learning_rate = self.learning_rate * decay_factor

    def update_params(self, layer):
        """
        Apply one Adam step to a layer's weights and biases.
        :param layer: Layer exposing weights/biases and their gradients
                      (dweights/dbiases).
        """
        # Lazily attach momentum/cache buffers on first use.
        if not hasattr(layer, 'weight_cache'):
            layer.weight_momentums = np.zeros_like(layer.weights)
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_momentums = np.zeros_like(layer.biases)
            layer.bias_cache = np.zeros_like(layer.biases)

        b1, b2 = self.beta_1, self.beta_2
        # Step count is 1-based for the bias-correction terms.
        t = self.iterations + 1

        # First-moment (momentum) running averages.
        layer.weight_momentums = \
            b1 * layer.weight_momentums + (1 - b1) * layer.dweights
        layer.bias_momentums = \
            b1 * layer.bias_momentums + (1 - b1) * layer.dbiases

        # Bias-corrected first moments.
        m_hat_w = layer.weight_momentums / (1 - b1 ** t)
        m_hat_b = layer.bias_momentums / (1 - b1 ** t)

        # Second-moment (squared gradient) running averages.
        layer.weight_cache = \
            b2 * layer.weight_cache + (1 - b2) * layer.dweights**2
        layer.bias_cache = \
            b2 * layer.bias_cache + (1 - b2) * layer.dbiases**2

        # Bias-corrected second moments.
        v_hat_w = layer.weight_cache / (1 - b2 ** t)
        v_hat_b = layer.bias_cache / (1 - b2 ** t)

        # SGD-style step, normalized by the root of the second moment.
        lr = self.current_learning_rate
        layer.weights += -lr * m_hat_w / (np.sqrt(v_hat_w) + self.epsilon)
        layer.biases += -lr * m_hat_b / (np.sqrt(v_hat_b) + self.epsilon)

    def post_update_params(self):
        """
        Advance the iteration counter.
        Call once per training step, after all update_params() calls.
        """
        self.iterations += 1
| [
"numpy.zeros_like",
"numpy.sqrt"
] | [((1664, 1692), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (1677, 1692), True, 'import numpy as np\n'), ((1726, 1754), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (1739, 1754), True, 'import numpy as np\n'), ((1790, 1817), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (1803, 1817), True, 'import numpy as np\n'), ((1849, 1876), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (1862, 1876), True, 'import numpy as np\n'), ((3409, 3440), 'numpy.sqrt', 'np.sqrt', (['weight_cache_corrected'], {}), '(weight_cache_corrected)\n', (3416, 3440), True, 'import numpy as np\n'), ((3581, 3610), 'numpy.sqrt', 'np.sqrt', (['bias_cache_corrected'], {}), '(bias_cache_corrected)\n', (3588, 3610), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-5, 5, 100)
line_a = 0.5 * x
line_b = x * x
plt.figure()
plt.xlabel('X axis...')
plt.ylabel('Y axis...')  # axis text labels
axes = plt.gca()  # grab the current Axes object
# Hide the right and top spines completely.
axes.spines['right'].set_color('none')
axes.spines['top'].set_color('none')
# Use the bottom spine as the x-axis and the left spine as the y-axis.
axes.xaxis.set_ticks_position('bottom')
axes.yaxis.set_ticks_position('left')
# Anchor both remaining spines at data coordinate 0, forming a centered cross.
axes.spines['bottom'].set_position(('data', 0))
axes.spines['left'].set_position(('data', 0))
plt.plot(x, line_a, linestyle='--')
plt.plot(x, line_b)
plt.show()
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((56, 79), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (67, 79), True, 'import numpy as np\n'), ((105, 117), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (115, 117), True, 'import matplotlib.pyplot as plt\n'), ((118, 141), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X axis..."""'], {}), "('X axis...')\n", (128, 141), True, 'import matplotlib.pyplot as plt\n'), ((142, 165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y axis..."""'], {}), "('Y axis...')\n", (152, 165), True, 'import matplotlib.pyplot as plt\n'), ((215, 224), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (222, 224), True, 'import matplotlib.pyplot as plt\n'), ((656, 687), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'linestyle': '"""--"""'}), "(x, y1, linestyle='--')\n", (664, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 703), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (696, 703), True, 'import matplotlib.pyplot as plt\n'), ((705, 715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (713, 715), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from os import makedirs, listdir
from os.path import join, isfile, isdir, exists, splitext
from pak.datasets.MOT import MOT16
from pak import utils
from time import time
from skimage.transform import resize
from cabbage.data.ReId import get_positive_pairs_by_index
def get_element(X, bb, shape):
    """ returns the bounding box area from the image X

    bb is (x, y, w, h); coordinates are truncated to int before slicing.
    """
    left, top, width, height = (int(v) for v in bb)
    crop = X[top:top + height, left:left + width]
    return resize(crop, shape, mode='constant')
def get_visible_pedestrains(Y_gt):
    """ return people without distractors

    Keeps only ground-truth rows whose columns 7 and 8 both equal 1.
    """
    filtered = utils.extract_eq(Y_gt, col=7, value=1)
    filtered = utils.extract_eq(filtered, col=8, value=1)
    return filtered
class MOT16Sampler:
    """ Sample ids from MOT16

    Builds (and caches on disk as .npy files) per-video crops of all
    visible ground-truth pedestrians together with their person ids,
    plus the positive (same-person) pair indices for ReId training.
    """

    def get_all_batch(self, num_pos, num_neg):
        """ Sample a shuffled batch from every training video.

        :param num_pos: number of positive pairs per video
        :param num_neg: number of negative pairs per video
        :return: (X, Y) stacked over all videos, in random order
        """
        X_ = []
        Y_ = []
        for key in self.pos_pairs:  # iterate video names only
            X, Y = self.get_named_batch(key, num_pos, num_neg)
            X_.append(X)
            Y_.append(Y)
        X = np.vstack(X_)
        Y = np.vstack(Y_)
        n = len(self.pos_pairs) * (num_pos + num_neg)
        order = np.random.choice(n, size=n, replace=False)
        return X[order], Y[order]

    def get_named_batch(self, key, num_pos, num_neg):
        """ get a batch based on the video (e.g. MOT16-02)

        :param key: video name, e.g. 'MOT16-02'
        :param num_pos: number of positive (same-person) pairs
        :param num_neg: number of negative (different-person) pairs
        :return: (X, Y); X stacks the two crops of each pair along the
            last axis, Y is one-hot: (1, 0) = positive, (0, 1) = negative
        """
        assert key in self.lookup
        assert key in self.pos_pairs
        assert num_pos > 0
        assert num_neg > 0
        X, Y = self.lookup[key]
        pos_pairs = self.pos_pairs[key]
        pos_indx = np.random.choice(len(pos_pairs), size=num_pos, replace=False)
        sampled_pos_pairs = pos_pairs[pos_indx]
        # Rejection-sample negative pairs: two crops of different persons.
        sampled_neg_pairs = []
        assert len(X) == len(Y)
        n = len(X)
        while len(sampled_neg_pairs) < num_neg:
            a, b = np.random.choice(n, size=2, replace=False)
            if Y[a] != Y[b]:
                sampled_neg_pairs.append((a, b))
        sampled_neg_pairs = np.array(sampled_neg_pairs)
        Ap = sampled_pos_pairs[:, 0]
        Bp = sampled_pos_pairs[:, 1]
        An = sampled_neg_pairs[:, 0]
        Bn = sampled_neg_pairs[:, 1]
        X_a = np.concatenate([X[Ap], X[An]])
        X_b = np.concatenate([X[Bp], X[Bn]])
        # Stack the two images of each pair along the channel axis.
        X = np.concatenate((X_a, X_b), axis=3)
        Y = np.array([(1, 0)] * num_pos + [(0, 1)] * num_neg)
        return X, Y

    def __init__(self, root, shape):
        """ Load or build the cached crops and positive-pair indices.

        :param root: MOT16 dataset root directory
        :param shape: target (height, width) for each person crop
        """
        mot16 = MOT16(root, verbose=False)
        data_loc = join(root, 'mot16_data_sampler')
        if not isdir(data_loc):
            makedirs(data_loc)
        self.lookup = {}     # video name -> (crops _X, person ids _Y)
        self.pos_pairs = {}  # video name -> array of same-person index pairs
        for F in mot16.get_train_folders():
            start = time()
            fX = join(data_loc, 'X_' + F + '.npy')
            fY = join(data_loc, 'Y_' + F + '.npy')
            X_is_loaded, Y_is_loaded = False, False
            if isfile(fX):
                _X = np.load(fX)
                X_is_loaded = True
            if isfile(fY):
                _Y = np.load(fY)
                Y_is_loaded = True
            if Y_is_loaded and X_is_loaded:
                self.lookup[F] = (_X, _Y)
            else:
                X, Y_det, Y_gt = mot16.get_train(F, memmapped=True)
                Y_gt = get_visible_pedestrains(Y_gt)
                _X = []
                _Y = []
                for f, pid, x, y, w, h, _, _, _ in Y_gt:
                    pid = int(pid)
                    I = X[int(f) - 1]  # f starts at 1, not at 0
                    try:
                        person = get_element(I, (x, y, w, h), shape)
                        _X.append(person * 255)
                        _Y.append(pid)
                    except Exception:
                        # Best-effort: skip boxes that cannot be cropped.
                        # (Narrowed from a bare except so KeyboardInterrupt
                        # and SystemExit are no longer swallowed.)
                        print("skip in " + F, (x, y, w, h))
                assert len(_X) == len(_Y)
                assert len(_X) > 0
                _X = np.array(_X, 'uint8')
                _Y = np.array(_Y, 'int32')
                if not X_is_loaded:
                    np.save(fX, _X)
                if not Y_is_loaded:
                    np.save(fY, _Y)
                self.lookup[F] = (_X, _Y)
                del X  # free memory (memmapped frames)
                del Y_det
                del Y_gt
                print("finished generating X and Y for " + F)
            fPos_pairs = join(data_loc, "pos_pairs_" + F + ".npy")
            if isfile(fPos_pairs):
                print('load positive pairs from disk')
                self.pos_pairs[F] = np.load(fPos_pairs)
            else:
                print('generate positive pairs')
                pos_pairs = get_positive_pairs_by_index(_Y)
                np.save(fPos_pairs, pos_pairs)
                self.pos_pairs[F] = pos_pairs
            print("pos pairs:", self.pos_pairs[F].shape)
            end = time()
            print(F + " .. elapsed", (end - start))
| [
"numpy.load",
"numpy.save",
"numpy.concatenate",
"os.makedirs",
"os.path.isdir",
"pak.datasets.MOT.MOT16",
"time.time",
"os.path.isfile",
"numpy.array",
"pak.utils.extract_eq",
"skimage.transform.resize",
"numpy.random.choice",
"cabbage.data.ReId.get_positive_pairs_by_index",
"os.path.join... | [((449, 500), 'skimage.transform.resize', 'resize', (['X[y:y + h, x:x + w]', 'shape'], {'mode': '"""constant"""'}), "(X[y:y + h, x:x + w], shape, mode='constant')\n", (455, 500), False, 'from skimage.transform import resize\n'), ((649, 687), 'pak.utils.extract_eq', 'utils.extract_eq', (['Y_gt'], {'col': '(7)', 'value': '(1)'}), '(Y_gt, col=7, value=1)\n', (665, 687), False, 'from pak import utils\n'), ((699, 737), 'pak.utils.extract_eq', 'utils.extract_eq', (['Y_gt'], {'col': '(8)', 'value': '(1)'}), '(Y_gt, col=8, value=1)\n', (715, 737), False, 'from pak import utils\n'), ((1090, 1103), 'numpy.vstack', 'np.vstack', (['X_'], {}), '(X_)\n', (1099, 1103), True, 'import numpy as np\n'), ((1116, 1129), 'numpy.vstack', 'np.vstack', (['Y_'], {}), '(Y_)\n', (1125, 1129), True, 'import numpy as np\n'), ((1209, 1251), 'numpy.random.choice', 'np.random.choice', (['n'], {'size': 'n', 'replace': '(False)'}), '(n, size=n, replace=False)\n', (1225, 1251), True, 'import numpy as np\n'), ((2038, 2065), 'numpy.array', 'np.array', (['sampled_neg_pairs'], {}), '(sampled_neg_pairs)\n', (2046, 2065), True, 'import numpy as np\n'), ((2322, 2356), 'numpy.concatenate', 'np.concatenate', (['[X_a_pos, X_a_neg]'], {}), '([X_a_pos, X_a_neg])\n', (2336, 2356), True, 'import numpy as np\n'), ((2371, 2405), 'numpy.concatenate', 'np.concatenate', (['[X_b_pos, X_b_neg]'], {}), '([X_b_pos, X_b_neg])\n', (2385, 2405), True, 'import numpy as np\n'), ((2419, 2453), 'numpy.concatenate', 'np.concatenate', (['(X_a, X_b)'], {'axis': '(3)'}), '((X_a, X_b), axis=3)\n', (2433, 2453), True, 'import numpy as np\n'), ((2466, 2515), 'numpy.array', 'np.array', (['([(1, 0)] * num_pos + [(0, 1)] * num_neg)'], {}), '([(1, 0)] * num_pos + [(0, 1)] * num_neg)\n', (2474, 2515), True, 'import numpy as np\n'), ((2592, 2618), 'pak.datasets.MOT.MOT16', 'MOT16', (['root'], {'verbose': '(False)'}), '(root, verbose=False)\n', (2597, 2618), False, 'from pak.datasets.MOT import MOT16\n'), ((2639, 2671), 
'os.path.join', 'join', (['root', '"""mot16_data_sampler"""'], {}), "(root, 'mot16_data_sampler')\n", (2643, 2671), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((1890, 1932), 'numpy.random.choice', 'np.random.choice', (['n'], {'size': '(2)', 'replace': '(False)'}), '(n, size=2, replace=False)\n', (1906, 1932), True, 'import numpy as np\n'), ((2687, 2702), 'os.path.isdir', 'isdir', (['data_loc'], {}), '(data_loc)\n', (2692, 2702), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((2716, 2734), 'os.makedirs', 'makedirs', (['data_loc'], {}), '(data_loc)\n', (2724, 2734), False, 'from os import makedirs, listdir\n'), ((2886, 2892), 'time.time', 'time', ([], {}), '()\n', (2890, 2892), False, 'from time import time\n'), ((2910, 2943), 'os.path.join', 'join', (['data_loc', "('X_' + F + '.npy')"], {}), "(data_loc, 'X_' + F + '.npy')\n", (2914, 2943), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((2961, 2994), 'os.path.join', 'join', (['data_loc', "('Y_' + F + '.npy')"], {}), "(data_loc, 'Y_' + F + '.npy')\n", (2965, 2994), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((3063, 3073), 'os.path.isfile', 'isfile', (['fX'], {}), '(fX)\n', (3069, 3073), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((3159, 3169), 'os.path.isfile', 'isfile', (['fY'], {}), '(fY)\n', (3165, 3169), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((4459, 4500), 'os.path.join', 'join', (['data_loc', "('pos_pairs_' + F + '.npy')"], {}), "(data_loc, 'pos_pairs_' + F + '.npy')\n", (4463, 4500), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((4516, 4534), 'os.path.isfile', 'isfile', (['fPos_pairs'], {}), '(fPos_pairs)\n', (4522, 4534), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((4945, 4951), 'time.time', 'time', ([], {}), '()\n', (4949, 4951), False, 'from time import time\n'), ((3096, 3107), 'numpy.load', 
'np.load', (['fX'], {}), '(fX)\n', (3103, 3107), True, 'import numpy as np\n'), ((3192, 3203), 'numpy.load', 'np.load', (['fY'], {}), '(fY)\n', (3199, 3203), True, 'import numpy as np\n'), ((4032, 4053), 'numpy.array', 'np.array', (['_X', '"""uint8"""'], {}), "(_X, 'uint8')\n", (4040, 4053), True, 'import numpy as np\n'), ((4075, 4096), 'numpy.array', 'np.array', (['_Y', '"""int32"""'], {}), "(_Y, 'int32')\n", (4083, 4096), True, 'import numpy as np\n'), ((4627, 4646), 'numpy.load', 'np.load', (['fPos_pairs'], {}), '(fPos_pairs)\n', (4634, 4646), True, 'import numpy as np\n'), ((4742, 4773), 'cabbage.data.ReId.get_positive_pairs_by_index', 'get_positive_pairs_by_index', (['_Y'], {}), '(_Y)\n', (4769, 4773), False, 'from cabbage.data.ReId import get_positive_pairs_by_index\n'), ((4790, 4820), 'numpy.save', 'np.save', (['fPos_pairs', 'pos_pairs'], {}), '(fPos_pairs, pos_pairs)\n', (4797, 4820), True, 'import numpy as np\n'), ((4153, 4168), 'numpy.save', 'np.save', (['fX', '_X'], {}), '(fX, _X)\n', (4160, 4168), True, 'import numpy as np\n'), ((4226, 4241), 'numpy.save', 'np.save', (['fY', '_Y'], {}), '(fY, _Y)\n', (4233, 4241), True, 'import numpy as np\n')] |
import numpy as np
import csv
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class clustering:
    """K-means-style clustering of meituan.csv shop records into 3 groups
    (good / medium / poor)."""

    def __init__(self):
        # 125 samples x 3 features (sales volume, delivery time, distance).
        self.specimen = np.zeros((125, 3))

    # Euclidean distance
    def calculate_distance(self, x, y):
        """Return the Euclidean distance between vectors x and y."""
        return np.sqrt(np.sum((np.asarray(x, dtype=float)
                               - np.asarray(y, dtype=float)) ** 2))

    # Read the data and min-max normalize it
    def data_ready(self):
        """Load meituan.csv and scale every feature column to [0, 100]."""
        with open("meituan.csv", encoding='utf8') as f:
            reader = csv.reader(f)
            next(reader)  # skip the header row
            for i, row in enumerate(reader):
                self.specimen[i] = [row[1], row[2], row[3]]
        col_max = self.specimen.max(axis=0)
        col_min = self.specimen.min(axis=0)
        # Vectorized min-max scaling (same formula as the element-wise loop).
        self.specimen = (self.specimen - col_min) / (col_max - col_min) * 100

    @staticmethod
    def _detach(cluster, idx):
        """Remove sample idx from whichever of the clusters holds it, if any."""
        for members in cluster:
            if idx in members:
                members.remove(idx)
                return

    # Split into three groups: good / medium / poor
    def clustering(self):
        """Iterate mean updates until convergence; return the 3 clusters.

        :return: list of three lists of sample indices.
        """
        cnt = 1  # iteration counter (logging only)
        mean_vector = np.zeros((3, 3))
        cluster = [[] for _ in range(3)]
        # Seed each cluster with a distinct random sample as its initial mean.
        seeds = set()
        while len(seeds) != 3:
            pick = np.random.randint(0, len(self.specimen))
            if pick not in seeds:
                mean_vector[len(seeds)] = self.specimen[pick]
                cluster[len(seeds)].append(pick)
                seeds.add(pick)
        while True:
            # NOTE(review): emptying one *random* cluster each pass before
            # re-assignment is unusual for k-means; kept from the original.
            cluster[np.random.randint(0, 3)] = []
            # Assign every sample to its nearest mean
            # (np.argmin breaks ties toward the lowest index, matching the
            # original if/elif ordering).
            for i in range(len(self.specimen)):
                dists = [self.calculate_distance(self.specimen[i],
                                                 mean_vector[k])
                         for k in range(3)]
                self._detach(cluster, i)
                cluster[int(np.argmin(dists))].append(i)
            # Recompute each cluster mean.
            new_means = np.zeros((3, len(self.specimen[0])))
            for k in range(3):
                for idx in cluster[k]:
                    new_means[k] += self.specimen[idx]
                new_means[k] /= len(cluster[k])
            # Convergence measure: mean squared change of the mean vectors.
            delta = np.sum((new_means - mean_vector) ** 2) / 3
            print("第{}次циклa".replace("цикла", "") if False else
                  "第{}次循环的delta为{}".format(cnt, delta))
            print(cluster)
            if delta == 0:
                # (The unreachable `break` after this return was removed.)
                return cluster
            mean_vector = new_means
            cnt += 1
if __name__ == '__main__':
    test = clustering()
    test.data_ready()
    res = test.clustering()
    fig = plt.figure()
    ax1 = plt.axes(projection='3d')
    # features[g][k]: values of feature k for cluster g.
    features = [[[], [], []] for _ in range(3)]
    with open('meituan.csv', encoding='utf8') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for i, row in enumerate(reader):
            if i in res[0]:
                g = 0
            elif i in res[1]:
                g = 1
            else:
                g = 2
            for k in range(3):
                features[g][k].append(float(row[k + 1]))
    # Allow Chinese labels in the plot.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    ax1.scatter3D(features[0][0], features[0][1], features[0][2], c='blue')
    ax1.scatter3D(features[1][0], features[1][1], features[1][2], c='green')
    ax1.scatter3D(features[2][0], features[2][1], features[2][2], c="red")
    ax1.set_xlabel("销售量")
    ax1.set_ylabel("配送时间")
    ax1.set_zlabel("距离")
    plt.title("西北大学外卖店家信息聚类结果")
    plt.savefig("未经PCA出来后的聚类情况")
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"csv.reader",
"matplotlib.pyplot.axes",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((4502, 4514), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4512, 4514), True, 'from matplotlib import pyplot as plt\n'), ((4526, 4551), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (4534, 4551), True, 'from matplotlib import pyplot as plt\n'), ((5914, 5941), 'matplotlib.pyplot.title', 'plt.title', (['"""西北大学外卖店家信息聚类结果"""'], {}), "('西北大学外卖店家信息聚类结果')\n", (5923, 5941), True, 'from matplotlib import pyplot as plt\n'), ((5947, 5975), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""未经PCA出来后的聚类情况"""'], {}), "('未经PCA出来后的聚类情况')\n", (5958, 5975), True, 'from matplotlib import pyplot as plt\n'), ((5981, 5991), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5989, 5991), True, 'from matplotlib import pyplot as plt\n'), ((183, 201), 'numpy.zeros', 'np.zeros', (['(125, 3)'], {}), '((125, 3))\n', (191, 201), True, 'import numpy as np\n'), ((364, 376), 'numpy.sqrt', 'np.sqrt', (['res'], {}), '(res)\n', (371, 376), True, 'import numpy as np\n'), ((1174, 1190), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1182, 1190), True, 'import numpy as np\n'), ((4821, 4834), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4831, 4834), False, 'import csv\n'), ((507, 520), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (517, 520), False, 'import csv\n'), ((1627, 1650), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (1644, 1650), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
def binary_encode(i, j):
    """Return the lowest j bits of i as a numpy 0/1 array, LSB first."""
    bits = [(i >> d) & 1 for d in range(j)]
    return np.array(bits)
def fizz_buzz_encode(i):
    """One-hot encode the fizz-buzz class of i: [number, fizz, buzz, fizzbuzz]."""
    if i % 15 == 0:
        idx = 3
    elif i % 5 == 0:
        idx = 2
    elif i % 3 == 0:
        idx = 1
    else:
        idx = 0
    one_hot = [0, 0, 0, 0]
    one_hot[idx] = 1
    return np.array(one_hot)
# Training data: binary encodings of 101..1023 and their fizz-buzz one-hot labels
# (1..100 is held out for evaluation below).
trX = np.array([binary_encode(i, 10) for i in range(101, 2 ** 10)])
trY = np.array([fizz_buzz_encode(i) for i in range(101, 2 ** 10)])
print(trX)
print(trY)
# Placeholders: 10-bit input, 4-class one-hot target (TF1 graph mode).
X = tf.placeholder("float", [None, 10])
Y = tf.placeholder("float", [None, 4])
# Hidden layer: 10 -> 100 units, ReLU activation, small random init.
w_h = tf.Variable(tf.random_normal([10, 100], stddev = 0.01))
h = tf.nn.relu(tf.matmul(X, w_h))
# Output layer: 100 -> 4 logits (softmax is folded into the loss).
w_o = tf.Variable(tf.random_normal([100, 4], stddev = 0.01))
out = tf.matmul(h, w_o)
# Softmax cross-entropy loss, optimized with plain gradient descent.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = out, labels = Y))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# Predicted class = argmax over the 4 logits.
predict_op = tf.argmax(out, 1)
def fizz_buzz(i, prediction):
    """Decode a class index (0-3) into the printed token for number i."""
    labels = (str(i), "fizz", "buzz", "fizzbuzz")
    return labels[prediction]
with tf.Session() as sess:
    # tf.initialize_all_variables() is deprecated (since TF 0.12);
    # tf.global_variables_initializer() is the supported replacement.
    sess.run(tf.global_variables_initializer())
    for epoch in range(10000):
        # Reshuffle the training data every epoch.
        p = np.random.permutation(len(trX))
        trX, trY = trX[p], trY[p]
        # Mini-batch gradient descent with batch size 128.
        for start in range(0, len(trX), 128):
            end = start + 128
            sess.run(train_op, feed_dict = {
                X: trX[start:end],
                Y: trY[start:end]
            })
        if epoch % 500 == 0:
            # Report training accuracy every 500 epochs.
            print(epoch, np.mean(np.argmax(trY, axis = 1) == sess.run(predict_op, feed_dict = {
                X: trX,
                Y: trY
            })))
    # Evaluate on the held-out numbers 1..100 and print the decoded output.
    numbers = np.arange(1, 101)
    teX = np.transpose(binary_encode(numbers, 10))
    teY = sess.run(predict_op, feed_dict = {
        X: teX
    })
    output = np.vectorize(fizz_buzz)(numbers, teY)
    print(output)
| [
"numpy.vectorize",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.argmax",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.random_normal",
"numpy.arange",
"numpy.array",
"tensorflow.initialize_all_variables",
"tensorflow.train.Gr... | [((512, 547), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 10]'], {}), "('float', [None, 10])\n", (526, 547), True, 'import tensorflow as tf\n'), ((553, 587), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 4]'], {}), "('float', [None, 4])\n", (567, 587), True, 'import tensorflow as tf\n'), ((757, 774), 'tensorflow.matmul', 'tf.matmul', (['h', 'w_o'], {}), '(h, w_o)\n', (766, 774), True, 'import tensorflow as tf\n'), ((946, 963), 'tensorflow.argmax', 'tf.argmax', (['out', '(1)'], {}), '(out, 1)\n', (955, 963), True, 'import tensorflow as tf\n'), ((609, 649), 'tensorflow.random_normal', 'tf.random_normal', (['[10, 100]'], {'stddev': '(0.01)'}), '([10, 100], stddev=0.01)\n', (625, 649), True, 'import tensorflow as tf\n'), ((669, 686), 'tensorflow.matmul', 'tf.matmul', (['X', 'w_h'], {}), '(X, w_h)\n', (678, 686), True, 'import tensorflow as tf\n'), ((707, 746), 'tensorflow.random_normal', 'tf.random_normal', (['[100, 4]'], {'stddev': '(0.01)'}), '([100, 4], stddev=0.01)\n', (723, 746), True, 'import tensorflow as tf\n'), ((798, 859), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'out', 'labels': 'Y'}), '(logits=out, labels=Y)\n', (837, 859), True, 'import tensorflow as tf\n'), ((1063, 1075), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1073, 1075), True, 'import tensorflow as tf\n'), ((1539, 1556), 'numpy.arange', 'np.arange', (['(1)', '(101)'], {}), '(1, 101)\n', (1548, 1556), True, 'import numpy as np\n'), ((176, 198), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (184, 198), True, 'import numpy as np\n'), ((877, 916), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.05)'], {}), '(0.05)\n', (910, 916), True, 'import tensorflow as tf\n'), ((1675, 1698), 'numpy.vectorize', 'np.vectorize', (['fizz_buzz'], {}), '(fizz_buzz)\n', (1687, 1698), True, 'import numpy as 
np\n'), ((228, 250), 'numpy.array', 'np.array', (['[0, 0, 1, 0]'], {}), '([0, 0, 1, 0])\n', (236, 250), True, 'import numpy as np\n'), ((1087, 1116), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1114, 1116), True, 'import tensorflow as tf\n'), ((280, 302), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (288, 302), True, 'import numpy as np\n'), ((321, 343), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (329, 343), True, 'import numpy as np\n'), ((1430, 1452), 'numpy.argmax', 'np.argmax', (['trY'], {'axis': '(1)'}), '(trY, axis=1)\n', (1439, 1452), True, 'import numpy as np\n')] |
import os
import sys
import unittest
import numpy as np
from math import isclose
# Make sure we can import the /src.
sys.path.append(os.path.abspath("../../code"))
from src.porosity import Porosity
from src.tree_roots import TreeRoots
from src.water_content import WaterContent
from src.soil_properties import SoilProperties
class TestTreeRoots(unittest.TestCase):
    """Unit-tests for the TreeRoots root-density profile class."""
    @classmethod
    def setUpClass(cls) -> None:
        # Announce the start of this test-suite on stdout.
        print(" >> TestTreeRoots - START -")
    # _end_def_
    @classmethod
    def tearDownClass(cls) -> None:
        # Announce the end of this test-suite on stdout.
        print(" >> TestTreeRoots - FINISH -")
    # _end_def_
    def setUp(self) -> None:
        """
        Add fields to the main object. These will be used to create
        different root density profiles with identical input values.
        :return: None.
        """
        # Grid parameters [L: cm].
        self.dz, z_max = 5.0, 1500.0
        # Test vertical domain.
        self.z_grid = np.arange(0.0, z_max + self.dz, self.dz)
        # Layers.
        self.layers = (0, 50, 200, z_max)
        # Water content object with default parameters.
        self.theta = WaterContent()
        # Soil properties object with default parameters.
        self.soil = SoilProperties()
        # Number of discrete cells forming the root zone.
        self.ln = 200
        # Test grid (0 - max_depth) [L: cm].
        self.z_roots = np.linspace(0.0, self.ln * self.dz, self.ln)
        # Create a linear porosity object.
        self.porous = Porosity(self.z_grid, self.layers, self.theta, self.soil, 'Linear')
    # _end_def_
    def test_call(self):
        """
        Test the __call__() method with all the known tree root pdf models.
        :return: None
        """
        # Make a list with all the known models.
        root_models = ["uniform", "negative_exp", "gamma_pdf", "mixture"]
        # Test all the models one at a time.
        for model_i in root_models:
            # Print info.
            print(" Testing {0} model ... ".format(model_i))
            # Create a linear porosity object.
            test_obj = TreeRoots(self.ln, self.dz, model_i, self.porous)
            # Get the full root profile.
            root_pdf_1 = test_obj()
            # Check if the integrated density sums to one.
            self.assertTrue(isclose(np.sum(root_pdf_1) * self.dz, 1.0, rel_tol=1.0e-5))
            # Make sure the max root depths match.
            self.assertEqual(test_obj.max_root_depth, (self.ln * self.dz))
            # Get the root profile on the test grid.
            root_pdf_2 = test_obj(self.z_roots)
            # Check if the densities are almost equal.
            self.assertTrue(isclose(np.sum(root_pdf_1) * self.dz,
                                    np.sum(root_pdf_2) * self.dz,
                                    rel_tol=1.0e-4))
        # _end_for_
    # _end_def_
    def test_efficiency(self):
        """
        Test the efficiency() method with zero and random water content.
        :return: None
        """
        # Create a linear porosity object.
        test_obj = TreeRoots(self.ln, self.dz, "uniform", self.porous)
        # Print object.
        print(test_obj)
        # Zero water version.
        theta_0 = np.zeros(self.ln)
        rho_theta0, _ = test_obj.efficiency(theta_0, self.z_roots)
        # With no water, the uptake efficiency should vanish everywhere.
        self.assertTrue(np.sum(rho_theta0) == 0.0)
        # Simple version.
        theta_1d = np.random.rand(self.ln)
        rho_theta1, _ = test_obj.efficiency(theta_1d, self.z_roots)
        # Check the input/output dimensions.
        self.assertEqual(theta_1d.shape, rho_theta1.shape)
        # Check if the integrated water efficiency sums to one.
        self.assertTrue(isclose(np.sum(rho_theta1) * self.dz, 1.0, rel_tol=1.0e-5))
    # _end_def_
    def test_wrong_init_params(self):
        """
        Test an object initialization with wrong input parameters.
        :return: None
        """
        with self.assertRaises(ValueError):
            # The input model is unknown to the class.
            _ = TreeRoots(self.ln, self.dz, "Gaussian", self.porous)
        # _end_with_
    # _end_def_
# Run the test-suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"os.path.abspath",
"numpy.sum",
"src.water_content.WaterContent",
"numpy.zeros",
"src.porosity.Porosity",
"numpy.arange",
"numpy.linspace",
"src.soil_properties.SoilProperties",
"numpy.random.rand",
"src.tree_roots.TreeRoots"
] | [((134, 163), 'os.path.abspath', 'os.path.abspath', (['"""../../code"""'], {}), "('../../code')\n", (149, 163), False, 'import os\n'), ((4048, 4063), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4061, 4063), False, 'import unittest\n'), ((939, 979), 'numpy.arange', 'np.arange', (['(0.0)', '(z_max + self.dz)', 'self.dz'], {}), '(0.0, z_max + self.dz, self.dz)\n', (948, 979), True, 'import numpy as np\n'), ((1119, 1133), 'src.water_content.WaterContent', 'WaterContent', ([], {}), '()\n', (1131, 1133), False, 'from src.water_content import WaterContent\n'), ((1213, 1229), 'src.soil_properties.SoilProperties', 'SoilProperties', ([], {}), '()\n', (1227, 1229), False, 'from src.soil_properties import SoilProperties\n'), ((1380, 1424), 'numpy.linspace', 'np.linspace', (['(0.0)', '(self.ln * self.dz)', 'self.ln'], {}), '(0.0, self.ln * self.dz, self.ln)\n', (1391, 1424), True, 'import numpy as np\n'), ((1491, 1558), 'src.porosity.Porosity', 'Porosity', (['self.z_grid', 'self.layers', 'self.theta', 'self.soil', '"""Linear"""'], {}), "(self.z_grid, self.layers, self.theta, self.soil, 'Linear')\n", (1499, 1558), False, 'from src.porosity import Porosity\n'), ((2964, 3015), 'src.tree_roots.TreeRoots', 'TreeRoots', (['self.ln', 'self.dz', '"""uniform"""', 'self.porous'], {}), "(self.ln, self.dz, 'uniform', self.porous)\n", (2973, 3015), False, 'from src.tree_roots import TreeRoots\n'), ((3114, 3131), 'numpy.zeros', 'np.zeros', (['self.ln'], {}), '(self.ln)\n', (3122, 3131), True, 'import numpy as np\n'), ((3296, 3319), 'numpy.random.rand', 'np.random.rand', (['self.ln'], {}), '(self.ln)\n', (3310, 3319), True, 'import numpy as np\n'), ((2087, 2136), 'src.tree_roots.TreeRoots', 'TreeRoots', (['self.ln', 'self.dz', 'model_i', 'self.porous'], {}), '(self.ln, self.dz, model_i, self.porous)\n', (2096, 2136), False, 'from src.tree_roots import TreeRoots\n'), ((3925, 3977), 'src.tree_roots.TreeRoots', 'TreeRoots', (['self.ln', 'self.dz', '"""Gaussian"""', 'self.porous'], 
{}), "(self.ln, self.dz, 'Gaussian', self.porous)\n", (3934, 3977), False, 'from src.tree_roots import TreeRoots\n'), ((3223, 3241), 'numpy.sum', 'np.sum', (['rho_theta0'], {}), '(rho_theta0)\n', (3229, 3241), True, 'import numpy as np\n'), ((3590, 3608), 'numpy.sum', 'np.sum', (['rho_theta1'], {}), '(rho_theta1)\n', (3596, 3608), True, 'import numpy as np\n'), ((2311, 2329), 'numpy.sum', 'np.sum', (['root_pdf_1'], {}), '(root_pdf_1)\n', (2317, 2329), True, 'import numpy as np\n'), ((2684, 2702), 'numpy.sum', 'np.sum', (['root_pdf_1'], {}), '(root_pdf_1)\n', (2690, 2702), True, 'import numpy as np\n'), ((2750, 2768), 'numpy.sum', 'np.sum', (['root_pdf_2'], {}), '(root_pdf_2)\n', (2756, 2768), True, 'import numpy as np\n')] |
# Neural network for pop assignment
# Load packages
import tensorflow.keras as tf
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
import numpy as np
import pandas as pd
import allel
import zarr
import h5py
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import itertools
import shutil
import sys
import os
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sn
class pop_find_class:
# instance attribute
    def __init__(self, infile, sample_data, seed=None, train_prop=0.8, save_dir="out"):
        """
        Load genotype/sample data and create the train/holdout split.

        Parameters
        ----------
        infile : str
            Path to the genotype file (.zarr, .vcf(.gz) or .locator.hdf5).
        sample_data : str
            Path to the tab-separated sample-information file
            (columns x, y, pop, sampleID).
        seed : int or None
            Random seed stored for later splits (NOTE: not passed to the
            initial train/holdout split below).
        train_prop : float
            Proportion of known samples kept for training.
        save_dir : str
            Output directory; deleted and recreated if it already exists.
        """
        self.infile = infile
        self.sample_data = sample_data
        self.seed=seed
        self.train_prop=train_prop
        self.save_dir=save_dir
        # Fail fast on missing input paths.
        if os.path.exists(self.infile) is False:
            raise ValueError("infile does not exist")
        if os.path.exists(self.sample_data) is False:
            raise ValueError("sample_data does not exist")
        # Known samples (samp_list/dc) and unknown-origin samples
        # (uk_list/dc_uk/unknowns) come back separately from read_data.
        self.samp_list, self.dc, self.uk_list, self.dc_uk, self.unknowns = read_data(
            infile=self.infile,
            sample_data=self.sample_data,
            save_allele_counts=False,
        )
        # Create test set that will be used to assess model performance later
        self.X_train_0, self.X_holdout, self.y_train_0, self.y_holdout = train_test_split(
            self.dc, self.samp_list, stratify=self.samp_list["pops"], train_size=self.train_prop
        )
        # Create save_dir if doesn't already exist
        # (an existing directory is wiped first).
        print(f"Output will be saved to: {save_dir}")
        if os.path.exists(save_dir):
            shutil.rmtree(save_dir)
        os.makedirs(save_dir)
        # Save train and test set to save_dir
        np.save(save_dir + "/X_train.npy", self.X_train_0)
        self.y_train_0.to_csv(save_dir + "/y_train.csv", index=False)
        np.save(save_dir + "/X_holdout.npy", self.X_holdout)
        self.y_holdout.to_csv(save_dir + "/y_holdout.csv", index=False)
def hyper_tune(self, y_train_0=None, dc=None,max_trials=10,runs_per_trial=10,max_epochs=100,train_prop=0.8,seed=None,save_dir="out",mod_name="hyper_tune"):
y_train_0 = self.y_train_0
dc = self.X_train_0
seed=self.seed
if isinstance(max_trials, np.int) is False:
raise ValueError("max_trials should be integer")
if isinstance(runs_per_trial, np.int) is False:
raise ValueError("runs_per_trial should be integer")
if isinstance(max_epochs, np.int) is False:
raise ValueError("max_epochs should be integer")
if isinstance(train_prop, np.float) is False:
raise ValueError("train_prop should be float")
if isinstance(seed, np.int) is False and seed is not None:
raise ValueError("seed should be integer or None")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be string")
if isinstance(mod_name, str) is False:
raise ValueError("mod_name should be string")
# Train prop can't be greater than num samples
if len(dc) * (1 - train_prop) < len(np.unique(y_train_0["pops"])):
raise ValueError("train_prop is too high; not enough samples for test")
# Split data into training test
X_train, X_val, y_train, y_val = train_test_split(
dc,
y_train_0,
stratify=y_train_0["pops"],
train_size=train_prop,
random_state=seed,
)
if len(np.unique(y_train["pops"])) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
hypermodel = classifierHyperModel(
input_shape=X_train.shape[1], num_classes=len(popnames)
)
tuner = RandomSearch(
hypermodel,
objective="val_loss",
seed=seed,
max_trials=max_trials,
executions_per_trial=runs_per_trial,
directory=save_dir,
project_name=mod_name,
)
tuner.search(
X_train - 1,
y_train_enc,
epochs=max_epochs,
validation_data=(X_val - 1, y_val_enc),
)
self.best_mod = tuner.get_best_models(num_models=1)[0]
tuner.get_best_models(num_models=1)[0].save(save_dir + "/best_mod")
def class_train(self,
ensemble=False,
plot_hist=True,
nbags=10,
save_weights=True,
patience=20,
batch_size=32,
max_epochs=100,
):
print(f"Output will be saved to: {self.save_dir}")
y_train = self.y_train_0
dc = self.X_train_0
train_prop = self.train_prop
if len(dc) * (1 - train_prop) < 1:
raise ValueError(
"train_prop is too high; not enough values for test")
seed=self.seed
save_dir = self.save_dir + "/training_output"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
y_test_samples = self.y_holdout["samples"].to_numpy()
y_test_pops = self.y_holdout["pops"].to_numpy()
# One hot encode test values
enc = OneHotEncoder(handle_unknown="ignore")
y_test_enc = enc.fit_transform(
self.y_holdout["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
self.popnames=popnames
# results storage
TEST_LOSS = []
TEST_ACCURACY = []
TEST_95CI = []
yhats = []
ypreds = []
test_dict = {"count": [], "df": []}
if hasattr(self, 'best_mod'):
model = self.best_mod
else:
# Test if train_prop is too high
if len(dc) * (1 - train_prop) < 1:
raise ValueError(
"train_prop is too high; not enough values for test")
X_train, X_val, y_train, y_val = train_test_split(
dc,
y_train,
stratify=y_train["pops"],
train_size=train_prop,
random_state=seed,
)
# Make sure all classes represented in y_val
if len(np.unique(y_train["pops"]) ) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames1 = enc.categories_[0]
model = basic_model(X_train,popnames1)
self.best_mod = model
if ensemble:
X_train = self.X_train_0
y_train = self.y_train_0
n_prime = np.int(np.ceil(len(X_train) * 0.8))
self.ensembl_fl = []
if os.path.exists(save_dir + "/ensemble_weights"):
shutil.rmtree(save_dir + "/ensemble_weights")
os.makedirs(save_dir + "/ensemble_weights")
for i in range(nbags):
good_bag = False
while good_bag is False:
bag_X = np.zeros(shape=(n_prime, X_train.shape[1]))
bag_y = pd.DataFrame({"samples": [], "pops": [], "order": []})
for j in range(0, n_prime):
ind = np.random.choice(len(X_train))
bag_X[j] = X_train[ind]
bag_y = bag_y.append(y_train.iloc[ind])
dup_pops_df = bag_y.groupby(["pops"]).agg(["count"])
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and (dup_pops_df[("samples", "count")] > 1).all()
):
# Create validation set from training set
bag_X, X_val, bag_y, y_val = train_test_split(
bag_X, bag_y, stratify=bag_y["pops"],
train_size=train_prop
)
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and pd.Series(popnames).isin(y_val["pops"]).all()
):
good_bag = True
enc = OneHotEncoder(handle_unknown="ignore")
bag_y_enc = enc.fit_transform(
bag_y["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
# Create callbacks
temp_str = "/ensemble_weights/checkpoint_" + str(i)+ ".h5"
self.ensembl_fl.append(temp_str)
checkpointer = tf.callbacks.ModelCheckpoint(
filepath=save_dir + temp_str,
verbose=1,
save_best_only=True,
save_weights_only=True,
monitor="val_loss",
# monitor="loss",
save_freq="epoch",
)
earlystop = tf.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=patience
)
reducelr = tf.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.2,
patience=int(patience / 3),
verbose=1,
mode="auto",
min_delta=0,
cooldown=0,
min_lr=0,
)
callback_list = [checkpointer, earlystop, reducelr]
# Train model
history = model.fit(
bag_X - 1,
bag_y_enc,
batch_size=int(batch_size),
epochs=int(max_epochs),
callbacks=callback_list,
validation_data=(X_val - 1, y_val_enc),
verbose=0,
)
# Load best model
model.load_weights(save_dir + temp_str)
if plot_hist:
plot_history(history=history, i=i, save_dir= save_dir, ensemble=True)
test_loss, test_acc = model.evaluate(self.X_holdout - 1, y_test_enc)
test_df = pd.DataFrame(model.predict(self.X_holdout - 1))
test_df.columns = popnames
test_df["sampleID"] = y_test_samples
test_df["true_pops"] = y_test_pops
test_dict["count"].append(1)
test_dict["df"].append(test_df)
test_df.to_csv(save_dir+"/test_results.csv")
# Fill test lists with information
TEST_LOSS.append(test_loss)
TEST_ACCURACY.append(test_acc)
yhats = np.array(yhats)
# Get ensemble accuracy
tot_bag_df = test_dict["df"][0].iloc[
:, 0:len(popnames)
].copy()
for i in range(0, len(test_dict["df"])):
tot_bag_df += test_dict["df"][i].iloc[:, 0:len(popnames)]
# Normalize values to be between 0 and 1
tot_bag_df = tot_bag_df / nbags
tot_bag_df["top_samp"] = tot_bag_df.idxmax(axis=1)
tot_bag_df["sampleID"] = test_dict["df"][0]["sampleID"]
tot_bag_df["true_pops"] = test_dict["df"][0]["true_pops"]
ENSEMBLE_TEST_ACCURACY = np.sum(
tot_bag_df["top_samp"] == tot_bag_df["true_pops"]
) / len(tot_bag_df)
tot_bag_df.to_csv(save_dir + "/ensemble_test_results.csv")
else:
# Split training data into training and validation
X_train = self.X_train_0
y_train = self.y_train_0
X_train, X_val, y_train, y_val = train_test_split(
dc, y_train, stratify=y_train["pops"],
random_state=seed)
# Make sure all classes represented in y_val
if len(
np.unique(y_train["pops"])
) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
# Create callbacks
if os.path.exists(save_dir + "/default_mod_weights"):
shutil.rmtree(save_dir + "/default_mod_weights")
os.makedirs(save_dir + "/default_mod_weights")
checkpointer = tf.callbacks.ModelCheckpoint(
filepath=save_dir + "/default_mod_weights/checkpoint.h5",
verbose=1,
save_best_only=True,
save_weights_only=True,
monitor="val_loss",
save_freq="epoch",
)
earlystop = tf.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=patience
)
reducelr = tf.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.2,
patience=int(patience / 3),
verbose=1,
mode="auto",
min_delta=0,
cooldown=0,
min_lr=0,
)
callback_list = [checkpointer, earlystop, reducelr]
# Train model
history = model.fit(
X_train - 1,
y_train_enc,
batch_size=int(batch_size),
epochs=int(max_epochs),
callbacks=callback_list,
validation_data=(X_val - 1, y_val_enc),
verbose=0,
)
# Load best model
model.load_weights(save_dir + "/default_mod_weights/checkpoint.h5")
if plot_hist:
plot_history(history=history, save_dir=save_dir, ensemble=False)
tf.backend.clear_session()
test_loss, test_acc = model.evaluate(self.X_holdout - 1, y_test_enc)
test_df = pd.DataFrame(model.predict(self.X_holdout - 1))
test_df.columns = popnames
test_df["sampleID"] = y_test_samples
test_df["true_pops"] = y_test_pops
test_dict["count"].append(1)
test_dict["df"].append(test_df)
test_df.to_csv(save_dir + "/test_results.csv")
# Find confidence interval of best model
test_err = 1 - test_acc
test_95CI = 1.96 * np.sqrt(
(test_err * (1 - test_err)) / len(y_test_enc))
# Fill test lists with information
TEST_LOSS.append(test_loss)
TEST_ACCURACY.append(test_acc)
TEST_95CI.append(test_95CI)
print(
f"Accuracy of model is {np.round(test_acc, 2)}\
+/- {np.round(test_95CI,2)}"
)
# Print metrics to csv
print("Creating outputs...")
metrics = pd.DataFrame(
{
"metric": [
"Test accuracy",
"Test 95% CI",
"Test loss",
],
"value": [
np.round(TEST_ACCURACY, 2),
np.round(TEST_95CI, 2),
np.round(TEST_LOSS, 2),
],
}
)
metrics.to_csv(save_dir + "/metrics.csv", index=False)
print("Process complete")
    def predict(self, ensemble=False):
        """
        Assign populations to the unknown-origin samples.

        Parameters
        ----------
        ensemble : bool
            If True, predict with every checkpoint listed in
            self.ensembl_fl and aggregate by vote frequency; otherwise
            use the single default-model checkpoint.

        Returns
        -------
        None. Writes pop_assign*.csv files under
        self.save_dir + "/training_output".
        """
        save_dir = self.save_dir + "/training_output"
        # Unknown samples and their (already -1 shifted) genotype matrix.
        uksamples = self.unknowns["sampleID"].to_numpy()
        ukgen = self.dc_uk
        popnames = self.popnames
        pred_dict = {"count": [], "df": []}
        top_pops = {"df": [], "pops": []}
        ypreds = []
        # NOTE(review): assumes class_train() already ran and set
        # self.best_mod (and self.ensembl_fl when ensemble=True).
        model = self.best_mod
        if ensemble:
            i=0
            pred_dict = {"count": [], "df": []}
            top_pops = {"df": [], "pops": []}
            # One prediction pass per bagged checkpoint.
            for checkpoint in self.ensembl_fl:
                model.load_weights(save_dir + checkpoint)
                tmp_df = pd.DataFrame(model.predict(ukgen))
                tmp_df.columns = popnames
                tmp_df["sampleID"] = uksamples
                tmp_df["bag"] = i
                pred_dict["count"].append(i)
                pred_dict["df"].append(tmp_df)
                # Find top populations for each sample
                top_pops["df"].append(i)
                top_pops["pops"].append(
                    pred_dict["df"][i].iloc[
                        :, 0:len(popnames)
                    ].idxmax(axis=1)
                )
                i= i+1
            ypreds = np.array(ypreds)
            # Rows = bags, columns = samples; each cell is that bag's
            # top-population call for that sample.
            top_pops_df = pd.DataFrame(top_pops["pops"])
            top_pops_df.columns = uksamples
            top_freqs = {"sample": [], "freq": []}
            # Fraction of bags voting for each population, per sample.
            for samp in uksamples:
                top_freqs["sample"].append(samp)
                top_freqs["freq"].append(
                    top_pops_df[samp].value_counts() / len(top_pops_df)
                )
            # Save frequencies to csv for plotting
            top_freqs_df = pd.DataFrame(top_freqs["freq"]).fillna(0)
            top_freqs_df.to_csv(save_dir + "/pop_assign_freqs.csv")
            # Create table to assignments by frequency
            # (most-voted population and its vote share per sample).
            freq_df = pd.concat(
                [
                    pd.DataFrame(top_freqs["freq"]).max(axis=1),
                    pd.DataFrame(top_freqs["freq"]).idxmax(axis=1),
                ],
                axis=1,
            ).reset_index()
            freq_df.columns = ["Assigned Pop",
                               "Frequency",
                               "Sample ID"]
            freq_df.to_csv(save_dir + "/pop_assign_ensemble.csv",
                           index=False)
        else:
            # Single-model path: restore the default checkpoint and
            # write raw per-population probabilities.
            model.load_weights(save_dir + "/default_mod_weights/checkpoint.h5")
            tmp_df = pd.DataFrame(model.predict(ukgen))
            tmp_df.columns = popnames
            tmp_df["sampleID"] = uksamples
            tmp_df.to_csv(save_dir + "/pop_assign.csv", index=False)
def read_data(infile, sample_data, save_allele_counts=False):
    """
    Reads a .zarr, .vcf, or h5py file containing genetic data and
    creates subsettable data for a classifier neural network.
    Parameters
    ----------
    infile : string
        Path to the .zarr, .vcf(.gz), or .locator.hdf5 file.
    sample_data : string
        Path to tab-separated .txt file containing sample information
        (columns are x, y, sampleID, and pop; pop is NA for samples
        of unknown origin).
    save_allele_counts : boolean
        Saves derived allele count information to
        infile + ".locator.hdf5" (Default=False).
    Returns
    -------
    samp_list : dataframe
        Contains information on corresponding sampleID and
        population classifications for the known samples.
    dc : np.array
        Array of derived allele counts for the known samples.
    uk_list : dataframe
        sampleID and (NA) population for the unknown samples.
    dc_uk : np.array
        Derived allele counts for the unknown samples, shifted by -1.
    unknowns : dataframe
        Sample information rows whose pop is NA.
    """
    # Check formats of datatypes
    if os.path.exists(infile) is False:
        raise ValueError("Path to infile does not exist")
    # Load genotypes
    print("loading genotypes")
    if infile.endswith(".zarr"):
        callset = zarr.open_group(infile, mode="r")
        gt = callset["calldata/GT"]
        gen = allel.GenotypeArray(gt[:])
        samples = callset["samples"][:]
    elif infile.endswith(".vcf") or infile.endswith(".vcf.gz"):
        vcf = allel.read_vcf(infile, log=sys.stderr)
        gen = allel.GenotypeArray(vcf["calldata/GT"])
        samples = vcf["samples"]
    elif infile.endswith(".locator.hdf5"):
        # Pre-computed derived counts: skip the allele-counting step.
        h5 = h5py.File(infile, "r")
        dc = np.array(h5["derived_counts"])
        samples = np.array(h5["samples"])
        h5.close()
    else:
        raise ValueError("Infile must have extension 'zarr', 'vcf', or 'hdf5'")
    # count derived alleles for biallelic sites
    if infile.endswith(".locator.hdf5") is False:
        print("counting alleles")
        ac = gen.to_allele_counts()
        biallel = gen.count_alleles().is_biallelic()
        dc = np.array(ac[biallel, :, 1], dtype="int_")
        # Transpose so rows are samples and columns are sites.
        dc = np.transpose(dc)
    if (save_allele_counts and
            not infile.endswith(".locator.hdf5")):
        print("saving derived counts for reanalysis")
        outfile = h5py.File(infile + ".locator.hdf5", "w")
        outfile.create_dataset("derived_counts", data=dc)
        outfile.create_dataset("samples", data=samples,
                               dtype=h5py.string_dtype())
        outfile.close()
    # Load data and organize for output
    print("loading sample data")
    if os.path.exists(sample_data) is False:
        raise ValueError("Path to sample_data does not exist")
    locs = pd.read_csv(sample_data, sep="\t")
    if not pd.Series(["x",
                  "pop",
                  "y",
                  "sampleID"]).isin(locs.columns).all():
        raise ValueError("sample_data does not have correct columns")
    locs["id"] = locs["sampleID"]
    locs.set_index("id", inplace=True)
    # sort loc table so samples are in same order as genotype samples
    locs = locs.reindex(np.array(samples))
    # Create order column for indexing
    locs["order"] = np.arange(0, len(locs))
    unknowns = locs.iloc[np.where(pd.isnull(locs["pop"]))]
    # handle presence of samples with unknown locations
    # NOTE(review): unknowns are detected via null "pop" above but
    # removed via null "x" here — these sets must coincide in the
    # input file for the split to be consistent; confirm upstream.
    uk_remove = locs[locs["x"].isnull()]["order"].values
    # Unknown genotypes are pre-shifted by -1 (the known set is
    # shifted at training time instead).
    dc_uk = dc[uk_remove,:] - 1
    dc = np.delete(dc, uk_remove, axis=0)
    samples_uk = samples[uk_remove]
    samples = np.delete(samples, uk_remove)
    locs_uk = locs[locs["pop"].isna()]
    locs = locs.dropna()
    # check that all sample names are present
    if not all(
        [locs["sampleID"][x] == samples[x] for x in range(len(samples))]
    ):
        raise ValueError(
            "sample ordering failed! Check that sample IDs match VCF.")
    locs = np.array(locs["pop"])
    samp_list = pd.DataFrame({"samples": samples, "pops": locs})
    locs_uk = np.array(locs_uk["pop"])
    uk_list = pd.DataFrame({"samples":samples_uk, "pops": locs_uk})
    # Return the sample lists
    return samp_list, dc, uk_list, dc_uk, unknowns
class classifierHyperModel(HyperModel):
    """Keras-tuner hypermodel for the population classifier network."""

    def __init__(self, input_shape, num_classes):
        """
        Initializes object of class classifierHyperModel.
        Parameters
        ----------
        input_shape : int
            Number of training examples.
        num_classes : int
            Number of populations or labels.
        """
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build(self, hp):
        """
        Builds a model with the specified hyperparameters.
        Parameters
        ----------
        hp : keras.tuners class
            Class that defines how to sample hyperparameters (e.g.
            RandomSearch()).
        Returns
        -------
        model : Keras sequential model
            Model with all the layers and specified hyperparameters.
        """
        def tunable_dense(idx):
            # One hidden layer whose width and activation are tuned;
            # hyperparameter names match the original units_N /
            # dense_activation_N scheme.
            return tf.layers.Dense(
                units=hp.Int(
                    "units_{0}".format(idx),
                    min_value=32,
                    max_value=512,
                    step=32,
                    default=128,
                ),
                activation=hp.Choice(
                    "dense_activation_{0}".format(idx),
                    values=["elu", "relu", "tanh", "sigmoid"],
                    default="elu",
                ),
            )

        model = tf.Sequential()
        model.add(tf.layers.BatchNormalization(
            input_shape=(self.input_shape,)))
        # Three tunable hidden layers before the dropout.
        for idx in (1, 2, 3):
            model.add(tunable_dense(idx))
        model.add(
            tf.layers.Dropout(
                rate=hp.Float(
                    "dropout", min_value=0.0,
                    max_value=0.5,
                    default=0.25,
                    step=0.05
                )
            )
        )
        # Three more tunable hidden layers after the dropout.
        for idx in (4, 5, 6):
            model.add(tunable_dense(idx))
        model.add(tf.layers.Dense(self.num_classes,
                                  activation="softmax"))
        model.compile(
            optimizer=tf.optimizers.Adam(
                hp.Float(
                    "learning_rate",
                    min_value=1e-4,
                    max_value=1e-2,
                    sampling="LOG",
                    default=5e-4,
                )
            ),
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )
        return model
def basic_model(bag_X, popnames):
    """
    Build the default (untuned) classifier network.

    Parameters
    ----------
    bag_X : np.array
        Training matrix; only its column count (number of loci) is used
        to size the input layer.
    popnames : array-like
        Population labels; its length sets the softmax output width.

    Returns
    -------
    model : compiled Keras sequential model
        Six 128-unit elu hidden layers with one dropout layer, softmax
        output, Adam optimizer, categorical cross-entropy loss.
    """
    model = tf.Sequential()
    model.add(tf.layers.BatchNormalization(
        input_shape=(bag_X.shape[1],)))
    model.add(tf.layers.Dense(128, activation="elu"))
    model.add(tf.layers.Dense(128, activation="elu"))
    model.add(tf.layers.Dense(128, activation="elu"))
    model.add(tf.layers.Dropout(0.25))
    model.add(tf.layers.Dense(128, activation="elu"))
    model.add(tf.layers.Dense(128, activation="elu"))
    model.add(tf.layers.Dense(128, activation="elu"))
    model.add(tf.layers.Dense(len(popnames), activation="softmax"))
    # Use the full keyword: the `lr` alias is deprecated in TF2 Keras
    # and removed in Keras 3.
    aopt = tf.optimizers.Adam(learning_rate=0.0005)
    model.compile(
        loss="categorical_crossentropy",
        optimizer=aopt,
        # Pass metrics as a list, matching classifierHyperModel.build.
        metrics=["accuracy"])
    return model
def plot_history(history, i=None, ensemble=False, save_dir="out"):
    """
    Plot training vs. validation loss (epochs 4 onward) and save as PDF.

    Parameters
    ----------
    history : keras History
        Object returned by model.fit(); its .history dict is read.
    i : int or None
        Bag index used in the output filename when ensemble=True.
    ensemble : bool
        Selects the per-bag filename ("model<i>_history.pdf") versus
        the single-model filename ("history.pdf").
    save_dir : str
        Directory the PDF is written into.
    """
    # Non-interactive backend so this works on headless machines.
    plt.switch_backend("agg")
    fig = plt.figure(figsize=(3, 1.5), dpi=200)
    plt.rcParams.update({"font.size": 7})
    ax = fig.add_axes([0, 0, 1, 1])
    # (history key, line style, legend label) for each curve.
    curves = (
        ("val_loss", "--", "Validation Loss"),
        ("loss", "-", "Training Loss"),
    )
    for key, style, label in curves:
        # Skip the first three epochs so early transients don't
        # dominate the y-axis scale.
        ax.plot(
            history.history[key][3:],
            style,
            color="black",
            lw=0.5,
            label=label,
        )
    ax.set_xlabel("Epoch")
    ax.legend()
    if ensemble:
        outfile = save_dir + "/model" + str(i) + "_history.pdf"
    else:
        outfile = save_dir + "/history.pdf"
    fig.savefig(outfile, bbox_inches="tight")
    plt.close()
"numpy.sum",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.figure",
"tensorflow.keras.Sequential",
"shutil.rmtree",
"numpy.round",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy... | [((17514, 17548), 'pandas.read_csv', 'pd.read_csv', (['sample_data'], {'sep': '"""\t"""'}), "(sample_data, sep='\\t')\n", (17525, 17548), True, 'import pandas as pd\n'), ((18158, 18190), 'numpy.delete', 'np.delete', (['dc', 'uk_remove'], {'axis': '(0)'}), '(dc, uk_remove, axis=0)\n', (18167, 18190), True, 'import numpy as np\n'), ((18235, 18264), 'numpy.delete', 'np.delete', (['samples', 'uk_remove'], {}), '(samples, uk_remove)\n', (18244, 18264), True, 'import numpy as np\n'), ((18541, 18562), 'numpy.array', 'np.array', (["locs['pop']"], {}), "(locs['pop'])\n", (18549, 18562), True, 'import numpy as np\n'), ((18576, 18624), 'pandas.DataFrame', 'pd.DataFrame', (["{'samples': samples, 'pops': locs}"], {}), "({'samples': samples, 'pops': locs})\n", (18588, 18624), True, 'import pandas as pd\n'), ((18636, 18660), 'numpy.array', 'np.array', (["locs_uk['pop']"], {}), "(locs_uk['pop'])\n", (18644, 18660), True, 'import numpy as np\n'), ((18672, 18726), 'pandas.DataFrame', 'pd.DataFrame', (["{'samples': samples_uk, 'pops': locs_uk}"], {}), "({'samples': samples_uk, 'pops': locs_uk})\n", (18684, 18726), True, 'import pandas as pd\n'), ((22019, 22034), 'tensorflow.keras.Sequential', 'tf.Sequential', ([], {}), '()\n', (22032, 22034), True, 'import tensorflow.keras as tf\n'), ((22525, 22554), 'tensorflow.keras.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(0.0005)'}), '(lr=0.0005)\n', (22543, 22554), True, 'import tensorflow.keras as tf\n'), ((22757, 22782), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (22775, 22782), True, 'from matplotlib import pyplot as plt\n'), ((22791, 22828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 1.5)', 'dpi': '(200)'}), '(figsize=(3, 1.5), dpi=200)\n', (22801, 22828), True, 'from matplotlib import pyplot as plt\n'), ((22831, 22868), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 7}"], {}), "({'font.size': 7})\n", (22850, 22868), 
True, 'from matplotlib import pyplot as plt\n'), ((23371, 23382), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23380, 23382), True, 'from matplotlib import pyplot as plt\n'), ((1402, 1508), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.dc', 'self.samp_list'], {'stratify': "self.samp_list['pops']", 'train_size': 'self.train_prop'}), "(self.dc, self.samp_list, stratify=self.samp_list['pops'],\n train_size=self.train_prop)\n", (1418, 1508), False, 'from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\n'), ((1611, 1635), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1625, 1635), False, 'import os\n'), ((1666, 1687), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1677, 1687), False, 'import os\n'), ((1730, 1780), 'numpy.save', 'np.save', (["(save_dir + '/X_train.npy')", 'self.X_train_0'], {}), "(save_dir + '/X_train.npy', self.X_train_0)\n", (1737, 1780), True, 'import numpy as np\n'), ((1847, 1899), 'numpy.save', 'np.save', (["(save_dir + '/X_holdout.npy')", 'self.X_holdout'], {}), "(save_dir + '/X_holdout.npy', self.X_holdout)\n", (1854, 1899), True, 'import numpy as np\n'), ((3148, 3254), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dc', 'y_train_0'], {'stratify': "y_train_0['pops']", 'train_size': 'train_prop', 'random_state': 'seed'}), "(dc, y_train_0, stratify=y_train_0['pops'], train_size=\n train_prop, random_state=seed)\n", (3164, 3254), False, 'from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\n'), ((3492, 3530), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (3505, 3530), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((3842, 4011), 'kerastuner.tuners.RandomSearch', 'RandomSearch', (['hypermodel'], {'objective': '"""val_loss"""', 'seed': 'seed', 'max_trials': 'max_trials', 'executions_per_trial': 
'runs_per_trial', 'directory': 'save_dir', 'project_name': 'mod_name'}), "(hypermodel, objective='val_loss', seed=seed, max_trials=\n max_trials, executions_per_trial=runs_per_trial, directory=save_dir,\n project_name=mod_name)\n", (3854, 4011), False, 'from kerastuner.tuners import RandomSearch\n'), ((4739, 4763), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4753, 4763), False, 'import os\n'), ((4794, 4815), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4805, 4815), False, 'import os\n'), ((4961, 4999), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (4974, 4999), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((16010, 16032), 'os.path.exists', 'os.path.exists', (['infile'], {}), '(infile)\n', (16024, 16032), False, 'import os\n'), ((16183, 16216), 'zarr.open_group', 'zarr.open_group', (['infile'], {'mode': '"""r"""'}), "(infile, mode='r')\n", (16198, 16216), False, 'import zarr\n'), ((16255, 16281), 'allel.GenotypeArray', 'allel.GenotypeArray', (['gt[:]'], {}), '(gt[:])\n', (16274, 16281), False, 'import allel\n'), ((16941, 16982), 'numpy.array', 'np.array', (['ac[biallel, :, 1]'], {'dtype': '"""int_"""'}), "(ac[biallel, :, 1], dtype='int_')\n", (16949, 16982), True, 'import numpy as np\n'), ((16990, 17006), 'numpy.transpose', 'np.transpose', (['dc'], {}), '(dc)\n', (17002, 17006), True, 'import numpy as np\n'), ((17411, 17438), 'os.path.exists', 'os.path.exists', (['sample_data'], {}), '(sample_data)\n', (17425, 17438), False, 'import os\n'), ((17864, 17881), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (17872, 17881), True, 'import numpy as np\n'), ((19492, 19507), 'tensorflow.keras.Sequential', 'tf.Sequential', ([], {}), '()\n', (19505, 19507), True, 'import tensorflow.keras as tf\n'), ((22046, 22105), 'tensorflow.keras.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {'input_shape': 
'(bag_X.shape[1],)'}), '(input_shape=(bag_X.shape[1],))\n', (22074, 22105), True, 'import tensorflow.keras as tf\n'), ((22121, 22159), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['(128)'], {'activation': '"""elu"""'}), "(128, activation='elu')\n", (22136, 22159), True, 'import tensorflow.keras as tf\n'), ((22172, 22210), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['(128)'], {'activation': '"""elu"""'}), "(128, activation='elu')\n", (22187, 22210), True, 'import tensorflow.keras as tf\n'), ((22223, 22261), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['(128)'], {'activation': '"""elu"""'}), "(128, activation='elu')\n", (22238, 22261), True, 'import tensorflow.keras as tf\n'), ((22274, 22297), 'tensorflow.keras.layers.Dropout', 'tf.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (22291, 22297), True, 'import tensorflow.keras as tf\n'), ((22310, 22348), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['(128)'], {'activation': '"""elu"""'}), "(128, activation='elu')\n", (22325, 22348), True, 'import tensorflow.keras as tf\n'), ((22361, 22399), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['(128)'], {'activation': '"""elu"""'}), "(128, activation='elu')\n", (22376, 22399), True, 'import tensorflow.keras as tf\n'), ((22412, 22450), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['(128)'], {'activation': '"""elu"""'}), "(128, activation='elu')\n", (22427, 22450), True, 'import tensorflow.keras as tf\n'), ((911, 938), 'os.path.exists', 'os.path.exists', (['self.infile'], {}), '(self.infile)\n', (925, 938), False, 'import os\n'), ((999, 1031), 'os.path.exists', 'os.path.exists', (['self.sample_data'], {}), '(self.sample_data)\n', (1013, 1031), False, 'import os\n'), ((1640, 1663), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (1653, 1663), False, 'import shutil\n'), ((4768, 4791), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (4781, 4791), False, 'import shutil\n'), ((5545, 5647), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['dc', 'y_train'], {'stratify': "y_train['pops']", 'train_size': 'train_prop', 'random_state': 'seed'}), "(dc, y_train, stratify=y_train['pops'], train_size=\n train_prop, random_state=seed)\n", (5561, 5647), False, 'from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\n'), ((5941, 5979), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5954, 5979), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((6405, 6451), 'os.path.exists', 'os.path.exists', (["(save_dir + '/ensemble_weights')"], {}), "(save_dir + '/ensemble_weights')\n", (6419, 6451), False, 'import os\n'), ((6507, 6550), 'os.makedirs', 'os.makedirs', (["(save_dir + '/ensemble_weights')"], {}), "(save_dir + '/ensemble_weights')\n", (6518, 6550), False, 'import os\n'), ((9246, 9261), 'numpy.array', 'np.array', (['yhats'], {}), '(yhats)\n', (9254, 9261), True, 'import numpy as np\n'), ((10053, 10127), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dc', 'y_train'], {'stratify': "y_train['pops']", 'random_state': 'seed'}), "(dc, y_train, stratify=y_train['pops'], random_state=seed)\n", (10069, 10127), False, 'from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\n'), ((10420, 10458), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (10433, 10458), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((10694, 10743), 'os.path.exists', 'os.path.exists', (["(save_dir + '/default_mod_weights')"], {}), "(save_dir + '/default_mod_weights')\n", (10708, 10743), False, 'import os\n'), ((10801, 10847), 'os.makedirs', 'os.makedirs', (["(save_dir + '/default_mod_weights')"], {}), "(save_dir + '/default_mod_weights')\n", (10812, 10847), False, 'import os\n'), ((10870, 11059), 
'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.callbacks.ModelCheckpoint', ([], {'filepath': "(save_dir + '/default_mod_weights/checkpoint.h5')", 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)', 'monitor': '"""val_loss"""', 'save_freq': '"""epoch"""'}), "(filepath=save_dir +\n '/default_mod_weights/checkpoint.h5', verbose=1, save_best_only=True,\n save_weights_only=True, monitor='val_loss', save_freq='epoch')\n", (10898, 11059), True, 'import tensorflow.keras as tf\n'), ((11091, 11169), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': 'patience'}), "(monitor='val_loss', min_delta=0, patience=patience)\n", (11117, 11169), True, 'import tensorflow.keras as tf\n'), ((11847, 11873), 'tensorflow.keras.backend.clear_session', 'tf.backend.clear_session', ([], {}), '()\n', (11871, 11873), True, 'import tensorflow.keras as tf\n'), ((13944, 13960), 'numpy.array', 'np.array', (['ypreds'], {}), '(ypreds)\n', (13952, 13960), True, 'import numpy as np\n'), ((13978, 14008), 'pandas.DataFrame', 'pd.DataFrame', (["top_pops['pops']"], {}), "(top_pops['pops'])\n", (13990, 14008), True, 'import pandas as pd\n'), ((16385, 16423), 'allel.read_vcf', 'allel.read_vcf', (['infile'], {'log': 'sys.stderr'}), '(infile, log=sys.stderr)\n', (16399, 16423), False, 'import allel\n'), ((16432, 16471), 'allel.GenotypeArray', 'allel.GenotypeArray', (["vcf['calldata/GT']"], {}), "(vcf['calldata/GT'])\n", (16451, 16471), False, 'import allel\n'), ((17141, 17181), 'h5py.File', 'h5py.File', (["(infile + '.locator.hdf5')", '"""w"""'], {}), "(infile + '.locator.hdf5', 'w')\n", (17150, 17181), False, 'import h5py\n'), ((17991, 18013), 'pandas.isnull', 'pd.isnull', (["locs['pop']"], {}), "(locs['pop'])\n", (18000, 18013), True, 'import pandas as pd\n'), ((19520, 19581), 'tensorflow.keras.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {'input_shape': 
'(self.input_shape,)'}), '(input_shape=(self.input_shape,))\n', (19548, 19581), True, 'import tensorflow.keras as tf\n'), ((21649, 21704), 'tensorflow.keras.layers.Dense', 'tf.layers.Dense', (['self.num_classes'], {'activation': '"""softmax"""'}), "(self.num_classes, activation='softmax')\n", (21664, 21704), True, 'import tensorflow.keras as tf\n'), ((2973, 3001), 'numpy.unique', 'np.unique', (["y_train_0['pops']"], {}), "(y_train_0['pops'])\n", (2982, 3001), True, 'import numpy as np\n'), ((3280, 3306), 'numpy.unique', 'np.unique', (["y_train['pops']"], {}), "(y_train['pops'])\n", (3289, 3306), True, 'import numpy as np\n'), ((3315, 3339), 'numpy.unique', 'np.unique', (["y_val['pops']"], {}), "(y_val['pops'])\n", (3324, 3339), True, 'import numpy as np\n'), ((6458, 6503), 'shutil.rmtree', 'shutil.rmtree', (["(save_dir + '/ensemble_weights')"], {}), "(save_dir + '/ensemble_weights')\n", (6471, 6503), False, 'import shutil\n'), ((7429, 7467), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (7442, 7467), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((7784, 7945), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.callbacks.ModelCheckpoint', ([], {'filepath': '(save_dir + temp_str)', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)', 'monitor': '"""val_loss"""', 'save_freq': '"""epoch"""'}), "(filepath=save_dir + temp_str, verbose=1,\n save_best_only=True, save_weights_only=True, monitor='val_loss',\n save_freq='epoch')\n", (7812, 7945), True, 'import tensorflow.keras as tf\n'), ((8014, 8092), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': 'patience'}), "(monitor='val_loss', min_delta=0, patience=patience)\n", (8040, 8092), True, 'import tensorflow.keras as tf\n'), ((9752, 9809), 'numpy.sum', 'np.sum', (["(tot_bag_df['top_samp'] == 
tot_bag_df['true_pops'])"], {}), "(tot_bag_df['top_samp'] == tot_bag_df['true_pops'])\n", (9758, 9809), True, 'import numpy as np\n'), ((10749, 10797), 'shutil.rmtree', 'shutil.rmtree', (["(save_dir + '/default_mod_weights')"], {}), "(save_dir + '/default_mod_weights')\n", (10762, 10797), False, 'import shutil\n'), ((16546, 16568), 'h5py.File', 'h5py.File', (['infile', '"""r"""'], {}), "(infile, 'r')\n", (16555, 16568), False, 'import h5py\n'), ((16576, 16606), 'numpy.array', 'np.array', (["h5['derived_counts']"], {}), "(h5['derived_counts'])\n", (16584, 16606), True, 'import numpy as np\n'), ((16619, 16642), 'numpy.array', 'np.array', (["h5['samples']"], {}), "(h5['samples'])\n", (16627, 16642), True, 'import numpy as np\n'), ((5727, 5753), 'numpy.unique', 'np.unique', (["y_train['pops']"], {}), "(y_train['pops'])\n", (5736, 5753), True, 'import numpy as np\n'), ((5763, 5787), 'numpy.unique', 'np.unique', (["y_val['pops']"], {}), "(y_val['pops'])\n", (5772, 5787), True, 'import numpy as np\n'), ((6640, 6683), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_prime, X_train.shape[1])'}), '(shape=(n_prime, X_train.shape[1]))\n', (6648, 6683), True, 'import numpy as np\n'), ((6697, 6751), 'pandas.DataFrame', 'pd.DataFrame', (["{'samples': [], 'pops': [], 'order': []}"], {}), "({'samples': [], 'pops': [], 'order': []})\n", (6709, 6751), True, 'import pandas as pd\n'), ((10200, 10226), 'numpy.unique', 'np.unique', (["y_train['pops']"], {}), "(y_train['pops'])\n", (10209, 10226), True, 'import numpy as np\n'), ((10239, 10263), 'numpy.unique', 'np.unique', (["y_val['pops']"], {}), "(y_val['pops'])\n", (10248, 10263), True, 'import numpy as np\n'), ((14302, 14333), 'pandas.DataFrame', 'pd.DataFrame', (["top_freqs['freq']"], {}), "(top_freqs['freq'])\n", (14314, 14333), True, 'import pandas as pd\n'), ((17300, 17319), 'h5py.string_dtype', 'h5py.string_dtype', ([], {}), '()\n', (17317, 17319), False, 'import h5py\n'), ((7171, 7248), 'sklearn.model_selection.train_test_split', 
'train_test_split', (['bag_X', 'bag_y'], {'stratify': "bag_y['pops']", 'train_size': 'train_prop'}), "(bag_X, bag_y, stratify=bag_y['pops'], train_size=train_prop)\n", (7187, 7248), False, 'from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\n'), ((12566, 12587), 'numpy.round', 'np.round', (['test_acc', '(2)'], {}), '(test_acc, 2)\n', (12574, 12587), True, 'import numpy as np\n'), ((12604, 12626), 'numpy.round', 'np.round', (['test_95CI', '(2)'], {}), '(test_95CI, 2)\n', (12612, 12626), True, 'import numpy as np\n'), ((12829, 12855), 'numpy.round', 'np.round', (['TEST_ACCURACY', '(2)'], {}), '(TEST_ACCURACY, 2)\n', (12837, 12855), True, 'import numpy as np\n'), ((12863, 12885), 'numpy.round', 'np.round', (['TEST_95CI', '(2)'], {}), '(TEST_95CI, 2)\n', (12871, 12885), True, 'import numpy as np\n'), ((12893, 12915), 'numpy.round', 'np.round', (['TEST_LOSS', '(2)'], {}), '(TEST_LOSS, 2)\n', (12901, 12915), True, 'import numpy as np\n'), ((17557, 17597), 'pandas.Series', 'pd.Series', (["['x', 'pop', 'y', 'sampleID']"], {}), "(['x', 'pop', 'y', 'sampleID'])\n", (17566, 17597), True, 'import pandas as pd\n'), ((14484, 14515), 'pandas.DataFrame', 'pd.DataFrame', (["top_freqs['freq']"], {}), "(top_freqs['freq'])\n", (14496, 14515), True, 'import pandas as pd\n'), ((14534, 14565), 'pandas.DataFrame', 'pd.DataFrame', (["top_freqs['freq']"], {}), "(top_freqs['freq'])\n", (14546, 14565), True, 'import pandas as pd\n'), ((6978, 6997), 'pandas.Series', 'pd.Series', (['popnames'], {}), '(popnames)\n', (6987, 6997), True, 'import pandas as pd\n'), ((7287, 7306), 'pandas.Series', 'pd.Series', (['popnames'], {}), '(popnames)\n', (7296, 7306), True, 'import pandas as pd\n'), ((7343, 7362), 'pandas.Series', 'pd.Series', (['popnames'], {}), '(popnames)\n', (7352, 7362), True, 'import pandas as pd\n')] |
import logging
import re
from os import path
import os
import netCDF4
import numpy as np
from netCDF4 import Dataset, date2index, num2date
from dateutil.parser import parse
from datetime import datetime
from dateutil.relativedelta import relativedelta
import geopyspark as gps
from shapely.geometry import Polygon
from pyspark_settings import NC_INPUT_PATH
logger = logging.getLogger('pyspark')
def open_file(path):
    """Open the NetCDF file at *path* read-only and return its Dataset handle."""
    return Dataset(path, mode='r', format='NETCDF4')
def filter_files(file_list, start_time, end_time, issues, is_forecast_product):
    """
    Filter files in file_list for matching time range and issues. If is_forecast_product is False,
    treats product as non-forecast. This means it will match files only based on start and end time.

    :param file_list: List of file names of the form '<prefix>_YYYYMMDDhhmm.nc'
    :param start_time: Request start time (datetime)
    :param end_time: Request end time (datetime)
    :param issues: Issue times (datetime.time) to match, or empty list/None for non-forecast products
    :param is_forecast_product: True iff the product is to be treated as forecast product, with issues and horizons
    :return: List of matched file names, sorted by start date
    :raises Exception: if a file name does not match the expected pattern
    """
    parsed_files = []
    for file_name in file_list:
        # BUGFIX: escape the dot and anchor the pattern at the end so names like
        # 'x_202001010000Xnc' or 'x_202001010000.nc.bak' are rejected instead of
        # silently matched (previously '.' matched any character and the match
        # could stop before the end of the name).
        name_components = re.search(r'(.*)_(\d{12})\.nc$', file_name)
        if name_components:
            try:
                file_start_datetime = datetime.strptime(name_components.group(2), '%Y%m%d%H%M')
                parsed_files.append((file_name, file_start_datetime))
            except ValueError:
                raise Exception('Invalid product file name: ', file_name)
        else:
            raise Exception('Could not parse product file name: ', file_name)
    matched_files = []
    for file_name, file_start_datetime in sorted(parsed_files, key=lambda f: f[1]):
        if not is_forecast_product:
            if file_start_datetime <= start_time and len(matched_files) > 0:
                matched_files.pop()  # the previous file contains data that is too early. Remove it.
            if file_start_datetime <= end_time:
                matched_files.append(file_name)
        elif file_start_datetime.time() in issues \
                and (start_time.date() <= file_start_datetime.date() <= end_time.date()):
            # Forecast product: match on issue time-of-day and issue date range
            matched_files.append(file_name)
    return matched_files
def get_indexes(lat_array, lon_array, shape, lat, lon):
    """
    Locate the grid cell whose latitude/longitude is nearest to the requested
    point, measured by the squared difference of the lat/lon values.

    :param lat_array: Array containing each grid cell's latitude
    :param lon_array: Array containing each grid cell's longitude
    :param shape: Grid shape as (rows, cols)
    :param lat: Latitude to search
    :param lon: Longitude to search
    :return: (y, x) index of the nearest cell in the lat/lon arrays
    """
    squared_dist = (lat_array.flatten() - lat) ** 2 + (lon_array.flatten() - lon) ** 2
    flat_index = int(squared_dist.argmin())
    row, col = divmod(flat_index, int(shape[1]))
    return row, col  # y, x
def get_bounding_box_polygon(lat_array, lon_array, shape, polygon_extent):
    """Translate a lat/lon extent into x/y grid index bounds of its bounding box."""
    # todo: investigate better ways
    lower = get_indexes(lat_array, lon_array, shape, polygon_extent.ymin, polygon_extent.xmin)
    upper = get_indexes(lat_array, lon_array, shape, polygon_extent.ymax, polygon_extent.xmax)
    # get_indexes returns (y, x); callers expect (x_start, x_stop, y_start, y_stop)
    return lower[1], upper[1], lower[0], upper[0]
def get_slice_indexes_and_extent(nc_file, geojson_shape):
    """
    Calculates x/y slice indexes in the nc file for the given shape.
    :param nc_file: NetCDF File
    :param geojson_shape: Requested shape (iterable of GeoJSON polygon features)
    :return: x/y-indexes of shape bounding box, geopyspark extent of bounding box,
             geojson features as polygons in x/y coordinates
    """
    lat_array = nc_file['lat'][:]
    lon_array = nc_file['lon'][:]
    # Transform the geojson into shapes. We need the shapes represented both as
    # indices into the lat-/lon-arrays (to read only the required slices from NetCDF)
    # and as x-/y-values (to mask the constructed layout).
    x_coords = nc_file['rlon'][:]
    y_coords = nc_file['rlat'][:]
    mask_shapes_indices = []
    mask_shapes_xy = []
    for feature in geojson_shape:
        # Get each vertex's index in the lat- and lon-arrays.
        # Only the outer ring (coordinates[0]) is used; holes are ignored.
        vertex_indices = np.array([get_indexes(lat_array, lon_array, lon_array.shape,
                                               vertex[1], vertex[0])
                                   for vertex in feature['geometry']['coordinates'][0]])
        mask_shapes_indices.append(vertex_indices)
        # Get the corresponding x and y values. vertex_indices is already an
        # ndarray, so index with it directly (previously it was redundantly
        # re-wrapped in np.array() twice per feature).
        vertex_xs = x_coords[vertex_indices[:, 1]]
        vertex_ys = y_coords[vertex_indices[:, 0]]
        # Transform into a polygon
        mask_shapes_xy.append(Polygon(zip(vertex_xs, vertex_ys)))
    # Get the slices to read from NetCDF (bounding box over all features)
    y_slice_start = int(min(s[:, 0].min() for s in mask_shapes_indices))
    x_slice_start = int(min(s[:, 1].min() for s in mask_shapes_indices))
    y_slice_stop = int(max(s[:, 0].max() for s in mask_shapes_indices))
    x_slice_stop = int(max(s[:, 1].max() for s in mask_shapes_indices))
    x_min = float(min(s.bounds[0] for s in mask_shapes_xy))
    y_min = float(min(s.bounds[1] for s in mask_shapes_xy))
    x_max = float(max(s.bounds[2] for s in mask_shapes_xy))
    y_max = float(max(s.bounds[3] for s in mask_shapes_xy))
    extent = gps.Extent(x_min, y_min, x_max, y_max)
    return x_slice_start, x_slice_stop, y_slice_start, y_slice_stop, extent, mask_shapes_xy
def read_metadata(nc_file, request_vars):
    """
    Collects file- and variable-level metadata from an open NetCDF file.
    :param nc_file: NetCDF file
    :param request_vars: Request variables
    :return: file metadata, projection variable, metadata per variable, variables' fillvalues, variables' dtypes
    """
    file_metadata = {key: nc_file.getncattr(key) for key in nc_file.ncattrs()}
    # The grid mapping (projection) variable is the one carrying a grid_mapping_name attribute.
    proj_var = nc_file.get_variables_by_attributes(grid_mapping_name=lambda v: v is not None)[0]
    variables_metadata = {}
    variables_fillvals = {}
    variables_dtypes = {}
    for name in request_vars:
        var = nc_file[name]
        meta = {key: var.getncattr(key) for key in var.ncattrs()}
        variables_metadata[name] = meta
        variables_dtypes[name] = var.datatype
        if '_FillValue' in meta:
            variables_fillvals[name] = meta['_FillValue']
        else:
            # Variable declares no fill value: fall back to the netCDF4 library default.
            variables_fillvals[name] = netCDF4.default_fillvals[var.dtype.str[1:]]
    return file_metadata, proj_var, variables_metadata, variables_fillvals, variables_dtypes
def slice(product, out_dir, geojson_shape, start_time, end_time, request_vars, horizons, issues, spark_ctx):
    """
    Slices the given product into specified shape.

    NOTE(review): the function name shadows the builtin ``slice``; kept as-is
    for backward compatibility with existing callers.

    :param product: Product name
    :param out_dir: Directory to save output files in
    :param geojson_shape: Shape to be extracted (GeoJSON features)
    :param start_time: Requested start time (string, parsed with dateutil)
    :param end_time: Requested end time (string, parsed with dateutil)
    :param request_vars: Requested variables
    :param spark_ctx: Spark context
    :param horizons: Requested horizons (hour offsets from issue time)
    :param issues: Requested issues (time-of-day strings)
    :return: Number of generated output files
    :raises Exception: on inconsistent requests, missing product directory,
        no matching files, unsupported projection or inconsistent metadata
    """
    request_start_time = parse(start_time)
    request_end_time = parse(end_time)
    # If requested range is empty or user requests issues but no horizon or vice versa, raise exception.
    # If both issues and horizons are empty, we try processing the product as a non-forecast product
    # and return a single file.
    if request_end_time < request_start_time or (len(horizons) == 0 and len(issues) > 0) \
            or (len(horizons) > 0 and len(issues) == 0):
        raise Exception('Request inconsistent.')
    is_forecast_product = True
    if len(horizons) == len(issues) == 0:
        is_forecast_product = False
        logger.info('Treating as non-forecast product.')
    # Parse issue strings into time-of-day objects for matching against file names.
    request_issues = list(map(lambda i: parse(i).time(), issues))
    logger.info('Request time range: {} - {}, issues {}, horizons {}'.format(request_start_time, request_end_time,
                                                                             request_issues, horizons))
    logger.info('Request variables: {}'.format(request_vars))
    # Look for a forecast_productName folder first. If it doesn't exist, check if there's a productName folder.
    product_path = path.join(NC_INPUT_PATH, 'forecast_' + product)
    if not path.isdir(product_path):
        product_path = path.join(NC_INPUT_PATH, product)
        if not path.isdir(product_path):
            raise Exception('Directory for product not found (neither forecast_{p} nor {p} exist).'.format(p=product))
    # Get product files, ignore files that are out of request date range
    product_files = filter_files(os.listdir(product_path), request_start_time, request_end_time, request_issues,
                                 is_forecast_product)
    if len(product_files) == 0:
        raise Exception('No matching NetCDF files in product directory.')
    # Get x/y slice indexes based on first nc file, so we don't need to calculate this for every file.
    # NOTE(review): this assumes all files of a product share the same grid — confirm.
    nc_file = open_file(path.join(product_path, product_files[0]))
    x_slice_start, x_slice_stop, y_slice_start, y_slice_stop, extent, xy_polygons = \
        get_slice_indexes_and_extent(nc_file, geojson_shape)
    x = x_slice_stop - x_slice_start + 1
    y = y_slice_stop - y_slice_start + 1
    logger.info('x: {}, y: {}, (x_start x_stop y_start y_stop): {}'.format(x, y, (x_slice_start, x_slice_stop,
                                                                                 y_slice_start, y_slice_stop)))
    # Read the x/y- and lat/lon-coordinates specified by the request
    x_coords = nc_file['rlon'][x_slice_start:x_slice_stop + 1]
    y_coords = nc_file['rlat'][y_slice_start:y_slice_stop + 1]
    lats = nc_file['lat'][y_slice_start:y_slice_stop + 1, x_slice_start:x_slice_stop + 1]
    lons = nc_file['lon'][y_slice_start:y_slice_stop + 1, x_slice_start:x_slice_stop + 1]
    # Get the file and variable metadata
    file_metadata, proj_var, variables_metadata, var_fillvals, var_dtypes = read_metadata(nc_file, request_vars)
    # Map the file's grid mapping onto a proj4 string; only these two projections are supported.
    if proj_var.name == 'lambert_azimuthal_equal_area':
        crs = '+proj=laea +lat_0=90 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m no_defs'
    elif proj_var.name == 'latitude_longitude':
        crs = '+proj=longlat +datum=WGS84 +no_defs'
    else:
        raise Exception('Unsupported projection:', proj_var)
    # Create the output NetCDF files' scaffolds (i.e., the dimensions and metadata but no variable data so far)
    # For forecast products, we create one output file per requested date and issue.
    # For non-forecast products, we create only one output file.
    out_files = []
    date = request_start_time
    if is_forecast_product:
        while date <= request_end_time:
            for issue in request_issues:
                out_file_name = '{}_{}_{}{}.nc'.format(product, '+'.join(request_vars), date.strftime('%Y%m%d'),
                                                       issue.strftime('%H%M'))
                out_file_path = path.join(out_dir, out_file_name)
                out_files.append(generate_output_netcdf(out_file_path, x_coords, y_coords, lats, lons,
                                                        variables_metadata, var_fillvals, var_dtypes, file_metadata,
                                                        proj_var, nc_file['rlon'], nc_file['rlat']))
            date += relativedelta(days=1)
    else:
        out_file_name = '{}_{}_{}_{}.nc'.format(product, '+'.join(request_vars),
                                                request_start_time.strftime('%Y%m%d%H%M'),
                                                request_end_time.strftime('%Y%m%d%H%M'))
        out_file_path = path.join(out_dir, out_file_name)
        out_files.append(generate_output_netcdf(out_file_path, x_coords, y_coords, lats, lons, variables_metadata,
                                                var_fillvals, var_dtypes, file_metadata, proj_var,
                                                nc_file['rlon'], nc_file['rlat']))
    nc_file.close()
    # Go though input files and add the content of their time slices to the output file
    for k, nc_file_name in enumerate(product_files):
        nc_file = open_file(path.join(product_path, nc_file_name))
        file_vars = nc_file.variables.keys()
        if any(v not in file_vars for v in request_vars):
            raise Exception('Product file does not contain all variables.')
        # Calculate start and end time for this file in this request
        file_instants = num2date(nc_file['time'][:], nc_file['time'].getncattr('units'),
                                 nc_file['time'].getncattr('calendar'))
        if not is_forecast_product:
            # Clip this file's time coverage to the requested range.
            current_start_date = max(file_instants.min(), request_start_time)
            current_end_date = min(file_instants.max(), request_end_time)
            logger.info('Matched file: {} and period {} - {}'.format(nc_file_name, current_start_date,
                                                                     current_end_date))
            # Get indices of the request's time range (last instant at/before start,
            # first instant at/after end, so the range is fully covered).
            start_instant = file_instants[file_instants <= current_start_date][-1]
            end_instant = file_instants[file_instants >= current_end_date][0]
            start_time_index, end_time_index = date2index([start_instant, end_instant], nc_file['time'])
            time_slices = range(start_time_index, end_time_index + 1)
        else:
            # Forecast file: pick one time index per requested horizon, offset
            # from the file's first instant (the issue time).
            time_slices = date2index([file_instants[0] + relativedelta(hours=h) for h in horizons], nc_file['time'])
            if len(horizons) == 1:
                # date2index returns a scalar for a single date; normalize to a list.
                time_slices = [time_slices]
        logger.info('time slice indices: {}'.format(time_slices))
        variables_data = np.ndarray(shape=(len(time_slices), len(request_vars), y, x))
        for i, var_name in enumerate(request_vars):
            variable = nc_file[var_name]
            # Guard against files of the same product disagreeing on variable metadata.
            if any(variable.getncattr(meta_key) != variables_metadata[var_name][meta_key]
                   for meta_key in variables_metadata[var_name].keys()) or variable.datatype != var_dtypes[var_name]:
                raise Exception('Inconsistent variable metadata.')
            variables_data[:, i] = variable[time_slices, y_slice_start:y_slice_stop + 1,
                                            x_slice_start:x_slice_stop + 1]
        # Load data into geopyspark: one (extent, tile) pair per time instant.
        time_instants = file_instants[time_slices]
        tiles = []
        for var_data_at_instant, instant in zip(variables_data, time_instants):
            temporal_projected_extent = gps.TemporalProjectedExtent(extent=extent, proj4=crs,
                                                                    instant=instant)
            tile = gps.Tile.from_numpy_array(var_data_at_instant, var_fillvals[request_vars[0]])
            tiles.append((temporal_projected_extent, tile))
        rdd = spark_ctx.parallelize(tiles)
        raster_layer = gps.RasterLayer.from_numpy_rdd(layer_type=gps.LayerType.SPACETIME, numpy_rdd=rdd)
        tiled_raster_layer = raster_layer.tile_to_layout(gps.LocalLayout(y, x))  # todo use smaller tiles
        # Mask everything outside the requested polygons.
        masked_layer = tiled_raster_layer.mask(xy_polygons)
        masked_var_tiles = masked_layer.to_numpy_rdd().collect()
        masked_var_data = np.array(list(tile.cells for _, tile in masked_var_tiles))
        # NOTE(review): in forecast mode this assumes product_files (sorted by
        # start datetime) line up one-to-one, in order, with the out_files
        # created per (date, issue) above — verify when issues are not given
        # in chronological order.
        out_file = out_files[k] if is_forecast_product else out_files[0]
        append_output_netcdf(out_file, time_instants, variables_metadata, masked_var_data)
        nc_file.close()
    for f in out_files:
        f.close()
    logger.info('Slicing completed.')
    return len(out_files)
def generate_output_netcdf(out_file_path, x_coords, y_coords, lats, lons, variables_metadata, var_fillvals, var_dtypes,
                           file_meta, proj_var, x_var, y_var, lat_name='lat', lon_name='lon'):
    """ Creates scaffolding of output NetCDF file, without actual variable data.

    :param out_file_path: Path of the NetCDF file to create (write mode)
    :param x_coords: Sliced projection x coordinate values
    :param y_coords: Sliced projection y coordinate values
    :param lats: 2D latitude values for the sliced grid
    :param lons: 2D longitude values for the sliced grid
    :param variables_metadata: Per-variable attribute dicts (see read_metadata)
    :param var_fillvals: Per-variable fill values
    :param var_dtypes: Per-variable NetCDF datatypes
    :param file_meta: Global file attributes to copy over (not mutated)
    :param proj_var: Grid mapping (projection) variable whose attributes are replicated
    :param x_var: Source x coordinate variable (provides name and attributes)
    :param y_var: Source y coordinate variable (provides name and attributes)
    :param lat_name: Name of the latitude variable in the output file
    :param lon_name: Name of the longitude variable in the output file
    :return: The open netCDF4 Dataset; the caller is responsible for closing it.
    """
    out_nc = netCDF4.Dataset(out_file_path, 'w')
    # define dimensions; 'time' is unlimited so slices can be appended later
    out_nc.createDimension(x_var.name, len(x_coords))
    out_nc.createDimension(y_var.name, len(y_coords))
    out_nc.createDimension('time', None)
    # create variables
    # original coordinate variables
    proj_x = out_nc.createVariable(x_var.name, x_coords.dtype, (x_var.name,))
    for attr in x_var.ncattrs():
        proj_x.setncattr(attr, x_var.getncattr(attr))
    proj_x[:] = x_coords
    # BUGFIX: use y_coords.dtype (was x_coords.dtype, a copy-paste slip)
    proj_y = out_nc.createVariable(y_var.name, y_coords.dtype, (y_var.name,))
    for attr in y_var.ncattrs():
        proj_y.setncattr(attr, y_var.getncattr(attr))
    proj_y[:] = y_coords
    # auxiliary coordinate variables lat and lon
    lat = out_nc.createVariable(lat_name, 'f4', (y_var.name, x_var.name,))
    lat.units = 'degrees_north'
    lat.standard_name = 'latitude'
    lat.long_name = 'latitude coordinate'
    lat[:] = lats
    lon = out_nc.createVariable(lon_name, 'f4', (y_var.name, x_var.name,))
    lon.units = 'degrees_east'
    lon.standard_name = 'longitude'
    lon.long_name = 'longitude coordinate'
    lon[:] = lons
    # time variable; values are appended later by append_output_netcdf
    var_time = out_nc.createVariable('time', 'i4', ('time',))
    var_time.units = 'hours since 1990-01-01 00:00:00'
    var_time.calendar = 'gregorian'
    var_time.standard_name = 'time'
    var_time.axis = 'T'
    # grid mapping variable (scalar char variable carrying projection attributes)
    grid_map = out_nc.createVariable(proj_var.name, 'c')
    for attr in proj_var.ncattrs():
        grid_map.setncattr(attr, proj_var.getncattr(attr))
    # create data variables (metadata and fill value only; data is appended later)
    for var_name, var_metadata in variables_metadata.items():
        var_data = out_nc.createVariable(var_name, var_dtypes[var_name], ('time', y_var.name, x_var.name,),
                                         fill_value=var_fillvals[var_name])
        var_data.setncatts(var_metadata)
    # Copy global attributes, dropping volatile source-file attributes.
    # Filter into a new dict so the caller's file_meta is not mutated
    # (slice() reuses the same dict for every output file).
    out_meta = {k: v for k, v in file_meta.items() if k not in ('DATETIME', 'DOCUMENTNAME')}
    out_nc.setncatts(out_meta)
    return out_nc
def append_output_netcdf(out_file, time_instants, variables_metadata, variables_data):
    """Append the given time slices of variable data to an output NetCDF file.

    :param out_file: Open output Dataset created by generate_output_netcdf
    :param time_instants: Datetimes of the slices being appended
    :param variables_metadata: Dict whose keys name the variables to write
    :param variables_data: Array shaped (time, variable, y, x)
    """
    time_var = out_file['time']
    # Remember how many slices were already written; new data goes after them.
    offset = time_var[:].shape[0]
    encoded_times = netCDF4.date2num(time_instants, units=time_var.units, calendar=time_var.calendar)
    time_var[:] = np.append(time_var[:], encoded_times)
    # Write each variable's slab directly after the previously written slices.
    end = offset + len(time_instants)
    for idx, name in enumerate(variables_metadata.keys()):
        out_file[name][offset:end] = variables_data[:, idx]
| [
"netCDF4.Dataset",
"dateutil.parser.parse",
"geopyspark.Extent",
"netCDF4.date2index",
"os.path.isdir",
"netCDF4.date2num",
"geopyspark.TemporalProjectedExtent",
"numpy.square",
"dateutil.relativedelta.relativedelta",
"geopyspark.Tile.from_numpy_array",
"numpy.array",
"geopyspark.LocalLayout",... | [((369, 397), 'logging.getLogger', 'logging.getLogger', (['"""pyspark"""'], {}), "('pyspark')\n", (386, 397), False, 'import logging\n'), ((432, 468), 'netCDF4.Dataset', 'Dataset', (['path', '"""r"""'], {'format': '"""NETCDF4"""'}), "(path, 'r', format='NETCDF4')\n", (439, 468), False, 'from netCDF4 import Dataset, date2index, num2date\n'), ((5526, 5564), 'geopyspark.Extent', 'gps.Extent', (['x_min', 'y_min', 'x_max', 'y_max'], {}), '(x_min, y_min, x_max, y_max)\n', (5536, 5564), True, 'import geopyspark as gps\n'), ((7413, 7430), 'dateutil.parser.parse', 'parse', (['start_time'], {}), '(start_time)\n', (7418, 7430), False, 'from dateutil.parser import parse\n'), ((7454, 7469), 'dateutil.parser.parse', 'parse', (['end_time'], {}), '(end_time)\n', (7459, 7469), False, 'from dateutil.parser import parse\n'), ((8553, 8600), 'os.path.join', 'path.join', (['NC_INPUT_PATH', "('forecast_' + product)"], {}), "(NC_INPUT_PATH, 'forecast_' + product)\n", (8562, 8600), False, 'from os import path\n'), ((16265, 16300), 'netCDF4.Dataset', 'netCDF4.Dataset', (['out_file_path', '"""w"""'], {}), "(out_file_path, 'w')\n", (16280, 16300), False, 'import netCDF4\n'), ((1203, 1244), 're.search', 're.search', (['"""(.*)_(\\\\d{12}).nc"""', 'file_name'], {}), "('(.*)_(\\\\d{12}).nc', file_name)\n", (1212, 1244), False, 'import re\n'), ((8612, 8636), 'os.path.isdir', 'path.isdir', (['product_path'], {}), '(product_path)\n', (8622, 8636), False, 'from os import path\n'), ((8661, 8694), 'os.path.join', 'path.join', (['NC_INPUT_PATH', 'product'], {}), '(NC_INPUT_PATH, product)\n', (8670, 8694), False, 'from os import path\n'), ((8962, 8986), 'os.listdir', 'os.listdir', (['product_path'], {}), '(product_path)\n', (8972, 8986), False, 'import os\n'), ((9330, 9371), 'os.path.join', 'path.join', (['product_path', 'product_files[0]'], {}), '(product_path, product_files[0])\n', (9339, 9371), False, 'from os import path\n'), ((12025, 12058), 'os.path.join', 
'path.join', (['out_dir', 'out_file_name'], {}), '(out_dir, out_file_name)\n', (12034, 12058), False, 'from os import path\n'), ((15254, 15339), 'geopyspark.RasterLayer.from_numpy_rdd', 'gps.RasterLayer.from_numpy_rdd', ([], {'layer_type': 'gps.LayerType.SPACETIME', 'numpy_rdd': 'rdd'}), '(layer_type=gps.LayerType.SPACETIME,\n numpy_rdd=rdd)\n', (15284, 15339), True, 'import geopyspark as gps\n'), ((18557, 18643), 'netCDF4.date2num', 'netCDF4.date2num', (['time_instants'], {'units': 'var_time.units', 'calendar': 'var_time.calendar'}), '(time_instants, units=var_time.units, calendar=var_time.\n calendar)\n', (18573, 18643), False, 'import netCDF4\n'), ((8710, 8734), 'os.path.isdir', 'path.isdir', (['product_path'], {}), '(product_path)\n', (8720, 8734), False, 'from os import path\n'), ((11708, 11729), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (11721, 11729), False, 'from dateutil.relativedelta import relativedelta\n'), ((12546, 12583), 'os.path.join', 'path.join', (['product_path', 'nc_file_name'], {}), '(product_path, nc_file_name)\n', (12555, 12583), False, 'from os import path\n'), ((13637, 13694), 'netCDF4.date2index', 'date2index', (['[start_instant, end_instant]', "nc_file['time']"], {}), "([start_instant, end_instant], nc_file['time'])\n", (13647, 13694), False, 'from netCDF4 import Dataset, date2index, num2date\n'), ((14891, 14961), 'geopyspark.TemporalProjectedExtent', 'gps.TemporalProjectedExtent', ([], {'extent': 'extent', 'proj4': 'crs', 'instant': 'instant'}), '(extent=extent, proj4=crs, instant=instant)\n', (14918, 14961), True, 'import geopyspark as gps\n'), ((15049, 15126), 'geopyspark.Tile.from_numpy_array', 'gps.Tile.from_numpy_array', (['var_data_at_instant', 'var_fillvals[request_vars[0]]'], {}), '(var_data_at_instant, var_fillvals[request_vars[0]])\n', (15074, 15126), True, 'import geopyspark as gps\n'), ((15393, 15414), 'geopyspark.LocalLayout', 'gps.LocalLayout', (['y', 'x'], {}), '(y, 
x)\n', (15408, 15414), True, 'import geopyspark as gps\n'), ((2842, 2872), 'numpy.square', 'np.square', (['(flattened_lat - lat)'], {}), '(flattened_lat - lat)\n', (2851, 2872), True, 'import numpy as np\n'), ((2875, 2905), 'numpy.square', 'np.square', (['(flattened_lon - lon)'], {}), '(flattened_lon - lon)\n', (2884, 2905), True, 'import numpy as np\n'), ((4719, 4743), 'numpy.array', 'np.array', (['vertex_indices'], {}), '(vertex_indices)\n', (4727, 4743), True, 'import numpy as np\n'), ((4780, 4804), 'numpy.array', 'np.array', (['vertex_indices'], {}), '(vertex_indices)\n', (4788, 4804), True, 'import numpy as np\n'), ((11333, 11366), 'os.path.join', 'path.join', (['out_dir', 'out_file_name'], {}), '(out_dir, out_file_name)\n', (11342, 11366), False, 'from os import path\n'), ((8114, 8122), 'dateutil.parser.parse', 'parse', (['i'], {}), '(i)\n', (8119, 8122), False, 'from dateutil.parser import parse\n'), ((13836, 13858), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'hours': 'h'}), '(hours=h)\n', (13849, 13858), False, 'from dateutil.relativedelta import relativedelta\n')] |
import sys
from pathlib import Path
sys_path = str((Path(__file__).resolve().parent / '../').resolve())
# sys.path.append("/home/liang/topic_ws/src/object_detect")
sys.path.append(str(sys_path))
import torch
import spconv
import numpy as np
import os
import datetime
from ros_numpy import point_cloud2
from numpy.lib.recfunctions import structured_to_unstructured
import for_ros.utils.calibration as calibration
from for_ros.utils.model import DetNet
from for_ros.utils.common_function import boxes3d_to_corners3d_lidar_torch
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
import time
from visualization_msgs.msg import *
from geometry_msgs.msg import *
from for_ros.utils.common_function import publish_pc
from for_ros.utils.common_utils import mask_points_by_range
from for_ros.utils import common_utils
class PrepocessData:
    """Turn a raw LiDAR point cloud into the voxelized input dict the
    detection network expects (voxels, coordinates, VFE mean features and a
    spconv SparseConvTensor), all packaged as CUDA tensors."""
    def __init__(self):
        # self.voxel_generator_cfg = cfg.DATA_CONFIG.VOXEL_GENERATOR
        # Voxelizer over range [0, -40, -3] .. [70.4, 40, 1] (meters,
        # presumably x/y/z min then max -- TODO confirm) with
        # 0.05 x 0.05 x 0.1 voxels, at most 5 points kept per voxel.
        self.voxel_generator = spconv.utils.VoxelGeneratorV2(
            voxel_size=[0.05,0.05,0.1],
            point_cloud_range=[0,-40.0,-3.0,70.4,40.0,1.0],
            max_num_points=5,
            max_voxels=16000
        )
        # camera image size (rows, cols) used for FOV filtering downstream
        self.image_shape = np.array([375,1242],dtype=np.int32)
        # grid size reversed, padded with [1, 0, 0] on the first axis --
        # presumably the spconv spatial-shape convention; TODO confirm
        self.sparse_shape = self.voxel_generator.grid_size[::-1] + [1, 0, 0]
        self.calib_data = self.get_calib()
    def get_calib(self):
        """Load the camera/LiDAR calibration from "calib.txt" (relative to
        the current working directory); asserts the file exists."""
        # calib_file = os.path.join("/media/liang/aabbf09e-0a49-40b7-a5a8-15148073b5d7/liang/kitti/training/", 'calib', '000137.txt' )
        calib_file = "calib.txt"
        assert os.path.exists(calib_file)
        return calibration.Calibration(calib_file)
    def points2voxel(self,points):
        """Voxelize *points* and build the network input dictionary.

        *points*: numpy array of LiDAR points; reshaped elsewhere as
        (N, 4) -- assumed x, y, z, intensity, TODO confirm.

        Returns a dict with CUDA tensors: "voxels", "coordinates" (with a
        leading batch-index column), "num_points", "voxel_centers",
        "points_mean", the spconv "input_sp_tensor" and the range-filtered
        raw "points".
        """
        voxel_grid = self.voxel_generator.generate(points)
        input_dict = {}
        input_dict["voxels"] = voxel_grid["voxels"]
        input_dict["coordinates"] = voxel_grid["coordinates"]
        input_dict["num_points"] = voxel_grid["num_points_per_voxel"]
        # metric voxel centers: integer indices are reversed before scaling
        # by the voxel size and offsetting by the range minimum
        input_dict["voxel_centers"] = (input_dict["coordinates"][:, ::-1] + 0.5) * self.voxel_generator.voxel_size \
                                      + self.voxel_generator.point_cloud_range[0:3]
        device = torch.cuda.current_device()
        input_dict["voxels"] = torch.tensor(input_dict["voxels"],dtype=torch.float32,device=device)
        input_dict["coordinates"] = torch.tensor(input_dict["coordinates"], dtype=torch.int32, device=device)
        input_dict["num_points"] = torch.tensor(input_dict["num_points"], dtype=torch.int32, device=device)
        input_dict["voxel_centers"] = torch.tensor(input_dict["voxel_centers"] , dtype=torch.float32, device=device)
        input_dict["image_shape"] = self.image_shape
        # prepend a zero batch-index column to every voxel coordinate
        # (single-sample batch, matching batch_size=1 below)
        zeros_tensor = torch.zeros((input_dict["coordinates"].shape[0],1),dtype=torch.int32,device=device)
        input_dict["coordinates"] = torch.cat([zeros_tensor,input_dict["coordinates"]],dim=1)
        with torch.set_grad_enabled(False):
            # per-voxel mean of the contained points -> simple VFE features
            input_dict["points_mean"] = input_dict["voxels"][:, :, :].sum(dim=1, keepdim=False)\
                                        / input_dict["num_points"].type_as(input_dict["voxels"] ).view(-1, 1) #vfe
            input_dict["input_sp_tensor"] = spconv.SparseConvTensor(
                features=input_dict["points_mean"],
                indices=input_dict["coordinates"],
                spatial_shape=self.sparse_shape,
                batch_size=1
            )
        # also keep the raw points clipped to the voxelizer's range
        input_dict["points"] = mask_points_by_range(points, self.voxel_generator.point_cloud_range)
        input_dict["points"] = torch.tensor(input_dict["points"], dtype=torch.float32, device=device)
        return input_dict
def parpare_point_cloud(path="/media/liang/aabbf09e-0a49-40b7-a5a8-15148073b5d7/liang/kitti/testing/velodyne/000000.bin"):
    """Load a KITTI-style LiDAR scan as an (N, 4) float32 array.

    :param path: .bin file of packed float32 records, four values per
        point; defaults to the originally hard-coded test frame so
        existing no-argument callers keep working.
    :return: numpy array of shape (N, 4).
    """
    points = np.fromfile(path, dtype=np.float32).reshape(-1, 4)
    return points
def get_fov_flag(points, img_shape, calib):
    """Flag LiDAR points whose camera projection lands inside the image.

    :param points: (N, >=3) array of LiDAR points (x, y, z, ...)
    :param img_shape: image size as (rows, cols)
    :param calib: calibration object with lidar_to_rect / rect_to_img
    :return: boolean array of length N; True where the point projects
        inside the image with non-negative depth.
    """
    pts_rect = calib.lidar_to_rect(points[:, 0:3])
    pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
    # inside the image horizontally and vertically
    in_width = (pts_img[:, 0] >= 0) & (pts_img[:, 0] < img_shape[1])
    in_height = (pts_img[:, 1] >= 0) & (pts_img[:, 1] < img_shape[0])
    # also require the point to be in front of the camera
    pts_valid_flag = in_width & in_height & (pts_rect_depth >= 0)
    return pts_valid_flag
def detect_test():
    """Offline smoke test: load the pickled network and run it on a single
    KITTI frame read from disk, printing the raw output."""
    preproc = PrepocessData()
    net = torch.load("DetNet.pkl")
    cloud = parpare_point_cloud()
    # keep only the points visible from the front camera
    keep = get_fov_flag(cloud, preproc.image_shape, preproc.calib_data)
    cloud = cloud[keep]
    with torch.set_grad_enabled(False):
        net_input = preproc.points2voxel(cloud)
        net.cuda()
        net.eval()
        output = net(net_input)
        print(output)
class Detection(object):
    """Wraps the LiDAR detector for ROS: loads the network weights, runs
    preprocessing + inference per cloud, and owns the marker publishers."""

    def __init__(self):
        super().__init__()
        # self.model = torch.load("DetNet.pkl")
        self.model = DetNet(4)
        self.prepocess_model = PrepocessData()
        self.pub_marker = rospy.Publisher("/object_by_lidar_pvrcnn", MarkerArray, latch=True, queue_size=1)
        self.markers_obj = MarkerArray()
        # high-water marks used by publish_result() to pad marker arrays so
        # stale markers from earlier (larger) frames get overwritten in rviz
        self.max_marker_size = 0
        self.max_marker_text_size = 0
        code_dir = str(Path(__file__).resolve().parent)
        logger_dir = os.path.join(code_dir, "logger")
        os.makedirs(logger_dir, exist_ok=True)
        log_file = os.path.join(logger_dir, "log_eval_%s.txt" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        self.logger = common_utils.create_logger(log_file)
        self.ckpt_file = os.path.join(code_dir, "kitti_model.pth")
        # self.ckpt_file = "/home/liang/topic_ws/src/object_detect/for_ros/checkpoint_epoch_80.pth"
        self.model.load_params_from_file(self.ckpt_file, logger=self.logger)
        self.model.cuda()
        self.model.eval()
        print("load model")
        # latest detection results, keyed at least by "box_corner",
        # "class" and "box_center" (read by publish_result)
        self.output = {}
        self.pub = rospy.Publisher("/object_by_lidar_pvrcnn", MarkerArray, latch=True, queue_size=1)
        self.pub_text = rospy.Publisher("/object_by_lidar_pvrcnn_text", MarkerArray, latch=True, queue_size=1)

    def detect(self, points):
        """Run one detection pass over *points* (N x 4 LiDAR array).

        Results are stored in self.output; returns 0 for compatibility
        with existing callers.
        """
        start = time.time()
        # keep only points visible from the front camera
        fov_flag = get_fov_flag(points, self.prepocess_model.image_shape, self.prepocess_model.calib_data)
        points = points[fov_flag]
        print("cut_front_point spend time :", time.time() - start)
        # pub_velo = rospy.Publisher("after_cut_velodyne_points", PointCloud2, queue_size=2)
        # publish_pc(points, 'velodyne',pub_velo)
        with torch.set_grad_enabled(False):
            start_voxel = time.time()
            inputs = self.prepocess_model.points2voxel(points)
            print("point to voxel spend time :", time.time() - start_voxel)
            start = time.time()
            # BUGFIX: the original guard was `if self.output is None:
            # self.output.clear()` -- self.output starts as {} and is never
            # set to None, so stale keys from the previous frame were never
            # cleared (and a None output would crash on .clear() anyway).
            self.output.clear()
            self.output.update(self.model(inputs))
            spend_time = time.time() - start
            print("net interfer time :", spend_time)
        print("total detect number:", self.output["box_corner"].shape[0])
        return 0
# Vertex order that traces all 12 edges of a 3d box as one LINE_STRIP:
# bottom face (0-1-2-3-0), then each vertical edge interleaved with the
# top face (4-5-1, 5-6-2, 6-7-3, 7-4-0). This replaces 17 copy-pasted
# point-append blocks in the original.
_BOX_EDGE_PATH = (0, 1, 2, 3, 0, 4, 5, 1, 5, 6, 2, 6, 7, 3, 7, 4, 0)


def publish_result(detection_model):
    """Publish the latest detections as rviz MarkerArrays.

    Draws each 3d box (8 corners in detection_model.output["box_corner"])
    as a green LINE_STRIP marker and its class label as a red
    TEXT_VIEW_FACING marker. Both arrays are padded with fully
    transparent markers up to the historical maximum count so markers
    left over from frames with more detections get overwritten.
    """
    boxes3d = detection_model.output["box_corner"]
    labels = detection_model.output["class"]
    boxes_centers = detection_model.output["box_center"]
    markers_obj = MarkerArray()
    start_time = time.time()
    frame_id = "velo_link"
    for i in range(boxes3d.shape[0]):
        marker = Marker()
        # reference frame and timestamp for this marker
        marker.header.frame_id = frame_id
        marker.header.stamp = rospy.Time.now()
        marker.header.seq = i
        # namespace + id uniquely identify the marker in rviz
        marker.ns = "object_namespace"
        marker.id = i
        marker.type = marker.LINE_STRIP
        marker.pose.position.x = 0.0
        marker.pose.position.y = 0.0
        marker.pose.position.z = 0.0
        marker.pose.orientation.x = 0.0
        marker.pose.orientation.y = 0.0
        marker.pose.orientation.z = 0.0
        marker.pose.orientation.w = 1.0
        # line width (meters)
        marker.scale.x = 0.1
        marker.scale.y = 0.1
        marker.scale.z = 0.1
        # opaque green wireframe
        marker.color.r = 0.0
        marker.color.g = 1.0
        marker.color.b = 0.0
        marker.color.a = 1
        box = boxes3d[i]
        # walk the precomputed edge path over the 8 box corners
        for corner in _BOX_EDGE_PATH:
            p = Point()
            p.x = box[corner][0]
            p.y = box[corner][1]
            p.z = box[corner][2]
            marker.points.append(p)
        markers_obj.markers.append(marker)
    if len(markers_obj.markers) > detection_model.max_marker_size:
        detection_model.max_marker_size = len(markers_obj.markers)
    if len(markers_obj.markers) != 0:
        # pad with invisible markers so stale boxes disappear in rviz
        for i in range(len(markers_obj.markers), detection_model.max_marker_size + 1):
            marker = Marker()
            marker.header.frame_id = frame_id
            marker.header.stamp = rospy.Time.now()
            marker.header.seq = i
            marker.ns = "object_namespace"
            marker.id = i
            marker.type = marker.LINE_STRIP
            marker.color.a = 0
            marker.color.r = 0.0
            marker.color.g = 0.0
            marker.color.b = 0.0
            marker.pose.position.x = 0.0
            marker.pose.position.y = 0.0
            marker.pose.position.z = 0.0
            marker.scale.x = 0.05
            markers_obj.markers.append(marker)

    markers_text = MarkerArray()
    for i in range(boxes3d.shape[0]):
        boxes_center = boxes_centers[i]
        label = labels[i]
        marker_text = Marker()
        marker_text.header.frame_id = frame_id
        marker_text.header.stamp = rospy.Time.now()
        # text markers use a +500 id offset to avoid colliding with boxes
        marker_text.header.seq = i + 500
        marker_text.ns = "obj_speed"
        marker_text.id = i + 500
        marker_text.action = Marker.ADD
        marker_text.type = Marker.TEXT_VIEW_FACING
        marker_text.pose.position.x = boxes_center[0]
        marker_text.pose.position.y = boxes_center[1]
        marker_text.pose.position.z = boxes_center[2]
        marker_text.pose.orientation.x = 0
        marker_text.pose.orientation.y = 0
        marker_text.pose.orientation.z = 0
        marker_text.pose.orientation.w = 1
        marker_text.scale.x = 3
        marker_text.scale.y = 3
        marker_text.scale.z = 3
        marker_text.color.r = 1
        marker_text.color.g = 0
        marker_text.color.b = 0
        marker_text.color.a = 1
        # marker_text.text = str(speed)+ ":" + str(label)
        marker_text.text = str(label)
        markers_text.markers.append(marker_text)
    if len(markers_text.markers) > detection_model.max_marker_text_size:
        detection_model.max_marker_text_size = len(markers_text.markers)
    if len(markers_text.markers) != 0:
        # pad with empty, transparent labels for the same reason as above
        for i in range(len(markers_text.markers), detection_model.max_marker_text_size + 1):
            marker_text = Marker()
            marker_text.header.frame_id = frame_id
            marker_text.header.stamp = rospy.Time.now()
            marker_text.header.seq = i + 500
            marker_text.ns = "obj_speed"
            marker_text.id = i + 500
            marker_text.action = Marker.ADD
            marker_text.type = Marker.TEXT_VIEW_FACING
            marker_text.pose.position.x = 0
            marker_text.pose.position.y = 0
            marker_text.pose.position.z = 0
            marker_text.pose.orientation.x = 0
            marker_text.pose.orientation.y = 0
            marker_text.pose.orientation.z = 0
            marker_text.pose.orientation.w = 1
            marker_text.scale.x = 0.05
            marker_text.scale.y = 0.05
            marker_text.scale.z = 0.05
            marker_text.color.r = 0
            marker_text.color.g = 1
            marker_text.color.b = 0
            marker_text.color.a = 0
            marker_text.text = ""
            markers_text.markers.append(marker_text)
    detection_model.pub_text.publish(markers_text)
    markers_text.markers.clear()
    detection_model.pub.publish(markers_obj)
    markers_obj.markers.clear()
    print("publish object spend time:", time.time() - start_time)
def velo_callback(msg, detection_model):
    """ROS callback: convert a PointCloud2 message into an N x 4 numpy
    array, run detection, and publish the resulting markers."""
    t_total = time.time()
    t_stage = time.time()
    detection_model.markers_obj.markers.clear()
    if len(detection_model.markers_obj.markers) != 0:
        print("not complete clear")
    # structured cloud -> plain float array with 4 fields per point
    cloud_record = point_cloud2.pointcloud2_to_array(msg)
    cloud = structured_to_unstructured(cloud_record).reshape(-1, 4)
    print("single subscribe time :", time.time() - t_stage)
    t_stage = time.time()
    detection_model.detect(cloud)  # run inference
    print("detetction time :", time.time() - t_stage)
    publish_result(detection_model)
    print("total time:", time.time() - t_total)
# rospy.init_node('Object_list')
# pub_velo = rospy.Publisher("/object_by_lidar", Marker, queue_size=1)
# rospy.loginfo("Initializing...")
def subscribe_point_cloud():
    """Subscribe to the live velodyne_points topic and spin forever."""
    rospy.init_node('l3Dnet_node')
    detector = Detection()
    rospy.Subscriber("velodyne_points", PointCloud2,
                     velo_callback, callback_args=detector, queue_size=1)
    rospy.spin()
def sub_sequential_lidar():
    """Subscribe to the KITTI bag topic /kitti/velo/pointcloud and spin."""
    rospy.init_node('PVRCNN')
    detector = Detection()
    rospy.Subscriber("/kitti/velo/pointcloud", PointCloud2,
                     velo_callback, callback_args=detector, queue_size=1)
    rospy.spin()
if __name__ == '__main__':
# subscribe_point_cloud()
sub_sequential_lidar() | [
"for_ros.utils.common_utils.mask_points_by_range",
"rospy.Subscriber",
"torch.cat",
"pathlib.Path",
"torch.cuda.current_device",
"os.path.join",
"spconv.SparseConvTensor",
"rospy.Time.now",
"torch.load",
"os.path.exists",
"for_ros.utils.model.DetNet",
"rospy.init_node",
"torch.zeros",
"dat... | [((4154, 4218), 'numpy.logical_and', 'np.logical_and', (['(pts_img[:, 0] >= 0)', '(pts_img[:, 0] < img_shape[1])'], {}), '(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])\n', (4168, 4218), True, 'import numpy as np\n'), ((4236, 4300), 'numpy.logical_and', 'np.logical_and', (['(pts_img[:, 1] >= 0)', '(pts_img[:, 1] < img_shape[0])'], {}), '(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])\n', (4250, 4300), True, 'import numpy as np\n'), ((4322, 4360), 'numpy.logical_and', 'np.logical_and', (['val_flag_1', 'val_flag_2'], {}), '(val_flag_1, val_flag_2)\n', (4336, 4360), True, 'import numpy as np\n'), ((4382, 4433), 'numpy.logical_and', 'np.logical_and', (['val_flag_merge', '(pts_rect_depth >= 0)'], {}), '(val_flag_merge, pts_rect_depth >= 0)\n', (4396, 4433), True, 'import numpy as np\n'), ((4530, 4554), 'torch.load', 'torch.load', (['"""DetNet.pkl"""'], {}), "('DetNet.pkl')\n", (4540, 4554), False, 'import torch\n'), ((7539, 7550), 'time.time', 'time.time', ([], {}), '()\n', (7548, 7550), False, 'import time\n'), ((15240, 15251), 'time.time', 'time.time', ([], {}), '()\n', (15249, 15251), False, 'import time\n'), ((15271, 15282), 'time.time', 'time.time', ([], {}), '()\n', (15280, 15282), False, 'import time\n'), ((15470, 15508), 'ros_numpy.point_cloud2.pointcloud2_to_array', 'point_cloud2.pointcloud2_to_array', (['msg'], {}), '(msg)\n', (15503, 15508), False, 'from ros_numpy import point_cloud2\n'), ((15520, 15555), 'numpy.lib.recfunctions.structured_to_unstructured', 'structured_to_unstructured', (['pc_data'], {}), '(pc_data)\n', (15546, 15555), False, 'from numpy.lib.recfunctions import structured_to_unstructured\n'), ((15743, 15754), 'time.time', 'time.time', ([], {}), '()\n', (15752, 15754), False, 'import time\n'), ((16160, 16190), 'rospy.init_node', 'rospy.init_node', (['"""l3Dnet_node"""'], {}), "('l3Dnet_node')\n", (16175, 16190), False, 'import rospy\n'), ((16238, 16350), 'rospy.Subscriber', 'rospy.Subscriber', (['"""velodyne_points"""', 
'PointCloud2', 'velo_callback'], {'callback_args': 'detection_model', 'queue_size': '(1)'}), "('velodyne_points', PointCloud2, velo_callback,\n callback_args=detection_model, queue_size=1)\n", (16254, 16350), False, 'import rospy\n'), ((16379, 16391), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (16389, 16391), False, 'import rospy\n'), ((16424, 16449), 'rospy.init_node', 'rospy.init_node', (['"""PVRCNN"""'], {}), "('PVRCNN')\n", (16439, 16449), False, 'import rospy\n'), ((16497, 16616), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/kitti/velo/pointcloud"""', 'PointCloud2', 'velo_callback'], {'callback_args': 'detection_model', 'queue_size': '(1)'}), "('/kitti/velo/pointcloud', PointCloud2, velo_callback,\n callback_args=detection_model, queue_size=1)\n", (16513, 16616), False, 'import rospy\n'), ((16646, 16658), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (16656, 16658), False, 'import rospy\n'), ((999, 1155), 'spconv.utils.VoxelGeneratorV2', 'spconv.utils.VoxelGeneratorV2', ([], {'voxel_size': '[0.05, 0.05, 0.1]', 'point_cloud_range': '[0, -40.0, -3.0, 70.4, 40.0, 1.0]', 'max_num_points': '(5)', 'max_voxels': '(16000)'}), '(voxel_size=[0.05, 0.05, 0.1],\n point_cloud_range=[0, -40.0, -3.0, 70.4, 40.0, 1.0], max_num_points=5,\n max_voxels=16000)\n', (1028, 1155), False, 'import spconv\n'), ((1206, 1243), 'numpy.array', 'np.array', (['[375, 1242]'], {'dtype': 'np.int32'}), '([375, 1242], dtype=np.int32)\n', (1214, 1243), True, 'import numpy as np\n'), ((1571, 1597), 'os.path.exists', 'os.path.exists', (['calib_file'], {}), '(calib_file)\n', (1585, 1597), False, 'import os\n'), ((1613, 1648), 'for_ros.utils.calibration.Calibration', 'calibration.Calibration', (['calib_file'], {}), '(calib_file)\n', (1636, 1648), True, 'import for_ros.utils.calibration as calibration\n'), ((2156, 2183), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2181, 2183), False, 'import torch\n'), ((2216, 2286), 'torch.tensor', 'torch.tensor', 
(["input_dict['voxels']"], {'dtype': 'torch.float32', 'device': 'device'}), "(input_dict['voxels'], dtype=torch.float32, device=device)\n", (2228, 2286), False, 'import torch\n'), ((2321, 2394), 'torch.tensor', 'torch.tensor', (["input_dict['coordinates']"], {'dtype': 'torch.int32', 'device': 'device'}), "(input_dict['coordinates'], dtype=torch.int32, device=device)\n", (2333, 2394), False, 'import torch\n'), ((2430, 2502), 'torch.tensor', 'torch.tensor', (["input_dict['num_points']"], {'dtype': 'torch.int32', 'device': 'device'}), "(input_dict['num_points'], dtype=torch.int32, device=device)\n", (2442, 2502), False, 'import torch\n'), ((2541, 2618), 'torch.tensor', 'torch.tensor', (["input_dict['voxel_centers']"], {'dtype': 'torch.float32', 'device': 'device'}), "(input_dict['voxel_centers'], dtype=torch.float32, device=device)\n", (2553, 2618), False, 'import torch\n'), ((2696, 2786), 'torch.zeros', 'torch.zeros', (["(input_dict['coordinates'].shape[0], 1)"], {'dtype': 'torch.int32', 'device': 'device'}), "((input_dict['coordinates'].shape[0], 1), dtype=torch.int32,\n device=device)\n", (2707, 2786), False, 'import torch\n'), ((2816, 2875), 'torch.cat', 'torch.cat', (["[zeros_tensor, input_dict['coordinates']]"], {'dim': '(1)'}), "([zeros_tensor, input_dict['coordinates']], dim=1)\n", (2825, 2875), False, 'import torch\n'), ((3421, 3489), 'for_ros.utils.common_utils.mask_points_by_range', 'mask_points_by_range', (['points', 'self.voxel_generator.point_cloud_range'], {}), '(points, self.voxel_generator.point_cloud_range)\n', (3441, 3489), False, 'from for_ros.utils.common_utils import mask_points_by_range\n'), ((3521, 3591), 'torch.tensor', 'torch.tensor', (["input_dict['points']"], {'dtype': 'torch.float32', 'device': 'device'}), "(input_dict['points'], dtype=torch.float32, device=device)\n", (3533, 3591), False, 'import torch\n'), ((4722, 4751), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4744, 4751), False, 'import 
torch\n'), ((5062, 5071), 'for_ros.utils.model.DetNet', 'DetNet', (['(4)'], {}), '(4)\n', (5068, 5071), False, 'from for_ros.utils.model import DetNet\n'), ((5145, 5230), 'rospy.Publisher', 'rospy.Publisher', (['"""/object_by_lidar_pvrcnn"""', 'MarkerArray'], {'latch': '(True)', 'queue_size': '(1)'}), "('/object_by_lidar_pvrcnn', MarkerArray, latch=True,\n queue_size=1)\n", (5160, 5230), False, 'import rospy\n'), ((5415, 5447), 'os.path.join', 'os.path.join', (['code_dir', '"""logger"""'], {}), "(code_dir, 'logger')\n", (5427, 5447), False, 'import os\n'), ((5455, 5493), 'os.makedirs', 'os.makedirs', (['logger_dir'], {'exist_ok': '(True)'}), '(logger_dir, exist_ok=True)\n', (5466, 5493), False, 'import os\n'), ((5629, 5665), 'for_ros.utils.common_utils.create_logger', 'common_utils.create_logger', (['log_file'], {}), '(log_file)\n', (5655, 5665), False, 'from for_ros.utils import common_utils\n'), ((5691, 5732), 'os.path.join', 'os.path.join', (['code_dir', '"""kitti_model.pth"""'], {}), "(code_dir, 'kitti_model.pth')\n", (5703, 5732), False, 'import os\n'), ((6032, 6117), 'rospy.Publisher', 'rospy.Publisher', (['"""/object_by_lidar_pvrcnn"""', 'MarkerArray'], {'latch': '(True)', 'queue_size': '(1)'}), "('/object_by_lidar_pvrcnn', MarkerArray, latch=True,\n queue_size=1)\n", (6047, 6117), False, 'import rospy\n'), ((6137, 6227), 'rospy.Publisher', 'rospy.Publisher', (['"""/object_by_lidar_pvrcnn_text"""', 'MarkerArray'], {'latch': '(True)', 'queue_size': '(1)'}), "('/object_by_lidar_pvrcnn_text', MarkerArray, latch=True,\n queue_size=1)\n", (6152, 6227), False, 'import rospy\n'), ((6266, 6277), 'time.time', 'time.time', ([], {}), '()\n', (6275, 6277), False, 'import time\n'), ((7812, 7828), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (7826, 7828), False, 'import rospy\n'), ((12385, 12401), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (12399, 12401), False, 'import rospy\n'), ((15663, 15674), 'time.time', 'time.time', ([], {}), '()\n', (15672, 
15674), False, 'import time\n'), ((15824, 15835), 'time.time', 'time.time', ([], {}), '()\n', (15833, 15835), False, 'import time\n'), ((2887, 2916), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2909, 2916), False, 'import torch\n'), ((3170, 3316), 'spconv.SparseConvTensor', 'spconv.SparseConvTensor', ([], {'features': "input_dict['points_mean']", 'indices': "input_dict['coordinates']", 'spatial_shape': 'self.sparse_shape', 'batch_size': '(1)'}), "(features=input_dict['points_mean'], indices=\n input_dict['coordinates'], spatial_shape=self.sparse_shape, batch_size=1)\n", (3193, 3316), False, 'import spconv\n'), ((3763, 3798), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.float32'}), '(path, dtype=np.float32)\n', (3774, 3798), True, 'import numpy as np\n'), ((6639, 6668), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (6661, 6668), False, 'import torch\n'), ((6694, 6705), 'time.time', 'time.time', ([], {}), '()\n', (6703, 6705), False, 'import time\n'), ((6862, 6873), 'time.time', 'time.time', ([], {}), '()\n', (6871, 6873), False, 'import time\n'), ((11529, 11545), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (11543, 11545), False, 'import rospy\n'), ((13707, 13723), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (13721, 13723), False, 'import rospy\n'), ((14935, 14946), 'time.time', 'time.time', ([], {}), '()\n', (14944, 14946), False, 'import time\n'), ((15947, 15958), 'time.time', 'time.time', ([], {}), '()\n', (15956, 15958), False, 'import time\n'), ((6464, 6475), 'time.time', 'time.time', ([], {}), '()\n', (6473, 6475), False, 'import time\n'), ((7022, 7033), 'time.time', 'time.time', ([], {}), '()\n', (7031, 7033), False, 'import time\n'), ((6817, 6828), 'time.time', 'time.time', ([], {}), '()\n', (6826, 6828), False, 'import time\n'), ((5361, 5375), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5365, 5375), False, 'from pathlib import 
Path\n'), ((5556, 5579), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5577, 5579), False, 'import datetime\n'), ((52, 66), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (56, 66), False, 'from pathlib import Path\n')] |
import numpy as np
import pybullet as pb
import matplotlib.pyplot as pt
def get_object_poses(env):
    """Return (position, orientation) pairs for every movable body."""
    # take the picture (values unused here; replace this with NN)
    rgba, view, proj = env.get_camera_image()
    poses = []
    # original comment: first two bodies are robot and table -- note the
    # loop actually starts at body id 3
    for body_id in range(3, pb.getNumBodies()):
        poses.append(pb.getBasePositionAndOrientation(body_id))
    return poses
def to_image_coordinates(rgba, view, proj, x):
    """Project 3d world points into image pixel coordinates.

    rgba, view, proj: as returned by get_camera_image
    x: 3xN array of N 3d points to transform
    Returns a 2xN array of (row, col) image coordinates.
    """
    height, width = rgba.shape[0], rgba.shape[1]
    # flattened OpenGL-style matrices: reshape then transpose
    view_mat = np.reshape(view, (4, 4)).T
    proj_mat = np.reshape(proj, (4, 4)).T
    # append a row of ones -> homogeneous coordinates
    hom = np.concatenate((x, np.ones((1, x.shape[1]))), axis=0)
    # transform into clipping space
    clip = proj_mat @ view_mat @ hom
    # perspective divide by w -> normalized device coordinates
    ndc = np.stack((clip[0] / clip[3], clip[1] / clip[3]))
    # map ndc in [-1, 1] to pixel rows/cols
    rows = (1 - ndc[1]) * height / 2
    cols = (1 + ndc[0]) * width / 2
    return np.stack((rows, cols))
def tweak_grip(env, targets):
    """Show a small image patch centered on the mean target point, then
    return *targets* unchanged (placeholder for NN-based grip refinement)."""
    rgba, view, proj = env.get_camera_image()
    # focal point: centroid of the 3d targets as a 3x1 column vector
    focus = np.array(targets).mean(axis=0).reshape((3, 1))
    # project to pixel coordinates and truncate to the nearest pixel
    ij = to_image_coordinates(rgba, view, proj, focus)
    row, col = tuple(ij.astype(int).flatten())
    print(row, col)
    print(rgba.shape)
    # NOTE(review): near the image border row-5/col-5 go negative and the
    # slice comes back empty or partial -- confirm targets stay in-frame
    sub_image = rgba[row - 5:row + 5, col - 5:col + 5, :]
    pt.imshow(sub_image)
    pt.show()
    # replace this with NN(sub_image) that modifies targets
    return targets
| [
"numpy.stack",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"pybullet.getBasePositionAndOrientation",
"numpy.ones",
"numpy.array",
"pybullet.getNumBodies"
] | [((918, 954), 'numpy.stack', 'np.stack', (['(x[0] / x[3], x[1] / x[3])'], {}), '((x[0] / x[3], x[1] / x[3]))\n', (926, 954), True, 'import numpy as np\n'), ((1479, 1499), 'matplotlib.pyplot.imshow', 'pt.imshow', (['sub_image'], {}), '(sub_image)\n', (1488, 1499), True, 'import matplotlib.pyplot as pt\n'), ((1504, 1513), 'matplotlib.pyplot.show', 'pt.show', ([], {}), '()\n', (1511, 1513), True, 'import matplotlib.pyplot as pt\n'), ((218, 255), 'pybullet.getBasePositionAndOrientation', 'pb.getBasePositionAndOrientation', (['bid'], {}), '(bid)\n', (250, 255), True, 'import pybullet as pb\n'), ((985, 1036), 'numpy.stack', 'np.stack', (['((1 - x[1]) * height, (1 + x[0]) * width)'], {}), '(((1 - x[1]) * height, (1 + x[0]) * width))\n', (993, 1036), True, 'import numpy as np\n'), ((787, 811), 'numpy.ones', 'np.ones', (['(1, x.shape[1])'], {}), '((1, x.shape[1]))\n', (794, 811), True, 'import numpy as np\n'), ((284, 301), 'pybullet.getNumBodies', 'pb.getNumBodies', ([], {}), '()\n', (299, 301), True, 'import pybullet as pb\n'), ((655, 669), 'numpy.array', 'np.array', (['view'], {}), '(view)\n', (663, 669), True, 'import numpy as np\n'), ((698, 712), 'numpy.array', 'np.array', (['proj'], {}), '(proj)\n', (706, 712), True, 'import numpy as np\n'), ((1207, 1224), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (1215, 1224), True, 'import numpy as np\n')] |
import os
import cv2
import csv
import numpy as np
num_output = 8
input_shape = (512, 512, 3)
batch_size = 10
IMAGES_FOLDER = 'resized_frames'
ANNOTATION_FILE = 'annotation_formatted.csv'
OUTPUT = 'output'
### Initialise empty numpy arrays
### Accumulate samples in Python lists and stack once at the end:
### np.vstack inside the loop re-copies the whole array every iteration (O(n^2)).
data_list = []
target_list = []

### Read annotation file, fetch image, normalise image and array, compose data and target arrays
with open(ANNOTATION_FILE, 'r') as csv_file:
    reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in reader:
        if line_count == 0:
            # skip the header row
            line_count += 1
            continue
        image_path = os.path.join(IMAGES_FOLDER, row[0])
        image = cv2.imread(image_path)
        # BUGFIX: check for a failed read *before* any arithmetic --
        # the original divided by 255 first, so a missing image made
        # `None / 255` raise instead of being skipped.
        if image is not None:
            image = np.expand_dims(image / 255, axis=0)
            # annotation row: "[x1, y1, ...]" string of 8 ints + a scale
            points = row[1]
            dimen = float(row[2])
            p = points.strip('][').split(', ')
            # np.int / np.float aliases were removed in NumPy >= 1.24;
            # use explicit fixed-width dtypes instead
            p = np.array(p, dtype=np.int64)
            p = np.divide(p, dimen)
            p = np.expand_dims(p, axis=0)
            data_list.append(image)
            target_list.append(p)
        line_count += 1

data = np.concatenate(data_list, axis=0) if data_list else np.empty((0, 512, 512, 3), dtype=np.int8)
target = np.concatenate(target_list, axis=0) if target_list else np.empty((0, 8), dtype=np.float64)

### Shuffle data and target synchronously (same permutation for both)
num_samples = data.shape[0]
perm = np.arange(num_samples)
np.random.shuffle(perm)
print("num_samples", num_samples)
data = data[perm]
target = target[perm]
print(data.shape)
print(target.shape)
np.save(os.path.join(OUTPUT, 'data.npy'), data)
np.save(os.path.join(OUTPUT, 'target.npy'), target)
| [
"numpy.divide",
"csv.reader",
"numpy.empty",
"numpy.expand_dims",
"cv2.imread",
"numpy.arange",
"numpy.array",
"numpy.vstack",
"os.path.join",
"numpy.random.shuffle"
] | [((253, 294), 'numpy.empty', 'np.empty', (['(0, 512, 512, 3)'], {'dtype': 'np.int8'}), '((0, 512, 512, 3), dtype=np.int8)\n', (261, 294), True, 'import numpy as np\n'), ((301, 333), 'numpy.empty', 'np.empty', (['(0, 8)'], {'dtype': 'np.float'}), '((0, 8), dtype=np.float)\n', (309, 333), True, 'import numpy as np\n'), ((1294, 1316), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (1303, 1316), True, 'import numpy as np\n'), ((1317, 1339), 'numpy.random.shuffle', 'np.random.shuffle', (['arr'], {}), '(arr)\n', (1334, 1339), True, 'import numpy as np\n'), ((490, 525), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (500, 525), False, 'import csv\n'), ((1460, 1492), 'os.path.join', 'os.path.join', (['OUTPUT', '"""data.npy"""'], {}), "(OUTPUT, 'data.npy')\n", (1472, 1492), False, 'import os\n'), ((1507, 1541), 'os.path.join', 'os.path.join', (['OUTPUT', '"""target.npy"""'], {}), "(OUTPUT, 'target.npy')\n", (1519, 1541), False, 'import os\n'), ((687, 722), 'os.path.join', 'os.path.join', (['IMAGES_FOLDER', 'row[0]'], {}), '(IMAGES_FOLDER, row[0])\n', (699, 722), False, 'import os\n'), ((791, 820), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (805, 820), True, 'import numpy as np\n'), ((952, 977), 'numpy.array', 'np.array', (['p'], {'dtype': 'np.int'}), '(p, dtype=np.int)\n', (960, 977), True, 'import numpy as np\n'), ((994, 1013), 'numpy.divide', 'np.divide', (['p', 'dimen'], {}), '(p, dimen)\n', (1003, 1013), True, 'import numpy as np\n'), ((1030, 1055), 'numpy.expand_dims', 'np.expand_dims', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (1044, 1055), True, 'import numpy as np\n'), ((743, 765), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (753, 765), False, 'import cv2\n'), ((1114, 1138), 'numpy.vstack', 'np.vstack', (['(data, image)'], {}), '((data, image))\n', (1123, 1138), True, 'import numpy as np\n'), ((1164, 1186), 
'numpy.vstack', 'np.vstack', (['(target, p)'], {}), '((target, p))\n', (1173, 1186), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Catalog.
TODO: add plotting
"""
from pandas import DataFrame, read_csv, read_table
from numpy import full, sqrt, power, arctan2, rad2deg
from numpy.random import uniform, normal
from .data import GUO_FILE
from .seed import set_numpy_random_seed
#TODO: allow access to columns directly via []
#TODO: allow access to columns directly via []
class Catalog(object):
    """Thin wrapper around a pandas DataFrame with schema validation.

    Subclasses declare REQUIRED_KEYS; construction raises when a required
    column is missing or the catalog has no rows. The underlying ndarray
    is cached for fast column access.
    """

    # column names that must be present in the wrapped dataframe
    REQUIRED_KEYS = []

    def __init__(self, dataframe):
        self.check(dataframe)
        self.dataframe = dataframe
        # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0;
        # .values is the portable equivalent (works on old and new pandas)
        self._matrix = dataframe.values

    def __len__(self):
        return len(self.dataframe)

    def check(self, dataframe):
        """Raise if *dataframe* is missing a required column or is empty."""
        columns = dataframe.columns
        for key in self.REQUIRED_KEYS:
            if key not in columns:
                raise Exception("Missing {} key in catalog dataframe!".format(key))
        if len(dataframe) == 0:
            raise Exception("Empty catalog!")

    def column_fast(self, key):
        """Return column *key* as a raw numpy array.

        Uses the cached ndarray to avoid pandas.Series indexing overhead and
        make huge savings on vectorized math operations;
        see https://penandpants.com/2014/09/05/performance-of-pandas-series-vs-numpy-arrays/
        """
        return self._matrix[:, self.dataframe.columns.get_loc(key)]
class SourceCatalog(Catalog):
    """Catalog of background (source) galaxies with ellipticity columns."""

    ID = 'id'
    RA = 'ra'
    DEC = 'dec'
    Z = 'z'
    E1 = 'e1'
    E2 = 'e2'
    E_MAG = 'e_mag'
    E_PHI = 'e_phi'

    REQUIRED_KEYS = [ID, RA, DEC, Z, E1, E2, E_MAG, E_PHI]

    def __init__(self, dataframe):
        super(SourceCatalog, self).__init__(dataframe)

    #TODO: handle csv vs table
    @staticmethod
    def from_file(filename, keymap):
        """Load a source catalog from a csv file.

        Only the columns named in *keymap* are read; they are renamed to
        the canonical catalog column names before wrapping.
        """
        frame = read_csv(filename, usecols=keymap.keys())
        frame.rename(columns=keymap, inplace=True)
        return SourceCatalog(frame)
class SourceCatalogFactory(object):
    """Generate random source catalogs with gaussian intrinsic ellipticities."""

    # fixed source redshift assigned to every generated galaxy
    Z = 1.3857

    def __init__(self, limits, density, sigma_e=0.2, random_seed=None):
        """
        :param limits: field limits exposing xi/xf/yi/yf, each with
            .arcmin and .radian attributes
        :param density: source surface density (galaxies per square arcmin)
        :param sigma_e: std-dev of each intrinsic ellipticity component
        :param random_seed: optional seed for reproducible catalogs
        """
        if random_seed:
            set_numpy_random_seed(random_seed)
        self.limits = limits
        self.density = density
        self.sigma_e = sigma_e

    def generate(self):
        """Build and return a randomly populated SourceCatalog."""
        df = DataFrame()
        area = (self.limits.xf.arcmin - self.limits.xi.arcmin) * (self.limits.yf.arcmin -
                                                                  self.limits.yi.arcmin)
        count = abs(int(area * self.density)) #TODO: figure out negative area
        df[SourceCatalog.ID] = range(count)
        df[SourceCatalog.RA] = uniform(self.limits.xi.radian, self.limits.xf.radian, count)
        df[SourceCatalog.DEC] = uniform(self.limits.yi.radian, self.limits.yf.radian, count)
        df[SourceCatalog.Z] = full(count, SourceCatalogFactory.Z)
        e1 = normal(0, self.sigma_e, count)
        e2 = normal(0, self.sigma_e, count)
        # BUGFIX: the original condition was `abs(e1 > 1.0)`, i.e. abs()
        # of a *boolean* array, so components < -1 were never redrawn
        # (the e2 resample line already used the correct abs(e2) > 1.0).
        # It also assigned one identical scalar to every bad slot; here
        # each out-of-range component gets an independent redraw.
        e1 = self._resample_out_of_range(e1)
        e2 = self._resample_out_of_range(e2)
        df[SourceCatalog.E1] = e1
        df[SourceCatalog.E2] = e2
        df[SourceCatalog.E_MAG] = sqrt(power(e1, 2) + power(e2, 2))
        # position angle in degrees, half the phase of (e1, e2)
        df[SourceCatalog.E_PHI] = rad2deg(arctan2(e2, e1)) / 2.0
        return SourceCatalog(df)

    def _resample_out_of_range(self, e):
        """Redraw components with |e| > 1 until all magnitudes are valid."""
        bad = abs(e) > 1.0
        while bad.any():
            e[bad] = normal(0.0, self.sigma_e, bad.sum())
            bad = abs(e) > 1.0
        return e
class HaloCatalog(Catalog):
    """Catalog of dark-matter halos: masses, position and redshift."""

    ID = 'id'
    HALO_MASS = 'mass_h'
    STELLAR_MASS = 'mass_s'
    RA = 'ra'
    DEC = 'dec'
    Z = 'z'

    def __init__(self, dataframe):
        super(HaloCatalog, self).__init__(dataframe)

    @staticmethod
    def from_file(filename, keymap):
        """Read a delimited table, keeping only the mapped columns and renaming them."""
        table = read_table(filename, usecols=keymap.keys())
        table = table.rename(columns=keymap)
        return HaloCatalog(table)

    #TODO: switch this map
    @staticmethod
    def default():
        """Load the bundled Guo et al. catalog with the standard column map."""
        column_map = {
            'GalID': HaloCatalog.ID,
            'M_Subhalo[M_sol/h]': HaloCatalog.HALO_MASS,
            'M_Stellar[M_sol/h]': HaloCatalog.STELLAR_MASS,
            'pos_0[rad]': HaloCatalog.RA,
            'pos_1[rad]': HaloCatalog.DEC,
            'z_spec': HaloCatalog.Z,
        }
        return HaloCatalog.from_file(GUO_FILE, keymap=column_map)
class FastSampleHaloCatalogFactory(object):
    """Re-sample halo masses in place on a shared mutable halo catalog.

    Each `generate` call draws one mass per halo from `mass_distribution`
    and writes the samples back into the catalog.
    """

    def __init__(self, mutable_mass_halo_catalog, mass_distribution, random_seed=None):
        if random_seed:
            set_numpy_random_seed(random_seed)
        self.mutable_mass_halo_catalog = mutable_mass_halo_catalog
        self.mass_distribution = mass_distribution

    def generate(self):
        """Sample fresh halo masses and return the (mutated) catalog."""
        catalog = self.mutable_mass_halo_catalog
        catalog.set_halo_mass(self.mass_distribution.sample(len(catalog)))
        return catalog
class MutableHaloMassCatalog(HaloCatalog):
    """HaloCatalog whose halo-mass column can be overwritten after loading."""

    def __init__(self, dataframe):
        super(MutableHaloMassCatalog, self).__init__(dataframe)

    @staticmethod
    def from_file(filename, keymap, limits, z):
        """Load a table, clip it to the field `limits`, and cut at redshift `z`."""
        df = read_table(filename, usecols=keymap.keys())
        df.rename(columns=keymap, inplace=True)
        # Flip the RA sign: the input table uses a left-handed coordinate system.
        df[HaloCatalog.RA] = -df[HaloCatalog.RA]
        # Note the swapped xf/xi bounds on RA -- a consequence of the sign flip.
        keep = (
            (df[HaloCatalog.RA] > limits.xf.radian) &
            (df[HaloCatalog.RA] < limits.xi.radian) &
            (df[HaloCatalog.DEC] > limits.yi.radian) &
            (df[HaloCatalog.DEC] < limits.yf.radian) &
            (df[HaloCatalog.HALO_MASS] > 0) &
            (df[HaloCatalog.Z] <= z)
        )
        selected = df[keep].reset_index(drop=True)
        return MutableHaloMassCatalog(selected)

    #TODO: best sequence of signatures for radius
    @staticmethod
    def default(limits, z):
        """Load the bundled Guo et al. catalog clipped to `limits` and `z`."""
        keymap = {
            'GalID': HaloCatalog.ID,
            'M_Subhalo[M_sol/h]': HaloCatalog.HALO_MASS,
            'M_Stellar[M_sol/h]': HaloCatalog.STELLAR_MASS,
            'pos_0[rad]': HaloCatalog.RA,
            'pos_1[rad]': HaloCatalog.DEC,
            'z_spec': HaloCatalog.Z
        }
        return MutableHaloMassCatalog.from_file(GUO_FILE, keymap, limits, z)

    def set_halo_mass(self, column):
        """Overwrite the halo-mass column in place."""
        self.dataframe[HaloCatalog.HALO_MASS] = column
| [
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.full",
"numpy.arctan2",
"numpy.power",
"numpy.random.normal"
] | [((2269, 2280), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (2278, 2280), False, 'from pandas import DataFrame, read_csv, read_table\n'), ((2606, 2666), 'numpy.random.uniform', 'uniform', (['self.limits.xi.radian', 'self.limits.xf.radian', 'count'], {}), '(self.limits.xi.radian, self.limits.xf.radian, count)\n', (2613, 2666), False, 'from numpy.random import uniform, normal\n'), ((2738, 2798), 'numpy.random.uniform', 'uniform', (['self.limits.yi.radian', 'self.limits.yf.radian', 'count'], {}), '(self.limits.yi.radian, self.limits.yf.radian, count)\n', (2745, 2798), False, 'from numpy.random import uniform, normal\n'), ((2869, 2904), 'numpy.full', 'full', (['count', 'SourceCatalogFactory.Z'], {}), '(count, SourceCatalogFactory.Z)\n', (2873, 2904), False, 'from numpy import full, sqrt, power, arctan2, rad2deg\n'), ((2918, 2948), 'numpy.random.normal', 'normal', (['(0)', 'self.sigma_e', 'count'], {}), '(0, self.sigma_e, count)\n', (2924, 2948), False, 'from numpy.random import uniform, normal\n'), ((2962, 2992), 'numpy.random.normal', 'normal', (['(0)', 'self.sigma_e', 'count'], {}), '(0, self.sigma_e, count)\n', (2968, 2992), False, 'from numpy.random import uniform, normal\n'), ((3172, 3197), 'numpy.random.normal', 'normal', (['(0.0)', 'self.sigma_e'], {}), '(0.0, self.sigma_e)\n', (3178, 3197), False, 'from numpy.random import uniform, normal\n'), ((3270, 3295), 'numpy.random.normal', 'normal', (['(0.0)', 'self.sigma_e'], {}), '(0.0, self.sigma_e)\n', (3276, 3295), False, 'from numpy.random import uniform, normal\n'), ((3403, 3433), 'numpy.power', 'power', (['df[SourceCatalog.E1]', '(2)'], {}), '(df[SourceCatalog.E1], 2)\n', (3408, 3433), False, 'from numpy import full, sqrt, power, arctan2, rad2deg\n'), ((3436, 3466), 'numpy.power', 'power', (['df[SourceCatalog.E2]', '(2)'], {}), '(df[SourceCatalog.E2], 2)\n', (3441, 3466), False, 'from numpy import full, sqrt, power, arctan2, rad2deg\n'), ((3549, 3600), 'numpy.arctan2', 'arctan2', 
(['df[SourceCatalog.E2]', 'df[SourceCatalog.E1]'], {}), '(df[SourceCatalog.E2], df[SourceCatalog.E1])\n', (3556, 3600), False, 'from numpy import full, sqrt, power, arctan2, rad2deg\n')] |
"""
Various functions / models for calculating airmass
"""
import numpy as np
from astropy.constants import R_earth
# import matplotlib.pyplot as plt
RHO0 = 1.225 # Density of air at sea level (kg/m^3)
HMAX = 84852. # Maximal height for model atmosphere
# Re = 6378100 # Earth radius (m)
YOUNG_COEF1 = [1.002432, 0.148386, 0.0096467]
YOUNG_COEF2 = [1., 0.149864, 0.0102963, 0.000303978]
HARDIE_COEFF = [-0.0008083, -0.002875, -0.0018167, 1]
def altitude(ra, dec, lmst, lat):
    """
    Compute the altitude (radians) of an object above the horizon.

    Parameters
    ----------
    ra, dec:
        equatorial coordinates in radians
    lat:
        observer latitude in radians
    lmst:
        local mean sidereal time in radians
    """
    hour_angle = lmst - ra
    sin_alt = (np.sin(lat) * np.sin(dec)
               + np.cos(lat) * np.cos(dec) * np.cos(hour_angle))
    return np.arcsin(sin_alt)
def refractive_index(h_gp):
    """Mean atmospheric refractive index at geopotential height `h_gp`.

    Scales the sea-level refractivity (delta = 2.93e-4) by the local air
    density relative to the sea-level density RHO0.
    """
    delta = 2.93e-4
    density_ratio = atmosphere(h_gp) / RHO0
    return 1. + delta * density_ratio
class Atmopshere(object):
    # NOTE(review): empty placeholder (name misspells "Atmosphere");
    # presumably intended to collect the atmosphere model functions below
    # into a class -- TODO confirm before renaming, callers may exist.
    pass
def atmosphere(H_gp): # class StandardAtmosphere
    """
    US Standard Atmosphere, 1976

    As published by NOAA, NASA, and USAF. The standard atmosphere is
    mathematically defined in six layers from sea level to 71 km (with a
    seventh expression covering up to 84 852 m geopotential height).
    http://scipp.ucsc.edu/outreach/balloon/atmos/1976%20Standard%20Atmosphere.htm

    Parameters
    ----------
    H_gp : Geopotential scale height (m); scalar or array

    Returns
    -------
    rho : Atmospheric density (kg/m^3)
    """
    # Promote a scalar input to a 1-element array so np.piecewise works.
    if isinstance(H_gp, (float, int)):
        H_gp = np.array([H_gp])
    # Layer boundaries of the standard-atmosphere model (geopotential m).
    regions = [(0. <= H_gp) & (H_gp <= 11e3),
               (11e3 < H_gp) & (H_gp <= 20e3),
               (20e3 < H_gp) & (H_gp <= 32e3),
               (32e3 < H_gp) & (H_gp <= 47e3),
               (47e3 < H_gp) & (H_gp <= 51e3),
               (51e3 < H_gp) & (H_gp <= 71e3),
               (71e3 < H_gp) & (H_gp <= 84852.)]
    # Density law for each layer, scaled by the sea-level density RHO0;
    # power laws for linear-temperature layers, exponentials for the
    # isothermal layers (11-20 km and 47-51 km).
    expressions = [lambda x: RHO0 * (1. - x / 44330.94) ** 4.25587615,
                   lambda x: RHO0 * 0.29707755 * np.exp((11e3 - x) / 6341.62),
                   lambda x: RHO0 * (0.978260357 + x / 201019.8) ** -35.163195,
                   lambda x: RHO0 * (0.85699696 + x / 57946.3) ** -13.2011407,
                   lambda x: RHO0 * 0.0011653266 *
                   np.exp((47e3 - x) / 7922.26),
                   lambda x: RHO0 * (0.798988674 - x / 184809.7) ** 11.201141,
                   lambda x: RHO0 * (0.900194103 - x / 198095.96) ** 16.0815975]
    return np.piecewise(H_gp, regions, expressions)
def plane_parallel(z):
    """
    Airmass for a homogeneous plane-parallel atmosphere.

    When the zenith angle is small to moderate, a good approximation is given
    by assuming a homogeneous plane-parallel atmosphere (i.e., one in which
    density is constant and Earth's curvature is ignored). The air mass X
    then is simply the secant of the zenith angle z:

    .. math:: X = sec(z)

    At a zenith angle of 60°, the air mass is approximately 2. Because the
    Earth is not flat, this formula is only usable for zenith angles up to
    about 60° to 75°, depending on accuracy requirements; it diverges at the
    horizon, whereas the realistic spherical-atmosphere value is below 40.

    Parameters
    ----------
    z : float, array
        zenith distance (radians)

    Returns
    -------
    relative airmass
    """
    # BUGFIX: NumPy has no `sec` function; the secant is 1/cos.
    return 1.0 / np.cos(z)
def homogeneous_spherical(z, h=0., h_atm=HMAX):
    """
    Airmass for a non-refracting homogeneous spherical atmosphere with an
    elevated observer.

    Parameters
    ----------
    z: float, array
        apparent zenith distance (radians)
    h: float
        observer altitude in metres
    h_atm: float
        height of atmosphere in metres

    Returns
    -------
    relative airmass

    Notes
    -----
    <http://en.wikipedia.org/wiki/Airmass#Homogeneous_spherical_atmosphere_with_elevated_observer>_

    References
    ----------
    .. [1] Schoenberg, E. 1929. Theoretische Photometrie, Über die Extinktion
       des Lichtes in der Erdatmosphäre. In Handbuch der Astrophysik. Band II,
       erste Hälfte. Berlin: Springer.
    """
    # Work in units of the atmosphere height.
    radius_ratio = R_earth.value / h_atm
    height_frac = h / h_atm
    projection = (radius_ratio + height_frac) * np.cos(z)
    under_root = (projection ** 2
                  + 2 * radius_ratio * (1 - height_frac)
                  - height_frac ** 2 + 1)
    return np.sqrt(under_root) - projection
def Young74(z, h):
    """
    Airmass model derived assuming an isothermal atmosphere and dropping
    high-order terms [1]_. An isothermal atmosphere with pressure scale
    height H has an exponential density attenuation of the form

    .. math:: \\rho = \\rho_0 e^{-y/H}

    In an isothermal atmosphere, 37% of the atmosphere is above the pressure
    scale height. An approximate correction for refraction is also included
    in this model.

    Parameters
    ----------
    z: float, array
        apparent zenith distance (radians)
    h: float
        observer altitude in metres

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Young, A. T. 1974. Atmospheric Extinction. Ch. 3.1 in Methods of
       Experimental Physics, Vol. 12 Astrophysics, Part A: Optical and
       Infrared. ed. N. Carleton. New York: Academic Press. ISBN 0-12-474912-1.
    """
    # NOTE(review): no implementation -- the body is only this docstring, so
    # the function currently returns None. The Young (1974) formula still
    # needs to be filled in.
# Non-physical (interpolative) models follow
# ----------------------------
def Hardie62(z):
    """
    Interpolative polynomial in (sec z - 1); gives usable results for zenith
    angles of up to perhaps 85°. The calculated air mass reaches a maximum,
    and then approaches negative infinity at the horizon [1]_.

    Parameters
    ----------
    z: float, array
        true zenith distance (radians)

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Hardie, R. H. 1962. In Astronomical Techniques. Hiltner, W. A.,
       ed. Chicago: University of Chicago Press, 184-. LCCN 62009113.
       `_ADS: <https://ui.adsabs.harvard.edu/abs/1962aste.book.....H>`_
    """
    # BUGFIX: NumPy has no `sez` (nor `sec`) function; the secant is 1/cos.
    secz_1 = 1.0 / np.cos(z) - 1.0
    return np.polyval(HARDIE_COEFF, secz_1)
def YoungIrvine67(z):
    """
    Gives usable results up to approximately 80°, but the accuracy degrades
    rapidly at greater zenith angles. The calculated air mass reaches a
    maximum of 11.13 at 86.6°, becomes zero at 88°, and approaches negative
    infinity at the horizon [1]_.

    Parameters
    ----------
    z: float, array
        true zenith distance (radians)

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Young, A. T., and Irvine, W. M. 1967. Multicolor photoelectric
       photometry of the brighter planets. I. Program and procedure.
       Astronomical Journal 72:945-950. doi: 10.1086/110366
       `_ADS: <https://ui.adsabs.harvard.edu/abs/1967AJ.....72..945Y>`_
    """
    # BUGFIX: NumPy has no `sez` (nor `sec`) function; the secant is 1/cos.
    secz = 1.0 / np.cos(z)
    return secz * (1 - 0.0012 * (secz * secz - 1))
def Rozenberg66(z):
    """
    Gives reasonable results for high zenith angles, with a horizon air mass
    of 40.

    Parameters
    ----------
    z: float, array
        true zenith distance (radians)

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Rozenberg, G. V. 1966. Twilight: A Study in Atmospheric Optics.
       New York: Plenum Press, 160. Translated from the Russian. LCCN
       65011345.
    """
    mu = np.cos(z)
    denominator = mu + 0.025 * np.exp(-11 * mu)
    return 1 / denominator
def KastenYoung89(z):
    """
    Reasonable results for zenith angles of up to 90°, with an air mass of
    approximately 38 at the horizon. The zenith angle in the correction term
    is in degrees [1]_.

    Parameters
    ----------
    z: float, array
        zenith distance (radians)

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Kasten, F.; Young, A. T. (1989). "Revised optical air mass tables
       and approximation formula". Applied Optics. 28 (22): 4735-4738.
       `ADS <https://ui.adsabs.harvard.edu/abs/1989ApOpt..28.4735K>`
    """
    # BUGFIX: `z` is already in radians and the correction term needs the
    # zenith angle in *degrees*; the original applied np.radians to an angle
    # that was already in radians. Convert with np.degrees instead.
    zd = np.degrees(z)
    return (np.cos(z) + 0.50572 * (96.07995 - zd) ** -1.6364) ** -1
def Young94(z):
    """
    Rational approximation in cos(z) of the true zenith angle, with a
    claimed maximum error (at the horizon) of 0.0037 air mass [1]_.

    Parameters
    ----------
    z: float, array
        true zenith distance (radians)

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Young, A. T. 1994. Air mass and refraction. Applied Optics.
       33:1108-1110. doi: 10.1364/AO.33.001108
       `ADS <https://ui.adsabs.harvard.edu/abs/1994ApOpt..33.1108Y>`_
    """
    mu = np.cos(z)
    numerator = np.polyval(YOUNG_COEF1, mu)
    denominator = np.polyval(YOUNG_COEF2, mu)
    return numerator / denominator
def Pickering02(z):
    """
    Interpolative airmass formula in the apparent altitude [1]_.

    Parameters
    ----------
    z: float, array
        apparent zenith distance (radians)

    Returns
    -------
    relative airmass

    References
    ----------
    .. [1] Pickering, K. A. (2002). "The Southern Limits of the Ancient Star
       Catalog" DIO. 12 (1): 20-39.
       `PDF <http://www.dioi.org/vols/wc0.pdf>`_
    """
    # Apparent altitude in degrees.
    alt = np.degrees(np.pi / 2 - z)
    corrected = alt + 244 / (165 + 47 * alt ** 1.1)
    return 1. / np.sin(np.radians(corrected))
def Kivalov07(Z, delh=50):
    """
    Layer-by-layer numerical airmass integration (Kivalov-style).

    NOTE(review): not implemented -- the function raises immediately, so
    everything below the `raise` is dead code. If the raise is ever removed,
    note that `Hmax` (used below) is undefined; the module constant is
    spelled `HMAX` -- TODO confirm intent.

    References
    ----------
    """
    raise NotImplementedError
    # ---- dead code below this point ----
    r0 = 6356766 # Earth radius (m)
    def i(z, h):
        # Refracted incidence angle at height h (Snell's law on spherical shells).
        n0 = refractive_index(0.)
        n = refractive_index(h)
        rh = r0 + h
        sini = (r0 * n0 / (rh * n)) * np.sin(z)
        return np.arcsin(sini)
    def delM(z, h):
        # Mass contribution of one atmospheric layer of thickness delh.
        hm = h + 0.5 * delh # mean height of layer
        rh = r0 + h # base of layer
        rhp = r0 + h + delh # top of layer
        rhm = r0 + hm
        rho = atmosphere(hm)
        im = np.mean([i(z, h), i(z, h + delh)], axis=0)
        cos_delphi = (4 * (rhm * np.cos(im)) ** 2 - (delh * np.sin(im)) ** 2) / (
            4 * (rhm * np.cos(im)) ** 2 + (delh * np.sin(im)) ** 2)
        # Law of cosines gives the slant path length through the layer.
        dM = rho * np.sqrt(rh * rh + rhp * rhp - 2 * rh * rhp * cos_delphi)
        return dM
    H = np.arange(0., Hmax, delh)
    X = np.empty(Z.shape)
    for j, z in enumerate(Z):
        DM = delM(z, H)
        X[j] = sum(DM)
    return X
| [
"numpy.radians",
"numpy.sez",
"numpy.degrees",
"numpy.polyval",
"numpy.empty",
"numpy.arcsin",
"numpy.sin",
"numpy.arange",
"numpy.array",
"numpy.cos",
"numpy.sec",
"numpy.exp",
"numpy.piecewise",
"numpy.sqrt"
] | [((2562, 2602), 'numpy.piecewise', 'np.piecewise', (['H_gp', 'regions', 'expressions'], {}), '(H_gp, regions, expressions)\n', (2574, 2602), True, 'import numpy as np\n'), ((3394, 3403), 'numpy.sec', 'np.sec', (['z'], {}), '(z)\n', (3400, 3403), True, 'import numpy as np\n'), ((4208, 4217), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (4214, 4217), True, 'import numpy as np\n'), ((5978, 6010), 'numpy.polyval', 'np.polyval', (['HARDIE_COEFF', 'secz_1'], {}), '(HARDIE_COEFF, secz_1)\n', (5988, 6010), True, 'import numpy as np\n'), ((6747, 6756), 'numpy.sez', 'np.sez', (['z'], {}), '(z)\n', (6753, 6756), True, 'import numpy as np\n'), ((7261, 7270), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (7267, 7270), True, 'import numpy as np\n'), ((7887, 7900), 'numpy.radians', 'np.radians', (['z'], {}), '(z)\n', (7897, 7900), True, 'import numpy as np\n'), ((8478, 8487), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (8484, 8487), True, 'import numpy as np\n'), ((9030, 9055), 'numpy.degrees', 'np.degrees', (['(np.pi / 2 - z)'], {}), '(np.pi / 2 - z)\n', (9040, 9055), True, 'import numpy as np\n'), ((10018, 10044), 'numpy.arange', 'np.arange', (['(0.0)', 'Hmax', 'delh'], {}), '(0.0, Hmax, delh)\n', (10027, 10044), True, 'import numpy as np\n'), ((10052, 10069), 'numpy.empty', 'np.empty', (['Z.shape'], {}), '(Z.shape)\n', (10060, 10069), True, 'import numpy as np\n'), ((1632, 1648), 'numpy.array', 'np.array', (['[H_gp]'], {}), '([H_gp])\n', (1640, 1648), True, 'import numpy as np\n'), ((4257, 4307), 'numpy.sqrt', 'np.sqrt', (['(rcosz ** 2 + 2 * r * (1 - y) - y ** 2 + 1)'], {}), '(rcosz ** 2 + 2 * r * (1 - y) - y ** 2 + 1)\n', (4264, 4307), True, 'import numpy as np\n'), ((5953, 5962), 'numpy.sez', 'np.sez', (['z'], {}), '(z)\n', (5959, 5962), True, 'import numpy as np\n'), ((8499, 8528), 'numpy.polyval', 'np.polyval', (['YOUNG_COEF1', 'cosz'], {}), '(YOUNG_COEF1, cosz)\n', (8509, 8528), True, 'import numpy as np\n'), ((8531, 8560), 'numpy.polyval', 'np.polyval', 
(['YOUNG_COEF2', 'cosz'], {}), '(YOUNG_COEF2, cosz)\n', (8541, 8560), True, 'import numpy as np\n'), ((9486, 9501), 'numpy.arcsin', 'np.arcsin', (['sini'], {}), '(sini)\n', (9495, 9501), True, 'import numpy as np\n'), ((7913, 7922), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (7919, 7922), True, 'import numpy as np\n'), ((9155, 9172), 'numpy.radians', 'np.radians', (['gamma'], {}), '(gamma)\n', (9165, 9172), True, 'import numpy as np\n'), ((9461, 9470), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (9467, 9470), True, 'import numpy as np\n'), ((9934, 9990), 'numpy.sqrt', 'np.sqrt', (['(rh * rh + rhp * rhp - 2 * rh * rhp * cos_delphi)'], {}), '(rh * rh + rhp * rhp - 2 * rh * rhp * cos_delphi)\n', (9941, 9990), True, 'import numpy as np\n'), ((814, 825), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (820, 825), True, 'import numpy as np\n'), ((828, 839), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (834, 839), True, 'import numpy as np\n'), ((891, 908), 'numpy.cos', 'np.cos', (['(lmst - ra)'], {}), '(lmst - ra)\n', (897, 908), True, 'import numpy as np\n'), ((2101, 2132), 'numpy.exp', 'np.exp', (['((11000.0 - x) / 6341.62)'], {}), '((11000.0 - x) / 6341.62)\n', (2107, 2132), True, 'import numpy as np\n'), ((2360, 2391), 'numpy.exp', 'np.exp', (['((47000.0 - x) / 7922.26)'], {}), '((47000.0 - x) / 7922.26)\n', (2366, 2391), True, 'import numpy as np\n'), ((7302, 7320), 'numpy.exp', 'np.exp', (['(-11 * cosz)'], {}), '(-11 * cosz)\n', (7308, 7320), True, 'import numpy as np\n'), ((863, 874), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (869, 874), True, 'import numpy as np\n'), ((877, 888), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (883, 888), True, 'import numpy as np\n'), ((9825, 9835), 'numpy.sin', 'np.sin', (['im'], {}), '(im)\n', (9831, 9835), True, 'import numpy as np\n'), ((9897, 9907), 'numpy.sin', 'np.sin', (['im'], {}), '(im)\n', (9903, 9907), True, 'import numpy as np\n'), ((9798, 9808), 'numpy.cos', 'np.cos', (['im'], {}), '(im)\n', (9804, 
9808), True, 'import numpy as np\n'), ((9870, 9880), 'numpy.cos', 'np.cos', (['im'], {}), '(im)\n', (9876, 9880), True, 'import numpy as np\n')] |
import torch
import torchvision
from torchvision.models import vgg16
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.optim import Adam
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision.transforms import Compose, CenterCrop, Normalize, Scale, Resize, ToTensor, ToPILImage
from torch.optim.lr_scheduler import LambdaLR, StepLR
import numpy as np
import glob
import PIL.Image as Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import json
import pickle
from dataset import BatchData
from model import PreResNet, BiasLayer
#from cifar import Cifar100
from core50 import Core50
from toybox import Toybox
from ilab import Ilab
from cifar100 import cifar100
from exemplar import Exemplar
from copy import deepcopy
class Trainer:
    def __init__(self, total_cls, paradigm, run,dataset):
        """Build the BiC trainer: dataset wrapper, backbone, and bias layers.

        Parameters
        ----------
        total_cls : int
            Total number of classes across all incremental tasks; also
            selects how many BiasLayer heads are built (12, 14, or 100 --
            the 10-class branch is commented out below).
        paradigm, run :
            Forwarded to the dataset wrapper (split scheme / run id
            -- semantics defined by the dataset classes; not visible here).
        dataset : str
            One of "core50", "toybox", "ilab", "cifar100".
        """
        self.total_cls = total_cls
        # Number of classes seen so far; grows as increments are trained.
        self.seen_cls = 0
        #self.dataset = Cifar100()
        # Select the continual-learning dataset wrapper.
        if dataset == "core50":
            self.dataset = Core50(paradigm, run)
        if dataset == 'toybox':
            self.dataset = Toybox(paradigm, run)
        if dataset == "ilab":
            print("in ilab data")
            self.dataset = Ilab(paradigm, run)
        if dataset == "cifar100":
            print("in cifar100 data")
            self.dataset = cifar100(paradigm, run)
        print("total_cls is")
        print(total_cls)
        # ResNet-32 backbone with a `total_cls`-way classifier head.
        self.model = PreResNet(32,total_cls).cuda()
        print(self.model)
        self.model = nn.DataParallel(self.model, device_ids=[0])
        # One BiasLayer per incremental task (BiC bias-correction heads).
        self.bias_layer1 = BiasLayer().cuda()
        self.bias_layer2 = BiasLayer().cuda()
        self.bias_layer3 = BiasLayer().cuda()
        self.bias_layer4 = BiasLayer().cuda()
        self.bias_layer5 = BiasLayer().cuda()
        # if self.total_cls == 10:
        #     self.bias_layers=[self.bias_layer1, self.bias_layer2, self.bias_layer3, self.bias_layer4, self.bias_layer5]
        if self.total_cls == 12:
            self.bias_layer6 = BiasLayer().cuda()
            self.bias_layers=[self.bias_layer1, self.bias_layer2, self.bias_layer3, self.bias_layer4, self.bias_layer5,self.bias_layer6]
        if self.total_cls == 14:
            print("for ilab data")
            self.bias_layer6 = BiasLayer().cuda()
            self.bias_layer7 = BiasLayer().cuda()
            self.bias_layers=[self.bias_layer1, self.bias_layer2, self.bias_layer3, self.bias_layer4, self.bias_layer5,self.bias_layer6, self.bias_layer7]
        if self.total_cls == 100:
            print("for ilab data")
            # 20 tasks x 5 classes for the 100-class setting.
            self.bias_layer6 = BiasLayer().cuda()
            self.bias_layer7 = BiasLayer().cuda()
            self.bias_layer8 = BiasLayer().cuda()
            self.bias_layer9 = BiasLayer().cuda()
            self.bias_layer10 = BiasLayer().cuda()
            self.bias_layer11 = BiasLayer().cuda()
            self.bias_layer12 = BiasLayer().cuda()
            self.bias_layer13 = BiasLayer().cuda()
            self.bias_layer14 = BiasLayer().cuda()
            self.bias_layer15 = BiasLayer().cuda()
            self.bias_layer16 = BiasLayer().cuda()
            self.bias_layer17 = BiasLayer().cuda()
            self.bias_layer18 = BiasLayer().cuda()
            self.bias_layer19 = BiasLayer().cuda()
            self.bias_layer20 = BiasLayer().cuda()
            self.bias_layers=[self.bias_layer1, self.bias_layer2, self.bias_layer3, self.bias_layer4, self.bias_layer5,self.bias_layer6, self.bias_layer7,
                              self.bias_layer8,self.bias_layer9,self.bias_layer10,self.bias_layer11,self.bias_layer12,self.bias_layer13,self.bias_layer14,
                              self.bias_layer15,self.bias_layer16,self.bias_layer17,self.bias_layer18,self.bias_layer19,self.bias_layer20]
        # Training-time augmentation; mean/std are the CIFAR-100 statistics.
        self.input_transform= Compose([
                                transforms.Resize(32),
                                transforms.RandomHorizontalFlip(),
                                transforms.RandomCrop(32,padding=4),
                                ToTensor(),
                                Normalize([0.5071,0.4866,0.4409],[0.2673,0.2564,0.2762])])
        # Evaluation-time transform: resize + normalize only.
        self.input_transform_eval= Compose([
                                transforms.Resize(32),
                                ToTensor(),
                                Normalize([0.5071,0.4866,0.4409],[0.2673,0.2564,0.2762])])
        total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        print("Solver total trainable parameters : ", total_params)
def test(self, testdata):
print("test data number : ",len(testdata))
self.model.eval()
count = 0
correct = 0
wrong = 0
task1 = 0
task2 = 0
task3 = 0
task4 = 0
task5 = 0
task6 = 0
task7 = 0
task8,task9,task10,task11,task12,task13,task14 = 0,0,0,0,0,0,0
task15,task16,task17,task18,task19,task20 = 0,0,0,0,0,0
sum1 = 0
sum2 = 0
sum3 = 0
sum4 = 0
sum5 = 0
sum6 = 0
sum7 = 0
sum8,sum9,sum10,sum11,sum12,sum13,sum14 = 0,0,0,0,0,0,0
sum15,sum16,sum17,sum18,sum19,sum20 = 0,0,0,0,0,0
for i, (image, label) in enumerate(testdata):
image = image.cuda()
label = label.view(-1).cuda()
p = self.model(image)
p = self.bias_forward(p)
pred = p[:,:self.seen_cls].argmax(dim=-1)
correct += sum(pred == label).item()
wrong += sum(pred != label).item()
for a,b in zip(label,pred):
if a == b:
if a >= 0 and a <= 4:
task1 += 1
elif a >= 5 and a <= 9:
task2 += 1
elif a >= 10 and a <= 14:
task3 += 1
elif a >= 15 and a <= 19:
task4 += 1
elif a >= 20 and a <= 24:
task5 += 1
elif a >= 25 and a <= 29:
task6 += 1
elif a >= 30 and a <= 34:
task7 += 1
elif a >= 35 and a <= 39:
task8 += 1
elif a >= 40 and a <= 44:
task9 += 1
elif a >= 45 and a <= 49:
task10 += 1
elif a >= 50 and a <= 54:
task11 += 1
elif a >= 55 and a <= 59:
task12 += 1
elif a >= 60 and a <= 64:
task13 += 1
elif a >= 65 and a <= 69:
task14 += 1
elif a >= 70 and a <= 74:
task15 += 1
elif a >= 75 and a <= 79:
task16 += 1
elif a >= 80 and a <= 84:
task17 += 1
elif a >= 85 and a <= 89:
task18 += 1
elif a >= 90 and a <= 94:
task19 += 1
elif a >= 95 and a <= 99:
task20 += 1
if a >= 0 and a <= 4:
sum1 += 1
elif a >= 5 and a <= 9:
sum2 += 1
elif a >= 10 and a <= 14:
sum3 += 1
elif a >= 15 and a <= 19:
sum4 += 1
elif a >= 20 and a <= 24:
sum5 += 1
elif a >= 25 and a <= 29:
sum6 += 1
elif a >= 30 and a <= 34:
sum7 += 1
elif a >= 35 and a <= 39:
sum8 += 1
elif a >= 40 and a <= 44:
sum9 += 1
elif a >= 45 and a <= 49:
sum10 += 1
elif a >= 50 and a <= 54:
sum11 += 1
elif a >= 55 and a <= 59:
sum12 += 1
elif a >= 60 and a <= 64:
sum13 += 1
elif a >= 65 and a <= 69:
sum14 += 1
elif a >= 70 and a <= 74:
sum15 += 1
elif a >= 75 and a <= 79:
sum16 += 1
elif a >= 80 and a <= 84:
sum17 += 1
elif a >= 85 and a <= 89:
sum18 += 1
elif a >= 90 and a <= 94:
sum19 += 1
elif a >= 95 and a <= 99:
sum20 += 1
# print("pred and label")
# print(pred,label)
acc = correct / (wrong + correct)
print("Test Acc: {}".format(acc*100))
# print("task wise")
# print(task1, task2, task3, task4, task5,task6, task7)
# print("task wise sum")
# print(sum1, sum2, sum3, sum4,sum5, sum6, sum7)
print("Task wise accuracies:")
if sum1 != 0:
#task1_res.append(float(task1 / sum1))
acc_1sttask = task1 / sum1
print("task 1:",task1 / sum1)
if sum2 != 0:
print("task 2:",task2 / sum2)
if sum3 != 0:
print("task 3:",task3 / sum3)
if sum4 != 0:
print("task 4:",task4 / sum4)
if sum5 != 0:
print("task 5:",task5 / sum5)
if sum6 != 0:
print("task 6:",task6 / sum6)
if sum7 != 0:
print("task 7:",task7 / sum7)
if sum8 != 0:
print("task 8:",task8 / sum8)
if sum9 != 0:
print("task 9:",task9 / sum9)
if sum10 != 0:
print("task 10:",task10 / sum10)
if sum11 != 0:
print("task 11:",task11 / sum11)
if sum12 != 0:
print("task 12:",task12 / sum12)
if sum13 != 0:
print("task 13:",task13 / sum13)
if sum14 != 0:
print("task 14:",task14 / sum14)
if sum15 != 0:
print("task 15:",task15 / sum15)
if sum16 != 0:
print("task 16:",task16 / sum16)
if sum17 != 0:
print("task 17:",task17 / sum17)
if sum18 != 0:
print("task 18:",task18 / sum18)
if sum19 != 0:
print("task 19:",task19 / sum19)
if sum20 != 0:
print("task 20:",task20 / sum20)
# print("correct")
# print(correct)
# print("wrong + correct")
# print(wrong + correct)
self.model.train()
print("---------------------------------------------")
return acc,acc_1sttask
def eval(self, criterion, evaldata):
self.model.eval()
losses = []
correct = 0
wrong = 0
for i, (image, label) in enumerate(evaldata):
image = image.cuda()
label = label.view(-1).cuda()
p = self.model(image)
p = self.bias_forward(p)
loss = criterion(p, label)
losses.append(loss.item())
pred = p[:,:self.seen_cls].argmax(dim=-1)
correct += sum(pred == label).item()
wrong += sum(pred != label).item()
print("Validation Loss: {}".format(np.mean(losses)))
print("Validation Acc: {}".format(100*correct/(correct+wrong)))
self.model.train()
return
def get_lr(self, optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
    def train(self, batch_size, epoches, lr, max_size):
        """Run the full incremental-training loop (BiC two-stage scheme).

        For each incremental batch of classes: train the backbone (stage 1,
        with distillation after the first increment), then train the current
        BiasLayer on exemplar validation data (stage 2), tracking the best
        test accuracy per increment.

        Parameters
        ----------
        batch_size : int
            Mini-batch size for train/val/test loaders.
        epoches : int
            Epochs for the first increment; later increments are forced to
            a single epoch (stream learning).
        lr : float
            SGD learning rate for the backbone.
        max_size : int
            Exemplar-memory capacity.

        Returns
        -------
        (test_accs, first_task_test_res_final) : per-increment best overall
        accuracy and best first-task accuracy.
        """
        total_cls = self.total_cls
        criterion = nn.CrossEntropyLoss()
        exemplar = Exemplar(max_size, total_cls)
        previous_model = None
        dataset = self.dataset
        test_xs = []
        test_ys = []
        train_xs = []
        train_ys = []
        test_accs = []
        first_task_test_res_final = []
        for inc_i in range(dataset.batch_num):
            print(f"Incremental num : {inc_i}")
            train, val, test = dataset.getNextClasses(inc_i)
            print(len(train), len(val), len(test))
            train_x, train_y = zip(*train)
            val_x, val_y = zip(*val)
            test_x, test_y = zip(*test)
            # Test set accumulates across increments (evaluate on all seen classes).
            test_xs.extend(test_x)
            test_ys.extend(test_y)
            # Training data = stored exemplars + this increment's train and val splits.
            train_xs, train_ys = exemplar.get_exemplar_train()
            train_xs.extend(train_x)
            train_xs.extend(val_x)
            train_ys.extend(train_y)
            train_ys.extend(val_y)
            if inc_i > 0 :
                epoches = 1 #stream learning; see data only once
            train_data = DataLoader(BatchData(train_xs, train_ys, self.input_transform),
                        batch_size=batch_size, shuffle=True, drop_last=True)
            val_data = DataLoader(BatchData(val_x, val_y, self.input_transform_eval),
                        batch_size=batch_size, shuffle=False)
            test_data = DataLoader(BatchData(test_xs, test_ys, self.input_transform_eval),
                        batch_size=batch_size, shuffle=False)
            optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=0.9, weight_decay=2e-4)
            # scheduler = LambdaLR(optimizer, lr_lambda=adjust_cifar100)
            scheduler = StepLR(optimizer, step_size=70, gamma=0.1)
            # bias_optimizer = optim.SGD(self.bias_layers[inc_i].parameters(), lr=lr, momentum=0.9)
            # Only the current increment's bias layer is optimized in stage 2.
            bias_optimizer = optim.Adam(self.bias_layers[inc_i].parameters(), lr=0.001)
            # bias_scheduler = StepLR(bias_optimizer, step_size=70, gamma=0.1)
            #exemplar.update(total_cls//dataset.batch_num, (train_x, train_y), (val_x, val_y)) #is this even correct????? #RBZ changes
            # NOTE(review): hard-coded 20 tasks here -- assumes total_cls//20
            # classes per increment; verify against dataset.batch_num.
            exemplar.update(total_cls//20, (train_x, train_y), (val_x, val_y))
            self.seen_cls = exemplar.get_cur_cls()
            print("seen cls number : ", self.seen_cls)
            val_xs, val_ys = exemplar.get_exemplar_val()
            val_bias_data = DataLoader(BatchData(val_xs, val_ys, self.input_transform),
                        batch_size=1, shuffle=True, drop_last=False)
            test_acc = []
            first_task_test_res = []
            # Stage 1: train backbone (bias layers frozen in eval mode).
            for epoch in range(epoches):
                print("---"*50)
                print("Epoch", epoch)
                scheduler.step()
                cur_lr = self.get_lr(optimizer)
                print("Current Learning Rate : ", cur_lr)
                self.model.train()
                for _ in range(len(self.bias_layers)):
                    self.bias_layers[_].eval()
                if inc_i > 0:
                    self.stage1_distill(train_data, criterion, optimizer)
                else:
                    self.stage1(train_data, criterion, optimizer)
                acc,_ = self.test(test_data)
            # Stage 2: train the current bias layer (backbone frozen).
            if inc_i > 0:
                for epoch in range(epoches):
                    # bias_scheduler.step()
                    self.model.eval()
                    for _ in range(len(self.bias_layers)):
                        self.bias_layers[_].train()
                    self.stage2(val_bias_data, criterion, bias_optimizer)
                    if epoch % 1 == 0:
                        acc,_ = self.test(test_data)
                        test_acc.append(acc)
            for i, layer in enumerate(self.bias_layers):
                layer.printParam(i)
            # Snapshot for distillation in the next increment.
            self.previous_model = deepcopy(self.model)
            acc,acc_1stTask = self.test(test_data)
            test_acc.append(acc)
            first_task_test_res.append(acc_1stTask)
            test_accs.append(max(test_acc))
            first_task_test_res_final.append(max(first_task_test_res)) #probably doesnt matter because of 1epoch afterwards
            print("test_accs")
            print(test_accs)
        return test_accs,first_task_test_res_final
def bias_forward(self, input):
in1 = input[:, :5]
in2 = input[:, 5:10]
in3 = input[:, 10:15]
in4 = input[:, 15:20]
in5 = input[:, 20:25]
in6 = input[:, 25:30]
in7 = input[:, 30:35]
in8 = input[:, 35:40]
in9 = input[:, 40:45]
in10 = input[:, 45:50]
in11 = input[:, 50:55]
in12 = input[:, 55:60]
in13 = input[:, 60:65]
in14 = input[:, 65:70]
in15 = input[:, 70:75]
in16 = input[:, 75:80]
in17 = input[:, 80:85]
in18 = input[:, 85:90]
in19 = input[:, 90:95]
in20 = input[:, 95:100]
out1 = self.bias_layer1(in1)
out2 = self.bias_layer2(in2)
out3 = self.bias_layer3(in3)
out4 = self.bias_layer4(in4)
out5 = self.bias_layer5(in5)
out6,out7,out8,out9 = self.bias_layer6(in6),self.bias_layer7(in7),self.bias_layer8(in8),self.bias_layer9(in9)
out10,out11,out12,out13 = self.bias_layer10(in10),self.bias_layer11(in11),self.bias_layer12(in12),self.bias_layer13(in13)
out14,out15,out16,out17 = self.bias_layer14(in14),self.bias_layer15(in15),self.bias_layer16(in16),self.bias_layer17(in17)
out18,out19,out20 = self.bias_layer18(in18),self.bias_layer19(in19),self.bias_layer20(in20)
if self.total_cls == 10:
return torch.cat([out1, out2, out3, out4, out5], dim = 1)
elif self.total_cls == 12:
in6 = input[:, 10:12]
out6 = self.bias_layer6(in6)
return torch.cat([out1, out2, out3, out4, out5,out6], dim = 1)
elif self.total_cls == 14:
in6 = input[:, 10:12]
out6 = self.bias_layer6(in6)
in7 = input[:, 12:14]
out7 = self.bias_layer7(in7)
return torch.cat([out1, out2, out3, out4, out5,out6,out7], dim = 1)
if self.total_cls == 100:
#print("in total_cls = 100")
return torch.cat([out1, out2, out3, out4, out5,
out6,out7,out8,out9,out10,out11,out12,out13,
out14,out15,out16,out17,out18,out19,out20], dim = 1)
'''elif self.total_cls == 14:
in6 = input[:, 10:12]
in7 = input[:, 12:14]
out6 = self.bias_layer6(in6)
out7 = self.bias_layer6(in7)
return torch.cat([out1, out2, out3, out4, out5, out6, out7], dim = 1)
'''
    def stage1(self, train_data, criterion, optimizer):
        """Run one epoch of plain classification training (dead code).

        NOTE(review): this definition is shadowed by the later `stage1`
        defined further down in the same class body, so this copy never
        runs. The only difference is that this version passes
        `retain_graph=True` to `backward()`. Consider deleting one copy.

        Args:
            train_data: iterable of (image, label) batches.
            criterion: classification loss over the seen classes.
            optimizer: optimizer stepped once per batch.
        """
        print("Training ... ")
        losses = []
        for i, (image, label) in enumerate(tqdm(train_data)):
            image = image.cuda()
            label = label.view(-1).cuda()  # flatten to a 1-D class-index vector
            p = self.model(image)
            p = self.bias_forward(p)  # per-task bias correction of logits
            # Restrict the loss to classes encountered so far.
            loss = criterion(p[:,:self.seen_cls], label)
            optimizer.zero_grad()
            loss.backward(retain_graph=True)
            optimizer.step()
            losses.append(loss.item())
        print("stage1 loss :", np.mean(losses))
    def stage1_distill(self, train_data, criterion, optimizer):
        """Train with a knowledge-distillation loss against the previous model.

        Combines (a) a soft-target term that keeps the new model's old-class
        logits close to `self.previous_model`'s (temperature-scaled), and
        (b) a hard cross-entropy term on all currently seen classes,
        weighted by (1 - alpha).

        Args:
            train_data: iterable of (image, label) batches.
            criterion: unused here; the hard loss is built from
                nn.CrossEntropyLoss directly.
            optimizer: optimizer stepped once per batch.
        """
        print("Training ... ")
        distill_losses = []
        ce_losses = []
        T = 5  # distillation temperature
        # Fraction of classes that are "old"; assumes 5 new classes per task.
        alpha = (self.seen_cls - 5)/ self.seen_cls
        print("classification proportion 1-alpha = ", 1-alpha)
        for i, (image, label) in enumerate(tqdm(train_data)):
            image = image.cuda()
            #if label == -1:
            #    print(label)
            #if label > 10:
            #    print(label)
            #    print("above 10")
            label = label.view(-1).cuda()
            p = self.model(image)
            p = self.bias_forward(p)
            with torch.no_grad():
                # The frozen previous model provides the soft targets.
                pre_p = self.previous_model(image)
                pre_p = self.bias_forward(pre_p)
                pre_p = F.softmax(pre_p[:,:self.seen_cls-2]/T, dim=1)
            # NOTE(review): the distillation slice uses seen_cls - 2 while
            # alpha assumes 5 new classes per task (seen_cls - 5); also the
            # soft term is scaled by T*T but not by alpha. Confirm intended.
            logp = F.log_softmax(p[:,:self.seen_cls-2]/T, dim=1)
            loss_soft_target = -torch.mean(torch.sum(pre_p * logp, dim=1))
            loss_hard_target = nn.CrossEntropyLoss()(p[:,:self.seen_cls], label)
            loss = loss_soft_target * T * T + (1-alpha) * loss_hard_target
            optimizer.zero_grad()
            loss.backward(retain_graph=True)
            optimizer.step()
            distill_losses.append(loss_soft_target.item())
            ce_losses.append(loss_hard_target.item())
        print("stage1 distill loss :", np.mean(distill_losses), "ce loss :", np.mean(ce_losses))
def stage1(self, train_data, criterion, optimizer):
print("Training ... ")
losses = []
for i, (image, label) in enumerate(tqdm(train_data)):
image = image.cuda()
label = label.view(-1).cuda()
p = self.model(image)
p = self.bias_forward(p)
loss = criterion(p[:,:self.seen_cls], label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.item())
print("stage1 loss :", np.mean(losses))
    def stage2(self, val_bias_data, criterion, optimizer):
        """Stage 2 of BiC-style training: fit the bias-correction layers.

        Despite the "Evaluating ... " message, this performs optimization
        steps on every batch; presumably `optimizer` is constructed by the
        caller over only the bias-layer parameters -- TODO confirm.

        Args:
            val_bias_data: iterable of (image, label) batches held out for
                bias correction.
            criterion: classification loss on bias-corrected logits.
            optimizer: optimizer stepped once per batch.
        """
        print("Evaluating ... ")
        losses = []
        for i, (image, label) in enumerate(tqdm(val_bias_data)):
            image = image.cuda()
            label = label.view(-1).cuda()
            p = self.model(image)
            p = self.bias_forward(p)
            loss = criterion(p[:,:self.seen_cls], label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
        print("stage2 loss :", np.mean(losses))
| [
"core50.Core50",
"ilab.Ilab",
"torch.optim.lr_scheduler.StepLR",
"torch.cat",
"numpy.mean",
"torchvision.transforms.Normalize",
"torch.no_grad",
"toybox.Toybox",
"cifar100.cifar100",
"torch.nn.functional.log_softmax",
"exemplar.Exemplar",
"copy.deepcopy",
"tqdm.tqdm",
"torchvision.transfor... | [((1547, 1590), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.model'], {'device_ids': '[0]'}), '(self.model, device_ids=[0])\n', (1562, 1590), True, 'import torch.nn as nn\n'), ((11762, 11783), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (11781, 11783), True, 'import torch.nn as nn\n'), ((11803, 11832), 'exemplar.Exemplar', 'Exemplar', (['max_size', 'total_cls'], {}), '(max_size, total_cls)\n', (11811, 11832), False, 'from exemplar import Exemplar\n'), ((1047, 1068), 'core50.Core50', 'Core50', (['paradigm', 'run'], {}), '(paradigm, run)\n', (1053, 1068), False, 'from core50 import Core50\n'), ((1128, 1149), 'toybox.Toybox', 'Toybox', (['paradigm', 'run'], {}), '(paradigm, run)\n', (1134, 1149), False, 'from toybox import Toybox\n'), ((1241, 1260), 'ilab.Ilab', 'Ilab', (['paradigm', 'run'], {}), '(paradigm, run)\n', (1245, 1260), False, 'from ilab import Ilab\n'), ((1360, 1383), 'cifar100.cifar100', 'cifar100', (['paradigm', 'run'], {}), '(paradigm, run)\n', (1368, 1383), False, 'from cifar100 import cifar100\n'), ((13420, 13462), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(70)', 'gamma': '(0.1)'}), '(optimizer, step_size=70, gamma=0.1)\n', (13426, 13462), False, 'from torch.optim.lr_scheduler import LambdaLR, StepLR\n'), ((15559, 15579), 'copy.deepcopy', 'deepcopy', (['self.model'], {}), '(self.model)\n', (15567, 15579), False, 'from copy import deepcopy\n'), ((17383, 17431), 'torch.cat', 'torch.cat', (['[out1, out2, out3, out4, out5]'], {'dim': '(1)'}), '([out1, out2, out3, out4, out5], dim=1)\n', (17392, 17431), False, 'import torch\n'), ((17992, 18149), 'torch.cat', 'torch.cat', (['[out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12,\n out13, out14, out15, out16, out17, out18, out19, out20]'], {'dim': '(1)'}), '([out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,\n out11, out12, out13, out14, out15, out16, out17, out18, out19, out20],\n 
dim=1)\n', (18001, 18149), False, 'import torch\n'), ((18579, 18595), 'tqdm.tqdm', 'tqdm', (['train_data'], {}), '(train_data)\n', (18583, 18595), False, 'from tqdm import tqdm\n'), ((18979, 18994), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (18986, 18994), True, 'import numpy as np\n'), ((19314, 19330), 'tqdm.tqdm', 'tqdm', (['train_data'], {}), '(train_data)\n', (19318, 19330), False, 'from tqdm import tqdm\n'), ((19854, 19904), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['(p[:, :self.seen_cls - 2] / T)'], {'dim': '(1)'}), '(p[:, :self.seen_cls - 2] / T, dim=1)\n', (19867, 19904), True, 'import torch.nn.functional as F\n'), ((20391, 20414), 'numpy.mean', 'np.mean', (['distill_losses'], {}), '(distill_losses)\n', (20398, 20414), True, 'import numpy as np\n'), ((20429, 20447), 'numpy.mean', 'np.mean', (['ce_losses'], {}), '(ce_losses)\n', (20436, 20447), True, 'import numpy as np\n'), ((20601, 20617), 'tqdm.tqdm', 'tqdm', (['train_data'], {}), '(train_data)\n', (20605, 20617), False, 'from tqdm import tqdm\n'), ((20984, 20999), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (20991, 20999), True, 'import numpy as np\n'), ((21157, 21176), 'tqdm.tqdm', 'tqdm', (['val_bias_data'], {}), '(val_bias_data)\n', (21161, 21176), False, 'from tqdm import tqdm\n'), ((21543, 21558), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (21550, 21558), True, 'import numpy as np\n'), ((1469, 1493), 'model.PreResNet', 'PreResNet', (['(32)', 'total_cls'], {}), '(32, total_cls)\n', (1478, 1493), False, 'from model import PreResNet, BiasLayer\n'), ((1618, 1629), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (1627, 1629), False, 'from model import PreResNet, BiasLayer\n'), ((1664, 1675), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (1673, 1675), False, 'from model import PreResNet, BiasLayer\n'), ((1710, 1721), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (1719, 1721), False, 'from model import PreResNet, BiasLayer\n'), ((1756, 1767), 
'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (1765, 1767), False, 'from model import PreResNet, BiasLayer\n'), ((1802, 1813), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (1811, 1813), False, 'from model import PreResNet, BiasLayer\n'), ((3905, 3926), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (3922, 3926), False, 'from torchvision import transforms\n'), ((3960, 3993), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3991, 3993), False, 'from torchvision import transforms\n'), ((4027, 4063), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (4048, 4063), False, 'from torchvision import transforms\n'), ((4096, 4106), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4104, 4106), False, 'from torchvision.transforms import Compose, CenterCrop, Normalize, Scale, Resize, ToTensor, ToPILImage\n'), ((4140, 4201), 'torchvision.transforms.Normalize', 'Normalize', (['[0.5071, 0.4866, 0.4409]', '[0.2673, 0.2564, 0.2762]'], {}), '([0.5071, 0.4866, 0.4409], [0.2673, 0.2564, 0.2762])\n', (4149, 4201), False, 'from torchvision.transforms import Compose, CenterCrop, Normalize, Scale, Resize, ToTensor, ToPILImage\n'), ((4277, 4298), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (4294, 4298), False, 'from torchvision import transforms\n'), ((4332, 4342), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4340, 4342), False, 'from torchvision.transforms import Compose, CenterCrop, Normalize, Scale, Resize, ToTensor, ToPILImage\n'), ((4376, 4437), 'torchvision.transforms.Normalize', 'Normalize', (['[0.5071, 0.4866, 0.4409]', '[0.2673, 0.2564, 0.2762]'], {}), '([0.5071, 0.4866, 0.4409], [0.2673, 0.2564, 0.2762])\n', (4385, 4437), False, 'from torchvision.transforms import Compose, CenterCrop, Normalize, Scale, Resize, ToTensor, ToPILImage\n'), ((11394, 11409), 
'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (11401, 11409), True, 'import numpy as np\n'), ((12780, 12831), 'dataset.BatchData', 'BatchData', (['train_xs', 'train_ys', 'self.input_transform'], {}), '(train_xs, train_ys, self.input_transform)\n', (12789, 12831), False, 'from dataset import BatchData\n'), ((12944, 12994), 'dataset.BatchData', 'BatchData', (['val_x', 'val_y', 'self.input_transform_eval'], {}), '(val_x, val_y, self.input_transform_eval)\n', (12953, 12994), False, 'from dataset import BatchData\n'), ((13105, 13159), 'dataset.BatchData', 'BatchData', (['test_xs', 'test_ys', 'self.input_transform_eval'], {}), '(test_xs, test_ys, self.input_transform_eval)\n', (13114, 13159), False, 'from dataset import BatchData\n'), ((14149, 14196), 'dataset.BatchData', 'BatchData', (['val_xs', 'val_ys', 'self.input_transform'], {}), '(val_xs, val_ys, self.input_transform)\n', (14158, 14196), False, 'from dataset import BatchData\n'), ((17563, 17617), 'torch.cat', 'torch.cat', (['[out1, out2, out3, out4, out5, out6]'], {'dim': '(1)'}), '([out1, out2, out3, out4, out5, out6], dim=1)\n', (17572, 17617), False, 'import torch\n'), ((19648, 19663), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19661, 19663), False, 'import torch\n'), ((19789, 19839), 'torch.nn.functional.softmax', 'F.softmax', (['(pre_p[:, :self.seen_cls - 2] / T)'], {'dim': '(1)'}), '(pre_p[:, :self.seen_cls - 2] / T, dim=1)\n', (19798, 19839), True, 'import torch.nn.functional as F\n'), ((20006, 20027), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (20025, 20027), True, 'import torch.nn as nn\n'), ((2043, 2054), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2052, 2054), False, 'from model import PreResNet, BiasLayer\n'), ((2299, 2310), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2308, 2310), False, 'from model import PreResNet, BiasLayer\n'), ((2349, 2360), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2358, 2360), False, 'from model import 
PreResNet, BiasLayer\n'), ((2636, 2647), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2645, 2647), False, 'from model import PreResNet, BiasLayer\n'), ((2686, 2697), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2695, 2697), False, 'from model import PreResNet, BiasLayer\n'), ((2736, 2747), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2745, 2747), False, 'from model import PreResNet, BiasLayer\n'), ((2786, 2797), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2795, 2797), False, 'from model import PreResNet, BiasLayer\n'), ((2837, 2848), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2846, 2848), False, 'from model import PreResNet, BiasLayer\n'), ((2888, 2899), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2897, 2899), False, 'from model import PreResNet, BiasLayer\n'), ((2939, 2950), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2948, 2950), False, 'from model import PreResNet, BiasLayer\n'), ((2990, 3001), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (2999, 3001), False, 'from model import PreResNet, BiasLayer\n'), ((3041, 3052), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3050, 3052), False, 'from model import PreResNet, BiasLayer\n'), ((3092, 3103), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3101, 3103), False, 'from model import PreResNet, BiasLayer\n'), ((3143, 3154), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3152, 3154), False, 'from model import PreResNet, BiasLayer\n'), ((3194, 3205), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3203, 3205), False, 'from model import PreResNet, BiasLayer\n'), ((3245, 3256), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3254, 3256), False, 'from model import PreResNet, BiasLayer\n'), ((3296, 3307), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3305, 3307), False, 'from model import PreResNet, BiasLayer\n'), ((3347, 3358), 'model.BiasLayer', 'BiasLayer', ([], {}), '()\n', (3356, 3358), False, 'from model import PreResNet, BiasLayer\n'), ((17836, 
17896), 'torch.cat', 'torch.cat', (['[out1, out2, out3, out4, out5, out6, out7]'], {'dim': '(1)'}), '([out1, out2, out3, out4, out5, out6, out7], dim=1)\n', (17845, 17896), False, 'import torch\n'), ((19943, 19973), 'torch.sum', 'torch.sum', (['(pre_p * logp)'], {'dim': '(1)'}), '(pre_p * logp, dim=1)\n', (19952, 19973), False, 'import torch\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Code to run psychophysics tests using a Jupyter notebook.
This code implements the GUI for 2 alternative forced choiced experiments, using
either audio (for easy testing) or tactors.
"""
import json
import random
import IPython.display as display
import ipywidgets as widgets
import levitt_experiment as levitt
import numpy as np
import sleeve_usb
# Basic code from:
# https://stackoverflow.com/questions/54813069/python-onclick-button-widget-return-object-
class TestGui(object):
  """Basic Jupyter GUI for a psychophysical test.
  Just play as many beeps as the trial number, which goes up each time
  you click an answer button.
  """
  def __init__(
      self,
      button_names=('First', 'Second'),
      title='This is the Experiment Title',
      description='Click on the button indicating which half is higher.'):
    # Labels for the answer buttons, in display order.
    self.button_names = button_names
    self.title = title
    self.description = description
    self.signal = None  # NOTE(review): apparently unused; show_stimulus reads test_signal.
    self.fs = 16000.0  # Audio sample rate in Hz.
    self.trial_num = 1  # 1-based trial counter; also the number of beeps played.
    # Output pane that hosts the stimulus (audio widget or message).
    self.stimulus_pane = widgets.Output()
  def experiment_parameters(self):
    """Returns a dict of parameters worth logging for this experiment."""
    return {'fs': self.fs}
  def create_widgets(self):
    """Creates the ipython widgets needed for the display. Called once."""
    self.title_pane = widgets.Label(
        self.title, disabled=False, style={'font_weight': 'bold'})
    self.description_pane = widgets.Label(self.description, disabled=False)
    self.button_widgets = {}
    # One answer button per name; all route to the same click handler.
    for l in self.button_names:
      b = widgets.Button(
          description=l,
          disabled=False,
          button_style='warning', # 'success', 'info', 'warning', 'danger', ''
          tooltip=f'{l} answer',
          icon='check' # (FontAwesome names without the `fa-` prefix)
      )
      b.on_click(self.answer_event_handler)
      self.button_widgets[l] = b
    self.legend_pane = widgets.Label(
        f'Trial number {self.trial_num}', disabled=False)
    self.debug_pane = widgets.Label('', disabled=False)
  def display_widgets(self):
    """Displays the widgets in the output panel. Called once."""
    self.create_widgets()
    self.create_stimulus()
    answer_pane = widgets.HBox(list(self.button_widgets.values()))
    # Render the stimulus pane first, without auto-playing audio.
    self.show_stimulus(autoplay=False)
    display.display(
        widgets.VBox([
            self.title_pane,
            self.description_pane,
            self.stimulus_pane,
            answer_pane,
            self.legend_pane,
            self.debug_pane,
        ]))
  def update_display(self, new_legend):
    """Updates the legend text and redraws the stimulus after each trial."""
    self.legend_pane.value = new_legend
    self.show_stimulus()
  def update_debug(self, new_message: str):
    """Replaces the text shown in the debug line."""
    self.debug_pane.value = new_message
  def create_stimulus(self):
    """Given the status of the experiment, creates the current stimulus.
    In this case, the audio consists of trial_num beeps, for testing code.
    """
    stim_len = 0.2
    self.fs = 16000
    blip_len = int(stim_len * self.fs)
    blip = np.zeros(blip_len)
    # Tone fills 75% of the blip; the rest is silence between beeps.
    t = np.arange(int(blip_len * .75)) / float(self.fs)
    blip[0:t.shape[0]] = np.sin(2 * np.pi * t * 440)
    self.test_signal = np.concatenate([blip for i in range(self.trial_num)],
                                      axis=0)
    self.update_debug(f'DEBUG: Now playing {self.trial_num} beeps.')
  def show_stimulus(self, done_message: str = None, autoplay=True):
    """Shows stimulus specific pane, perhaps updated after each trial.

    Args:
      done_message: if given, shown instead of the audio widget.
      autoplay: whether the audio widget starts playing immediately.
    """
    with self.stimulus_pane:
      display.clear_output()
      if done_message:
        a = done_message
      else:
        a = display.Audio(
            data=self.test_signal, rate=self.fs, autoplay=autoplay)
      display.display(a)
  def answer_event_handler(self, obj):
    """Handles the button clicks, checks answer, shows next trial."""
    print(f'Got a click from {obj.description}.')
    self.check_answer(obj.description)
    self.show_next_trial()
  def show_next_trial(self):
    """Updates the trial number count, and then updates the exp display."""
    self.trial_num += 1
    self.create_stimulus()
    self.update_display(f'Trial number {self.trial_num}')
  def check_answer(self, _):
    """Base-class hook; subclasses score the answer and adapt the level."""
    print('Generic check_answer called, probably an error.')
    pass # Nothing to do for generic method. Either answer is right
class Exp2AFC(TestGui):
  """Defines and implements a 2 alternative, forced choice test."""

  def __init__(self, max_runs=8, initial_level=0.05, **kwargs):
    """Creates the experiment.
    Just a dummy audio experiment for testing.
    Specialize this to do a real experiment, hopefully these methods will
    make that easier.
    Args:
      max_runs: How many runs to test the subject on. (A run is a
        monotonic sequence of level changes, as defined by Levitt.)
      initial_level: Which stimulus level to start the experiment with
        (meaning depends on the experiment.)
      **kwargs: keyword arguments for the super class.
    """
    self.max_runs = max_runs
    # Remember the starting level so experiment_parameters() can log it.
    self.initial_level = initial_level
    self.test_level = initial_level
    self.levitt_exp = levitt.LevittExp(
        initial_level=initial_level,
        change_delta=0.5,
        decrease_step_by_run=True,
        multiplicative_step=True)
    self.correct_answer = None
    self.last_result = ''
    self.test_description = ''
    super().__init__(**kwargs)

  def experiment_parameters(self):
    """Returns a dict describing this experiment, suitable for JSON logging."""
    return {
        'max_runs': self.max_runs,
        # Bug fix: this previously read `self.initial_levels`, an attribute
        # that was never set anywhere, so saving an experiment raised
        # AttributeError.
        'initial_level': self.initial_level,
        'levitt_results': self.levitt_exp.results(),
        'levitt_threshold': self.levitt_exp.calculate_threshold(),
        'type': str(type(self)),
        'button_names': self.button_names,
        'title': self.title,
        'description': self.description,
    }

  def check_answer(self, trial_answer: str):
    """Checks to see if the right answer was received and updates the Levitt parameters."""
    correct = self.correct_answer in trial_answer.lower()
    if correct:
      self.last_result = 'Correct'
    else:
      self.last_result = 'Incorrect'
    # The Levitt staircase picks the next stimulus level from the response.
    self.test_level = self.levitt_exp.note_response(correct)
    print('Check_answer got %s, responding with level %g' %
          (correct, self.test_level))

  def show_next_trial(self):
    """Shows an experimental trial.
    Checks to see if we have done enough runs, then exits.
    Otherwise, creates the audio GUI widget and displays it for the
    subject's action.
    """
    if self.levitt_exp.run_number > self.max_runs:
      # Experiment complete: report the threshold instead of a stimulus.
      self.test_level = self.levitt_exp.calculate_threshold()
      self.show_stimulus('All done, with a threshold of %g.' % self.test_level)
      return
    super().show_next_trial()
    msg = f'Last result was {self.last_result.lower()}. '
    msg += f'Now showing run #{self.levitt_exp.run_number}, '
    msg += f'trial #{self.levitt_exp.trial_number}. '
    msg += f'This test is {self.test_description}.'
    self.update_debug(msg)

  def save_experiment(self, filename):
    """Writes experiment_parameters() to `filename` as JSON."""
    exp_dict = self.experiment_parameters()
    with open(filename, 'w') as fp:
      json.dump(exp_dict, fp)
class AudioExp2AFC(Exp2AFC):
  """Do a simple pitch-based 2AFC test, just for testing."""

  def __init__(self, fs=16000, f0=440, **kwargs):
    self.fs = fs  # Sample rate in Hz.
    self.f0 = f0  # Nominal base frequency in Hz.
    self.blip_len = 0.5  # Seconds per segment.
    self.pitch_std = 0.5  # Std. dev. of the random base-pitch jitter.
    super().__init__(**kwargs)

  def experiment_parameters(self):
    """Returns the superclass parameters plus the audio-specific ones."""
    params = super().experiment_parameters()
    params.update(fs=self.fs, f0=self.f0,
                  blip_len=self.blip_len, pitch_std=self.pitch_std)
    return params

  def create_stimulus(self) -> None:
    """Creates a 2 alternative forced choice pitch JND experiment.
    Stores for later use:
      The NP array of mono data at the desired sample rate, and
      a boolean that tells which segment (first or second) is higher.
    """
    if self.test_level < 0:
      raise ValueError('Difference argument should be > 0, not %s' %
                       self.test_level)
    # Jitter the base pitch so the subject can't memorize an absolute tone.
    base_freq = random.normalvariate(self.f0, self.pitch_std)
    high_freq = (1 + self.test_level) * base_freq  # Always higher.
    # Coin flip decides which segment carries the higher pitch.
    if random.random() < 0.5:
      self.correct_answer = 'first'
      seg1_freq, seg2_freq = high_freq, base_freq
    else:
      self.correct_answer = 'second'
      seg1_freq, seg2_freq = base_freq, high_freq
    seg_samples = int(self.fs * self.blip_len)
    t = np.arange(2 * seg_samples) / float(self.fs)
    window = np.hanning(seg_samples)
    signal = 0 * t
    signal[:seg_samples] = np.sin(2 * np.pi * seg1_freq * t[:seg_samples]) * window
    signal[seg_samples:] = np.sin(2 * np.pi * seg2_freq * t[seg_samples:]) * window
    self.test_signal = signal
    self.test_description = '%g -> %g with step %g' % (
        seg1_freq, seg2_freq, self.test_level)
class TactorExp2AFC(Exp2AFC):
  """Tactor amplitude threshold level experiment."""
  def __init__(self,
               f0: float = 50,
               blip_len: float = 0.5,
               stim_channel: int = 0,
               click_channel: int = 3,
               mask_channel: int = 2,
               mask_level: float = 0,
               **kwargs):
    """Initialize a tactor experiment.
    Args:
      f0: frequency of the buzz
      blip_len: Length of each sample (1/2 of total) in seconds
      stim_channel: Which channel on the sleeve is being tested.
      click_channel: Which channel gets the click indicating the center
        point.
      mask_channel: Where to put the noise mask signal.
      mask_level: Amplitude of the masking signal. Zero means no mask.
      **kwargs: Arguments for the super class.
    """
    self.f0 = f0
    self.blip_len = blip_len
    self.stim_channel = stim_channel
    self.click_channel = click_channel
    self.mask_channel = mask_channel
    self.mask_level = mask_level
    # Hardware interface; also defines the sample rate we synthesize at.
    self.sleeve = sleeve_usb.SleeveUSB()
    self.fs = self.sleeve.SAMPLE_RATE
    super().__init__(**kwargs)
  def experiment_parameters(self):
    """Returns the superclass parameters plus the tactor-specific ones."""
    params = super().experiment_parameters()
    params['f0'] = self.f0
    params['blip_len'] = self.blip_len
    params['stim_channel'] = self.stim_channel
    params['click_channel'] = self.click_channel
    params['mask_channel'] = self.mask_channel
    params['mask_level'] = self.mask_level
    return params
  def create_stimulus(self) -> None:
    """Creates a 2 alternative forced choice tactor threshold experiment.
    Computes: Sets two items, the NP array of tactor data, and a boolean
    that tells which segment (first or second) is higher.
    """
    if self.test_level < 0:
      raise ValueError('Level argument should be > 0, not %s' % self.test_level)
    fs = self.sleeve.SAMPLE_RATE
    blip_len = int(self.blip_len * fs)  # Segment length in samples.
    test_stim = np.zeros((blip_len, self.sleeve.TACTILE_CHANNELS))
    t = np.arange(blip_len) / float(fs)
    window = np.hanning(blip_len)
    # Windowed sinusoid at the adaptive test level on the tested channel.
    test_stim[:, self.stim_channel] = self.test_level * np.sin(
        2 * np.pi * self.f0 * t) * window
    # Coin flip: buzz in the first or second half-interval.
    if random.random() < 0.5:
      self.correct_answer = 'first'
      s1 = test_stim
      s2 = 0 * test_stim
    else:
      self.correct_answer = 'second'
      s1 = 0 * test_stim
      s2 = test_stim
    self.test_signal = np.concatenate((s1, s2), axis=0)
    if self.mask_level > 0:
      # np.clip's 4th positional argument is `out`: the clipped noise is
      # written in place into the mask channel (a view of test_signal).
      np.clip(self.mask_level * np.random.standard_normal(2 * blip_len), -1, 1,
              self.test_signal[:, self.mask_channel])
    # Add clicks to another channel so user can orient.
    click_len = 50
    click = np.sin(2 * np.pi * 250 * t[:click_len])
    # self.signal[:click_len, click_channel] = click
    self.test_signal[blip_len:(blip_len + click_len),
                     self.click_channel] = click
    # self.signal[-click_len:, click_channel] = click
    self.test_description = 'Stimulus level %g is in the %s segment (%d & %d)' % (
        self.test_level, self.correct_answer, self.stim_channel,
        self.click_channel)
  def _play_stimulus(self, _=None):
    # Opens a fresh USB connection each press and streams the stimulus.
    sleeve = sleeve_usb.SleeveUSB()
    sleeve.send_waves_to_tactors(self.test_signal)
  def show_stimulus(self, done_message: str = None, autoplay=True):
    """Displays the UI that presents the stimulus, audio in this case."""
    del autoplay
    with self.stimulus_pane:
      display.clear_output()
      if done_message:
        b = done_message
      else:
        # Tactile playback cannot autoplay; give the subject a play button.
        b = widgets.Button(
            description='Play Stimulus',
            disabled=False,
            button_style='success', # success, info, warning, danger, ''
            tooltip='play stimulus',
            icon='play' # (FontAwesome names without the `fa-` prefix)
        )
        b.on_click(self._play_stimulus)
      display.display(b)
class TactorPhaseExp(TactorExp2AFC):
  """Test the phase sensitivity of our tactile sensors."""

  def __init__(self,
               title='Are the two signals the same or different?',
               button_names=('Same', 'Different'),
               initial_level=180.0,
               stim_level=0.75,
               stim2_channel=7,
               **kwargs):
    """Initialize the object with the experimental parameters.
    Args:
      title: Redefines title with new default.
      button_names: Redefines button names with new default.
      initial_level: Redefines level with new default (180 degrees.)
      stim_level: What amplitude to give the stimulus.
      stim2_channel: Which channel to use for second stimulus
      **kwargs: Arguments for the super class.
    """
    self.stim_level = stim_level
    self.stim2_channel = stim2_channel
    # Bug fix: experiment_parameters() reads self.initial_level, which was
    # never stored anywhere, raising AttributeError on save.
    self.initial_level = initial_level
    self.sleeve = sleeve_usb.SleeveUSB()
    self.fs = self.sleeve.SAMPLE_RATE
    super().__init__(
        title=title,
        initial_level=initial_level,
        button_names=button_names,
        **kwargs)

  def create_stimulus(self):
    """Creates a phase-difference tactor threshold experiment.
    Returns output in the class' test_signal and test_description slots.
    """
    if self.stim_level < 0:
      raise ValueError('Level argument should be > 0, not %s' % self.stim_level)
    # Coin flip: in-phase ("same") or phase-shifted ("different") channels.
    # NOTE(review): the phase offset comes from stim_level, not the adaptive
    # test_level the staircase updates -- confirm which is intended.
    if random.random() < 0.5:
      self.correct_answer = 'same'
      phase = 0
    else:
      self.correct_answer = 'different'
      phase = 2 * np.pi / 360.0 * max(0.0, min(180.0, self.stim_level))
    self.test_signal = self.synthesize_signal(self.f0, phase)
    if self.mask_level > 0:
      # Bug fix: the noise was previously sized `2 * self.blip_len`, a
      # float number of *seconds* (1.0), not samples -- the wrong type for
      # standard_normal and far shorter than the signal. Match the mask to
      # the synthesized signal length instead.
      num_samples = self.test_signal.shape[0]
      noise = self.mask_level * np.random.standard_normal(num_samples)
      # Write the clipped noise in place into the mask channel's view.
      np.clip(noise, -1, 1, out=self.test_signal[:, self.mask_channel])
    self.test_description = 'Stimulus level %g is %s (%d & %d)' % (
        self.stim_level, self.correct_answer, self.stim_channel,
        self.stim2_channel)

  def experiment_parameters(self):
    """Returns the superclass parameters plus the phase-specific ones."""
    params = super().experiment_parameters()
    params['initial_level'] = self.initial_level
    params['stim_level'] = self.stim_level
    params['stim2_channel'] = self.stim2_channel
    return params

  # The following methods define a simple GUI to let a user explore the
  # frequency phase space.
  def synthesize_signal(self, f0, phase):
    """Just synthesize one test signal: two channels at frequency and phase difference.
    Note: Several parameters of the signal are defined by the class when it
    is initialized,
    including blip_len, stim_level, stim_channel, stim2_channel.
    Args:
      f0: Frequency of the sinusoid.
      phase: Initial phase of the sinusoid in radians.
    Returns:
      A multidimensional vector containing the desired signals.
    """
    fs = self.sleeve.SAMPLE_RATE
    blip_len = int(self.blip_len * fs)  # Length in samples.
    test_stim = np.zeros((blip_len, self.sleeve.TACTILE_CHANNELS))
    t = np.arange(blip_len) / float(fs)
    window = np.hanning(blip_len)
    # Same windowed sinusoid on both channels; only the phase differs.
    test_stim[:, self.stim_channel] = self.stim_level * np.sin(
        2 * np.pi * f0 * t) * window
    test_stim[:, self.stim2_channel] = self.stim_level * np.sin(
        2 * np.pi * f0 * t + phase) * window
    return test_stim

  def play_event_handler(self, obj):
    """Handles the play_widget GUI, playing the desired stimulus."""
    f0 = self.f0_widget.value
    phase = self.phase_widget.value
    print(
        f'Got a click from {obj.description} for {f0}Hz at {phase} degrees.')
    self.test_signal = self.synthesize_signal(f0, phase)
    self._play_stimulus()

  def play_widget(self):
    """Creates a widget that lets us test different frequencies and phases."""
    self.f0_widget = widgets.FloatSlider(
        value=32,
        min=25,
        max=250,
        step=1,
        description='F0:',
        disabled=False,
        continuous_update=False,
        orientation='vertical',
        readout=True,
        readout_format='.1f',
    )
    self.phase_widget = widgets.FloatSlider(
        value=90,
        min=0,
        max=180.0,
        step=1,
        description='Phase:',
        disabled=False,
        continuous_update=False,
        orientation='vertical',
        readout=True,
        readout_format='.1f',
    )
    play_same = widgets.Button(
        description='Play Same',
        disabled=False,
        button_style='', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Click me',
        icon='check' # (FontAwesome names without the `fa-` prefix)
    )
    play_same.on_click(self.play_event_handler)
    play_different = widgets.Button(
        description='Play Different',
        disabled=False,
        button_style='', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Click me',
        icon='check' # (FontAwesome names without the `fa-` prefix)
    )
    play_different.on_click(self.play_event_handler)
    button_pane = widgets.VBox([play_same, play_different])
    test_pane = widgets.HBox([self.f0_widget, self.phase_widget, button_pane])
    return test_pane
| [
"IPython.display.Audio",
"levitt_experiment.LevittExp",
"ipywidgets.Output",
"numpy.sin",
"numpy.arange",
"ipywidgets.Button",
"IPython.display.display",
"ipywidgets.Label",
"numpy.hanning",
"sleeve_usb.SleeveUSB",
"json.dump",
"ipywidgets.HBox",
"random.random",
"numpy.random.standard_nor... | [((1603, 1619), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (1617, 1619), True, 'import ipywidgets as widgets\n'), ((1810, 1882), 'ipywidgets.Label', 'widgets.Label', (['self.title'], {'disabled': '(False)', 'style': "{'font_weight': 'bold'}"}), "(self.title, disabled=False, style={'font_weight': 'bold'})\n", (1823, 1882), True, 'import ipywidgets as widgets\n'), ((1920, 1967), 'ipywidgets.Label', 'widgets.Label', (['self.description'], {'disabled': '(False)'}), '(self.description, disabled=False)\n', (1933, 1967), True, 'import ipywidgets as widgets\n'), ((2400, 2463), 'ipywidgets.Label', 'widgets.Label', (['f"""Trial number {self.trial_num}"""'], {'disabled': '(False)'}), "(f'Trial number {self.trial_num}', disabled=False)\n", (2413, 2463), True, 'import ipywidgets as widgets\n'), ((2495, 2528), 'ipywidgets.Label', 'widgets.Label', (['""""""'], {'disabled': '(False)'}), "('', disabled=False)\n", (2508, 2528), True, 'import ipywidgets as widgets\n'), ((3583, 3601), 'numpy.zeros', 'np.zeros', (['blip_len'], {}), '(blip_len)\n', (3591, 3601), True, 'import numpy as np\n'), ((3683, 3710), 'numpy.sin', 'np.sin', (['(2 * np.pi * t * 440)'], {}), '(2 * np.pi * t * 440)\n', (3689, 3710), True, 'import numpy as np\n'), ((5633, 5753), 'levitt_experiment.LevittExp', 'levitt.LevittExp', ([], {'initial_level': 'initial_level', 'change_delta': '(0.5)', 'decrease_step_by_run': '(True)', 'multiplicative_step': '(True)'}), '(initial_level=initial_level, change_delta=0.5,\n decrease_step_by_run=True, multiplicative_step=True)\n', (5649, 5753), True, 'import levitt_experiment as levitt\n'), ((8531, 8576), 'random.normalvariate', 'random.normalvariate', (['self.f0', 'self.pitch_std'], {}), '(self.f0, self.pitch_std)\n', (8551, 8576), False, 'import random\n'), ((8906, 8926), 'numpy.hanning', 'np.hanning', (['stim_len'], {}), '(stim_len)\n', (8916, 8926), True, 'import numpy as np\n'), ((10261, 10283), 'sleeve_usb.SleeveUSB', 
'sleeve_usb.SleeveUSB', ([], {}), '()\n', (10281, 10283), False, 'import sleeve_usb\n'), ((11157, 11207), 'numpy.zeros', 'np.zeros', (['(blip_len, self.sleeve.TACTILE_CHANNELS)'], {}), '((blip_len, self.sleeve.TACTILE_CHANNELS))\n', (11165, 11207), True, 'import numpy as np\n'), ((11262, 11282), 'numpy.hanning', 'np.hanning', (['blip_len'], {}), '(blip_len)\n', (11272, 11282), True, 'import numpy as np\n'), ((11619, 11651), 'numpy.concatenate', 'np.concatenate', (['(s1, s2)'], {'axis': '(0)'}), '((s1, s2), axis=0)\n', (11633, 11651), True, 'import numpy as np\n'), ((11902, 11941), 'numpy.sin', 'np.sin', (['(2 * np.pi * 250 * t[:click_len])'], {}), '(2 * np.pi * 250 * t[:click_len])\n', (11908, 11941), True, 'import numpy as np\n'), ((12378, 12400), 'sleeve_usb.SleeveUSB', 'sleeve_usb.SleeveUSB', ([], {}), '()\n', (12398, 12400), False, 'import sleeve_usb\n'), ((13963, 13985), 'sleeve_usb.SleeveUSB', 'sleeve_usb.SleeveUSB', ([], {}), '()\n', (13983, 13985), False, 'import sleeve_usb\n'), ((15962, 16012), 'numpy.zeros', 'np.zeros', (['(blip_len, self.sleeve.TACTILE_CHANNELS)'], {}), '((blip_len, self.sleeve.TACTILE_CHANNELS))\n', (15970, 16012), True, 'import numpy as np\n'), ((16067, 16087), 'numpy.hanning', 'np.hanning', (['blip_len'], {}), '(blip_len)\n', (16077, 16087), True, 'import numpy as np\n'), ((16792, 16974), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'value': '(32)', 'min': '(25)', 'max': '(250)', 'step': '(1)', 'description': '"""F0:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""vertical"""', 'readout': '(True)', 'readout_format': '""".1f"""'}), "(value=32, min=25, max=250, step=1, description='F0:',\n disabled=False, continuous_update=False, orientation='vertical',\n readout=True, readout_format='.1f')\n", (16811, 16974), True, 'import ipywidgets as widgets\n'), ((17079, 17267), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'value': '(90)', 'min': '(0)', 'max': '(180.0)', 'step': '(1)', 
'description': '"""Phase:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""vertical"""', 'readout': '(True)', 'readout_format': '""".1f"""'}), "(value=90, min=0, max=180.0, step=1, description=\n 'Phase:', disabled=False, continuous_update=False, orientation=\n 'vertical', readout=True, readout_format='.1f')\n", (17098, 17267), True, 'import ipywidgets as widgets\n'), ((17362, 17472), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Play Same"""', 'disabled': '(False)', 'button_style': '""""""', 'tooltip': '"""Click me"""', 'icon': '"""check"""'}), "(description='Play Same', disabled=False, button_style='',\n tooltip='Click me', icon='check')\n", (17376, 17472), True, 'import ipywidgets as widgets\n'), ((17681, 17797), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Play Different"""', 'disabled': '(False)', 'button_style': '""""""', 'tooltip': '"""Click me"""', 'icon': '"""check"""'}), "(description='Play Different', disabled=False, button_style=\n '', tooltip='Click me', icon='check')\n", (17695, 17797), True, 'import ipywidgets as widgets\n'), ((18007, 18048), 'ipywidgets.VBox', 'widgets.VBox', (['[play_same, play_different]'], {}), '([play_same, play_different])\n', (18019, 18048), True, 'import ipywidgets as widgets\n'), ((18065, 18127), 'ipywidgets.HBox', 'widgets.HBox', (['[self.f0_widget, self.phase_widget, button_pane]'], {}), '([self.f0_widget, self.phase_widget, button_pane])\n', (18077, 18127), True, 'import ipywidgets as widgets\n'), ((2040, 2150), 'ipywidgets.Button', 'widgets.Button', ([], {'description': 'l', 'disabled': '(False)', 'button_style': '"""warning"""', 'tooltip': 'f"""{l} answer"""', 'icon': '"""check"""'}), "(description=l, disabled=False, button_style='warning',\n tooltip=f'{l} answer', icon='check')\n", (2054, 2150), True, 'import ipywidgets as widgets\n'), ((2813, 2939), 'ipywidgets.VBox', 'widgets.VBox', (['[self.title_pane, self.description_pane, self.stimulus_pane, 
answer_pane,\n self.legend_pane, self.debug_pane]'], {}), '([self.title_pane, self.description_pane, self.stimulus_pane,\n answer_pane, self.legend_pane, self.debug_pane])\n', (2825, 2939), True, 'import ipywidgets as widgets\n'), ((4081, 4103), 'IPython.display.clear_output', 'display.clear_output', ([], {}), '()\n', (4101, 4103), True, 'import IPython.display as display\n'), ((4265, 4283), 'IPython.display.display', 'display.display', (['a'], {}), '(a)\n', (4280, 4283), True, 'import IPython.display as display\n'), ((7597, 7620), 'json.dump', 'json.dump', (['exp_dict', 'fp'], {}), '(exp_dict, fp)\n', (7606, 7620), False, 'import json\n'), ((8637, 8652), 'random.random', 'random.random', ([], {}), '()\n', (8650, 8652), False, 'import random\n'), ((8852, 8875), 'numpy.arange', 'np.arange', (['(2 * stim_len)'], {}), '(2 * stim_len)\n', (8861, 8875), True, 'import numpy as np\n'), ((8990, 9027), 'numpy.sin', 'np.sin', (['(2 * np.pi * s1 * t[:stim_len])'], {}), '(2 * np.pi * s1 * t[:stim_len])\n', (8996, 9027), True, 'import numpy as np\n'), ((9071, 9108), 'numpy.sin', 'np.sin', (['(2 * np.pi * s2 * t[stim_len:])'], {}), '(2 * np.pi * s2 * t[stim_len:])\n', (9077, 9108), True, 'import numpy as np\n'), ((11217, 11236), 'numpy.arange', 'np.arange', (['blip_len'], {}), '(blip_len)\n', (11226, 11236), True, 'import numpy as np\n'), ((11397, 11412), 'random.random', 'random.random', ([], {}), '()\n', (11410, 11412), False, 'import random\n'), ((12647, 12669), 'IPython.display.clear_output', 'display.clear_output', ([], {}), '()\n', (12667, 12669), True, 'import IPython.display as display\n'), ((13066, 13084), 'IPython.display.display', 'display.display', (['b'], {}), '(b)\n', (13081, 13084), True, 'import IPython.display as display\n'), ((14448, 14463), 'random.random', 'random.random', ([], {}), '()\n', (14461, 14463), False, 'import random\n'), ((16022, 16041), 'numpy.arange', 'np.arange', (['blip_len'], {}), '(blip_len)\n', (16031, 16041), True, 'import numpy as np\n'), 
((4176, 4245), 'IPython.display.Audio', 'display.Audio', ([], {'data': 'self.test_signal', 'rate': 'self.fs', 'autoplay': 'autoplay'}), '(data=self.test_signal, rate=self.fs, autoplay=autoplay)\n', (4189, 4245), True, 'import IPython.display as display\n'), ((11339, 11370), 'numpy.sin', 'np.sin', (['(2 * np.pi * self.f0 * t)'], {}), '(2 * np.pi * self.f0 * t)\n', (11345, 11370), True, 'import numpy as np\n'), ((12742, 12868), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Play Stimulus"""', 'disabled': '(False)', 'button_style': '"""success"""', 'tooltip': '"""play stimulus"""', 'icon': '"""play"""'}), "(description='Play Stimulus', disabled=False, button_style=\n 'success', tooltip='play stimulus', icon='play')\n", (12756, 12868), True, 'import ipywidgets as widgets\n'), ((16145, 16171), 'numpy.sin', 'np.sin', (['(2 * np.pi * f0 * t)'], {}), '(2 * np.pi * f0 * t)\n', (16151, 16171), True, 'import numpy as np\n'), ((16247, 16281), 'numpy.sin', 'np.sin', (['(2 * np.pi * f0 * t + phase)'], {}), '(2 * np.pi * f0 * t + phase)\n', (16253, 16281), True, 'import numpy as np\n'), ((11713, 11752), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(2 * blip_len)'], {}), '(2 * blip_len)\n', (11738, 11752), True, 'import numpy as np\n'), ((14768, 14812), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(2 * self.blip_len)'], {}), '(2 * self.blip_len)\n', (14793, 14812), True, 'import numpy as np\n')] |
import pandas as pd
import matplotlib.pyplot as plt
from PyQt5.QtCore import *
from libs.figure.figure_QDialog import fig_Dialog
import os
import numpy as np
class save_DynamicResult_(QThread):
    """Worker thread that aggregates tracked binding/debinding events and
    writes per-frame counts (new and cumulative) to an Excel file.

    NOTE(review): original comments/docstrings translated from Chinese;
    the expected layout of ``over_tracked`` is inferred from the indexing
    below -- each track appears to be ``[label, (frame, ..., kind), ...]``
    with ``kind`` in {"binding", "debinding"} -- TODO confirm with the
    tracker that produces it.
    """
    def __init__(self, over_tracked, parameter, save_path, parent=None):
        super(save_DynamicResult_, self).__init__()
        self.overtracked = over_tracked
        # Per-kind frame lists used by Method 1.
        self.particle = {"binding":[], "debinding":[]}
        self.parameter = parameter
        self.save_path = save_path
        # Frame lists used by Method 0.
        self.binding = []
        self.debinding = []
        self.Method = parameter[0]    # 0 or 1: selects the aggregation strategy below
        self.SubImg_T = parameter[1]  # sub-image period in frames (used in the Method-0 special case)
    def save(self):
        """Collect start/end frames of every track and export counts to Excel."""
        all = self.overtracked
        for i in range(len(all)):
            if self.Method == 0:
                # Track starts at its first event frame and ends at its last,
                # unless a "debinding" event terminates it earlier.
                start_frame = all[i][1][0]
                over_frame = all[i][-1][0]
                if all[i][-1][2] == "debinding":
                    over_index = self.search_debinding(all[i])
                    over_frame = all[i][over_index][0]
                if self.Method == 0 and all[i][-1][2] == "binding" and all[i][-1][0] % self.SubImg_T == 0:
                    # TODO: this needs to be fixed!!!
                    pass # If (after subtracting the first frame) the track's last frame is an exact multiple of SubImg_T, the particle is assumed to still exist
                self.binding.append(start_frame)
                self.debinding.append(over_frame)
            else:
                if len(all[i]) == 2:
                    # If this class has only one event it could be either binding
                    # or debinding; record it under its own kind (debinding-only
                    # tracks are skipped).
                    if all[i][-1][2] != "debinding":
                        self.particle[all[i][-1][2]].append(all[i][-1][0])
                    pass
                # Below handles classes with more than two entries. The standard
                # case starts with binding and ends with debinding; the
                # non-standard case starts with binding and ends with binding.
                start_frame = all[i][1][0]
                over_frame = all[i][-1][0]
                over_index = -1
                if all[i][-1][2] == "debinding":
                    over_index = self.search_debinding(all[i])
                    over_frame = all[i][over_index][0]
                self.particle["binding"].append(start_frame)
                self.particle["debinding"].append(over_frame)
                # if all[i][-1][2] == "debinding":
                #     over_index = self.search_debinding(all[i])
                #     over_frame = all[i][over_index][0]
                # if all[i][-1][2] == "binding" and all[i][over_index][2] == "debinding":
                #     self.particle["binding"].append(start_frame)
                #     self.particle["debinding"].append(over_frame)
                # elif all[i][-1][2] == "binding" and all[i][over_index][2] == "binding":
                #     self.particle["binding"].append(start_frame)
                # elif all[i][-1][2] == "debinding" and all[i][over_index][2] == "debinding":
                #     self.particle["debinding"].append(over_frame)
        if self.Method == 1:
            self.binding = self.particle["binding"]
            self.debinding = self.particle["debinding"]
        print(self.binding)
        # Turn raw frame lists into sorted [frame, count] pairs.
        binding = self.sort_(self.binding)
        debinding = self.sort_(self.debinding)
        binding_Data = pd.DataFrame(binding, columns=["Frame", "New Binding"])
        binding_Data = binding_Data.set_index("Frame", drop=True)
        debinding_Data = pd.DataFrame(debinding, columns=["Frame", "New Debinding"])
        debinding_Data = debinding_Data.set_index("Frame", drop=True)
        df = pd.concat([binding_Data, debinding_Data], axis=1)
        print(df)
        # Re-index onto a dense 1..max_frame axis, filling missing frames with 0.
        max_index = df.index[-1]
        index = [i for i in range(1, max_index + 1)]
        data = np.zeros([max_index, 2])
        for i in df.index:
            data[i - 1, :] = df.loc[i, :]
        new = pd.DataFrame(data, index=index, columns=["New Binding", "New Debinding"])
        new = new.fillna(0)
        # Cumulative counts per frame (running sums of the "new" columns).
        have_binding = [[1, 0]]
        have_debinding = [[1, 0]]
        b_, deb_ = 0, 0
        for i in range(1, len(new)):
            b_ += new.iloc[i]["New Binding"]
            deb_ += new.iloc[i]["New Debinding"]
            have_binding.append([i + 1, b_])
            have_debinding.append([i + 1, deb_])
        have_binding_Data = pd.DataFrame(have_binding, columns=["Frame", "have Binding"])
        have_binding_Data = have_binding_Data.set_index("Frame", drop=True)
        have_debinding_Data = pd.DataFrame(have_debinding, columns=["Frame", "have Debinding"])
        have_debinding_Data = have_debinding_Data.set_index("Frame", drop=True)
        have_ = pd.concat([have_binding_Data, have_debinding_Data], axis=1)
        add_have = pd.concat([new, have_], axis=1)
        # print(df)
        # NOTE(review): ExcelWriter.save() is deprecated in recent pandas in
        # favor of close() -- verify against the pinned pandas version.
        writer = pd.ExcelWriter(self.save_path) # write to an Excel file
        add_have.to_excel(writer, 'page_1', float_format='%d')
        worksheet1 = writer.sheets["page_1"]
        worksheet1.set_column('A:D', 13)
        writer.save()
        writer.close()
    def sort_(self, result):
        """Return sorted ``[value, count]`` pairs for the frames in *result*."""
        result = pd.value_counts(result)
        x = list(result.index)
        x = sorted(x)
        sorted_ = [[i, result[i]] for i in x]
        return sorted_
    def search_debinding(self, data):
        '''Search backwards from the end of *data*; the first position where
        "debinding" follows a "binding" entry is taken as the track's end.
        Returns that (negative) index, or -1 if no such transition exists.'''
        index = -1
        if data[1][2] == "debinding":
            return 1
        for i in range(2, len(data)):
            index = -1 * i
            if data[index][2] == "binding" and data[index + 1][2] == "debinding":
                return index + 1
            if abs(index) >= len(data):
                return -1
        return -1
| [
"pandas.DataFrame",
"numpy.zeros",
"pandas.ExcelWriter",
"pandas.concat",
"pandas.value_counts"
] | [((3048, 3103), 'pandas.DataFrame', 'pd.DataFrame', (['binding'], {'columns': "['Frame', 'New Binding']"}), "(binding, columns=['Frame', 'New Binding'])\n", (3060, 3103), True, 'import pandas as pd\n'), ((3195, 3254), 'pandas.DataFrame', 'pd.DataFrame', (['debinding'], {'columns': "['Frame', 'New Debinding']"}), "(debinding, columns=['Frame', 'New Debinding'])\n", (3207, 3254), True, 'import pandas as pd\n'), ((3339, 3388), 'pandas.concat', 'pd.concat', (['[binding_Data, debinding_Data]'], {'axis': '(1)'}), '([binding_Data, debinding_Data], axis=1)\n', (3348, 3388), True, 'import pandas as pd\n'), ((3509, 3533), 'numpy.zeros', 'np.zeros', (['[max_index, 2]'], {}), '([max_index, 2])\n', (3517, 3533), True, 'import numpy as np\n'), ((3617, 3690), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'index', 'columns': "['New Binding', 'New Debinding']"}), "(data, index=index, columns=['New Binding', 'New Debinding'])\n", (3629, 3690), True, 'import pandas as pd\n'), ((4062, 4123), 'pandas.DataFrame', 'pd.DataFrame', (['have_binding'], {'columns': "['Frame', 'have Binding']"}), "(have_binding, columns=['Frame', 'have Binding'])\n", (4074, 4123), True, 'import pandas as pd\n'), ((4230, 4295), 'pandas.DataFrame', 'pd.DataFrame', (['have_debinding'], {'columns': "['Frame', 'have Debinding']"}), "(have_debinding, columns=['Frame', 'have Debinding'])\n", (4242, 4295), True, 'import pandas as pd\n'), ((4392, 4451), 'pandas.concat', 'pd.concat', (['[have_binding_Data, have_debinding_Data]'], {'axis': '(1)'}), '([have_binding_Data, have_debinding_Data], axis=1)\n', (4401, 4451), True, 'import pandas as pd\n'), ((4471, 4502), 'pandas.concat', 'pd.concat', (['[new, have_]'], {'axis': '(1)'}), '([new, have_], axis=1)\n', (4480, 4502), True, 'import pandas as pd\n'), ((4541, 4571), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['self.save_path'], {}), '(self.save_path)\n', (4555, 4571), True, 'import pandas as pd\n'), ((4826, 4849), 'pandas.value_counts', 'pd.value_counts', 
(['result'], {}), '(result)\n', (4841, 4849), True, 'import pandas as pd\n')] |
import numpy
import seaborn
import matplotlib.pyplot as plt
import pandas
import textwrap
import os
import scipy.stats.distributions
import numpy as np
def plot_sampling_boundaries_1D(x_values, ranges, **kwargs):
    """Mark the 1D sampling boundaries as dashed black vertical lines.

    One line is drawn at ``ranges`` and one at ``max(x_values)``;
    ``kwargs`` is accepted for call-site compatibility but unused.
    """
    for boundary in (ranges, max(x_values)):
        plt.axvline(boundary, ls='--', color='k', lw=1)
def plot_triangle(params, likelihood, ranges, opt_params, n, boundaries=True, maxima=False):
    """Corner (pairplot) figure of a parameter trace with optional overlays.

    Draws KDE pair plots of ``n`` samples from ``params[:-1]``. On the
    diagonal (when ``boundaries``), dashed black lines mark the sampling
    ranges and dashed red lines mark a 95% interval of a normal centred on
    the last parameter set (sd = mean / 10). Off-diagonal panels (when
    ``maxima``) scatter the optimizer results in ``opt_params``.
    Saves to ``result/figures/trace_with_sampling_boundaries.png``.

    NOTE(review): assumes ``ranges`` is indexable as ``ranges[i, 0/1]``
    (an (n_params, 2) array) -- confirm with callers.
    """
    df = pandas.DataFrame(params[:-1], columns=likelihood.flat_parameter_names)
    # Last row of the trace, kept separate: used for the red reference lines.
    df2 = pandas.DataFrame(np.expand_dims(params[-1], axis=0), columns=likelihood.flat_parameter_names)
    # Wrap long parameter names so axis labels stay readable.
    wrapper = textwrap.TextWrapper(width=25)
    columns = {}
    for i, column in enumerate(df.columns):
        columns[column] = wrapper.fill(column)
    df.rename(columns=columns, inplace=True)
    pairplot = seaborn.pairplot(df.sample(n=n), kind='kde', corner=True)
    for i in range(pairplot.axes.shape[0]):
        for j in range(pairplot.axes.shape[0]):
            if i == j and boundaries is True:
                # Diagonal: sampling boundaries (black) and 95% normal interval (red).
                pairplot.axes[i][j].axvline(ranges[i, 0], ls='--', color='k')
                pairplot.axes[i][j].axvline(ranges[i, 1], ls='--', color='k')
                pairplot.axes[i][j].axvline(
                    scipy.stats.distributions.norm(df2.values[0][i], df2.values[0][i] / 10).ppf(0.025), ls='--',
                    color='r')
                pairplot.axes[i][j].axvline(
                    scipy.stats.distributions.norm(df2.values[0][i], df2.values[0][i] / 10).ppf(0.975), ls='--',
                    color='r')
            elif i > j and maxima is True:
                # Lower triangle: overlay each optimized parameter set as an 'x'.
                for param_set in opt_params:
                    pairplot.axes[i][j].scatter(param_set[j], param_set[i], marker='x', color='k')
    plt.tight_layout()
    pairplot.savefig(os.path.join('result/figures', 'trace_with_sampling_boundaries.png'), dpi=300)
    # pairplot.savefig('trace_with_opt.png', dpi=300)
    plt.close()
def plot_parameter_changes(optimized_params, original_params, parameter_labels, optimization_labels):
    """Plot every optimized parameter set as a percentage of the original force field.

    The first two optimization runs are drawn with default styling; later
    runs are de-emphasized (dash-dot, translucent blue). A dashed black
    line at 100% marks the original force field.
    """
    plt.figure(figsize=(10, 10))
    percent_changes = []
    for run_idx, param_set in enumerate(optimized_params):
        pct = 100 * param_set / original_params
        if run_idx > 1:
            plt.plot(pct, label=optimization_labels[run_idx], ls='-.', alpha=0.4, color='b')
        else:
            plt.plot(pct, label=optimization_labels[run_idx])
        percent_changes.append(pct)
    plt.axhline(100, ls='--', color='k', label='Original force field')
    tick_positions = np.linspace(0, 11, 12)
    # Extra bottom margin so the rotated parameter labels are not clipped.
    plt.gcf().subplots_adjust(bottom=0.3)
    plt.xticks(tick_positions, parameter_labels, rotation='vertical')
    plt.xlabel('Parameter')
    plt.ylabel('% Change from original force field')
    plt.title('Parameter Changes')
    plt.legend()
    plt.show()
| [
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"os.path.join",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"textwrap.TextWrapper",
"matplotlib.pyplot.fi... | [((219, 264), 'matplotlib.pyplot.axvline', 'plt.axvline', (['ranges'], {'ls': '"""--"""', 'color': '"""k"""', 'lw': '(1)'}), "(ranges, ls='--', color='k', lw=1)\n", (230, 264), True, 'import matplotlib.pyplot as plt\n'), ((426, 496), 'pandas.DataFrame', 'pandas.DataFrame', (['params[:-1]'], {'columns': 'likelihood.flat_parameter_names'}), '(params[:-1], columns=likelihood.flat_parameter_names)\n', (442, 496), False, 'import pandas\n'), ((615, 645), 'textwrap.TextWrapper', 'textwrap.TextWrapper', ([], {'width': '(25)'}), '(width=25)\n', (635, 645), False, 'import textwrap\n'), ((1735, 1753), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1751, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1924), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1922, 1924), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2043, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2499), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(100)'], {'ls': '"""--"""', 'color': '"""k"""', 'label': '"""Original force field"""'}), "(100, ls='--', color='k', label='Original force field')\n", (2444, 2499), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2530), 'numpy.linspace', 'np.linspace', (['(0)', '(11)', '(12)'], {}), '(0, 11, 12)\n', (2519, 2530), True, 'import numpy as np\n'), ((2575, 2627), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'parameter_labels'], {'rotation': '"""vertical"""'}), "(x, parameter_labels, rotation='vertical')\n", (2585, 2627), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2653), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Parameter"""'], {}), "('Parameter')\n", (2640, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% Change from original force field"""'], {}), "('% Change from original force 
field')\n", (2668, 2706), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2741), 'matplotlib.pyplot.title', 'plt.title', (['"""Parameter Changes"""'], {}), "('Parameter Changes')\n", (2720, 2741), True, 'import matplotlib.pyplot as plt\n'), ((2746, 2758), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2756, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2771, 2773), True, 'import matplotlib.pyplot as plt\n'), ((524, 558), 'numpy.expand_dims', 'np.expand_dims', (['params[-1]'], {'axis': '(0)'}), '(params[-1], axis=0)\n', (538, 558), True, 'import numpy as np\n'), ((1775, 1843), 'os.path.join', 'os.path.join', (['"""result/figures"""', '"""trace_with_sampling_boundaries.png"""'], {}), "('result/figures', 'trace_with_sampling_boundaries.png')\n", (1787, 1843), False, 'import os\n'), ((2166, 2274), 'matplotlib.pyplot.plot', 'plt.plot', (['(100 * param_set / original_params)'], {'label': 'optimization_labels[i]', 'ls': '"""-."""', 'alpha': '(0.4)', 'color': '"""b"""'}), "(100 * param_set / original_params, label=optimization_labels[i],\n ls='-.', alpha=0.4, color='b')\n", (2174, 2274), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2367), 'matplotlib.pyplot.plot', 'plt.plot', (['(100 * param_set / original_params)'], {'label': 'optimization_labels[i]'}), '(100 * param_set / original_params, label=optimization_labels[i])\n', (2302, 2367), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2542), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2540, 2542), True, 'import matplotlib.pyplot as plt\n')] |
import arviz
import numpy as np
import pandas
import seaborn as sns
import torch
import torch.distributions as dist
from matplotlib import pyplot
import test_stan
import generate_data
sns.set()  # apply seaborn's default theme to all subsequent matplotlib plots
# np.random.seed(1)
def user_simulator_typezero(action, W, a, educability=0.6):
    """Simulate one response of a type-zero user.

    An ``int`` action is an educate action and returns a
    Bernoulli(``educability``) draw (a tensor). Any other action is a
    feature tensor: the user answers 1 with probability
    sigmoid(``a + action @ W``) and the answer is returned as an int.
    """
    if not isinstance(action, int):
        logits = a + action @ W
        answer = dist.Bernoulli(logits=logits).sample()
        return int(answer.item())
    # Educate action
    print("Educate!")
    return dist.Bernoulli(educability).sample()
def user_simulator_typeone(action, W, a, educability=0.6):
    """Simulate one response of a type-one user.

    Integer actions are educate actions: a Bernoulli(``educability``)
    sample (tensor) is returned. Otherwise ``action`` is a feature tensor
    and the answer is a Bernoulli draw with logit ``a + action @ W``,
    returned as an int.
    """
    if isinstance(action, int):
        # Educate action
        print("Educate!")
        return dist.Bernoulli(educability).sample()
    answer = dist.Bernoulli(logits=a + action @ W).sample()
    return int(answer.item())
def user_simulator_switching(action, W, a, educability=0.1, user_type=0, forgetting=0.0):
    """Simulate a user that can switch from type zero to type one.

    ``W[0]`` holds the type-zero weights, ``W[1]`` the type-one weights.
    An ``int`` action is an educate action: it returns the (possibly new)
    user type -- a type-zero user becomes type-one with probability
    ``educability``, a type-one user stays type-one (probability 1.0).
    Any other action is a feature tensor and the user's 0/1 answer is a
    Bernoulli draw with logit ``a + action @ W[user_type]``.
    ``forgetting`` is accepted but currently unused.
    """
    per_type_educability = (educability, 1.0)
    if isinstance(action, int):
        # Educate action: sample the user's next type.
        switched = dist.Bernoulli(per_type_educability[user_type]).sample()
        return int(switched.item())
    answer = dist.Bernoulli(logits=a + action @ W[user_type]).sample()
    return int(answer.item())
def test_user_typezero():
    """Run a scripted recommendation loop against a type-zero user and fit the Stan model.

    NOTE(review): ``generate_data`` is imported as a module (``import
    generate_data``) but called directly here; unless that module object is
    callable this raises TypeError -- confirm the intended import
    (e.g. ``from generate_data import generate_data``).
    """
    training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
    # Absolute correlation matrix of [covariates | response] (response last).
    corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
    # User weight profiles: type-zero ignores cross-correlation, type-one penalizes it.
    W_typezero = [5.0, 0.0]
    W_typeone = [5.0, -5.0]
    n_covars = training_X.shape[1]
    data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone]}
    # xi[k] == True means covariate k is currently accepted by the user.
    aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
    n_iterations = 100
    teacher_actions = list(np.random.choice(n_covars, n_iterations))
    model_file = None
    for i in range(n_iterations):
        act_in = teacher_actions[i]
        if act_in != -1:
            # Max cross-correlation of the recommended covariate with the
            # currently accepted set (excluding itself).
            mask = aux_data_dict["xi"].numpy().copy()
            mask[act_in] = False
            masked = corr_mat[act_in, mask]
            if masked.size != 0:
                max_cross_corr = np.max(masked)
            else:
                max_cross_corr = 0.0
            # Action features: (correlation with response, max cross-correlation).
            action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
            outcome = user_simulator_typezero(action, torch.tensor(W_typezero, dtype=torch.double), a=1.0)
            if outcome == 1.0:
                aux_data_dict["xi"][act_in] = True
            else:
                aux_data_dict["xi"][act_in] = False
            data_dict["x"].append(action.tolist())
            data_dict["y"].append(outcome)
        data_dict["N"] += 1
        # Refit the Stan model on the accumulated interaction data.
        fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
    arviz.plot_trace(fit)
    pyplot.show()
def test_user_typeone():
    """Run a scripted recommendation loop against a type-one user and fit the Stan model.

    NOTE(review): ``generate_data`` is imported as a module (``import
    generate_data``) but called directly here; unless that module object is
    callable this raises TypeError -- confirm the intended import
    (e.g. ``from generate_data import generate_data``).
    """
    training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
    # Absolute correlation matrix of [covariates | response] (response last).
    corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
    # User weight profiles: type-zero ignores cross-correlation, type-one penalizes it.
    W_typezero = [5.0, 0.0]
    W_typeone = [5.0, -5.0]
    n_covars = training_X.shape[1]
    data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone]}
    # xi[k] == True means covariate k is currently accepted by the user.
    aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
    n_iterations = 20
    teacher_actions = list(np.random.choice(n_covars, n_iterations))
    model_file = None
    for i in range(n_iterations):
        act_in = teacher_actions[i]
        if act_in != -1:
            # Max cross-correlation of the recommended covariate with the
            # currently accepted set (excluding itself).
            mask = aux_data_dict["xi"].numpy().copy()
            mask[act_in] = False
            masked = corr_mat[act_in, mask]
            if masked.size != 0:
                max_cross_corr = np.max(masked)
            else:
                max_cross_corr = 0.0
            # Action features: (correlation with response, max cross-correlation).
            action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
            outcome = user_simulator_typeone(action, torch.tensor(W_typeone, dtype=torch.double), a=1.0)
            if outcome == 1.0:
                aux_data_dict["xi"][act_in] = True
            else:
                aux_data_dict["xi"][act_in] = False
            data_dict["x"].append(action.tolist())
            data_dict["y"].append(outcome)
        data_dict["N"] += 1
        # Refit the Stan model on the accumulated interaction data.
        fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
    arviz.plot_trace(fit)
    pyplot.show()
def test_user_switching(educability=0.01):
    """Interleave educate and recommend actions against a switching user, then fit and plot.

    NOTE(review): ``generate_data`` is imported as a module (``import
    generate_data``) but called directly here; unless that module object is
    callable this raises TypeError -- confirm the intended import.
    """
    training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
    # Absolute correlation matrix of [covariates | response] (response last).
    corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
    sns.heatmap(corr_mat)
    pyplot.show()
    # User weight profiles: type-zero ignores cross-correlation, type-one penalizes it.
    W_typezero = [5.0, 0.0]
    W_typeone = [5.0, -5.0]
    n_covars = training_X.shape[1]
    data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone], "educability": educability,
                 "forgetting": 0.0}
    # xi[k] == True means covariate k is currently accepted by the user.
    aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
    n_iterations = 100
    recommend_actions = list(np.random.choice(n_covars, n_iterations))
    # 0 -> educate, 1 -> recommend; first action forced to be a recommendation.
    educate_or_recommend = list(np.random.choice(2, n_iterations, p=(0.5, 0.5)))
    educate_or_recommend[0] = 1
    model_file = None
    user_type = 0
    change_point = 0
    for i in range(n_iterations):
        #print("Step: {}".format(i))
        if educate_or_recommend[i] == 0:
            act_in = -1
        else:
            act_in = recommend_actions[i]
        if act_in != -1:
            # Recommend branch: build the (response-corr, max-cross-corr) action.
            mask = aux_data_dict["xi"].numpy().copy()
            mask[act_in] = False
            masked = corr_mat[act_in, mask]
            if masked.size != 0:
                max_cross_corr = np.max(masked)
            else:
                max_cross_corr = 0.0
            action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
            outcome = user_simulator_switching(action, torch.tensor([W_typezero, W_typeone], dtype=torch.double), a=1.0,
                                              educability=data_dict["educability"], user_type=user_type)
            if outcome == 1:
                aux_data_dict["xi"][act_in] = True
            else:
                aux_data_dict["xi"][act_in] = False
            data_dict["x"].append(action.tolist())
            data_dict["y"].append(outcome)
        else:
            # Educate branch: the simulator returns the (possibly new) user type.
            _user_type = 0 + user_type
            user_type = user_simulator_switching(act_in, torch.tensor([W_typezero, W_typeone], dtype=torch.double),
                                                 a=1.0, educability=data_dict["educability"], user_type=user_type)
            # Educate actions are logged with sentinel features and outcome 0.
            action = [-1.0, -1.0]
            outcome = 0
            data_dict["x"].append(action)
            data_dict["y"].append(outcome)
            if user_type == 1 and _user_type == 0:
                print("State Changed to Type 1 at iteration: {}".format(i))
                change_point += i
        data_dict["N"] += 1
        # Refit the Stan model after every interaction.
        fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
        # if i % 100 ==0:
    s = fit.summary()
    print(fit)
    arviz.plot_trace(fit)
    pyplot.show()
    summary = pandas.DataFrame(s['summary'], columns=s['summary_colnames'], index=s['summary_rownames'])
    print(summary.iloc[2:6, :])
    strt = 6 + (3 * n_iterations)
    endn = strt + n_iterations
    print(summary.iloc[strt, :])
    print(summary.iloc[endn, :])
    # NOTE(review): the 307:407 slice is hard-coded for n_iterations == 100;
    # it will index the wrong rows if n_iterations changes -- verify.
    pyplot.plot(list(summary.iloc[307:407, 0]))
    pyplot.axvline(x=change_point, ymin=0, ymax=1, color='r', linestyle='--')
    pyplot.scatter(x=np.arange(n_iterations), y=np.zeros(n_iterations), c=educate_or_recommend, s=1.5, marker="x",
                   cmap="bone")
    pyplot.savefig("interaction_alpha_e{}_test.png".format(educability), dpi=300)
| [
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"torch.distributions.Bernoulli",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.max",
"arviz.plot_trace",
"numpy.arange",
"test_stan.fit_model_w_education",
"numpy.random.choice",
"torch.zeros",
"generate_data",
"seaborn.... | [((186, 195), 'seaborn.set', 'sns.set', ([], {}), '()\n', (193, 195), True, 'import seaborn as sns\n'), ((1726, 1782), 'generate_data', 'generate_data', ([], {'n_noncollinear': '(50)', 'n_collinear': '(100)', 'n': '(100)'}), '(n_noncollinear=50, n_collinear=100, n=100)\n', (1739, 1782), False, 'import generate_data\n'), ((3170, 3191), 'arviz.plot_trace', 'arviz.plot_trace', (['fit'], {}), '(fit)\n', (3186, 3191), False, 'import arviz\n'), ((3196, 3209), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (3207, 3209), False, 'from matplotlib import pyplot\n'), ((3288, 3344), 'generate_data', 'generate_data', ([], {'n_noncollinear': '(50)', 'n_collinear': '(100)', 'n': '(100)'}), '(n_noncollinear=50, n_collinear=100, n=100)\n', (3301, 3344), False, 'import generate_data\n'), ((4729, 4750), 'arviz.plot_trace', 'arviz.plot_trace', (['fit'], {}), '(fit)\n', (4745, 4750), False, 'import arviz\n'), ((4755, 4768), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (4766, 4768), False, 'from matplotlib import pyplot\n'), ((4868, 4924), 'generate_data', 'generate_data', ([], {'n_noncollinear': '(50)', 'n_collinear': '(100)', 'n': '(100)'}), '(n_noncollinear=50, n_collinear=100, n=100)\n', (4881, 4924), False, 'import generate_data\n'), ((5048, 5069), 'seaborn.heatmap', 'sns.heatmap', (['corr_mat'], {}), '(corr_mat)\n', (5059, 5069), True, 'import seaborn as sns\n'), ((5074, 5087), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (5085, 5087), False, 'from matplotlib import pyplot\n'), ((7458, 7479), 'arviz.plot_trace', 'arviz.plot_trace', (['fit'], {}), '(fit)\n', (7474, 7479), False, 'import arviz\n'), ((7484, 7497), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7495, 7497), False, 'from matplotlib import pyplot\n'), ((7512, 7607), 'pandas.DataFrame', 'pandas.DataFrame', (["s['summary']"], {'columns': "s['summary_colnames']", 'index': "s['summary_rownames']"}), "(s['summary'], columns=s['summary_colnames'], index=s[\n 
'summary_rownames'])\n", (7528, 7607), False, 'import pandas\n'), ((7819, 7892), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'change_point', 'ymin': '(0)', 'ymax': '(1)', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(x=change_point, ymin=0, ymax=1, color='r', linestyle='--')\n", (7833, 7892), False, 'from matplotlib import pyplot\n'), ((2097, 2140), 'torch.zeros', 'torch.zeros', (['(n_covars + 1)'], {'dtype': 'torch.bool'}), '(n_covars + 1, dtype=torch.bool)\n', (2108, 2140), False, 'import torch\n'), ((2192, 2232), 'numpy.random.choice', 'np.random.choice', (['n_covars', 'n_iterations'], {}), '(n_covars, n_iterations)\n', (2208, 2232), True, 'import numpy as np\n'), ((3659, 3702), 'torch.zeros', 'torch.zeros', (['(n_covars + 1)'], {'dtype': 'torch.bool'}), '(n_covars + 1, dtype=torch.bool)\n', (3670, 3702), False, 'import torch\n'), ((3753, 3793), 'numpy.random.choice', 'np.random.choice', (['n_covars', 'n_iterations'], {}), '(n_covars, n_iterations)\n', (3769, 3793), True, 'import numpy as np\n'), ((5347, 5390), 'torch.zeros', 'torch.zeros', (['(n_covars + 1)'], {'dtype': 'torch.bool'}), '(n_covars + 1, dtype=torch.bool)\n', (5358, 5390), False, 'import torch\n'), ((5444, 5484), 'numpy.random.choice', 'np.random.choice', (['n_covars', 'n_iterations'], {}), '(n_covars, n_iterations)\n', (5460, 5484), True, 'import numpy as np\n'), ((5518, 5565), 'numpy.random.choice', 'np.random.choice', (['(2)', 'n_iterations'], {'p': '(0.5, 0.5)'}), '(2, n_iterations, p=(0.5, 0.5))\n', (5534, 5565), True, 'import numpy as np\n'), ((7336, 7390), 'test_stan.fit_model_w_education', 'test_stan.fit_model_w_education', (['data_dict', 'model_file'], {}), '(data_dict, model_file)\n', (7367, 7390), False, 'import test_stan\n'), ((2641, 2693), 'torch.tensor', 'torch.tensor', (['[corr_mat[act_in, -1], max_cross_corr]'], {}), '([corr_mat[act_in, -1], max_cross_corr])\n', (2653, 2693), False, 'import torch\n'), ((3111, 3165), 'test_stan.fit_model_w_education', 
'test_stan.fit_model_w_education', (['data_dict', 'model_file'], {}), '(data_dict, model_file)\n', (3142, 3165), False, 'import test_stan\n'), ((4202, 4254), 'torch.tensor', 'torch.tensor', (['[corr_mat[act_in, -1], max_cross_corr]'], {}), '([corr_mat[act_in, -1], max_cross_corr])\n', (4214, 4254), False, 'import torch\n'), ((4670, 4724), 'test_stan.fit_model_w_education', 'test_stan.fit_model_w_education', (['data_dict', 'model_file'], {}), '(data_dict, model_file)\n', (4701, 4724), False, 'import test_stan\n'), ((6167, 6219), 'torch.tensor', 'torch.tensor', (['[corr_mat[act_in, -1], max_cross_corr]'], {}), '([corr_mat[act_in, -1], max_cross_corr])\n', (6179, 6219), False, 'import torch\n'), ((7915, 7938), 'numpy.arange', 'np.arange', (['n_iterations'], {}), '(n_iterations)\n', (7924, 7938), True, 'import numpy as np\n'), ((7942, 7964), 'numpy.zeros', 'np.zeros', (['n_iterations'], {}), '(n_iterations)\n', (7950, 7964), True, 'import numpy as np\n'), ((430, 457), 'torch.distributions.Bernoulli', 'dist.Bernoulli', (['educability'], {}), '(educability)\n', (444, 457), True, 'import torch.distributions as dist\n'), ((548, 576), 'torch.distributions.Bernoulli', 'dist.Bernoulli', ([], {'logits': 'probs'}), '(logits=probs)\n', (562, 576), True, 'import torch.distributions as dist\n'), ((829, 856), 'torch.distributions.Bernoulli', 'dist.Bernoulli', (['educability'], {}), '(educability)\n', (843, 856), True, 'import torch.distributions as dist\n'), ((947, 975), 'torch.distributions.Bernoulli', 'dist.Bernoulli', ([], {'logits': 'probs'}), '(logits=probs)\n', (961, 975), True, 'import torch.distributions as dist\n'), ((1578, 1606), 'torch.distributions.Bernoulli', 'dist.Bernoulli', ([], {'logits': 'probs'}), '(logits=probs)\n', (1592, 1606), True, 'import torch.distributions as dist\n'), ((2549, 2563), 'numpy.max', 'np.max', (['masked'], {}), '(masked)\n', (2555, 2563), True, 'import numpy as np\n'), ((2748, 2792), 'torch.tensor', 'torch.tensor', (['W_typezero'], {'dtype': 
'torch.double'}), '(W_typezero, dtype=torch.double)\n', (2760, 2792), False, 'import torch\n'), ((4110, 4124), 'numpy.max', 'np.max', (['masked'], {}), '(masked)\n', (4116, 4124), True, 'import numpy as np\n'), ((4308, 4351), 'torch.tensor', 'torch.tensor', (['W_typeone'], {'dtype': 'torch.double'}), '(W_typeone, dtype=torch.double)\n', (4320, 4351), False, 'import torch\n'), ((6075, 6089), 'numpy.max', 'np.max', (['masked'], {}), '(masked)\n', (6081, 6089), True, 'import numpy as np\n'), ((6275, 6332), 'torch.tensor', 'torch.tensor', (['[W_typezero, W_typeone]'], {'dtype': 'torch.double'}), '([W_typezero, W_typeone], dtype=torch.double)\n', (6287, 6332), False, 'import torch\n'), ((6802, 6859), 'torch.tensor', 'torch.tensor', (['[W_typezero, W_typeone]'], {'dtype': 'torch.double'}), '([W_typezero, W_typeone], dtype=torch.double)\n', (6814, 6859), False, 'import torch\n'), ((1341, 1388), 'torch.distributions.Bernoulli', 'dist.Bernoulli', (['educability_per_type[user_type]'], {}), '(educability_per_type[user_type])\n', (1355, 1388), True, 'import torch.distributions as dist\n')] |
# Copyright 2020 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import numpy as np
import sys; sys.path.append(".")
import kubric.pylab as kb
class BouncingBallsWorker(kb.Worker):
  """Kubric worker that simulates and renders balls bouncing in a walled room.

  Scene content (number/shape/color of balls) and physics coefficients
  (restitution, friction) are all configured through the command-line flags
  declared in `get_argparser`.
  """
  def get_argparser(self):
    """Extend the base worker's parser with scene-specific flags."""
    parser = super().get_argparser()
    # add additional commandline arguments
    parser.add_argument("--min_nr_balls", type=int, default=4)
    parser.add_argument("--max_nr_balls", type=int, default=4)
    parser.add_argument("--ball_radius", type=float, default=0.2)
    parser.add_argument("--restitution", type=float, default=1.)
    parser.add_argument("--friction", type=float, default=.0)
    parser.add_argument("--color", type=str, default="cat4")
    parser.add_argument("--shape", type=str, default="sphere")
    return parser
  def add_room(self):
    """Add a static floor plus four walls as (non-rendered-factor) background.

    The floor is fully inelastic and frictionless; the walls use the
    restitution/friction configured on the command line.
    """
    floor_material = kb.FlatMaterial(color=kb.get_color('black'),
                                    indirect_visibility=False)
    wall_material = kb.FlatMaterial(color=kb.get_color('white'),
                                   indirect_visibility=False)
    room_dynamics = {
        "restitution": self.config.restitution,
        "friction": self.config.friction,
    }
    floor = kb.Cube(scale=(1, 1, 0.9), position=(0, 0, -0.9),
                    material=floor_material, static=True, restitution=0.,
                    friction=0.)
    north_wall = kb.Cube(scale=(1.2, 0.9, 1), position=(0, 1.9, 0.9),
                         material=wall_material, static=True, **room_dynamics)
    south_wall = kb.Cube(scale=(1.2, 0.9, 1), position=(0, -1.9, 0.9),
                         material=wall_material, static=True, **room_dynamics)
    east_wall = kb.Cube(scale=(0.9, 1, 1), position=(1.9, 0, 0.9),
                        material=wall_material, static=True, **room_dynamics)
    west_wall = kb.Cube(scale=(0.9, 1, 1), position=(-1.9, 0, 0.9),
                        material=wall_material, static=True, **room_dynamics)
    self.add(floor, north_wall, south_wall, east_wall, west_wall,
             is_background=True)
  def add_camera(self):
    """Add a top-down orthographic camera and make it the scene camera."""
    camera = kb.OrthographicCamera(position=(0, 0, 3), orthographic_scale=2.2)
    # looks down by default
    self.add(camera)
    self.scene.camera = camera
  def get_colors(self, num):
    """Return `num` ball colors according to the --color flag.

    Supported options: "uniform_hsv" (random hues), "fixed" (evenly spaced
    hues), "catN" (sample with replacement from N hues), "noreplaceN"
    (sample without replacement from N hues).
    NOTE(review): falls through and returns None for any other value.
    """
    if self.config.color == "uniform_hsv":
      return [kb.random_hue_color(rnd=self.rnd) for _ in range(num)]
    if self.config.color == "fixed":
      hues = np.linspace(0, 1., num, endpoint=False)
      return [kb.Color.from_hsv(hue, 1., 1.) for hue in hues]
    if self.config.color.startswith("cat"):
      num_colors = int(self.config.color[3:])
      all_hues = np.linspace(0, 1., num_colors, endpoint=False)
      hues = self.rnd.choice(all_hues, size=num)
      return [kb.Color.from_hsv(hue, 1., 1.) for hue in hues]
    if self.config.color.startswith("noreplace"):
      num_colors = int(self.config.color[9:])
      all_hues = np.linspace(0, 1., num_colors, endpoint=False)
      hues = self.rnd.choice(all_hues, size=num, replace=False)
      return [kb.Color.from_hsv(hue, 1., 1.) for hue in hues]
  def get_random_ball(self, color):
    """Create one ball of the given color with a random in-plane velocity.

    Shape follows --shape ("cube", "sphere", or "mixed" for a random pick);
    cubes additionally get a random rotation about the z axis.
    """
    velocity_range = (-1, -1, 0), (1, 1, 0)
    ball_material = kb.FlatMaterial(color=color,
                                   indirect_visibility=False)
    shape = self.config.shape
    if shape == "mixed":
      shape = self.rnd.choice(["cube", "sphere"])
    if shape == "cube":
      ball = kb.Cube(scale=[self.config.ball_radius]*3,
                    material=ball_material,
                    friction=self.config.friction,
                    restitution=self.config.restitution,
                    quaternion=kb.random_rotation([0, 0, 1], self.rnd),
                    velocity=self.rnd.uniform(*velocity_range))
    elif shape == "sphere":
      ball = kb.Sphere(scale=[self.config.ball_radius]*3,
                      material=ball_material,
                      friction=self.config.friction,
                      restitution=self.config.restitution,
                      velocity=self.rnd.uniform(*velocity_range))
    else:
      raise ValueError(f"Unknown shape type '{shape}'")
    return ball
  def run(self):
    """Build the scene, simulate, render, and export everything.

    Exported artifacts: simulator state, renderer state, and post-processed
    output (including per-ball ground-truth factors: color, mass, keyframes),
    bundled under a timestamped name in `self.output_dir`.
    """
    self.add_camera()
    self.add_room()
    # balls spawn just above the floor within the room footprint
    spawn_area = (-1, -1, 0), (1, 1, 2.1*self.config.ball_radius)
    balls = []
    nr_objects = self.rnd.randint(self.config.min_nr_balls, self.config.max_nr_balls+1)
    colors = self.get_colors(nr_objects)
    for color in colors:
      ball = self.get_random_ball(color)
      self.place_without_overlap(ball, [kb.position_sampler(spawn_area)])
      balls.append(ball)
    sim_path = self.save_simulator_state()
    self.run_simulation()
    render_path = self.save_renderer_state()
    self.render()
    output = self.post_process()
    # collect ground-truth factors
    output["factors"] = []
    for i, obj in enumerate(balls):
      output["factors"].append({
          "color": obj.material.color.rgb,
          "mass": obj.mass,
          "animation": obj.keyframes,
      })
    out_path = self.save_output(output)
    name = datetime.datetime.now().strftime("%b%d_%H-%M-%S")
    self.export(self.output_dir, name, files_list=[sim_path, render_path, out_path])
# Script entry point: instantiate the worker, run setup (flags, scene,
# renderer), then execute the full simulate/render/export pipeline.
if __name__ == '__main__':
  worker = BouncingBallsWorker()
  worker.setup()
  worker.run()
| [
"sys.path.append",
"kubric.pylab.FlatMaterial",
"kubric.pylab.random_rotation",
"kubric.pylab.Cube",
"kubric.pylab.random_hue_color",
"kubric.pylab.OrthographicCamera",
"kubric.pylab.position_sampler",
"numpy.linspace",
"kubric.pylab.Color.from_hsv",
"datetime.datetime.now",
"kubric.pylab.get_co... | [((630, 650), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (645, 650), False, 'import sys\n'), ((1698, 1820), 'kubric.pylab.Cube', 'kb.Cube', ([], {'scale': '(1, 1, 0.9)', 'position': '(0, 0, -0.9)', 'material': 'floor_material', 'static': '(True)', 'restitution': '(0.0)', 'friction': '(0.0)'}), '(scale=(1, 1, 0.9), position=(0, 0, -0.9), material=floor_material,\n static=True, restitution=0.0, friction=0.0)\n', (1705, 1820), True, 'import kubric.pylab as kb\n'), ((1872, 1982), 'kubric.pylab.Cube', 'kb.Cube', ([], {'scale': '(1.2, 0.9, 1)', 'position': '(0, 1.9, 0.9)', 'material': 'wall_material', 'static': '(True)'}), '(scale=(1.2, 0.9, 1), position=(0, 1.9, 0.9), material=wall_material,\n static=True, **room_dynamics)\n', (1879, 1982), True, 'import kubric.pylab as kb\n'), ((2021, 2133), 'kubric.pylab.Cube', 'kb.Cube', ([], {'scale': '(1.2, 0.9, 1)', 'position': '(0, -1.9, 0.9)', 'material': 'wall_material', 'static': '(True)'}), '(scale=(1.2, 0.9, 1), position=(0, -1.9, 0.9), material=\n wall_material, static=True, **room_dynamics)\n', (2028, 2133), True, 'import kubric.pylab as kb\n'), ((2170, 2278), 'kubric.pylab.Cube', 'kb.Cube', ([], {'scale': '(0.9, 1, 1)', 'position': '(1.9, 0, 0.9)', 'material': 'wall_material', 'static': '(True)'}), '(scale=(0.9, 1, 1), position=(1.9, 0, 0.9), material=wall_material,\n static=True, **room_dynamics)\n', (2177, 2278), True, 'import kubric.pylab as kb\n'), ((2315, 2424), 'kubric.pylab.Cube', 'kb.Cube', ([], {'scale': '(0.9, 1, 1)', 'position': '(-1.9, 0, 0.9)', 'material': 'wall_material', 'static': '(True)'}), '(scale=(0.9, 1, 1), position=(-1.9, 0, 0.9), material=wall_material,\n static=True, **room_dynamics)\n', (2322, 2424), True, 'import kubric.pylab as kb\n'), ((2582, 2647), 'kubric.pylab.OrthographicCamera', 'kb.OrthographicCamera', ([], {'position': '(0, 0, 3)', 'orthographic_scale': '(2.2)'}), '(position=(0, 0, 3), orthographic_scale=2.2)\n', (2603, 2647), True, 
'import kubric.pylab as kb\n'), ((3675, 3730), 'kubric.pylab.FlatMaterial', 'kb.FlatMaterial', ([], {'color': 'color', 'indirect_visibility': '(False)'}), '(color=color, indirect_visibility=False)\n', (3690, 3730), True, 'import kubric.pylab as kb\n'), ((2920, 2960), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'num'], {'endpoint': '(False)'}), '(0, 1.0, num, endpoint=False)\n', (2931, 2960), True, 'import numpy as np\n'), ((3129, 3176), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'num_colors'], {'endpoint': '(False)'}), '(0, 1.0, num_colors, endpoint=False)\n', (3140, 3176), True, 'import numpy as np\n'), ((3401, 3448), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'num_colors'], {'endpoint': '(False)'}), '(0, 1.0, num_colors, endpoint=False)\n', (3412, 3448), True, 'import numpy as np\n'), ((1352, 1373), 'kubric.pylab.get_color', 'kb.get_color', (['"""black"""'], {}), "('black')\n", (1364, 1373), True, 'import kubric.pylab as kb\n'), ((1481, 1502), 'kubric.pylab.get_color', 'kb.get_color', (['"""white"""'], {}), "('white')\n", (1493, 1502), True, 'import kubric.pylab as kb\n'), ((2815, 2848), 'kubric.pylab.random_hue_color', 'kb.random_hue_color', ([], {'rnd': 'self.rnd'}), '(rnd=self.rnd)\n', (2834, 2848), True, 'import kubric.pylab as kb\n'), ((2974, 3006), 'kubric.pylab.Color.from_hsv', 'kb.Color.from_hsv', (['hue', '(1.0)', '(1.0)'], {}), '(hue, 1.0, 1.0)\n', (2991, 3006), True, 'import kubric.pylab as kb\n'), ((3239, 3271), 'kubric.pylab.Color.from_hsv', 'kb.Color.from_hsv', (['hue', '(1.0)', '(1.0)'], {}), '(hue, 1.0, 1.0)\n', (3256, 3271), True, 'import kubric.pylab as kb\n'), ((3526, 3558), 'kubric.pylab.Color.from_hsv', 'kb.Color.from_hsv', (['hue', '(1.0)', '(1.0)'], {}), '(hue, 1.0, 1.0)\n', (3543, 3558), True, 'import kubric.pylab as kb\n'), ((5544, 5567), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5565, 5567), False, 'import datetime\n'), ((4139, 4178), 'kubric.pylab.random_rotation', 'kb.random_rotation', 
(['[0, 0, 1]', 'self.rnd'], {}), '([0, 0, 1], self.rnd)\n', (4157, 4178), True, 'import kubric.pylab as kb\n'), ((5019, 5050), 'kubric.pylab.position_sampler', 'kb.position_sampler', (['spawn_area'], {}), '(spawn_area)\n', (5038, 5050), True, 'import kubric.pylab as kb\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 2 13:15:35 2018
Modified Sat Dec 1 2018 (fix save/import issue)
Modified Wed Dec 5 2018 (Fix Issue 2, Handle DC Loads)
Modified on 02/22/2019 for version 0.1.0
Modified on 04/11/2021 to address Issues #10, 12, & 13 related to improving
Site Load Definition performance and ease of use
@author: <NAME>
-------------------------------------------------------------------------------
Name: SiteLoad.py
Purpose: Provide Methods for Building and Maintaining the Site Energy
Load Table as a Panda DataFrame
Copyright: (c) <NAME> 2018
License: GNU General Public License, version 3 (GPL-3.0)
This program is distributed WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.
-------------------------------------------------------------------------------
"""
import numpy as np
import pandas as pd
import Parameters as sp
from DataFrame import DataFrame
import guiFrames as tbf
def findindex(val):
    """ Return the column index of val in sp.load_fields.

    Parameters
    ----------
    val : str
        Column label to look up.

    Returns
    -------
    int
        Index of the column, or -1 if val is not a column label.
    """
    try:
        return sp.load_fields.index(val)
    except ValueError:
        # list.index raises ValueError for a missing label; the original
        # bare `except:` would also have swallowed SystemExit and
        # KeyboardInterrupt.
        return -1
class SiteLoad(DataFrame):
    """ A Panda DataFrame Structure containing the Site Energy Load
        Characteristics"""
    def __init__(self, master= None):
        # master is presumably the owning GUI controller; its `stw` status
        # window (if any) is used for error reporting -- see report_error.
        self.master = master
        DataFrame.__init__(self, sp.load_fields, sp.load_field_types)
    def addRow(self, typ, qty=None, uf=None, hrs=None, st=0, wts=None,
               mde=None):
        """ Create a new entry in the load array from individual items
            (type, quantity, use factor, hours, start hour, watts, mode) """
        ar = [typ, qty, uf, hrs, st, wts, mde]
        self.add_new_row(ar)
    def getDefaultRowValues(self, load_type):
        """ Return the dictionary of default values for this type """
        return sp.load_types[load_type]
    def setStdRowValues(self, ar):
        """ Update AR based on change of Load Type """
        if ar[0] != '':
            key = ar[0]
            if key in sp.load_types.keys():
                od = sp.load_types[key]
                for sks in od.keys():
                    indx = self.get_col_indx(sks)
                    # index 0 is skipped -- presumably the Type column,
                    # which is already held in ar[0]; verify against
                    # sp.load_fields ordering.
                    if indx > 0:
                        ar[indx] = od[sks]
        return ar
    def getTypeOptions(self):
        """ Return sorted list of Load Type options """
        ret = list(sp.load_types.keys())
        ret.sort()
        return ret
    def get_daily_load(self):
        """ Return the total electrical load for a day """
        return sum(self.get_load_profile()['Total'])
    def get_demand_hours(self):
        """ Return the number of hours in the day with a non-zero load """
        elp = self.get_load_profile()
        return len(elp.loc[elp['Total'] > 0])
    def get_load_profile(self):
        """ Return a Dataframe of hourly usage by AC, DC and Total Power
            for the given load over a 24 hour period """
        ac_rslt = [0.0]*24
        dc_rslt = [0.0]*24
        tl_rslt = [0.0]*24
        for dfrw in range(self.get_row_count()):
            ac_mode = True
            ldvals = self.get_row_by_index(dfrw)
            if ldvals[sp.load_fields.index('Mode')] == 'DC':
                ac_mode = False
            # hourly wattage = quantity * use factor * per-unit watts
            hr_wts = (ldvals[sp.load_fields.index('Qty')] *
                      ldvals[sp.load_fields.index('Use Factor')] *
                      ldvals[sp.load_fields.index('Watts')] )
            # blank/unset start hour defaults to midnight
            st = ldvals[sp.load_fields.index('Start Hour')]
            if type(st) is str or st is None:
                st = 0
            # blank/unset hours-per-day defaults to all day
            hpd = ldvals[sp.load_fields.index('Hours')]
            if type(hpd) is str or hpd is None:
                hpd = 24
            et = hpd + st
            for h in range(24):
                if et < 24:
                    # schedule fits within one calendar day
                    if h >= st and h < et:
                        if ac_mode:
                            ac_rslt[h] += hr_wts
                        else:
                            dc_rslt[h] += hr_wts
                else:
                    # schedule wraps past midnight: hour h is loaded if it
                    # lies after the start or before the wrapped end time
                    if h >= st or h + 24 < et:
                        if ac_mode:
                            ac_rslt[h] += hr_wts
                        else:
                            dc_rslt[h] += hr_wts
        for i in range(24):
            tl_rslt[i] = ac_rslt[i] + dc_rslt[i]
        return pd.DataFrame({'AC': ac_rslt, 'DC': dc_rslt, 'Total':tl_rslt})
    def show_load_profile(self, window):
        """ Build & display the load profile graphic with peak, daily and
            average-hourly summaries (watt values are shown as KW) """
        elp = self.get_load_profile()
        dmd_hrs = len(elp.loc[elp['Total'] > 0])
        if dmd_hrs > 0:
            pls = 'Peak Hourly Load KW: Total= {0:4.2f},\tAC= {1:4.2f},\tDC= {2:4.2f}'
            pl = pls.format(max(elp['Total'])/1000, (max(elp['AC']))/1000,
                            (max(elp['DC']))/1000)
            tdls = 'Daily Load KW: Total= {0:4.2f},\tAC= {1:4.2f},\tDC= {2:4.2f}'
            tdl = tdls.format(sum(elp['Total'])/1000, sum(elp['AC'])/1000,
                              sum(elp['DC'])/1000)
            avhs = 'Avg Hourly Load KW: Total= {0:4.2f},\tAC= {1:4.2f},\tDC= {2:4.2f}'
            # average is taken over demand hours only, not all 24 hours
            avhl = avhs.format(sum(elp['Total'])/(1000*dmd_hrs),
                               sum(elp['AC'])/(1000*dmd_hrs),
                               sum(elp['DC'])/(1000*dmd_hrs))
            pltlist = [{'label': 'Load', 'data': np.array(elp['Total']),
                        'type': 'Bar', 'color': 'grey', 'width': 0.4,
                        'xaxis':np.array([x for x in range(24)])}]
            tbf.plot_graphic(window, 'Hour of Day', 'Watts',
                             np.array([x for x in range(24)]),
                             pltlist,'Hourly Electrical Use Profile', (6,4),
                             text_inserts= [pl,tdl,avhl])
    def report_error(self, msg, level, error_class):
        """ Generate Error Report: show it in the master's status window if
            one exists, else print it or raise error_class(msg) """
        if self.master is None or self.master.stw is None:
            if error_class is None:
                print('{0} Error: {1}'.format(level, msg))
            else:
                raise error_class(msg)
        else:
            self.master.stw.show_message(msg, level)
    def check_arg_definition(self):
        """ Verify the load is defined; return (bool, message) """
        elp = self.get_load_profile()
        if sum(elp['Total']) == 0.0:
            return False, 'Electrical Load is unspecified'
        return True, ""
    def check_definition(self):
        """ Check Load Definition and if rslt is False
            Report in status window if identified else raise Error
            return rslt """
        rslt, msg = self.check_arg_definition()
        if not rslt:
            self.report_error(msg, "Fatal", AttributeError )
        return rslt
def main():
    """ Demo: populate a SiteLoad with sample loads and print its profile """
    site = SiteLoad()
    sample_loads = [
        ['Light, LED', 15, 0.30, "", "", 5.0, 'AC'],
        ['Light, LED', 8, 0.85, 2, 6, 5.0, 'AC'],
        ['Light, Halogen', 10, 0.95, 5, 18, 35.0, 'AC'],
        ['Well Pump DC, 1 HP', 1, 0.35, 12, 8, 500.0, 'DC'],
        ['Phone Charger', 10, 0.45, 12, 6, 2.0, 'DC'],
    ]
    for load_row in sample_loads:
        site.add_new_row(load_row)
    profile = site.get_load_profile()
    print(profile)
    print(site.get_dataframe())
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"Parameters.load_fields.index",
"DataFrame.DataFrame.__init__",
"numpy.array",
"Parameters.load_types.keys"
] | [((1235, 1260), 'Parameters.load_fields.index', 'sp.load_fields.index', (['val'], {}), '(val)\n', (1255, 1260), True, 'import Parameters as sp\n'), ((1495, 1556), 'DataFrame.DataFrame.__init__', 'DataFrame.__init__', (['self', 'sp.load_fields', 'sp.load_field_types'], {}), '(self, sp.load_fields, sp.load_field_types)\n', (1513, 1556), False, 'from DataFrame import DataFrame\n'), ((4353, 4415), 'pandas.DataFrame', 'pd.DataFrame', (["{'AC': ac_rslt, 'DC': dc_rslt, 'Total': tl_rslt}"], {}), "({'AC': ac_rslt, 'DC': dc_rslt, 'Total': tl_rslt})\n", (4365, 4415), True, 'import pandas as pd\n'), ((2482, 2502), 'Parameters.load_types.keys', 'sp.load_types.keys', ([], {}), '()\n', (2500, 2502), True, 'import Parameters as sp\n'), ((2139, 2159), 'Parameters.load_types.keys', 'sp.load_types.keys', ([], {}), '()\n', (2157, 2159), True, 'import Parameters as sp\n'), ((3501, 3535), 'Parameters.load_fields.index', 'sp.load_fields.index', (['"""Start Hour"""'], {}), "('Start Hour')\n", (3521, 3535), True, 'import Parameters as sp\n'), ((3631, 3660), 'Parameters.load_fields.index', 'sp.load_fields.index', (['"""Hours"""'], {}), "('Hours')\n", (3651, 3660), True, 'import Parameters as sp\n'), ((3205, 3233), 'Parameters.load_fields.index', 'sp.load_fields.index', (['"""Mode"""'], {}), "('Mode')\n", (3225, 3233), True, 'import Parameters as sp\n'), ((3444, 3473), 'Parameters.load_fields.index', 'sp.load_fields.index', (['"""Watts"""'], {}), "('Watts')\n", (3464, 3473), True, 'import Parameters as sp\n'), ((5386, 5408), 'numpy.array', 'np.array', (["elp['Total']"], {}), "(elp['Total'])\n", (5394, 5408), True, 'import numpy as np\n'), ((3317, 3344), 'Parameters.load_fields.index', 'sp.load_fields.index', (['"""Qty"""'], {}), "('Qty')\n", (3337, 3344), True, 'import Parameters as sp\n'), ((3377, 3411), 'Parameters.load_fields.index', 'sp.load_fields.index', (['"""Use Factor"""'], {}), "('Use Factor')\n", (3397, 3411), True, 'import Parameters as sp\n')] |
#!usr/bin/env python3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
seed_number = 1234  # NOTE(review): defined but never used -- draws are pre-generated in the CSV
norm_dist = "random_draws_normal.csv"
#import data
# Load pre-generated standard-normal draws; first CSV column is the index.
infile = norm_dist
random_numbers = pd.read_csv(infile, index_col=0)
random_numbers.columns = range(random_numbers.shape[1])  # relabel columns 0..n-1
#parameters
mean = 0.065  # annual mean return (continuously compounded)
theta = 0.2   # annual volatility
#monthly mean is simply =annual_mean/12 because returns are continuous (e^mu etc)
monthly_mean = mean/12
monthly_theta = theta/(12**(0.5))  # volatility scales with sqrt(time)
# Scale/shift the standard-normal draws into monthly log-returns.
returns = pd.DataFrame(random_numbers*monthly_theta + monthly_mean)
#values:
values = pd.DataFrame(index=range(random_numbers.shape[0]),columns=range(random_numbers.shape[1]))
# Each row is one simulated path starting at $200, compounded month by month.
values[0] = 200*np.exp(returns[0])
for i in range(1,random_numbers.shape[1]):
    values[i] = values[i-1]*np.exp(returns[i])
# Summary statistics after 6, 12 and 24 months (columns are 0-based).
for i in (5,11,23):
    print()
    print("After {} months:".format(i+1))
    print("The mean asset value is: ${:,.2f}".format(values[i].mean()))
    print("The standard deviation is: ${:,.2f}".format(values[i].std()))
    print("The skew is: {:,.2f}".format(values[i].skew()))
# Build the empirical CDF of month-12 asset values: equal weight per path,
# sort by value, then cumulate.
month_12 = pd.DataFrame(values[11])
month_12['pdf'] = 1/month_12.shape[0]
month_12 = month_12.sort_values([11], ascending=[1])
month_12['cdf'] = month_12['pdf'].cumsum()
plt.plot(month_12[11],month_12['cdf'], color = 'royalblue', linewidth = 4)
plt.xlabel('Asset Value ($)')
plt.ylabel('Cumulative Probability')
plt.title("Simulated Probability Distribution in Month 12", loc='center')
# NOTE(review): style.use is called after the artists were created; to take
# effect it should normally precede the plotting calls -- confirm intent.
plt.style.use('ggplot')
plt.show() | [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.style.use",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((230, 262), 'pandas.read_csv', 'pd.read_csv', (['infile'], {'index_col': '(0)'}), '(infile, index_col=0)\n', (241, 262), True, 'import pandas as pd\n'), ((507, 566), 'pandas.DataFrame', 'pd.DataFrame', (['(random_numbers * monthly_theta + monthly_mean)'], {}), '(random_numbers * monthly_theta + monthly_mean)\n', (519, 566), True, 'import pandas as pd\n'), ((1072, 1096), 'pandas.DataFrame', 'pd.DataFrame', (['values[11]'], {}), '(values[11])\n', (1084, 1096), True, 'import pandas as pd\n'), ((1232, 1303), 'matplotlib.pyplot.plot', 'plt.plot', (['month_12[11]', "month_12['cdf']"], {'color': '"""royalblue"""', 'linewidth': '(4)'}), "(month_12[11], month_12['cdf'], color='royalblue', linewidth=4)\n", (1240, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1336), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Asset Value ($)"""'], {}), "('Asset Value ($)')\n", (1317, 1336), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Probability"""'], {}), "('Cumulative Probability')\n", (1347, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1374, 1447), 'matplotlib.pyplot.title', 'plt.title', (['"""Simulated Probability Distribution in Month 12"""'], {'loc': '"""center"""'}), "('Simulated Probability Distribution in Month 12', loc='center')\n", (1383, 1447), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1471), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1461, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1472, 1482), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1480, 1482), True, 'import matplotlib.pyplot as plt\n'), ((689, 707), 'numpy.exp', 'np.exp', (['returns[0]'], {}), '(returns[0])\n', (695, 707), True, 'import numpy as np\n'), ((777, 795), 'numpy.exp', 'np.exp', (['returns[i]'], {}), '(returns[i])\n', (783, 795), True, 'import numpy as np\n')] |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
# Census-style record; columns presumably are [age, education-num, race, sex,
# capital-gain, capital-loss, hours-per-week, income] -- verify against the
# source dataset's column order.
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
# NOTE(review): `path` is not defined in this script; presumably injected by
# the hosting environment before execution.
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
#concatenating a new record to a existing array file
census = np.concatenate((data, new_record))
#filtering the age column
age = census[:,0]
#statistics of age determination to visualize the major groups of people
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
#filtering the race column
race = census[:,2]
#filtering the different types of races
race_0, race_1, race_2, race_3, race_4 = race[race==0], race[race==1], race[race==2], race[race==3], race[race==4]
#determining the number of people in each races
[len_0, len_1, len_2, len_3, len_4] = [len(race_0), len(race_1), len(race_2), len(race_3), len(race_4)]
l = [len_0, len_1, len_2, len_3, len_4]
#finding out the minority race
# position of the smallest count doubles as the race code (codes are 0..4)
minority_race = l.index(min(l))
print('Race ',minority_race)
#filtering the senior citizens
senior_citizens = census[age>60]
#total working hours
working_hours_sum = np.sum(senior_citizens[:,6])
print(working_hours_sum)
#number of senior citizens
senior_citizens_len = len(senior_citizens)
#average working hour of one senior citizen
avg_working_hours = round(working_hours_sum/senior_citizens_len,2)
print(avg_working_hours)
#filtering number of education had
education_num = census[:,1]
#filtering the highly and poorly educated people
high = census[education_num>10,:]
low = census[education_num<=10,:]
#finding out the average income
avg_pay_high = round(np.mean(high[:,7]),2)
avg_pay_low = round(np.mean(low[:,7]),2)
print(avg_pay_high,avg_pay_low)
#justifying whether education plays a role in income
if avg_pay_high>avg_pay_low:
    print('Better education leads to higher income')
else:
    print('Education does\'nt matter to get a higher pay')
| [
"numpy.sum",
"warnings.filterwarnings",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.concatenate"
] | [((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((340, 374), 'numpy.concatenate', 'np.concatenate', (['(data, new_record)'], {}), '((data, new_record))\n', (354, 374), True, 'import numpy as np\n'), ((510, 521), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (516, 521), True, 'import numpy as np\n'), ((533, 544), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (539, 544), True, 'import numpy as np\n'), ((557, 569), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (564, 569), True, 'import numpy as np\n'), ((581, 592), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (587, 592), True, 'import numpy as np\n'), ((1212, 1241), 'numpy.sum', 'np.sum', (['senior_citizens[:, 6]'], {}), '(senior_citizens[:, 6])\n', (1218, 1241), True, 'import numpy as np\n'), ((1729, 1748), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1736, 1748), True, 'import numpy as np\n'), ((1772, 1790), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1779, 1790), True, 'import numpy as np\n')] |
"""
Nearest-neighbor distance distribution analysis
Nearest-neighbor distance distributions provide information about deviations from a spatial homogeneous Poisson process
(i.e. complete spatial randomness, CSR).
Point-event distances are given by the distance between a random point (not being an event) and the nearest event.
The point-event distance distribution is estimated from a number of random sample points and plotted in comparison to
the analytical function for equal localization density.
For a homogeneous 2D Poisson process with intensity :math:`\\rho` (expected number of points per unit area) the distance
from a randomly chosen event to the nearest other event (nearest-neighbor distance) is distributed according to the
following probability density (pdf) or cumulative density function (cdf) [1]_:
.. math::
pdf(w) &= 2 \\rho \\pi w \\ exp(- \\rho \\pi w^2)
cdf(w) &= 1 - exp (- \\rho \\pi w^2)
The same distribution holds for point-event distances if events are distributed as a homogeneous Poisson process with
intensity :math:`\\rho`.
References
----------
.. [1] <NAME>, Nearest Neighbor Methods,
Department of Statistics, Iowa State University,
20 December 2001
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.neighbors import NearestNeighbors
from locan.analysis.analysis_base import _Analysis, _list_parameters
from locan.configuration import N_JOBS
__all__ = ["NearestNeighborDistances"]
logger = logging.getLogger(__name__)
#### The algorithms
def pdf_nnDistances_csr_2D(x, density):
    """
    Probability density function for nearest-neighbor distances of points
    distributed in 2D with complete spatial randomness.

    Parameters
    ----------
    x : float
        distance
    density : float
        density of points

    Returns
    -------
    float
        Probability density function pdf(x).
    """
    # pdf(w) = 2 * rho * pi * w * exp(-rho * pi * w^2)
    rate = density * np.pi
    return 2 * rate * x * np.exp(-rate * x ** 2)
def pdf_nnDistances_csr_3D(x, density):
    """
    Probability density function for nearest-neighbor distances of points
    distributed in 3D with complete spatial randomness.

    Parameters
    ----------
    x : float
        distance
    density : float
        density of points

    Returns
    -------
    float
        Probability density function pdf(x).
    """
    # scale parameter a = (3 / (4 pi rho))^(1/3); pdf(w) = 3/a (w/a)^2 exp(-(w/a)^3)
    a = (3 / 4 / np.pi / density) ** (1 / 3)
    u = x / a
    return 3 / a * u ** 2 * np.exp(-u ** 3)
def _nearest_neighbor_distances(points, k=1, other_points=None):
    """Return the k-th nearest-neighbor distance and index for each point.

    Parameters
    ----------
    points : array-like of shape (n_points, dimension)
        Coordinates for which neighbor distances are computed.
    k : int
        The k-th nearest neighbor to report (1 = nearest).
    other_points : array-like of shape (m_points, dimension), None
        If given, neighbors are searched in `other_points`; otherwise within
        `points` itself (each point excluded as its own neighbor).

    Returns
    -------
    pandas.DataFrame
        Columns `nn_distance` and `nn_index`, one row per point in `points`.
    """
    # Fit on whichever point set serves as the neighbor pool; this removes
    # the duplicated estimator construction of the two original branches.
    reference = points if other_points is None else other_points
    nn = NearestNeighbors(n_neighbors=k, metric="euclidean", n_jobs=N_JOBS).fit(
        reference
    )
    if other_points is None:
        # kneighbors() without an argument queries the fitted data and
        # excludes each sample from its own neighbor list.
        distances, indices = nn.kneighbors()
    else:
        distances, indices = nn.kneighbors(points)
    return pd.DataFrame(
        {"nn_distance": distances[..., k - 1], "nn_index": indices[..., k - 1]}
    )
# The specific analysis classes
class NearestNeighborDistances(_Analysis):
    """
    Compute the k-nearest-neighbor distances within data or between data and other_data.
    The algorithm relies on sklearn.neighbors.NearestNeighbors.
    Parameters
    ----------
    meta : locan.analysis.metadata_analysis_pb2.AMetadata
        Metadata about the current analysis routine.
    k : int
        Compute the kth nearest neighbor.
    Attributes
    ----------
    count : int
        A counter for counting instantiations.
    parameter : dict
        A dictionary with all settings for the current computation.
    meta : locan.analysis.metadata_analysis_pb2.AMetadata
        Metadata about the current analysis routine.
    results : numpy.ndarray, pandas.DataFrame
        Computed results.
    distribution_statistics : Distribution_stats, None
        Distribution parameters derived from MLE fitting of results.
    """
    # class-level instantiation counter -- presumably maintained by the
    # _Analysis base class; verify there.
    count = 0
    def __init__(self, meta=None, k=1):
        # k is forwarded to _Analysis and becomes part of self.parameter
        super().__init__(meta=meta, k=k)
        self.dimension = None
        self.localization_density = None
        self.results = None
        self.distribution_statistics = None
    def compute(self, locdata, other_locdata=None):
        """
        Run the computation.
        Parameters
        ----------
        locdata : LocData
            Localization data.
        other_locdata : LocData
            Other localization data from which nearest neighbors are taken.
        Returns
        -------
        Analysis class
            Returns the Analysis class object (self).
        """
        if not len(locdata):
            logger.warning("Locdata is empty.")
            return self
        self.dimension = locdata.dimension
        # setting the localization density of locdata
        if other_locdata is None:
            self.localization_density = locdata.properties["localization_density_bb"]
        else:
            if other_locdata.dimension != self.dimension:
                raise TypeError(
                    "Dimensions for locdata and other_locdata must be identical."
                )
            # with a second data set, CSR comparison uses its density
            self.localization_density = other_locdata.properties[
                "localization_density_bb"
            ]
        points = locdata.coordinates
        if other_locdata is None:
            other_points = None
        else:
            other_points = other_locdata.coordinates
        # self.parameter supplies k (collected by the _Analysis base)
        self.results = _nearest_neighbor_distances(
            points=points, **self.parameter, other_points=other_points
        )
        return self
    def fit_distributions(self, with_constraints=True):
        """
        Fit probability density functions to the distributions of `loc_property` values in the results
        using MLE (scipy.stats).
        If with_constraints is true we put the following constraints on the fit procedure:
        If distribution is expon then floc=np.min(self.analysis_class.results[self.loc_property].values).
        Parameters
        ----------
        distribution : str, scipy.stats.distribution
            Distribution model to fit.
        with_constraints : bool
            Flag to use predefined constraints on fit parameters.
        """
        # truthiness of the analysis object presumably reflects whether
        # results are available (defined by _Analysis) -- verify.
        if self:
            self.distribution_statistics = _DistributionFits(self)
            self.distribution_statistics.fit(with_constraints=with_constraints)
        else:
            logger.warning("No results available to fit.")
    def hist(self, ax=None, bins="auto", density=True, fit=False, **kwargs):
        """
        Provide histogram as :class:`matplotlib.axes.Axes` object showing hist(results).
        Parameters
        ----------
        ax : :class:`matplotlib.axes.Axes`
            The axes on which to show the image.
        bins : int, list, 'auto'
            Bin specification as used in :func:`matplotlib.hist`
        density : bool
            Flag for normalization as used in matplotlib.hist. True returns probability density function; None returns
            counts.
        fit : bool
            Flag indicating to fit pdf of nearest-neighbor distances under complete spatial randomness.
        kwargs : dict
            Other parameters passed to :func:`matplotlib.plot`.
        Returns
        -------
        :class:`matplotlib.axes.Axes`
            Axes object with the plot.
        """
        if ax is None:
            ax = plt.gca()
        if not self:
            return ax
        values, bin_values, patches = ax.hist(
            self.results["nn_distance"], bins=bins, density=density, label="data"
        )
        # evaluate the analytic CSR curve at the bin centers
        x_data = (bin_values[:-1] + bin_values[1:]) / 2
        if self.dimension == 2:
            ax.plot(
                x_data,
                pdf_nnDistances_csr_2D(x_data, self.localization_density),
                "r-",
                label="CSR",
                **kwargs,
            )
        elif self.dimension == 3:
            ax.plot(
                x_data,
                pdf_nnDistances_csr_3D(x_data, self.localization_density),
                "r-",
                label="CSR",
                **kwargs,
            )
        else:
            logger.warning(
                f"No analytic probability density function for {self.dimension} dimensions available."
            )
        # fit distributions:
        if fit:
            if isinstance(self.distribution_statistics, _DistributionFits):
                self.distribution_statistics.plot(ax=ax)
            else:
                self.fit_distributions()
                self.distribution_statistics.plot(ax=ax)
        # NOTE(review): "Neigbor" typo in the displayed title below
        # (runtime string; left unchanged in this documentation pass).
        ax.set(
            title="k-Nearest Neigbor Distances\n"
            + " (k = "
            + str(self.parameter["k"])
            + ")",
            xlabel="distance (nm)",
            ylabel="pdf" if density else "counts",
        )
        ax.legend(loc="best")
        return ax
#### Auxiliary functions and classes
class NNDistances_csr_2d(stats.rv_continuous):
    """
    Continuous distribution function for nearest-neighbor distances of points
    distributed in 2D under complete spatial randomness.
    Parameters
    ----------
    density : float
        Shape parameter `density`, being the density of points.
    """

    def _pdf(self, x, density):
        # pdf(w) = 2 * rho * pi * w * exp(-rho * pi * w^2)
        rate = density * np.pi
        return 2 * rate * x * np.exp(-rate * x ** 2)
class NNDistances_csr_3d(stats.rv_continuous):
    """
    Continuous distribution function for nearest-neighbor distances of points
    distributed in 3D under complete spatial randomness.
    Parameters
    ----------
    density : float
        Shape parameter `density`, being the density of points.
    """

    def _pdf(self, x, density):
        # scale a = (3 / (4 pi rho))^(1/3); pdf(w) = 3/a (w/a)^2 exp(-(w/a)^3)
        a = (3 / 4 / np.pi / density) ** (1 / 3)
        u = x / a
        return 3 / a * u ** 2 * np.exp(-u ** 3)
class _DistributionFits:
    """
    Handle for distribution fits.

    This class is typically instantiated by LocalizationProperty methods.
    It holds the statistical parameters derived by fitting the result
    distributions using MLE (scipy.stats). Statistical parameters are
    defined as described in
    :ref:(https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous.html)

    Parameters
    ----------
    analysis_class : LocalizationPrecision
        The analysis class with result data to fit.

    Attributes
    ----------
    analysis_class : LocalizationPrecision
        The analysis class with result data to fit.
    loc_property : str
        The LocData property for which to fit an appropriate distribution.
    distribution : scipy.stats.rv_continuous or None
        Distribution model to fit; None until `fit()` succeeds.
    parameters : list of str
        Free parameters in `distribution`; after `fit()` each name is also
        set as an attribute holding the fitted value.
    """
    def __init__(self, analysis_class):
        self.analysis_class = analysis_class
        # results column that holds the nearest-neighbor distances
        self.loc_property = "nn_distance"
        # populated by fit(); stays None if the dimensionality is unsupported
        self.distribution = None
        self.parameters = []
    def fit(self, with_constraints=True, **kwargs):
        """
        Fit the CSR nearest-neighbor model to analysis_class.results.

        The model is selected from the analysis dimensionality (2D or 3D);
        any other dimensionality logs a warning and returns without fitting.
        If with_constraints is true (default) we put the following
        constraints on the fit procedure: loc=0, scale=1.

        Parameters
        ----------
        with_constraints : bool
            Flag to use predefined constraints on fit parameters.
        kwargs : dict
            Other parameters passed to the `distribution.fit()` method.
        """
        if self.analysis_class.dimension == 2:
            self.distribution = NNDistances_csr_2d(name="NNDistances_csr_2d", a=0.0)
        elif self.analysis_class.dimension == 3:
            self.distribution = NNDistances_csr_3d(name="NNDistances_csr_3d", a=0.0)
        else:
            logger.warning(
                f"No fit model for {self.analysis_class.dimension} dimensions available."
            )
            return
        # shape parameters declared by the scipy distribution, plus the
        # generic loc/scale parameters every rv_continuous carries
        self.parameters = [name.strip() for name in self.distribution.shapes.split(",")]
        self.parameters += ["loc", "scale"]
        if with_constraints:
            # pin loc=0 and scale=1; explicit kwargs take precedence
            kwargs_ = dict(dict(floc=0, fscale=1), **kwargs)
        else:
            kwargs_ = kwargs
        fit_results = self.distribution.fit(
            data=self.analysis_class.results[self.loc_property].values, **kwargs_
        )
        # expose each fitted value as an attribute named after its parameter
        for parameter, result in zip(self.parameters, fit_results):
            setattr(self, parameter, result)
    def plot(self, ax=None, **kwargs):
        """
        Provide plot as :class:`matplotlib.axes.Axes` object showing the
        probability distribution functions of fitted results.

        Parameters
        ----------
        ax : :class:`matplotlib.axes.Axes`
            The axes on which to show the image.
        kwargs : dict
            Other parameters passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        :class:`matplotlib.axes.Axes`
            Axes object with the plot.
        """
        if ax is None:
            ax = plt.gca()
        # nothing to draw before fit() has succeeded
        if self.distribution is None:
            return ax
        # plot fit curve over the central 99.8% of the fitted distribution
        x_values = np.linspace(
            self.distribution.ppf(0.001, **self.parameter_dict()),
            self.distribution.ppf(0.999, **self.parameter_dict()),
            100,
        )
        ax.plot(
            x_values,
            self.distribution.pdf(x_values, **self.parameter_dict()),
            "r-",
            **dict(
                dict(lw=3, alpha=0.6, label=str(self.distribution.name) + " pdf"),
                **kwargs,
            ),
        )
        return ax
    def parameter_dict(self):
        """ Dictionary of fitted parameters. """
        return {k: self.__dict__[k] for k in self.parameters}
| [
"pandas.DataFrame",
"sklearn.neighbors.NearestNeighbors",
"numpy.exp",
"matplotlib.pyplot.gca",
"logging.getLogger"
] | [((1530, 1557), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1547, 1557), False, 'import logging\n'), ((2955, 3044), 'pandas.DataFrame', 'pd.DataFrame', (["{'nn_distance': distances[..., k - 1], 'nn_index': indices[..., k - 1]}"], {}), "({'nn_distance': distances[..., k - 1], 'nn_index': indices[...,\n k - 1]})\n", (2967, 3044), True, 'import pandas as pd\n'), ((1993, 2026), 'numpy.exp', 'np.exp', (['(-density * np.pi * x ** 2)'], {}), '(-density * np.pi * x ** 2)\n', (1999, 2026), True, 'import numpy as np\n'), ((2482, 2503), 'numpy.exp', 'np.exp', (['(-(x / a) ** 3)'], {}), '(-(x / a) ** 3)\n', (2488, 2503), True, 'import numpy as np\n'), ((7442, 7451), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7449, 7451), True, 'import matplotlib.pyplot as plt\n'), ((9357, 9390), 'numpy.exp', 'np.exp', (['(-density * np.pi * x ** 2)'], {}), '(-density * np.pi * x ** 2)\n', (9363, 9390), True, 'import numpy as np\n'), ((9826, 9847), 'numpy.exp', 'np.exp', (['(-(x / a) ** 3)'], {}), '(-(x / a) ** 3)\n', (9832, 9847), True, 'import numpy as np\n'), ((13046, 13055), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13053, 13055), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2682), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k', 'metric': '"""euclidean"""', 'n_jobs': 'N_JOBS'}), "(n_neighbors=k, metric='euclidean', n_jobs=N_JOBS)\n", (2632, 2682), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((2785, 2851), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k', 'metric': '"""euclidean"""', 'n_jobs': 'N_JOBS'}), "(n_neighbors=k, metric='euclidean', n_jobs=N_JOBS)\n", (2801, 2851), False, 'from sklearn.neighbors import NearestNeighbors\n')] |
import os
import shutil
import time
import socket
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import Utils
from Utils.checkpoints import build_logger
from Utils.checkpoints import plot_image, save_context
from Utils import flags
import Torture
from Torture.Models import resnet_3layer as resnet
import torchvision
from MI import pgd
# Run a test-set evaluation every EVALUATE_EPOCH epochs and write a
# checkpoint every SAVE_EPOCH epochs.
EVALUATE_EPOCH = 1
SAVE_EPOCH = 10
EPOCH_TOTAL = 200
HYPERPARAMETERS = None
# Sentinel default so save_context can detect a missing --results-folder.
DEFAULT_RESULTS_FOLDER_ARGUMENT = "Not Valid"
DEFAULT_RESULTS_FOLDER = "./results/"
# Source folders copied into the results directory for reproducibility.
FILES_TO_BE_SAVED = ["./", "./Torture", "./Torture/Models", "./Utils"]
# Flags whose values become part of the experiment key/folder name.
KEY_ARGUMENTS = ["batch_size", "model", "data", "adv_ratio"]
config = {
    "DEFAULT_RESULTS_FOLDER": DEFAULT_RESULTS_FOLDER,
    "FILES_TO_BE_SAVED": FILES_TO_BE_SAVED,
    "KEY_ARGUMENTS": KEY_ARGUMENTS
}
# Command-line flags (thin wrapper around argparse in Utils.flags).
flags.DEFINE_argument("-gpu", "--gpu", default="-1")
flags.DEFINE_argument("--results-folder",
                      default=DEFAULT_RESULTS_FOLDER_ARGUMENT)
flags.DEFINE_argument("-k", "-key", "--key", default="")
flags.DEFINE_argument("-data", "--data", default="Caltech101")
flags.DEFINE_boolean("-o", "--overwrite-results", default=False)
flags.DEFINE_argument("-bs",
                      "-batch_size",
                      "--batch_size",
                      type=int,
                      default=50)
flags.DEFINE_argument("-nw",
                      "-num_workers",
                      "--num_workers",
                      type=int,
                      default=64)
# Fraction of the loss computed on adversarial examples (0 = clean only).
flags.DEFINE_argument("-ar",
                      "-adv_ratio",
                      "--adv_ratio",
                      type=float,
                      default=0.)
flags.DEFINE_argument("-model", "--model", default="resnet18")
FLAGS = flags.FLAGS
# save_context snapshots the code folders and returns the logger plus the
# directories for checkpoints and summaries.
logger, MODELS_FOLDER, SUMMARIES_FOLDER = save_context(__file__, FLAGS, config)
logger.info("build dataloader")
# Append this run's host and results folder to a global experiment ledger.
with open("TotalList.txt", "a") as f:
    f.write(socket.gethostname() + ":" + FLAGS.results_folder + "\n")
# Figure finished
def onehot(ind, n_classes=None):
    """Return a one-hot float32 vector with a 1 at position *ind*.

    Parameters
    ----------
    ind : int
        Index to set to 1.
    n_classes : int, optional
        Length of the vector.  Defaults to the module-level ``num_classes``
        (set by the dataset-selection code below), preserving the original
        ``onehot(ind)`` call signature while making the function usable
        with any vector length.
    """
    if n_classes is None:
        n_classes = num_classes
    vector = np.zeros([n_classes])
    vector[ind] = 1
    return vector.astype(np.float32)
# Dataset selection; each branch also sets the module-global `num_classes`
# consumed by the classifier construction below.
logger.info("build dataloader")
if FLAGS.data.lower() in ["cifar10"]:
    train_trans, test_trans = Torture.Models.transforms.cifar_transform()
    trainset = torchvision.datasets.CIFAR10(root='/home/LargeData/cifar/',
                                            train=True,
                                            download=True,
                                            transform=train_trans)
    testset = torchvision.datasets.CIFAR10(root='/home/LargeData/cifar/',
                                           train=False,
                                           download=True,
                                           transform=test_trans)
    num_classes = 10
elif FLAGS.data.lower() in ["cifar100"]:
    train_trans, test_trans = Torture.Models.transforms.cifar_transform()
    trainset = torchvision.datasets.CIFAR100(root='/home/LargeData/cifar/',
                                             train=True,
                                             download=True,
                                             transform=train_trans)
    testset = torchvision.datasets.CIFAR100(root='/home/LargeData/cifar/',
                                            train=False,
                                            download=True,
                                            transform=test_trans)
    num_classes = 100
else:
    # BUGFIX: an unrecognized --data value previously fell through silently
    # and crashed later with a NameError on `trainset`/`num_classes`;
    # fail fast instead, mirroring the unknown-model check below.
    raise ValueError("unknown dataset name")
dataloader_train = torch.utils.data.DataLoader(trainset,
                                             batch_size=FLAGS.batch_size,
                                             shuffle=True,
                                             num_workers=2)
dataloader_test = torch.utils.data.DataLoader(testset,
                                            batch_size=FLAGS.batch_size,
                                            shuffle=False,
                                            num_workers=2)
# Model selection from the resnet_3layer registry.
if FLAGS.model.lower() in resnet.model_dict:
    CLASSIFIER = resnet.model_dict[FLAGS.model.lower()]
else:
    raise ValueError("unknown model name")
classifier = CLASSIFIER(num_classes=num_classes)
device = torch.device("cuda:0")
classifier = classifier.to(device)
# classifier = nn.DataParallel(classifier)
def anneal_lr(epoch):
    """Learning-rate multiplier for LambdaLR: 1.0 for epochs 0-99,
    0.1 for epochs 100-149, and 0.01 from epoch 150 onwards."""
    for boundary, factor in ((100, 1.), (150, 0.1)):
        if epoch < boundary:
            return factor
    return 0.01
# PGD attack configuration (see MI.pgd.projected_gradient_descent):
# L-inf ball of radius 16/255, 10 iterations of step 4/255, inputs
# clipped to [-1, 1] (data is normalized to that range).
pgd_kwargs = {
    "eps": 16. / 255.,
    "eps_iter": 4. / 255.,
    "nb_iter": 10,
    "norm": np.inf,
    "clip_min": -1,
    "clip_max": 1,
    "loss_fn": None,
}
# Separate loss instances for the clean and adversarial terms.
criterion = nn.CrossEntropyLoss()
criterion_adv = nn.CrossEntropyLoss()
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
# lr = 0.01 * anneal_lr(epoch); see anneal_lr above for the schedule.
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, [anneal_lr])
for epoch in range(EPOCH_TOTAL):  # loop over the dataset multiple times
    logger.info("Start Epoch {}".format(epoch))
    running_loss = 0.0
    classifier.train()
    for i, data_batch in enumerate(dataloader_train):
        # get the inputs; data is a list of [inputs, labels]
        img_batch, label_batch = data_batch
        img_batch, label_batch = img_batch.to(device), label_batch.to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        loss = 0.
        if FLAGS.adv_ratio > 0.:
            # Craft adversarial examples in eval mode (fixed BN statistics),
            # then switch back to train mode for the parameter update.
            classifier.eval()
            adv_x = pgd.projected_gradient_descent(classifier, img_batch,
                                                   **pgd_kwargs)
            classifier.train()
            optimizer.zero_grad()
            output_batch_adv = classifier(adv_x)
            loss += criterion_adv(output_batch_adv,
                                  label_batch) * FLAGS.adv_ratio
        else:
            optimizer.zero_grad()
        if FLAGS.adv_ratio < 1.:
            output_batch = classifier(img_batch)
            loss += criterion(output_batch,
                              label_batch) * (1. - FLAGS.adv_ratio)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # BUGFIX: step the scheduler once per epoch *after* the optimizer updates
    # (required order since PyTorch 1.1); it previously ran at the start of
    # the epoch, so epoch 0 never trained with the configured initial lr.
    lr_scheduler.step()
    # BUGFIX: average over the batch count (i + 1), not the last index i,
    # which over-estimated the mean and raised ZeroDivisionError for
    # single-batch loaders.
    logger.info('[%d] train loss: %.3f' % (epoch + 1, running_loss / (i + 1)))
    if epoch % EVALUATE_EPOCH == 0:
        running_loss, correct, total = 0.0, 0.0, 0.0
        classifier.eval()
        for i, data_batch in enumerate(dataloader_test):
            # get the inputs; data is a list of [inputs, labels]
            img_batch, label_batch = data_batch
            img_batch, label_batch = img_batch.to(device), label_batch.to(
                device)
            output_batch = classifier(img_batch)
            loss = criterion(output_batch, label_batch)
            running_loss += loss.item()
            _, predicted = torch.max(output_batch.data, 1)
            correct += (predicted == label_batch).sum().item()
            total += label_batch.size(0)
        logger.info('[%d] test loss: %.3f, accuracy: %.3f' %
                    (epoch + 1, running_loss / (i + 1), correct / total))
    if epoch % SAVE_EPOCH == 0 or epoch == EPOCH_TOTAL - 1:
        # NOTE(review): "eopch" is a typo in the checkpoint filename, kept
        # as-is because downstream loading scripts may rely on it.
        torch.save(classifier.state_dict(),
                   os.path.join(MODELS_FOLDER, "eopch{}.ckpt".format(epoch)))
logger.info('Finished Training')
| [
"Utils.flags.DEFINE_argument",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"numpy.zeros",
"torchvision.datasets.CIFAR100",
"torchvision.datasets.CIFAR10",
"socket.gethostname",
"torch.optim.lr_scheduler.LambdaLR",
"Utils.checkpoints.save_context",
"MI.pgd.projected_gradient_descent... | [((814, 866), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-gpu"""', '"""--gpu"""'], {'default': '"""-1"""'}), "('-gpu', '--gpu', default='-1')\n", (835, 866), False, 'from Utils import flags\n'), ((867, 954), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""--results-folder"""'], {'default': 'DEFAULT_RESULTS_FOLDER_ARGUMENT'}), "('--results-folder', default=\n DEFAULT_RESULTS_FOLDER_ARGUMENT)\n", (888, 954), False, 'from Utils import flags\n'), ((972, 1028), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-k"""', '"""-key"""', '"""--key"""'], {'default': '""""""'}), "('-k', '-key', '--key', default='')\n", (993, 1028), False, 'from Utils import flags\n'), ((1029, 1091), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-data"""', '"""--data"""'], {'default': '"""Caltech101"""'}), "('-data', '--data', default='Caltech101')\n", (1050, 1091), False, 'from Utils import flags\n'), ((1092, 1156), 'Utils.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""-o"""', '"""--overwrite-results"""'], {'default': '(False)'}), "('-o', '--overwrite-results', default=False)\n", (1112, 1156), False, 'from Utils import flags\n'), ((1157, 1242), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-bs"""', '"""-batch_size"""', '"""--batch_size"""'], {'type': 'int', 'default': '(50)'}), "('-bs', '-batch_size', '--batch_size', type=int,\n default=50)\n", (1178, 1242), False, 'from Utils import flags\n'), ((1327, 1414), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-nw"""', '"""-num_workers"""', '"""--num_workers"""'], {'type': 'int', 'default': '(64)'}), "('-nw', '-num_workers', '--num_workers', type=int,\n default=64)\n", (1348, 1414), False, 'from Utils import flags\n'), ((1499, 1585), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-ar"""', '"""-adv_ratio"""', '"""--adv_ratio"""'], {'type': 'float', 'default': '(0.0)'}), "('-ar', '-adv_ratio', 
'--adv_ratio', type=float,\n default=0.0)\n", (1520, 1585), False, 'from Utils import flags\n'), ((1669, 1731), 'Utils.flags.DEFINE_argument', 'flags.DEFINE_argument', (['"""-model"""', '"""--model"""'], {'default': '"""resnet18"""'}), "('-model', '--model', default='resnet18')\n", (1690, 1731), False, 'from Utils import flags\n'), ((1795, 1832), 'Utils.checkpoints.save_context', 'save_context', (['__file__', 'FLAGS', 'config'], {}), '(__file__, FLAGS, config)\n', (1807, 1832), False, 'from Utils.checkpoints import plot_image, save_context\n'), ((3458, 3558), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'FLAGS.batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=FLAGS.batch_size, shuffle=\n True, num_workers=2)\n', (3485, 3558), False, 'import torch\n'), ((3714, 3814), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'FLAGS.batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=FLAGS.batch_size, shuffle=\n False, num_workers=2)\n', (3741, 3814), False, 'import torch\n'), ((4158, 4180), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4170, 4180), False, 'import torch\n'), ((4573, 4594), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4592, 4594), True, 'import torch.nn as nn\n'), ((4611, 4632), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4630, 4632), True, 'import torch.nn as nn\n'), ((4718, 4769), 'torch.optim.lr_scheduler.LambdaLR', 'optim.lr_scheduler.LambdaLR', (['optimizer', '[anneal_lr]'], {}), '(optimizer, [anneal_lr])\n', (4745, 4769), True, 'import torch.optim as optim\n'), ((2024, 2047), 'numpy.zeros', 'np.zeros', (['[num_classes]'], {}), '([num_classes])\n', (2032, 2047), True, 'import numpy as np\n'), ((2208, 2251), 'Torture.Models.transforms.cifar_transform', 'Torture.Models.transforms.cifar_transform', ([], {}), '()\n', (2249, 2251), 
False, 'import Torture\n'), ((2267, 2380), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""/home/LargeData/cifar/"""', 'train': '(True)', 'download': '(True)', 'transform': 'train_trans'}), "(root='/home/LargeData/cifar/', train=True,\n download=True, transform=train_trans)\n", (2295, 2380), False, 'import torchvision\n'), ((2523, 2636), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""/home/LargeData/cifar/"""', 'train': '(False)', 'download': '(True)', 'transform': 'test_trans'}), "(root='/home/LargeData/cifar/', train=False,\n download=True, transform=test_trans)\n", (2551, 2636), False, 'import torchvision\n'), ((2854, 2897), 'Torture.Models.transforms.cifar_transform', 'Torture.Models.transforms.cifar_transform', ([], {}), '()\n', (2895, 2897), False, 'import Torture\n'), ((2913, 3027), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""/home/LargeData/cifar/"""', 'train': '(True)', 'download': '(True)', 'transform': 'train_trans'}), "(root='/home/LargeData/cifar/', train=True,\n download=True, transform=train_trans)\n", (2942, 3027), False, 'import torchvision\n'), ((3173, 3287), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""/home/LargeData/cifar/"""', 'train': '(False)', 'download': '(True)', 'transform': 'test_trans'}), "(root='/home/LargeData/cifar/', train=False,\n download=True, transform=test_trans)\n", (3202, 3287), False, 'import torchvision\n'), ((5371, 5438), 'MI.pgd.projected_gradient_descent', 'pgd.projected_gradient_descent', (['classifier', 'img_batch'], {}), '(classifier, img_batch, **pgd_kwargs)\n', (5401, 5438), False, 'from MI import pgd\n'), ((6682, 6713), 'torch.max', 'torch.max', (['output_batch.data', '(1)'], {}), '(output_batch.data, 1)\n', (6691, 6713), False, 'import torch\n'), ((1916, 1936), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1934, 1936), False, 'import socket\n')] |
from plotnine import *
import numpy as np
import pandas as pd
import functions.et_make_df as make_df
from functions.et_helper import winmean,winmean_cl_boot
import MISC
import logging
logger = logging.getLogger(__name__)
def process_lum(etsamples, etmsgs):
    """Collect luminance epochs for every subject and both eyetrackers.

    Iterates over all subjects in *etsamples* and over the two eyetrackers
    ('pl' = Pupil Labs, 'el' = EyeLink), delegating to
    process_lum_singlesub, and returns all epochs in a single DataFrame.
    """
    per_run = []
    for subject in etsamples.subject.unique():
        for et in ['pl', 'el']:
            logger.info("subject:%s, et:%s" % (subject, et))
            per_run.append(process_lum_singlesub(etsamples, etmsgs, subject, et))
    # PERF: concatenate once at the end; the old code re-ran pd.concat on the
    # growing accumulator every iteration, copying all previous rows each time.
    return pd.concat(per_run) if per_run else pd.DataFrame()
def process_lum_singlesub(etsamples, etmsgs, subject, eyetracker, td=None):
    """Epoch one subject/eyetracker's samples around luminance events.

    Parameters
    ----------
    etsamples, etmsgs : pandas.DataFrame
        Samples and messages with 'subject' and 'eyetracker' columns.
    subject : str
        Subject identifier to select.
    eyetracker : str
        Eyetracker identifier ('pl' or 'el') to select.
    td : sequence of two floats, optional
        Epoch window relative to the message; defaults to [-1, 12],
        matching the previously hard-coded behavior.

    Returns
    -------
    pandas.DataFrame
        Epoched samples with a baseline-normalized 'pa_norm' column.
    """
    # BUGFIX: the old signature advertised td=[-1, 5] but immediately
    # overwrote it with [-1, 12], making the argument dead.  The default now
    # *is* [-1, 12] (behavior unchanged for existing callers) and an explicit
    # td is honored.  Using None also avoids a mutable default argument.
    if td is None:
        td = [-1, 12]
    condquery = 'condition == "DILATION" & exp_event=="lum"'
    query = 'subject==@subject& eyetracker==@eyetracker'
    lum_epoch = make_df.make_epochs(etsamples.query(query), etmsgs.query(query + '&' + condquery), td=td)
    # normalize per trial by the pre-stimulus median (see standardize_lum);
    # could be made better e.g. by quantile division
    lum_epoch = lum_epoch.groupby(["msg_time"], as_index=False).apply(lambda rows: standardize_lum(rows))
    return lum_epoch
def standardize_lum(df):
    """Add a 'pa_norm' column: pupil area divided by the median of the
    pre-stimulus baseline (samples with td < 0).  Trials without any
    baseline samples get NaN and a warning."""
    baseline_mask = df.td < 0
    if not baseline_mask.any():
        logger.warning('trial has no baseline')
        df.loc[:, "pa_norm"] = np.nan
    else:
        baseline = np.median(df.loc[baseline_mask, 'pa'])
        df.loc[:, "pa_norm"] = df.pa / baseline
    return df
def bin_lum(lum_epoch,nbins=100):
    """Bin the epoched time course into `nbins` equal-width td bins,
    averaging 'pa_norm' within each bin, separately for every
    (subject, eyetracker, lum, block) combination.

    The bin grid is shared across all groups (it spans the global td
    range of `lum_epoch`), so the binned traces are directly comparable.
    """
    from scipy.stats import binned_statistic
    # to plot the data correctly, we need to bin them & take means
    def lum_bin_function(df):
        # nbins+1 edges over the *global* td range, not just this group's
        newtd = np.linspace(lum_epoch.td.min(),lum_epoch.td.max(),nbins+1)
        binned = binned_statistic(x=df.td,values=df.pa_norm,statistic="mean",bins=newtd)
        # report each bin at its center: right edge minus half the bin width
        return(pd.DataFrame({"subject":df.subject.iloc[0],"block":df.block.iloc[0],"eyetracker":df.eyetracker.iloc[0],"td":newtd[1:]-(newtd[1]-newtd[0])/2,"pa_norm":binned[0],"lum":df.lum.iloc[0]}))
    lum_epoch_binned = lum_epoch.groupby(["subject","eyetracker","lum","block"],as_index=False).apply(lambda rows: lum_bin_function(rows))
    # flatten the (group, row) MultiIndex produced by the apply above
    lum_epoch_binned = lum_epoch_binned.reset_index()
    return(lum_epoch_binned)
def plot_time_all(all_lum_binned):
    """Plot the grand-average pupil time course per luminance level and
    eyetracker: winsorized mean over blocks, then winsorized mean with
    bootstrapped CIs over subjects (plotnine figure is returned)."""
    # Plot the average over subjects of the average over blocks with +-95CI
    all_lum_binned_noblock = all_lum_binned.groupby(["td","eyetracker","subject","lum"],as_index=False).agg(winmean)
    # one line per eyetracker x luminance combination
    all_lum_binned_noblock.loc[:,'plot_grouping'] = all_lum_binned_noblock.eyetracker + all_lum_binned_noblock.lum.map(str)
    p = (ggplot(all_lum_binned_noblock.query('lum>0'),aes(x='td',y='pa_norm',group="plot_grouping",color="lum",shape="eyetracker"))
    +stat_summary(fun_data=winmean_cl_boot,position=position_dodge(width=0.06),size=0.2)
    +geom_vline(xintercept=[0,3,10] )
    +scale_color_gradient(low='black',high='lightgray')+xlim((-1,6))
    +scale_shape_manual(values=[">","<"])
    )
    return(p)
def plot_time_diff(all_lum_binned,subject="VP3"):
    """Plot the per-bin eyetracker difference (np.diff over the two
    eyetracker rows) of the pupil time course for a single subject."""
    # Plot the difference between eyetracker for a single subject
    all_lum_diff = all_lum_binned.groupby(["td","lum","block","subject"],as_index=False).pa_norm.agg(np.diff)
    # np.diff returns arrays; coerce the single-difference values to floats
    all_lum_diff.loc[:,'pa_norm'] = pd.to_numeric(all_lum_diff.loc[:,'pa_norm'])
    # one line per block x luminance combination
    all_lum_diff.loc[:,'plot_grouping'] = all_lum_diff.block.map(str) + all_lum_diff.lum.map(str)
    p=(ggplot(all_lum_diff.query("subject==@subject"),aes(x='td',y='pa_norm',color="lum",group="plot_grouping"))
    +geom_line()
    +geom_vline(xintercept=[0,3,10] )
    +scale_color_gradient(low='black',high='lightgray')
    +xlim((-1,6))
    +ylab("Eyetracker difference in pupilsize")
    +scale_shape_manual(values=[">","<"])
    )
    return(p)
def calc_mean(all_lum, t_from=2, t_to=3):
    """Winsorized-mean pupil size within the (t_from, t_to] time window,
    per luminance level, block, subject, eyetracker and message."""
    window = all_lum.query("td>@t_from & td<=@t_to")
    grouped = window.groupby(
        ["lum", "block", "subject", "eyetracker", "msg_time"], as_index=False
    )
    return grouped.pa_norm.agg(winmean)
def plot_mean(all_lum):
    """Plot the mean pupil response per luminance level and eyetracker
    (winsorized means with bootstrapped CIs, plotnine figure returned)."""
    mean_lum = calc_mean(all_lum)
    p=(ggplot(mean_lum.query("lum>0").groupby(["lum","subject","eyetracker"],as_index=False).pa_norm.agg(winmean),aes(x="lum",y="pa_norm",shape="eyetracker",color="lum"))
    +stat_summary(fun_data=winmean_cl_boot,position=position_dodge(width=15))
    +geom_point(alpha=0.1)
    +scale_color_gradient(low='black',high='lightgray')
    +scale_shape_manual(values=[">","<"])
    )
    return(p)
def plot_diff(all_lum):
    """Plot the per-subject eyetracker difference (np.diff over the two
    eyetracker rows) of the mean pupil response, per luminance level."""
    mean_lum = calc_mean(all_lum)
    diff_lum = mean_lum.query("lum>0").groupby(["lum","block","subject"],as_index=False).pa_norm.agg(np.diff)
    # np.diff returns arrays; coerce the single-difference values to floats
    diff_lum.loc[:,'pa_norm'] = pd.to_numeric(diff_lum.loc[:,'pa_norm'])
    p = (ggplot(diff_lum,aes(x="subject",y="pa_norm",color="lum",group="lum"))
    +stat_summary(fun_data=winmean_cl_boot,position=position_dodge(width=0.5))
    #+geom_point(alpha=0.2,position=position_dodge(width=0.5))
    +scale_color_gradient(low='black',high='lightgray')
    +ylab('pupil area difference (Eyelink-Pupillabs)[a.u.]')
    #+scale_shape_manual(values=[">","<"])
    )
    return(p)
"pandas.DataFrame",
"numpy.sum",
"scipy.stats.binned_statistic",
"numpy.median",
"logging.getLogger",
"pandas.to_numeric"
] | [((193, 220), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (210, 220), False, 'import logging\n'), ((271, 285), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (283, 285), True, 'import pandas as pd\n'), ((3430, 3475), 'pandas.to_numeric', 'pd.to_numeric', (["all_lum_diff.loc[:, 'pa_norm']"], {}), "(all_lum_diff.loc[:, 'pa_norm'])\n", (3443, 3475), True, 'import pandas as pd\n'), ((4890, 4931), 'pandas.to_numeric', 'pd.to_numeric', (["diff_lum.loc[:, 'pa_norm']"], {}), "(diff_lum.loc[:, 'pa_norm'])\n", (4903, 4931), True, 'import pandas as pd\n'), ((1360, 1377), 'numpy.sum', 'np.sum', (['(df.td < 0)'], {}), '(df.td < 0)\n', (1366, 1377), True, 'import numpy as np\n'), ((1849, 1923), 'scipy.stats.binned_statistic', 'binned_statistic', ([], {'x': 'df.td', 'values': 'df.pa_norm', 'statistic': '"""mean"""', 'bins': 'newtd'}), "(x=df.td, values=df.pa_norm, statistic='mean', bins=newtd)\n", (1865, 1923), False, 'from scipy.stats import binned_statistic\n'), ((1941, 2148), 'pandas.DataFrame', 'pd.DataFrame', (["{'subject': df.subject.iloc[0], 'block': df.block.iloc[0], 'eyetracker': df\n .eyetracker.iloc[0], 'td': newtd[1:] - (newtd[1] - newtd[0]) / 2,\n 'pa_norm': binned[0], 'lum': df.lum.iloc[0]}"], {}), "({'subject': df.subject.iloc[0], 'block': df.block.iloc[0],\n 'eyetracker': df.eyetracker.iloc[0], 'td': newtd[1:] - (newtd[1] -\n newtd[0]) / 2, 'pa_norm': binned[0], 'lum': df.lum.iloc[0]})\n", (1953, 2148), True, 'import pandas as pd\n'), ((1529, 1563), 'numpy.median', 'np.median', (["df.loc[df.td < 0, 'pa']"], {}), "(df.loc[df.td < 0, 'pa'])\n", (1538, 1563), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""utilities.py: datafile checking and format conversions
"""
import csv
import gdal
import json
import numpy as np
import os
import shapefile
from gdalconst import *
from osgeo import osr, gdal
from random import randint
import matplotlib.pyplot as plt
import pandas
def check_csvfile(fin):
    """Do basic checks on any input csv file.

    Skips the header row, then prints a histogram of row lengths and a
    per-column count of non-empty cells, closing *fin* afterwards.

    Parameters
    ----------
    fin : file object
        Open file (or file-like) containing csv format data.

    Returns
    -------
    tuple
        Always the empty tuple (kept for compatibility with callers).
    """
    csvin = csv.reader(fin)
    # BUGFIX: csvin.next() is Python-2-only; next() works on both 2 and 3.
    headers = next(csvin)  # consume (and discard) the header row
    rowlens = {}
    coltxt = {}
    for row in csvin:
        rlen = len(row)
        rowlens.setdefault(rlen, 0)
        rowlens[rlen] += 1
        for col, data in enumerate(row):
            if data != "":
                coltxt.setdefault(col, 0)
                coltxt[col] += 1
    print(rowlens)
    print(coltxt)
    fin.close()
    return ()
def array_to_geotiff(data, xllcorner, yllcorner, cellsize, datatype, outfile):
    """Convert a 2-D array into a single-band GeoTIFF file (WGS84).

    See http://gis.stackexchange.com/questions/62343/how-can-i-convert-a-ascii-file-to-geotiff-using-python

    Parameters
    ----------
    data : 2-D sequence or array
        Raster values to write.
    xllcorner, yllcorner : float
        Coordinates of the lower-left corner.
    cellsize : float
        Pixel size in map units.
    datatype : unused
        Kept for interface compatibility; the band is written as GDT_Byte.
    outfile : str
        Output GeoTIFF path.
    """
    nbands = 1
    xsize = len(data)
    # BUGFIX: was `yzise = len(data[0])`, leaving `ysize` undefined and
    # crashing with a NameError at driver.Create() below.
    ysize = len(data[0])
    # NOTE(review): xsize is taken from the first array axis (rows) but GDAL
    # Create() expects (cols, rows) — confirm the intended array orientation.
    xtlcorner = xllcorner + xsize * cellsize
    ytlcorner = yllcorner
    raster = np.array(data)  # ensure ndarray for WriteArray
    # Geotransform g is an array, with:
    # g[0] top left x, g[1] w-e pixel resolution, g[2] rotation (0 if north up),
    # g[3] top left y, g[4] rotation (0 if north up), g[5] n-s pixel resolution
    driver = gdal.GetDriverByName("GTiff")
    dst_ds = driver.Create(outfile, xsize, ysize, nbands, gdal.GDT_Byte)
    dst_ds.SetGeoTransform([xtlcorner, cellsize, 0, ytlcorner, 0, cellsize])
    # set map projection to WGS84
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS("WGS84")
    dst_ds.SetProjection(srs.ExportToWkt())
    # write data to the single output band
    dst_ds.GetRasterBand(1).WriteArray(raster)
    return ()
def check_geotiff(infile, print_line=False):
    """Print basic metadata, value ranges and a histogram for a GeoTIFF.

    Example use:
        infile = "../../big_data/trees/Simard_Pinto_3DGlobalVeg_JGR.tif"
        check_geotiff(infile)

    Parameters
    ----------
    infile : str
        Path of the GeoTIFF file to inspect.
    print_line : bool
        If True, also print the raw first scanline of every band.

    Returns
    -------
    tuple or None
        numpy.histogram result of the *last* band (None for band-less files).
    """
    dataset = gdal.Open(infile, GA_ReadOnly)
    cols = dataset.RasterXSize
    rows = dataset.RasterYSize
    nbands = dataset.RasterCount
    driver = dataset.GetDriver().LongName
    print("{} size: {}x{}x{}".format(driver, str(rows), str(cols), str(nbands)))
    geotransform = dataset.GetGeoTransform()
    print(geotransform)
    bx = -32768  # running max/min over all bands
    bn = 32768   # NOTE(review): assumes int16-like data range — confirm
    hist = None  # BUGFIX: defined even if the file has no raster bands
    for b in range(1, nbands + 1):
        band = dataset.GetRasterBand(b)
        bandtype = gdal.GetDataTypeName(band.DataType)
        print("Band {} type {}".format(b, bandtype))
        # test first line of data
        scanline = band.ReadRaster(0, 0, band.XSize, 1, band.XSize, 1, band.DataType)
        if print_line:
            print(scanline)
        # Get data ranges, histogram
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        data = band.ReadAsArray(0, 0, band.XSize, band.YSize).astype(float)
        mx = np.amax(data)
        mn = np.amin(data)
        bx = max(bx, mx)
        bn = min(bn, mn)
        print("range {};{}".format(mn, mx))
        hist = np.histogram(data, bins=range(int(mn) - 1, int(mx) + 1))  # Fails for 0/1 values
        # plt.hist(data, bins=range(int(mn),int(mx)))
        # plt.show()
    print("All bands max {} min {}".format(bx, bn))
    return hist
def plot_hist(inhist, method="matplotlib"):
    """Plot a numpy.histogram result as a bar chart.

    Parameters
    ----------
    inhist : tuple
        (counts, bin_edges) pair as returned by numpy.histogram.
    method : str
        "matplotlib" shows a bar plot centered on the bins; any other
        value uses pandas' bar plotting instead.
    """
    hist = inhist[0];
    bins = inhist[1];
    if method == "matplotlib":
        width = 0.7 * (bins[1] - bins[0])
        # bin centers: midpoints between consecutive edges
        center = (bins[:-1] + bins[1:]) / 2
        plt.bar(center, hist, align='center', width=width)
        plt.show()
    else:
        pandas.DataFrame({'x':bins[1:],'y':hist}).plot(x='x',kind='bar');
    return()
""" convert shapefile to geojson
parameters:
string infile: name of shapefile (.shp)
Useful function from <NAME>,
found on http://geospatialpython.com/2013/07/shapefile-to-geojson.html
"""
def shp_to_geojson(infile):
    """Convert a shapefile (.shp) to a GeoJSON FeatureCollection.

    The output file is written next to the input with the extension
    replaced by 'json'.

    Parameters
    ----------
    infile : str
        Name of the shapefile (.shp).
    """
    # read the shapefile and collect one GeoJSON feature per shape record
    reader = shapefile.Reader(infile)
    attribute_names = [field[0] for field in reader.fields[1:]]
    features = []
    for shape_record in reader.shapeRecords():
        properties = dict(zip(attribute_names, shape_record.record))
        features.append(dict(
            type="Feature",
            geometry=shape_record.shape.__geo_interface__,
            properties=properties,
        ))
    # write the GeoJSON file (same basename, 'json' extension)
    outfile = infile[:-3] + "json"
    geojson = open(outfile, "w")
    geojson.write(json.dumps({"type": "FeatureCollection",
                              "features": features}, indent=2) + "\n")
    geojson.close()
    return ()
def generate_landscape_test():
    """Generate test data for landscape (10x10 grid) visualisations.

    Writes landscape_test.csv in the working directory: a header row plus
    one row per cell with a random biomass value in [0, 9].
    """
    # BUGFIX: csv.writer needs a text-mode file on Python 3 ("wb" crashed);
    # newline="" prevents blank rows on Windows.
    fout = open("landscape_test.csv", "w", newline="")
    csvout = csv.writer(fout, quoting=csv.QUOTE_NONNUMERIC)
    csvout.writerow(["x", "y", "value", "valuetype", "landscapeid"])
    # BUGFIX: range(0, 9) only produced a 9x9 grid, not the documented 10x10.
    for x in range(0, 10):
        for y in range(0, 10):
            value = randint(0, 9)
            csvout.writerow([str(x), str(y), value, "biomass", "1"])
    fout.close()
    return ()
| [
"pandas.DataFrame",
"csv.reader",
"csv.writer",
"numpy.amin",
"matplotlib.pyplot.show",
"random.randint",
"matplotlib.pyplot.bar",
"json.dumps",
"numpy.amax",
"numpy.array",
"shapefile.Reader",
"osgeo.gdal.Open",
"osgeo.gdal.GetDriverByName",
"osgeo.gdal.GetDataTypeName",
"osgeo.osr.Spat... | [((428, 443), 'csv.reader', 'csv.reader', (['fin'], {}), '(fin)\n', (438, 443), False, 'import csv\n'), ((1223, 1237), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1231, 1237), True, 'import numpy as np\n'), ((1584, 1613), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (1604, 1613), False, 'from osgeo import osr, gdal\n'), ((1809, 1831), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (1829, 1831), False, 'from osgeo import osr, gdal\n'), ((2204, 2234), 'osgeo.gdal.Open', 'gdal.Open', (['infile', 'GA_ReadOnly'], {}), '(infile, GA_ReadOnly)\n', (2213, 2234), False, 'from osgeo import osr, gdal\n'), ((4104, 4128), 'shapefile.Reader', 'shapefile.Reader', (['infile'], {}), '(infile)\n', (4120, 4128), False, 'import shapefile\n'), ((4854, 4900), 'csv.writer', 'csv.writer', (['fout'], {'quoting': 'csv.QUOTE_NONNUMERIC'}), '(fout, quoting=csv.QUOTE_NONNUMERIC)\n', (4864, 4900), False, 'import csv\n'), ((2688, 2723), 'osgeo.gdal.GetDataTypeName', 'gdal.GetDataTypeName', (['band.DataType'], {}), '(band.DataType)\n', (2708, 2723), False, 'from osgeo import osr, gdal\n'), ((3079, 3092), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (3086, 3092), True, 'import numpy as np\n'), ((3107, 3120), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (3114, 3120), True, 'import numpy as np\n'), ((3668, 3718), 'matplotlib.pyplot.bar', 'plt.bar', (['center', 'hist'], {'align': '"""center"""', 'width': 'width'}), "(center, hist, align='center', width=width)\n", (3675, 3718), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3737), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3735, 3737), True, 'import matplotlib.pyplot as plt\n'), ((4549, 4620), 'json.dumps', 'json.dumps', (["{'type': 'FeatureCollection', 'features': buffer}"], {'indent': '(2)'}), "({'type': 'FeatureCollection', 'features': buffer}, indent=2)\n", (4559, 4620), False, 'import json\n'), ((5042, 5055), 
'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (5049, 5055), False, 'from random import randint\n'), ((3756, 3800), 'pandas.DataFrame', 'pandas.DataFrame', (["{'x': bins[1:], 'y': hist}"], {}), "({'x': bins[1:], 'y': hist})\n", (3772, 3800), False, 'import pandas\n')] |
import os, sys
# Make the parent directory importable so the Imp and ItkHandler packages
# resolve when this test file is run directly from its own folder.
selfPath = os.path.dirname(__file__)
parentPath = os.path.dirname(selfPath)
sys.path += [parentPath]
from Imp.Imp import Implementation
from ItkHandler.ItkHandler import ItkHandler
import numpy as np
import unittest
# maximum tolerated mean relative deviation between PCE and Monte-Carlo results
TEST_EQUALITYRATIO_DELTA = 1e-1
def CreateTestResult(mode_):
    # Run the "TestMethod" implementation in the given mode (e.g. "Pce" or
    # "MonteCarlo") and return its result — presumably an image or a path
    # that ItkHandler.LoadImage accepts; confirm against Implementation.
    imp = Implementation("TestMethod", selfPath)
    imp.SelectMethod("TestMethod")
    imp.SelectMode(mode_)
    imp.Run(0)
    return imp.GetResult()
class TestPCE(unittest.TestCase):
    """Compare the PCE solver result against a Monte-Carlo reference."""
    def test_stdMultiVarFunctions(self):
        # Run the same test method once with the polynomial-chaos-expansion
        # solver and once with plain Monte Carlo, then compare the images.
        pceRes = CreateTestResult("Pce")
        mcRes = CreateTestResult("MonteCarlo")
        pceResIm = ItkHandler()
        mcResIm = ItkHandler()
        pceResIm.LoadImage(pceRes)
        mcResIm.LoadImage(mcRes)
        imPce = pceResIm.GetImageVolume().reshape(-1)
        imMc = mcResIm.GetImageVolume().reshape(-1)
        # mean relative deviation across all voxels must stay below the
        # module-level tolerance TEST_EQUALITYRATIO_DELTA
        diff = np.abs((imPce - imMc) / imMc)
        sz = diff.size
        diff = diff.sum() / sz
        self.assertAlmostEqual(diff, 0, delta = TEST_EQUALITYRATIO_DELTA)
# Allow running this test module directly: python <this_file>.py
if __name__ == '__main__':
    unittest.main()
"unittest.main",
"numpy.abs",
"os.path.dirname",
"ItkHandler.ItkHandler.ItkHandler",
"Imp.Imp.Implementation"
] | [((31, 56), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (46, 56), False, 'import os, sys\n'), ((71, 96), 'os.path.dirname', 'os.path.dirname', (['selfPath'], {}), '(selfPath)\n', (86, 96), False, 'import os, sys\n'), ((327, 365), 'Imp.Imp.Implementation', 'Implementation', (['"""TestMethod"""', 'selfPath'], {}), "('TestMethod', selfPath)\n", (341, 365), False, 'from Imp.Imp import Implementation\n'), ((1105, 1120), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1118, 1120), False, 'import unittest\n'), ((668, 680), 'ItkHandler.ItkHandler.ItkHandler', 'ItkHandler', ([], {}), '()\n', (678, 680), False, 'from ItkHandler.ItkHandler import ItkHandler\n'), ((700, 712), 'ItkHandler.ItkHandler.ItkHandler', 'ItkHandler', ([], {}), '()\n', (710, 712), False, 'from ItkHandler.ItkHandler import ItkHandler\n'), ((907, 936), 'numpy.abs', 'np.abs', (['((imPce - imMc) / imMc)'], {}), '((imPce - imMc) / imMc)\n', (913, 936), True, 'import numpy as np\n')] |
import datetime
import glob
from google_speech import Speech
import numpy as np
import pandas as pd
from pygame.draw import polygon
from pygame.font import SysFont
from pygame_utilities import display_centered_text
import time
from bead import (
digitize,
draw_columns,
height_to_width,
)
import operation as op
# filename suffix for addition/subtraction session data files
AS_SUFFIX = '_abacus_as.dat'
# date prefix format used in data filenames (e.g. 2020_01_31)
DATE_FORMAT = '%Y_%m_%d'
def get_data_filenames(suffix=AS_SUFFIX):
    """Return the data files in the working directory ending with *suffix*."""
    pattern = '*' + suffix
    return glob.glob(pattern)
def date_to_filename(
    dt,
    suffix=AS_SUFFIX
):
    """Build the data filename for date *dt* (a datetime.date)."""
    stem = dt.strftime(DATE_FORMAT)
    return stem + suffix
def storage_filename():
    # Date-stamped filename for today's session data.
    return date_to_filename(datetime.date.today())
# Get listing of appropriate data files by date
def get_dataset_dates(suffix=AS_SUFFIX):
    """Return the sorted list of dates for which a data file with *suffix* exists."""
    dates = [
        datetime.datetime.strptime(fn.split(suffix)[0], DATE_FORMAT).date()
        for fn in get_data_filenames(suffix=suffix)
    ]
    return sorted(dates)
def read_as_data(date):
    """Load the add/subtract results recorded on *date* into a pandas DataFrame."""
    column_names = [
        'problem',
        'response_time',
        'response',
        'correct',
        'time_of_day',
        'presentation_method',
    ]
    return pd.read_csv(
        date_to_filename(date, suffix=AS_SUFFIX),
        delimiter=',',
        header=None,
        names=column_names,
    )
def write_problem_result(
    stream,
    operands,
    response,
    response_time,
    is_correct,
    number_style,
):
    """Append one comma-separated result record to *stream*.

    The record fields are: semicolon-joined operands, response time (2 decimal
    places), the given response, correctness flag, a timestamp, and the name of
    the presentation style.
    """
    operand_field = ';'.join(str(operand) for operand in operands)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    record = '{},{:.2f},{},{},{},{}\n'.format(
        operand_field,
        response_time,
        response,
        is_correct,
        timestamp,
        number_style.name,
    )
    stream.write(record)
def generate_problems(
    addition_prob=.5,
    num_digits=6,
    num_operands=5,
    new_problem_prob=.5,
    previous_incorrect_prob=.4,
    previous_slow_prob=.1,
):
    """Yield add/subtract practice problems (lists of signed ints) forever.

    Each iteration either builds a fresh random problem or replays one from a
    past session's data file: either a previously missed problem or one with a
    slow response.  The three *_prob selection arguments must sum to 1.
    """
    if new_problem_prob + previous_incorrect_prob + previous_slow_prob != 1.:
        raise ValueError('Problem selection probabilities must sum to 1.')
    dates = get_dataset_dates()
    # digit-pair distributions for addition and subtraction; uniform over
    # the op module's operation table
    add_digit_pair_prob = op.digit_pair_prob(
        np.ones(op.OPERATION_COUNT) / op.OPERATION_COUNT,
        op.add_op_index_to_digit_pairs
    )
    sub_digit_pair_prob = op.digit_pair_prob(
        np.ones(op.OPERATION_COUNT) / op.OPERATION_COUNT,
        op.sub_op_index_to_digit_pairs
    )
    while True:
        rand = np.random.random()
        # decide whether to generate a new problem or give a problem where
        # the answer was previously incorrect or the response was slow
        if not dates or 0 <= rand < new_problem_prob:
            operands = op.generate_mixed_problem(
                add_digit_pair_prob,
                addition_prob,
                sub_digit_pair_prob,
                num_digits,
                num_operands,
            )
        else:
            date = np.random.choice(dates)
            df = read_as_data(date)
            incorrect_df = df[df.correct == False] # noqa: E712
            # choose a problem where the response was wrong, if possible
            if (
                incorrect_df.shape[0] > 0
                and 0. <= rand - new_problem_prob < previous_incorrect_prob
            ):
                row_n = np.random.randint(low=0, high=incorrect_df.shape[0])
                operands = map(
                    int,
                    incorrect_df.iloc[row_n].problem.split(';')
                )
                print('Failure from {}'.format(date))
            else:
                # otherwise replay a problem, weighted by its slowest
                # recorded response time
                groups = df.groupby('problem')
                if not groups:
                    continue
                max_time_series = groups.response_time.max()
                total_max_time = sum(max_time_series)
                row_n = np.random.choice(
                    max_time_series.shape[0],
                    p=max_time_series / total_max_time
                )
                operands = map(int, max_time_series.index[row_n].split(';'))
                print('Slow response {} from {}'.format(
                    max_time_series.iloc[row_n],
                    date
                ))
        yield list(operands)
def format_operand(operand):
    """Render *operand* as a right-aligned, sign-prefixed, comma-grouped
    field 10 characters wide."""
    return format(operand, ' >+10,')
def display_arabic_add_subtract_problem(
    screen,
    color,
    operands,
    font
):
    """Draw the operands as a right-aligned Arabic-numeral column with an
    underline row, centered on *screen*.

    Returns the (response_x, response_y, width) triple produced by
    display_centered_text.
    """
    rows = [format_operand(value) for value in operands]
    widest = max(len(row) for row in rows)
    rows.append('-' * widest)
    return display_centered_text(
        screen,
        '\n'.join(rows),
        color,
        font,
    )
def display_abacus_add_subtract_problem(
    screen,
    color,
    separator_bead_color,
    upper_left,
    height,
    operands,
    line_spacing=1.2,
    font=None,
):
    """Draw each operand as a sign glyph followed by right-aligned abacus bead
    columns, then a horizontal answer bar below the last row.

    Returns the (x, y) pixel coordinates at which the rightmost digit of the
    response should be drawn.
    """
    if font is None:
        font = SysFont('Lucida Console', height)
    x_ul, y_ul = upper_left
    # widest operand (in digits) fixes the right alignment of all rows
    max_digits = max(
        len(digitize(operand))
        for operand in operands
    )
    column_width = height_to_width(height)
    max_sign_width = 0.
    for row_n, operand in enumerate(operands):
        x_row = x_ul
        y_row = y_ul + row_n * height * line_spacing
        digits = digitize(abs(operand))
        n_digits = len(digits)
        sign = '+' if operand >= 0 else '-'
        sign_surface = font.render(sign, True, color)
        sign_width = sign_surface.get_width()
        max_sign_width = max(sign_width, max_sign_width)
        screen.blit(
            sign_surface,
            (x_row, y_row)
        )
        # pad short operands on the left so all rows align on the right
        draw_columns(
            screen,
            color,
            (
                x_row
                + sign_width
                + (max_digits - n_digits) * column_width,
                y_row
            ),
            height,
            digits,
            separator_bead_color=separator_bead_color,
        )
    # answer bar spanning the full width, just below the final operand row
    row_n = len(operands)
    x_rect_left = x_ul
    x_rect_right = (
        x_ul
        + max_sign_width
        + max_digits * column_width
    )
    y_rect_top = (
        y_ul
        + height * (row_n * line_spacing
                - .25 * (line_spacing - 1.))
    )
    y_rect_bottom = (
        y_rect_top
        + .25 * (line_spacing - 1.) * height
    )
    polygon(
        screen,
        color,
        [
            (x_rect_left, y_rect_top),
            (x_rect_right, y_rect_top),
            (x_rect_right, y_rect_bottom),
            (x_rect_left, y_rect_bottom),
        ]
    )
    # coordinates of where to draw the rightmost digit
    # of the response
    # NOTE(review): `sign_width` here is left over from the *last* loop
    # iteration (and is undefined if operands is empty), while the bar above
    # uses max_sign_width — confirm whether this asymmetry is intentional.
    return (
        (
            x_ul
            + sign_width
            + max_digits * column_width
        ),
        y_ul + row_n * height * line_spacing
    )
def read_problem(
    operands,
    inter_operand_pause=1.5,
    language='en',
):
    """Speak every operand aloud in order, pausing *inter_operand_pause*
    seconds after each one."""
    utterances = [Speech(str(operand), language) for operand in operands]
    for utterance in utterances:
        utterance.play(None)
        time.sleep(inter_operand_pause)
| [
"numpy.random.choice",
"bead.digitize",
"operation.generate_mixed_problem",
"bead.height_to_width",
"pygame.font.SysFont",
"bead.draw_columns",
"datetime.date.today",
"numpy.ones",
"time.sleep",
"datetime.datetime.now",
"numpy.random.random",
"numpy.random.randint",
"glob.glob",
"pygame.dr... | [((436, 459), 'glob.glob', 'glob.glob', (["('*' + suffix)"], {}), "('*' + suffix)\n", (445, 459), False, 'import glob\n'), ((4699, 4747), 'pygame_utilities.display_centered_text', 'display_centered_text', (['screen', 'text', 'color', 'font'], {}), '(screen, text, color, font)\n', (4720, 4747), False, 'from pygame_utilities import display_centered_text\n'), ((5243, 5266), 'bead.height_to_width', 'height_to_width', (['height'], {}), '(height)\n', (5258, 5266), False, 'from bead import digitize, draw_columns, height_to_width\n'), ((6476, 6620), 'pygame.draw.polygon', 'polygon', (['screen', 'color', '[(x_rect_left, y_rect_top), (x_rect_right, y_rect_top), (x_rect_right,\n y_rect_bottom), (x_rect_left, y_rect_bottom)]'], {}), '(screen, color, [(x_rect_left, y_rect_top), (x_rect_right,\n y_rect_top), (x_rect_right, y_rect_bottom), (x_rect_left, y_rect_bottom)])\n', (6483, 6620), False, 'from pygame.draw import polygon\n'), ((623, 644), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (642, 644), False, 'import datetime\n'), ((2503, 2521), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2519, 2521), True, 'import numpy as np\n'), ((5071, 5104), 'pygame.font.SysFont', 'SysFont', (['"""Lucida Console"""', 'height'], {}), "('Lucida Console', height)\n", (5078, 5104), False, 'from pygame.font import SysFont\n'), ((5782, 5947), 'bead.draw_columns', 'draw_columns', (['screen', 'color', '(x_row + sign_width + (max_digits - n_digits) * column_width, y_row)', 'height', 'digits'], {'separator_bead_color': 'separator_bead_color'}), '(screen, color, (x_row + sign_width + (max_digits - n_digits) *\n column_width, y_row), height, digits, separator_bead_color=\n separator_bead_color)\n', (5794, 5947), False, 'from bead import digitize, draw_columns, height_to_width\n'), ((7214, 7245), 'time.sleep', 'time.sleep', (['inter_operand_pause'], {}), '(inter_operand_pause)\n', (7224, 7245), False, 'import time\n'), ((2227, 2254), 'numpy.ones', 
'np.ones', (['op.OPERATION_COUNT'], {}), '(op.OPERATION_COUNT)\n', (2234, 2254), True, 'import numpy as np\n'), ((2376, 2403), 'numpy.ones', 'np.ones', (['op.OPERATION_COUNT'], {}), '(op.OPERATION_COUNT)\n', (2383, 2403), True, 'import numpy as np\n'), ((2746, 2858), 'operation.generate_mixed_problem', 'op.generate_mixed_problem', (['add_digit_pair_prob', 'addition_prob', 'sub_digit_pair_prob', 'num_digits', 'num_operands'], {}), '(add_digit_pair_prob, addition_prob,\n sub_digit_pair_prob, num_digits, num_operands)\n', (2771, 2858), True, 'import operation as op\n'), ((2983, 3006), 'numpy.random.choice', 'np.random.choice', (['dates'], {}), '(dates)\n', (2999, 3006), True, 'import numpy as np\n'), ((3365, 3417), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'incorrect_df.shape[0]'}), '(low=0, high=incorrect_df.shape[0])\n', (3382, 3417), True, 'import numpy as np\n'), ((3875, 3953), 'numpy.random.choice', 'np.random.choice', (['max_time_series.shape[0]'], {'p': '(max_time_series / total_max_time)'}), '(max_time_series.shape[0], p=max_time_series / total_max_time)\n', (3891, 3953), True, 'import numpy as np\n'), ((5167, 5184), 'bead.digitize', 'digitize', (['operand'], {}), '(operand)\n', (5175, 5184), False, 'from bead import digitize, draw_columns, height_to_width\n'), ((1688, 1711), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1709, 1711), False, 'import datetime\n')] |
import numpy as np
import pickle as pkl
# Maps built while scanning the metadata file: sample id -> subject id, plus
# per-subject lists of sample days and perturbation indicators, in file order.
sample_id_to_subject_id = {}
subject_id_time = {}
subject_id_u = {}
with open("data_diet/metadata.txt", "r") as f:
    for line in f:
        line = line.split()
        # skip the header row
        if "sampleID" in line[0]:
            continue
        sample_id = line[0]
        subject_id = line[2]
        day = float(line[3])
        perturb = float(line[4])
        sample_id_to_subject_id[sample_id] = subject_id
        subject_id_time[subject_id] = subject_id_time.get(subject_id, []) + [day]
        subject_id_u[subject_id] = subject_id_u.get(subject_id, []) + [perturb]
counts = np.loadtxt("data_diet/counts.txt", delimiter="\t", dtype=str, comments="!")
# swap last two rows since there are no zeros in the penultimate row
tmp = counts[-2]
counts[-2] = counts[-1]
counts[-1] = tmp
# drop the leading label column; each remaining column is one sample
counts = counts[:,1:]
subject_id_counts = {}
for row in counts.T:
    sample_id = row[0]
    # rebinding `counts` does not disturb this loop: the iterator over the
    # original array was captured before the rebinding
    counts = row[1:].astype(float)
    subject_id = sample_id_to_subject_id[sample_id]
    counts /= 1000
    # stack this sample's counts onto the subject's matrix (one row per sample)
    if subject_id in subject_id_counts:
        subject_id_counts[subject_id] = np.vstack( (subject_id_counts[subject_id], np.array(counts)) )
    else:
        subject_id_counts[subject_id] = np.array(counts)
Y_diet = []
U_diet = []
T_diet = []
zero_counts = 0
total_counts = 0
for subject_id in sorted(subject_id_counts):
    y = np.array(subject_id_counts[subject_id])
    t = np.array(subject_id_time[subject_id])
    u = np.array(subject_id_u[subject_id])
    u = u.reshape((u.size, 1))
    # track sparsity of the count data (accumulated but not reported here)
    zero_counts += y[y == 0].size
    total_counts += y.size
    Y_diet.append(y)
    U_diet.append(u)
    T_diet.append(t)
# serialize per-subject observations, inputs, and time points
pkl.dump(Y_diet, open("Y_diet.pkl", "wb"))
pkl.dump(U_diet, open("U_diet.pkl", "wb"))
pkl.dump(T_diet, open("T_diet.pkl", "wb"))
"numpy.array",
"numpy.loadtxt"
] | [((611, 686), 'numpy.loadtxt', 'np.loadtxt', (['"""data_diet/counts.txt"""'], {'delimiter': '"""\t"""', 'dtype': 'str', 'comments': '"""!"""'}), "('data_diet/counts.txt', delimiter='\\t', dtype=str, comments='!')\n", (621, 686), True, 'import numpy as np\n'), ((1347, 1386), 'numpy.array', 'np.array', (['subject_id_counts[subject_id]'], {}), '(subject_id_counts[subject_id])\n', (1355, 1386), True, 'import numpy as np\n'), ((1395, 1432), 'numpy.array', 'np.array', (['subject_id_time[subject_id]'], {}), '(subject_id_time[subject_id])\n', (1403, 1432), True, 'import numpy as np\n'), ((1441, 1475), 'numpy.array', 'np.array', (['subject_id_u[subject_id]'], {}), '(subject_id_u[subject_id])\n', (1449, 1475), True, 'import numpy as np\n'), ((1206, 1222), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (1214, 1222), True, 'import numpy as np\n'), ((1136, 1152), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (1144, 1152), True, 'import numpy as np\n')] |
"""
Script for splitting a dataset hdf5 file into training and validation trajectories.
Args:
dataset (str): path to hdf5 dataset
filter_key (str): if provided, split the subset of trajectories
in the file that correspond to this filter key into a training
and validation set of trajectories, instead of splitting the
full set of trajectories
ratio (float): validation ratio, in (0, 1). Defaults to 0.1, which is 10%.
Example usage:
python split_train_val.py --dataset /path/to/demo.hdf5 --ratio 0.1
"""
import argparse
import h5py
import numpy as np
from robomimic.utils.file_utils import create_hdf5_filter_key
def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None):
    """
    Partition the demonstrations in an HDF5 file into train and validation
    sets and record both splits as filter keys in the file.

    Args:
        hdf5_path (str): path to the hdf5 file to load the transitions from

        val_ratio (float): fraction of demonstrations assigned to validation

        filter_key (str): if provided, split only the demonstration keys stored
            under mask/@filter_key instead of the full set of demonstrations
    """
    # collect the sorted demonstration keys that will be split
    with h5py.File(hdf5_path, "r") as f:
        if filter_key is not None:
            print("using filter key: {}".format(filter_key))
            demos = sorted(
                elem.decode("utf-8")
                for elem in np.array(f["mask/{}".format(filter_key)])
            )
        else:
            demos = sorted(f["data"].keys())

    num_demos = len(demos)
    num_val = int(val_ratio * num_demos)

    # random 0/1 mask marking the validation demonstrations
    mask = np.zeros(num_demos)
    mask[:num_val] = 1.
    np.random.shuffle(mask)
    mask = mask.astype(int)
    train_keys = [demos[i] for i in (1 - mask).nonzero()[0]]
    valid_keys = [demos[i] for i in mask.nonzero()[0]]
    print("{} validation demonstrations out of {} total demonstrations.".format(num_val, num_demos))

    # record each split under mask/<name> in the hdf5 file
    name_1 = "train"
    name_2 = "valid"
    if filter_key is not None:
        name_1 = "{}_{}".format(filter_key, name_1)
        name_2 = "{}_{}".format(filter_key, name_2)
    train_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1)
    valid_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2)

    print("Total number of train samples: {}".format(np.sum(train_lengths)))
    print("Average number of train samples {}".format(np.mean(train_lengths)))
    print("Total number of valid samples: {}".format(np.sum(valid_lengths)))
    print("Average number of valid samples {}".format(np.mean(valid_lengths)))
if __name__ == "__main__":
    # command-line interface: dataset path, optional filter key, and the
    # validation ratio; see the module docstring for an example invocation
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset",
        type=str,
        help="path to hdf5 dataset",
    )
    parser.add_argument(
        "--filter_key",
        type=str,
        default=None,
        help="if provided, split the subset of trajectories in the file that correspond to\
            this filter key into a training and validation set of trajectories, instead of\
            splitting the full set of trajectories",
    )
    parser.add_argument(
        "--ratio",
        type=float,
        default=0.1,
        help="validation ratio, in (0, 1)"
    )
    args = parser.parse_args()

    # seed to make sure results are consistent
    np.random.seed(0)
    split_train_val_from_hdf5(args.dataset, val_ratio=args.ratio, filter_key=args.filter_key)
| [
"h5py.File",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"robomimic.utils.file_utils.create_hdf5_filter_key",
"numpy.zeros",
"numpy.mean",
"numpy.random.shuffle"
] | [((1207, 1232), 'h5py.File', 'h5py.File', (['hdf5_path', '"""r"""'], {}), "(hdf5_path, 'r')\n", (1216, 1232), False, 'import h5py\n'), ((1623, 1642), 'numpy.zeros', 'np.zeros', (['num_demos'], {}), '(num_demos)\n', (1631, 1642), True, 'import numpy as np\n'), ((1671, 1694), 'numpy.random.shuffle', 'np.random.shuffle', (['mask'], {}), '(mask)\n', (1688, 1694), True, 'import numpy as np\n'), ((2229, 2316), 'robomimic.utils.file_utils.create_hdf5_filter_key', 'create_hdf5_filter_key', ([], {'hdf5_path': 'hdf5_path', 'demo_keys': 'train_keys', 'key_name': 'name_1'}), '(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=\n name_1)\n', (2251, 2316), False, 'from robomimic.utils.file_utils import create_hdf5_filter_key\n'), ((2332, 2419), 'robomimic.utils.file_utils.create_hdf5_filter_key', 'create_hdf5_filter_key', ([], {'hdf5_path': 'hdf5_path', 'demo_keys': 'valid_keys', 'key_name': 'name_2'}), '(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=\n name_2)\n', (2354, 2419), False, 'from robomimic.utils.file_utils import create_hdf5_filter_key\n'), ((2771, 2796), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2794, 2796), False, 'import argparse\n'), ((3453, 3470), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3467, 3470), True, 'import numpy as np\n'), ((2469, 2490), 'numpy.sum', 'np.sum', (['train_lengths'], {}), '(train_lengths)\n', (2475, 2490), True, 'import numpy as np\n'), ((2547, 2569), 'numpy.mean', 'np.mean', (['train_lengths'], {}), '(train_lengths)\n', (2554, 2569), True, 'import numpy as np\n'), ((2626, 2647), 'numpy.sum', 'np.sum', (['valid_lengths'], {}), '(valid_lengths)\n', (2632, 2647), True, 'import numpy as np\n'), ((2704, 2726), 'numpy.mean', 'np.mean', (['valid_lengths'], {}), '(valid_lengths)\n', (2711, 2726), True, 'import numpy as np\n')] |
"""
CyclicFeedbackSystem class and methods. This is a child class of the RampSystem
class (see ramp_system.py).
Author: <NAME>
"""
from ramp_systems.ramp_system import RampSystem
from ramp_systems.cell import Cell
import itertools
import sympy
from sympy.matrices import zeros as sympy_zeros
import numpy as np
import warnings
DEFAULT_TOLERANCE = 1e-6
class CyclicFeedbackSystem(RampSystem):
    """Ramp system whose network is a cyclic feedback network: every node has
    exactly one outgoing and one incoming edge, so the nodes form one loop."""

    def __init__(self,Network,L,Delta,theta,gamma,tol = DEFAULT_TOLERANCE):
        """
        Requires that the network is a cyclic feedback network.
        Input:
            tol - tolerance used by j_border_crossings()
        """
        RampSystem.__init__(self,Network,L,Delta,theta,gamma)
        self._set_attributes()
        self.tol = tol

    def __repr__(self):
        # Evaluable representation built from the defining parameters.
        return 'CyclicFeedbackSystem(Network = {},L = {},Delta = {},theta = {},gamma = {})'.format(self.Network.specification(),self.L,self.Delta,self.theta,self.gamma)

    def _set_attributes(self):
        """
        Sets the attributes 'cfs_sign', 'rho', 'rho_inv','edge_sign'
        Raises an exception if the network is not a cyclic feedback network.
            cfs_sign - sign of the loop
            rho - (list) target map. j->rho[j] is the unique edge from j
            rho_inv - (list) source map. rho_inv[j]->j is the unique edge into j
            edge_sign - (list) edge_sign[j] is the sign of the edge rho_inv[j]->j
        """
        Network = self.Network
        node = 0
        next_node = -1
        cfs_sign = 1
        N = Network.size()
        rho = [-1]*N
        rho_inv = [-1]*N
        edge_sign = [1]*N
        # walk the loop starting from node 0 until it closes
        while(next_node != 0):
            output = Network.outputs(node)
            rho[node] = output[0]
            if len(output) != 1:
                raise ValueError("CyclicFeedbackSystem requires Network is a cyclic\
                feedback network but at least one node had number of outputs\
                different from 1.")
            next_node = output[0]
            rho_inv[next_node] = node
            if not Network.interaction(node,next_node): #node represses next_node
                cfs_sign *= -1
                edge_sign[next_node] = -1
            node = next_node
        # a -1 left in rho means the walk closed before visiting every node
        if -1 in rho:
            raise ValueError("CyclicFeedbackSystem requires Network is a cyclic\
            feedback network but the nodes don't form a length Network.size() cycle.")
        self.cfs_sign = cfs_sign
        self.rho = rho
        self.rho_inv = rho_inv
        self.edge_sign = edge_sign

    def _get_slope_product(self,eps_func):
        """
        Helper function for pos_loop_bifurcations and neg_loop_bifurcations.
        input:
            eps_func - sympy expression
        output:
            function which takes a number s and returns the slope product M(eps_func(s))
        """
        Delta = sympy.Matrix(self.Delta)
        rho_inv = self.rho_inv
        slope_product_func = 1
        # product over the loop of the ramp slopes Delta/(2*eps)
        for j in range(self.Network.size()):
            slope_product_func *= Delta[j,rho_inv[j]]/(2*eps_func[j,rho_inv[j]])
        s = sympy.symbols('s')
        return sympy.utilities.lambdify(s,slope_product_func,'numpy')

    def get_bifurcations(self,eps_func = None):
        """Dispatch to the positive- or negative-loop bifurcation finder
        depending on the sign of the feedback loop."""
        if self.cfs_sign == 1:
            bifurcations, eps_func = self.pos_loop_bifurcations(eps_func)
        else:
            bifurcations, eps_func = self.neg_loop_bifurcations(eps_func)
        return bifurcations, eps_func

    def neg_loop_bifurcations(self,eps_func = None):
        """
        Finds all bifurcations assuming cfs_sign == -1.
        Input:
            eps_func - (optional) sympy expression giving parameterization of eps
                        assumes eps_func is of the form A*s where A is a matrix
        Output:
            s_vals - (list[list[list] ] of length N+1)
                    s_vals[j] is a list with entries [s,x] so that there is a
                    stability changing border crossing bifurcation at eps_func(s) with
                    x[j] = theta[rho[j],j]+/-eps[rho[j],j] if j<N. s_vals[N] are
                    the values of s so that there is a Hopf bifurcation
            eps_func - same as input. Returned here in case eps_func is not specified
                    in the function call.
        TO DO:
            implement for gammas not equal
        """
        if self.cfs_sign != -1:
            raise ValueError('neg_loop_bifurcations but the loop is positive')
        N = self.Network.size()
        s_vals = [[] for i in range(N+1)]
        if N <= 2:
            # NOTE(review): every other exit returns (s_vals, eps_func); callers
            # that unpack two values will fail on this path — confirm intended.
            return s_vals
        s = sympy.symbols('s')
        crossings,eps_func = self.border_crossings(eps_func)
        slope_product = self._get_slope_product(eps_func)
        if self.gamma.min() == self.gamma.max():
            gamma = self.gamma[0,0]
            # secant condition threshold gamma*sec(pi/N)**N
            secant_condition_val = gamma*np.cos(np.pi/N)**(-N)
            #border crossing bifurcations
            for j in range(N):
                cur_vals = set([crossing for crossing in crossings[j] \
                    if slope_product(crossing[0])>=secant_condition_val])
                s_vals[j].extend(cur_vals)
            #hopf bifurcation in singular domain
            s_hopf = (slope_product(1)/secant_condition_val)**(1/N) #M(eps(s_hopf)) = gamma*sec(pi/N)**N.
            x_hopf = self.singular_equilibrium(eps_func)(s_hopf)
            eps_hopf = eps_func.subs(s,s_hopf)
            if self.in_singular_domain(x_hopf,eps_hopf):
                s_vals[N].append( [s_hopf,x_hopf] )
            return s_vals, eps_func
        else:
            raise NotImplementedError('Inequal gammas not yet implemented for neg_loop_bifurcations.')

    def pos_loop_bifurcations(self,eps_func = None):
        """
        Finds all bifurcations assuming cfs_sign == 1.
        Inputs:
            eps_func - (optional) sympy expression describing the parameterization of eps
        Outputs:
            s_vals - (list[list[list] ] length N+1) s_vals[j] are the values of (s,x) so that there is a
            bifurcation at eps_func(s) where x[j] = theta[rho[j],j] +/- eps[rho[j],j]
            s_vals[N] = set(). len(s_vals) = N+1 for consistency with neg_loop_bifurcations()
            eps_func - same as input. Returned in case eps_func is not specified
                       during function call.
        """
        if self.cfs_sign != 1:
            raise ValueError('pos_loop_bifurcations called but the loop is negative.')
        crossings, eps_func = self.border_crossings(eps_func)
        slope_product = self._get_slope_product(eps_func)
        gamma_product = self.gamma.prod()
        N = self.Network.size()
        s_vals = [[] for i in range(N+1)]
        # a crossing is a bifurcation when the slope product beats gamma product
        for j in range(N):
            cur_vals = [crossing for crossing in crossings[j] if slope_product(crossing[0])>= gamma_product]
            s_vals[j].extend(cur_vals)
        return s_vals, eps_func

    def border_crossings(self,eps_func=None):
        """
        Finds all values of eps so that the system has a border crossing on the
        boundary of loop characteristic cell tau(eps)
        Input:
            eps_func - (optional) sympy expression describing the parameterization of eps
                       default is chosen so that all slopes are the same.
            tol - desired tolerance to pass to the root isolation function
        Output:
            crossings - (list[list]) crossings[j] is the output of
                        j_border_crossings
            eps_func - same as input. Returned here in case eps_func is not specified
                       in function call.
        """
        eps_func,s = self._handle_eps_func(eps_func)
        x_eq = self.singular_equilibrium(eps_func,lambdify = False)
        N = self.Network.size()
        crossings = [[] for i in range(N)]
        for j in range(N):
            crossings[j] = self.j_border_crossings(j,x_eq,eps_func)
        return crossings, eps_func

    def j_border_crossings(self,j,x_eq,eps_func):
        """
        Finds all values of eps so that the system has a border crossing
        of type x[j] = theta[rho[j],j] +/- eps[rho[j],j].
        Input:
            j - node of the network.
            x_eq - sympy expression given by singular_equilibrium(eps_func,lambdify=false)
            eps_func - sympy expression describing the parameterization of eps
            tol - tolerance on width of saddle node containg intervals
        Output:
            crossings - (list[list]) inner lists are of the form [s,x]
                Border crossing occurs approximately at eps = eps_func(s)
                where s is within tol of the true value and the value of
                the equilibrium at the crossing is x
        """
        tol = self.tol
        s = sympy.symbols("s")
        rho = self.rho
        xj = x_eq[j]
        xj = xj.cancel()
        num_xj, denom_xj = sympy.fraction(xj)
        candidates = []
        # crossings of x[j] with theta +/- eps become polynomial roots in s
        for beta in [-1,1]:
            crossing_poly = num_xj - denom_xj*(self.theta[rho[j],j] + beta*eps_func[rho[j],j])
            crossing_poly = sympy.Poly(crossing_poly,domain = 'QQ')
            candidates.extend(crossing_poly.intervals(eps=tol,inf = 0))
        crossings = []
        while len(candidates) != 0:
            #output of poly.intervals is a tuple of the form ((a,b),number)
            #(a,b) is the interval. I could not find information on what
            #number is in the sympy documentation, although I assume it is the
            #number of roots in the interval and should always be 1 unless there
            # is a double root.
            root_int, num_roots = candidates.pop()
            a,b = root_int
            if a == 0:
                continue
            a = float(a)
            b = float(b)
            #check that for i != j, x[i] is in the singular domain
            a_in = self.in_singular_domain(x_eq.subs(s,a),eps_func.subs(s,a),j)
            b_in = self.in_singular_domain(x_eq.subs(s,b),eps_func.subs(s,b),j)
            if a_in or b_in:
                crossings.append( [(a+b)/2, np.array(x_eq.subs(s,(a+b)/2)).astype(np.float64)] )
        return crossings

    def in_singular_domain(self,x,eps,j=None):
        """
        Returns true if for each i != j, x[i] is in the closure of the projection
        of the loop characteristic cell, pi_i(tau(eps)), and false otherwise.
        Input:
            x - (Nx1 numpy array) point in phase space
            eps - (NxN numpy array) value of perturbation parameter
        Output:
            bool
        """
        N = self.Network.size()
        rho = self.rho
        x = np.array(x).reshape([N,1])
        theta_vec = np.zeros([N,1])
        eps_vec = np.zeros([N,1])
        for i in range(N):
            theta_vec[i] = self.theta[rho[i],i]
            eps_vec[i] = eps[rho[i],i]
        # NOTE: stores the vectors on self as a side effect (used for debugging?)
        self._theta_vec = theta_vec
        self._eps_vec = eps_vec
        in_domain = np.logical_and(x >= theta_vec - eps_vec,\
             x <= theta_vec + eps_vec)
        if j is not None:
            in_domain[j] = True
        if sum(in_domain[:,0]) == N:
            return True
        return False

    def _handle_eps_func(self,eps_func):
        """
        Function for dealing with eps_func argument.
        Input:
            eps_func - (sympy expression or None)
        Output:
            eps_func - (sympy expression)
            s - (sympy symbol)
        """
        s = sympy.symbols("s")
        if eps_func is None:
            # default: all slopes equal, eps = Delta*s
            eps_func = sympy.Matrix(self.Delta)*s
        else:
            # rename whatever free symbol the caller used to 's'
            for sym in eps_func.free_symbols:
                eps_func = eps_func.subs(sym,s)
        return eps_func, s

    def singular_equilibrium(self,eps_func=None,lambdify = True):
        """
        Compute the equilibrium that would exist if all ramp functions were
        operating in their linear regime over all of phase space.
        Input:
            eps_func - (sympy expression) function giving a parameterization of eps
                Assumes the function of the form A*s where A is a matrix
        Output:
            x - (function) returns value of equilibrium at eps_func(s)
        """
        eps_func, s = self._handle_eps_func(eps_func)
        N = self.Network.size()
        rho_inv = self.rho_inv
        L_vec = sympy_zeros(N,1)
        Delta_vec = sympy_zeros(N,1)
        theta_vec = sympy_zeros(N,1)
        signed_slope = sympy_zeros(N,N)
        signed_slope_vec = sympy_zeros(N,1)
        for j in range(N):
            L_vec[j] = self.L[j,self.rho_inv[j]]
            Delta_vec[j] = self.Delta[j,self.rho_inv[j]]
            theta_vec[j] = self.theta[j,self.rho_inv[j]]
            signed_slope[j,rho_inv[j]] = self.edge_sign[j]*Delta_vec[j]/(2*eps_func[j,rho_inv[j]])
            signed_slope_vec[j] = signed_slope[j,rho_inv[j]]
        # solve (Gamma - S) x = L + Delta/2 - S*theta for the linear-regime fixed point
        Gamma = sympy.Matrix(np.diagflat(self.gamma))
        A = Gamma - signed_slope
        b = L_vec + 1/2*Delta_vec - signed_slope_vec.multiply_elementwise(theta_vec)
        x = A.LUsolve(b)
        if lambdify:
            return sympy.utilities.lambdify(s,x,"numpy")
        else:
            return x

    def is_weakly_equivalent(self,eps):
        "Overriding RampSystem method. CyclicFeedbackSystems are always weakly equivalent. "
        return True

    def is_essential_node(self,j):
        # node j is essential iff gamma[j]*theta[rho[j],j] lies strictly
        # between L and L+Delta on the edge into j
        rho_inv = self.rho_inv
        rho = self.rho
        if self.L[j,rho_inv[j]] >= self.gamma[j]*self.theta[rho[j],j]:
            return False
        if self.L[j,rho_inv[j]] + self.Delta[j,rho_inv[j]] <= self.gamma[j]*self.theta[rho[j],j]:
            return False
        return True

    def is_essential(self):
        # the system is essential iff every node is essential
        for j in range(self.Network.size()):
            if not self.is_essential_node(j):
                return False
        return True

    def essential_inessential_nodes(self):
        # partition the nodes into (essential, inessential) sets
        essential = set()
        inessential = set()
        for j in range(self.Network.size()):
            if self.is_essential_node(j):
                essential.add(j)
            else:
                inessential.add(j)
        return essential, inessential

    def switch_equilibrium_cells(self):
        """
        Get the switch equilibrium cells.
        :return: List of Cell objects corresponding to the equilibrium cells at eps = 0
        """
        if self.is_essential():
            return self._essential_equilibrium_cells()
        else:
            return self._inessential_equilibrium_cells()

    def _essential_equilibrium_cells(self):
        # equilibrium cells when every node is essential: the singular loop
        # characteristic cell tau, plus (for positive loops) the low and high
        # regular cells mapped back from the normal form
        rho = self.rho
        N = self.Network.size()
        theta = self.theta
        tau = Cell(theta,*[rho[j] for j in range(N)])
        if self.cfs_sign == -1:
            return [tau]
        pi_kappa_L = [(-np.inf,rho[j]) for j in range(N)]
        pi_kappa_H = [(rho[j],np.inf) for j in range(N)]
        pi_kappa_L = self._map_cell_from_normal(pi_kappa_L)
        pi_kappa_H = self._map_cell_from_normal(pi_kappa_H)
        return [tau,Cell(theta,*pi_kappa_L),Cell(theta,*pi_kappa_H)]

    def _inessential_equilibrium_cells(self):
        # single regular equilibrium cell when at least one node is inessential
        rho = self.rho
        rho_inv = self.rho_inv
        theta = self.theta
        gamma = self.gamma
        L = self.L
        U = self.L + self.Delta
        pi_kappa = [() for j in range(self.Network.size())]
        essential,inessential = self.essential_inessential_nodes()
        for j in inessential:
            if L[j,rho_inv[j]] > gamma[j]*theta[rho[j],j]:
                pi_kappa[j] = (rho[j],np.inf)
            else:
                pi_kappa[j] = (-np.inf,rho[j])
        for j in essential:
            # the side of theta is determined by the last inessential node
            # upstream of j
            k = self._get_last_inessential(j)
            if self.cfs_sign == 1:
                if L[k,rho_inv[k]] > gamma[k]*theta[rho[k],k]:
                    pi_kappa[j] = (rho[j],np.inf)
                else:
                    pi_kappa[j] = (-np.inf,rho[j])
            else:
                # NOTE(review): N is not defined in this scope — this branch
                # raises NameError for negative loops; likely should be
                # self.Network.size(). Confirm and fix.
                if (gamma[k]*theta[rho[k],k]<L[k,rho_inv[k]] and \
                    0 <= k and k < j) or \
                    (U[k,rho_inv[k]] < gamma[k]*theta[rho[k],k] and j < k and k<N):
                    pi_kappa[j] = (rho[j],np.inf)
                else:
                    pi_kappa[j] = (-np.inf,rho[j])
        pi_kappa = self._map_cell_from_normal(pi_kappa)
        return [Cell(self.theta,*pi_kappa)]

    def _map_cell_from_normal(self,pi_kappa):
        """
        Maps cell projections for a regular equilibrium cell in the normal form CFS (i.e. all positive
        edges except for perhaps N->1) to a new list of cell projections which
        correspond to an equilibrium cell of this CFS.
        :param pi_kappa: list of tuples of the form (left_index,right_index) where
        left_index and right_index are one of rho[j], np.inf, -np.inf.
        :return: list of tuples of the form (left_index,right_index) where
        left_index and right_index are one of rho[j], np.inf, -np.inf.
        """
        new_pi_kappa = pi_kappa.copy()
        edge_sign = self.edge_sign
        assumed_edge_sign = [1]*self.Network.size()
        rho = self.rho
        if self.cfs_sign == -1:
            #edge rho_inv[0]->0 is repressing in the negative CFS normal form
            assumed_edge_sign[0] = -1
        for j in range(self.Network.size()):
            if not self.is_essential_node(j):
                continue
            # propagate sign flips around the loop, flipping the projection at j
            if assumed_edge_sign[j] != edge_sign[j]:
                assumed_edge_sign[rho[j]] *= -1
                assumed_edge_sign[j] *= -1
                new_pi_kappa[j] = self._get_flipped_pi_j(pi_kappa[j],j)
        return new_pi_kappa

    def _get_flipped_pi_j(self,pi_j,j):
        # reflect a projection interval across theta[rho[j],j]
        rho = self.rho
        if pi_j[0] == -np.inf:
            return (rho[j],np.inf)
        else:
            return (-np.inf,rho[j])

    def _get_last_inessential(self,j):
        """
        Get the first inessential node k of the form k->rho(k+1)->...->j. Assumes
        not self.is_essential()
        """
        rho_inv = self.rho_inv
        k = rho_inv[j]
        while self.is_essential_node(k):
            k = rho_inv[k]
        return k

    def equilibria(self,eps=[]):
        """
        Compute all equilibria, including the singular equilibrium when eps == 0
        :param eps: NxN numpy array.
        :return: list of tuples of the form (x_val, stable). x_val gives the value of
        x at the equilibrium and stable is a boolean which is True when the equilibrium
        is stable and False otherwise.
        """
        if len(eps) == 0:
            eps = self._zero
        eq_list = []
        if self.is_strongly_equivalent(eps):
            eq_cells = self.switch_equilibrium_cells()
            for kappa in eq_cells:
                if kappa.is_regular():
                    cur_eq = self.Lambda(kappa)/self.gamma
                    eq_list.append((cur_eq,True))
                else:
                    # singular equilibrium: theta vector at eps == 0, otherwise
                    # evaluate the linear-regime equilibrium at s = 1
                    if np.array_equal(eps,self._zero):
                        cur_eq = np.array([[self.theta[self.rho[j],j]] for j in range(self.Network.size())])
                    else:
                        eps_func = sympy.Matrix(eps)*sympy.symbols('s')
                        sing_eq_func = self.singular_equilibrium(eps_func)
                        cur_eq = sing_eq_func(1)
                    if self.cfs_sign == 1:
                        stable = False
                    elif self.Network.size() <= 2 and self.cfs_sign == -1:
                        stable = True
                    else:
                        stable = False
                        warnings.warn('Singular equilibria of negative CFSs with \
                        length >=3 are assumed to be unstable, although they \
                        could have stabilized through a Hopf bifurcation.')
                    eq_list.append((cur_eq,stable))
        else:
            raise NotImplementedError('Finding all equilibria is not implemented when (Z,eps) is not strongly equivalent to (Z,0).')
        return eq_list
| [
"sympy.symbols",
"sympy.utilities.lambdify",
"sympy.fraction",
"numpy.logical_and",
"numpy.diagflat",
"sympy.matrices.zeros",
"numpy.zeros",
"sympy.Matrix",
"ramp_systems.ramp_system.RampSystem.__init__",
"ramp_systems.cell.Cell",
"numpy.array",
"numpy.cos",
"numpy.array_equal",
"warnings.... | [((651, 709), 'ramp_systems.ramp_system.RampSystem.__init__', 'RampSystem.__init__', (['self', 'Network', 'L', 'Delta', 'theta', 'gamma'], {}), '(self, Network, L, Delta, theta, gamma)\n', (670, 709), False, 'from ramp_systems.ramp_system import RampSystem\n'), ((2875, 2899), 'sympy.Matrix', 'sympy.Matrix', (['self.Delta'], {}), '(self.Delta)\n', (2887, 2899), False, 'import sympy\n'), ((3101, 3119), 'sympy.symbols', 'sympy.symbols', (['"""s"""'], {}), "('s')\n", (3114, 3119), False, 'import sympy\n'), ((3135, 3191), 'sympy.utilities.lambdify', 'sympy.utilities.lambdify', (['s', 'slope_product_func', '"""numpy"""'], {}), "(s, slope_product_func, 'numpy')\n", (3159, 3191), False, 'import sympy\n'), ((4614, 4632), 'sympy.symbols', 'sympy.symbols', (['"""s"""'], {}), "('s')\n", (4627, 4632), False, 'import sympy\n'), ((8901, 8919), 'sympy.symbols', 'sympy.symbols', (['"""s"""'], {}), "('s')\n", (8914, 8919), False, 'import sympy\n'), ((9016, 9034), 'sympy.fraction', 'sympy.fraction', (['xj'], {}), '(xj)\n', (9030, 9034), False, 'import sympy\n'), ((10816, 10832), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (10824, 10832), True, 'import numpy as np\n'), ((10850, 10866), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (10858, 10866), True, 'import numpy as np\n'), ((11076, 11142), 'numpy.logical_and', 'np.logical_and', (['(x >= theta_vec - eps_vec)', '(x <= theta_vec + eps_vec)'], {}), '(x >= theta_vec - eps_vec, x <= theta_vec + eps_vec)\n', (11090, 11142), True, 'import numpy as np\n'), ((11607, 11625), 'sympy.symbols', 'sympy.symbols', (['"""s"""'], {}), "('s')\n", (11620, 11625), False, 'import sympy\n'), ((12484, 12501), 'sympy.matrices.zeros', 'sympy_zeros', (['N', '(1)'], {}), '(N, 1)\n', (12495, 12501), True, 'from sympy.matrices import zeros as sympy_zeros\n'), ((12521, 12538), 'sympy.matrices.zeros', 'sympy_zeros', (['N', '(1)'], {}), '(N, 1)\n', (12532, 12538), True, 'from sympy.matrices import zeros as 
sympy_zeros\n'), ((12558, 12575), 'sympy.matrices.zeros', 'sympy_zeros', (['N', '(1)'], {}), '(N, 1)\n', (12569, 12575), True, 'from sympy.matrices import zeros as sympy_zeros\n'), ((12598, 12615), 'sympy.matrices.zeros', 'sympy_zeros', (['N', 'N'], {}), '(N, N)\n', (12609, 12615), True, 'from sympy.matrices import zeros as sympy_zeros\n'), ((12642, 12659), 'sympy.matrices.zeros', 'sympy_zeros', (['N', '(1)'], {}), '(N, 1)\n', (12653, 12659), True, 'from sympy.matrices import zeros as sympy_zeros\n'), ((9210, 9248), 'sympy.Poly', 'sympy.Poly', (['crossing_poly'], {'domain': '"""QQ"""'}), "(crossing_poly, domain='QQ')\n", (9220, 9248), False, 'import sympy\n'), ((13047, 13070), 'numpy.diagflat', 'np.diagflat', (['self.gamma'], {}), '(self.gamma)\n', (13058, 13070), True, 'import numpy as np\n'), ((13255, 13294), 'sympy.utilities.lambdify', 'sympy.utilities.lambdify', (['s', 'x', '"""numpy"""'], {}), "(s, x, 'numpy')\n", (13279, 13294), False, 'import sympy\n'), ((15149, 15173), 'ramp_systems.cell.Cell', 'Cell', (['theta', '*pi_kappa_L'], {}), '(theta, *pi_kappa_L)\n', (15153, 15173), False, 'from ramp_systems.cell import Cell\n'), ((15173, 15197), 'ramp_systems.cell.Cell', 'Cell', (['theta', '*pi_kappa_H'], {}), '(theta, *pi_kappa_H)\n', (15177, 15197), False, 'from ramp_systems.cell import Cell\n'), ((16436, 16463), 'ramp_systems.cell.Cell', 'Cell', (['self.theta', '*pi_kappa'], {}), '(self.theta, *pi_kappa)\n', (16440, 16463), False, 'from ramp_systems.cell import Cell\n'), ((10769, 10780), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (10777, 10780), True, 'import numpy as np\n'), ((11678, 11702), 'sympy.Matrix', 'sympy.Matrix', (['self.Delta'], {}), '(self.Delta)\n', (11690, 11702), False, 'import sympy\n'), ((4878, 4895), 'numpy.cos', 'np.cos', (['(np.pi / N)'], {}), '(np.pi / N)\n', (4884, 4895), True, 'import numpy as np\n'), ((19029, 19060), 'numpy.array_equal', 'np.array_equal', (['eps', 'self._zero'], {}), '(eps, self._zero)\n', (19043, 19060), True, 
'import numpy as np\n'), ((19231, 19248), 'sympy.Matrix', 'sympy.Matrix', (['eps'], {}), '(eps)\n', (19243, 19248), False, 'import sympy\n'), ((19249, 19267), 'sympy.symbols', 'sympy.symbols', (['"""s"""'], {}), "('s')\n", (19262, 19267), False, 'import sympy\n'), ((19676, 19903), 'warnings.warn', 'warnings.warn', (['"""Singular equilibria of negative CFSs with length >=3 are assumed to be unstable, although they could have stabilized through a Hopf bifurcation."""'], {}), "(\n 'Singular equilibria of negative CFSs with length >=3 are assumed to be unstable, although they could have stabilized through a Hopf bifurcation.'\n )\n", (19689, 19903), False, 'import warnings\n')] |
import numpy as np
import logging
import configargparse as argparse
import glob
from skimage.external import tifffile
import matplotlib.pyplot as plt
import matplotlib
from math import ceil
import copy
def error_visualisation(options):
    '''
    Visualise the false negatives signalled by the CTC evaluation tool in TRA_log.txt.
    For this we show raw data, segmentation, the segmentation after tracking and the groundtruth.
    Note! Everything has to be in CTC-Format!!
    The false negatives (from tracking) will be shown in red in the groundtruth and in the segmentation.
    The false positives from the segmentation will appear pink in the segmentation.
    The mergers are shown in purple on the tracking image.
    The output images for all problematic frames will be written in the ouptut directory. If this directory
    is not specified, then the results open in normal pyplot pop-up windows.

    :param options: parsed command-line namespace; must provide ``txt_path``,
        ``raw_path``, ``gt_path``, ``seg_path``, ``tra_path`` and ``output``.
    '''
    logger = logging.getLogger('error_visualisation.py')
    if options.output is None:
        logger.info("Figures will open in pop-up windows. Specify an output path if you want to save them all.")
    with open(options.txt_path) as txt:
        content = txt.readlines()
    # Collect the problematic frames and the merged labels per frame from the
    # CTC TRA_log.txt. Lines after the first "[T=" marker belong to another
    # report section and are ignored.
    # NOTE(review): frame ids are parsed from line[2:4], i.e. at most two
    # digits -- sequences with >99 frames would need a more robust parser.
    problematic = []
    merger = {}
    for line in content:
        if "[T=" in line:
            break
        if "T=" in line:
            # Frame key in CTC file-name style, e.g. '*007'.
            frame_key = '*' + line[2:4].strip().zfill(3)
            if ' Label=' in line:
                merger.setdefault(frame_key, []).append(line[-3:-1].strip('='))
            problematic.append(frame_key)
    for frame in sorted(set(problematic)):
        logger.debug("Looking at frame {}".format(frame))
        rawim = tifffile.imread(glob.glob(options.raw_path+frame+'.tif'))
        gtim = tifffile.imread(glob.glob(options.gt_path+frame+'.tif'))
        segim = tifffile.imread(glob.glob(options.seg_path+frame+'.tif'))
        traim = tifffile.imread(glob.glob(options.tra_path+frame+'.tif'))
        # generating a suitable colormap for showing errors of the tracking result
        colors = np.array([[0,0,0]]) # background
        for grayval in range(1,traim.max()+1):
            colors = np.vstack((colors, [0,float(grayval)/traim.max(), 1-float(grayval)/traim.max()]))
        tra_colors = colors.copy()
        # visualise mergers as purple blobs in the tracking result
        if frame in merger:
            for label in merger[frame]:
                tra_colors[(int(label))] = [0.65, 0, 1]
        tracolormap = matplotlib.colors.ListedColormap(tra_colors, N=traim.max()+1)
        # groundtruth colormap: groundtruth objects missing from the tracking
        # result (false negatives) are painted red, matched objects reuse the
        # color of their tracked counterpart.
        err_colors = np.array([[0,0,0]])
        for grayval in range(1, gtim.max()+1):
            position = np.where(gtim == grayval)
            if position[0].shape[0]==0:
                err_colors = np.vstack((err_colors, [1,0,0]))
            else:
                # The majority (median) tracking label under the GT mask
                # decides which tracked object this GT object maps to.
                act_grayval = int(ceil(np.median(traim[position])))
                if act_grayval == 0:
                    err_colors = np.vstack((err_colors, [1,0,0])) # false negatives in tracking result
                else:
                    err_colors = np.vstack((err_colors, colors[act_grayval]))
        gtcolormap = matplotlib.colors.ListedColormap(err_colors, N=gtim.max()+1)
        # creating colormap fot the segmentation: segments without a matching
        # groundtruth object (false positives) appear pink.
        seg_colors = np.array([[0,0,0]])
        for grayval in range(1, segim.max()+1):
            position = np.where(segim == grayval)
            if position[0].shape[0]==0:
                seg_colors = np.vstack((seg_colors, [1,0.8,0.9]))
            else:
                act_grayval = int(ceil(np.median(gtim[position])))
                if act_grayval == 0:
                    seg_colors = np.vstack((seg_colors, [1,0.8,0.9])) # false positives will appear pink
                else:
                    seg_colors = np.vstack((seg_colors, err_colors[act_grayval]))
        segcolormap = matplotlib.colors.ListedColormap(seg_colors, N=gtim.max()+1)
        # 2x2 panel: raw / segmentation / tracking / groundtruth
        fig, axes = plt.subplots(nrows=2, ncols=2)
        # cmap can/should be adjusted here. 'flag' is best for Fluo-SIM 01
        name, raw, axraw= tifffile.imshow(rawim, figure=fig, subplot=221, title='raw image FRAME {}'.format(frame.strip('*')), cmap='flag')
        axraw.colorbar.remove()
        raw.axis('off')
        name, seg, axseg = tifffile.imshow(segim, figure=fig, subplot=222, title='segmentation', cmap=segcolormap, vmin=0, vmax=gtim.max()+1)
        axseg.colorbar.remove()
        seg.axis('off')
        name, tra, axtra = tifffile.imshow(traim, figure=fig, subplot=223, title='tracking result', cmap=tracolormap, vmin=0, vmax=traim.max()+1)
        axtra.colorbar.remove()
        tra.axis('off')
        name, gt, axgt = tifffile.imshow(gtim, figure=fig, subplot=224, title='groundtruth', cmap=gtcolormap, vmin=0, vmax=gtim.max()+1)
        axgt.colorbar.remove()
        gt.axis('off')
        fig.tight_layout()
        plt.subplots_adjust(hspace=0.2)
        if options.output is None:
            plt.show()
        else:
            plt.savefig(options.output+'/error_frame{}'.format(frame.strip('*')))
            plt.close()
    if options.output is not None:
        logger.info("Done writing the images. You can open them to view the errors.")
if __name__ == '__main__':
    # Entry point: visualise the errors by viewing raw data, groundtruth
    # and segmentation (everything must be in CTC format).
    cli = argparse.ArgumentParser(
        description='Visualise the erros by viewing raw data, groundtruth segmentation and tracking result.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument('--res-txt-file', required=True, type=str, dest='txt_path',
                     help='filname of the ctc txt result file')
    cli.add_argument('--raw-path', type=str, dest='raw_path',
                     help='path of the raw image')
    cli.add_argument('--gt-path', required=True, type=str, dest='gt_path',
                     help='path to the groundtruth')
    cli.add_argument('--seg-path', required=True, type=str, dest='seg_path',
                     help='path to the segmentation')
    cli.add_argument('--tra-path', required=True, type=str, dest='tra_path',
                     help='path to the segmentation')
    cli.add_argument('--output', type=str, dest='output',
                     help='path of the output')
    cli.add_argument("--verbose", dest='verbose', action='store_true', default=False)
    # Tolerate unknown flags so a shared config file can carry extra options.
    opts, extras = cli.parse_known_args()
    logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.INFO)
    logging.debug("Ignoring unknown parameters: {}".format(extras))
    error_visualisation(opts)
| [
"matplotlib.pyplot.show",
"logging.basicConfig",
"numpy.median",
"matplotlib.pyplot.close",
"numpy.where",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.subplots_adjust",
"numpy.vstack",
"configargparse.ArgumentParser",
"matplotlib.pyplot.subplots",
"logging.getLogger"
] | [((5508, 5699), 'configargparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualise the erros by viewing raw data, groundtruth segmentation and tracking result."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Visualise the erros by viewing raw data, groundtruth segmentation and tracking result.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (5531, 5699), True, 'import configargparse as argparse\n'), ((2152, 2173), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2160, 2173), True, 'import numpy as np\n'), ((2715, 2736), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2723, 2736), True, 'import numpy as np\n'), ((3412, 3433), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (3420, 3433), True, 'import numpy as np\n'), ((4071, 4101), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (4083, 4101), True, 'import matplotlib.pyplot as plt\n'), ((5003, 5034), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.2)'}), '(hspace=0.2)\n', (5022, 5034), True, 'import matplotlib.pyplot as plt\n'), ((6696, 6736), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (6715, 6736), False, 'import logging\n'), ((6755, 6794), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (6774, 6794), False, 'import logging\n'), ((1789, 1833), 'glob.glob', 'glob.glob', (["(options.raw_path + frame + '.tif')"], {}), "(options.raw_path + frame + '.tif')\n", (1798, 1833), False, 'import glob\n'), ((1862, 1905), 'glob.glob', 'glob.glob', (["(options.gt_path + frame + '.tif')"], {}), "(options.gt_path + frame + '.tif')\n", (1871, 1905), False, 'import glob\n'), ((1935, 1979), 'glob.glob', 'glob.glob', (["(options.seg_path + frame + '.tif')"], {}), 
"(options.seg_path + frame + '.tif')\n", (1944, 1979), False, 'import glob\n'), ((2009, 2053), 'glob.glob', 'glob.glob', (["(options.tra_path + frame + '.tif')"], {}), "(options.tra_path + frame + '.tif')\n", (2018, 2053), False, 'import glob\n'), ((2805, 2830), 'numpy.where', 'np.where', (['(gtim == grayval)'], {}), '(gtim == grayval)\n', (2813, 2830), True, 'import numpy as np\n'), ((3503, 3529), 'numpy.where', 'np.where', (['(segim == grayval)'], {}), '(segim == grayval)\n', (3511, 3529), True, 'import numpy as np\n'), ((5085, 5095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5093, 5095), True, 'import matplotlib.pyplot as plt\n'), ((5204, 5215), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5213, 5215), True, 'import matplotlib.pyplot as plt\n'), ((974, 1017), 'logging.getLogger', 'logging.getLogger', (['"""error_visualisation.py"""'], {}), "('error_visualisation.py')\n", (991, 1017), False, 'import logging\n'), ((1669, 1712), 'logging.getLogger', 'logging.getLogger', (['"""error_visualisation.py"""'], {}), "('error_visualisation.py')\n", (1686, 1712), False, 'import logging\n'), ((2900, 2934), 'numpy.vstack', 'np.vstack', (['(err_colors, [1, 0, 0])'], {}), '((err_colors, [1, 0, 0]))\n', (2909, 2934), True, 'import numpy as np\n'), ((3599, 3637), 'numpy.vstack', 'np.vstack', (['(seg_colors, [1, 0.8, 0.9])'], {}), '((seg_colors, [1, 0.8, 0.9]))\n', (3608, 3637), True, 'import numpy as np\n'), ((5260, 5303), 'logging.getLogger', 'logging.getLogger', (['"""error_visualisation.py"""'], {}), "('error_visualisation.py')\n", (5277, 5303), False, 'import logging\n'), ((3089, 3123), 'numpy.vstack', 'np.vstack', (['(err_colors, [1, 0, 0])'], {}), '((err_colors, [1, 0, 0]))\n', (3098, 3123), True, 'import numpy as np\n'), ((3214, 3258), 'numpy.vstack', 'np.vstack', (['(err_colors, colors[act_grayval])'], {}), '((err_colors, colors[act_grayval]))\n', (3223, 3258), True, 'import numpy as np\n'), ((3791, 3829), 'numpy.vstack', 'np.vstack', 
(['(seg_colors, [1, 0.8, 0.9])'], {}), '((seg_colors, [1, 0.8, 0.9]))\n', (3800, 3829), True, 'import numpy as np\n'), ((3918, 3966), 'numpy.vstack', 'np.vstack', (['(seg_colors, err_colors[act_grayval])'], {}), '((seg_colors, err_colors[act_grayval]))\n', (3927, 3966), True, 'import numpy as np\n'), ((2990, 3016), 'numpy.median', 'np.median', (['traim[position]'], {}), '(traim[position])\n', (2999, 3016), True, 'import numpy as np\n'), ((3693, 3718), 'numpy.median', 'np.median', (['gtim[position]'], {}), '(gtim[position])\n', (3702, 3718), True, 'import numpy as np\n')] |
import functools
import numpy as np
import pytest
import tensorflow as tf
from tests.layers.flows.helper import invertible_flow_standard_check
from tfsnippet.layers import CouplingLayer, conv2d
from tfsnippet.ops import (flatten_to_ndims,
unflatten_from_ndims,
transpose_conv2d_channels_last_to_x,
transpose_conv2d_channels_x_to_last)
def naive_coupling_layer(shift_and_scale_fn, x,
                         axis=-1, value_ndims=1, secondary=False,
                         scale_type='linear', sigmoid_scale_bias=2.,
                         reverse=False):
    """NumPy reference implementation of a coupling layer.

    The input is split in halves along `axis`; one half conditions a
    shift/scale that is applied to the other half.  Returns ``(y, log_det)``
    where ``log_det`` is summed over the last `value_ndims` axes.
    """
    assert(axis < 0)
    assert(axis >= -value_ndims)

    # Split into the conditioning half and the half to be transformed.
    split_at = x.shape[axis] // 2
    cond, trans = np.split(x, [split_at], axis)
    if secondary:
        cond, trans = trans, cond
    trans_size = trans.shape[axis]

    shift, scale = shift_and_scale_fn(cond, trans_size)
    assert((scale_type is None) == (scale is None))

    def stable_sigmoid(t):
        t = t + sigmoid_scale_bias
        out = np.zeros_like(t)
        neg = t < 0
        pos = t >= 0
        # Branch on sign to avoid overflow in exp():
        #   t < 0:  sigmoid(t) = exp(t) / (1 + exp(t))
        #   t >= 0: sigmoid(t) = 1 / (1 + exp(-t))
        e = np.exp(t[neg])
        out[neg] = e / (1. + e)
        e = np.exp(-t[pos])
        out[pos] = 1. / (1. + e)
        return out

    out_cond = cond
    if scale_type is None:
        # Pure shift coupling: the Jacobian determinant is identically 1.
        out_trans = trans - shift if reverse else trans + shift
        log_det = np.zeros_like(x)
    else:
        activation = {
            'exp': np.exp,
            'sigmoid': stable_sigmoid,
            'linear': (lambda t: t),
        }[scale_type]
        scale = activation(scale)
        if reverse:
            out_trans = trans / scale - shift
            log_det = -np.log(np.abs(scale))
        else:
            out_trans = (trans + shift) * scale
            log_det = np.log(np.abs(scale))

    if secondary:
        out_cond, out_trans = out_trans, out_cond
    y = np.concatenate([out_cond, out_trans], axis=axis)
    reduce_axes = tuple(range(-value_ndims, 0))
    return y, np.sum(log_det, axis=reduce_axes)
class CouplingLayerTestCase(tf.test.TestCase):
    """Tests for :class:`CouplingLayer`.

    Each test computes the expected output and log-determinant with the
    NumPy reference implementation ``naive_coupling_layer`` and compares
    it against the TensorFlow layer for several scale types, axes and
    ``value_ndims``; invertibility is checked via
    ``invertible_flow_standard_check``.
    """

    def test_coupling_layer(self):
        """Coupling layer with a dense (matmul-based) shift-and-scale net."""
        assert_allclose = functools.partial(
            np.testing.assert_allclose, rtol=1e-5)
        np.random.seed(1234)
        # Two kernel/shift pairs: one for each possible size of the
        # transformed half (2 or 3 features out of the 5).
        kernel1 = np.random.normal(size=[3, 2]).astype(np.float32)
        kernel2 = np.random.normal(size=[2, 3]).astype(np.float32)
        shift1 = np.random.normal(size=[2]).astype(np.float32)
        shift2 = np.random.normal(size=[3]).astype(np.float32)
        def shift_and_scale_fn(x1, n2, no_scale=False):
            # Pick the kernel/shift matching the size of the transformed half.
            kernel = kernel1 if n2 == 2 else kernel2
            shift = tf.convert_to_tensor(shift1 if n2 == 2 else shift2)
            assert(kernel.shape[-1] == n2)
            assert(shift.shape[-1] == n2)
            x1, s1, s2 = flatten_to_ndims(x1, 2)
            scale = unflatten_from_ndims(tf.matmul(x1, kernel), s1, s2)
            # Broadcast the per-feature shift to the shape of the scale.
            shift = shift + tf.zeros_like(scale, dtype=shift.dtype)
            if no_scale:
                scale = None
            return shift, scale
        def shift_and_scale_numpy_fn(x1, n2, no_scale=False):
            # NumPy view of `shift_and_scale_fn`, evaluated in the
            # enclosing session (closure over `sess` below).
            a, b = shift_and_scale_fn(x1, n2, no_scale=no_scale)
            if b is None:
                a = sess.run(a)
            else:
                a, b = sess.run([a, b])
            return a, b
        with self.test_session() as sess:
            # test linear scale, primary
            x = np.random.normal(size=[3, 4, 5]).astype(np.float32)
            x_ph = tf.placeholder(dtype=tf.float32, shape=[None, None, 5])
            axis = -1
            value_ndims = 1
            y_ans, log_det_ans = naive_coupling_layer(
                shift_and_scale_numpy_fn, x, axis=axis,
                value_ndims=value_ndims, secondary=False, scale_type='linear',
                reverse=False
            )
            layer = CouplingLayer(
                shift_and_scale_fn, axis=axis, value_ndims=value_ndims,
                secondary=False, scale_type='linear'
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x})
            # test exp scale, primary
            axis = -1
            value_ndims = 2
            y_ans, log_det_ans = naive_coupling_layer(
                shift_and_scale_numpy_fn, x, axis=axis,
                value_ndims=value_ndims, secondary=False, scale_type='exp',
                reverse=False
            )
            layer = CouplingLayer(
                shift_and_scale_fn, axis=axis, value_ndims=value_ndims,
                secondary=False, scale_type='exp'
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x})
            # test sigmoid scale, secondary
            # NOTE(review): the comment above says "secondary" but
            # secondary=False is passed below -- possibly a stale label.
            sigmoid_scale_bias = np.exp(1)
            axis = -1
            value_ndims = 1
            y_ans, log_det_ans = naive_coupling_layer(
                shift_and_scale_numpy_fn, x, axis=axis,
                value_ndims=value_ndims, secondary=False, scale_type='sigmoid',
                sigmoid_scale_bias=sigmoid_scale_bias, reverse=False
            )
            layer = CouplingLayer(
                shift_and_scale_fn, axis=axis, value_ndims=value_ndims,
                secondary=False, scale_type='sigmoid',
                sigmoid_scale_bias=sigmoid_scale_bias
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x})
            # test None scale, primary
            axis = -1
            value_ndims = 1
            y_ans, log_det_ans = naive_coupling_layer(
                functools.partial(shift_and_scale_numpy_fn, no_scale=True),
                x, axis=axis,
                value_ndims=value_ndims, secondary=False, scale_type=None,
                reverse=False
            )
            layer = CouplingLayer(
                functools.partial(shift_and_scale_fn, no_scale=True),
                axis=axis, value_ndims=value_ndims,
                secondary=False, scale_type=None
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x})
            # test None scale, secondary
            axis = -1
            value_ndims = 3
            y_ans, log_det_ans = naive_coupling_layer(
                functools.partial(shift_and_scale_numpy_fn, no_scale=True),
                x, axis=axis,
                value_ndims=value_ndims, secondary=True, scale_type=None,
                reverse=False
            )
            layer = CouplingLayer(
                functools.partial(shift_and_scale_fn, no_scale=True),
                axis=axis, value_ndims=value_ndims,
                secondary=True, scale_type=None
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x})

    def test_coupling_layer_with_conv2d(self):
        """Coupling layer with a conv2d shift-and-scale net, NHWC and NCHW."""
        assert_allclose = functools.partial(
            np.testing.assert_allclose, atol=1e-5, rtol=5e-4)
        np.random.seed(1234)
        kernel1 = np.random.normal(size=[3, 3, 5, 6]).astype(np.float32)
        kernel2 = np.random.normal(size=[3, 3, 6, 5]).astype(np.float32)
        shift1 = np.random.normal(size=[6]).astype(np.float32)
        shift2 = np.random.normal(size=[5]).astype(np.float32)
        def shift_and_scale_fn(x1, n2, no_scale=False, channels_last=True):
            kernel = kernel1 if n2 == 6 else kernel2
            shift = tf.convert_to_tensor(shift1 if n2 == 6 else shift2)
            assert (kernel.shape[-1] == n2)
            assert (shift.shape[-1] == n2)
            # The conv always runs channels-last; transpose in and out so the
            # caller may feed either NHWC or NCHW tensors.
            x1 = transpose_conv2d_channels_x_to_last(
                x1, channels_last=channels_last
            )
            scale = conv2d(x1, n2, (3, 3), use_bias=False, kernel=kernel,
                           channels_last=True)
            shift = shift + tf.zeros_like(scale, dtype=shift.dtype)
            scale = transpose_conv2d_channels_last_to_x(scale, channels_last)
            shift = transpose_conv2d_channels_last_to_x(shift, channels_last)
            if no_scale:
                scale = None
            return shift, scale
        def shift_and_scale_numpy_fn(x1, n2, no_scale=False,
                                     channels_last=True):
            # NumPy view of `shift_and_scale_fn`, evaluated in the
            # enclosing session (closure over `sess` below).
            a, b = shift_and_scale_fn(x1, n2, no_scale, channels_last)
            if b is None:
                a = sess.run(a)
            else:
                a, b = sess.run([a, b])
            return a, b
        with self.test_session() as sess:
            # test exp scale, primary, NHWC
            x = np.random.normal(size=[11, 13, 32, 31, 11]).astype(np.float32)
            x_ph = tf.placeholder(dtype=tf.float32,
                                  shape=[None, None, None, None, 11])
            axis = -1
            value_ndims = 3
            y_ans, log_det_ans = naive_coupling_layer(
                shift_and_scale_numpy_fn, x, axis=axis,
                value_ndims=value_ndims, secondary=False, scale_type='exp',
                reverse=False
            )
            layer = CouplingLayer(
                shift_and_scale_fn, axis=axis, value_ndims=value_ndims,
                secondary=False, scale_type='exp'
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x}, rtol=5e-4,
                atol=1e-5
            )
            # test sigmoid scale, secondary, NCHW
            x = np.transpose(x, [0, 1, 4, 2, 3])
            x_ph = tf.placeholder(dtype=tf.float32,
                                  shape=[None, None, 11, None, None])
            axis = -3
            value_ndims = 3
            y_ans, log_det_ans = naive_coupling_layer(
                functools.partial(shift_and_scale_numpy_fn,
                                  channels_last=False),
                x, axis=axis,
                value_ndims=value_ndims, secondary=True, scale_type='sigmoid',
                reverse=False
            )
            layer = CouplingLayer(
                functools.partial(shift_and_scale_fn, channels_last=False),
                axis=axis, value_ndims=value_ndims,
                secondary=True, scale_type='sigmoid'
            )
            y, log_det = layer.transform(x_ph)
            y_out, log_det_out = sess.run([y, log_det], feed_dict={x_ph: x})
            assert_allclose(y_out, y_ans)
            assert_allclose(log_det_out, log_det_ans)
            invertible_flow_standard_check(
                self, layer, sess, x_ph, feed_dict={x_ph: x}, rtol=5e-4,
                atol=1e-5
            )

    def test_errors(self):
        """Validation errors: bad feature axis and scale/scale_type mismatch."""
        def shift_and_scale_fn(x1, n2):
            return tf.constant(0.), None
        with pytest.raises(ValueError, match='The feature axis of `input` must '
                                         'be at least 2'):
            layer = CouplingLayer(shift_and_scale_fn, axis=-1, value_ndims=1)
            _ = layer.apply(tf.zeros([2, 1]))
        with pytest.raises(RuntimeError, match='`scale_type` != None, but no '
                                           'scale is computed'):
            layer = CouplingLayer(shift_and_scale_fn, scale_type='linear')
            _ = layer.apply(tf.zeros([2, 3]))
        # Redefine the network so it now computes a scale as well.
        def shift_and_scale_fn(x1, n2):
            return tf.constant(0.), tf.constant(0.)
        with pytest.raises(RuntimeError, match='`scale_type` == None, but '
                                           'scale is computed'):
            layer = CouplingLayer(shift_and_scale_fn, scale_type=None)
            _ = layer.apply(tf.zeros([2, 3]))
| [
"numpy.random.seed",
"numpy.abs",
"tensorflow.zeros_like",
"tensorflow.matmul",
"numpy.exp",
"numpy.random.normal",
"tests.layers.flows.helper.invertible_flow_standard_check",
"numpy.zeros_like",
"numpy.transpose",
"tensorflow.placeholder",
"pytest.raises",
"tfsnippet.layers.CouplingLayer",
... | [((770, 793), 'numpy.split', 'np.split', (['x', '[n1]', 'axis'], {}), '(x, [n1], axis)\n', (778, 793), True, 'import numpy as np\n'), ((2194, 2229), 'numpy.concatenate', 'np.concatenate', (['[y1, y2]'], {'axis': 'axis'}), '([y1, y2], axis=axis)\n', (2208, 2229), True, 'import numpy as np\n'), ((1158, 1174), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (1171, 1174), True, 'import numpy as np\n'), ((1289, 1313), 'numpy.exp', 'np.exp', (['t[neg_t_indices]'], {}), '(t[neg_t_indices])\n', (1295, 1313), True, 'import numpy as np\n'), ((1479, 1504), 'numpy.exp', 'np.exp', (['(-t[pos_t_indices])'], {}), '(-t[pos_t_indices])\n', (1485, 1504), True, 'import numpy as np\n'), ((2125, 2141), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2138, 2141), True, 'import numpy as np\n'), ((2430, 2487), 'functools.partial', 'functools.partial', (['np.testing.assert_allclose'], {'rtol': '(1e-05)'}), '(np.testing.assert_allclose, rtol=1e-05)\n', (2447, 2487), False, 'import functools\n'), ((2509, 2529), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (2523, 2529), True, 'import numpy as np\n'), ((8319, 8389), 'functools.partial', 'functools.partial', (['np.testing.assert_allclose'], {'atol': '(1e-05)', 'rtol': '(0.0005)'}), '(np.testing.assert_allclose, atol=1e-05, rtol=0.0005)\n', (8336, 8389), False, 'import functools\n'), ((8409, 8429), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (8423, 8429), True, 'import numpy as np\n'), ((2920, 2971), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(shift1 if n2 == 2 else shift2)'], {}), '(shift1 if n2 == 2 else shift2)\n', (2940, 2971), True, 'import tensorflow as tf\n'), ((3082, 3105), 'tfsnippet.ops.flatten_to_ndims', 'flatten_to_ndims', (['x1', '(2)'], {}), '(x1, 2)\n', (3098, 3105), False, 'from tfsnippet.ops import flatten_to_ndims, unflatten_from_ndims, transpose_conv2d_channels_last_to_x, transpose_conv2d_channels_x_to_last\n'), ((3771, 3826), 
'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, None, 5]'}), '(dtype=tf.float32, shape=[None, None, 5])\n', (3785, 3826), True, 'import tensorflow as tf\n'), ((4133, 4244), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'axis': 'axis', 'value_ndims': 'value_ndims', 'secondary': '(False)', 'scale_type': '"""linear"""'}), "(shift_and_scale_fn, axis=axis, value_ndims=value_ndims,\n secondary=False, scale_type='linear')\n", (4146, 4244), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((4521, 4597), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x})\n', (4551, 4597), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((4956, 5064), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'axis': 'axis', 'value_ndims': 'value_ndims', 'secondary': '(False)', 'scale_type': '"""exp"""'}), "(shift_and_scale_fn, axis=axis, value_ndims=value_ndims,\n secondary=False, scale_type='exp')\n", (4969, 5064), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((5341, 5417), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x})\n', (5371, 5417), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((5513, 5522), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (5519, 5522), True, 'import numpy as np\n'), ((5869, 6025), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'axis': 'axis', 'value_ndims': 'value_ndims', 'secondary': '(False)', 'scale_type': '"""sigmoid"""', 'sigmoid_scale_bias': 'sigmoid_scale_bias'}), "(shift_and_scale_fn, axis=axis, value_ndims=value_ndims,\n 
secondary=False, scale_type='sigmoid', sigmoid_scale_bias=\n sigmoid_scale_bias)\n", (5882, 6025), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((6313, 6389), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x})\n', (6343, 6389), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((7232, 7308), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x})\n', (7262, 7308), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((8151, 8227), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x})\n', (8181, 8227), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((8852, 8903), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(shift1 if n2 == 6 else shift2)'], {}), '(shift1 if n2 == 6 else shift2)\n', (8872, 8903), True, 'import tensorflow as tf\n'), ((9009, 9077), 'tfsnippet.ops.transpose_conv2d_channels_x_to_last', 'transpose_conv2d_channels_x_to_last', (['x1'], {'channels_last': 'channels_last'}), '(x1, channels_last=channels_last)\n', (9044, 9077), False, 'from tfsnippet.ops import flatten_to_ndims, unflatten_from_ndims, transpose_conv2d_channels_last_to_x, transpose_conv2d_channels_x_to_last\n'), ((9128, 9201), 'tfsnippet.layers.conv2d', 'conv2d', (['x1', 'n2', '(3, 3)'], {'use_bias': '(False)', 'kernel': 'kernel', 'channels_last': '(True)'}), '(x1, n2, (3, 3), use_bias=False, kernel=kernel, channels_last=True)\n', (9134, 9201), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((9317, 
9374), 'tfsnippet.ops.transpose_conv2d_channels_last_to_x', 'transpose_conv2d_channels_last_to_x', (['scale', 'channels_last'], {}), '(scale, channels_last)\n', (9352, 9374), False, 'from tfsnippet.ops import flatten_to_ndims, unflatten_from_ndims, transpose_conv2d_channels_last_to_x, transpose_conv2d_channels_x_to_last\n'), ((9395, 9452), 'tfsnippet.ops.transpose_conv2d_channels_last_to_x', 'transpose_conv2d_channels_last_to_x', (['shift', 'channels_last'], {}), '(shift, channels_last)\n', (9430, 9452), False, 'from tfsnippet.ops import flatten_to_ndims, unflatten_from_ndims, transpose_conv2d_channels_last_to_x, transpose_conv2d_channels_x_to_last\n'), ((10056, 10124), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, None, None, None, 11]'}), '(dtype=tf.float32, shape=[None, None, None, None, 11])\n', (10070, 10124), True, 'import tensorflow as tf\n'), ((10462, 10570), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'axis': 'axis', 'value_ndims': 'value_ndims', 'secondary': '(False)', 'scale_type': '"""exp"""'}), "(shift_and_scale_fn, axis=axis, value_ndims=value_ndims,\n secondary=False, scale_type='exp')\n", (10475, 10570), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((10847, 10952), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}', 'rtol': '(0.0005)', 'atol': '(1e-05)'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x},\n rtol=0.0005, atol=1e-05)\n', (10877, 10952), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((11059, 11091), 'numpy.transpose', 'np.transpose', (['x', '[0, 1, 4, 2, 3]'], {}), '(x, [0, 1, 4, 2, 3])\n', (11071, 11091), True, 'import numpy as np\n'), ((11111, 11179), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, None, 11, None, None]'}), '(dtype=tf.float32, shape=[None, None, 
11, None, None])\n', (11125, 11179), True, 'import tensorflow as tf\n'), ((12054, 12159), 'tests.layers.flows.helper.invertible_flow_standard_check', 'invertible_flow_standard_check', (['self', 'layer', 'sess', 'x_ph'], {'feed_dict': '{x_ph: x}', 'rtol': '(0.0005)', 'atol': '(1e-05)'}), '(self, layer, sess, x_ph, feed_dict={x_ph: x},\n rtol=0.0005, atol=1e-05)\n', (12084, 12159), False, 'from tests.layers.flows.helper import invertible_flow_standard_check\n'), ((12322, 12408), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""The feature axis of `input` must be at least 2"""'}), "(ValueError, match=\n 'The feature axis of `input` must be at least 2')\n", (12335, 12408), False, 'import pytest\n'), ((12473, 12530), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'axis': '(-1)', 'value_ndims': '(1)'}), '(shift_and_scale_fn, axis=-1, value_ndims=1)\n', (12486, 12530), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((12591, 12679), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""`scale_type` != None, but no scale is computed"""'}), "(RuntimeError, match=\n '`scale_type` != None, but no scale is computed')\n", (12604, 12679), False, 'import pytest\n'), ((12746, 12800), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'scale_type': '"""linear"""'}), "(shift_and_scale_fn, scale_type='linear')\n", (12759, 12800), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), ((12954, 13039), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""`scale_type` == None, but scale is computed"""'}), "(RuntimeError, match='`scale_type` == None, but scale is computed'\n )\n", (12967, 13039), False, 'import pytest\n'), ((13106, 13156), 'tfsnippet.layers.CouplingLayer', 'CouplingLayer', (['shift_and_scale_fn'], {'scale_type': 'None'}), '(shift_and_scale_fn, scale_type=None)\n', (13119, 13156), False, 'from tfsnippet.layers import CouplingLayer, conv2d\n'), 
((1992, 2005), 'numpy.abs', 'np.abs', (['scale'], {}), '(scale)\n', (1998, 2005), True, 'import numpy as np\n'), ((2548, 2577), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[3, 2]'}), '(size=[3, 2])\n', (2564, 2577), True, 'import numpy as np\n'), ((2615, 2644), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[2, 3]'}), '(size=[2, 3])\n', (2631, 2644), True, 'import numpy as np\n'), ((2681, 2707), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[2]'}), '(size=[2])\n', (2697, 2707), True, 'import numpy as np\n'), ((2744, 2770), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[3]'}), '(size=[3])\n', (2760, 2770), True, 'import numpy as np\n'), ((3147, 3168), 'tensorflow.matmul', 'tf.matmul', (['x1', 'kernel'], {}), '(x1, kernel)\n', (3156, 3168), True, 'import tensorflow as tf\n'), ((3206, 3245), 'tensorflow.zeros_like', 'tf.zeros_like', (['scale'], {'dtype': 'shift.dtype'}), '(scale, dtype=shift.dtype)\n', (3219, 3245), True, 'import tensorflow as tf\n'), ((6568, 6626), 'functools.partial', 'functools.partial', (['shift_and_scale_numpy_fn'], {'no_scale': '(True)'}), '(shift_and_scale_numpy_fn, no_scale=True)\n', (6585, 6626), False, 'import functools\n'), ((6829, 6881), 'functools.partial', 'functools.partial', (['shift_and_scale_fn'], {'no_scale': '(True)'}), '(shift_and_scale_fn, no_scale=True)\n', (6846, 6881), False, 'import functools\n'), ((7489, 7547), 'functools.partial', 'functools.partial', (['shift_and_scale_numpy_fn'], {'no_scale': '(True)'}), '(shift_and_scale_numpy_fn, no_scale=True)\n', (7506, 7547), False, 'import functools\n'), ((7749, 7801), 'functools.partial', 'functools.partial', (['shift_and_scale_fn'], {'no_scale': '(True)'}), '(shift_and_scale_fn, no_scale=True)\n', (7766, 7801), False, 'import functools\n'), ((8448, 8483), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[3, 3, 5, 6]'}), '(size=[3, 3, 5, 6])\n', (8464, 8483), True, 'import numpy as np\n'), ((8521, 8556), 
'numpy.random.normal', 'np.random.normal', ([], {'size': '[3, 3, 6, 5]'}), '(size=[3, 3, 6, 5])\n', (8537, 8556), True, 'import numpy as np\n'), ((8593, 8619), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[6]'}), '(size=[6])\n', (8609, 8619), True, 'import numpy as np\n'), ((8656, 8682), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[5]'}), '(size=[5])\n', (8672, 8682), True, 'import numpy as np\n'), ((9257, 9296), 'tensorflow.zeros_like', 'tf.zeros_like', (['scale'], {'dtype': 'shift.dtype'}), '(scale, dtype=shift.dtype)\n', (9270, 9296), True, 'import tensorflow as tf\n'), ((11336, 11400), 'functools.partial', 'functools.partial', (['shift_and_scale_numpy_fn'], {'channels_last': '(False)'}), '(shift_and_scale_numpy_fn, channels_last=False)\n', (11353, 11400), False, 'import functools\n'), ((11641, 11699), 'functools.partial', 'functools.partial', (['shift_and_scale_fn'], {'channels_last': '(False)'}), '(shift_and_scale_fn, channels_last=False)\n', (11658, 11699), False, 'import functools\n'), ((12286, 12302), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (12297, 12302), True, 'import tensorflow as tf\n'), ((12559, 12575), 'tensorflow.zeros', 'tf.zeros', (['[2, 1]'], {}), '([2, 1])\n', (12567, 12575), True, 'import tensorflow as tf\n'), ((12829, 12845), 'tensorflow.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (12837, 12845), True, 'import tensorflow as tf\n'), ((12907, 12923), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (12918, 12923), True, 'import tensorflow as tf\n'), ((12924, 12940), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (12935, 12940), True, 'import tensorflow as tf\n'), ((13185, 13201), 'tensorflow.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (13193, 13201), True, 'import tensorflow as tf\n'), ((1896, 1909), 'numpy.abs', 'np.abs', (['scale'], {}), '(scale)\n', (1902, 1909), True, 'import numpy as np\n'), ((3700, 3732), 'numpy.random.normal', 
'np.random.normal', ([], {'size': '[3, 4, 5]'}), '(size=[3, 4, 5])\n', (3716, 3732), True, 'import numpy as np\n'), ((9974, 10017), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[11, 13, 32, 31, 11]'}), '(size=[11, 13, 32, 31, 11])\n', (9990, 10017), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from load import ROOT as R
import numpy as N
from gna.constructors import Points
from gna.bindings import DataType
segments = N.arange(0.0, 5.1, dtype='d')
segments_t = Points(segments)
print( 'Edges', segments )
for case in [
( 0.5, 3.6, 0.4),
(-1.5, 3.6, 0.4),
( 3.5, 6.6, 0.4),
(-1.5, 6.6, 0.4),
(0.0, 5.1),
(-1.e-17, 5.1),
(-0.5, 6.5, 2.)
]:
points = N.arange(*case, dtype='d')
print( ' points', points )
points_t = Points( points )
sw = R.SegmentWise()
sw.segments.edges(segments_t.points.points)
sw.segments.points(points_t.points.points)
res = sw.segments.segments.data()
print(' segments', res)
for i, (i1, i2) in enumerate(zip(res[:-1], res[1:])):
i1, i2 = int(i1), int(i2)
sub = points[i1:i2]
xmin, xmax = segments[i], segments[i+1]
check = (xmin<=sub)*(sub<xmax)
msg = '\033[31mFAIL!\033[0m' if not check.all() else ''
print(' %i %g->%g:'%(i, xmin, xmax), sub, msg )
print()
| [
"load.ROOT.SegmentWise",
"numpy.arange",
"gna.constructors.Points"
] | [((152, 181), 'numpy.arange', 'N.arange', (['(0.0)', '(5.1)'], {'dtype': '"""d"""'}), "(0.0, 5.1, dtype='d')\n", (160, 181), True, 'import numpy as N\n'), ((195, 211), 'gna.constructors.Points', 'Points', (['segments'], {}), '(segments)\n', (201, 211), False, 'from gna.constructors import Points\n'), ((452, 478), 'numpy.arange', 'N.arange', (['*case'], {'dtype': '"""d"""'}), "(*case, dtype='d')\n", (460, 478), True, 'import numpy as N\n'), ((526, 540), 'gna.constructors.Points', 'Points', (['points'], {}), '(points)\n', (532, 540), False, 'from gna.constructors import Points\n'), ((553, 568), 'load.ROOT.SegmentWise', 'R.SegmentWise', ([], {}), '()\n', (566, 568), True, 'from load import ROOT as R\n')] |
import numpy as np
from sklearn.utils import check_array, check_X_y
from joblib import Parallel, delayed
from .regression_tree import RegressionTree
import copy
class StochasticThresholdModelTrees():
"""
Class of the Stochastic Threshold Model Trees.
- Extended ensemble method based on tree-based regressors.
"""
def __init__(
self,
n_estimators=100,
criterion=None,
regressor=None,
threshold_selector=None,
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features='auto',
f_select=True,
ensemble_pred='mean',
scaling=False,
bootstrap=True,
random_state=None,
split_continue=False,
verbose=0
):
self.n_estimators = n_estimators
self.criterion = criterion
self.regressor = regressor
self.threshold_selector = threshold_selector
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.f_select = f_select
self.ensemble_pred = ensemble_pred
self.scaling = scaling
self.bootstrap = bootstrap
self.random_state = random_state
self.split_continue = split_continue
self.verbose = verbose
def fit(self, X, y):
"""Build a forest of trees from the training set (X, y)."""
X, y = check_X_y(X, y, ['csr', 'csc'])
random_state = self.check_random_state(self.random_state)
seeds = random_state.randint(
np.iinfo(np.int32).max, size=self.n_estimators)
self.forest = Parallel(n_jobs=-1, verbose=self.verbose)(
delayed(self._build_trees)(X, y, seeds[i])
for i in range(self.n_estimators))
def predict(self, X, return_std=False):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean or median predicted regression targets of the trees in the forest.
"""
X = check_array(X, accept_sparse='csr')
pred = np.array([tree.predict(X).tolist() for tree in self.forest])
if return_std:
if self.ensemble_pred == 'mean':
return np.mean(pred, axis=0), np.std(pred, axis=0)
elif self.ensemble_pred == 'median':
return np.median(pred, axis=0), np.std(pred, axis=0)
else:
if self.ensemble_pred == 'mean':
return np.mean(pred, axis=0)
elif self.ensemble_pred == 'median':
return np.median(pred, axis=0)
else:
return pred
def _build_trees(self, X, y, seed):
tree = RegressionTree(
criterion=self.criterion,
regressor=self.regressor,
threshold_selector=self.threshold_selector,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
f_select=self.f_select,
scaling=self.scaling,
random_state=seed,
split_continue=self.split_continue
)
if self.bootstrap:
X_bootstrap, y_bootstrap = self._bootstrap(seed, X, y)
tree.fit(X_bootstrap, y_bootstrap)
else:
tree.fit(X, y)
return tree
def count_selected_feature(self):
"""Count the number of features used to divide the tree."""
return np.array(
[tree.count_feature() for tree in self.forest])
def _bootstrap(self, seed, X, y):
n_samples, n_features = X.shape
random_state = np.random.RandomState(seed)
boot_index = random_state.randint(0, n_samples, n_samples)
if isinstance(self.max_features, int):
if not 1 <= self.max_features:
print('The number of features must be one or more.')
boot_features = self.max_features
elif isinstance(self.max_features, float):
if not 1. >= self.max_features:
print('The fraction of features is must be less than 1.0.')
elif not 0 < self.max_features:
print('The fraction of features is must be more than 0.')
boot_features = int(n_features * self.max_features)
else:
if self.max_features == 'auto':
boot_features = n_features
elif self.max_features == 'sqrt':
boot_features = int(np.sqrt(n_features))
elif self.max_features == 'log2':
boot_features = int(np.log2(n_features))
boot_feature_index = random_state.permutation(
n_features)[0:boot_features]
remove_feature_index = list(set(range(
n_features)) - set(boot_feature_index.tolist()))
boot_X = X[boot_index, :].copy()
boot_X[:, remove_feature_index] = 0.0
return boot_X, y[boot_index]
def check_random_state(self, seed):
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, int):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
def get_params(self, deep=True):
return {
'n_estimators': self.n_estimators,
'criterion': self.criterion,
'regressor': self.regressor,
'threshold_selector': self.threshold_selector,
'max_depth': self.max_depth,
'min_samples_split': self.min_samples_split,
'min_samples_leaf': self.min_samples_leaf,
'max_features': self.max_features,
'f_select': self.f_select,
'ensemble_pred': self.ensemble_pred,
'scaling': self.scaling,
'bootstrap': self.bootstrap,
'random_state': self.random_state,
'split_continue': self.split_continue
}
def set_params(self, **params):
for param, value in params.items():
setattr(self, param, value)
return self
| [
"sklearn.utils.check_array",
"numpy.std",
"numpy.median",
"numpy.log2",
"sklearn.utils.check_X_y",
"numpy.iinfo",
"numpy.random.RandomState",
"numpy.mean",
"joblib.Parallel",
"joblib.delayed",
"numpy.sqrt"
] | [((1519, 1550), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y', "['csr', 'csc']"], {}), "(X, y, ['csr', 'csc'])\n", (1528, 1550), False, 'from sklearn.utils import check_array, check_X_y\n'), ((2172, 2207), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': '"""csr"""'}), "(X, accept_sparse='csr')\n", (2183, 2207), False, 'from sklearn.utils import check_array, check_X_y\n'), ((3829, 3856), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3850, 3856), True, 'import numpy as np\n'), ((1745, 1786), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': 'self.verbose'}), '(n_jobs=-1, verbose=self.verbose)\n', (1753, 1786), False, 'from joblib import Parallel, delayed\n'), ((5338, 5365), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (5359, 5365), True, 'import numpy as np\n'), ((1672, 1690), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1680, 1690), True, 'import numpy as np\n'), ((2630, 2651), 'numpy.mean', 'np.mean', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (2637, 2651), True, 'import numpy as np\n'), ((1801, 1827), 'joblib.delayed', 'delayed', (['self._build_trees'], {}), '(self._build_trees)\n', (1808, 1827), False, 'from joblib import Parallel, delayed\n'), ((2381, 2402), 'numpy.mean', 'np.mean', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (2388, 2402), True, 'import numpy as np\n'), ((2404, 2424), 'numpy.std', 'np.std', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (2410, 2424), True, 'import numpy as np\n'), ((2726, 2749), 'numpy.median', 'np.median', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (2735, 2749), True, 'import numpy as np\n'), ((2499, 2522), 'numpy.median', 'np.median', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (2508, 2522), True, 'import numpy as np\n'), ((2524, 2544), 'numpy.std', 'np.std', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (2530, 2544), True, 'import numpy as np\n'), ((4687, 4706), 
'numpy.sqrt', 'np.sqrt', (['n_features'], {}), '(n_features)\n', (4694, 4706), True, 'import numpy as np\n'), ((4792, 4811), 'numpy.log2', 'np.log2', (['n_features'], {}), '(n_features)\n', (4799, 4811), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
import os
import pandas as pd
#plt.rcParams["image.composite_image"] =False
################################################################
import matplotlib
matplotlib.rcParams.update({'text.usetex': False, 'font.family': 'stixgeneral', 'mathtext.fontset': 'stix',})
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
################################################################
N = 1000
ttot = 1800
################################################################
################################################################
plt.close('all')
fig = plt.figure(figsize = [7,5])
gm = gridspec.GridSpec(70, 105, figure = fig)
axz1 = plt.subplot(gm[3:22, 0:23])
axz2 = plt.subplot(gm[3:22, 60:83])
axz = [axz1, axz2]
axz1z = plt.subplot(gm[:12, 28:44])
axz2z = plt.subplot(gm[:12, 88:104])
axzz = [axz1z, axz2z]
axz1h = plt.subplot(gm[14:22, 30:45])
axz2h = plt.subplot(gm[14:22, 90:105])
axzh = [axz1h, axz2h]
ax1 = [plt.subplot(gm[33:46, :45]), plt.subplot(gm[33:46,60:])]
ax2 = [plt.subplot(gm[54:70, :45]), plt.subplot(gm[54:70,60:])]
###############################################################
net_names = ['Small-world', 'Random']
for col, net in enumerate(['small', 'random']):
###############################################################
z = np.loadtxt('./../simulation_files/network_matrix_%s.dat'%net).astype(int)
zinvert = np.abs(z-1)
[rates, cv] = np.loadtxt('./%s/rates_and_cv.dat'%net)
w = np.loadtxt('./../simulation_files/neurons.dat')[:,0]
################## plot network architecture ###################################
##########################################################
axz[col].imshow(zinvert, cmap=plt.cm.gray, origin = 'lower', interpolation='None')
axz[col].plot([949, 949, 999, 999, 949], [999, 949, 949, 999, 999], 'r', lw = 0.6)
axzz[col].imshow(zinvert[-50:, -50:], cmap=plt.cm.gray, origin = 'lower', interpolation='None')
axzh[col].hist(np.sum(z, axis = 1), color = 'k', bins = np.arange(24))
################## plot rates and cvs ###################################
rbins = 50
hw = np.histogram(w, bins = rbins, range = [0, 52])
ax1[col].plot(np.repeat(hw[1], 2)[1:-1], np.repeat(hw[0], 2), 'g', alpha = 0.85)
h = np.histogram(rates, bins = rbins, range = [0, 52])
ax1[col].plot(np.repeat(h[1], 2)[1:-1], np.repeat(h[0], 2), 'k', alpha = 0.85)
h2 = np.histogram(cv[np.where(rates>2)], bins = 28, range = [0, 0.8])
ax2[col].plot(np.repeat(h2[1], 2)[1:-1], np.repeat(h2[0], 2), 'k', alpha = 0.85)
ax1[col].set_xlabel('Rate (Hz)')
ax1[col].set_ylabel('Counts')
ax2[col].set_xlabel('CV isis')
ax2[col].set_ylabel('Counts')
################### Spike count correlation #########################
###################################################
for k, ax in enumerate(ax1+ ax2+axzh):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_position(('outward', 1))
ax.spines['bottom'].set_position(('outward', 1))
for ax in ax1:
ax.set_xlim(-0.02, 52)
ax.set_ylim(-1, 75)
for ax in ax2:
ax.set_xlim(-0.01, 1.03)
ax.set_ylim(-2, 208)
ax.set_xticks([0.0, 0.5, 1])
ax.set_xticklabels(['0','0.5', '1'])
for ax in axz+axzz+axzh:
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(0.43)
for ax in axzz:
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_color('red')
for ax in axzh:
ax.set_yticks([])
ax.set_xlim(0, 20)
ax.set_xticks([0.5, 10.5, 20.5])
ax.set_ylabel('Counts', fontsize = 7)
ax.set_xticklabels(['0','10', '20'], fontsize = 7)
ax.set_xlabel('Synaptic inputs', fontsize = 7)
for ax in axz:
ax.set_xticks([0, 999])
ax.set_xticklabels(['1', 'N'], fontsize = 9)
ax.set_yticks([0, 999])
ax.set_yticklabels(['1', 'N'], fontsize = 9)
ax.set_xlabel('Postsynaptic\nneuron', labelpad = -4)
ax.set_ylabel('Presynaptic\nneuron', labelpad = -4)
for ax in axzz:
ax.set_xticks([])
ax.set_yticks([])
plt.figtext(0.12, 0.97, 'Small-world', fontsize = 14, ha = 'left')
plt.figtext(0.63, 0.97, 'Random', fontsize = 14, ha = 'left')
##################################################################
x1, x2, y1, y2, y3, fz = 0.025, 0.525, 0.97, 0.58, 0.32, 16
plt.figtext(x1, y1, 'A', ha = 'center', va = 'center', fontsize = fz)
plt.figtext(x2, y1, 'B', ha = 'center', va = 'center', fontsize = fz)
plt.figtext(x1, y2, 'C', ha = 'center', va = 'center', fontsize = fz)
plt.figtext(x2, y2, 'D', ha = 'center', va = 'center', fontsize = fz)
plt.figtext(x1, y3, 'E', ha = 'center', va = 'center', fontsize = fz)
plt.figtext(x2, y3, 'F', ha = 'center', va = 'center', fontsize = fz)
################################################################
fig.subplots_adjust(left = 0.1, bottom = 0.09, right = 0.98, top = 0.98)
plt.savefig('figure3.png', dpi = 300)
# plt.savefig('figure3.pdf')
| [
"matplotlib.pylab.savefig",
"numpy.abs",
"numpy.sum",
"matplotlib.pylab.subplot",
"matplotlib.pylab.figtext",
"matplotlib.rcParams.update",
"numpy.histogram",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"numpy.repeat",
"matplotlib.pylab.close",
"matplotlib.gridspec.GridSpec",
"matplotl... | [((248, 360), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'text.usetex': False, 'font.family': 'stixgeneral', 'mathtext.fontset': 'stix'\n }"], {}), "({'text.usetex': False, 'font.family':\n 'stixgeneral', 'mathtext.fontset': 'stix'})\n", (274, 360), False, 'import matplotlib\n'), ((655, 671), 'matplotlib.pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (664, 671), True, 'import matplotlib.pylab as plt\n'), ((678, 704), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '[7, 5]'}), '(figsize=[7, 5])\n', (688, 704), True, 'import matplotlib.pylab as plt\n'), ((711, 749), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(70)', '(105)'], {'figure': 'fig'}), '(70, 105, figure=fig)\n', (728, 749), True, 'import matplotlib.gridspec as gridspec\n'), ((759, 786), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[3:22, 0:23]'], {}), '(gm[3:22, 0:23])\n', (770, 786), True, 'import matplotlib.pylab as plt\n'), ((794, 822), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[3:22, 60:83]'], {}), '(gm[3:22, 60:83])\n', (805, 822), True, 'import matplotlib.pylab as plt\n'), ((850, 877), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[:12, 28:44]'], {}), '(gm[:12, 28:44])\n', (861, 877), True, 'import matplotlib.pylab as plt\n'), ((886, 914), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[:12, 88:104]'], {}), '(gm[:12, 88:104])\n', (897, 914), True, 'import matplotlib.pylab as plt\n'), ((945, 974), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[14:22, 30:45]'], {}), '(gm[14:22, 30:45])\n', (956, 974), True, 'import matplotlib.pylab as plt\n'), ((983, 1013), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[14:22, 90:105]'], {}), '(gm[14:22, 90:105])\n', (994, 1013), True, 'import matplotlib.pylab as plt\n'), ((4163, 4225), 'matplotlib.pylab.figtext', 'plt.figtext', (['(0.12)', '(0.97)', '"""Small-world"""'], {'fontsize': '(14)', 'ha': '"""left"""'}), "(0.12, 0.97, 'Small-world', fontsize=14, ha='left')\n", (4174, 4225), 
True, 'import matplotlib.pylab as plt\n'), ((4230, 4287), 'matplotlib.pylab.figtext', 'plt.figtext', (['(0.63)', '(0.97)', '"""Random"""'], {'fontsize': '(14)', 'ha': '"""left"""'}), "(0.63, 0.97, 'Random', fontsize=14, ha='left')\n", (4241, 4287), True, 'import matplotlib.pylab as plt\n'), ((4420, 4483), 'matplotlib.pylab.figtext', 'plt.figtext', (['x1', 'y1', '"""A"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': 'fz'}), "(x1, y1, 'A', ha='center', va='center', fontsize=fz)\n", (4431, 4483), True, 'import matplotlib.pylab as plt\n'), ((4490, 4553), 'matplotlib.pylab.figtext', 'plt.figtext', (['x2', 'y1', '"""B"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': 'fz'}), "(x2, y1, 'B', ha='center', va='center', fontsize=fz)\n", (4501, 4553), True, 'import matplotlib.pylab as plt\n'), ((4560, 4623), 'matplotlib.pylab.figtext', 'plt.figtext', (['x1', 'y2', '"""C"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': 'fz'}), "(x1, y2, 'C', ha='center', va='center', fontsize=fz)\n", (4571, 4623), True, 'import matplotlib.pylab as plt\n'), ((4630, 4693), 'matplotlib.pylab.figtext', 'plt.figtext', (['x2', 'y2', '"""D"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': 'fz'}), "(x2, y2, 'D', ha='center', va='center', fontsize=fz)\n", (4641, 4693), True, 'import matplotlib.pylab as plt\n'), ((4700, 4763), 'matplotlib.pylab.figtext', 'plt.figtext', (['x1', 'y3', '"""E"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': 'fz'}), "(x1, y3, 'E', ha='center', va='center', fontsize=fz)\n", (4711, 4763), True, 'import matplotlib.pylab as plt\n'), ((4770, 4833), 'matplotlib.pylab.figtext', 'plt.figtext', (['x2', 'y3', '"""F"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': 'fz'}), "(x2, y3, 'F', ha='center', va='center', fontsize=fz)\n", (4781, 4833), True, 'import matplotlib.pylab as plt\n'), ((4978, 5013), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""figure3.png"""'], {'dpi': '(300)'}), "('figure3.png', 
dpi=300)\n", (4989, 5013), True, 'import matplotlib.pylab as plt\n'), ((1043, 1070), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[33:46, :45]'], {}), '(gm[33:46, :45])\n', (1054, 1070), True, 'import matplotlib.pylab as plt\n'), ((1072, 1099), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[33:46, 60:]'], {}), '(gm[33:46, 60:])\n', (1083, 1099), True, 'import matplotlib.pylab as plt\n'), ((1107, 1134), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[54:70, :45]'], {}), '(gm[54:70, :45])\n', (1118, 1134), True, 'import matplotlib.pylab as plt\n'), ((1136, 1163), 'matplotlib.pylab.subplot', 'plt.subplot', (['gm[54:70, 60:]'], {}), '(gm[54:70, 60:])\n', (1147, 1163), True, 'import matplotlib.pylab as plt\n'), ((1479, 1492), 'numpy.abs', 'np.abs', (['(z - 1)'], {}), '(z - 1)\n', (1485, 1492), True, 'import numpy as np\n'), ((1509, 1550), 'numpy.loadtxt', 'np.loadtxt', (["('./%s/rates_and_cv.dat' % net)"], {}), "('./%s/rates_and_cv.dat' % net)\n", (1519, 1550), True, 'import numpy as np\n'), ((2212, 2254), 'numpy.histogram', 'np.histogram', (['w'], {'bins': 'rbins', 'range': '[0, 52]'}), '(w, bins=rbins, range=[0, 52])\n', (2224, 2254), True, 'import numpy as np\n'), ((2352, 2398), 'numpy.histogram', 'np.histogram', (['rates'], {'bins': 'rbins', 'range': '[0, 52]'}), '(rates, bins=rbins, range=[0, 52])\n', (2364, 2398), True, 'import numpy as np\n'), ((1557, 1604), 'numpy.loadtxt', 'np.loadtxt', (['"""./../simulation_files/neurons.dat"""'], {}), "('./../simulation_files/neurons.dat')\n", (1567, 1604), True, 'import numpy as np\n'), ((2052, 2069), 'numpy.sum', 'np.sum', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (2058, 2069), True, 'import numpy as np\n'), ((2304, 2323), 'numpy.repeat', 'np.repeat', (['hw[0]', '(2)'], {}), '(hw[0], 2)\n', (2313, 2323), True, 'import numpy as np\n'), ((2447, 2465), 'numpy.repeat', 'np.repeat', (['h[0]', '(2)'], {}), '(h[0], 2)\n', (2456, 2465), True, 'import numpy as np\n'), ((2605, 2624), 'numpy.repeat', 'np.repeat', (['h2[0]', 
'(2)'], {}), '(h2[0], 2)\n', (2614, 2624), True, 'import numpy as np\n'), ((1391, 1454), 'numpy.loadtxt', 'np.loadtxt', (["('./../simulation_files/network_matrix_%s.dat' % net)"], {}), "('./../simulation_files/network_matrix_%s.dat' % net)\n", (1401, 1454), True, 'import numpy as np\n'), ((2093, 2106), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (2102, 2106), True, 'import numpy as np\n'), ((2277, 2296), 'numpy.repeat', 'np.repeat', (['hw[1]', '(2)'], {}), '(hw[1], 2)\n', (2286, 2296), True, 'import numpy as np\n'), ((2421, 2439), 'numpy.repeat', 'np.repeat', (['h[1]', '(2)'], {}), '(h[1], 2)\n', (2430, 2439), True, 'import numpy as np\n'), ((2511, 2530), 'numpy.where', 'np.where', (['(rates > 2)'], {}), '(rates > 2)\n', (2519, 2530), True, 'import numpy as np\n'), ((2578, 2597), 'numpy.repeat', 'np.repeat', (['h2[1]', '(2)'], {}), '(h2[1], 2)\n', (2587, 2597), True, 'import numpy as np\n')] |
import os
import errno
import click
import numpy as np
from configs.ibsr import IBSRConfig
from src.data.preprocessor import Preprocessor
from src.models.unet import Unet
from src.data.utils import DataUtils
# Use IBSRConfig
# You can create another config in 'configs' directory and change _config variable
_config = IBSRConfig
# Model instance used for training, predicting, evaluating
unet = Unet(_config.IMG_SIZE, _config.IMG_SIZE, _config.WEIGHTS_PATH)
@click.group()
def cli():
# Initiate essential directories
try:
os.makedirs(_config.PROCESSED_TRAIN_DATA_DIR)
os.makedirs(_config.PROCESSED_TEST_DATA_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@click.command('preprocess')
@click.option('--train-dir', 'raw_train_data_dir',
type=click.Path(), default=_config.RAW_TRAIN_DIR)
@click.option('--test-dir', 'raw_test_data_dir',
type=click.Path(), default=_config.RAW_TEST_DIR)
@click.option('--processed-train-dir', 'processed_train_dir',
type=click.Path(), default=_config.PROCESSED_TRAIN_DATA_DIR)
@click.option('--processed-test-dir', 'processed_test_dir',
type=click.Path(), default=_config.PROCESSED_TEST_DATA_DIR)
def process_data(raw_train_data_dir, raw_test_data_dir, processed_train_dir, processed_test_dir):
preprocessor = Preprocessor(raw_train_data_dir=raw_train_data_dir,
raw_test_data_dir=raw_test_data_dir,
processed_train_data_dir=processed_train_dir,
processed_test_data_dir=processed_test_dir,
postfix_data_file=_config.POSTFIX_DATA_FILE,
postfix_mask_data_file=_config.POSTFIX_MASK_DATA_FILE, transpose=[1, 2, 0, 3])
click.echo('Start processing data.')
preprocessor.do_preprocess()
click.echo('Data have been processed.')
@click.command()
def train():
data_utils = DataUtils(_config.PROCESSED_TRAIN_DATA_DIR, _config.PROCESSED_TEST_DATA_DIR)
data, mask = data_utils.get_train_data()
unet.train(data, mask, _config.EPOCHS)
@click.command()
@click.option('--data-path', 'data_path', type=click.Path())
@click.option('--predictions-path', 'predictions_path', type=click.Path())
def predict(data_path, predictions_path):
data = np.load(data_path)
predictions = unet.predict(data)
np.save(predictions_path, predictions)
@click.command()
def evaluate():
data_utils = DataUtils(_config.PROCESSED_TRAIN_DATA_DIR, _config.PROCESSED_TEST_DATA_DIR)
test_data, test_mask = data_utils.get_test_data()
predictions = unet.predict(test_data)
accuracy_csf = unet.evaluate(predictions, test_mask, 0)
accuracy_gm = unet.evaluate(predictions, test_mask, 1)
accuracy_wm = unet.evaluate(predictions, test_mask, 2)
average = unet.evaluate_average(predictions, test_mask)
click.echo('\n')
click.echo('Accuracy of CSF: {}'.format(accuracy_csf))
click.echo('Accuracy of GM: {}'.format(accuracy_gm))
click.echo('Accuracy of WM: {}'.format(accuracy_wm))
click.echo('Average: {}'.format(average))
# Add commands
cli.add_command(process_data)
cli.add_command(train)
cli.add_command(predict)
cli.add_command(evaluate)
if __name__ == '__main__':
cli()
| [
"numpy.load",
"numpy.save",
"os.makedirs",
"src.data.preprocessor.Preprocessor",
"src.models.unet.Unet",
"click.echo",
"click.command",
"click.Path",
"src.data.utils.DataUtils",
"click.group"
] | [((397, 459), 'src.models.unet.Unet', 'Unet', (['_config.IMG_SIZE', '_config.IMG_SIZE', '_config.WEIGHTS_PATH'], {}), '(_config.IMG_SIZE, _config.IMG_SIZE, _config.WEIGHTS_PATH)\n', (401, 459), False, 'from src.models.unet import Unet\n'), ((463, 476), 'click.group', 'click.group', ([], {}), '()\n', (474, 476), False, 'import click\n'), ((728, 755), 'click.command', 'click.command', (['"""preprocess"""'], {}), "('preprocess')\n", (741, 755), False, 'import click\n'), ((1957, 1972), 'click.command', 'click.command', ([], {}), '()\n', (1970, 1972), False, 'import click\n'), ((2171, 2186), 'click.command', 'click.command', ([], {}), '()\n', (2184, 2186), False, 'import click\n'), ((2478, 2493), 'click.command', 'click.command', ([], {}), '()\n', (2491, 2493), False, 'import click\n'), ((1372, 1693), 'src.data.preprocessor.Preprocessor', 'Preprocessor', ([], {'raw_train_data_dir': 'raw_train_data_dir', 'raw_test_data_dir': 'raw_test_data_dir', 'processed_train_data_dir': 'processed_train_dir', 'processed_test_data_dir': 'processed_test_dir', 'postfix_data_file': '_config.POSTFIX_DATA_FILE', 'postfix_mask_data_file': '_config.POSTFIX_MASK_DATA_FILE', 'transpose': '[1, 2, 0, 3]'}), '(raw_train_data_dir=raw_train_data_dir, raw_test_data_dir=\n raw_test_data_dir, processed_train_data_dir=processed_train_dir,\n processed_test_data_dir=processed_test_dir, postfix_data_file=_config.\n POSTFIX_DATA_FILE, postfix_mask_data_file=_config.\n POSTFIX_MASK_DATA_FILE, transpose=[1, 2, 0, 3])\n', (1384, 1693), False, 'from src.data.preprocessor import Preprocessor\n'), ((1840, 1876), 'click.echo', 'click.echo', (['"""Start processing data."""'], {}), "('Start processing data.')\n", (1850, 1876), False, 'import click\n'), ((1914, 1953), 'click.echo', 'click.echo', (['"""Data have been processed."""'], {}), "('Data have been processed.')\n", (1924, 1953), False, 'import click\n'), ((2003, 2079), 'src.data.utils.DataUtils', 'DataUtils', (['_config.PROCESSED_TRAIN_DATA_DIR', 
'_config.PROCESSED_TEST_DATA_DIR'], {}), '(_config.PROCESSED_TRAIN_DATA_DIR, _config.PROCESSED_TEST_DATA_DIR)\n', (2012, 2079), False, 'from src.data.utils import DataUtils\n'), ((2376, 2394), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (2383, 2394), True, 'import numpy as np\n'), ((2436, 2474), 'numpy.save', 'np.save', (['predictions_path', 'predictions'], {}), '(predictions_path, predictions)\n', (2443, 2474), True, 'import numpy as np\n'), ((2527, 2603), 'src.data.utils.DataUtils', 'DataUtils', (['_config.PROCESSED_TRAIN_DATA_DIR', '_config.PROCESSED_TEST_DATA_DIR'], {}), '(_config.PROCESSED_TRAIN_DATA_DIR, _config.PROCESSED_TEST_DATA_DIR)\n', (2536, 2603), False, 'from src.data.utils import DataUtils\n'), ((2944, 2960), 'click.echo', 'click.echo', (['"""\n"""'], {}), "('\\n')\n", (2954, 2960), False, 'import click\n'), ((547, 592), 'os.makedirs', 'os.makedirs', (['_config.PROCESSED_TRAIN_DATA_DIR'], {}), '(_config.PROCESSED_TRAIN_DATA_DIR)\n', (558, 592), False, 'import os\n'), ((601, 645), 'os.makedirs', 'os.makedirs', (['_config.PROCESSED_TEST_DATA_DIR'], {}), '(_config.PROCESSED_TEST_DATA_DIR)\n', (612, 645), False, 'import os\n'), ((826, 838), 'click.Path', 'click.Path', ([], {}), '()\n', (836, 838), False, 'import click\n'), ((939, 951), 'click.Path', 'click.Path', ([], {}), '()\n', (949, 951), False, 'import click\n'), ((1064, 1076), 'click.Path', 'click.Path', ([], {}), '()\n', (1074, 1076), False, 'import click\n'), ((1199, 1211), 'click.Path', 'click.Path', ([], {}), '()\n', (1209, 1211), False, 'import click\n'), ((2234, 2246), 'click.Path', 'click.Path', ([], {}), '()\n', (2244, 2246), False, 'import click\n'), ((2309, 2321), 'click.Path', 'click.Path', ([], {}), '()\n', (2319, 2321), False, 'import click\n')] |
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Sequence
import numpy as np
from openfermion import IsingOperator, QubitOperator, SymbolicOperator
from zquantum.core.wavefunction import Wavefunction
from ..bitstring_distribution import (
BitstringDistribution,
create_bitstring_distribution_from_probability_distribution,
)
from ..circuits import Circuit
from ..circuits.layouts import CircuitConnectivity
from ..measurement import ExpectationValues, Measurements, expectation_values_to_real
from ..openfermion import change_operator_type, get_expectation_value
class QuantumBackend(ABC):
    """Base class for quantum backend implementations.

    Attributes:
        supports_batching: whether this backend can execute circuits in
            batches.
        batch_size: number of circuit runs per batch; must be a positive
            integer whenever ``supports_batching`` is true.
        number_of_circuits_run: running count of circuits executed by this
            backend.
        number_of_jobs_run: running count of jobs executed; differs from
            ``number_of_circuits_run`` when batching is used.
    """

    supports_batching: bool = False
    batch_size: Optional[int] = None

    def __init__(self):
        self.number_of_circuits_run = 0
        self.number_of_jobs_run = 0
        if self.supports_batching:
            # A batching backend must declare a usable batch size.
            assert isinstance(self.batch_size, int)
            assert self.batch_size > 0

    @abstractmethod
    def run_circuit_and_measure(self, circuit: Circuit, n_samples: int) -> Measurements:
        """Execute ``circuit`` and sample measurement outcomes.

        Args:
            circuit: quantum circuit to be executed.
            n_samples: the number of samples to collect; must be a positive int.
        """
        assert isinstance(n_samples, int) and n_samples > 0
        self.number_of_circuits_run += 1
        self.number_of_jobs_run += 1
        # Returned only so static type checkers don't complain; subclasses
        # are expected to return real measurement results.
        return Measurements()

    def run_circuitset_and_measure(
        self, circuits: Sequence[Circuit], n_samples: List[int]
    ) -> List[Measurements]:
        """Execute several circuits, sampling each a given number of times.

        Backends that support batching may want to override this method.

        Args:
            circuits: the circuits to execute.
            n_samples: the number of samples to collect for each circuit.
        """
        measurement_set: List[Measurements] = []
        if self.supports_batching:
            self.number_of_circuits_run += len(circuits)
            if isinstance(self.batch_size, int):
                num_jobs = int(np.ceil(len(circuits) / self.batch_size))
                self.number_of_jobs_run += num_jobs
            # The empty list is returned only to satisfy the type checker;
            # batching subclasses should produce the actual measurements.
        else:
            for single_circuit, samples_for_circuit in zip(circuits, n_samples):
                measurements = self.run_circuit_and_measure(
                    single_circuit, n_samples=samples_for_circuit
                )
                measurement_set.append(measurements)
        return measurement_set

    def get_bitstring_distribution(
        self, circuit: Circuit, n_samples: int
    ) -> BitstringDistribution:
        """Measure ``circuit`` and turn the samples into a distribution.

        Args:
            circuit: quantum circuit to be executed.
        Returns:
            Probability distribution of getting specific bistrings.
        """
        sampled = self.run_circuit_and_measure(circuit, n_samples)
        return sampled.get_distribution()
class QuantumSimulator(QuantumBackend):
    """Backend interface for simulators that can expose the exact quantum state.
    On top of the sampling interface inherited from ``QuantumBackend``,
    simulators provide the full wavefunction, which enables exact expectation
    values and exact bitstring distributions.
    """
    @abstractmethod
    def __init__(
        self,
        noise_model: Optional[Any] = None,
        device_connectivity: Optional[CircuitConnectivity] = None,
    ):
        # noise_model / device_connectivity semantics are backend-specific;
        # they are stored as-is for subclasses to interpret.
        super().__init__()
        self.noise_model = noise_model
        self.device_connectivity = device_connectivity
    @abstractmethod
    def get_wavefunction(self, circuit: Circuit) -> Wavefunction:
        """Returns a wavefunction representing quantum state produced by a circuit
        Args:
            circuit: quantum circuit to be executed.
        """
        # NOTE(review): the counters are incremented here, so subclasses
        # presumably call super().get_wavefunction(circuit) to keep the
        # bookkeeping accurate — confirm against concrete backends.
        self.number_of_circuits_run += 1
        self.number_of_jobs_run += 1
    def get_exact_expectation_values(
        self, circuit: Circuit, operator: SymbolicOperator
    ) -> ExpectationValues:
        """Calculates the expectation values for given operator, based on the exact
        quantum state produced by circuit.
        Args:
            circuit: quantum circuit to be executed.
            operator: Operator for which we calculate the expectation value.
        """
        wavefunction = self.get_wavefunction(circuit)
        # Convert IsingOperator to an equivalent QubitOperator first —
        # presumably because get_expectation_value expects QubitOperator
        # terms; TODO confirm.
        if isinstance(operator, IsingOperator):
            operator = change_operator_type(operator, QubitOperator)
        # One expectation value per term of the operator.
        expectation_values = ExpectationValues(
            np.array([get_expectation_value(term, wavefunction) for term in operator])
        )
        # Cast the values to real, dropping any numerical imaginary residue.
        expectation_values = expectation_values_to_real(expectation_values)
        return expectation_values
    def get_bitstring_distribution(
        self, circuit: Circuit, n_samples: Optional[int] = None
    ) -> BitstringDistribution:
        """Calculates a bitstring distribution.
        Args:
            circuit: quantum circuit to be executed.
            n_samples: if None, the exact distribution is computed from the
                wavefunction; otherwise it is estimated from this many samples.
        Returns:
            Probability distribution of getting specific bistrings.
        """
        if n_samples is None:
            # Exact distribution straight from the simulated state.
            wavefunction = self.get_wavefunction(circuit)
            return create_bitstring_distribution_from_probability_distribution(
                wavefunction.probabilities()
            )
        else:
            # Sampled (approximate) distribution.
            measurements = self.run_circuit_and_measure(circuit, n_samples)
            return measurements.get_distribution()
def _flip_bits(n, num_bits):
return int(bin(n)[2:].zfill(num_bits)[::-1], 2)
def flip_wavefunction(wavefunction: Wavefunction):
    """Return a wavefunction with the bit-ordering of its amplitudes reversed."""
    amplitudes = wavefunction.amplitudes
    num_states = len(amplitudes)
    # num_states is expected to be a power of two, so this is the qubit count.
    num_bits = num_states.bit_length() - 1
    reordered = np.array(
        [amplitudes[_flip_bits(index, num_bits)] for index in range(num_states)]
    )
    return Wavefunction(reordered)
| [
"numpy.array"
] | [((6674, 6702), 'numpy.array', 'np.array', (['flipped_amplitudes'], {}), '(flipped_amplitudes)\n', (6682, 6702), True, 'import numpy as np\n')] |
# Copyright (c) 2016, <NAME>
# Licensed under the BSD 3-clause license (see LICENSE)
# This file was modified from the GPy project. Its file header is replicated
# below. Its LICENSE.txt is replicated in the LICENSE file for this directory.
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
This module defines a generic internal :class:`Model` class, which handles
the interface between this class and the :mod:`paramz` optimization layer.
"""
import numpy as np
import paramz
from paramz.transformations import Transformation, __fixed__
from .priorizable import _PriorizableNode
from ..util.docs import inherit_doc
@inherit_doc
class Model(paramz.Model, _PriorizableNode):
    """
    A :class:`Model` provides a graphical model dependent on latent parameters,
    which it contains either explicitly (as attributes in derived classes
    of :class:`Model`) or implicitly (as parameters implicitly linked to
    a model's explicit parameters).
    Access to any parameter in this tree can be done by the name of those
    parameters. See the :class:`Parameterized` documentation for details.
    The parameters can be either :class:`Param`s or :class:`Parameterized`.
    In fact, the tree of references from objects to their attributes, as
    represented in the Python garbage collector, matches identically with
    the graphical model that this :class:`Model` represents (though the
    direction of the links is reversed).
    The :class:`Model` binds together likelihood values computed from
    the model without the priors (which is implemented by derived classes)
    with the priors. In other words, for observations :math:`\mathbf{y}`,
    parameters :math:`\\theta` dependent on priors :math:`\\phi`, the user
    supplies :math:`\log p(\mathbf{y}|\\theta,\\phi)` as well as its
    derivative with respect to :math:`\\theta`. This class automatically
    adds in the missing :math:`\log p(\\theta|\\phi)` term and its derivative.
    """
    def log_likelihood(self):
        """
        :return: the log likelihood of the current model with respect to its
                 current inputs and outputs and the current prior.
                 This should NOT include the likelihood of the parameters
                 given their priors. In other words, this value should be
                 :math:`\log p(\mathbf{y}|\\theta,\\phi)`
        """
        raise NotImplementedError
    def log_likelihood_with_prior(self):
        """
        Let the observations be :math:`\mathbf{y}`,
        parameters be :math:`\\theta`, and the prior :math:`\\phi`.
        .. math::
            \log p(\mathbf{y}|\\phi) = \log p(\mathbf{y}|\\theta,\\phi) +
            \log p(\mathbf{y}|\\theta,\phi)
        :return: the overall log likelihood shown above.
        """
        return float(self.log_likelihood()) + self.log_prior()
    def objective_function(self):
        # paramz minimizes the objective, so hand it the negated
        # log posterior (likelihood + prior).
        return -self.log_likelihood_with_prior()
    def objective_function_gradients(self):
        # self.gradient is the log likelihood gradient without the prior;
        # add the prior gradient before negating for minimization.
        return -(self.gradient + self._log_prior_gradients())
    def log_prior(self):
        """
        :return: the log prior :math:`\log p(\\theta|\\phi)`
        """
        if self.priors.size == 0:
            return 0.
        log_transformed_prior = sum(
            prior.lnpdf(self.param_array[indices]).sum()
            for prior, indices in self.priors.items())
        # Some of the parameters may have been transformed, so we need
        # to account for their Jacobian factor
        log_jacobian_prior = 0.
        indices_with_prior = {
            idx for _, indices in self.priors.items() for idx in indices}
        for constraint, indices_with_constraint in self.constraints.items():
            if not isinstance(constraint, Transformation):
                continue
            log_jacobian_prior += sum(
                constraint.log_jacobian(self.param_array[i])
                for i in indices_with_constraint
                if i in indices_with_prior)
        return log_transformed_prior + log_jacobian_prior
    def _log_prior_gradients(self):
        """
        :return: the gradient of the log prior with respect to the parameter
                 array, or ``0.`` when no priors are set.
        """
        if self.priors.size == 0:
            return 0.
        grad = np.zeros(self.param_array.size)
        for prior, indices in self.priors.items():
            np.put(grad, indices, prior.lnpdf_grad(self.param_array[indices]))
        # Incorporate the Jacobian factor for transformed parameters,
        # mirroring log_prior(). This index set is computed once, up front:
        # the previous version rebuilt it inside the prior loop with a
        # comprehension whose second `for` clause shadowed `indices` and was
        # effectively dead, which only produced correct results by accident
        # (each parameter has at most one prior).
        indices_with_prior = {
            idx for _, indices in self.priors.items() for idx in indices}
        for constraint, constrained_indices in self.constraints.items():
            if not isinstance(constraint, Transformation):
                continue
            for i in constrained_indices:
                if i in indices_with_prior:
                    grad[i] += constraint.log_jacobian_grad(
                        self.param_array[i])
        return grad
| [
"numpy.zeros"
] | [((4274, 4305), 'numpy.zeros', 'np.zeros', (['self.param_array.size'], {}), '(self.param_array.size)\n', (4282, 4305), True, 'import numpy as np\n')] |
#
# setup python environment
# pip install --upgrade pip, scikit-image, numpy, Pillow, nibabel
#
# This is the first preprocessing step: downsample and resize all images into the same size
#
# Usage:
#
# for i in {1..15}_{1..10}.tif; do echo $i >> filelist.txt; done
# n=100; while read file; do if [ -f $file ]; then n=$((n+1)); python zeroBoundary_hsv6.py $file im${n}.nii.gz; fi; done < filelist.txt
import sys
import skimage
import nibabel as nib
import numpy as np
from PIL import Image  # NOTE(review): imported but unused in this script
import nrrd  # NOTE(review): imported but unused in this script
#
# Command-line arguments: input TIFF path and output NIfTI path.
inputImg = sys.argv[1]
outputImg = sys.argv[2]
#
# Load the TIFF and downsample the first channel 8x by local averaging.
im = skimage.io.imread(inputImg, plugin='tifffile');
imds = skimage.transform.downscale_local_mean(im[:,:,0], (8,8)) # 8x
# downscale_local_mean returns floats; truncate back to 16-bit unsigned ints.
imdsint = np.array(imds, dtype='uint16')
img = imdsint;
# Zero out the one-pixel border of the image.
img[0,:] = 0;
img[-1,:] = 0;
img[:,0] = 0;
img[:,-1] = 0;
# Save as NIfTI with an identity affine (no spatial orientation information).
im = nib.Nifti1Image(img, np.eye(4))
im.to_filename(outputImg)
| [
"skimage.transform.downscale_local_mean",
"numpy.eye",
"numpy.array",
"skimage.io.imread"
] | [((563, 609), 'skimage.io.imread', 'skimage.io.imread', (['inputImg'], {'plugin': '"""tifffile"""'}), "(inputImg, plugin='tifffile')\n", (580, 609), False, 'import skimage\n'), ((619, 678), 'skimage.transform.downscale_local_mean', 'skimage.transform.downscale_local_mean', (['im[:, :, 0]', '(8, 8)'], {}), '(im[:, :, 0], (8, 8))\n', (657, 678), False, 'import skimage\n'), ((692, 722), 'numpy.array', 'np.array', (['imds'], {'dtype': '"""uint16"""'}), "(imds, dtype='uint16')\n", (700, 722), True, 'import numpy as np\n'), ((831, 840), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (837, 840), True, 'import numpy as np\n')] |
import os
import time
import math
import random
import dill as pk
import numpy as np
from collections import defaultdict
import torch
from torch.nn.utils.rnn import pad_sequence
def set_seed(seed):
    """Seed every RNG in play (python, numpy, torch CPU and CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
def myprint(text, file):
    """Append ``text`` to the log file at path ``file``, prefixed with a timestamp."""
    stamp = time.strftime("%Y %b %d %a, %H:%M:%S: ", time.localtime())
    with open(file, 'a') as log_file:
        print(stamp + text, file=log_file, flush=True)
def record_args(args, info, results):
    """Copy the run's command-line arguments into ``results[info.RESULT_ARGS]``."""
    labeled_values = [
        ('Database', args.database),
        ('Profile ID', args.profile_id),
        ('Pair ID', args.pair_id),
        ('Num Inducing Points', args.num_inducing_point),
        ('Batch Size', args.batch_size),
        ('Num Training Epoch', args.num_training_epoch),
        ('Learning Rate', args.lr),
        ('Momentum', args.momentum),
        ('Warmup Ratio', args.warmup_ratio),
        ('Beta', args.beta),
        ('Num Tuning Budget', args.num_tuning_budget),
        ('Num Tuning Epoch', args.num_tuning_epoch),
    ]
    for label, value in labeled_values:
        results[info.RESULT_ARGS][label] = value
def print_hyperparameter(info, results):
    """Log every recorded hyperparameter, one per line, followed by a separator."""
    myprint('Trial Hyperparameters', info.FILE_STDOUT)
    for name, value in results[info.RESULT_ARGS].items():
        myprint(f'{name}: {value}', info.FILE_STDOUT)
    myprint('-' * 20, info.FILE_STDOUT)
def print_model(info, model, likelihood):
    """Log the current values of the model's and likelihood's key parameters."""
    # Helper: log one named parameter with its value pulled to CPU numpy.
    f = lambda para, value: myprint(f'Parameter name: {para} | value = {value.detach().cpu().numpy()}', info.FILE_STDOUT)
    myprint('Model Parameters', info.FILE_STDOUT)
    # Only report mean constants for tasks involved in this run
    # (the training tasks plus the testing task).
    for i, mean in enumerate(model.mean_module):
        if i in model.meta.training_task_ids or i == model.meta.testing_task_id:
            f(f'mean_module.{i}.constant', mean.constant)
    f('covar_module_task.U', model.covar_module_task.U)
    f('covar_module_task.outputscale', model.covar_module_task.outputscale)
    f('covar_module_task.lengthscale', model.covar_module_task.lengthscale)
    f('covar_module_hyper.covar_module_hyper.offset', model.covar_module_hyper.covar_module_hyper.offset)
    f('covar_module_epoch.alpha', model.covar_module_epoch.alpha)
    f('covar_module_epoch.beta', model.covar_module_epoch.beta)
    f('covar_module_epoch.outputscale', model.covar_module_epoch.outputscale)
    f('likelihood.noise', likelihood.noise)
    myprint('-'*20, info.FILE_STDOUT)
def sample_inducing_points(base_X, num_inducing_points):
    """Return ``num_inducing_points`` rows of ``base_X`` drawn uniformly without replacement."""
    row_order = np.arange(base_X.shape[0])
    np.random.shuffle(row_order)
    chosen_rows = row_order[:num_inducing_points]
    return base_X[chosen_rows, :]
def get_batch(X, Y, batch_size, if_train):
    """Yield (batch_X, batch_Y) mini-batches.

    Training mode shuffles the samples and drops the trailing partial batch;
    evaluation mode keeps the original order and yields every sample.
    """
    order = np.arange(X.shape[0])
    if if_train:
        np.random.shuffle(order)
        num_batches = X.shape[0] // batch_size  # drop the remainder
    else:
        num_batches = math.ceil(X.shape[0] / batch_size)
    for start in range(0, num_batches * batch_size, batch_size):
        picked = order[start:start + batch_size]
        yield X[picked], Y[picked]
def reshape(info, input_tensor, mapping_matrix):
    """Split ``input_tensor`` into chunks whose lengths are the row sums of
    ``mapping_matrix``, then pad them into one batch-first tensor using
    ``info.EXTREME_SMALL`` as the padding value."""
    chunk_sizes = mapping_matrix.sum(1).tolist()
    chunks = torch.split(input_tensor, chunk_sizes)
    return pad_sequence(chunks, batch_first=True, padding_value=info.EXTREME_SMALL)
def get_query(info, meta, model, unobserved_mask, UCB):
    """Pick the next (config, epoch) cell to query by maximizing the masked UCB.

    Side effects: marks the chosen cell as observed in
    ``model.covar_module_task.if_observed`` and NaNs it out of
    ``unobserved_mask`` so it cannot be selected again.
    """
    # Mask out already-observed cells; NaN entries (previously queried cells)
    # are replaced with a very small value so they can never win the argmax.
    unobserved_UCB = UCB * unobserved_mask
    unobserved_UCB[torch.isnan(unobserved_UCB)] = info.EXTREME_SMALL
    # Break ties between maximal configs uniformly at random.
    query_configs = torch.unique(torch.where(unobserved_UCB==unobserved_UCB.max())[0])
    query_config = query_configs[torch.randint(query_configs.shape[0],(1,))[0]].item()
    # For the chosen config, query its earliest not-yet-observed epoch
    # (argmin over booleans returns the first False index).
    query_epoch = np.argmin(model.covar_module_task.if_observed[meta.testing_task_id][query_config])
    model.covar_module_task.if_observed[meta.testing_task_id][query_config, query_epoch] = True
    unobserved_mask[query_config, query_epoch] = np.nan
    return query_config, query_epoch
def get_max(mean, query_config):
    """Locate the (config, epoch) cell holding the maximum of ``mean``.

    Ties between maximal configs prefer ``query_config``; any remaining ties
    are broken uniformly at random.
    """
    tied_configs = torch.where(mean == mean.max())[0]
    if query_config in tied_configs:
        max_config = query_config
    else:
        pick = torch.randint(tied_configs.shape[0], (1,))[0]
        max_config = tied_configs[pick].item()
    config_row = mean[max_config]
    tied_epochs = torch.where(config_row == config_row.max())[0]
    pick = torch.randint(tied_epochs.shape[0], (1,))[0]
    max_epoch = tied_epochs[pick].item()
    return max_config, max_epoch
def get_best(test_best):
    """Unpack the best (config, epoch) pair recorded in ``test_best``."""
    return test_best['best_config'], test_best['best_epoch']
def load_database(args, info, meta):
    """Load configs/profiles from disk and build train/test tensors plus bookkeeping.

    Returns:
        train_X, train_Y, test_X, test_Y: feature/target tensors on info.DEVICE.
        train_rank, test_rank: per-config performance ranks (0 = best).
        test_best: best value/config/epoch observed for the testing task.
        all_observations, if_observed, if_hidden: per-task (config x epoch) arrays.
    """
    # NOTE(review): pickled files are assumed to be trusted local artifacts;
    # pk.load on untrusted input would be unsafe.
    configs = pk.load(open(info.FILE_CONFIG, 'rb'))
    profiles = pk.load(open(info.FILE_PROFILE, 'rb'))
    train_X, train_Y, test_X, test_Y = [], [], [], []
    train_rank, test_rank, all_observations, if_observed, if_hidden = defaultdict(list), {}, {}, {}, {}
    for task_id in range(len(meta.TASK2ID)):
        # Only tasks participating in this run: training tasks + the test task.
        if task_id not in meta.training_task_ids and task_id != meta.testing_task_id: continue
        task_profile = profiles[args.profile_id][meta.ID2TASK[task_id]]
        all_observations[task_id] = np.zeros((meta.NUM_BASE_CONFIG, meta.NUM_EPOCH))
        if_observed[task_id] = np.full((meta.NUM_BASE_CONFIG, meta.NUM_EPOCH), False)
        if task_id == meta.testing_task_id: if_hidden[task_id] = np.full((meta.NUM_BASE_CONFIG, meta.NUM_EPOCH), False)
        config_performance = {}
        for config_id in range(meta.NUM_BASE_CONFIG):
            if config_id in task_profile:
                # Number of epochs actually recorded for this config.
                available_epoch = len(task_profile[config_id])
                config_performance[config_id] = max(task_profile[config_id])
                all_observations[task_id][config_id, range(available_epoch)] = task_profile[config_id]
                # Build one feature row per recorded epoch: config features
                # plus the task id and a normalized (1-based) epoch index.
                x = meta.convert_config(configs[config_id])
                x[meta.INFO2ID['task']] = task_id
                x = np.vstack([x] * available_epoch)
                x[:, meta.INFO2ID['epoch']] = (1 + np.arange(available_epoch)) / meta.NUM_EPOCH
                if task_id in meta.training_task_ids:
                    train_X.append(x); train_Y.append(task_profile[config_id])
                    if_observed[task_id][config_id, range(available_epoch)] = True
                elif task_id == meta.testing_task_id:
                    test_X.append(x); test_Y.append(task_profile[config_id])
                    if_hidden[task_id][config_id, range(available_epoch)] = True
        # Rank configs by their best value, descending (rank 0 = best).
        config_performance = [(k,v) for k,v in sorted(config_performance.items(), key=lambda x: x[1], reverse=True)]
        for config_rank, (config_id, _) in enumerate(config_performance):
            if task_id in meta.training_task_ids:
                train_rank[config_id].append(config_rank)
            elif task_id == meta.testing_task_id:
                test_rank[config_id] = config_rank
        if task_id == meta.testing_task_id:
            test_best = {'best_value':config_performance[0][1], 'best_config':config_performance[0][0]}
            test_best['best_epoch'] = np.argmax(task_profile[test_best['best_config']])
    # Stack features / concatenate targets and move them to the target device.
    # NOTE(review): assumes task_profile values are numpy arrays (torch.from_numpy)
    # and that at least one training and one testing config exist — confirm.
    train_X, test_X = map(lambda x: torch.vstack([torch.from_numpy(each) for each in x]).float().to(info.DEVICE), [train_X, test_X])
    train_Y, test_Y = map(lambda x: torch.cat([torch.from_numpy(each) for each in x]).float().to(info.DEVICE), [train_Y, test_Y])
    return train_X, train_Y, test_X, test_Y, train_rank, test_rank, test_best, all_observations, if_observed, if_hidden
"numpy.full",
"torch.randint",
"numpy.random.seed",
"math.ceil",
"numpy.argmax",
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.zeros",
"numpy.argmin",
"collections.defaultdict",
"random.seed",
"numpy.arange",
"time.localtime",
"numpy.vstack",
"torch.isnan",
"numpy.random.shuffl... | [((210, 227), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (221, 227), False, 'import random\n'), ((232, 252), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (246, 252), True, 'import numpy as np\n'), ((257, 280), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (274, 280), False, 'import torch\n'), ((285, 313), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (307, 313), False, 'import torch\n'), ((2715, 2741), 'numpy.arange', 'np.arange', (['base_X.shape[0]'], {}), '(base_X.shape[0])\n', (2724, 2741), True, 'import numpy as np\n'), ((2746, 2772), 'numpy.random.shuffle', 'np.random.shuffle', (['index_X'], {}), '(index_X)\n', (2763, 2772), True, 'import numpy as np\n'), ((2895, 2916), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (2904, 2916), True, 'import numpy as np\n'), ((3891, 3978), 'numpy.argmin', 'np.argmin', (['model.covar_module_task.if_observed[meta.testing_task_id][query_config]'], {}), '(model.covar_module_task.if_observed[meta.testing_task_id][\n query_config])\n', (3900, 3978), True, 'import numpy as np\n'), ((2943, 2971), 'numpy.random.shuffle', 'np.random.shuffle', (['batch_seq'], {}), '(batch_seq)\n', (2960, 2971), True, 'import numpy as np\n'), ((3047, 3081), 'math.ceil', 'math.ceil', (['(X.shape[0] / batch_size)'], {}), '(X.shape[0] / batch_size)\n', (3056, 3081), False, 'import math\n'), ((3644, 3671), 'torch.isnan', 'torch.isnan', (['unobserved_UCB'], {}), '(unobserved_UCB)\n', (3655, 3671), False, 'import torch\n'), ((5035, 5052), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5046, 5052), False, 'from collections import defaultdict\n'), ((5326, 5374), 'numpy.zeros', 'np.zeros', (['(meta.NUM_BASE_CONFIG, meta.NUM_EPOCH)'], {}), '((meta.NUM_BASE_CONFIG, meta.NUM_EPOCH))\n', (5334, 5374), True, 'import numpy as np\n'), ((5406, 5460), 'numpy.full', 'np.full', (['(meta.NUM_BASE_CONFIG, 
meta.NUM_EPOCH)', '(False)'], {}), '((meta.NUM_BASE_CONFIG, meta.NUM_EPOCH), False)\n', (5413, 5460), True, 'import numpy as np\n'), ((5526, 5580), 'numpy.full', 'np.full', (['(meta.NUM_BASE_CONFIG, meta.NUM_EPOCH)', '(False)'], {}), '((meta.NUM_BASE_CONFIG, meta.NUM_EPOCH), False)\n', (5533, 5580), True, 'import numpy as np\n'), ((7334, 7383), 'numpy.argmax', 'np.argmax', (["task_profile[test_best['best_config']]"], {}), "(task_profile[test_best['best_config']])\n", (7343, 7383), True, 'import numpy as np\n'), ((424, 440), 'time.localtime', 'time.localtime', ([], {}), '()\n', (438, 440), False, 'import time\n'), ((6136, 6168), 'numpy.vstack', 'np.vstack', (['([x] * available_epoch)'], {}), '([x] * available_epoch)\n', (6145, 6168), True, 'import numpy as np\n'), ((3819, 3862), 'torch.randint', 'torch.randint', (['query_configs.shape[0]', '(1,)'], {}), '(query_configs.shape[0], (1,))\n', (3832, 3862), False, 'import torch\n'), ((4519, 4559), 'torch.randint', 'torch.randint', (['max_epochs.shape[0]', '(1,)'], {}), '(max_epochs.shape[0], (1,))\n', (4532, 4559), False, 'import torch\n'), ((4361, 4402), 'torch.randint', 'torch.randint', (['max_configs.shape[0]', '(1,)'], {}), '(max_configs.shape[0], (1,))\n', (4374, 4402), False, 'import torch\n'), ((6220, 6246), 'numpy.arange', 'np.arange', (['available_epoch'], {}), '(available_epoch)\n', (6229, 6246), True, 'import numpy as np\n'), ((7455, 7477), 'torch.from_numpy', 'torch.from_numpy', (['each'], {}), '(each)\n', (7471, 7477), False, 'import torch\n'), ((7585, 7607), 'torch.from_numpy', 'torch.from_numpy', (['each'], {}), '(each)\n', (7601, 7607), False, 'import torch\n')] |
import argparse
import datetime
import logging
import os
import sys
import tempfile
import numpy as np
import igibson
from igibson.envs.igibson_env import iGibsonEnv
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher
from igibson.utils.ig_logging import IGLogWriter
from igibson.utils.utils import parse_config
def main():
    """
    Example of how to save a demo of a task
    """
    # The docstring is embedded in the log banner on purpose.
    banner = "*" * 80
    logging.info(banner + "\nDescription:" + main.__doc__ + banner)
    args = parse_args()
    collect_demo(
        args.scene,
        args.task,
        args.task_id,
        args.instance_id,
        args.log_path,
        args.disable_save,
        args.disable_scene_cache,
        args.profile,
        args.config,
    )
def parse_args():
    """Build and parse the command-line arguments for the demo collector."""
    # iGibson interactive scenes that demos can be recorded in.
    available_scenes = [
        "Beechwood_0_int",
        "Beechwood_1_int",
        "Benevolence_0_int",
        "Benevolence_1_int",
        "Benevolence_2_int",
        "Ihlen_0_int",
        "Ihlen_1_int",
        "Merom_0_int",
        "Merom_1_int",
        "Pomaria_0_int",
        "Pomaria_1_int",
        "Pomaria_2_int",
        "Rs_int",
        "Wainscott_0_int",
        "Wainscott_1_int",
    ]
    arg_parser = argparse.ArgumentParser(description="Run and collect a demo of a task")
    arg_parser.add_argument(
        "--scene",
        type=str,
        choices=available_scenes,
        default="Rs_int",
        nargs="?",
        help="Scene name/ID matching iGibson interactive scenes.",
    )
    arg_parser.add_argument(
        "--task",
        type=str,
        required=False,
        default="cleaning_out_drawers",
        nargs="?",
        help="Name of task to collect a demo of. If it a BEHAVIOR activity, the name should be one of the official"
        " activity labels, matching one of the folders in the BDDL repository.",
    )
    arg_parser.add_argument(
        "--task_id",
        type=int,
        required=False,
        default=0,
        choices=[0, 1],
        nargs="?",
        help="[Only for BEHAVIOR activities] Integer ID identifying the BDDL definition of the BEHAVIOR activity. "
        "Since we only provide two definitions per activity, the ID should be 0 or 1.",
    )
    arg_parser.add_argument(
        "--instance_id",
        type=int,
        required=False,
        default=0,
        help="[Only for BEHAVIOR activities] Instance of BEHAVIOR activity (particular URDF corresponding to "
        "an instantiation of a BDDL activity definition)",
    )
    # By default, log to a throwaway file in the system temp directory.
    default_log_file = os.path.join(tempfile.gettempdir(), "demo.hdf5")
    arg_parser.add_argument("--log_path", type=str, default=default_log_file, help="Path (and filename) of log file")
    arg_parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.")
    arg_parser.add_argument(
        "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches."
    )
    arg_parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
    arg_parser.add_argument(
        "--config",
        help="which config file to use [default: use yaml files in examples/configs]",
        default=os.path.join(igibson.example_config_path, "behavior_vr.yaml"),
    )
    return arg_parser.parse_args()
def collect_demo(
    scene,
    task,
    task_id=0,
    instance_id=0,
    log_path=None,
    disable_save=False,
    disable_scene_cache=False,
    profile=False,
    config_file=os.path.join(igibson.example_config_path, "behavior_vr.yaml"),
):
    """Run one episode of ``task`` in ``scene`` and optionally record it to HDF5.

    Args:
        scene: iGibson interactive scene name/ID.
        task: task (e.g. BEHAVIOR activity) name.
        task_id: BDDL definition ID of the BEHAVIOR activity (0 or 1).
        instance_id: instance (URDF instantiation) of the BEHAVIOR activity.
        log_path: path of the HDF5 log file; auto-generated if None.
        disable_save: if True, no log file is written.
        disable_scene_cache: if True, sample the scene online instead of
            using a pre-initialized cache.
        profile: if True, enable the log writer's profiling mode.
        config_file: iGibson YAML config used to build the environment.
    """
    # HDR files for PBR rendering
    hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
    hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
    light_modulation_map_filename = os.path.join(
        igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
    )
    background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
    # Rendering settings
    rendering_settings = MeshRendererSettings(
        optimized=True,
        fullscreen=False,
        env_texture_filename=hdr_texture,
        env_texture_filename2=hdr_texture2,
        env_texture_filename3=background_texture,
        light_modulation_map_filename=light_modulation_map_filename,
        enable_shadow=True,
        enable_pbr=True,
        msaa=False,
        light_dimming_factor=1.0,
    )
    # Overlay the task selection on top of the YAML config.
    config = parse_config(config_file)
    config["task"] = task
    config["task_id"] = task_id
    config["scene_id"] = scene
    config["instance_id"] = instance_id
    config["online_sampling"] = disable_scene_cache
    config["load_clutter"] = True
    env = iGibsonEnv(
        config_file=config,
        mode="headless",
        action_timestep=1 / 30.0,
        physics_timestep=1 / 300.0,
        rendering_settings=rendering_settings,
    )
    env.reset()
    robot = env.robots[0]
    log_writer = None
    if not disable_save:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        if log_path is None:
            log_path = "{}_{}_{}_{}_{}.hdf5".format(task, task_id, scene, instance_id, timestamp)
        log_writer = IGLogWriter(
            env.simulator,
            log_filepath=log_path,
            task=env.task,
            store_vr=False,
            vr_robot=robot,
            profiling_mode=profile,
            filter_objects=True,
        )
        log_writer.set_up_data_storage()
        log_writer.hf.attrs["/metadata/instance_id"] = instance_id
    steps = 0
    # Main recording loop
    while True:
        if robot.__class__.__name__ == "BehaviorRobot" and steps < 2:
            # Use the first 2 steps to activate BehaviorRobot
            action = np.zeros((28,))
            action[19] = 1
            action[27] = 1
        else:
            action = np.random.uniform(-0.01, 0.01, size=(robot.action_dim,))
        # Execute the action
        state, reward, done, info = env.step(action)
        if log_writer and not disable_save:
            log_writer.process_frame()
        if done:
            break
        # BUG FIX: `steps` was never incremented, so the `steps < 2`
        # BehaviorRobot-activation branch above would fire on every
        # iteration and the random actions were never issued.
        steps += 1
    if log_writer and not disable_save:
        log_writer.end_log_session()
    env.close()
# Script entry point: run the demo-collection example when executed directly.
if __name__ == "__main__":
    main()
| [
"igibson.render.mesh_renderer.mesh_renderer_cpu.MeshRendererSettings",
"igibson.utils.ig_logging.IGLogWriter",
"numpy.random.uniform",
"argparse.ArgumentParser",
"tempfile.gettempdir",
"numpy.zeros",
"datetime.datetime.now",
"igibson.envs.igibson_env.iGibsonEnv",
"logging.info",
"igibson.utils.uti... | [((498, 565), 'logging.info', 'logging.info', (["('*' * 80 + '\\nDescription:' + main.__doc__ + '*' * 80)"], {}), "('*' * 80 + '\\nDescription:' + main.__doc__ + '*' * 80)\n", (510, 565), False, 'import logging\n'), ((1299, 1370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run and collect a demo of a task"""'}), "(description='Run and collect a demo of a task')\n", (1322, 1370), False, 'import argparse\n'), ((3542, 3603), 'os.path.join', 'os.path.join', (['igibson.example_config_path', '"""behavior_vr.yaml"""'], {}), "(igibson.example_config_path, 'behavior_vr.yaml')\n", (3554, 3603), False, 'import os\n'), ((3672, 3749), 'os.path.join', 'os.path.join', (['igibson.ig_dataset_path', '"""scenes"""', '"""background"""', '"""probe_02.hdr"""'], {}), "(igibson.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')\n", (3684, 3749), False, 'import os\n'), ((3769, 3846), 'os.path.join', 'os.path.join', (['igibson.ig_dataset_path', '"""scenes"""', '"""background"""', '"""probe_03.hdr"""'], {}), "(igibson.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')\n", (3781, 3846), False, 'import os\n'), ((3883, 3979), 'os.path.join', 'os.path.join', (['igibson.ig_dataset_path', '"""scenes"""', '"""Rs_int"""', '"""layout"""', '"""floor_lighttype_0.png"""'], {}), "(igibson.ig_dataset_path, 'scenes', 'Rs_int', 'layout',\n 'floor_lighttype_0.png')\n", (3895, 3979), False, 'import os\n'), ((4015, 4103), 'os.path.join', 'os.path.join', (['igibson.ig_dataset_path', '"""scenes"""', '"""background"""', '"""urban_street_01.jpg"""'], {}), "(igibson.ig_dataset_path, 'scenes', 'background',\n 'urban_street_01.jpg')\n", (4027, 4103), False, 'import os\n'), ((4151, 4472), 'igibson.render.mesh_renderer.mesh_renderer_cpu.MeshRendererSettings', 'MeshRendererSettings', ([], {'optimized': '(True)', 'fullscreen': '(False)', 'env_texture_filename': 'hdr_texture', 'env_texture_filename2': 'hdr_texture2', 'env_texture_filename3': 
'background_texture', 'light_modulation_map_filename': 'light_modulation_map_filename', 'enable_shadow': '(True)', 'enable_pbr': '(True)', 'msaa': '(False)', 'light_dimming_factor': '(1.0)'}), '(optimized=True, fullscreen=False, env_texture_filename\n =hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3\n =background_texture, light_modulation_map_filename=\n light_modulation_map_filename, enable_shadow=True, enable_pbr=True,\n msaa=False, light_dimming_factor=1.0)\n', (4171, 4472), False, 'from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings\n'), ((4555, 4580), 'igibson.utils.utils.parse_config', 'parse_config', (['config_file'], {}), '(config_file)\n', (4567, 4580), False, 'from igibson.utils.utils import parse_config\n'), ((4806, 4950), 'igibson.envs.igibson_env.iGibsonEnv', 'iGibsonEnv', ([], {'config_file': 'config', 'mode': '"""headless"""', 'action_timestep': '(1 / 30.0)', 'physics_timestep': '(1 / 300.0)', 'rendering_settings': 'rendering_settings'}), "(config_file=config, mode='headless', action_timestep=1 / 30.0,\n physics_timestep=1 / 300.0, rendering_settings=rendering_settings)\n", (4816, 4950), False, 'from igibson.envs.igibson_env import iGibsonEnv\n'), ((2614, 2635), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2633, 2635), False, 'import tempfile\n'), ((5306, 5452), 'igibson.utils.ig_logging.IGLogWriter', 'IGLogWriter', (['env.simulator'], {'log_filepath': 'log_path', 'task': 'env.task', 'store_vr': '(False)', 'vr_robot': 'robot', 'profiling_mode': 'profile', 'filter_objects': '(True)'}), '(env.simulator, log_filepath=log_path, task=env.task, store_vr=\n False, vr_robot=robot, profiling_mode=profile, filter_objects=True)\n', (5317, 5452), False, 'from igibson.utils.ig_logging import IGLogWriter\n'), ((3258, 3319), 'os.path.join', 'os.path.join', (['igibson.example_config_path', '"""behavior_vr.yaml"""'], {}), "(igibson.example_config_path, 'behavior_vr.yaml')\n", (3270, 3319), False, 
'import os\n'), ((5862, 5877), 'numpy.zeros', 'np.zeros', (['(28,)'], {}), '((28,))\n', (5870, 5877), True, 'import numpy as np\n'), ((5967, 6023), 'numpy.random.uniform', 'np.random.uniform', (['(-0.01)', '(0.01)'], {'size': '(robot.action_dim,)'}), '(-0.01, 0.01, size=(robot.action_dim,))\n', (5984, 6023), True, 'import numpy as np\n'), ((5104, 5127), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5125, 5127), False, 'import datetime\n')] |
###############################################################################
# #
# sudoku_solver.py #
# #
###############################################################################
# Author: <NAME> #
# Data: 2019.12.27 #
# License: MIT Open Source License #
# #
# Description: This is a Python program to solve Sudoku Puzzles using the #
# BackTracking algorithm or strategy. #
# The BackTracking algorithm is a way of solving constrained #
# satisfiable problems using recursion in a deep first tree #
# traversal, in witch the solution is found iteratively. When the constrains #
# aren't satisfied the solution step is backtracked, and from that comes it's #
# name. Each board position calls the solve() function. #
# Any Backtracking problem has: #
# -Choices #
# -Constrains #
# -Goals #
# #
# To Run this code do: #
# 1. Change your Sudoku empty puzzle. #
# 2. Python sudoku.py #
# #
# For references see the project page at: #
# https://github.com/joaocarvalhoopen?tab=repositories #
###############################################################################
import numpy as np
MAX_NUM_ROWS = 9  # Sudoku board height
MAX_NUM_COLS = 9  # Sudoku board width
# Scratch counter template: index i counts occurrences of digit i on a
# row/column/square; index 0 counts empty cells and is ignored by the checks.
EMPTY_LIST = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Top-left (row, col) corner of each of the nine 3x3 sub-squares.
START_SQUARE_PAIRS = [(0, 0), (0, 3), (0, 6),
                      (3, 0), (3, 3), (3, 6),
                      (6, 0), (6, 3), (6, 6)]
def printBoard(state, message):
    """Pretty-print a 9x9 Sudoku board, visually separating the 3x3 squares.

    A blank line follows `message`; single spaces separate cells, double
    spaces and blank lines separate the 3x3 sub-squares.
    """
    print("\n" + message + "\n")
    for r in range(MAX_NUM_ROWS):
        pieces = []
        for c in range(MAX_NUM_COLS):
            if c == MAX_NUM_COLS - 1:
                sep = ""                       # last cell: row ends here
            elif c == 2 or c == 5:
                sep = "  "                     # extra gap between squares
            else:
                sep = " "
            pieces.append(str(state[r][c]) + sep)
        print("".join(pieces))
        if r == 2 or r == 5:
            print("")                          # blank line between square bands
def getNextValidBoardPos(row_in, col_in, state):
    """Return (row, col) of the first empty cell (value 0) at or after
    (row_in, col_in) in row-major order, or (None, None) if the board is full.

    Only the first scanned row starts at `col_in`; subsequent rows start at 0.
    """
    r, c = row_in, col_in
    while r < MAX_NUM_ROWS:
        while c < MAX_NUM_COLS:
            if state[r, c] == 0:
                return r, c
            c += 1
        c = 0          # later rows are scanned from their first column
        r += 1
    return None, None
def isBoardStateConstrainsSatisfied(state):
    """Return True iff no row, no column and no 3x3 square of `state`
    contains a repeated digit 1..9 (zeros/empty cells are ignored)."""

    def _no_duplicates(values):
        # Count occurrences of each digit; slot 0 (empty) is never checked.
        counts = EMPTY_LIST.copy()
        for v in values:
            counts[v] += 1
        return max(counts[1:]) <= 1

    # Constraint: every row holds each digit 1..9 at most once.
    for r in range(MAX_NUM_ROWS):
        if not _no_duplicates(state[r, c] for c in range(MAX_NUM_COLS)):
            return False
    # Constraint: every column holds each digit 1..9 at most once.
    for c in range(MAX_NUM_COLS):
        if not _no_duplicates(state[r, c] for r in range(MAX_NUM_ROWS)):
            return False
    # Constraint: every 3x3 square holds each digit 1..9 at most once.
    for r0, c0 in START_SQUARE_PAIRS:
        square = (state[r0 + dr, c0 + dc] for dr in range(3) for dc in range(3))
        if not _no_duplicates(square):
            return False
    # All the board constraints are satisfied.
    return True
def isIterativeBoardStateConstrainsSatisfied(row_in, col_in, state):
    """Incremental constraint check used during backtracking.

    All earlier placements were already validated, so only the row, the
    column and the 3x3 square touched by (row_in, col_in) need re-checking.
    This is a huge time saver compared to validating the whole board.
    """

    def _no_duplicates(values):
        # Count occurrences of each digit; slot 0 (empty) is never checked.
        counts = EMPTY_LIST.copy()
        for v in values:
            counts[v] += 1
        return max(counts[1:]) <= 1

    # The touched row must hold each digit 1..9 at most once.
    if not _no_duplicates(state[row_in, c] for c in range(MAX_NUM_COLS)):
        return False
    # The touched column must hold each digit 1..9 at most once.
    if not _no_duplicates(state[r, col_in] for r in range(MAX_NUM_ROWS)):
        return False
    # The (unique) 3x3 square containing (row_in, col_in).
    r0, c0 = 3 * (row_in // 3), 3 * (col_in // 3)
    return _no_duplicates(
        state[r0 + dr, c0 + dc] for dr in range(3) for dc in range(3))
def solve(row_in, col_in, state):
    """Backtracking Sudoku solver.

    Scans for the next empty cell at/after (row_in, col_in), tries each
    digit 1..9 there (CHOICES), keeps only placements that satisfy the
    incremental CONSTRAINTS, and recurses until the board is full (GOAL).

    Returns:
        A solved copy of the board (numpy array) on success, or False when
        no digit leads to a solution from this state.
    """
    # Skip to the next empty (zero) position on the board.
    row, col = getNextValidBoardPos(row_in, col_in, state)
    # GOAL reached: no empty position left, the board is complete.
    if row is None:  # was `row == None` -- identity check is the correct idiom
        return state.copy()
    # Work on a copy so the caller's board is untouched by failed branches.
    newState = state.copy()
    for choice in range(1, 10):
        newState[row, col] = choice
        # Validate the choice against the CONSTRAINTS (only the touched
        # row/column/square need re-checking).
        if not isIterativeBoardStateConstrainsSatisfied(row, col, newState):
            continue
        # If valid, incrementally build the next solution phase.
        foundSolutionBoard = solve(row, col, newState)
        # A numpy board signals success; `is not False` replaces the old
        # `type(x) != type(False)` anti-idiom with identical behavior.
        if foundSolutionBoard is not False:
            return foundSolutionBoard
    return False
if __name__ == "__main__":
    print("\n########################")
    print("# Sudoku Puzzle Solver #")
    print("########################")
    # Zeros mark the cells whose value has to be discovered.
    sudoku_to_solve = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
                       [6, 0, 0, 1, 9, 5, 0, 0, 0],
                       [0, 9, 8, 0, 0, 0, 0, 6, 0],
                       [8, 0, 0, 0, 6, 0, 0, 0, 3],
                       [4, 0, 0, 8, 0, 3, 0, 0, 1],
                       [7, 0, 0, 0, 2, 0, 0, 0, 6],
                       [0, 6, 0, 0, 0, 0, 2, 8, 0],
                       [0, 0, 0, 4, 1, 9, 0, 0, 5],
                       [0, 0, 0, 0, 8, 0, 0, 7, 9]]
    board = np.array(sudoku_to_solve)
    printBoard(board, "Puzzle board...")
    # Start the backtracking search from the top-left corner.
    solution = solve(0, 0, board)
    printBoard(solution, "Solution...")
| [
"numpy.array"
] | [((7723, 7748), 'numpy.array', 'np.array', (['sudoku_to_solve'], {}), '(sudoku_to_solve)\n', (7731, 7748), True, 'import numpy as np\n')] |
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
# New record to be appended to the census data.
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]
# Load the census CSV (skipping its header row) and append the new record.
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
census = np.concatenate((data, new_record), axis=0)
print(census.shape)
# --------------
# Age statistics (column 0).
import numpy as np
age = census[:, 0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
# --------------
# Split the records by race code (column 2) and find the minority race.
import numpy as np
race_0, race_1, race_2, race_3, race_4 = (
    census[census[:, 2] == i] for i in range(5))
len_0, len_1, len_2, len_3, len_4 = (
    len(race_0), len(race_1), len(race_2), len(race_3), len(race_4))
mini = min(len_0, len_1, len_2, len_3, len_4)
mini1 = len_3  # hard-coded from inspecting the data: race 3 is the smallest group
minority_race = 3
# --------------
# Average weekly working hours (column 6) of citizens older than 60.
import numpy as np
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
# Mean of column 7 for high (>10 education-num, column 1) vs low education.
import numpy as np
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = high.mean(axis=0)[7]
avg_pay_low = low.mean(axis=0)[7]
print(avg_pay_high)
print(avg_pay_low)
| [
"numpy.std",
"numpy.genfromtxt",
"numpy.concatenate"
] | [((194, 243), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (207, 243), True, 'import numpy as np\n'), ((271, 313), 'numpy.concatenate', 'np.concatenate', (['(data, new_record)'], {'axis': '(0)'}), '((data, new_record), axis=0)\n', (285, 313), True, 'import numpy as np\n'), ((497, 508), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (503, 508), True, 'import numpy as np\n')] |
import torch
import torch.nn.utils as nn_utils
import numpy as np
from codes.c_models.base_model import RNNModel
from codes.d_agents.on_policy.on_policy_agent import OnPolicyAgent
from codes.e_utils import replay_buffer
from codes.e_utils.common_utils import float32_preprocessor
class AgentA2C(OnPolicyAgent):
    """Advantage Actor-Critic (A2C) agent skeleton.

    Action selection (__call__) and the training step (on_train) are left to
    concrete subclasses; this class owns the one-batch experience buffer and
    the batch-unpacking logic shared by actor-critic agents.
    """
    def __init__(self, worker_id, action_shape, params, device):
        super(AgentA2C, self).__init__(worker_id, action_shape, params, device)
        # Filled in later by concrete subclasses / external setup code.
        self.train_action_selector = None
        self.test_and_play_action_selector = None
        self.model = None
        self.optimizer = None
        self.actor_optimizer = None
        self.critic_optimizer = None
        # On-policy: the buffer holds exactly one training batch
        # (the experience source is attached later).
        self.buffer = replay_buffer.ExperienceReplayBuffer(
            experience_source=None, buffer_size=self.params.BATCH_SIZE
        )
    def __call__(self, state, critics=None):
        # Subclasses implement the policy: state -> action.
        raise NotImplementedError
    # Translated note: when training alternately on batches from "lucky"
    # episodes and "unlucky" episodes, the network parameters are repeatedly
    # swept in different directions --> the variance of the gradients is very high.
    def on_train(self, step_idx, expected_model_version, local_step_idx=None):
        raise NotImplementedError
    def unpack_batch_for_actor_critic(
            self, batch, target_model=None, sac_base_model=None, alpha=None, params=None
    ):
        """
        Convert a batch of experience tuples into training tensors.

        Bootstraps target action-values with the target critic for
        non-terminal transitions: r + gamma**k * V(last_state).
        NOTE(review): `sac_base_model` and `alpha` are accepted but never
        used in this body -- presumably kept for a shared signature; confirm.

        :param batch: iterable of experience objects with .state, .action,
            .reward, .last_state (None when terminal), .last_step and,
            for RNN models, .agent_state hidden states.
        :param target_model: critic used to bootstrap the value of last_states.
        :param params: config object providing GAMMA.
        :return: (states_v, actions_v, target_action_values_v) and, for RNN
            models, additionally (actor_hidden_states_v, critic_hidden_states_v).
        """
        states, actions, rewards, not_done_idx, last_states, last_steps = [], [], [], [], [], []
        if isinstance(self.model, RNNModel):
            # RNN models also need the per-step recurrent hidden states.
            actor_hidden_states = []
            critic_hidden_states = []
            critic_1_hidden_states = None
            critic_2_hidden_states = None
        else:
            actor_hidden_states = critic_hidden_states = critic_1_hidden_states = critic_2_hidden_states = None
        for idx, exp in enumerate(batch):
            states.append(np.array(exp.state, copy=False))
            actions.append(exp.action)
            rewards.append(exp.reward)
            if exp.last_state is not None:
                # Non-terminal transition: remember where to add the bootstrap value.
                not_done_idx.append(idx)
                last_states.append(np.array(exp.last_state, copy=False))
                last_steps.append(exp.last_step)
            if isinstance(self.model, RNNModel):
                actor_hidden_states.append(exp.agent_state.actor_hidden_state)
                critic_hidden_states.append(exp.agent_state.critic_hidden_state)
        states_v = float32_preprocessor(states).to(self.device)
        actions_v = self.convert_action_to_torch_tensor(actions, self.device)
        if isinstance(self.model, RNNModel):
            actor_hidden_states_v = float32_preprocessor(actor_hidden_states).to(self.device)
            critic_hidden_states_v = float32_preprocessor(critic_hidden_states).to(self.device)
            critic_1_hidden_states_v = None
            critic_2_hidden_states_v = None
        else:
            actor_hidden_states_v = critic_hidden_states_v = critic_1_hidden_states_v = critic_2_hidden_states_v = None
        # handle rewards: start from the raw rewards, then add discounted bootstraps.
        target_action_values_np = np.array(rewards, dtype=np.float32)
        if not_done_idx:
            last_states_v = torch.FloatTensor(np.array(last_states, copy=False)).to(self.device)
            last_steps_v = np.asarray(last_steps)
            last_values_v, _ = target_model.forward_critic(last_states_v, critic_hidden_states_v)
            # gamma**last_steps handles multi-step (n-step) transitions.
            # NOTE(review): .numpy() on a tensor assumes it lives on CPU;
            # confirm self.device is CPU or add .cpu() before .numpy().
            last_values_np = last_values_v.detach().numpy()[:, 0] * (params.GAMMA ** last_steps_v)
            target_action_values_np[not_done_idx] += last_values_np
        target_action_values_v = float32_preprocessor(target_action_values_np).to(self.device)
        # states_v.shape: [128, 3]
        # actions_v.shape: [128, 1]
        # target_action_values_v.shape: [128]
        if isinstance(self.model, RNNModel):
            return states_v, actions_v, target_action_values_v, actor_hidden_states_v, critic_hidden_states_v
        else:
            return states_v, actions_v, target_action_values_v
    # def backward_and_step(self, loss_critic_v, loss_entropy_v, loss_actor_v):
    #     self.optimizer.zero_grad()
    #     loss_actor_v.backward(retain_graph=True)
    #     (loss_critic_v + self.params.ENTROPY_LOSS_WEIGHT * loss_entropy_v).backward()
    #     nn_utils.clip_grad_norm_(self.model.base.parameters(), self.params.CLIP_GRAD)
    #     self.optimizer.step()
    #
    #     gradients = self.model.get_gradients_for_current_parameters()
    #
    #     # try:
    #     #     self.model.check_gradient_nan_or_zero(gradients)
    #     # except ValueError as e:
    #     #     print(loss_critic_v, loss_entropy_v, loss_actor_v)
    #     #     exit(-1)
    #
    #     self.buffer.clear()
    #
    #     return gradients, loss_critic_v.item(), loss_actor_v.item() * -1.0
    # def backward_and_step(self, loss_critic_v, loss_entropy_v, loss_actor_v):
    #     self.optimizer.zero_grad()
    #     loss_actor_v.backward(retain_graph=True)
    #     (loss_critic_v + self.params.ENTROPY_LOSS_WEIGHT * loss_entropy_v).backward()
    #     nn_utils.clip_grad_norm_(self.model.base.parameters(), self.params.CLIP_GRAD)
    #     self.optimizer.step()
    #
    #     gradients = self.model.get_gradients_for_current_parameters()
    #
    #     # try:
    #     #     self.model.check_gradient_nan_or_zero(gradients)
    #     # except ValueError as e:
    #     #     print(loss_critic_v, loss_entropy_v, loss_actor_v)
    #     #     exit(-1)
    #
    #     self.buffer.clear()
    #
    #     return gradients, loss_critic_v.item(), loss_actor_v.item() * -1.0
| [
"numpy.asarray",
"codes.e_utils.common_utils.float32_preprocessor",
"numpy.array",
"codes.e_utils.replay_buffer.ExperienceReplayBuffer"
] | [((718, 819), 'codes.e_utils.replay_buffer.ExperienceReplayBuffer', 'replay_buffer.ExperienceReplayBuffer', ([], {'experience_source': 'None', 'buffer_size': 'self.params.BATCH_SIZE'}), '(experience_source=None, buffer_size=\n self.params.BATCH_SIZE)\n', (754, 819), False, 'from codes.e_utils import replay_buffer\n'), ((3198, 3233), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (3206, 3233), True, 'import numpy as np\n'), ((3384, 3406), 'numpy.asarray', 'np.asarray', (['last_steps'], {}), '(last_steps)\n', (3394, 3406), True, 'import numpy as np\n'), ((2009, 2040), 'numpy.array', 'np.array', (['exp.state'], {'copy': '(False)'}), '(exp.state, copy=False)\n', (2017, 2040), True, 'import numpy as np\n'), ((2557, 2585), 'codes.e_utils.common_utils.float32_preprocessor', 'float32_preprocessor', (['states'], {}), '(states)\n', (2577, 2585), False, 'from codes.e_utils.common_utils import float32_preprocessor\n'), ((3706, 3751), 'codes.e_utils.common_utils.float32_preprocessor', 'float32_preprocessor', (['target_action_values_np'], {}), '(target_action_values_np)\n', (3726, 3751), False, 'from codes.e_utils.common_utils import float32_preprocessor\n'), ((2240, 2276), 'numpy.array', 'np.array', (['exp.last_state'], {'copy': '(False)'}), '(exp.last_state, copy=False)\n', (2248, 2276), True, 'import numpy as np\n'), ((2762, 2803), 'codes.e_utils.common_utils.float32_preprocessor', 'float32_preprocessor', (['actor_hidden_states'], {}), '(actor_hidden_states)\n', (2782, 2803), False, 'from codes.e_utils.common_utils import float32_preprocessor\n'), ((2857, 2899), 'codes.e_utils.common_utils.float32_preprocessor', 'float32_preprocessor', (['critic_hidden_states'], {}), '(critic_hidden_states)\n', (2877, 2899), False, 'from codes.e_utils.common_utils import float32_preprocessor\n'), ((3306, 3339), 'numpy.array', 'np.array', (['last_states'], {'copy': '(False)'}), '(last_states, copy=False)\n', (3314, 3339), True, 'import 
numpy as np\n')] |
# %%
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import argparse
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Dropout, Concatenate, Bidirectional, GlobalMaxPooling1D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from gensim.models import Word2Vec, KeyedVectors
from layers import Add, LayerNormalization
from layers import MultiHeadAttention, PositionWiseFeedForward
from layers import PositionEncoding
from tensorflow.keras.callbacks import Callback
import tensorflow.keras.backend as K
# %%
def get_data():
    """Load the cached numpy arrays produced by preprocessing from tmp/.

    Returns a dict with six padded id sequences per split (X1..X6 train/val),
    the gender/age label arrays, and one pretrained embedding matrix per
    id column.
    """
    def _load(path):
        return np.load(path, allow_pickle=True)

    DATA = {}
    # inputs_{2i}.npy is the train split of feature Xi+1, inputs_{2i+1}.npy the val split.
    for i in range(6):
        DATA[f'X{i + 1}_train'] = _load(f'tmp/inputs_{2 * i}.npy')
        DATA[f'X{i + 1}_val'] = _load(f'tmp/inputs_{2 * i + 1}.npy')
    DATA['Y_gender_train'] = _load('tmp/gender_0.npy')
    DATA['Y_gender_val'] = _load('tmp/gender_1.npy')
    DATA['Y_age_train'] = _load('tmp/age_0.npy')
    DATA['Y_age_val'] = _load('tmp/age_1.npy')
    # embeddings_0..5 correspond to these id columns, in this order.
    emb_names = ['creative_id', 'ad_id', 'product_id',
                 'advertiser_id', 'industry', 'product_category']
    for i, name in enumerate(emb_names):
        DATA[f'{name}_emb'] = _load(f'tmp/embeddings_{i}.npy')
    return DATA
# %%
DATA = get_data()
cols_to_emb = ['creative_id', 'ad_id', 'advertiser_id',
               'product_id', 'industry', 'product_category']
# One pretrained embedding matrix per id column, wrapped in a one-element
# list because the model-building code indexes [0].
emb_matrix_dict = {
    col: [DATA[col + '_emb'].astype('float32')] for col in cols_to_emb
}
# Conv1D filter count used to compress each embedding; non-positive disables it.
conv1d_info_dict = {'creative_id': 128, 'ad_id': 128, 'advertiser_id': 128,
                    'industry': 128, 'product_category': 128,
                    'product_id': 128, 'time': 32, 'click_times': -1}
# %%
seq_length_creative_id = 100  # fixed length every id sequence is padded/truncated to
labeli = 'age'  # label head trained in this run ('age' -> 10-way softmax below)
# %%
class BiLSTM_Model:
    """Multi-input BiLSTM + multi-kernel Conv1D classifier over id sequences."""
    def __init__(self, n_units):
        '''
        Model hyper-parameters.
        :param n_units: hidden units per direction of each BiLSTM layer.
        '''
        self.n_units = n_units
    def get_emb_layer(self, emb_matrix, input_length, trainable):
        '''
        Embedding layer: looks up vectors from the pretrained matrix by index.
        '''
        embedding_dim = emb_matrix.shape[-1]
        input_dim = emb_matrix.shape[0]
        emb_layer = keras.layers.Embedding(input_dim, embedding_dim,
                                           input_length=input_length,
                                           weights=[emb_matrix],
                                           trainable=trainable)
        return emb_layer
    def get_input_layer(self, name=None, dtype="int64"):
        '''
        Input layer: a sequence of vocabulary indices.
        '''
        input_layer = keras.Input(
            shape=(seq_length_creative_id,), dtype=dtype, name=name)
        return input_layer
    def get_input_double_layer(self, name=None, dtype="float32"):
        '''
        Input layer: a dense float sequence (e.g. click counts).
        '''
        input_layer = keras.Input(
            shape=(seq_length_creative_id,), dtype=dtype, name=name)
        return input_layer
    def gru_net(self, emb_layer, click_times_weight):
        # NOTE(review): `click_times_weight` is accepted but never used here -- confirm.
        emb_layer = keras.layers.SpatialDropout1D(0.3)(emb_layer)
        x = keras.layers.Conv1D(
            filters=emb_layer.shape[-1], kernel_size=1, padding='same', activation='relu')(emb_layer)
        # Above: embedding part.
        # BiLSTM stack (2 layers, full sequences out).
        x = keras.layers.Bidirectional(keras.layers.LSTM(
            self.n_units, dropout=0.2, return_sequences=True))(x)
        x = keras.layers.Bidirectional(keras.layers.LSTM(
            self.n_units, dropout=0.2, return_sequences=True))(x)
        # Parallel convolutions with different kernel sizes capture n-gram-like patterns.
        conv1a = keras.layers.Conv1D(filters=128, kernel_size=2,
                                    padding='same', activation='relu',)(x)
        conv1b = keras.layers.Conv1D(filters=64, kernel_size=4,
                                    padding='same', activation='relu', )(x)
        conv1c = keras.layers.Conv1D(filters=32, kernel_size=8,
                                    padding='same', activation='relu',)(x)
        gap1a = keras.layers.GlobalAveragePooling1D()(conv1a)
        gap1b = keras.layers.GlobalAveragePooling1D()(conv1b)
        gap1c = keras.layers.GlobalMaxPooling1D()(conv1c)
        max_pool1 = keras.layers.GlobalMaxPooling1D()(x)
        concat = keras.layers.concatenate([max_pool1, gap1a, gap1b, gap1c])
        return concat
    def get_embedding_conv1ded(self, embedding_vector, filter_size=128):
        # 1x1 convolution: compress the embedding to `filter_size` channels.
        x = keras.layers.Conv1D(filters=filter_size, kernel_size=1,
                                padding='same', activation='relu')(embedding_vector)
        return x
    def create_model(self, num_class, labeli):
        """
        Build and compile the full Keras model.

        :param num_class: number of output classes (softmax width).
        :param labeli: name of the output layer / label head.
        """
        K.clear_session()
        # cols to use
        inputlist = cols_to_emb
        # This dict specifies which embedding layers are also trainable
        # (small vocabularies are fine-tuned; large ones stay frozen).
        train_able_dict = {'creative_id': False, 'ad_id': False, 'advertiser_id': False,
                           'product_id': False, 'industry': True, 'product_category': True, 'time': True, 'click_times': True}
        # All input layers.
        inputs_all = []
        for col in inputlist:
            inputs_all.append(self.get_input_layer(name=col))
        # inputs_all.append(self.get_input_double_layer(name = 'click_times'))# unused
        # input->seq embedding
        emb_layer_concat_dict = {}
        for index, col in enumerate(inputlist):
            layer_emb = self.get_emb_layer(
                emb_matrix_dict[col][0], input_length=seq_length_creative_id, trainable=train_able_dict[col])(inputs_all[index])
            emb_layer_concat_dict[col] = layer_emb
        # Per-column 1x1 convolution to compress each embedding.
        for col in inputlist:
            if conv1d_info_dict[col] > 0:
                emb_layer_concat_dict[col] = self.get_embedding_conv1ded(
                    emb_layer_concat_dict[col], filter_size=conv1d_info_dict[col])
        # Concatenate all columns together.
        concat_all = keras.layers.concatenate(
            list(emb_layer_concat_dict.values()))
        # Feed into the BiLSTM stack.
        concat_all = self.gru_net(concat_all, inputs_all[-1])
        concat_all = keras.layers.Dropout(0.3)(concat_all)
        x = keras.layers.Dense(256)(concat_all)
        x = keras.layers.PReLU()(x)
        x = keras.layers.Dense(256)(x)
        x = keras.layers.PReLU()(x)
        outputs_all = keras.layers.Dense(
            num_class, activation='softmax', name=labeli)(x)  # num_class-way classification
        model = keras.Model(inputs_all, outputs_all)
        print(model.summary())
        optimizer = keras.optimizers.Adam(1e-3)
        model.compile(optimizer=optimizer,
                      # loss='sparse_categorical_crossentropy',
                      loss=tf.keras.losses.CategoricalCrossentropy(
                          from_logits=False),
                      metrics=['accuracy'])
        return model
# %%
# 10-class age head, 128 BiLSTM units per direction.
model = BiLSTM_Model(n_units=128).create_model(10, 'age')
# %%
# train_examples = 720000
# val_examples = 180000
# 810k/90k split -- the two sizes sum to 900000 examples.
train_examples = 810000
val_examples = 90000
model.fit(
    # Input name -> sequence array; the X index ordering here (X3=product_id,
    # X4=advertiser_id) must match how get_data() stored them.
    {
        'creative_id': DATA['X1_train'][:train_examples],
        'ad_id': DATA['X2_train'][:train_examples],
        'product_id': DATA['X3_train'][:train_examples],
        'advertiser_id': DATA['X4_train'][:train_examples],
        'industry': DATA['X5_train'][:train_examples],
        'product_category': DATA['X6_train'][:train_examples]
    },
    {
        # 'gender': DATA['Y_gender_train'][:train_examples],
        'age': DATA['Y_age_train'][:train_examples],
    },
    validation_data=(
        {
            'creative_id': DATA['X1_val'][:val_examples],
            'ad_id': DATA['X2_val'][:val_examples],
            'product_id': DATA['X3_val'][:val_examples],
            'advertiser_id': DATA['X4_val'][:val_examples],
            'industry': DATA['X5_val'][:val_examples],
            'product_category': DATA['X6_val'][:val_examples]
        },
        {
            # 'gender': DATA['Y_gender_val'][:val_examples],
            'age': DATA['Y_age_val'][:val_examples],
        },
    ),
    epochs=10,
    batch_size=1024,
    # callbacks=[checkpoint, earlystop_callback, reduce_lr_callback],
)
# %%
# earlystop_callback = tf.keras.callbacks.EarlyStopping(
#     monitor="val_accuracy",
#     min_delta=0.00001,
#     patience=3,
#     verbose=1,
#     mode="max",
#     baseline=None,
#     restore_best_weights=True,
# )
# csv_log_callback = tf.keras.callbacks.CSVLogger(
#     filename='logs_save/{}_nn_v0621_{}d_bilstm.log'.format(labeli, count), separator=",", append=True)
# reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
#                                                           factor=0.5,
#                                                           patience=1,
#                                                           min_lr=0.0000001)
# callbacks = [earlystop_callback, csv_log_callback, reduce_lr_callback]
| [
"numpy.load",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.Input",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.keras.layers.PReLU",
"tensorflow.keras.Model",
"tensorfl... | [((1023, 1069), 'numpy.load', 'np.load', (['"""tmp/inputs_0.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_0.npy', allow_pickle=True)\n", (1030, 1069), True, 'import numpy as np\n'), ((1091, 1137), 'numpy.load', 'np.load', (['"""tmp/inputs_1.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_1.npy', allow_pickle=True)\n", (1098, 1137), True, 'import numpy as np\n'), ((1161, 1207), 'numpy.load', 'np.load', (['"""tmp/inputs_2.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_2.npy', allow_pickle=True)\n", (1168, 1207), True, 'import numpy as np\n'), ((1229, 1275), 'numpy.load', 'np.load', (['"""tmp/inputs_3.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_3.npy', allow_pickle=True)\n", (1236, 1275), True, 'import numpy as np\n'), ((1299, 1345), 'numpy.load', 'np.load', (['"""tmp/inputs_4.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_4.npy', allow_pickle=True)\n", (1306, 1345), True, 'import numpy as np\n'), ((1367, 1413), 'numpy.load', 'np.load', (['"""tmp/inputs_5.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_5.npy', allow_pickle=True)\n", (1374, 1413), True, 'import numpy as np\n'), ((1437, 1483), 'numpy.load', 'np.load', (['"""tmp/inputs_6.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_6.npy', allow_pickle=True)\n", (1444, 1483), True, 'import numpy as np\n'), ((1505, 1551), 'numpy.load', 'np.load', (['"""tmp/inputs_7.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_7.npy', allow_pickle=True)\n", (1512, 1551), True, 'import numpy as np\n'), ((1575, 1621), 'numpy.load', 'np.load', (['"""tmp/inputs_8.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_8.npy', allow_pickle=True)\n", (1582, 1621), True, 'import numpy as np\n'), ((1643, 1689), 'numpy.load', 'np.load', (['"""tmp/inputs_9.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_9.npy', allow_pickle=True)\n", (1650, 1689), True, 'import numpy as np\n'), ((1713, 1760), 'numpy.load', 'np.load', (['"""tmp/inputs_10.npy"""'], {'allow_pickle': '(True)'}), 
"('tmp/inputs_10.npy', allow_pickle=True)\n", (1720, 1760), True, 'import numpy as np\n'), ((1782, 1829), 'numpy.load', 'np.load', (['"""tmp/inputs_11.npy"""'], {'allow_pickle': '(True)'}), "('tmp/inputs_11.npy', allow_pickle=True)\n", (1789, 1829), True, 'import numpy as np\n'), ((1859, 1905), 'numpy.load', 'np.load', (['"""tmp/gender_0.npy"""'], {'allow_pickle': '(True)'}), "('tmp/gender_0.npy', allow_pickle=True)\n", (1866, 1905), True, 'import numpy as np\n'), ((1933, 1979), 'numpy.load', 'np.load', (['"""tmp/gender_1.npy"""'], {'allow_pickle': '(True)'}), "('tmp/gender_1.npy', allow_pickle=True)\n", (1940, 1979), True, 'import numpy as np\n'), ((2006, 2049), 'numpy.load', 'np.load', (['"""tmp/age_0.npy"""'], {'allow_pickle': '(True)'}), "('tmp/age_0.npy', allow_pickle=True)\n", (2013, 2049), True, 'import numpy as np\n'), ((2074, 2117), 'numpy.load', 'np.load', (['"""tmp/age_1.npy"""'], {'allow_pickle': '(True)'}), "('tmp/age_1.npy', allow_pickle=True)\n", (2081, 2117), True, 'import numpy as np\n'), ((2148, 2198), 'numpy.load', 'np.load', (['"""tmp/embeddings_0.npy"""'], {'allow_pickle': '(True)'}), "('tmp/embeddings_0.npy', allow_pickle=True)\n", (2155, 2198), True, 'import numpy as np\n'), ((2232, 2282), 'numpy.load', 'np.load', (['"""tmp/embeddings_1.npy"""'], {'allow_pickle': '(True)'}), "('tmp/embeddings_1.npy', allow_pickle=True)\n", (2239, 2282), True, 'import numpy as np\n'), ((2321, 2371), 'numpy.load', 'np.load', (['"""tmp/embeddings_2.npy"""'], {'allow_pickle': '(True)'}), "('tmp/embeddings_2.npy', allow_pickle=True)\n", (2328, 2371), True, 'import numpy as np\n'), ((2413, 2463), 'numpy.load', 'np.load', (['"""tmp/embeddings_3.npy"""'], {'allow_pickle': '(True)'}), "('tmp/embeddings_3.npy', allow_pickle=True)\n", (2420, 2463), True, 'import numpy as np\n'), ((2500, 2550), 'numpy.load', 'np.load', (['"""tmp/embeddings_4.npy"""'], {'allow_pickle': '(True)'}), "('tmp/embeddings_4.npy', allow_pickle=True)\n", (2507, 2550), True, 'import numpy as 
np\n'), ((2595, 2645), 'numpy.load', 'np.load', (['"""tmp/embeddings_5.npy"""'], {'allow_pickle': '(True)'}), "('tmp/embeddings_5.npy', allow_pickle=True)\n", (2602, 2645), True, 'import numpy as np\n'), ((4285, 4407), 'tensorflow.keras.layers.Embedding', 'keras.layers.Embedding', (['input_dim', 'embedding_dim'], {'input_length': 'input_length', 'weights': '[emb_matrix]', 'trainable': 'trainable'}), '(input_dim, embedding_dim, input_length=input_length,\n weights=[emb_matrix], trainable=trainable)\n', (4307, 4407), False, 'from tensorflow import keras\n'), ((4684, 4752), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(seq_length_creative_id,)', 'dtype': 'dtype', 'name': 'name'}), '(shape=(seq_length_creative_id,), dtype=dtype, name=name)\n', (4695, 4752), False, 'from tensorflow import keras\n'), ((4932, 5000), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(seq_length_creative_id,)', 'dtype': 'dtype', 'name': 'name'}), '(shape=(seq_length_creative_id,), dtype=dtype, name=name)\n', (4943, 5000), False, 'from tensorflow import keras\n'), ((6266, 6324), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[max_pool1, gap1a, gap1b, gap1c]'], {}), '([max_pool1, gap1a, gap1b, gap1c])\n', (6290, 6324), False, 'from tensorflow import keras\n'), ((6687, 6704), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6702, 6704), True, 'import tensorflow.keras.backend as K\n'), ((8368, 8404), 'tensorflow.keras.Model', 'keras.Model', (['inputs_all', 'outputs_all'], {}), '(inputs_all, outputs_all)\n', (8379, 8404), False, 'from tensorflow import keras\n'), ((8456, 8484), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (8477, 8484), False, 'from tensorflow import keras\n'), ((5116, 5150), 'tensorflow.keras.layers.SpatialDropout1D', 'keras.layers.SpatialDropout1D', (['(0.3)'], {}), '(0.3)\n', (5145, 5150), False, 'from tensorflow import keras\n'), ((5174, 5277), 
'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 'emb_layer.shape[-1]', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=emb_layer.shape[-1], kernel_size=1, padding=\n 'same', activation='relu')\n", (5193, 5277), False, 'from tensorflow import keras\n'), ((5605, 5692), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(128)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=2, padding='same', activation=\n 'relu')\n", (5624, 5692), False, 'from tensorflow import keras\n'), ((5746, 5832), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(64)', 'kernel_size': '(4)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=4, padding='same', activation=\n 'relu')\n", (5765, 5832), False, 'from tensorflow import keras\n'), ((5887, 5973), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(8)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=8, padding='same', activation=\n 'relu')\n", (5906, 5973), False, 'from tensorflow import keras\n'), ((6026, 6063), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAveragePooling1D', ([], {}), '()\n', (6061, 6063), False, 'from tensorflow import keras\n'), ((6088, 6125), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAveragePooling1D', ([], {}), '()\n', (6123, 6125), False, 'from tensorflow import keras\n'), ((6150, 6183), 'tensorflow.keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPooling1D', ([], {}), '()\n', (6181, 6183), False, 'from tensorflow import keras\n'), ((6212, 6245), 'tensorflow.keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPooling1D', ([], {}), '()\n', (6243, 6245), False, 'from tensorflow import keras\n'), ((6433, 6527), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], 
{'filters': 'filter_size', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=filter_size, kernel_size=1, padding='same',\n activation='relu')\n", (6452, 6527), False, 'from tensorflow import keras\n'), ((8043, 8068), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (8063, 8068), False, 'from tensorflow import keras\n'), ((8093, 8116), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {}), '(256)\n', (8111, 8116), False, 'from tensorflow import keras\n'), ((8141, 8161), 'tensorflow.keras.layers.PReLU', 'keras.layers.PReLU', ([], {}), '()\n', (8159, 8161), False, 'from tensorflow import keras\n'), ((8177, 8200), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {}), '(256)\n', (8195, 8200), False, 'from tensorflow import keras\n'), ((8216, 8236), 'tensorflow.keras.layers.PReLU', 'keras.layers.PReLU', ([], {}), '()\n', (8234, 8236), False, 'from tensorflow import keras\n'), ((8263, 8327), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_class'], {'activation': '"""softmax"""', 'name': 'labeli'}), "(num_class, activation='softmax', name=labeli)\n", (8281, 8327), False, 'from tensorflow import keras\n'), ((5379, 5446), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['self.n_units'], {'dropout': '(0.2)', 'return_sequences': '(True)'}), '(self.n_units, dropout=0.2, return_sequences=True)\n', (5396, 5446), False, 'from tensorflow import keras\n'), ((5503, 5570), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['self.n_units'], {'dropout': '(0.2)', 'return_sequences': '(True)'}), '(self.n_units, dropout=0.2, return_sequences=True)\n', (5520, 5570), False, 'from tensorflow import keras\n'), ((8620, 8678), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (8659, 8678), True, 'import tensorflow as tf\n')] |
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
from pysot.utils.bbox import center2corner, Center , Corner ,corner2center
import math
# ImageNet code should change this value
PI=3.1415926
def center_ro(cx,cy,px,py,a):
  """Rotate point (px, py) by `a` degrees around center (cx, cy).

  The sign of the angle is flipped so that a positive `a` matches the
  rotation direction used by PIL's Image.rotate in the callers.
  Returns the rotated point as a tuple (x, y).
  """
  # Bug fix: use math.radians instead of the hand-rolled -2*PI/360 with a
  # low-precision PI constant (3.1415926).
  rad = -math.radians(a)
  x = (px - cx) * math.cos(rad) - (py - cy) * math.sin(rad) + cx
  y = (px - cx) * math.sin(rad) + (py - cy) * math.cos(rad) + cy
  return x, y
def int_parameter(level, maxval):
  """Scale `maxval` by level/10 and truncate to an integer.

  Args:
    level: operation level, expected in [0, PARAMETER_MAX] (here 10).
    maxval: maximum value the operation can take.
  Returns:
    int(level * maxval / 10).
  """
  scaled = level * maxval / 10
  return int(scaled)
def float_parameter(level, maxval):
  """Scale `maxval` by level/10 as a float.

  Args:
    level: operation level, expected in [0, PARAMETER_MAX] (here 10).
    maxval: maximum value the operation can take.
  Returns:
    float(level) * maxval / 10.
  """
  return maxval * float(level) / 10.0
def sample_level(n):
  """Draw a random level uniformly from [0.1, n)."""
  return np.random.uniform(0.1, n)
def autocontrast(pil_img, _,IMAGE_SIZE,bbox):
  """Maximize image contrast; a pure photometric op, so bbox is unchanged."""
  enhanced = ImageOps.autocontrast(pil_img)
  return enhanced, bbox
def equalize(pil_img, _,IMAGE_SIZE,bbox):
  """Histogram-equalize the image; photometric only, bbox unchanged."""
  enhanced = ImageOps.equalize(pil_img)
  return enhanced, bbox
def posterize(pil_img, level,IMAGE_SIZE,bbox):
  """Reduce the number of bits per channel; photometric only, bbox unchanged."""
  bits = 4 - int_parameter(sample_level(level), 4)
  return ImageOps.posterize(pil_img, bits), bbox
def rotate(pil_img, level,IMAGE_SIZE,bbox):
  """Rotate the image by a random angle up to 30 degrees (random sign) around
  the image center, and recompute an axis-aligned bbox for the rotated target.
  """
  degrees = int_parameter(sample_level(level), 30)
  if np.random.uniform() > 0.5:
    degrees = -degrees
  px,py,_ ,_ = corner2center(bbox)# center (x, y) of the original bbox
  a,b,c,d = bbox
  # Rotate the bbox center and two of its corners around the image center.
  cenx,ceny = center_ro(pil_img.size[0]/2,pil_img.size[1]/2, px , py,degrees)
  po2x,po2y = center_ro(pil_img.size[0]/2,pil_img.size[1]/2, a , d,degrees)
  po3x,po3y = center_ro(pil_img.size[0]/2,pil_img.size[1]/2, c , d,degrees)
  # New box: symmetric about the rotated center, wide/tall enough to reach
  # the farther of the two rotated corners -- presumably an axis-aligned
  # over-approximation of the rotated rectangle; TODO confirm.
  hh=max(abs(po2y-ceny)*2,abs(po3y-ceny)*2)
  ww=max(abs(po3x-cenx)*2,abs(po2x-cenx)*2)
  bbox = center2corner(Center(cenx,ceny,ww,hh))
  return pil_img.rotate(degrees, resample=Image.BILINEAR) , bbox
def solarize(pil_img, level,IMAGE_SIZE,bbox):
  """Invert all pixels above a random threshold; photometric only, bbox unchanged."""
  threshold = 256 - int_parameter(sample_level(level), 256)
  return ImageOps.solarize(pil_img, threshold), bbox
def paste(im,level,IMAGE_SIZE,bbox):
  """Paste a 10x10 patch of the image's mean intensity at a random location.

  NOTE(review): the mean is computed as sum / IMAGE_SIZE / IMAGE_SIZE / 3,
  which assumes `im` is an IMAGE_SIZE x IMAGE_SIZE RGB image -- TODO confirm.
  """
  avg = np.array(im).sum()/IMAGE_SIZE/IMAGE_SIZE/3
  # Constant-color patch at the average intensity.
  mk = np.ones((10,10,3))*avg
  mk = Image.fromarray(np.uint8(mk))
  # Random paste position, kept away from the image borders.
  x = int(np.random.uniform(low=45,high=IMAGE_SIZE-55))
  y = int(np.random.uniform(low=45,high=IMAGE_SIZE-55))
  im.paste(mk,(x,y))
  return im, bbox
def shear_x(pil_img, level,IMAGE_SIZE,bbox):
  """Shear the image horizontally by a random factor up to 0.3 (random sign),
  shifting and widening the bbox to keep covering the target.
  """
  level = float_parameter(sample_level(level), 0.3)
  if np.random.uniform() > 0.5:
    level = -level
  x,y,w,h = corner2center(bbox)
  # PIL's AFFINE transform uses the inverse mapping, hence the subtraction;
  # NOTE(review): the 0.5*level width growth looks heuristic -- TODO confirm.
  x-=y*level
  w = w*(1+abs(0.5*level))
  bbox = center2corner(Center(x,y,w,h))
  return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                           Image.AFFINE, (1, level, 0, 0, 1, 0),
                           resample=Image.BILINEAR) , bbox
def shear_y(pil_img, level,IMAGE_SIZE,bbox):
  """Shear the image vertically by a random factor up to 0.3 (random sign),
  shifting and enlarging the bbox to keep covering the target.
  """
  level = float_parameter(sample_level(level), 0.3)
  if np.random.uniform() > 0.5:
    level = -level
  x,y,w,h = corner2center(bbox)
  # PIL's AFFINE transform uses the inverse mapping, hence the subtraction;
  # NOTE(review): the 2*level height growth differs from shear_x's 0.5
  # factor -- looks heuristic, TODO confirm.
  y-=x*level
  h = h * (1+abs(2*level))
  bbox = center2corner(Center(x,y,w,h))
  return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                           Image.AFFINE, (1, 0, 0, level, 1, 0),
                           resample=Image.BILINEAR) , bbox
def translate_x(pil_img, level,IMAGE_SIZE,bbox):
  """Translate the image horizontally by up to IMAGE_SIZE/3 pixels (random sign).

  NOTE(review): bbox is returned unchanged even though the image content
  moves -- unlike shear_x/shear_y/rotate, which adjust it. Possibly an
  oversight; confirm against the callers.
  """
  level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
  if np.random.random() > 0.5:
    level = -level
  return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                           Image.AFFINE, (1, 0, level, 0, 1, 0),
                           resample=Image.BILINEAR) , bbox
def translate_y(pil_img, level,IMAGE_SIZE,bbox):
  """Translate the image vertically by up to IMAGE_SIZE/3 pixels (random sign).

  NOTE(review): bbox is returned unchanged even though the image content
  moves -- same caveat as translate_x; confirm against the callers.
  """
  level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
  if np.random.random() > 0.5:
    level = -level
  return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
                           Image.AFFINE, (1, 0, 0, 0, 1, level),
                           resample=Image.BILINEAR) , bbox
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level,IMAGE_SIZE,bbox):
  """Adjust color saturation by a random factor in (0.1, 1.9); bbox unchanged."""
  factor = float_parameter(sample_level(level), 1.8) + 0.1
  return ImageEnhance.Color(pil_img).enhance(factor), bbox
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level,IMAGE_SIZE,bbox):
  """Adjust contrast by a random factor in (0.1, 1.9); bbox unchanged."""
  factor = float_parameter(sample_level(level), 1.8) + 0.1
  return ImageEnhance.Contrast(pil_img).enhance(factor), bbox
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level,IMAGE_SIZE,bbox):
  """Adjust brightness by a random factor in (0.1, 2.0); bbox unchanged."""
  factor = float_parameter(sample_level(level), 1.9) + 0.1
  return ImageEnhance.Brightness(pil_img).enhance(factor), bbox
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level,IMAGE_SIZE,bbox):
  """Adjust sharpness by a random factor in (0.1, 1.9); bbox unchanged."""
  factor = float_parameter(sample_level(level), 1.8) + 0.1
  return ImageEnhance.Sharpness(pil_img).enhance(factor), bbox
#def scale()
# Base augmentation set: geometric + histogram ops.
augmentations = [
    autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
    translate_x, translate_y
]
# Full set, including the four ops that overlap with ImageNet-C's test set.
augmentations_all = [
    autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
    translate_x, translate_y, color, contrast, brightness, sharpness
]
# Ordering used for AugMix-style chains (geometric ops first).
augmentations_augmix = [
    rotate, shear_x, shear_y, autocontrast, equalize, posterize, solarize,
    color, contrast, brightness, sharpness
]#scale,
# Photometric-only ops: none of these move pixels, so bbox is always unchanged.
augmentations_feature = [
    autocontrast, equalize, posterize, solarize,
    color, contrast, brightness, sharpness
]
# Mixed set combining occlusion (paste), geometry and photometry.
augmentations_duo = [
    paste, shear_x, shear_y, rotate, contrast , translate_x, translate_y , brightness
]
| [
"numpy.random.uniform",
"numpy.uint8",
"PIL.ImageEnhance.Brightness",
"PIL.ImageOps.solarize",
"PIL.ImageEnhance.Color",
"PIL.ImageEnhance.Contrast",
"numpy.ones",
"math.sin",
"pysot.utils.bbox.Center",
"PIL.ImageEnhance.Sharpness",
"numpy.random.random",
"numpy.array",
"PIL.ImageOps.equaliz... | [((1238, 1272), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.1)', 'high': 'n'}), '(low=0.1, high=n)\n', (1255, 1272), True, 'import numpy as np\n'), ((1771, 1790), 'pysot.utils.bbox.corner2center', 'corner2center', (['bbox'], {}), '(bbox)\n', (1784, 1790), False, 'from pysot.utils.bbox import center2corner, Center, Corner, corner2center\n'), ((3009, 3028), 'pysot.utils.bbox.corner2center', 'corner2center', (['bbox'], {}), '(bbox)\n', (3022, 3028), False, 'from pysot.utils.bbox import center2corner, Center, Corner, corner2center\n'), ((3485, 3504), 'pysot.utils.bbox.corner2center', 'corner2center', (['bbox'], {}), '(bbox)\n', (3498, 3504), False, 'from pysot.utils.bbox import center2corner, Center, Corner, corner2center\n'), ((1330, 1360), 'PIL.ImageOps.autocontrast', 'ImageOps.autocontrast', (['pil_img'], {}), '(pil_img)\n', (1351, 1360), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((1420, 1446), 'PIL.ImageOps.equalize', 'ImageOps.equalize', (['pil_img'], {}), '(pil_img)\n', (1437, 1446), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((1559, 1597), 'PIL.ImageOps.posterize', 'ImageOps.posterize', (['pil_img', '(4 - level)'], {}), '(pil_img, 4 - level)\n', (1577, 1597), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((1706, 1725), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1723, 1725), True, 'import numpy as np\n'), ((2173, 2199), 'pysot.utils.bbox.Center', 'Center', (['cenx', 'ceny', 'ww', 'hh'], {}), '(cenx, ceny, ww, hh)\n', (2179, 2199), False, 'from pysot.utils.bbox import center2corner, Center, Corner, corner2center\n'), ((2393, 2432), 'PIL.ImageOps.solarize', 'ImageOps.solarize', (['pil_img', '(256 - level)'], {}), '(pil_img, 256 - level)\n', (2410, 2432), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2619, 2639), 'numpy.ones', 'np.ones', (['(10, 10, 3)'], {}), '((10, 10, 3))\n', (2626, 2639), True, 'import numpy as np\n'), ((2665, 2677), 
'numpy.uint8', 'np.uint8', (['mk'], {}), '(mk)\n', (2673, 2677), True, 'import numpy as np\n'), ((2689, 2736), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(45)', 'high': '(IMAGE_SIZE - 55)'}), '(low=45, high=IMAGE_SIZE - 55)\n', (2706, 2736), True, 'import numpy as np\n'), ((2745, 2792), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(45)', 'high': '(IMAGE_SIZE - 55)'}), '(low=45, high=IMAGE_SIZE - 55)\n', (2762, 2792), True, 'import numpy as np\n'), ((2936, 2955), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2953, 2955), True, 'import numpy as np\n'), ((3092, 3110), 'pysot.utils.bbox.Center', 'Center', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (3098, 3110), False, 'from pysot.utils.bbox import center2corner, Center, Corner, corner2center\n'), ((3412, 3431), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3429, 3431), True, 'import numpy as np\n'), ((3568, 3586), 'pysot.utils.bbox.Center', 'Center', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (3574, 3586), False, 'from pysot.utils.bbox import center2corner, Center, Corner, corner2center\n'), ((3898, 3916), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3914, 3916), True, 'import numpy as np\n'), ((4238, 4256), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4254, 4256), True, 'import numpy as np\n'), ((273, 284), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (281, 284), False, 'import math\n'), ((297, 308), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (305, 308), False, 'import math\n'), ((331, 342), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (339, 342), False, 'import math\n'), ((355, 366), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (363, 366), False, 'import math\n'), ((4630, 4657), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['pil_img'], {}), '(pil_img)\n', (4648, 4657), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((4853, 4883), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', 
(['pil_img'], {}), '(pil_img)\n', (4874, 4883), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((5081, 5113), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['pil_img'], {}), '(pil_img)\n', (5104, 5113), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((5310, 5341), 'PIL.ImageEnhance.Sharpness', 'ImageEnhance.Sharpness', (['pil_img'], {}), '(pil_img)\n', (5332, 5341), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2569, 2581), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2577, 2581), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from sciml_bench.benchmarks.em_denoise.model import autoencoder
def test_autoencoder():
    """The (128, 128, 1) autoencoder builds as a Keras model with matching I/O shapes."""
    net = autoencoder((128, 128, 1))
    assert isinstance(net, tf.keras.Model)
    assert net.input_shape == (None, 128, 128, 1)
    assert net.output_shape == (None, 128, 128, 1)
def test_autoencoder_feed_forward():
    """A single random sample passes through the model and keeps its shape."""
    net = autoencoder((128, 128, 1))
    sample = np.random.random((1, 128, 128, 1))
    prediction = net.predict(sample)
    assert prediction.shape == (1, 128, 128, 1)
def test_autoencoder_backprop():
    """A single training step runs end-to-end and yields a Keras History."""
    sample = np.random.random((1, 128, 128, 1))
    net = autoencoder((128, 128, 1), learning_rate=0.001)
    net.compile(loss='mse', optimizer='adam')
    result = net.fit(sample, sample)
    assert isinstance(result, tf.keras.callbacks.History)
| [
"sciml_bench.benchmarks.em_denoise.model.autoencoder",
"numpy.random.random"
] | [((145, 171), 'sciml_bench.benchmarks.em_denoise.model.autoencoder', 'autoencoder', (['(128, 128, 1)'], {}), '((128, 128, 1))\n', (156, 171), False, 'from sciml_bench.benchmarks.em_denoise.model import autoencoder\n'), ((374, 400), 'sciml_bench.benchmarks.em_denoise.model.autoencoder', 'autoencoder', (['(128, 128, 1)'], {}), '((128, 128, 1))\n', (385, 400), False, 'from sciml_bench.benchmarks.em_denoise.model import autoencoder\n'), ((551, 585), 'numpy.random.random', 'np.random.random', (['(1, 128, 128, 1)'], {}), '((1, 128, 128, 1))\n', (567, 585), True, 'import numpy as np\n'), ((598, 645), 'sciml_bench.benchmarks.em_denoise.model.autoencoder', 'autoencoder', (['(128, 128, 1)'], {'learning_rate': '(0.001)'}), '((128, 128, 1), learning_rate=0.001)\n', (609, 645), False, 'from sciml_bench.benchmarks.em_denoise.model import autoencoder\n'), ((428, 462), 'numpy.random.random', 'np.random.random', (['(1, 128, 128, 1)'], {}), '((1, 128, 128, 1))\n', (444, 462), True, 'import numpy as np\n')] |
"""
File: pylinex/expander/PadExpander.py
Author: <NAME>
Date: 3 Sep 2017
Description: File containing class representing an Expander which expands the
data by padding it with zeros (or any other value).
"""
import numpy as np
from ..util import int_types, numerical_types
from .Expander import Expander
try:
    # Python 2: `basestring` exists, so this bare lookup succeeds and the
    # except branch is skipped.
    basestring
except:
    # Python 3: the lookup raises NameError, so alias `basestring` to `str`
    # for python 2/3 compatible string type checking below.
    basestring = str
class PadExpander(Expander):
    """
    Class representing an Expander which expands the data by padding it with
    zeros (or any other value).
    """
    def __init__(self, pads_before, pads_after, pad_value=0):
        """
        Initializes a new PadExpander.
        
        pads_before, pads_after: strings of the form N+S where N is a positive
                                 integer string and S is either '+' or '*'.
                                 If S is '*', then the number in N is taken to
                                 be the factor by which inputs should be
                                 expanded. If S is '+', then the number in N is
                                 taken to be the number of pads to put in the
                                 given position regardless of the input size.
        pad_value: numerical value to place in the pad positions
        """
        self.pads_before = pads_before
        self.pads_after = pads_after
        self.pad_value = pad_value
    
    def make_expansion_matrix(self, original_space_size):
        """
        Computes the matrix of this expander.
        
        original_space_size: size of unexpanded space
        
        returns: expansion matrix of this expander
        """
        if self.pad_value != 0:
            raise ValueError("If pad_value is not zero, then Expander " +\
                "cannot be represented by an expansion matrix.")
        (pads_before, pads_after) =\
            self.get_pad_sizes_from_original_space_length(original_space_size)
        first_pad = np.zeros((pads_before, original_space_size))
        second_pad = np.zeros((pads_after, original_space_size))
        in_between = np.identity(original_space_size)
        return np.concatenate([first_pad, in_between, second_pad], axis=0)
    
    def check_and_reprocess_pad_number(self, pad_number):
        """
        Checks the given pad_number string to see if it has the right format
        and reprocesses it.
        
        pad_number: string of the form N+S where N is a positive integer string
                    and S is either '+' or '*'. If S is '*', then the number in
                    N is taken to be the factor by which inputs should be
                    expanded. If S is '+', then the number in N is taken to be
                    the number of pads to put in the given position regardless
                    of the input size.
        
        returns: tuple of the form (number, is_multiplicative) representing
                 the number and symbol represented in the string.
        """
        if type(pad_number) in int_types:
            # bare non-negative int is interpreted as multiplicative
            if pad_number >= 0:
                return (pad_number, True)
            else:
                raise ValueError("pad_number cannot be negative.")
        elif isinstance(pad_number, basestring):
            if pad_number[-1] in ['+', '*']:
                try:
                    int_part = int(pad_number[:-1])
                except:
                    raise ValueError("pad_number should be of the form X+Y " +\
                                     "where X is a string form of an " +\
                                     "integer and Y is either '+' or '*'.")
                else:
                    if int_part >= 0:
                        return (int_part, pad_number[-1] == '*')
                    else:
                        raise ValueError("integer part of pad number " +\
                                         "string must be non-negative.")
            else:
                raise ValueError("If pad_number is a string, it must end " +\
                                 "in '+' or '*'.")
        else:
            raise TypeError("pad_number was neither a string nor an integer.")
    
    @property
    def pads_before(self):
        """
        Property storing the number associated with the pads before the input,
        regardless of whether or not it is to be taken as multiplicative.
        """
        if not hasattr(self, '_pads_before'):
            raise AttributeError("pads_before was referenced before it was " +\
                "set.")
        return self._pads_before
    
    @property
    def pads_before_multiplicative(self):
        """
        Property storing whether or not the pads_before property is to be taken
        as multiplicative.
        """
        if not hasattr(self, '_pads_before_multiplicative'):
            raise AttributeError("pads_before_multiplicative was " +\
                "referenced before it was set.")
        return self._pads_before_multiplicative
    
    @pads_before.setter
    def pads_before(self, value):
        """
        Setter for the pads_before string.
        
        value: string of the form N+S where N is a positive integer string and
               S is either '+' or '*'. If S is '*', then the number in N is
               taken to be the factor by which inputs should be expanded. If S
               is '+', then the number in N is taken to be the number of pads
               to put in the given position regardless of the input size.
        """
        (self._pads_before, self._pads_before_multiplicative) =\
            self.check_and_reprocess_pad_number(value)
    
    @property
    def pads_after(self):
        """
        Property storing the number associated with the pads after the input,
        regardless of whether or not it is to be taken as multiplicative.
        """
        if not hasattr(self, '_pads_after'):
            raise AttributeError("pads_after was referenced before it was " +\
                "set.")
        return self._pads_after
    
    @property
    def pads_after_multiplicative(self):
        """
        Property storing whether or not the pads_after property is to be taken
        as multiplicative.
        """
        if not hasattr(self, '_pads_after_multiplicative'):
            raise AttributeError("pads_after_multiplicative was referenced " +\
                "before it was set.")
        return self._pads_after_multiplicative
    
    @pads_after.setter
    def pads_after(self, value):
        """
        Setter for the pads_after string.
        
        value: string of the form N+S where N is a positive integer string and
               S is either '+' or '*'. If S is '*', then the number in N is
               taken to be the factor by which inputs should be expanded. If S
               is '+', then the number in N is taken to be the number of pads
               to put in the given position regardless of the input size.
        """
        (self._pads_after, self._pads_after_multiplicative) =\
            self.check_and_reprocess_pad_number(value)
    
    @property
    def pad_value(self):
        """
        Property storing the value which will pad either side of the input.
        """
        if not hasattr(self, '_pad_value'):
            raise AttributeError("pad_value was referenced before it was set.")
        return self._pad_value
    
    @pad_value.setter
    def pad_value(self, value):
        """
        Setter for the value with which to fill bad positions
        
        value: single number with which to fill pad positions
        """
        if type(value) in numerical_types:
            self._pad_value = value
        else:
            raise TypeError("pad_value was set to a non-number.")
    
    def overlap(self, vectors, error=None):
        """
        Computes Psi^T C^{-1} y for one or more vectors y and for a diagonal C
        defined by the given error.
        
        vectors: either a 1D array of length expanded_space_size or a 2D array
                 of shape (nvectors, expanded_space_size)
        error: the standard deviations of the independent noise defining the
               dot product
        
        returns: if vectors is 1D, result is a 1D array of length
                                   original_space_size
                 else, result is a 2D array of shape
                       (nvectors, original_space_size)
        """
        onedim = (vectors.ndim == 1)
        if onedim:
            vectors = vectors[np.newaxis,:]
        if type(error) is type(None):
            weighted_vectors = vectors
        else:
            weighted_vectors = vectors / (error ** 2)[np.newaxis,:]
        expanded_space_size = vectors.shape[-1]
        original_space_size = self.original_space_size(expanded_space_size)
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_original_space_length(original_space_size)
        # Bug fix: the old slice [...:-pad_size_after] returned an EMPTY array
        # whenever pad_size_after was 0 (e.g. pads_after of '0+'); use an
        # explicit end index like contract_error and invert do.
        end_index = pad_size_before + original_space_size
        result = weighted_vectors[:,pad_size_before:end_index]
        if onedim:
            return result[0]
        else:
            return result
    
    def copy(self):
        """
        Finds and returns a deep copy of this expander.
        
        returns: copied PadExpander
        """
        pads_before_string = '{0:d}{1!s}'.format(self.pads_before,\
            '*' if self.pads_before_multiplicative else '+')
        pads_after_string = '{0:d}{1!s}'.format(self.pads_after,\
            '*' if self.pads_after_multiplicative else '+')
        return PadExpander(pads_before_string, pads_after_string,\
            pad_value=self.pad_value)
    
    def get_pad_size(self, original_space_size, pad_number, is_multiplicative):
        """
        Gets the pad size associated with the given input size as well as a
        number of pads and whether that number should be taken as
        multiplicative.
        
        original_space_size: the size of the input vector
        pad_number: number with which to find number of pad values
        is_multiplicative: bool determining whether pad_number is to be taken
                           as multiplicative
        
        returns: the actual number of pad values to put in the given position
        """
        if is_multiplicative:
            return pad_number * original_space_size
        else:
            return pad_number
    
    def get_pad_sizes_from_original_space_length(self, original_space_size):
        """
        Gets the sizes of the pads both before and after the input vector from
        the size of the input vector.
        
        original_space_size: length of input vector
        
        returns: tuple of form (number_of_pads_before, number_of_pads_after)
        """
        size_before = self.get_pad_size(original_space_size, self.pads_before,\
            self.pads_before_multiplicative)
        size_after = self.get_pad_size(original_space_size, self.pads_after,\
            self.pads_after_multiplicative)
        return (size_before, size_after)
    
    def get_pad_sizes_from_expanded_space_length(self, expanded_space_size):
        """
        Gets the sizes of the pads both before and after the input vector from
        the size of the output vector.
        
        expanded_space_size: length of output vector
        
        returns: tuple of form (number_of_pads_before, number_of_pads_after)
        """
        # The original space size is recovered by solving
        # expanded = before + original + after for each of the four
        # multiplicative/additive combinations (integer division assumes
        # compatibility, see is_compatible).
        if self.pads_before_multiplicative and self.pads_after_multiplicative:
            original_space_size =\
                expanded_space_size // (1 + self.pads_before + self.pads_after)
            return (self.pads_before * original_space_size,\
                self.pads_after * original_space_size)
        elif self.pads_before_multiplicative:
            original_space_size = (expanded_space_size - self.pads_after) //\
                (1 + self.pads_before)
            return (self.pads_before * original_space_size, self.pads_after)
        elif self.pads_after_multiplicative:
            original_space_size = (expanded_space_size - self.pads_before) //\
                (1 + self.pads_after)
            return (self.pads_before, self.pads_after * original_space_size)
        else:
            return (self.pads_before, self.pads_after)
    
    def apply(self, vector):
        """
        Expands vector from smaller original space to larger expanded space by
        padding the vector with expander.pad_value.
        
        vector: 1D vector from original space
        
        returns: 1D vector from expanded space
        """
        vector_length = vector.shape[-1]
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_original_space_length(vector_length)
        pad_before_shape = vector.shape[:-1] + (pad_size_before,)
        pad_after_shape = vector.shape[:-1] + (pad_size_after,)
        pad_array_before = np.ones(pad_before_shape) * self.pad_value
        pad_array_after = np.ones(pad_after_shape) * self.pad_value
        return np.concatenate([pad_array_before, vector, pad_array_after],\
            axis=-1)
    
    def contracted_covariance(self, error):
        """
        Finds the covariance matrix associated with contracted noise.
        
        error: 1D vector from expanded space
        
        returns: 2D array of shape (original_space_size, original_space_size)
        """
        return np.diag(self.contract_error(error) ** 2)
    
    def contract_error(self, error):
        """
        Contracts error from full expanded space to smaller original space
        simply by slicing.
        
        error: 1D vector from expanded space
        
        returns: 1D vector from original space
        """
        error_length = len(error)
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_expanded_space_length(error_length)
        return error[pad_size_before:error_length-pad_size_after]
    
    def invert(self, data, error):
        """
        (Pseudo-)Inverts this expander in order to infer an original-space
        curve from the given expanded-space data and error.
        
        data: data vector from which to imply an original space cause
        error: Gaussian noise level in data (unused: slicing out the pads is
               the exact pseudo-inverse regardless of the noise level)
        
        returns: most likely original-space curve to cause given data
        """
        num_channels = len(data)
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_expanded_space_length(num_channels)
        return data[pad_size_before:num_channels-pad_size_after]
    
    def is_compatible(self, original_space_size, expanded_space_size):
        """
        Checks whether this Expander is compatible with the given sizes of the
        original expanded spaces.
        
        original_space_size: size of (typically smaller) original space
        expanded_space_size: size of (typically larger) expanded space
        
        returns: True iff the given sizes are compatible with this Expander
        """
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_original_space_length(original_space_size)
        expected_expanded_space_size =\
            (pad_size_before + original_space_size + pad_size_after)
        return (expected_expanded_space_size == expanded_space_size)
    
    def original_space_size(self, expanded_space_size):
        """
        Finds the input space size from the output space size.
        
        expanded_space_size: positive integer compatible with this Expander
        
        returns: input space size
        """
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_expanded_space_length(expanded_space_size)
        return (expanded_space_size - (pad_size_before + pad_size_after))
    
    def expanded_space_size(self, original_space_size):
        """
        Finds the output space size from the input space size.
        
        original_space_size: positive integer compatible with this Expander
        
        returns: output space size
        """
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_original_space_length(original_space_size)
        return (pad_size_before + original_space_size + pad_size_after)
    
    def channels_affected(self, original_space_size):
        """
        Finds the indices of the data channels affected by data of the given
        size given to this Expander object.
        
        original_space_size: positive integer to assume as input size
        
        returns: 1D numpy.ndarray of indices of data channels possibly affected
                 by data expanded by this Expander object
        """
        (pad_size_before, pad_size_after) =\
            self.get_pad_sizes_from_original_space_length(original_space_size)
        return np.arange(original_space_size) + pad_size_before
    
    def fill_hdf5_group(self, group):
        """
        Saves data about this in the given hdf5 file group.
        
        group: hdf5 file group to which to write
        """
        group.attrs['class'] = 'PadExpander'
        if self.pads_before_multiplicative:
            group.attrs['pads_before'] = ('{}*'.format(self.pads_before))
        else:
            group.attrs['pads_before'] = ('{}+'.format(self.pads_before))
        if self.pads_after_multiplicative:
            group.attrs['pads_after'] = ('{}*'.format(self.pads_after))
        else:
            group.attrs['pads_after'] = ('{}+'.format(self.pads_after))
        group.attrs['pad_value'] = self.pad_value
    
    def __eq__(self, other):
        """
        Checks for equality between this Expander and other.
        
        other: object with which to check for equality
        
        returns: True if this object and other are identical,
                 False otherwise
        """
        if isinstance(other, PadExpander):
            if self.pads_before != other.pads_before:
                return False
            if self.pads_before_multiplicative !=\
                other.pads_before_multiplicative:
                return False
            if self.pads_after != other.pads_after:
                return False
            if self.pads_after_multiplicative !=\
                other.pads_after_multiplicative:
                return False
            # Bug fix: pad_value is part of this expander's state (it is
            # saved by fill_hdf5_group and preserved by copy), so it must be
            # compared as well.
            if self.pad_value != other.pad_value:
                return False
            return True
        else:
            return False
| [
"numpy.zeros",
"numpy.ones",
"numpy.identity",
"numpy.arange",
"numpy.concatenate"
] | [((2091, 2135), 'numpy.zeros', 'np.zeros', (['(pads_before, original_space_size)'], {}), '((pads_before, original_space_size))\n', (2099, 2135), True, 'import numpy as np\n'), ((2157, 2200), 'numpy.zeros', 'np.zeros', (['(pads_after, original_space_size)'], {}), '((pads_after, original_space_size))\n', (2165, 2200), True, 'import numpy as np\n'), ((2222, 2254), 'numpy.identity', 'np.identity', (['original_space_size'], {}), '(original_space_size)\n', (2233, 2254), True, 'import numpy as np\n'), ((2270, 2329), 'numpy.concatenate', 'np.concatenate', (['[first_pad, in_between, second_pad]'], {'axis': '(0)'}), '([first_pad, in_between, second_pad], axis=0)\n', (2284, 2329), True, 'import numpy as np\n'), ((13167, 13235), 'numpy.concatenate', 'np.concatenate', (['[pad_array_before, vector, pad_array_after]'], {'axis': '(-1)'}), '([pad_array_before, vector, pad_array_after], axis=-1)\n', (13181, 13235), True, 'import numpy as np\n'), ((13041, 13066), 'numpy.ones', 'np.ones', (['pad_before_shape'], {}), '(pad_before_shape)\n', (13048, 13066), True, 'import numpy as np\n'), ((13110, 13134), 'numpy.ones', 'np.ones', (['pad_after_shape'], {}), '(pad_after_shape)\n', (13117, 13134), True, 'import numpy as np\n'), ((16969, 16999), 'numpy.arange', 'np.arange', (['original_space_size'], {}), '(original_space_size)\n', (16978, 16999), True, 'import numpy as np\n')] |
import numpy as np
from scipy import signal
import math
import skimage.measure
import copy
from PIL import Image
import os
def loadimage(nom):
    """Load an image file as a flat grayscale array with values in [0, 1]."""
    grey = Image.open(nom).convert('L')
    pixels = np.array(grey).flatten()
    return pixels / 255
# Learning-rate-like factor used by the layers' weight updates.
PERSISTANCE = 0.05
# Directories of 10x10 training images: crosses (CROIX) and circles (ROND).
pathCroix = 'DATAIN\\CROIX\\'
pathRond = 'DATAIN\\ROND\\'
imgCROIX = []
imgROND = []
# Load every image of each class as a 10x10 array of floats in [0, 1].
for _, _, f in os.walk(pathCroix):
    for issou in f:
        imgCROIX.append(np.reshape(loadimage(pathCroix + issou), (10, 10)))
for _, _, f in os.walk(pathRond):
    for issou in f:
        imgROND.append(np.reshape(loadimage(pathRond + issou), (10, 10)))
DATAIN = imgCROIX + imgROND
# One-hot labels aligned with DATAIN: [1, 0] for crosses, [0, 1] for circles.
EXPECT = np.concatenate((np.full(len(imgCROIX), [1, 0], dtype = '2f'), np.full(len(imgROND), [0, 1], dtype = '2f')))
def reLu(x):
    """Element-wise ReLU: max(x, 0).

    Bug fix: the previous version called np.maximum(x, 0, x), whose third
    positional argument is `out`, so it silently overwrote the caller's
    array in place. This version leaves the input untouched.
    """
    return np.maximum(x, 0)
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), with a crude overflow fallback."""
    try:
        result = 1 / (1 + np.exp(-x))
    except OverflowError:
        result = 0.5
        print('Overflow !!!!!!!')
    return result
class inputLayer:
    """First layer of the network: simply holds the raw input image."""
    def __init__(self, nom, tailleIn):
        # Square zero buffer until real data is fed in.
        self.data = np.zeros((tailleIn, tailleIn))
        self.name = nom

    def inputData(self, data):
        """Store `data` as this layer's output."""
        self.data = data
class convolutionLayer:
    """Convolution layer: `nbFiltres` square kernels plus their ReLU activations."""
    def __init__(self, nom, tailleIn, tailleNoyau, nbFiltres): # tailleIn is the size of the studied image, tailleNoyau that of the kernel
        self.nom = nom
        self.nbFiltres = nbFiltres
        self.tailleIn = tailleIn
        self.tailleNoyau = tailleNoyau
        # One 'valid'-mode convolution output per filter.
        self.data = np.zeros((nbFiltres, tailleIn - tailleNoyau + 1, tailleIn - tailleNoyau + 1), dtype = 'float')
        # Kernels initialised uniformly in [-1, 1).
        self.filtres = np.random.rand(nbFiltres, tailleNoyau, tailleNoyau) * 2 - 1

    def propagate(self, dataIn):
        """Fill this layer's data with the ReLU of each filter's 'valid' convolution of dataIn."""
        for i in range(self.nbFiltres):
            self.data[i] = reLu(signal.convolve(dataIn, self.filtres[i], mode = 'valid'))

    def backPropagate(self, dH, W, nbFiltres, DELTA, DATA):
        # dH is the gradient from the downstream layers, already computed.
        global PERSISTANCE
        """dH est le gradient des couches en aval, déjà calculé."""
        dX = np.zeros((self.tailleIn, self.tailleIn))
        dF = np.zeros((self.nbFiltres, self.tailleNoyau, self.tailleNoyau))
        # Two 90-degree rotations of dH. NOTE(review): np.rot90 defaults to
        # axes (0, 1), which for a 3-D dH rotates across the filter axis,
        # not within each map -- confirm this is intended.
        temp = np.rot90(np.rot90(dH))
        nbFiltres, X, Y = dH.shape
        for i in range(self.nbFiltres):
            for x in range(X):
                for y in range(Y):
                    # Accumulate input and filter gradients over every output position.
                    dX[x:x + self.tailleNoyau, y:y + self.tailleNoyau] += self.filtres[i] * temp[i, x, y]
                    dF[i] += DATA[x:x + self.tailleNoyau, y:y + self.tailleNoyau] * temp[i, x, y]
            # NOTE(review): the update scales dF by the filter itself, which
            # is not plain gradient descent -- confirm intended.
            self.filtres[i] -= PERSISTANCE * dF[i] * (self.filtres[i])
        return (0, 0, 0, DELTA)
class poolLayer:
    """Pooling (downsampling) layer over stacked feature maps."""
    def __init__(self, nom, tailleIn, tailleNoyau = 2, mode = 'max'):
        self.nom = nom
        self.tailleIn = tailleIn
        # 'max', 'mean' or 'sum' reduction over tailleNoyau x tailleNoyau blocks.
        self.mode = mode
        self.tailleNoyau = tailleNoyau
        self.data = None

    def propagate(self, data):
        """Downsample every feature map in `data` with the configured reduction.

        Bug fix: the non-'max' branches previously tested the undefined local
        name `mode` (always a NameError at runtime) and misspelled 'mean' as
        'meam'; they now test self.mode with the correct spelling.
        """
        liste = []
        for dat in data:
            if self.mode == 'max':
                liste.append(skimage.measure.block_reduce(dat, (self.tailleNoyau, self.tailleNoyau), np.max))
            elif self.mode == 'mean':
                liste.append(skimage.measure.block_reduce(dat, (self.tailleNoyau, self.tailleNoyau), np.mean))
            elif self.mode == 'sum':
                liste.append(skimage.measure.block_reduce(dat, (self.tailleNoyau, self.tailleNoyau), np.sum))
        self.data = np.array(liste)

    def backPropagate(self, dH, W, nbFiltres, DELTA, DATA):
        """Upsample the gradient dH back to the pre-pooling resolution.

        NOTE(review): the first dimension of dW is scaled by tailleNoyau,
        which looks unintended (presumably it should stay nbFiltres), and the
        filter index `i` is not used in the assignment target -- kept as-is
        to avoid changing downstream shapes; TODO confirm.
        """
        dW = np.zeros((dH.shape[0] * self.tailleNoyau, dH.shape[1] * self.tailleNoyau, dH.shape[2] * self.tailleNoyau))
        for i in range(nbFiltres):
            for x in range(dW.shape[1]):
                for y in range(dW.shape[2]):
                    dW[x, y] = dH[i, x // self.tailleNoyau, y // self.tailleNoyau]
        return dW, W, nbFiltres, DELTA
class fullyConnected:
    """Dense layer; `type` selects hidden/junction/output behaviour."""
    def __init__(self, nom, tailleIn, taille, type = 'hidden'):
        """taille is the size of this layer, tailleIn that of the previous one."""
        """type can be 'hidden', 'junction' or 'output'."""
        self.nom = nom
        self.tailleIn = tailleIn
        self.taille = taille
        # Incoming weights, initialised uniformly in [-1, 1).
        self.weights = np.random.rand(self.taille, self.tailleIn) * 2 - 1 # The weights are the ones used to arrive at this layer!
        self.data = np.zeros(self.taille)
        self.type = type

    def propagate(self, data):
        # A 'junction' layer only flattens the stacked feature maps; the
        # others apply a sigmoid-activated affine map (no bias term).
        if self.type == 'junction':
            self.data = np.ndarray.flatten(data)
        else:
            self.data = sigmoid(np.dot(data, self.weights.T))

    def outputData(self):
        # Accessor for this layer's activations.
        return self.data

    def backPropagate(self, dH, W, nbFiltres, DELTA, DATA):
        """W is the matrix of weights associated with the transition to the next layer."""
        global PERSISTANCE
        if self.type == 'junction':
            dX = np.array(self.tailleIn)
            dW = np.array(W.shape)
            # Sigmoid derivative s * (1 - s) of this layer's activations.
            der = np.expand_dims(self.data, 0) * (1 - np.expand_dims(self.data, 0))
            dW = np.dot(dH, W) * der
            # Reshape the flat gradient back into (filters, h, w) maps;
            # assumes square feature maps -- TODO confirm.
            tailleAvant = int(np.sqrt(self.tailleIn // nbFiltres))
            return (np.reshape(dW, (nbFiltres, tailleAvant, tailleAvant)), W, nbFiltres, DELTA)
        else:
            dX = np.array(self.tailleIn)
            dW = np.array(W.shape)
            der = np.expand_dims(self.data, 0) * (1 - np.expand_dims(self.data, 0))
            dW = np.dot(dH, W) * der
            #self.weights -= PERSISTANCE * np.dot(self.data, dW)
            # NOTE(review): the in-place weight update above is commented out;
            # only the deltas are accumulated into DELTA -- confirm intended.
            DELTA.append(np.dot(self.data, dW.T))
            return(dW, self.weights, nbFiltres, DELTA)
class network:
    """A feed-forward network made of the layer objects given in ``layers``."""

    def __init__(self, nom, layers):
        self.nom = nom
        self.layers = layers
        self.layersNumber = len(layers)
        # Number of back-propagation cycles already performed.
        self.epochs = 0

    def propagateLayers(self):
        """Feed every layer with the output of the one just before it."""
        for idx in range(1, self.layersNumber):
            self.layers[idx].propagate(self.layers[idx - 1].data)

    def inputData(self, data):
        self.layers[0].inputData(data)

    def outputData(self):
        return self.layers[self.layersNumber - 1].data

    def lossOne(self, data, expect):
        """Squared error of the network output against ``expect`` for one sample."""
        self.inputData(data)
        self.propagateLayers()
        resultat = self.outputData()
        total = 0
        for idx, target in enumerate(expect):
            total += (target - resultat[idx]) ** 2
        return total

    def lossAll(self, DATAIN, EXPECT):
        """Sum of the per-sample losses over the whole dataset."""
        return sum(self.lossOne(DATAIN[idx], EXPECT[idx]) for idx in range(len(DATAIN)))

    def train(self, DATAIN, EXPECT, number):
        """Run ``number`` epochs of propagation followed by back-propagation."""
        for epoch in range(number):
            print(epoch)
            print(self.lossAll(DATAIN, EXPECT))
            for idx in range(len(DATAIN)):
                self.inputData(DATAIN[idx])
                self.propagateLayers()
                self.backPropagateLayers(EXPECT[idx], DATAIN[idx])
            self.epochs += 1

    def backPropagateLayers(self, expect, DATAIN):
        """Back-propagate the error through every layer, then apply the updates."""
        global PERSISTANCE
        last = self.layersNumber - 1
        out = self.layers[last].data
        diff = out - expect
        dH = diff * np.expand_dims(out, 0) * (1 - np.expand_dims(out, 0))
        W = self.layers[last].weights
        nbFiltres = self.layers[1].nbFiltres
        DELTA = []
        for idx in reversed(range(1, self.layersNumber - 1)):
            dH, W, nbFiltres, DELTA = self.layers[idx].backPropagate(dH, W, nbFiltres, DELTA, DATAIN)
        # Apply the accumulated updates on the fully connected layers.
        for k in range(len(DELTA)):
            update = np.dot(np.expand_dims(self.layers[self.layersNumber - k - 2].data, axis = 0).T, np.expand_dims(DELTA[k], axis = 0)).T
            self.layers[self.layersNumber - k - 1].weights -= PERSISTANCE * update
# Build a small CNN: 10x10 input -> 16 conv filters of size 3 (8x8 maps)
# -> pooling on the 8x8 maps (default 2x2 max pooling) -> flatten junction
# of 256 values -> fully connected 256 -> 64 -> 16 -> 2 outputs.
# NOTE(review): ``input`` shadows the builtin of the same name; it is only
# used on the last line here, but renaming is deferred in case later code
# outside this view references it.
input = inputLayer('input', 10)
conv = convolutionLayer('conv', 10, 3, 16)
po = poolLayer('po', 8)
trans = fullyConnected('trans', 256, 256, type = 'junction')
f2 = fullyConnected('f2', 256, 64)
f25 = fullyConnected('f25', 64, 16)
f3 = fullyConnected('f3', 16, 2, type = 'output')
net = network('net', [input, conv, po, trans, f2, f25, f3])
| [
"numpy.maximum",
"os.walk",
"numpy.zeros",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.rot90",
"numpy.array",
"numpy.exp",
"numpy.reshape",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt",
"scipy.signal.convolve",
"numpy.ndarray.flatten"
] | [((367, 385), 'os.walk', 'os.walk', (['pathCroix'], {}), '(pathCroix)\n', (374, 385), False, 'import os\n'), ((503, 520), 'os.walk', 'os.walk', (['pathRond'], {}), '(pathRond)\n', (510, 520), False, 'import os\n'), ((795, 814), 'numpy.maximum', 'np.maximum', (['x', '(0)', 'x'], {}), '(x, 0, x)\n', (805, 814), True, 'import numpy as np\n'), ((1077, 1107), 'numpy.zeros', 'np.zeros', (['(tailleIn, tailleIn)'], {}), '((tailleIn, tailleIn))\n', (1085, 1107), True, 'import numpy as np\n'), ((1510, 1607), 'numpy.zeros', 'np.zeros', (['(nbFiltres, tailleIn - tailleNoyau + 1, tailleIn - tailleNoyau + 1)'], {'dtype': '"""float"""'}), "((nbFiltres, tailleIn - tailleNoyau + 1, tailleIn - tailleNoyau + 1\n ), dtype='float')\n", (1518, 1607), True, 'import numpy as np\n'), ((2117, 2157), 'numpy.zeros', 'np.zeros', (['(self.tailleIn, self.tailleIn)'], {}), '((self.tailleIn, self.tailleIn))\n', (2125, 2157), True, 'import numpy as np\n'), ((2172, 2234), 'numpy.zeros', 'np.zeros', (['(self.nbFiltres, self.tailleNoyau, self.tailleNoyau)'], {}), '((self.nbFiltres, self.tailleNoyau, self.tailleNoyau))\n', (2180, 2234), True, 'import numpy as np\n'), ((3585, 3600), 'numpy.array', 'np.array', (['liste'], {}), '(liste)\n', (3593, 3600), True, 'import numpy as np\n'), ((3678, 3789), 'numpy.zeros', 'np.zeros', (['(dH.shape[0] * self.tailleNoyau, dH.shape[1] * self.tailleNoyau, dH.shape[2\n ] * self.tailleNoyau)'], {}), '((dH.shape[0] * self.tailleNoyau, dH.shape[1] * self.tailleNoyau, \n dH.shape[2] * self.tailleNoyau))\n', (3686, 3789), True, 'import numpy as np\n'), ((4507, 4528), 'numpy.zeros', 'np.zeros', (['self.taille'], {}), '(self.taille)\n', (4515, 4528), True, 'import numpy as np\n'), ((162, 177), 'PIL.Image.open', 'Image.open', (['nom'], {}), '(nom)\n', (172, 177), False, 'from PIL import Image\n'), ((2260, 2272), 'numpy.rot90', 'np.rot90', (['dH'], {}), '(dH)\n', (2268, 2272), True, 'import numpy as np\n'), ((4651, 4675), 'numpy.ndarray.flatten', 'np.ndarray.flatten', 
(['data'], {}), '(data)\n', (4669, 4675), True, 'import numpy as np\n'), ((5038, 5061), 'numpy.array', 'np.array', (['self.tailleIn'], {}), '(self.tailleIn)\n', (5046, 5061), True, 'import numpy as np\n'), ((5080, 5097), 'numpy.array', 'np.array', (['W.shape'], {}), '(W.shape)\n', (5088, 5097), True, 'import numpy as np\n'), ((5419, 5442), 'numpy.array', 'np.array', (['self.tailleIn'], {}), '(self.tailleIn)\n', (5427, 5442), True, 'import numpy as np\n'), ((5461, 5478), 'numpy.array', 'np.array', (['W.shape'], {}), '(W.shape)\n', (5469, 5478), True, 'import numpy as np\n'), ((204, 216), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (212, 216), True, 'import numpy as np\n'), ((868, 878), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (874, 878), True, 'import numpy as np\n'), ((1631, 1682), 'numpy.random.rand', 'np.random.rand', (['nbFiltres', 'tailleNoyau', 'tailleNoyau'], {}), '(nbFiltres, tailleNoyau, tailleNoyau)\n', (1645, 1682), True, 'import numpy as np\n'), ((1885, 1939), 'scipy.signal.convolve', 'signal.convolve', (['dataIn', 'self.filtres[i]'], {'mode': '"""valid"""'}), "(dataIn, self.filtres[i], mode='valid')\n", (1900, 1939), False, 'from scipy import signal\n'), ((4389, 4431), 'numpy.random.rand', 'np.random.rand', (['self.taille', 'self.tailleIn'], {}), '(self.taille, self.tailleIn)\n', (4403, 4431), True, 'import numpy as np\n'), ((4724, 4752), 'numpy.dot', 'np.dot', (['data', 'self.weights.T'], {}), '(data, self.weights.T)\n', (4730, 4752), True, 'import numpy as np\n'), ((5117, 5145), 'numpy.expand_dims', 'np.expand_dims', (['self.data', '(0)'], {}), '(self.data, 0)\n', (5131, 5145), True, 'import numpy as np\n'), ((5201, 5214), 'numpy.dot', 'np.dot', (['dH', 'W'], {}), '(dH, W)\n', (5207, 5214), True, 'import numpy as np\n'), ((5252, 5287), 'numpy.sqrt', 'np.sqrt', (['(self.tailleIn // nbFiltres)'], {}), '(self.tailleIn // nbFiltres)\n', (5259, 5287), True, 'import numpy as np\n'), ((5310, 5363), 'numpy.reshape', 'np.reshape', (['dW', 
'(nbFiltres, tailleAvant, tailleAvant)'], {}), '(dW, (nbFiltres, tailleAvant, tailleAvant))\n', (5320, 5363), True, 'import numpy as np\n'), ((5498, 5526), 'numpy.expand_dims', 'np.expand_dims', (['self.data', '(0)'], {}), '(self.data, 0)\n', (5512, 5526), True, 'import numpy as np\n'), ((5582, 5595), 'numpy.dot', 'np.dot', (['dH', 'W'], {}), '(dH, W)\n', (5588, 5595), True, 'import numpy as np\n'), ((5694, 5717), 'numpy.dot', 'np.dot', (['self.data', 'dW.T'], {}), '(self.data, dW.T)\n', (5700, 5717), True, 'import numpy as np\n'), ((7322, 7345), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (7336, 7345), True, 'import numpy as np\n'), ((7353, 7376), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (7367, 7376), True, 'import numpy as np\n'), ((5153, 5181), 'numpy.expand_dims', 'np.expand_dims', (['self.data', '(0)'], {}), '(self.data, 0)\n', (5167, 5181), True, 'import numpy as np\n'), ((5534, 5562), 'numpy.expand_dims', 'np.expand_dims', (['self.data', '(0)'], {}), '(self.data, 0)\n', (5548, 5562), True, 'import numpy as np\n'), ((7883, 7915), 'numpy.expand_dims', 'np.expand_dims', (['DELTA[i]'], {'axis': '(0)'}), '(DELTA[i], axis=0)\n', (7897, 7915), True, 'import numpy as np\n'), ((7810, 7877), 'numpy.expand_dims', 'np.expand_dims', (['self.layers[self.layersNumber - i - 2].data'], {'axis': '(0)'}), '(self.layers[self.layersNumber - i - 2].data, axis=0)\n', (7824, 7877), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of exma (https://github.com/fernandezfran/exma/).
# Copyright (c) 2021, <NAME>
# License: MIT
# Full Text: https://github.com/fernandezfran/exma/blob/master/LICENSE
# ============================================================================
# IMPORTS
# ============================================================================
import exma.io.positions
import numpy as np
import pytest
# ============================================================================
# TESTS
# ============================================================================
def test_sc():
    """Test that the atoms are placed in a simple cubic crystal."""
    expected = {
        "box": np.full(3, 1.0),
        "x": np.array([0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]),
        "y": np.array([0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5]),
        "z": np.array([0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5]),
    }

    result = exma.io.positions.Positions(8, 1.0).sc()

    assert result["natoms"] == 8
    for key, reference in expected.items():
        np.testing.assert_array_equal(result[key], reference)
def test_bcc():
    """Test that the atoms are placed in a body-centered cubic crystal."""
    # The reference coordinates follow a regular pattern, built here from
    # repeated sub-lists instead of spelling out all 16 literals.
    expected = {
        "box": np.full(3, 1.0),
        "x": np.array([0.0] * 4 + [0.5] * 4 + [0.25] * 4 + [0.75] * 4),
        "y": np.array([0.0, 0.0, 0.5, 0.5] * 2 + [0.25, 0.25, 0.75, 0.75] * 2),
        "z": np.array([0.0, 0.5] * 4 + [0.25, 0.75] * 4),
    }

    result = exma.io.positions.Positions(16, 1.0).bcc()

    assert result["natoms"] == 16
    for key, reference in expected.items():
        np.testing.assert_array_equal(result[key], reference)
def test_fcc():
    """Test that the atoms are placed in a face-centered cubic crystal."""
    # The reference coordinates follow a regular pattern, built here from
    # repeated sub-lists instead of spelling out all 32 literals.
    expected = {
        "box": np.full(3, 1.0),
        "x": np.array(
            [0.0] * 4 + [0.5] * 4 + [0.25] * 4 + [0.75] * 4
            + [0.25] * 4 + [0.75] * 4 + [0.0] * 4 + [0.5] * 4
        ),
        "y": np.array(([0.0, 0.0, 0.5, 0.5] * 2 + [0.25, 0.25, 0.75, 0.75] * 2) * 2),
        "z": np.array([0.0, 0.5] * 8 + [0.25, 0.75] * 8),
    }

    result = exma.io.positions.Positions(32, 1.0).fcc()

    assert result["natoms"] == 32
    for key, reference in expected.items():
        np.testing.assert_array_equal(result[key], reference)
def test_dc():
    """Test that the atoms are placed in a diamond cubic crystal."""
    expected = {
        "box": np.full(3, 1.0),
        "x": np.array([0.25, 0.0, 0.25, 0.0, 0.75, 0.5, 0.75, 0.5]),
        "y": np.array([0.75, 0.0, 0.25, 0.5, 0.75, 0.0, 0.25, 0.5]),
        "z": np.array([0.25, 0.5, 0.75, 0.0, 0.75, 0.0, 0.25, 0.5]),
    }

    result = exma.io.positions.Positions(8, 1.0).dc()

    assert result["natoms"] == 8
    for key, reference in expected.items():
        np.testing.assert_array_equal(result[key], reference)
def test_sc_raise():
    """Test the raise of sc."""
    with pytest.raises(ValueError):
        exma.io.positions.Positions(7, 1.0).sc()
def test_bcc_raise():
    """Test the raise of the bcc."""
    with pytest.raises(ValueError):
        exma.io.positions.Positions(19, 1.0).bcc()
def test_fcc_raise():
    """Test the raise of the fcc."""
    with pytest.raises(ValueError):
        exma.io.positions.Positions(37, 1.0).fcc()
def test_dc_raise():
    """Test the raise of the dc."""
    with pytest.raises(ValueError):
        exma.io.positions.Positions(9, 1.0).dc()
def test_spherical_nanoparticle():
    """Test the spherical nanoparticle"""
    frame = {
        "box": np.full(3, 1.0),
        "x": np.array([0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]),
        "y": np.array([0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5]),
        "z": np.array([0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5]),
    }
    expected = {
        "x": np.array([-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5]),
        "y": np.array([0.0, -0.5, 0.0, 0.0, 0.0, 0.5, 0.0]),
        "z": np.array([0.0, 0.0, -0.5, 0.0, 0.5, 0.0, 0.0]),
    }

    result = exma.io.positions.spherical_nanoparticle(frame, 0.6)

    assert result["natoms"] == 7
    for key, reference in expected.items():
        np.testing.assert_array_equal(result[key], reference)
def test_replicate():
    """Test the replicate function."""
    # One diamond-cubic cell of 8 Si atoms replicated 2x2x2 -> 64 atoms.
    cell = 5.468728
    natomsref = 8 * 2 * 2 * 2
    boxref = np.full(3, 2 * cell)
    typesref = ["Si"] * natomsref
    # The 64 reference coordinates are periodic: each axis is a base pattern
    # followed by the same pattern shifted by one cell length, so they are
    # built from repeated sub-lists instead of 64 literals each.
    xlo = [1.367182, 0.000000, 1.367182, 0.000000, 4.101546, 2.734364, 4.101546, 2.734364]
    xhi = [6.83591, 5.468728, 6.83591, 5.468728, 9.570274, 8.203092, 9.570274, 8.203092]
    xref = np.array(xlo * 4 + xhi * 4)
    ylo = [4.101546, 0.000000, 1.367182, 2.734364]
    yhi = [9.570274, 5.468728, 6.83591, 8.203092]
    yref = np.array((ylo * 4 + yhi * 4) * 2)
    zlo = [1.367182, 2.734364, 4.101546, 0.000000, 4.101546, 0.000000, 1.367182, 2.734364]
    zhi = [6.83591, 8.203092, 9.570274, 5.468728, 9.570274, 5.468728, 6.83591, 8.203092]
    zref = np.array((zlo + zhi) * 4)
    frame = {
        "natoms": 8,
        "box": np.full(3, cell),
        "type": ["Si"] * 8,
        "x": np.array([0.25, 0.0, 0.25, 0.0, 0.75, 0.5, 0.75, 0.5]),
        "y": np.array([0.75, 0.0, 0.25, 0.5, 0.75, 0.0, 0.25, 0.5]),
        "z": np.array([0.25, 0.5, 0.75, 0.0, 0.75, 0.0, 0.25, 0.5]),
    }

    result = exma.io.positions.replicate(frame, [2, 2, 2])

    assert result["natoms"] == natomsref
    np.testing.assert_array_almost_equal(result["box"], boxref)
    np.testing.assert_array_equal(result["type"], typesref)
    np.testing.assert_array_almost_equal(result["x"], xref)
    np.testing.assert_array_almost_equal(result["y"], yref)
    np.testing.assert_array_almost_equal(result["z"], zref)
| [
"numpy.full",
"numpy.testing.assert_array_equal",
"pytest.raises",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
] | [((729, 744), 'numpy.full', 'np.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (736, 744), True, 'import numpy as np\n'), ((756, 806), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5])\n', (764, 806), True, 'import numpy as np\n'), ((818, 868), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5]'], {}), '([0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5])\n', (826, 868), True, 'import numpy as np\n'), ((880, 930), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5]'], {}), '([0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5])\n', (888, 930), True, 'import numpy as np\n'), ((1024, 1076), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['box']", 'boxref'], {}), "(result['box'], boxref)\n", (1053, 1076), True, 'import numpy as np\n'), ((1081, 1129), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['x']", 'xref'], {}), "(result['x'], xref)\n", (1110, 1129), True, 'import numpy as np\n'), ((1134, 1182), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['y']", 'yref'], {}), "(result['y'], yref)\n", (1163, 1182), True, 'import numpy as np\n'), ((1187, 1235), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['z']", 'zref'], {}), "(result['z'], zref)\n", (1216, 1235), True, 'import numpy as np\n'), ((1342, 1357), 'numpy.full', 'np.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (1349, 1357), True, 'import numpy as np\n'), ((1369, 1472), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.25, 0.75, 0.75,\n 0.75, 0.75]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.25, \n 0.75, 0.75, 0.75, 0.75])\n', (1377, 1472), True, 'import numpy as np\n'), ((1696, 1799), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.25, 0.25, 0.75, 0.75, 0.25, 0.25,\n 0.75, 0.75]'], {}), '([0.0, 0.0, 0.5, 0.5, 
0.0, 0.0, 0.5, 0.5, 0.25, 0.25, 0.75, 0.75, \n 0.25, 0.25, 0.75, 0.75])\n', (1704, 1799), True, 'import numpy as np\n'), ((2023, 2126), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75,\n 0.25, 0.75]'], {}), '([0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.25, 0.75, 0.25, 0.75, \n 0.25, 0.75, 0.25, 0.75])\n', (2031, 2126), True, 'import numpy as np\n'), ((2435, 2487), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['box']", 'boxref'], {}), "(result['box'], boxref)\n", (2464, 2487), True, 'import numpy as np\n'), ((2492, 2540), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['x']", 'xref'], {}), "(result['x'], xref)\n", (2521, 2540), True, 'import numpy as np\n'), ((2545, 2593), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['y']", 'yref'], {}), "(result['y'], yref)\n", (2574, 2593), True, 'import numpy as np\n'), ((2598, 2646), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['z']", 'zref'], {}), "(result['z'], zref)\n", (2627, 2646), True, 'import numpy as np\n'), ((2753, 2768), 'numpy.full', 'np.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (2760, 2768), True, 'import numpy as np\n'), ((2780, 2975), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.25, 0.75, 0.75,\n 0.75, 0.75, 0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75, 0.0, 0.0, \n 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.25, \n 0.75, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75,\n 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5])\n', (2788, 2975), True, 'import numpy as np\n'), ((3387, 3583), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.25, 0.25, 0.75, 0.75, 0.25, 0.25,\n 0.75, 0.75, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.25, 0.25, 0.75, \n 0.75, 0.25, 0.25, 0.75, 0.75]'], {}), '([0.0, 0.0, 
0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.25, 0.25, 0.75, 0.75, \n 0.25, 0.25, 0.75, 0.75, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.25, \n 0.25, 0.75, 0.75, 0.25, 0.25, 0.75, 0.75])\n', (3395, 3583), True, 'import numpy as np\n'), ((3994, 4190), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,\n 0.5, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, \n 0.75, 0.25, 0.75, 0.25, 0.75]'], {}), '([0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, \n 0.5, 0.0, 0.5, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, \n 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75])\n', (4002, 4190), True, 'import numpy as np\n'), ((4686, 4738), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['box']", 'boxref'], {}), "(result['box'], boxref)\n", (4715, 4738), True, 'import numpy as np\n'), ((4743, 4791), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['x']", 'xref'], {}), "(result['x'], xref)\n", (4772, 4791), True, 'import numpy as np\n'), ((4796, 4844), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['y']", 'yref'], {}), "(result['y'], yref)\n", (4825, 4844), True, 'import numpy as np\n'), ((4849, 4897), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['z']", 'zref'], {}), "(result['z'], zref)\n", (4878, 4897), True, 'import numpy as np\n'), ((4997, 5012), 'numpy.full', 'np.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (5004, 5012), True, 'import numpy as np\n'), ((5024, 5078), 'numpy.array', 'np.array', (['[0.25, 0.0, 0.25, 0.0, 0.75, 0.5, 0.75, 0.5]'], {}), '([0.25, 0.0, 0.25, 0.0, 0.75, 0.5, 0.75, 0.5])\n', (5032, 5078), True, 'import numpy as np\n'), ((5090, 5144), 'numpy.array', 'np.array', (['[0.75, 0.0, 0.25, 0.5, 0.75, 0.0, 0.25, 0.5]'], {}), '([0.75, 0.0, 0.25, 0.5, 0.75, 0.0, 0.25, 0.5])\n', (5098, 5144), True, 'import numpy as np\n'), ((5156, 5210), 'numpy.array', 'np.array', 
(['[0.25, 0.5, 0.75, 0.0, 0.75, 0.0, 0.25, 0.5]'], {}), '([0.25, 0.5, 0.75, 0.0, 0.75, 0.0, 0.25, 0.5])\n', (5164, 5210), True, 'import numpy as np\n'), ((5304, 5356), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['box']", 'boxref'], {}), "(result['box'], boxref)\n", (5333, 5356), True, 'import numpy as np\n'), ((5361, 5409), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['x']", 'xref'], {}), "(result['x'], xref)\n", (5390, 5409), True, 'import numpy as np\n'), ((5414, 5462), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['y']", 'yref'], {}), "(result['y'], yref)\n", (5443, 5462), True, 'import numpy as np\n'), ((5467, 5515), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['z']", 'zref'], {}), "(result['z'], zref)\n", (5496, 5515), True, 'import numpy as np\n'), ((6290, 6336), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5])\n', (6298, 6336), True, 'import numpy as np\n'), ((6348, 6394), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0, 0.0, 0.0, 0.5, 0.0]'], {}), '([0.0, -0.5, 0.0, 0.0, 0.0, 0.5, 0.0])\n', (6356, 6394), True, 'import numpy as np\n'), ((6406, 6452), 'numpy.array', 'np.array', (['[0.0, 0.0, -0.5, 0.0, 0.5, 0.0, 0.0]'], {}), '([0.0, 0.0, -0.5, 0.0, 0.5, 0.0, 0.0])\n', (6414, 6452), True, 'import numpy as np\n'), ((6806, 6854), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['x']", 'xref'], {}), "(result['x'], xref)\n", (6835, 6854), True, 'import numpy as np\n'), ((6859, 6907), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['y']", 'yref'], {}), "(result['y'], yref)\n", (6888, 6907), True, 'import numpy as np\n'), ((6912, 6960), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['z']", 'zref'], {}), "(result['z'], zref)\n", (6941, 6960), True, 'import numpy as np\n'), 
((7067, 7091), 'numpy.full', 'np.full', (['(3)', '(2 * 5.468728)'], {}), '(3, 2 * 5.468728)\n', (7074, 7091), True, 'import numpy as np\n'), ((7141, 7783), 'numpy.array', 'np.array', (['[1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, 2.734364, \n 1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, 2.734364, \n 1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, 2.734364, \n 1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, 2.734364, \n 6.83591, 5.468728, 6.83591, 5.468728, 9.570274, 8.203092, 9.570274, \n 8.203092, 6.83591, 5.468728, 6.83591, 5.468728, 9.570274, 8.203092, \n 9.570274, 8.203092, 6.83591, 5.468728, 6.83591, 5.468728, 9.570274, \n 8.203092, 9.570274, 8.203092, 6.83591, 5.468728, 6.83591, 5.468728, \n 9.570274, 8.203092, 9.570274, 8.203092]'], {}), '([1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, \n 2.734364, 1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, \n 2.734364, 1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, \n 2.734364, 1.367182, 0.0, 1.367182, 0.0, 4.101546, 2.734364, 4.101546, \n 2.734364, 6.83591, 5.468728, 6.83591, 5.468728, 9.570274, 8.203092, \n 9.570274, 8.203092, 6.83591, 5.468728, 6.83591, 5.468728, 9.570274, \n 8.203092, 9.570274, 8.203092, 6.83591, 5.468728, 6.83591, 5.468728, \n 9.570274, 8.203092, 9.570274, 8.203092, 6.83591, 5.468728, 6.83591, \n 5.468728, 9.570274, 8.203092, 9.570274, 8.203092])\n', (7149, 7783), True, 'import numpy as np\n'), ((8588, 9230), 'numpy.array', 'np.array', (['[4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, 1.367182, 2.734364, \n 4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, 1.367182, 2.734364, \n 9.570274, 5.468728, 6.83591, 8.203092, 9.570274, 5.468728, 6.83591, \n 8.203092, 9.570274, 5.468728, 6.83591, 8.203092, 9.570274, 5.468728, \n 6.83591, 8.203092, 4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, \n 1.367182, 2.734364, 4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, \n 1.367182, 2.734364, 9.570274, 5.468728, 
6.83591, 8.203092, 9.570274, \n 5.468728, 6.83591, 8.203092, 9.570274, 5.468728, 6.83591, 8.203092, \n 9.570274, 5.468728, 6.83591, 8.203092]'], {}), '([4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, 1.367182, \n 2.734364, 4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, 1.367182, \n 2.734364, 9.570274, 5.468728, 6.83591, 8.203092, 9.570274, 5.468728, \n 6.83591, 8.203092, 9.570274, 5.468728, 6.83591, 8.203092, 9.570274, \n 5.468728, 6.83591, 8.203092, 4.101546, 0.0, 1.367182, 2.734364, \n 4.101546, 0.0, 1.367182, 2.734364, 4.101546, 0.0, 1.367182, 2.734364, \n 4.101546, 0.0, 1.367182, 2.734364, 9.570274, 5.468728, 6.83591, \n 8.203092, 9.570274, 5.468728, 6.83591, 8.203092, 9.570274, 5.468728, \n 6.83591, 8.203092, 9.570274, 5.468728, 6.83591, 8.203092])\n', (8596, 9230), True, 'import numpy as np\n'), ((10035, 10677), 'numpy.array', 'np.array', (['[1.367182, 2.734364, 4.101546, 0.0, 4.101546, 0.0, 1.367182, 2.734364, \n 6.83591, 8.203092, 9.570274, 5.468728, 9.570274, 5.468728, 6.83591, \n 8.203092, 1.367182, 2.734364, 4.101546, 0.0, 4.101546, 0.0, 1.367182, \n 2.734364, 6.83591, 8.203092, 9.570274, 5.468728, 9.570274, 5.468728, \n 6.83591, 8.203092, 1.367182, 2.734364, 4.101546, 0.0, 4.101546, 0.0, \n 1.367182, 2.734364, 6.83591, 8.203092, 9.570274, 5.468728, 9.570274, \n 5.468728, 6.83591, 8.203092, 1.367182, 2.734364, 4.101546, 0.0, \n 4.101546, 0.0, 1.367182, 2.734364, 6.83591, 8.203092, 9.570274, \n 5.468728, 9.570274, 5.468728, 6.83591, 8.203092]'], {}), '([1.367182, 2.734364, 4.101546, 0.0, 4.101546, 0.0, 1.367182, \n 2.734364, 6.83591, 8.203092, 9.570274, 5.468728, 9.570274, 5.468728, \n 6.83591, 8.203092, 1.367182, 2.734364, 4.101546, 0.0, 4.101546, 0.0, \n 1.367182, 2.734364, 6.83591, 8.203092, 9.570274, 5.468728, 9.570274, \n 5.468728, 6.83591, 8.203092, 1.367182, 2.734364, 4.101546, 0.0, \n 4.101546, 0.0, 1.367182, 2.734364, 6.83591, 8.203092, 9.570274, \n 5.468728, 9.570274, 5.468728, 6.83591, 8.203092, 1.367182, 2.734364, \n 4.101546, 0.0, 
4.101546, 0.0, 1.367182, 2.734364, 6.83591, 8.203092, \n 9.570274, 5.468728, 9.570274, 5.468728, 6.83591, 8.203092])\n', (10043, 10677), True, 'import numpy as np\n'), ((11891, 11950), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["result['box']", 'boxref'], {}), "(result['box'], boxref)\n", (11927, 11950), True, 'import numpy as np\n'), ((11955, 12010), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["result['type']", 'typesref'], {}), "(result['type'], typesref)\n", (11984, 12010), True, 'import numpy as np\n'), ((12015, 12070), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["result['x']", 'xref'], {}), "(result['x'], xref)\n", (12051, 12070), True, 'import numpy as np\n'), ((12075, 12130), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["result['y']", 'yref'], {}), "(result['y'], yref)\n", (12111, 12130), True, 'import numpy as np\n'), ((12135, 12190), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["result['z']", 'zref'], {}), "(result['z'], zref)\n", (12171, 12190), True, 'import numpy as np\n'), ((5632, 5657), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5645, 5657), False, 'import pytest\n'), ((5805, 5830), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5818, 5830), False, 'import pytest\n'), ((5979, 6004), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5992, 6004), False, 'import pytest\n'), ((6150, 6175), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6163, 6175), False, 'import pytest\n'), ((6483, 6498), 'numpy.full', 'np.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (6490, 6498), True, 'import numpy as np\n'), ((6513, 6563), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5])\n', (6521, 6563), True, 
'import numpy as np\n'), ((6578, 6628), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5]'], {}), '([0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.5, 0.5])\n', (6586, 6628), True, 'import numpy as np\n'), ((6643, 6693), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5]'], {}), '([0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5])\n', (6651, 6693), True, 'import numpy as np\n'), ((11522, 11542), 'numpy.full', 'np.full', (['(3)', '(5.468728)'], {}), '(3, 5.468728)\n', (11529, 11542), True, 'import numpy as np\n'), ((11585, 11639), 'numpy.array', 'np.array', (['[0.25, 0.0, 0.25, 0.0, 0.75, 0.5, 0.75, 0.5]'], {}), '([0.25, 0.0, 0.25, 0.0, 0.75, 0.5, 0.75, 0.5])\n', (11593, 11639), True, 'import numpy as np\n'), ((11654, 11708), 'numpy.array', 'np.array', (['[0.75, 0.0, 0.25, 0.5, 0.75, 0.0, 0.25, 0.5]'], {}), '([0.75, 0.0, 0.25, 0.5, 0.75, 0.0, 0.25, 0.5])\n', (11662, 11708), True, 'import numpy as np\n'), ((11723, 11777), 'numpy.array', 'np.array', (['[0.25, 0.5, 0.75, 0.0, 0.75, 0.0, 0.25, 0.5]'], {}), '([0.25, 0.5, 0.75, 0.0, 0.75, 0.0, 0.25, 0.5])\n', (11731, 11777), True, 'import numpy as np\n')] |
from baconian.common.spaces.base import Space
import numpy as np
from typeguard import typechecked
from baconian.core.parameters import Parameters
from baconian.common.schedules import Scheduler
class ExplorationStrategy(object):
    """Base class for exploration strategies; subclasses implement predict()."""

    def __init__(self):
        # Concrete strategies install their own parameter container here.
        self.parameters = None

    def predict(self, **kwargs):
        """Return an action; must be provided by subclasses."""
        raise NotImplementedError
class EpsilonGreedy(ExplorationStrategy):
    """Epsilon-greedy exploration: act randomly with a (possibly scheduled)
    probability, otherwise delegate the prediction to the wrapped algorithm."""

    @typechecked
    def __init__(self, action_space: Space, init_random_prob: float, prob_scheduler: Scheduler = None):
        """
        :param action_space: space the random actions are sampled from.
        :param init_random_prob: initial probability of acting randomly.
        :param prob_scheduler: optional scheduler whose value() overrides
            the constant probability.
        """
        # Fixed: the original called super(ExplorationStrategy, self).__init__(),
        # which skipped ExplorationStrategy.__init__ entirely.
        super(EpsilonGreedy, self).__init__()
        self.action_space = action_space
        self.random_prob_func = lambda: init_random_prob
        if prob_scheduler:
            self.random_prob_func = prob_scheduler.value
        self.parameters = Parameters(parameters=dict(random_prob_func=self.random_prob_func),
                                     name='eps_greedy_params')

    def predict(self, **kwargs):
        # Explore with the scheduled probability, otherwise exploit the algo
        # passed by the caller via the ``algo`` keyword.
        if np.random.random() < self.parameters('random_prob_func')():
            return self.action_space.sample()
        else:
            algo = kwargs.pop('algo')
            return algo.predict(**kwargs)
| [
"numpy.random.random"
] | [((958, 976), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (974, 976), True, 'import numpy as np\n')] |
"""
====================================
Comparing Linear Bayesian Regressors
====================================
This example compares two different bayesian regressors:
- a :ref:`automatic_relevance_determination`
- a :ref:`bayesian_ridge_regression`
In the first part, we use an :ref:`ordinary_least_squares` (OLS) model as a
baseline for comparing the models' coefficients with respect to the true
coefficients. Thereafter, we show that the estimation of such models is done by
iteratively maximizing the marginal log-likelihood of the observations.
In the last section we plot predictions and uncertainties for the ARD and the
Bayesian Ridge regressions using a polynomial feature expansion to fit a
non-linear relationship between `X` and `y`.
"""
# Author: <NAME> <<EMAIL>>
# %%
# Models robustness to recover the ground truth weights
# =====================================================
#
# Generate synthetic dataset
# --------------------------
#
# We generate a dataset where `X` and `y` are linearly linked: 10 of the
# features of `X` will be used to generate `y`. The other features are not
# useful at predicting `y`. In addition, we generate a dataset where `n_samples
# == n_features`. Such a setting is challenging for an OLS model and leads
# potentially to arbitrary large weights. Having a prior on the weights and a
# penalty alleviates the problem. Finally, gaussian noise is added.
from sklearn.datasets import make_regression
X, y, true_weights = make_regression(
n_samples=100,
n_features=100,
n_informative=10,
noise=8,
coef=True,
random_state=42,
)
# %%
# Fit the regressors
# ------------------
#
# We now fit both Bayesian models and the OLS to later compare the models'
# coefficients.
import pandas as pd
from sklearn.linear_model import ARDRegression, LinearRegression, BayesianRidge
olr = LinearRegression().fit(X, y)
brr = BayesianRidge(compute_score=True, n_iter=30).fit(X, y)
ard = ARDRegression(compute_score=True, n_iter=30).fit(X, y)
df = pd.DataFrame(
{
"Weights of true generative process": true_weights,
"ARDRegression": ard.coef_,
"BayesianRidge": brr.coef_,
"LinearRegression": olr.coef_,
}
)
# %%
# Plot the true and estimated coefficients
# ----------------------------------------
#
# Now we compare the coefficients of each model with the weights of
# the true generative model.
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import SymLogNorm
plt.figure(figsize=(10, 6))
# Symmetric log color scale: linthresh=10e-4 (i.e. 1e-3) is the half-width of
# the linear region around zero; outside it the scale is logarithmic.
ax = sns.heatmap(
    df.T,
    norm=SymLogNorm(linthresh=10e-4, vmin=-80, vmax=80),
    cbar_kws={"label": "coefficients' values"},
    cmap="seismic_r",
)
plt.ylabel("linear model")
plt.xlabel("coefficients")
plt.tight_layout(rect=(0, 0, 1, 0.95))
_ = plt.title("Models' coefficients")
# %%
# Due to the added noise, none of the models recover the true weights. Indeed,
# all models always have more than 10 non-zero coefficients. Compared to the OLS
# estimator, the coefficients using a Bayesian Ridge regression are slightly
# shifted toward zero, which stabilises them. The ARD regression provides a
# sparser solution: some of the non-informative coefficients are set exactly to
# zero, while shifting others closer to zero. Some non-informative coefficients
# are still present and retain large values.
# %%
# Plot the marginal log-likelihood
# --------------------------------
import numpy as np
# scores_ holds one value per fitting iteration; the sign flip turns the
# curves into decreasing quantities (presumably the negated marginal
# log-likelihood -- confirm against the axis label below).
ard_scores = -np.array(ard.scores_)
brr_scores = -np.array(brr.scores_)
plt.plot(ard_scores, color="navy", label="ARD")
plt.plot(brr_scores, color="red", label="BayesianRidge")
plt.ylabel("Log-likelihood")
plt.xlabel("Iterations")
plt.xlim(1, 30)
plt.legend()
_ = plt.title("Models log-likelihood")
# %%
# Indeed, both models decrease the negated log-likelihood (i.e. maximize the
# marginal log-likelihood) up to an arbitrary cutoff
# defined by the `n_iter` parameter.
#
# Bayesian regressions with polynomial feature expansion
# ======================================================
# Generate synthetic dataset
# --------------------------
# We create a target that is a non-linear function of the input feature.
# Noise following a standard normal distribution (scaled by 1.35) is added.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
rng = np.random.RandomState(0)
n_samples = 110
# sort the data to make plotting easier later
X = np.sort(-10 * rng.rand(n_samples) + 10)
noise = rng.normal(0, 1, n_samples) * 1.35
y = np.sqrt(X) * np.sin(X) + noise
full_data = pd.DataFrame({"input_feature": X, "target": y})
X = X.reshape((-1, 1))
# extrapolation
X_plot = np.linspace(10, 10.4, 10)
y_plot = np.sqrt(X_plot) * np.sin(X_plot)
# Prepend the training inputs so the prediction grid covers both the fitted
# range and the extrapolation segment beyond x = 10.
X_plot = np.concatenate((X, X_plot.reshape((-1, 1))))
y_plot = np.concatenate((y - noise, y_plot))
# %%
# Fit the regressors
# ------------------
#
# Here we try a degree 10 polynomial to potentially overfit, though the bayesian
# linear models regularize the size of the polynomial coefficients. As
# `fit_intercept=True` by default for
# :class:`~sklearn.linear_model.ARDRegression` and
# :class:`~sklearn.linear_model.BayesianRidge`, then
# :class:`~sklearn.preprocessing.PolynomialFeatures` should not introduce an
# additional bias feature. By setting `return_std=True`, the bayesian regressors
# return the standard deviation of the posterior distribution for the model
# parameters.
ard_poly = make_pipeline(
    PolynomialFeatures(degree=10, include_bias=False),
    StandardScaler(),
    ARDRegression(),
).fit(X, y)
brr_poly = make_pipeline(
    PolynomialFeatures(degree=10, include_bias=False),
    StandardScaler(),
    BayesianRidge(),
).fit(X, y)
y_ard, y_ard_std = ard_poly.predict(X_plot, return_std=True)
y_brr, y_brr_std = brr_poly.predict(X_plot, return_std=True)
# %%
# Plotting polynomial regressions with std errors of the scores
# -------------------------------------------------------------
ax = sns.scatterplot(
    data=full_data, x="input_feature", y="target", color="black", alpha=0.75
)
ax.plot(X_plot, y_plot, color="black", label="Ground Truth")
ax.plot(X_plot, y_brr, color="red", label="BayesianRidge with polynomial features")
ax.plot(X_plot, y_ard, color="navy", label="ARD with polynomial features")
# Shaded bands: mean prediction +/- one predicted standard deviation.
ax.fill_between(
    X_plot.ravel(),
    y_ard - y_ard_std,
    y_ard + y_ard_std,
    color="navy",
    alpha=0.3,
)
ax.fill_between(
    X_plot.ravel(),
    y_brr - y_brr_std,
    y_brr + y_brr_std,
    color="red",
    alpha=0.3,
)
ax.legend()
_ = ax.set_title("Polynomial fit of a non-linear feature")
# %%
# The error bars represent one standard deviation of the predicted gaussian
# distribution of the query points. Notice that the ARD regression captures the
# ground truth the best when using the default parameters in both models, but
# further reducing the `lambda_init` hyperparameter of the Bayesian Ridge can
# reduce its bias (see example
# :ref:`sphx_glr_auto_examples_linear_model_plot_bayesian_ridge_curvefit.py`).
# Finally, due to the intrinsic limitations of a polynomial regression, both
# models fail when extrapolating.
| [
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.tight_layout",
"matplotlib.colors.SymLogNorm",
"pandas.DataFrame",
"sklearn.datasets.make_regression",
"numpy.random.RandomState",
"numpy.linspace",
"sklearn.linear_mode... | [((1487, 1592), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': '(100)', 'n_features': '(100)', 'n_informative': '(10)', 'noise': '(8)', 'coef': '(True)', 'random_state': '(42)'}), '(n_samples=100, n_features=100, n_informative=10, noise=8,\n coef=True, random_state=42)\n', (1502, 1592), False, 'from sklearn.datasets import make_regression\n'), ((2021, 2182), 'pandas.DataFrame', 'pd.DataFrame', (["{'Weights of true generative process': true_weights, 'ARDRegression': ard.\n coef_, 'BayesianRidge': brr.coef_, 'LinearRegression': olr.coef_}"], {}), "({'Weights of true generative process': true_weights,\n 'ARDRegression': ard.coef_, 'BayesianRidge': brr.coef_,\n 'LinearRegression': olr.coef_})\n", (2033, 2182), True, 'import pandas as pd\n'), ((2507, 2534), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2517, 2534), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2718), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""linear model"""'], {}), "('linear model')\n", (2702, 2718), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2745), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""coefficients"""'], {}), "('coefficients')\n", (2729, 2745), True, 'import matplotlib.pyplot as plt\n'), ((2746, 2784), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '(0, 0, 1, 0.95)'}), '(rect=(0, 0, 1, 0.95))\n', (2762, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2822), 'matplotlib.pyplot.title', 'plt.title', (['"""Models\' coefficients"""'], {}), '("Models\' coefficients")\n', (2798, 2822), True, 'import matplotlib.pyplot as plt\n'), ((3515, 3562), 'matplotlib.pyplot.plot', 'plt.plot', (['ard_scores'], {'color': '"""navy"""', 'label': '"""ARD"""'}), "(ard_scores, color='navy', label='ARD')\n", (3523, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3619), 'matplotlib.pyplot.plot', 'plt.plot', (['brr_scores'], {'color': '"""red"""', 'label': 
'"""BayesianRidge"""'}), "(brr_scores, color='red', label='BayesianRidge')\n", (3571, 3619), True, 'import matplotlib.pyplot as plt\n'), ((3620, 3648), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log-likelihood"""'], {}), "('Log-likelihood')\n", (3630, 3648), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3673), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (3659, 3673), True, 'import matplotlib.pyplot as plt\n'), ((3674, 3689), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(30)'], {}), '(1, 30)\n', (3682, 3689), True, 'import matplotlib.pyplot as plt\n'), ((3690, 3702), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3700, 3702), True, 'import matplotlib.pyplot as plt\n'), ((3707, 3741), 'matplotlib.pyplot.title', 'plt.title', (['"""Models log-likelihood"""'], {}), "('Models log-likelihood')\n", (3716, 3741), True, 'import matplotlib.pyplot as plt\n'), ((4288, 4312), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (4309, 4312), True, 'import numpy as np\n'), ((4510, 4557), 'pandas.DataFrame', 'pd.DataFrame', (["{'input_feature': X, 'target': y}"], {}), "({'input_feature': X, 'target': y})\n", (4522, 4557), True, 'import pandas as pd\n'), ((4607, 4632), 'numpy.linspace', 'np.linspace', (['(10)', '(10.4)', '(10)'], {}), '(10, 10.4, 10)\n', (4618, 4632), True, 'import numpy as np\n'), ((4738, 4773), 'numpy.concatenate', 'np.concatenate', (['(y - noise, y_plot)'], {}), '((y - noise, y_plot))\n', (4752, 4773), True, 'import numpy as np\n'), ((5902, 5996), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'full_data', 'x': '"""input_feature"""', 'y': '"""target"""', 'color': '"""black"""', 'alpha': '(0.75)'}), "(data=full_data, x='input_feature', y='target', color=\n 'black', alpha=0.75)\n", (5917, 5996), True, 'import seaborn as sns\n'), ((3457, 3478), 'numpy.array', 'np.array', (['ard.scores_'], {}), '(ard.scores_)\n', (3465, 3478), True, 'import numpy as np\n'), 
((3493, 3514), 'numpy.array', 'np.array', (['brr.scores_'], {}), '(brr.scores_)\n', (3501, 3514), True, 'import numpy as np\n'), ((4642, 4657), 'numpy.sqrt', 'np.sqrt', (['X_plot'], {}), '(X_plot)\n', (4649, 4657), True, 'import numpy as np\n'), ((4660, 4674), 'numpy.sin', 'np.sin', (['X_plot'], {}), '(X_plot)\n', (4666, 4674), True, 'import numpy as np\n'), ((1865, 1883), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1881, 1883), False, 'from sklearn.linear_model import ARDRegression, LinearRegression, BayesianRidge\n'), ((1900, 1944), 'sklearn.linear_model.BayesianRidge', 'BayesianRidge', ([], {'compute_score': '(True)', 'n_iter': '(30)'}), '(compute_score=True, n_iter=30)\n', (1913, 1944), False, 'from sklearn.linear_model import ARDRegression, LinearRegression, BayesianRidge\n'), ((1961, 2005), 'sklearn.linear_model.ARDRegression', 'ARDRegression', ([], {'compute_score': '(True)', 'n_iter': '(30)'}), '(compute_score=True, n_iter=30)\n', (1974, 2005), False, 'from sklearn.linear_model import ARDRegression, LinearRegression, BayesianRidge\n'), ((2572, 2618), 'matplotlib.colors.SymLogNorm', 'SymLogNorm', ([], {'linthresh': '(0.001)', 'vmin': '(-80)', 'vmax': '(80)'}), '(linthresh=0.001, vmin=-80, vmax=80)\n', (2582, 2618), False, 'from matplotlib.colors import SymLogNorm\n'), ((4467, 4477), 'numpy.sqrt', 'np.sqrt', (['X'], {}), '(X)\n', (4474, 4477), True, 'import numpy as np\n'), ((4480, 4489), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (4486, 4489), True, 'import numpy as np\n'), ((5397, 5446), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(10)', 'include_bias': '(False)'}), '(degree=10, include_bias=False)\n', (5415, 5446), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), ((5452, 5468), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5466, 5468), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), 
((5474, 5489), 'sklearn.linear_model.ARDRegression', 'ARDRegression', ([], {}), '()\n', (5487, 5489), False, 'from sklearn.linear_model import ARDRegression, LinearRegression, BayesianRidge\n'), ((5533, 5582), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(10)', 'include_bias': '(False)'}), '(degree=10, include_bias=False)\n', (5551, 5582), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), ((5588, 5604), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5602, 5604), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), ((5610, 5625), 'sklearn.linear_model.BayesianRidge', 'BayesianRidge', ([], {}), '()\n', (5623, 5625), False, 'from sklearn.linear_model import ARDRegression, LinearRegression, BayesianRidge\n')] |
import math
import numpy as np
import torch
from collections import defaultdict
from hover.utils.torch_helper import cross_entropy_with_probs
def loss_coteaching_directed(y_student, y_teacher, target, forget_rate):
    """
    Subroutine for loss_coteaching_graph.

    One directed co-teaching step: the teacher's per-sample losses select the
    low-loss ("remembered") subset of the batch, and the student's loss is
    computed on that subset only.

    :param y_student: logits of the learning (tail) classifier.
    :param y_teacher: logits of the selecting (head) classifier.
    :param target: target, which is allowed to be probabilistic.
    :param forget_rate: fraction of highest-loss samples to discard.
    :returns: 1-element tensor with the student's mean loss over the
        remembered subset (unsqueezed so callers can torch.cat the results).
    """
    # Keep ceil((1 - forget_rate) * batch_size) samples.
    num_remember = math.ceil((1 - forget_rate) * target.size(0))
    assert (
        num_remember > 0
    ), f"Expected at least one remembered target, got {num_remember}"
    # Per-sample teacher losses; using .data detaches the values, so the
    # sample selection does not backpropagate through the teacher.
    loss_teacher_detail = cross_entropy_with_probs(y_teacher, target, reduction="none")
    # Indices of the num_remember smallest teacher losses.
    # NOTE(review): np.argsort on a torch tensor relies on the array
    # interface -- assumes a CPU tensor; confirm for CUDA inputs.
    idx_to_learn = np.argsort(loss_teacher_detail.data)[:num_remember]
    loss_student = cross_entropy_with_probs(
        y_student[idx_to_learn], target[idx_to_learn], reduction="mean"
    ).unsqueeze(0)
    return loss_student
def prediction_disagreement(pred_list, reduce=False):
    """
    Pairwise disagreement between predictions.

    Returns a dict-of-dicts where entry [i][j] is the elementwise mismatch
    between predictions i and j (a boolean array), or its mean rate when
    ``reduce`` is true.  The result is symmetric and includes the diagonal.
    """
    num_members = len(pred_list)
    disagreement = defaultdict(dict)
    for first in range(num_members):
        for second in range(first, num_members):
            mismatch = np.not_equal(pred_list[first], pred_list[second])
            entry = np.mean(mismatch) if reduce else mismatch
            disagreement[first][second] = entry
            disagreement[second][first] = entry
    return dict(disagreement)
def loss_coteaching_graph(y_list, target, tail_head_adjacency_list, forget_rate):
    """
    Co-teaching from differences.

    Generalized to a directed-graph representation: every vertex is a
    classifier, and an edge means the 'tail' classifier selects its training
    samples based on the 'head' classifier's losses.

    y_list: list of logits from different classifiers.
    target: target, which is allowed to be probabilistic.
    tail_head_adjacency_list: entry i lists the teachers ('heads') of
        classifier i.
    forget_rate: the proportion of high-loss contributions to discard.
    """
    loss_list = []
    for student in range(len(y_list)):
        teachers = tail_head_adjacency_list[student]
        assert teachers, f"Expected at least one teacher for {student}"
        # One directed co-teaching contribution per teacher edge.
        contributions = [
            loss_coteaching_directed(y_list[student], y_list[teacher], target, forget_rate)
            for teacher in teachers
        ]
        # Average the per-teacher contributions into one loss per student.
        loss_list.append(torch.mean(torch.cat(contributions)))
    return loss_list
def identity_adjacency(info_dict):
    """
    Self-loop topology: every node points to itself.
    """
    num_members = len(info_dict["accuracy"])
    return [[idx] for idx in range(num_members)]
def cyclic_adjacency(info_dict, acc_bar=0.5):
    """
    Ring topology: node i points at node (i + 1) mod N.

    The cyclic edge is only used when both endpoints exceed ``acc_bar``;
    otherwise the node falls back to pointing at itself.
    """
    acc_list = info_dict["accuracy"]
    num_members = len(acc_list)
    refs = []
    for idx in range(num_members):
        successor = (idx + 1) % num_members
        both_good = acc_list[idx] > acc_bar and acc_list[successor] > acc_bar
        refs.append([successor] if both_good else [idx])
    return refs
def cyclic_except_last(info_dict, acc_bar=0.5):
    """
    Cyclic topology over all members except the last, which always points to
    itself.

    A cyclic edge is only used when both endpoints exceed ``acc_bar``;
    otherwise the node points to itself.

    :param info_dict: dict with an "accuracy" list, one entry per member.
    :param acc_bar: minimum accuracy for a cyclic edge to trigger.
    :returns: adjacency list; entry i holds the single node that member i
        should learn from.
    """
    acc_list = info_dict["accuracy"]
    num_members = len(acc_list)
    # Bug fix: the original returned [[-1]] for an empty ensemble, which is
    # not a valid member index.
    if num_members == 0:
        return []
    refs = []
    for i in range(num_members - 1):
        candidate = (i + 1) % (num_members - 1)
        if acc_list[i] > acc_bar and acc_list[candidate] > acc_bar:
            refs.append([candidate])
        else:
            refs.append([i])
    # The last member always learns from itself.
    refs.append([num_members - 1])
    return refs
def accuracy_priority(info_dict, acc_bar=0.5):
    """
    Every node points at the most accurate member that is not itself.

    The redirect only triggers when both the node and its candidate exceed
    ``acc_bar``; otherwise the node points at itself.

    :param info_dict: dict with an "accuracy" list, one entry per member.
    :param acc_bar: minimum accuracy for a redirect to trigger.
    :returns: adjacency list; entry i holds the single node that member i
        should learn from.
    """
    acc_list = info_dict["accuracy"]
    num_members = len(acc_list)
    # Bug fix: with fewer than two members there is no "other" member to
    # point at (the original raised IndexError for a single member).
    if num_members < 2:
        return [[i] for i in range(num_members)]
    # The accuracy ranking does not depend on the loop variable; compute it
    # once instead of re-sorting for every member (was O(n^2 log n)).
    top_candidates = sorted(
        range(num_members), key=lambda j: acc_list[j], reverse=True
    )
    refs = []
    for i in range(num_members):
        candidate = top_candidates[0] if top_candidates[0] != i else top_candidates[1]
        if acc_list[i] > acc_bar and acc_list[candidate] > acc_bar:
            refs.append([candidate])
        else:
            refs.append([i])
    return refs
def disagreement_priority(info_dict, acc_bar=0.5):
    """
    Every node points at the member it disagrees with the most, excluding
    itself.

    The redirect only triggers when both the node and its candidate exceed
    ``acc_bar``; otherwise the node points at itself.
    """
    acc_list = info_dict["accuracy"]
    disagree_dict = info_dict["disagreement_rate"]
    refs = []
    for node in range(len(acc_list)):
        rates = disagree_dict[node]
        # Rank potential teachers by how much they disagree with this node.
        ranked = sorted(rates.keys(), key=lambda other: rates[other], reverse=True)
        pick = ranked[1] if ranked[0] == node else ranked[0]
        if acc_list[node] > acc_bar and acc_list[pick] > acc_bar:
            refs.append([pick])
        else:
            refs.append([node])
    return refs
| [
"torch.cat",
"numpy.not_equal",
"collections.defaultdict",
"numpy.argsort",
"numpy.mean",
"hover.utils.torch_helper.cross_entropy_with_probs"
] | [((475, 536), 'hover.utils.torch_helper.cross_entropy_with_probs', 'cross_entropy_with_probs', (['y_teacher', 'target'], {'reduction': '"""none"""'}), "(y_teacher, target, reduction='none')\n", (499, 536), False, 'from hover.utils.torch_helper import cross_entropy_with_probs\n'), ((906, 923), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (917, 923), False, 'from collections import defaultdict\n'), ((556, 592), 'numpy.argsort', 'np.argsort', (['loss_teacher_detail.data'], {}), '(loss_teacher_detail.data)\n', (566, 592), True, 'import numpy as np\n'), ((627, 720), 'hover.utils.torch_helper.cross_entropy_with_probs', 'cross_entropy_with_probs', (['y_student[idx_to_learn]', 'target[idx_to_learn]'], {'reduction': '"""mean"""'}), "(y_student[idx_to_learn], target[idx_to_learn],\n reduction='mean')\n", (651, 720), False, 'from hover.utils.torch_helper import cross_entropy_with_probs\n'), ((1031, 1071), 'numpy.not_equal', 'np.not_equal', (['pred_list[i]', 'pred_list[j]'], {}), '(pred_list[i], pred_list[j])\n', (1043, 1071), True, 'import numpy as np\n'), ((2409, 2427), 'torch.cat', 'torch.cat', (['_losses'], {}), '(_losses)\n', (2418, 2427), False, 'import torch\n'), ((1124, 1143), 'numpy.mean', 'np.mean', (['_disagreed'], {}), '(_disagreed)\n', (1131, 1143), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run some experiments and visualize the results.
"""
from mdso import SpectralOrdering, SimilarityMatrix, evaluate_ordering
import numpy as np
import matplotlib.pyplot as plt
# Set parameters for data generation
n = 500 # size of matrix
type_noise = 'gaussian' # distribution of the values of the noise
ampl_noise = 3 # amplitude of the noise
type_similarity = 'CircularStrongDecrease' # type of synthetic similarity matrix
# ("Linear" [vs "Circular"], "Banded" [vs "StrongDecrease"])
apply_perm = True # randomly permute the matrix, so that the ground truth is
# not the trivial permutation (1, ..., n).
# Set parameters for the ordering algorithm
k_nbrs = 15 # number of neighbors in the local linear fit in the embedding
n_components = 8 # number of dimensions of the embedding
circular = True if type_similarity[0] == 'C' else False # circular or linear
scaled = 'heuristic' # whether or not to scale the coordinates of the
# embedding so that the larger dimensions have fewer importance
# Build data matrix
data_gen = SimilarityMatrix()
data_gen.gen_matrix(n, type_matrix=type_similarity, apply_perm=apply_perm,
                    noise_ampl=ampl_noise, law=type_noise)
# Call Spectral Ordering method
reord_method = SpectralOrdering(n_components=n_components, k_nbrs=k_nbrs,
                                 circular=circular, scale_embedding=scaled,
                                 norm_laplacian='random_walk')
my_perm = reord_method.fit_transform(data_gen.sim_matrix)
# Densify the cleaned similarity matrix so it can be shown with matshow below.
reord_method.new_sim = reord_method.new_sim.toarray()
# reord_method.fit(data_gen.sim_matrix)
score = evaluate_ordering(my_perm, data_gen.true_perm,
                          circular=circular)
print("Kendall-Tau score = {}".format(score))
# Inverse of the ground-truth permutation, used to display matrices in the
# original (unshuffled) order.
inv_perm = np.argsort(data_gen.true_perm)
# Display some results
fig, axes = plt.subplots(2, 2)
axes[0, 0].tick_params(
    axis='x', # changes apply to the x-axis
    which='both', # both major and minor ticks are affected
    bottom=False, # ticks along the bottom edge are off
    top=False, # ticks along the top edge are off
    labelbottom=False) # labels along the bottom edge are off
axes[0, 0].matshow(data_gen.sim_matrix[:, inv_perm][inv_perm, :])
axes[0, 0].set_title("raw matrix")
axes[0, 1].tick_params(
    axis='x', # changes apply to the x-axis
    which='both', # both major and minor ticks are affected
    bottom=False, # ticks along the bottom edge are off
    top=False, # ticks along the top edge are off
    labelbottom=False) # labels along the bottom edge are off
axes[0, 1].matshow(reord_method.new_sim[:, inv_perm][inv_perm, :])
axes[0, 1].set_title("new matrix")
axes[1, 0].scatter(reord_method.embedding[:, 0], reord_method.embedding[:, 1],
                   c=data_gen.true_perm)
axes[1, 0].set_title("2d embedding")
axes[1, 1].scatter(np.arange(data_gen.n),
                   data_gen.true_perm[reord_method.ordering])
axes[1, 1].set_title("perm vs ground truth")
plt.show()
# Standalone figure: first two embedding coordinates of the noisy similarity.
fig = plt.figure(); plt.scatter(reord_method.embedding[:, 0], reord_method.embedding[:, 1],
                              c=data_gen.true_perm);
plt.xlabel('f_1', fontsize=16);
plt.ylabel('f_2', fontsize=16);
plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, labelleft=False, left=False);
# NOTE(review): hard-coded absolute user path; breaks on other machines.
figpath='/Users/antlaplante/THESE/manuscript/thesis/figs/reconstructing_by_spectral_figs/embedding_noisy_circular_ampl_3.pdf'
plt.savefig(figpath, bbox_inches='tight', transparent=True, dpi=150)
# Re-embed the cleaned similarity matrix (unnormalized Laplacian this time)
# and save the corresponding figure.
reord_method2 = SpectralOrdering(n_components=n_components, k_nbrs=k_nbrs,
                                  circular=circular, scale_embedding=scaled,
                                  norm_laplacian=None)
reord_method2.fit(reord_method.new_sim)
fig = plt.figure(); plt.scatter(reord_method2.embedding[:, 0], reord_method2.embedding[:, 1],
                              c=data_gen.true_perm);
plt.xlabel('f_1', fontsize=16);
plt.ylabel('f_2', fontsize=16);
plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, labelleft=False, left=False);
# NOTE(review): hard-coded absolute user path; breaks on other machines.
figpath='/Users/antlaplante/THESE/manuscript/thesis/figs/reconstructing_by_spectral_figs/embedding_cleaned_circular_ampl_3.pdf'
plt.savefig(figpath, bbox_inches='tight', transparent=True, dpi=150)
"matplotlib.pyplot.savefig",
"mdso.SimilarityMatrix",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.argsort",
"mdso.evaluate_ordering",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotl... | [((1086, 1104), 'mdso.SimilarityMatrix', 'SimilarityMatrix', ([], {}), '()\n', (1102, 1104), False, 'from mdso import SpectralOrdering, SimilarityMatrix, evaluate_ordering\n'), ((1287, 1423), 'mdso.SpectralOrdering', 'SpectralOrdering', ([], {'n_components': 'n_components', 'k_nbrs': 'k_nbrs', 'circular': 'circular', 'scale_embedding': 'scaled', 'norm_laplacian': '"""random_walk"""'}), "(n_components=n_components, k_nbrs=k_nbrs, circular=\n circular, scale_embedding=scaled, norm_laplacian='random_walk')\n", (1303, 1423), False, 'from mdso import SpectralOrdering, SimilarityMatrix, evaluate_ordering\n'), ((1644, 1709), 'mdso.evaluate_ordering', 'evaluate_ordering', (['my_perm', 'data_gen.true_perm'], {'circular': 'circular'}), '(my_perm, data_gen.true_perm, circular=circular)\n', (1661, 1709), False, 'from mdso import SpectralOrdering, SimilarityMatrix, evaluate_ordering\n'), ((1794, 1824), 'numpy.argsort', 'np.argsort', (['data_gen.true_perm'], {}), '(data_gen.true_perm)\n', (1804, 1824), True, 'import numpy as np\n'), ((1860, 1878), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (1872, 1878), True, 'import matplotlib.pyplot as plt\n'), ((3036, 3046), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3044, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3066, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3168), 'matplotlib.pyplot.scatter', 'plt.scatter', (['reord_method.embedding[:, 0]', 'reord_method.embedding[:, 1]'], {'c': 'data_gen.true_perm'}), '(reord_method.embedding[:, 0], reord_method.embedding[:, 1], c=\n data_gen.true_perm)\n', (3081, 3168), True, 'import matplotlib.pyplot as plt\n'), ((3165, 3195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f_1"""'], {'fontsize': '(16)'}), "('f_1', fontsize=16)\n", (3175, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3227), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""f_2"""'], {'fontsize': '(16)'}), "('f_2', fontsize=16)\n", (3207, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3229, 3348), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)', 'labelleft': '(False)', 'left': '(False)'}), "(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, labelleft=False, left=False)\n", (3244, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3542), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figpath'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': '(150)'}), "(figpath, bbox_inches='tight', transparent=True, dpi=150)\n", (3485, 3542), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3687), 'mdso.SpectralOrdering', 'SpectralOrdering', ([], {'n_components': 'n_components', 'k_nbrs': 'k_nbrs', 'circular': 'circular', 'scale_embedding': 'scaled', 'norm_laplacian': 'None'}), '(n_components=n_components, k_nbrs=k_nbrs, circular=\n circular, scale_embedding=scaled, norm_laplacian=None)\n', (3576, 3687), False, 'from mdso import SpectralOrdering, SimilarityMatrix, evaluate_ordering\n'), ((3793, 3805), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3803, 3805), True, 'import matplotlib.pyplot as plt\n'), ((3807, 3907), 'matplotlib.pyplot.scatter', 'plt.scatter', (['reord_method2.embedding[:, 0]', 'reord_method2.embedding[:, 1]'], {'c': 'data_gen.true_perm'}), '(reord_method2.embedding[:, 0], reord_method2.embedding[:, 1], c\n =data_gen.true_perm)\n', (3818, 3907), True, 'import matplotlib.pyplot as plt\n'), ((3904, 3934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f_1"""'], {'fontsize': '(16)'}), "('f_1', fontsize=16)\n", (3914, 3934), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3966), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f_2"""'], {'fontsize': '(16)'}), "('f_2', fontsize=16)\n", (3946, 3966), True, 'import 
matplotlib.pyplot as plt\n'), ((3968, 4087), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)', 'labelleft': '(False)', 'left': '(False)'}), "(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, labelleft=False, left=False)\n", (3983, 4087), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4282), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figpath'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': '(150)'}), "(figpath, bbox_inches='tight', transparent=True, dpi=150)\n", (4225, 4282), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2927), 'numpy.arange', 'np.arange', (['data_gen.n'], {}), '(data_gen.n)\n', (2915, 2927), True, 'import numpy as np\n')] |
from distutils.core import setup,Extension
from Cython.Build import cythonize
import numpy
setup(
ext_modules = [Extension('_adjustments',['_adjustments.c'],
include_dirs = [numpy.get_include()]
)],
)
setup(
ext_modules=cythonize('_adjustments.pyx'),
include_dirs = [numpy.get_include()]
)
'''
setup(
ext_modules=cythonize('_adjustments.pyx'),
include_dirs = [numpy.get_include()]
)'''
| [
"Cython.Build.cythonize",
"numpy.get_include"
] | [((251, 280), 'Cython.Build.cythonize', 'cythonize', (['"""_adjustments.pyx"""'], {}), "('_adjustments.pyx')\n", (260, 280), False, 'from Cython.Build import cythonize\n'), ((300, 319), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (317, 319), False, 'import numpy\n'), ((191, 210), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (208, 210), False, 'import numpy\n')] |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import os
import pandas as pd
import sqlalchemy
import yaml
from gzip import zlib
from pickle import loads
# Reference time constant -- not used in this part of the script; presumably a
# normalization baseline for later plotting code (TODO confirm).
ref_time = 3.0
# Maps benchmark slugs (as stored in the database) to display names.
benchmarks = {'bnn': 'BNN', 'spam-filter': 'Spam filter', '3d-rendering': '3D rendering',
              'digit-recognition': 'Digit recognition', 'optical-flow': 'Optical flow',
              'face-detection': 'Face detection'}
# Show all rows when printing DataFrames (no truncation).
pd.set_option('display.max_rows', None)
def add_worst_case(data):
    """
    Append a sentinel "worst case" row to one experiment's result table.

    The sentinel is a copy of the table's last row with ``time`` set to
    infinity and ``run_time`` set to 0.0 (applied per experiment via
    groupby-apply below, presumably so later time-based filtering always
    finds a row -- confirm against the threshold logic).

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain 'time' and 'run_time' columns and be non-empty
        (an empty frame raises IndexError on ``iloc[-1]``).

    Returns
    -------
    pandas.DataFrame
        A new frame with one extra row; ``data`` itself is not modified.
    """
    # Copy explicitly: ``data.iloc[-1]`` may alias the frame's data, and
    # ``DataFrame.append`` (used previously) was deprecated in pandas 1.4
    # and removed in 2.0 -- use pd.concat instead.
    row = data.iloc[-1].copy()
    row['time'] = float('inf')
    row['run_time'] = 0.0
    return pd.concat([data, row.to_frame().T])
# ---- Load tuning results from the experiment database and clean them up. ----
# NOTE(review): the *string* "__file__" (not the __file__ variable) is passed
# here, so this resolves relative to the CWD; script_dir also appears unused.
script_dir = os.path.dirname(os.path.realpath("__file__"))
with open('../../cfg.yml', 'r') as cfg_file:
    data = cfg_file.read()
tuner_cfg = yaml.safe_load(data)
# Switch the SQLAlchemy driver to PyMySQL.
database = tuner_cfg['database'].replace('mysql', 'mysql+pymysql')
engine = sqlalchemy.create_engine(database)
# NOTE(review): SQL AND binds tighter than OR, so the LIKE clause only
# restricts the "pipe_zcu102" branch of the WHERE -- confirm this is intended.
query = 'select program.name as benchmark, args, proc_freq, tuning_run.start_date, collection_date,' \
        ' result.state, run_time, fidelity, tuning_run.name from result' \
        ' inner join configuration on result.configuration_id = configuration.id' \
        ' inner join tuning_run on tuning_run.id = result.tuning_run_id' \
        ' inner join program_version on program_version.id = program_version_id' \
        ' inner join program on program.id = program_version.program_id' \
        ' inner join platform on platform.id = platform_id' \
        ' inner join test on test.id = test_id' \
        ' where test.name = "opentuner_zcu102" or test.name = "bayes_zcu102" or ' \
        ' test.name = "pipe_zcu102" and tuning_run.name like "bnn_4x1.1x2.1x2_%%"'
data = pd.read_sql_query(query, engine)
# I accidentally forgot to set the seed for some OpenTuner runs, but I can extract it from the name.
data['seed'] = data['name'].str.extract(r'.*_(\d+)$').astype(int)
data = data.drop(columns=['name'])
# Get command line arguments of each tuning run (zlib-compressed pickles).
args = data['args'].transform(zlib.decompress)
args = args.transform(lambda field: loads(field, encoding='latin1'))
data = data.drop(columns=['args'])
# Get the search technique of each tuning run.
data['technique'] = args.transform(lambda field: field.technique[0])
# Determine the maximum fidelity (0 for single-fidelity techniques).
data.loc[data['technique'] == 'PipelinedMultiFidBayes', 'max_fidelity'] = 3
data.loc[data['technique'] == 'AUCBanditMetaTechniqueA', 'max_fidelity'] = 0
data.loc[data['technique'] == 'Bayes', 'max_fidelity'] = 0
# Replace extremely large values with infinity.
data.loc[data['run_time'] > 1e30, 'run_time'] = float('inf')
# Set runtime of failed builds to infinity to make sure that they will be ignored later.
data.loc[data['state'] != 'OK', 'run_time'] = float('inf')
data = data.drop(columns=['state'])
# Set runtime of incomplete builds to infinity to make sure that they will be ignored later.
data.loc[data['fidelity'] != data['max_fidelity'], 'run_time'] = float('inf')
data = data.drop(columns=['fidelity', 'max_fidelity'])
# Convert the latency to seconds (run_time is stored in clock cycles).
data['run_time'] = data['run_time'] / data['proc_freq']
data = data.drop(columns=['proc_freq'])
# Compute the tuning time in days.
data['time'] = (data['collection_date'] - data['start_date']).dt.total_seconds() / 86400.0
data = data.drop(columns=['start_date', 'collection_date'])
# Add a worst-case runtime to each experiment (see add_worst_case above).
data = data.groupby(['technique', 'benchmark', 'seed']).apply(add_worst_case).reset_index(drop=True)
# ---- Compute the latency threshold from the OpenTuner baseline runs. ----
# Extract OpenTuner results.
data_opentuner = data[data['technique'] == 'AUCBanditMetaTechniqueA']
data_opentuner = data_opentuner.drop(columns=['technique'])
# Cut the OpenTuner tuning runs off after the specified number of days.
thresholds = data_opentuner[data_opentuner['time'] < ref_time]
# Compute the minimum latency. We sort in case there are ties.
thresholds = thresholds.sort_values('time')
thresholds = thresholds.groupby(['benchmark', 'seed']).min()
thresholds = thresholds.drop(columns=['time'])
# Average the minimum latency over the seeds to get the latency threshold.
thresholds = thresholds.groupby(['benchmark']).mean()
# Rename the runtime column.
thresholds = thresholds.rename(columns={'run_time': 'thres'})
# ---- Determine when the threshold is reached by single-fidelity BO. ----
# Extract single-fidelity Bayesian optimization results.
data_bayes = data[data['technique'] == 'Bayes']
data_bayes = data_bayes.drop(columns=['technique'])
# Remove results that are worse than the threshold.
data_bayes = data_bayes.set_index('benchmark').join(thresholds).reset_index()
data_bayes = data_bayes[data_bayes['run_time'] <= data_bayes['thres']]
data_bayes = data_bayes.drop(columns=['run_time', 'thres'])
# Find the earliest time at which the threshold was not exceeded.
data_bayes = data_bayes.groupby(['benchmark', 'seed']).min().reset_index()
# ---- Determine when the threshold is reached by the BO pipeline. ----
# Extract Bayesian optimization pipeline results.
data_pipe = data[data['technique'] == 'PipelinedMultiFidBayes']
data_pipe = data_pipe.drop(columns=['technique'])
# Remove results that are worse than the threshold.
data_pipe = data_pipe.set_index('benchmark').join(thresholds).reset_index()
data_pipe = data_pipe[data_pipe['run_time'] <= data_pipe['thres']]
data_pipe = data_pipe.drop(columns=['run_time', 'thres'])
# Find the earliest time at which the threshold was not exceeded.
data_pipe = data_pipe.groupby(['benchmark', 'seed']).min().reset_index()
# ---- Determine when the threshold is reached by OpenTuner. ----
# Average the run times over all seeds, tracking the running best per seed.
run_times = {}
new_data = {}
# NOTE(review): this counts distinct *benchmarks*, but it is used below as the
# length of a per-seed list (indexed by `seed`) -- verify this should not be
# data_opentuner['seed'].nunique().
seed_cnt = data_opentuner['benchmark'].nunique()
# NOTE(review): `id` shadows the builtin; the loop variable is unused anyway.
for id, row in data_opentuner.sort_values('time').iterrows():
    benchmark = row['benchmark']
    seed = row['seed']
    time = row['time']
    if benchmark not in run_times:
        run_times[benchmark] = [float('inf')] * seed_cnt
    old_run_time = np.mean(run_times[benchmark])
    run_times[benchmark][seed] = min(run_times[benchmark][seed], row['run_time'])
    run_time = np.mean(run_times[benchmark])
    # Only record points where the seed-averaged best latency improved.
    if run_time < old_run_time:
        new_data.setdefault(benchmark, {})[time] = run_time
# Flatten the nested dict back into a DataFrame.
# NOTE(review): the inner loop variable `data` shadows the main results frame;
# it is rebuilt from data_opentuner later, but the name reuse is fragile.
data_opentuner = []
for benchmark, data in new_data.items():
    for time, run_time in data.items():
        data_opentuner.append({'benchmark': benchmark, 'time': time, 'run_time': run_time})
data_opentuner = pd.DataFrame(data_opentuner)
# Remove results that are worse than the threshold.
data_opentuner = data_opentuner.set_index('benchmark').join(thresholds).reset_index()
data_opentuner = data_opentuner[data_opentuner['run_time'] <= data_opentuner['thres']]
data_opentuner = data_opentuner.drop(columns=['run_time', 'thres'])
# Find the earliest time at which the threshold was not exceeded.
data_opentuner = data_opentuner.groupby(['benchmark']).min()
# ---- Combine the per-technique frames and plot the speedup box plots. ----
data = data_opentuner.join(data_bayes.set_index('benchmark'), lsuffix='_opentuner', rsuffix='_bayes')
data_pipe = data_pipe.rename(columns={'time': 'time_pipe'})
data = data.set_index('seed', append=True).join(data_pipe.set_index(['benchmark', 'seed']))
# Compute the speedups (OpenTuner time-to-threshold divided by BO's).
data['speedup_bayes'] = data['time_opentuner'] / data['time_bayes']
data['speedup_pipe'] = data['time_opentuner'] / data['time_pipe']
# Increase the font size.
plt.rcParams.update({'font.size': 15})
# Two boxes per benchmark (SF BO then MF BO), one label per benchmark.
values = []
labels = []
for benchmark, group in data.groupby('benchmark'):
    for technique in ['bayes', 'pipe']:
        values.append(group['speedup_' + technique])
    labels.append(benchmarks[benchmark])
box = plt.boxplot(values, patch_artist=True, boxprops=dict(facecolor='white', color='black'))
seed_cnt = data.reset_index()['seed'].nunique()
plt.title('ZCU102, {} repetitions'.format(seed_cnt))
plt.ylabel('Speedup')
axes = plt.gca()
axes.set_yscale('log')
# Major ticks separate benchmark pairs; minor ticks sit between the two boxes
# of each pair and carry the benchmark label.
axes.xaxis.set_major_locator(ticker.FixedLocator([0.5, 2.5, 4.5, 6.5, 8.5, 10.5, 12.5]))
axes.xaxis.set_minor_locator(ticker.FixedLocator([1.5, 3.5, 5.5, 7.5, 9.5, 11.5]))
axes.xaxis.set_major_formatter(ticker.NullFormatter())
axes.xaxis.set_minor_formatter(ticker.FixedFormatter(labels))
# Hide the minor tick marks themselves, keep only the rotated labels.
for tick in axes.xaxis.get_minor_ticks():
    tick.tick1line.set_markersize(0)
    tick.tick2line.set_markersize(0)
    tick.label1.set_horizontalalignment('right')
axes.tick_params(axis="x", which="minor", rotation=30)
# Color alternate boxes: green = MF BO (pipe), red = SF BO (bayes).
plt.setp(box['boxes'][1::2], color='#C0FFC0')
plt.setp(box['boxes'][::2], color='#FFC0C0')
plt.setp(box['boxes'], edgecolor='black')
plt.setp(box['medians'], color='black')
plt.grid(axis='y')
plt.ylim([0.7, None])
plt.legend(box["boxes"][:2], ['SF BO', 'MF BO'])
plt.savefig('tuning_time.pdf', bbox_inches='tight')
# Compute the average speedups.
avg_speedup_bayes = data['speedup_bayes'].mean()
avg_speedup_pipe = data['speedup_pipe'].mean()
# Show the average speedup.
print('Average tuning time speedup without pipeline:', avg_speedup_bayes)
print('Average tuning time speedup with pipeline:', avg_speedup_pipe)
# Output LaTeX macros with the averages for use in the paper.
with open('../callouts/tuning_time.tex', 'w') as output_file:
    output_file.write('\\def \\avgspeedupbayes {{{:0.1f}}}\n'.format(avg_speedup_bayes))
    output_file.write('\\def \\avgspeeduppipe {{{:0.1f}}}\n'.format(avg_speedup_pipe))
| [
"numpy.mean",
"yaml.safe_load",
"matplotlib.pyplot.gca",
"pandas.set_option",
"pandas.DataFrame",
"matplotlib.pyplot.setp",
"matplotlib.ticker.FixedLocator",
"matplotlib.pyplot.rcParams.update",
"pandas.read_sql_query",
"pickle.loads",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"o... | [((463, 502), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (476, 502), True, 'import pandas as pd\n'), ((783, 803), 'yaml.safe_load', 'yaml.safe_load', (['data'], {}), '(data)\n', (797, 803), False, 'import yaml\n'), ((881, 915), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['database'], {}), '(database)\n', (905, 915), False, 'import sqlalchemy\n'), ((1703, 1735), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'engine'], {}), '(query, engine)\n', (1720, 1735), True, 'import pandas as pd\n'), ((6428, 6456), 'pandas.DataFrame', 'pd.DataFrame', (['data_opentuner'], {}), '(data_opentuner)\n', (6440, 6456), True, 'import pandas as pd\n'), ((7348, 7386), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 15}"], {}), "({'font.size': 15})\n", (7367, 7386), True, 'import matplotlib.pyplot as plt\n'), ((7792, 7813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speedup"""'], {}), "('Speedup')\n", (7802, 7813), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7830), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7828, 7830), True, 'import matplotlib.pyplot as plt\n'), ((8363, 8408), 'matplotlib.pyplot.setp', 'plt.setp', (["box['boxes'][1::2]"], {'color': '"""#C0FFC0"""'}), "(box['boxes'][1::2], color='#C0FFC0')\n", (8371, 8408), True, 'import matplotlib.pyplot as plt\n'), ((8409, 8453), 'matplotlib.pyplot.setp', 'plt.setp', (["box['boxes'][::2]"], {'color': '"""#FFC0C0"""'}), "(box['boxes'][::2], color='#FFC0C0')\n", (8417, 8453), True, 'import matplotlib.pyplot as plt\n'), ((8454, 8495), 'matplotlib.pyplot.setp', 'plt.setp', (["box['boxes']"], {'edgecolor': '"""black"""'}), "(box['boxes'], edgecolor='black')\n", (8462, 8495), True, 'import matplotlib.pyplot as plt\n'), ((8496, 8535), 'matplotlib.pyplot.setp', 'plt.setp', (["box['medians']"], {'color': '"""black"""'}), "(box['medians'], color='black')\n", (8504, 8535), True, 'import 
matplotlib.pyplot as plt\n'), ((8536, 8554), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""'}), "(axis='y')\n", (8544, 8554), True, 'import matplotlib.pyplot as plt\n'), ((8555, 8576), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.7, None]'], {}), '([0.7, None])\n', (8563, 8576), True, 'import matplotlib.pyplot as plt\n'), ((8577, 8625), 'matplotlib.pyplot.legend', 'plt.legend', (["box['boxes'][:2]", "['SF BO', 'MF BO']"], {}), "(box['boxes'][:2], ['SF BO', 'MF BO'])\n", (8587, 8625), True, 'import matplotlib.pyplot as plt\n'), ((8626, 8677), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tuning_time.pdf"""'], {'bbox_inches': '"""tight"""'}), "('tuning_time.pdf', bbox_inches='tight')\n", (8637, 8677), True, 'import matplotlib.pyplot as plt\n'), ((669, 697), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (685, 697), False, 'import os\n'), ((5969, 5998), 'numpy.mean', 'np.mean', (['run_times[benchmark]'], {}), '(run_times[benchmark])\n', (5976, 5998), True, 'import numpy as np\n'), ((6096, 6125), 'numpy.mean', 'np.mean', (['run_times[benchmark]'], {}), '(run_times[benchmark])\n', (6103, 6125), True, 'import numpy as np\n'), ((7883, 7941), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['[0.5, 2.5, 4.5, 6.5, 8.5, 10.5, 12.5]'], {}), '([0.5, 2.5, 4.5, 6.5, 8.5, 10.5, 12.5])\n', (7902, 7941), True, 'import matplotlib.ticker as ticker\n'), ((7972, 8024), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['[1.5, 3.5, 5.5, 7.5, 9.5, 11.5]'], {}), '([1.5, 3.5, 5.5, 7.5, 9.5, 11.5])\n', (7991, 8024), True, 'import matplotlib.ticker as ticker\n'), ((8057, 8079), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (8077, 8079), True, 'import matplotlib.ticker as ticker\n'), ((8112, 8141), 'matplotlib.ticker.FixedFormatter', 'ticker.FixedFormatter', (['labels'], {}), '(labels)\n', (8133, 8141), True, 'import matplotlib.ticker as ticker\n'), ((2072, 2103), 'pickle.loads', 'loads', 
(['field'], {'encoding': '"""latin1"""'}), "(field, encoding='latin1')\n", (2077, 2103), False, 'from pickle import loads\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 09:50:28 2017
@author: smullally
"""
import sys
import os
import time
import re
import json
import mastAPITools as api
try: # Python 3.x
from urllib.parse import quote as urlencode
from urllib.request import urlretrieve
except ImportError: # Python 2.x
from urllib import pathname2url as urlencode
from urllib import urlretrieve
try: # Python 3.x
import http.client as httplib
except ImportError: # Python 2.x
import httplib
from astropy.table import Table
import numpy as np
import pprint
pp = pprint.PrettyPrinter(indent=4)
import pandas as p
#%%
# Fetch MAST data products for a Kepler target (KIC ID) via the mashup API.
#I want to try to get a particular Kepler Id through the mastQuery API
#This does not require a cone search, only knowledge of the KIC ID.
kicid='011904151' #Kepler 10
#Step 0 get ra and dec for the kepler ID of interest
#Step one should be to ask if MAST has any data I want
#Step two should be to download the data (i.e. put it in the basket and retrieve)
#Step 0 -- get RA and Dec via the name resolver service.
objectOfInterest = 'KIC %s' % kicid
resolverRequest = {'service':'Mast.Name.Lookup',
                     'params':{'input':objectOfInterest,
                               'format':'json'},
                     }
headers,resolvedObjectString = api.mastQuery(resolverRequest)
resolvedObject = json.loads(resolvedObjectString)
print("Information about KIC Object")
pp.pprint(resolvedObject)
objRa = resolvedObject['resolvedCoordinate'][0]['ra']
objDec = resolvedObject['resolvedCoordinate'][0]['decl']
#Step 1
#Ask for data products within a cone search of that RA and Dec
# NOTE(review): radius is passed as coneradius_arcsec/60 -- verify the API's
# 'radius' parameter is in arcminutes (not degrees).
coneradius_arcsec = 5
mastRequest = {'service':'Mast.Caom.Cone',
               'params':{'ra':objRa,
                         'dec':objDec,
                         'radius':coneradius_arcsec/60},
               'format':'json',
               'pagesize':2000,
               'page':1,
               'removenullcolumns':True,
               'removecache':True}
headers,mastDataString = api.mastQuery(mastRequest)
mastData = json.loads(mastDataString)
pp.pprint(mastData['fields'][:25])
#Limit that search to Kepler data with the original object ID.
#%%
#Limit that data to those with the right target_name and instrument_name
print(mastData.keys())
print("Query status:",mastData['status'])
#Convert the data to pandas dataframe
dfData=p.DataFrame.from_dict(mastData['data'])
print(dfData[:3])
#Create a dataframe of just those I want:
# long cadence (1800 s) and short cadence (60 s) masks for this target.
wantdata=(dfData['target_name'] == 'kplr' + kicid) & (dfData['instrument_name']=='Kepler')
lcwant=(dfData['t_exptime'] == 1800)
scwant=(dfData['t_exptime'] == 60)
getdata=dfData[wantdata & lcwant]
# ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; it was only an
# alias for the builtin ``int``, so calling ``int`` directly is equivalent
# (it still requires the mask to select a single observation row).
obsid = int(dfData[wantdata & lcwant]['obsid'])
#Request The Products for this observation
productRequest = {'service':'Mast.Caom.Products',
                  'params':{'obsid':obsid},
                  'format':'json',
                  'pagesize':100,
                  'page':1}
headers,obsProductsString = api.mastQuery(productRequest)
obsProducts = json.loads(obsProductsString)
print("Number of data products:",len(obsProducts["data"]))
print("Product information column names:")
pp.pprint(obsProducts['fields'])
dfProd = p.DataFrame.from_dict(obsProducts["data"])
# Select quarterly calibrated light curves ('CLC' + 'Q' in the description)
# plus any Data Validation products.
wantprod=(dfProd['description'].str.contains('CLC')) & (dfProd['description'].str.contains('Q'))
wantdv=(dfProd['description'].str.contains('Data Validation'))
want=wantprod | wantdv
#Get the URLS
uris=np.array(dfProd[want]['dataURI'])
filenames=np.array(dfProd[want]['productFilename'])
#%%
#Direct Download of Data, now done through mastAPITools.py
#Just need a list of URIs and FileNames.
# NOTE(review): getNewOnly is defined here but retrieveMastData is called with
# the literal True instead of this variable.
getNewOnly=True #If True then won't download if the file already exists on disk
storedir="/Users/smullally/MastData/Kepler/" + kicid+'/'
api.retrieveMastData(uris,filenames,localDir=storedir,getNewOnly=True)
#%%
#Here is another way to find the data without doing a cone search.
#Mast.Caom.Filtered
#kicid=011904151
kicid='011904151'
kepid = 'kplr' + kicid
inst = 'Kepler'
exptime = 1800
#requestFilters = [{"paramName":"filters",
#                   "values":["KEPLER"]},
#                   {"paramName":"obs_collection",
#                    "values":["Kepler"]
#                    },
#                    {"paramName":"t_exptime",
#                     "values":[exptime],
#                     "separator":';'
#                     },
#                     {"paramName":"target_name",
#                      "values":[kepid]
#                      },
#                      {"paramName":"obs_id",
#                       "values":[],
#                       "freeText":"%lc%"
#                       }
#                    ]
# Filtered query: Kepler long-cadence ('lc') observations for this KIC ID.
requestFilters = [
        {"paramName":"filters",
         "values":["KEPLER"],
         "separator":";"
                },
        {"paramName":"obs_id",
         "values":[],
         "freeText":"%lc%"},
#            },
        {"paramName":"target_name",
         "values":[],
         "freeText":"%"+kicid+"%"}
        ]
mashupRequest = {"service":"Mast.Caom.Filtered",
                 "format":"json",
                 "params":{
                         #"columns":"COUNT_BIG(*)",
                         "columns":"*",
                         "filters":requestFilters
                         }}
headers,outString = api.mastQuery(mashupRequest)
countData = json.loads(outString)
pp.pprint(countData)
#%%
#Let's try an epic search for for K2, do a narrow cone search
#
import pandas as p
epicid=228813918
targetName="EPIC %u" % epicid
radius_arcsec = 8
mastData=api.targetNameConeSearch(targetName, radius_arcsec)
pp.pprint(mastData['data'][:25])
data=p.DataFrame.from_dict(mastData['data'])
uniqueprojects=data.project.unique()
uniquefilters=data.filters.unique()
print(uniqueprojects)
print(uniquefilters)
#%%
#Try to get a list of targets observed by Kepler spacecraft wtih
# observations covering a given date (converted to MJD via astropy).
start="2016-07-13 02:04:00"
from astropy.time import Time
times=[start]
t=Time(times,format='iso', scale='utc')
print(t)
print(t.mjd)
# Filtered query: Kepler/K2 observations starting within +/- 0.5 days of t.
requestFilters = [
        {"paramName":"project",
         "values":["K2","Kepler"],
         "separator":";"
                },
        {"paramName":"t_min",
         "values":[{"min":t.mjd[0]-.5 , "max":t.mjd[0]+.5}]
        },
        ]
mashupRequest = {"service":"Mast.Caom.Filtered",
                 "format":"json",
                 "params":{
                         #"columns":"COUNT_BIG(*)",
                         "columns":"*",
                         "filters":requestFilters
                         }}
headers,outString = api.mastQuery(mashupRequest)
countData = json.loads(outString)
# NOTE(review): if json.loads returned a dict here, the [:10] slice would
# raise TypeError -- confirm the filtered service returns a JSON array.
pp.pprint(countData[:10])
#%%
#TIC Stuff: look up a single TESS Input Catalog entry by ID.
afilter=[{"paramName":"ID","values":["1234567"]}]
service="Mast.Catalogs.Filtered.Tic"
aformat="json"
cols="*"
request={"service":service,"format":aformat,
         "params":{"columns":cols,"filters":afilter}}
headers,outString = api.mastQuery(request)
outData=json.loads(outString)
mydata=p.DataFrame.from_dict(outData['data'])
print(mydata.ID)
print(mydata.Tmag)
| [
"pandas.DataFrame.from_dict",
"json.loads",
"astropy.time.Time",
"pprint.PrettyPrinter",
"numpy.int",
"mastAPITools.mastQuery",
"numpy.array",
"mastAPITools.retrieveMastData",
"mastAPITools.targetNameConeSearch"
] | [((604, 634), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (624, 634), False, 'import pprint\n'), ((1294, 1324), 'mastAPITools.mastQuery', 'api.mastQuery', (['resolverRequest'], {}), '(resolverRequest)\n', (1307, 1324), True, 'import mastAPITools as api\n'), ((1343, 1375), 'json.loads', 'json.loads', (['resolvedObjectString'], {}), '(resolvedObjectString)\n', (1353, 1375), False, 'import json\n'), ((2014, 2040), 'mastAPITools.mastQuery', 'api.mastQuery', (['mastRequest'], {}), '(mastRequest)\n', (2027, 2040), True, 'import mastAPITools as api\n'), ((2053, 2079), 'json.loads', 'json.loads', (['mastDataString'], {}), '(mastDataString)\n', (2063, 2079), False, 'import json\n'), ((2370, 2409), 'pandas.DataFrame.from_dict', 'p.DataFrame.from_dict', (["mastData['data']"], {}), "(mastData['data'])\n", (2391, 2409), True, 'import pandas as p\n'), ((2677, 2719), 'numpy.int', 'np.int', (["dfData[wantdata & lcwant]['obsid']"], {}), "(dfData[wantdata & lcwant]['obsid'])\n", (2683, 2719), True, 'import numpy as np\n'), ((2984, 3013), 'mastAPITools.mastQuery', 'api.mastQuery', (['productRequest'], {}), '(productRequest)\n', (2997, 3013), True, 'import mastAPITools as api\n'), ((3029, 3058), 'json.loads', 'json.loads', (['obsProductsString'], {}), '(obsProductsString)\n', (3039, 3058), False, 'import json\n'), ((3205, 3247), 'pandas.DataFrame.from_dict', 'p.DataFrame.from_dict', (["obsProducts['data']"], {}), "(obsProducts['data'])\n", (3226, 3247), True, 'import pandas as p\n'), ((3451, 3484), 'numpy.array', 'np.array', (["dfProd[want]['dataURI']"], {}), "(dfProd[want]['dataURI'])\n", (3459, 3484), True, 'import numpy as np\n'), ((3495, 3536), 'numpy.array', 'np.array', (["dfProd[want]['productFilename']"], {}), "(dfProd[want]['productFilename'])\n", (3503, 3536), True, 'import numpy as np\n'), ((3780, 3853), 'mastAPITools.retrieveMastData', 'api.retrieveMastData', (['uris', 'filenames'], {'localDir': 'storedir', 'getNewOnly': 
'(True)'}), '(uris, filenames, localDir=storedir, getNewOnly=True)\n', (3800, 3853), True, 'import mastAPITools as api\n'), ((5555, 5583), 'mastAPITools.mastQuery', 'api.mastQuery', (['mashupRequest'], {}), '(mashupRequest)\n', (5568, 5583), True, 'import mastAPITools as api\n'), ((5596, 5617), 'json.loads', 'json.loads', (['outString'], {}), '(outString)\n', (5606, 5617), False, 'import json\n'), ((5805, 5856), 'mastAPITools.targetNameConeSearch', 'api.targetNameConeSearch', (['targetName', 'radius_arcsec'], {}), '(targetName, radius_arcsec)\n', (5829, 5856), True, 'import mastAPITools as api\n'), ((5896, 5935), 'pandas.DataFrame.from_dict', 'p.DataFrame.from_dict', (["mastData['data']"], {}), "(mastData['data'])\n", (5917, 5935), True, 'import pandas as p\n'), ((6198, 6236), 'astropy.time.Time', 'Time', (['times'], {'format': '"""iso"""', 'scale': '"""utc"""'}), "(times, format='iso', scale='utc')\n", (6202, 6236), False, 'from astropy.time import Time\n'), ((6912, 6940), 'mastAPITools.mastQuery', 'api.mastQuery', (['mashupRequest'], {}), '(mashupRequest)\n', (6925, 6940), True, 'import mastAPITools as api\n'), ((6953, 6974), 'json.loads', 'json.loads', (['outString'], {}), '(outString)\n', (6963, 6974), False, 'import json\n'), ((7253, 7275), 'mastAPITools.mastQuery', 'api.mastQuery', (['request'], {}), '(request)\n', (7266, 7275), True, 'import mastAPITools as api\n'), ((7285, 7306), 'json.loads', 'json.loads', (['outString'], {}), '(outString)\n', (7295, 7306), False, 'import json\n'), ((7315, 7353), 'pandas.DataFrame.from_dict', 'p.DataFrame.from_dict', (["outData['data']"], {}), "(outData['data'])\n", (7336, 7353), True, 'import pandas as p\n')] |
"""
Harvard CS286 Final Project, Fall 2020.
Data structures for simulation of autonomous vehicle controllers presented by <NAME> al. (2019):
'Feedback Control Algorithms for the Dissipation of Traffic Waves with Autonomous Vehicles'
https://doi.org/10.1007/978-3-030-25446-9_12
"""
import warnings
import random
import numpy as np
import pandas as pd
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from controllers import Controller, BandoFTL, PID, LearningController
class Position:
    """
    Representation of a position x on a ring road of length L, with modular
    arithmetic.  Positions are always stored as a float in [0.0, L).
    """

    TOL = 1e-9  # Tolerance for floating point comparisons.

    def __init__(self, x, L):
        """
        Build a position from a numeric value, another Position, or a Vehicle.

        x : float, Position, or Vehicle -- raw position, wrapped mod L.
        L : float -- ring length; must be positive, and must match x's own L
            when x is a Position or Vehicle.
        """
        if hasattr(x, 'env'):  # Check if the input is already a Vehicle.
            assert L == x.env.ring_length, "Cannot mix positions with different L values."
            x = x.x
        elif hasattr(x, 'L'):  # Check if the input is a Position.
            assert L == x.L, "Cannot mix positions with different L values."
            x = x.x
        assert L > 0, "Must have non-negative length."
        self._L = L
        self._x = 1.0 * (x % L)

    @property
    def x(self):
        """Position along the ring, in [0.0, L)."""
        return self._x

    @x.setter
    def x(self, x):
        self._x = 1.0 * (x % self.L)

    @property
    def L(self):
        """Ring length (immutable)."""
        return self._L

    @L.setter
    def L(self, L):
        raise AttributeError("The length property is immutable.")

    def __repr__(self):
        return "Position(x={}, L={})".format(self.x, self.L)

    def __eq__(self, other):
        """
        Check if two positions are equal (within the absolute tolerance TOL).

        Equality is modular: a position just below L and a position just above
        0.0 wrap to the same point on the ring and compare equal.  (The
        previous implementation compared raw x values with np.isclose's
        default relative tolerance, so it both missed the wrap-around case
        and let the tolerance grow with the magnitude of x.)
        """
        s = self.x
        o = Position(x=other, L=self.L).x  # Convert other to a Position if needed.
        diff = abs(s - o)
        # Equal if the stored values match, or if they sit on opposite sides
        # of the wrap-around point (difference close to L).
        return bool(np.isclose(diff, 0.0, atol=Position.TOL)
                    or np.isclose(diff, self.L, atol=Position.TOL))

    def __add__(self, other):
        """Advance the position by a scalar distance (mod L)."""
        try:
            o = float(other)
        except (TypeError, ValueError):  # Narrowed from a bare except.
            raise ValueError("Only numerical values can be added to Position objects.")
        s = self.x
        new_x = (s + o) % self.L
        return Position(x=new_x, L=self.L)

    def __radd__(self, other):
        raise NotImplementedError("Reverse operations are ambigous for Position objects -- try Position + scalar instead.")

    def __sub__(self, other):
        """Move the position backwards by a scalar distance (mod L)."""
        try:
            o = float(other)
        except (TypeError, ValueError):  # Narrowed from a bare except.
            raise ValueError("Only numerical values can be subtracted from Position objects -- try pos1.to_distance(pos2).")
        s = self.x
        new_x = (s - o) % self.L
        return Position(x=new_x, L=self.L)

    def __rsub__(self, other):
        raise NotImplementedError("Reverse operations are ambigous for Position objects -- try Position - scalar instead.")

    def distance_to(self, other, reverse=False):
        """
        Return the distance from self to other (i.e. by how much does other lead self?)
        If reverse=True, returns the distance from other to self, travelling in direction of the road.
        Distances are always positive and are measured in the direction of traffic.
        """
        # Convert to Position (works even if other is already a Position):
        other = Position(other, self.L)
        # Apply reverse case:
        if reverse:
            return other.distance_to(self)
        # Get positions as numeric values:
        s = self.x
        o = other.x
        # Difference in the direction of traffic, wrapped to [0, L):
        dist = (o - s) % self.L
        return dist
class RingRoad:
    def __init__(self,
        num_vehicles=22,
        ring_length=230.0,
        vehicle_length=4.5,
        safe_distance=4.0,
        min_speed=0.00,
        max_speed=9.75,
        min_accel=-6.75,
        max_accel=6.50,
        starting_noise=0.5,
        traffic_a=0.5,
        traffic_b=20,
        a_sigma=0.1,
        b_sigma=4.0,
        av_activate=60.0,
        temporal_res=0.1,
        control_lag=0.0,
        num_avs=1,
        av_even_spacing=False,
        hv_heterogeneity=False,
        uncertain_avs=False,
        sigma_pct=0.2,
        learning_mode=False,
        seed=None,
        ):
        """
        Simulation environment for a single-lane ring road shared by
        human-driven vehicles (Bando-FTL controllers) and one or more
        autonomous vehicles.  Distances are in meters, speeds in m/s,
        accelerations in m/s^2, and times in seconds (see the per-attribute
        comments below).
        """
        # Store properties:
        self.num_vehicles = num_vehicles  # Total number of vehicles (including A.V.).
        self.ring_length = ring_length  # Length of ring road (meters).
        self.vehicle_length = vehicle_length  # Length of vehicles (meters).
        self.safe_distance = safe_distance  # Safe distance between vehicles (meters).
        self.min_speed = min_speed  # Min velocity (meters/second).
        self.max_speed = max_speed  # Max velocity (meters/second).
        self.min_accel = min_accel  # Min acceleration (meters/second^2).
        self.max_accel = max_accel  # Max acceleration (meters/second^2).
        self.control_lag = control_lag  # How long between when control is calculated and when it is applied (seconds).
        self.temporal_res = temporal_res  # Time between updates (in seconds).
        self.traffic_a = traffic_a  # Coefficient for the FTL model (meters/second).
        self.traffic_b = traffic_b  # Coefficient for the Bando-OV model (1/second).
        self.a_sigma = a_sigma  # Std. dev. for 'a' parameter on Bando-FTL model (only for HVs when using HV heterogeneity)
        self.b_sigma = b_sigma  # Std. dev. for 'b' parameter on Bando-FTL model (only for HVs when using HV heterogeneity)
        self.av_activate = av_activate  # When to activate AV controller (seconds).
        self.starting_noise = starting_noise  # Add noise (in meters) to starting positions.
        self.seed = seed
        self.num_avs = num_avs  # Number of AVs
        self.av_even_spacing = av_even_spacing  # Set to True to spread AVs out, otherwise leave them all in a row.
        self.hv_heterogeneity = hv_heterogeneity  # Set to True for heterogeneity in HVs
        self.uncertain_avs = uncertain_avs  # Set to True for uncertainty in AVs
        self.sigma_pct = sigma_pct  # Tunes amount of uncertainty to add if uncertain_avs=True
        self.learning_mode = learning_mode  # If True, replaces the AV's PID controller with one that expects external commands.
        # Store state information:
        self.state = None
        self.history = dict()  # History of states, keyed by time step.
        self.all_vehicles = set()  # Set of all vehicles that were on the road at any point in time.
        # Initialize:
        self.random = np.random.RandomState(seed)  # Local NumPy RNG (used for starting-position noise).
        # NOTE(review): random.seed() seeds the *global* stdlib RNG and returns
        # None, so self.gauss_random is always None; the random.gauss() calls
        # in reset_state() draw from the global RNG.
        self.gauss_random = random.seed(seed)
        self.reset_state()
        self.archive_state()
    @property
    def av_indices(self):
        """Indices of the autonomous vehicles (returned as a copy)."""
        return self.state['av_indices'].copy()

    @property
    def hv_indices(self):
        """Indices of the human-driven vehicles (returned as a copy)."""
        return self.state['hv_indices'].copy()

    @property
    def vehicles(self):
        """Current vehicle list in index order (returned as a copy)."""
        return self.state['vehicles'].copy()

    @property
    def queue(self):
        """Vehicle indices in increasing order of position (returned as a copy)."""
        return self.state['queue'].copy()

    @property
    def step(self):
        """Current step counter (0 before the state is initialized)."""
        try:
            return self.state['step']
        except:  # Before state initialization:
            return 0

    @property
    def t(self):
        """Current simulation time in seconds (0.0 before the state is initialized)."""
        try:
            return self.state['time']
        except:  # Before state initialization:
            return 0.0

    @property
    def dt(self):
        """Simulation time step (alias for temporal_res, seconds)."""
        return self.temporal_res

    @property
    def L(self):
        """Alias for ring_length (meters)."""
        return self.ring_length

    @property
    def l_v(self):
        """Alias for vehicle_length (meters)."""
        return self.vehicle_length

    @property
    def N(self):
        """Number of vehicles currently in the state."""
        return len(self.state['vehicles'])
def __repr__(self):
params_string = []
for param in [
'num_vehicles',
'ring_length',
'vehicle_length',
'safe_distance',
'min_speed',
'max_speed',
'min_accel',
'max_accel',
'starting_noise',
'traffic_a',
'traffic_b',
'a_sigma',
'b_sigma',
'av_activate',
'temporal_res',
'control_lag',
'num_avs',
'av_even_spacing',
'hv_heterogeneity',
'uncertain_avs',
'sigma_pct',
'learning_mode',
'seed',
]:
params_string.append( f"{param}={getattr(self,param)}" )
params_string = ", ".join(params_string)
return f"RingRoad({params_string})"
def __str__(self):
s = ""
s += "RingRoad at step {} (t={}) with {} AV and {} HV:".format(self.step, self.t, self.num_avs, self.num_vehicles-self.num_avs) + "\n"
for index,vehicle in enumerate(self.state['vehicles']):
s += " [{}] ".format(index) + vehicle.__str__() + "\n"
return s
    def reset_road(self):
        """Clear all history and vehicles and rebuild the initial state (the RNG is not reseeded)."""
        #self.random = np.random.RandomState(seed)
        self.history = dict()
        self.all_vehicles = set()
        self.reset_state()
        self.archive_state()
    def reset_state(self):
        """
        Build the initial simulation state: place num_vehicles evenly around the
        ring (plus small position noise), construct Robot (AV) and Human (HV)
        vehicles with their controllers, and populate self.state.
        """
        assert self.num_vehicles >= 2, "Need at least 1 human and 1 robot."
        d_start = self.ring_length / self.num_vehicles  # Even spacing between vehicles (meters).
        vehicles = []
        # Build AV and HV index lists:
        if self.av_even_spacing:
            av_indices = [int(i/self.num_avs*self.num_vehicles) for i in range(self.num_avs)]
        else:
            av_indices = [i for i in range(self.num_avs)]
        hv_indices = [i for i in range(self.num_vehicles) if i not in set(av_indices)]
        for index in range(self.num_vehicles):
            if index in set(av_indices):
                noise = self.starting_noise
                noise = self.random.uniform(-noise/2,noise/2)  # Uniform jitter of +/- starting_noise/2 meters.
                if self.learning_mode:
                    active_controller = LearningController(env=self)
                else:
                    active_controller = PID(env=self, safe_distance=self.safe_distance, gamma=2.0, m=38, is_uncertain=self.uncertain_avs, sigma_pct=self.sigma_pct)
                robot = Robot(
                    env=self,
                    active_controller = active_controller,
                    passive_controller = BandoFTL(env=self, a=self.traffic_a, b=self.traffic_b),
                    init_pos = index * d_start + noise,
                    init_vel = 0.0,
                    init_acc = 0.0,
                    length = self.vehicle_length,
                    min_vel = self.min_speed,
                    max_vel = self.max_speed,
                    min_acc = self.min_accel,
                    max_acc = self.max_accel,
                    control_lag = self.control_lag,
                )
                robot.state['index'] = index
                # The AV starts active only if activation time is zero.
                robot.active = (self.av_activate==0)
                vehicles.append(robot)
            elif index in set(hv_indices):
                noise = self.starting_noise
                noise = self.random.uniform(-noise/2,noise/2)  # Uniform jitter of +/- starting_noise/2 meters.
                a_noise = random.gauss(mu=0, sigma=self.a_sigma) # Noise on Bando-FTL a parameter
                b_noise = random.gauss(mu=0, sigma=self.b_sigma) # Noise on Bando-FTL b parameter
                uncertain_a = self.traffic_a + a_noise # if self.traffic_a + a_noise > 0 else 0.1
                # uncertain_a = min(uncertain_a, 0.5)
                uncertain_b = self.traffic_b + b_noise # if self.traffic_b + b_noise > 0 else 4.0
                # uncertain_b = min(uncertain_b, 20.)
                human = Human(
                    env=self,
                    controller = BandoFTL(env=self,
                        a=uncertain_a if self.hv_heterogeneity else self.traffic_a,
                        b=uncertain_b if self.hv_heterogeneity else self.traffic_b,
                        ),
                    init_pos = index * d_start + noise,
                    init_vel = 0.0,
                    init_acc = 0.0,
                    length = self.vehicle_length,
                    min_vel = self.min_speed,
                    max_vel = self.max_speed,
                    min_acc = self.min_accel,
                    max_acc = self.max_accel,
                    control_lag = self.control_lag,
                )
                human.state['index'] = index
                vehicles.append(human)
        # Add vehicles to list:
        for vehicle in vehicles:
            # Add vehicle:
            self.all_vehicles.add(vehicle)
        # Build list of vehicle indices in increasing order of position, starting at x=0.
        # Note: Unless (illegal) passing has occurred, this should just be an offset list of ordered indices.
        queue = sorted(vehicles, key=lambda vehicle: vehicle.pos.x)
        queue = [vehicle.state['index'] for vehicle in queue]
        # Build state dictionary:
        # NOTE(review): 'av_active' reads the last `robot` created in the loop
        # above, which assumes num_avs >= 1 (otherwise `robot` is undefined).
        self.state = {
            'step' : 0,
            'time' : 0.0,
            'vehicles' : vehicles,  # List of vehicles in 0,...,(N-1) index order, with A.V. at index 0.
            'queue' : queue,  # List of vehicle indices in increasing order of position (starting at x=0).
            'av_active' : robot.active,
            'av_indices' : av_indices,
            'hv_indices' : hv_indices,
        }
def copy_state(self):
state = self.state.copy()
state['vehicles'] = state['vehicles'].copy()
return state
def archive_state(self):
for vehicle in self.state['vehicles']:
vehicle.archive_state()
self.history[self.step] = self.copy_state()
def get_vehicle_state_table(self, key, steps=None):
"""
Get a DataFrame of a state value (specified by key) for each vehicle (column) at each time step (row).
If steps is specified (as an iterable), gets specific time steps; otherwise gets all available time steps.
"""
table = []
# Get list of all vehicles, sorted by id:
vehicles = sorted(self.all_vehicles, key=lambda vehicle: vehicle.id)
for vehicle in vehicles:
df = vehicle.get_state_table(keys=['step','time',key], steps=steps)
df = df.rename(columns={key:vehicle.id}).set_index(['step','time'])
table.append( df )
table = pd.concat(table, axis=1)
table.columns.name = 'vehicle_id'
return table
    def get_vehicle_pos_table(self, steps=None):
        """Get a DataFrame of the position of each vehicle (column) at each time step (row)."""
        return self.get_vehicle_state_table(key='pos', steps=steps)
    def get_vehicle_vel_table(self, steps=None):
        """Get a DataFrame of the velocity of each vehicle (column) at each time step (row)."""
        return self.get_vehicle_state_table(key='vel', steps=steps)
    def get_vehicle_acc_table(self, steps=None):
        """Get a DataFrame of the acceleration of each vehicle (column) at each time step (row)."""
        return self.get_vehicle_state_table(key='acc', steps=steps)
    def get_vehicle_control_table(self, steps=None):
        """Get a DataFrame of the (unconstrained) control of each vehicle (column) at each time step (row)."""
        return self.get_vehicle_state_table(key='control', steps=steps)
def get_vehicle_index(self, vehicle):
"""
Returns the index (or None) of a given Vehicle.
"""
index = None
for i,v in enumerate(self.state['vehicles']):
if v.id == vehicle.id:
index = i
break
return index
def get_lead_index(self, vehicle):
"""
Returns the index of the Vehicle that leads a given Vehicle.
"""
if isinstance(vehicle, int):
this_index = vehicle
else:
this_index = self.get_vehicle_index(vehicle)
if this_index is None:
raise RuntimeError("Vehicle not found: {}".format(vehicle))
if self.N < 2:
raise RuntimeError("Vehicle is alone on the road: {}".format(vehicle))
# Find index of vehicle just ahead of this one in the queue:
this_pos = self.state['queue'].index(this_index)
lead_pos = (this_pos + 1) % len(self.state['queue'])
lead_index = self.state['queue'][lead_pos]
return lead_index
def get_lead_vehicle(self, vehicle):
lead_index = self.get_lead_index(vehicle)
lead_vehicle = self.state['vehicles'][lead_index]
return lead_vehicle
def check_crash(self, vehicle=None, raise_error=False):
"""
Check if a vehicle has crashed into or passed through the vehicle in front of it.
If no vehicle is specified, then all vehicles are checked.
"""
# Build list of vechicles to check:
vehicles = self.vehicles if vehicle is None else [vehicle]
# Loop through vehicles:
for this_vehicle in vehicles:
# Check that the lead vehicle has the expected index:
lead_vehicle = self.get_lead_vehicle(this_vehicle)
this_index = self.get_vehicle_index(this_vehicle)
lead_index = self.get_vehicle_index(lead_vehicle)
if (this_index+1) % self.N != lead_index:
if raise_error:
raise RuntimeError("Illegal passing occured at step={} around index {} : {}".format(
self.step,
this_index,
this_vehicle, #this_vehicle.__repr__(),
))
return True
return False
def check_crowding(self, vehicle=None, pct=1.0, raise_warning=False):
"""
Check if a vehicle has gotten within the safety buffer of the one in front of it (or withing a specified percentage of it).
If no vehicle is specified, then all vehicles are checked.
"""
# Build list of vechicles to check:
vehicles = self.vehicles if vehicle is None else [vehicle]
# Loop through vehicles:
for this_vehicle in vehicles:
# Get lead vehicle:
lead_vehicle = self.get_lead_vehicle(this_vehicle)
# Check safety distance:
safe_distance = pct * self.safe_distance
if this_vehicle.distance_to(lead_vehicle) - lead_vehicle.length < safe_distance:
if raise_warning:
warning = "WARNING: Safe distance violation at step {}:".format(self.step)
warning += " [{}] {}".format(self.get_vehicle_index(this_vehicle),this_vehicle)
warning += " [{}] {}".format(self.get_vehicle_index(lead_vehicle),lead_vehicle)
warnings.warn(warning)
return True
return False
    def run_step(self):
        """
        Perform simulation update for one time step.

        Two passes over the vehicles: first every controller computes its
        command from the *current* (unmodified) state, then all commands are
        applied and the vehicles are integrated forward by `dt`. Finally the
        position-ordered queue is rebuilt, safety checks run, and the new
        state is archived.
        """
        # Calculate control for each vehicle (all from the same pre-update state):
        controls = dict() # Keyed by index.
        for index,vehicle in enumerate(self.state['vehicles']):
            # Activate the AV once simulation time reaches `av_activate`:
            if (vehicle.type == 'robot') and (not vehicle.active) and (self.t >= self.av_activate):
                vehicle.active = True
            controls[index] = vehicle.controller.calculate(vehicle)
        # Apply control for each vehicle:
        for index,vehicle in enumerate(self.state['vehicles']):
            vehicle.state['index'] = index
            vehicle.state['step'] = self.state['step']
            vehicle.state['time'] = self.state['time']
            vehicle.control = controls[index] # Add unconstrained command to control buffer.
            vehicle.acc = vehicle.control # Get control (possibly with lag); clamped by the acc setter.
            vehicle.vel += vehicle.acc*self.dt # Apply acceleration (with constraints on acc and vel).
            vehicle.pos += vehicle.vel*self.dt # Euler step; position wraps around the ring.
        # Update vehicle queue (list of vehicle indices in the order they are encountered on the right when starting from x=0):
        queue = sorted(self.vehicles, key=lambda vehicle: vehicle.pos.x)
        queue = [vehicle.state['index'] for vehicle in queue]
        self.state['queue'] = queue
        # Make sure there has been no illegal passing or tailgating.
        # Note: `vehicle=None` checks all vehicles.
        if not (self.learning_mode or self.hv_heterogeneity):
            self.check_crash(vehicle=None, raise_error=True)
        if not (self.learning_mode):
            self.check_crowding(vehicle=None, raise_warning=True, pct=0.5)
        # Increment time step for next iteration:
        self.state['step'] += 1
        self.state['time'] += self.dt
        # Archive environment state:
        self.archive_state()
def run(self, steps=100):
for s in range(steps):
self.run_step()
    def visualize(self, *args, **kwargs):
        """(Redirect for `plot_ring`, included for backward compatibility.)

        All positional and keyword arguments are forwarded to `plot_ring` unchanged.
        """
        return self.plot_ring(*args, **kwargs)
    def plot_ring(self, step=None, draw_cars_to_scale=False, draw_safety_buffer=False, label_step=True, label_cars=True, ax=None, animation_mode=False):
        """
        Plot the positions of the vehicles on the ring road at the specified time step.

        :param step: time step to draw (defaults to the latest archived step).
        :param draw_cars_to_scale: draw each car as an arc-rectangle with its true
            length (otherwise cars are drawn as fixed-size points).
        :param draw_safety_buffer: also shade the safety zone behind each car
            (only has an effect when drawing to scale).
        :param label_step: annotate the plot with the simulation time.
        :param label_cars: annotate each car with its vehicle index.
        :param ax: existing polar axes to draw on (a new figure is created if None).
        :param animation_mode: if True, return the tuple of artists (for
            matplotlib FuncAnimation) instead of (fig, ax).
        """
        # Plot latest step by default:
        if step is None:
            step = self.step
        # Get corresponding state:
        state = self.history[step]
        # Set plotting options:
        road_width = 6.
        car_width = 3.
        point_car_size = 6.
        road_color = 'silver'
        hv_color = '#386cb0'
        av_color = '#33a02c'
        # Create axes (or use existing ones):
        if ax:
            fig = ax.figure
        else:
            fig = plt.figure(figsize=(6,6))
            ax = fig.add_subplot(facecolor='white', frameon=False, projection='polar')
        # Find the radius of the ring given the RingRoad length
        road_radius = self.ring_length / (2 * np.pi)
        # Collect artists (for pyplot animation):
        artists = []
        # Plot a circle: https://stackoverflow.com/a/19828753
        # (polar_transform is only needed by the commented-out Circle variant below)
        polar_transform = ax.transProjectionAffine + ax.transAxes
        #ring_road = plt.Circle((0, 0), road_radius, color=road_color, zorder=1, lw=road_width, fill=False, transform=polar_transform)
        ring_road = plt.Rectangle(xy=(0, road_radius-road_width/2), width=2*np.pi, height=road_width, lw=0, color=road_color, zorder=1, fill=True)
        ax.add_artist(ring_road)
        artists.append(ring_road)
        ax.bar(0, 1).remove() # Hack: https://github.com/matplotlib/matplotlib/issues/8521#issue-223234274
        # Now plot the cars after transforming the 1-dimensional location of each to the polar coordinate system
        for car in state['vehicles']:
            # Get relevant state variables (each row is a time step and each column is a variable):
            car_state = car.get_state_table(keys=['index','pos'], steps=[step])
            car_state = car_state.iloc[0].to_dict() # Convert the single table row to a dictionary.
            car_state['index'] = int(car_state['index']) # Make sure index is an integer.
            # Color by driver type (blue = human, green = AV; AVs drawn on top):
            if car.type=='human':
                car_color = hv_color
                car_zorder = 2
            elif car.type=='robot':
                car_color = av_color
                car_zorder = 3
            else:
                raise NotImplementedError
            # Transform the 1-D coord to polar system
            car_theta = (2*np.pi) * car_state['pos'] / self.ring_length
            # Now plot the cars, whether to scale or not, with color according to whether each is an AV or human driver
            # Note: for large ring roads, it is likely better to NOT draw to scale, for easier visualization
            if draw_cars_to_scale:
                # Draw car:
                car_polar_length = (2*np.pi) * car.length / self.ring_length
                car_rectangle = plt.Rectangle(xy=(car_theta-car_polar_length, road_radius-car_width/2), width=car_polar_length, height=car_width, lw=0, color=car_color, zorder=car_zorder, fill=True)
                ax.add_artist(car_rectangle)
                artists.append(car_rectangle)
                # Draw safety zone behind car:
                if draw_safety_buffer:
                    car_polar_buffer = (2*np.pi) * self.safe_distance / self.ring_length
                    car_buffer = plt.Rectangle(xy=(car_theta-car_polar_length-car_polar_buffer, road_radius-car_width/2), width=car_polar_buffer, height=car_width, lw=0, color='#fb6a4a', zorder=car_zorder-0.1, alpha=0.4, fill=True)
                    ax.add_artist(car_buffer)
                    artists.append(car_buffer)
            else:
                # Draw car as a fixed-size point marker:
                car_point, = ax.plot(car_theta, road_radius, color=car_color, zorder=car_zorder, marker='o', markersize=point_car_size)
                artists.append(car_point)
            # Add text (vehicle index, just outside the ring):
            if label_cars:
                car_label = "{}".format(car_state['index'])
                label = ax.text(car_theta, (road_radius+road_width*1.25), car_label, fontsize=10, ha='center', va='center')
                artists.append(label)
        # Add text (simulation time, at the center of the ring):
        if label_step:
            step_label = "t = {:.1f} s".format(state['time'])
            # if label_cars:
            #     step_label = " \n\n"+step_label+"\n\n"+"A.V. = 0"
            label = ax.text(0,0, step_label, fontsize=14, ha='center', va='center')
            artists.append(label)
        # Hide ticks and gridlines:
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        ax.spines['polar'].set_visible(False)
        ax.grid(False)
        ax.set_xlim((0,np.pi*2))
        ax.set_ylim((0,(road_radius+road_width/2)*1.05))
        # Return artists or figure:
        if animation_mode:
            return tuple(artists)
        else:
            return fig, ax
    def plot_positions(self, steps=None, total_steps=None, ax=None, animation_mode=False):
        """
        Plot positions of vehicles (y axis) over time (x axis).
        Optionally, specify step with an iterable (for animation).

        :param steps: iterable of time steps to plot (defaults to all steps so far).
        :param total_steps: if given, fixes the x-axis limit to total_steps * dt.
        :param ax: existing axes to draw on (a new figure is created if None).
        :param animation_mode: if True, return the tuple of artists instead of (fig, ax).
        """
        # Set plotting options:
        hv_color = '#386cb0'
        av_color = '#7fc97f'
        # Create axes (or use existing ones):
        if ax:
            fig = ax.figure
        else:
            fig,ax = plt.subplots(1,1, figsize=(9,4))
        # Collect artists (for pyplot animation):
        artists = []
        # Get steps to plot:
        if steps is None:
            steps = range(0,self.step)
        # Plot each vehicle:
        for vehicle in self.all_vehicles:
            # Get a table of state history for this vehicle:
            table = vehicle.get_state_table(keys=['step','time','pos'], steps=steps)
            # Set plotting options:
            if vehicle.type=='human':
                color = hv_color
                alpha = 0.5
                zorder = 2
            elif vehicle.type=='robot':
                color = av_color
                alpha = 0.75
                zorder = 3
            else:
                raise NotImplementedError
            # Plot a separate chunk for each revolution:
            # (position wraps around the ring, so a single continuous line would
            # draw spurious vertical jumps; break the trace where pos decreases)
            prev_break = 0
            prev_row = None
            for i in range(len(table)):
                this_row = table.iloc[i]
                # Determine whether to plot new chunk:
                if prev_row is None:
                    new_chunk = False # First row.
                elif i == len(table)-1:
                    new_chunk = True # Last row.
                elif this_row['pos'] < prev_row['pos']:
                    new_chunk = True # Row with wrap around.
                else:
                    new_chunk = False # All other rows.
                # Plot new chunk if needed:
                if new_chunk:
                    df = table.iloc[prev_break:i]
                    lines, = ax.plot(df['time'],df['pos'], color=color, alpha=alpha, zorder=zorder)
                    artists.append(lines)
                    prev_break = i
                prev_row = this_row
        # Add line for AV activation (dotted vertical marker):
        y_min,y_max = 0, self.L
        if self.av_activate < self.t:
            ax.plot([self.av_activate,self.av_activate],[y_min,y_max], ls=':', color='black', alpha=1, zorder=5)
        ax.set_ylim((y_min,y_max))
        # Set axes:
        #ax.set_title("Position over time")
        ax.set_xlabel("time (seconds)")
        ax.set_ylabel("position (meters)")
        # Set x limits:
        if total_steps:
            ax.set_xlim(0,total_steps*self.dt)
        # Return artists or figure:
        if animation_mode:
            return tuple(artists)
        else:
            return fig, ax
    def plot_velocities(self, steps=None, total_steps=None, show_sigma=False, ax=None, animation_mode=False):
        """
        Plot velocities of vehicles (y axis) over time (x axis).
        Optionally, specify step with an iterable (for animation).

        :param steps: iterable of time steps to plot (defaults to all steps so far).
        :param total_steps: if given, fixes the x-axis limit to total_steps * dt.
        :param show_sigma: also plot the cross-vehicle standard deviation of velocity.
        :param ax: existing axes to draw on (a new figure is created if None).
        :param animation_mode: if True, return the tuple of artists instead of (fig, ax).
        """
        # Set plotting options:
        hv_color = '#386cb0'
        av_color = '#33a02c'
        # Create axes (or use existing ones):
        if ax:
            fig = ax.figure
        else:
            fig,ax = plt.subplots(1,1, figsize=(9,4))
        # Collect artists (for pyplot animation):
        artists = []
        # Get steps to plot:
        if steps is None:
            steps = range(0,self.step)
        # Plot each vehicle:
        for vehicle in self.all_vehicles:
            # Get a table of state history for this vehicle:
            table = vehicle.get_state_table(keys=['step','time','vel'], steps=steps)
            # Set plotting options:
            if vehicle.type=='human':
                color = hv_color
                alpha = 0.5
                zorder = 2
            elif vehicle.type=='robot':
                color = av_color
                alpha = 0.75
                zorder = 3
            else:
                raise NotImplementedError
            # Plot:
            lines, = ax.plot(table['time'],table['vel'], color=color, alpha=alpha, zorder=zorder)
            artists.append(lines)
        # Plot standard deviation across vehicles:
        if show_sigma:
            table = self.get_vehicle_vel_table(steps=steps).std(axis=1).to_frame(name='sigma').reset_index()
            ax.plot(table['time'], table['sigma'], lw=1, color='grey', label="Standard deviation\nacross all vehicles")
            ax.legend(loc='center right', fontsize=6)
        # Add line for AV activation (dotted vertical marker):
        #y_min,y_max = ax.get_ylim()
        y_min,y_max = 0, min(30,self.max_speed)*1.05
        if self.av_activate < self.t:
            ax.plot([self.av_activate,self.av_activate],[y_min,y_max], ls=':', color='black', alpha=1, zorder=5)
        ax.set_ylim((y_min,y_max))
        # Set x limits:
        if total_steps:
            ax.set_xlim(0,total_steps*self.dt)
        # Set axes:
        #ax.set_title("Velocity over time")
        ax.set_xlabel("time (seconds)")
        ax.set_ylabel("velocity (meters/second)")
        # Return artists or figure:
        if animation_mode:
            return tuple(artists)
        else:
            return fig, ax
    def plot_dashboard(self, step=None, total_steps=None, axs=None, animation_mode=False, **plot_options):
        """
        Plot a combination of plots for a specific step: the ring view (left)
        with position and velocity traces (right).

        Extra keyword options are routed by name: ring-view options
        ('draw_cars_to_scale', 'draw_safety_buffer', 'label_cars', 'label_step')
        go to ax1; 'show_sigma' goes to the velocity plot on ax3.

        :param step: time step to draw (defaults to the latest step).
        :param total_steps: forwarded to the trace plots to fix their x-limits.
        :param axs: optional tuple of three existing axes (ring, positions, velocities).
        :param animation_mode: if True, return the tuple of artists instead of (fig, axes).
        """
        # Create axes (or use existing ones):
        if axs:
            fig = axs[0].figure
            assert len(axs)==3, "Expect axs as a tuple of three axes."
            ax1, ax2, ax3 = axs
        else:
            fig = plt.figure(figsize=(9,7))
            ax1 = fig.add_subplot(1, 2, 1, facecolor='white', frameon=False, projection='polar')
            ax2 = fig.add_subplot(2, 2, 2, facecolor='white')
            ax3 = fig.add_subplot(2, 2, 4, facecolor='white')
            axs = (ax1,ax2,ax3)
        if step is None:
            step = self.step
        # Parse options:
        ax1_options = dict()
        ax2_options = dict()
        ax3_options = dict()
        for k,v in plot_options.items():
            if k in {'draw_cars_to_scale','draw_safety_buffer','label_cars','label_step'}:
                ax1_options[k] = v
            if k in {}: # NOTE: {} is an empty dict, so nothing is ever routed to ax2 (placeholder).
                ax2_options[k] = v
            if k in {'show_sigma'}:
                ax3_options[k] = v
        artists = [] # Collect artists for animation.
        artists.extend( self.visualize(ax=ax1, step=step, **ax1_options) )
        artists.extend( self.plot_positions(ax=ax2, steps=range(0,step), total_steps=total_steps, **ax2_options) )
        artists.extend( self.plot_velocities(ax=ax3, steps=range(0,step), total_steps=total_steps, **ax3_options) )
        # Return artists or figure:
        if animation_mode:
            return tuple(artists)
        else:
            return fig, (ax1,ax2,ax3)
class Vehicle:
    """
    Base class for a vehicle on the ring road.

    Each instance registers itself in the class-level `all_vehicles` list and
    takes its unique `id` from its position in that list. A vehicle carries a
    `state` dict (position, velocity, acceleration, control buffer, ...) and an
    archive of past states in `history`, keyed by environment time step.
    Velocity and acceleration are clamped to their [min, max] bounds on
    assignment, and controls pass through a FIFO buffer to model actuation lag.
    """
    # Registry of every Vehicle ever constructed (shared across all instances).
    all_vehicles = []
    def __init__(self, env,
        controller=None, control_lag=0.0,
        init_pos=0.0, init_vel=0.0, init_acc=0.0, length=4.5,
        min_vel=0, max_vel=float("+Inf"), min_acc=float("-Inf"), max_acc=float("+Inf"),
        ):
        """
        :param env: RingRoad environment the vehicle lives in.
        :param controller: controller that computes accelerations (a null
            Controller placeholder is used if None).
        :param control_lag: lag in seconds between when a control is queued
            and when it is applied.
        :param init_pos: initial position (meters along the ring).
        :param init_vel: initial velocity (m/s).
        :param init_acc: initial acceleration (m/s^2).
        :param length: vehicle length (meters).
        :param min_vel: lower clamp for velocity.
        :param max_vel: upper clamp for velocity.
        :param min_acc: lower clamp for acceleration.
        :param max_acc: upper clamp for acceleration.
        """
        # Generate unique ID and add to master list:
        self.id = len(Vehicle.all_vehicles)
        Vehicle.all_vehicles.append(self)
        # Use null controller as placeholder:
        if controller is None:
            controller = Controller(env)
        # Store properties:
        self.type = None # 'robot' or 'human' (set by subclasses)
        self.env = env
        self.controller = controller
        self.init_pos = init_pos
        self.init_vel = init_vel
        self.init_acc = init_acc
        self.length = length
        # Set constraints:
        self.min_vel = min_vel
        self.max_vel = max_vel
        self.min_acc = min_acc
        self.max_acc = max_acc
        self.control_lag = control_lag # Lag in seconds between when control is queued and when it is applied.
        # Store state information:
        self.state = None
        self.history = dict() # History of states, keyed by time step.
        # Initialize:
        self.reset_state()
    def __repr__(self):
        typ = 'Vehicle' if self.type is None else self.type.capitalize()
        return "<{}(id={})@x={:.2f}>".format(typ, self.id, self.x)
    @property
    def l(self):
        # Shorthand alias for the vehicle length.
        return self.length
    @property
    def x(self):
        # Scalar position (meters) along the ring.
        return self.state['pos'].x
    @property
    def pos(self):
        return self.state['pos']
    @property
    def vel(self):
        return self.state['vel']
    @property
    def acc(self):
        return self.state['acc']
    @property
    def control(self):
        return self.state['control']
    @pos.setter
    def pos(self, pos):
        # Wrap the scalar into a ring Position (handles modular arithmetic).
        self.state['pos'] = Position(x=pos, L=self.env.L)
    @vel.setter
    def vel(self, vel):
        # Clamp to [min_vel, max_vel] on assignment.
        vel = max(vel, self.min_vel)
        vel = min(vel, self.max_vel)
        self.state['vel'] = vel
    @acc.setter
    def acc(self, acc):
        # Clamp to [min_acc, max_acc] on assignment.
        acc = max(acc, self.min_acc)
        acc = min(acc, self.max_acc)
        self.state['acc'] = acc
    @control.setter
    def control(self, control):
        # FIFO lag buffer: queue the new command, promote the oldest one.
        self.state['control_buffer'].append(control) # Add new value to end of queue.
        self.state['control'] = self.state['control_buffer'].pop(0) # Promote first value in queue to next control.
    def reset_state(self):
        """(Re)initialize the state dict from the init_* values; the control
        buffer is pre-filled to model the configured actuation lag."""
        control_lag_steps = int(np.ceil(self.control_lag/self.env.dt))
        self.state = {
            'time' : self.env.t,
            'step' : self.env.step,
            'index' : None, # Set by environment.
            'pos' : Position(x=self.init_pos, L=self.env.L),
            'vel' : self.init_vel,
            'acc' : self.init_acc,
            'control' : self.init_acc,
            'control_buffer' : [self.init_acc for _ in range(control_lag_steps)],
            'controller_type' : self.controller.type,
        }
    def copy_state(self):
        """Return a copy of the current state with an independent control buffer."""
        state = self.state.copy()
        state['control_buffer'] = state['control_buffer'].copy()
        # BUGFIX: previously returned `self.state.copy()`, which discarded the
        # copied control buffer above — archived states then aliased (and were
        # silently mutated through) the live buffer.
        return state
    def archive_state(self):
        """Record a snapshot of the current state in `history`, keyed by the
        current environment step."""
        state = self.copy_state()
        state['time'] = self.env.t
        state['step'] = self.env.step
        state['pos'] = state['pos'].x # Extract scalar position from the Position object.
        self.history[self.env.step] = state
    def get_state_table(self, keys=('step', 'time', 'index', 'pos', 'vel', 'acc', 'control'), steps=None):
        """
        Build a DataFrame of the state history
        (with specified keys as columns and time steps as rows).
        If steps is specified (as an iterable), gets specific time steps;
        otherwise gets all available time steps.
        Note: the default for `keys` is a tuple (rather than a shared mutable
        list); any iterable of key names is accepted.
        """
        if steps is None:
            steps = self.history.keys()
        rows = []
        for step in steps:
            state = self.history[step]
            rows.append( {key : state[key] for key in keys if key in state} )
        return pd.DataFrame(rows, columns=keys, index=steps)
    def distance_to(self, other):
        """
        Return the distance from self to other (i.e. by how much does other lead self?)
        Distances are always positive and are measured in the direction of traffic.
        """
        other = Position(other, self.env.L) # Convert to position.
        return self.pos.distance_to(other) # Call Position.distance_to(Position) .
class Human(Vehicle):
    """A human-driven vehicle; behavior is defined by the controller passed at construction."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.type = 'human'
    def __str__(self):
        return "Human driver at position {} with velocity {} and acceleration {}.".format(
            self.state['pos'].x, self.state['vel'], self.state['acc'],
        )
class Robot(Vehicle):
    """
    An autonomous vehicle that switches between two controllers: the
    `active_controller` when engaged and the `passive_controller` otherwise.
    Assigning the `active` property swaps `self.controller` accordingly.
    """
    def __init__(self, env, active_controller, passive_controller, *args, **kwargs):
        # Initialize Vehicle (starts out using the active controller):
        super().__init__(env=env, controller=active_controller, *args, **kwargs)
        # Store additional properties:
        self.type = 'robot'
        self.active_controller = active_controller
        self.passive_controller = passive_controller
        # Initialize:
        # NOTE(review): reset_state() also runs inside Vehicle.__init__ (via the
        # overridden method below); this second call re-runs it after the
        # controllers are stored — presumably intentional, but worth confirming.
        self.reset_state()
    @property
    def active(self):
        # Whether autonomous control is currently engaged.
        return self.state['active']
    @active.setter
    def active(self, active):
        # Swap the live controller to match the activation flag.
        self.state['active'] = active
        if active:
            self.controller = self.active_controller
        else:
            self.controller = self.passive_controller
    def __str__(self):
        s = "AV ({}) at position {} with velocity {} and acceleration {}.".format(
            'active' if self.active else 'passive',
            self.state['pos'].x, self.state['vel'], self.state['acc'],
        )
        return s
    def reset_state(self):
        # Extends Vehicle.reset_state with the activation flag.
        super().reset_state()
        self.state['active'] = True # Flag to determine autonomous control.
# This module only defines the simulation data structures; running it directly
# just prints a pointer to the actual experiment entry points.
if __name__=='__main__':
    print("""
    This code implements the RingRoad and other data structures
    but does not perform any simulation experiments.
    To perform the baseline and extension simulations please run one of these scripts:
    - code/baseline.py
    - code/extension_one.py
    - code/extension_two.py
    - code/extension_three.py
    or notebooks:
    - notebooks/Baseline-Results.ipynb
    - notebooks/Extension-One.ipynb
    - notebooks/Extension-Two.ipynb
    - notebooks/Extension-Three.ipynb
    """)
| [
"pandas.DataFrame",
"controllers.LearningController",
"numpy.ceil",
"controllers.Controller",
"numpy.random.RandomState",
"matplotlib.pyplot.Rectangle",
"warnings.warn",
"numpy.isclose",
"matplotlib.pyplot.figure",
"random.seed",
"controllers.BandoFTL",
"random.gauss",
"controllers.PID",
"... | [((1866, 1901), 'numpy.isclose', 'np.isclose', (['s', 'o'], {'atol': 'Position.TOL'}), '(s, o, atol=Position.TOL)\n', (1876, 1901), True, 'import numpy as np\n'), ((6667, 6694), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (6688, 6694), True, 'import numpy as np\n'), ((6723, 6740), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6734, 6740), False, 'import random\n'), ((14356, 14380), 'pandas.concat', 'pd.concat', (['table'], {'axis': '(1)'}), '(table, axis=1)\n', (14365, 14380), True, 'import pandas as pd\n'), ((22316, 22453), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', ([], {'xy': '(0, road_radius - road_width / 2)', 'width': '(2 * np.pi)', 'height': 'road_width', 'lw': '(0)', 'color': 'road_color', 'zorder': '(1)', 'fill': '(True)'}), '(xy=(0, road_radius - road_width / 2), width=2 * np.pi, height\n =road_width, lw=0, color=road_color, zorder=1, fill=True)\n', (22329, 22453), True, 'import matplotlib.pyplot as plt\n'), ((37130, 37176), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {'columns': 'keys', 'index': 'steps'}), '(table, columns=keys, index=steps)\n', (37142, 37176), True, 'import pandas as pd\n'), ((21721, 21747), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (21731, 21747), True, 'import matplotlib.pyplot as plt\n'), ((26444, 26478), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 4)'}), '(1, 1, figsize=(9, 4))\n', (26456, 26478), True, 'import matplotlib.pyplot as plt\n'), ((29353, 29387), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 4)'}), '(1, 1, figsize=(9, 4))\n', (29365, 29387), True, 'import matplotlib.pyplot as plt\n'), ((31796, 31822), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (31806, 31822), True, 'import matplotlib.pyplot as plt\n'), ((33569, 33584), 'controllers.Controller', 'Controller', (['env'], {}), '(env)\n', 
(33579, 33584), False, 'from controllers import Controller, BandoFTL, PID, LearningController\n'), ((35586, 35625), 'numpy.ceil', 'np.ceil', (['(self.control_lag / self.env.dt)'], {}), '(self.control_lag / self.env.dt)\n', (35593, 35625), True, 'import numpy as np\n'), ((23955, 24136), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', ([], {'xy': '(car_theta - car_polar_length, road_radius - car_width / 2)', 'width': 'car_polar_length', 'height': 'car_width', 'lw': '(0)', 'color': 'car_color', 'zorder': 'car_zorder', 'fill': '(True)'}), '(xy=(car_theta - car_polar_length, road_radius - car_width / 2\n ), width=car_polar_length, height=car_width, lw=0, color=car_color,\n zorder=car_zorder, fill=True)\n', (23968, 24136), True, 'import matplotlib.pyplot as plt\n'), ((9902, 9930), 'controllers.LearningController', 'LearningController', ([], {'env': 'self'}), '(env=self)\n', (9920, 9930), False, 'from controllers import Controller, BandoFTL, PID, LearningController\n'), ((9993, 10120), 'controllers.PID', 'PID', ([], {'env': 'self', 'safe_distance': 'self.safe_distance', 'gamma': '(2.0)', 'm': '(38)', 'is_uncertain': 'self.uncertain_avs', 'sigma_pct': 'self.sigma_pct'}), '(env=self, safe_distance=self.safe_distance, gamma=2.0, m=38,\n is_uncertain=self.uncertain_avs, sigma_pct=self.sigma_pct)\n', (9996, 10120), False, 'from controllers import Controller, BandoFTL, PID, LearningController\n'), ((11095, 11133), 'random.gauss', 'random.gauss', ([], {'mu': '(0)', 'sigma': 'self.a_sigma'}), '(mu=0, sigma=self.a_sigma)\n', (11107, 11133), False, 'import random\n'), ((11197, 11235), 'random.gauss', 'random.gauss', ([], {'mu': '(0)', 'sigma': 'self.b_sigma'}), '(mu=0, sigma=self.b_sigma)\n', (11209, 11235), False, 'import random\n'), ((18738, 18760), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (18751, 18760), False, 'import warnings\n'), ((24438, 24655), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', ([], {'xy': '(car_theta - car_polar_length - 
car_polar_buffer, road_radius - car_width / 2)', 'width': 'car_polar_buffer', 'height': 'car_width', 'lw': '(0)', 'color': '"""#fb6a4a"""', 'zorder': '(car_zorder - 0.1)', 'alpha': '(0.4)', 'fill': '(True)'}), "(xy=(car_theta - car_polar_length - car_polar_buffer, \n road_radius - car_width / 2), width=car_polar_buffer, height=car_width,\n lw=0, color='#fb6a4a', zorder=car_zorder - 0.1, alpha=0.4, fill=True)\n", (24451, 24655), True, 'import matplotlib.pyplot as plt\n'), ((10278, 10332), 'controllers.BandoFTL', 'BandoFTL', ([], {'env': 'self', 'a': 'self.traffic_a', 'b': 'self.traffic_b'}), '(env=self, a=self.traffic_a, b=self.traffic_b)\n', (10286, 10332), False, 'from controllers import Controller, BandoFTL, PID, LearningController\n'), ((11672, 11815), 'controllers.BandoFTL', 'BandoFTL', ([], {'env': 'self', 'a': '(uncertain_a if self.hv_heterogeneity else self.traffic_a)', 'b': '(uncertain_b if self.hv_heterogeneity else self.traffic_b)'}), '(env=self, a=uncertain_a if self.hv_heterogeneity else self.\n traffic_a, b=uncertain_b if self.hv_heterogeneity else self.traffic_b)\n', (11680, 11815), False, 'from controllers import Controller, BandoFTL, PID, LearningController\n')] |
# -*- coding: utf-8 -*-
'''Implementation of TransD.'''
import numpy as np
import torch
import torch.autograd
import torch.nn as nn
from keen.constants import *
'''
TODO: Check, whether it makes sense to integrate identity matrices.
'''
class TransD(nn.Module):
def __init__(self, config):
super(TransD, self).__init__()
self.model_name = TRANS_D
self.num_entities = config[NUM_ENTITIES]
self.num_relations = config[NUM_RELATIONS]
self.entity_embedding_dim = config[EMBEDDING_DIM]
self.relation_embedding_dim = self.entity_embedding_dim
self.margin_loss = config[MARGIN_LOSS]
self.device = torch.device(
'cuda:0' if torch.cuda.is_available() and config[PREFERRED_DEVICE] == GPU else CPU)
# A simple lookup table that stores embeddings of a fixed dictionary and size
self.entity_embeddings = nn.Embedding(self.num_entities, self.entity_embedding_dim, max_norm=1)
self.relation_embeddings = nn.Embedding(self.num_relations, self.relation_embedding_dim, max_norm=1)
self.entity_projections = nn.Embedding(self.num_entities, self.entity_embedding_dim)
self.relation_projections = nn.Embedding(self.num_relations, self.relation_embedding_dim)
self.criterion = nn.MarginRankingLoss(margin=self.margin_loss, size_average=True)
self.scoring_fct_norm = config[SCORING_FUNCTION_NORM]
# self._initialize()
def _compute_scores(self, h_embs, r_embs, t_embs):
"""
:param h_embs:
:param r_embs:
:param t_embs:
:return:
"""
# Add the vector element wise
sum_res = h_embs + r_embs - t_embs
distances = torch.norm(sum_res, dim=1, p=self.scoring_fct_norm).view(size=(-1,))
distances = torch.mul(distances, distances)
return distances
def _compute_loss(self, pos_scores, neg_scores):
"""
:param pos_scores:
:param neg_scores:
:return:
"""
# y == -1 indicates that second input to criterion should get a larger loss
# y = torch.Tensor([-1]).cuda()
# NOTE: y = 1 is important
# y = torch.tensor([-1], dtype=torch.float, device=self.device)
y = np.repeat([-1], repeats=pos_scores.shape[0])
y = torch.tensor(y, dtype=torch.float, device=self.device)
# Scores for the psotive and negative triples
pos_scores = torch.tensor(pos_scores, dtype=torch.float, device=self.device)
neg_scores = torch.tensor(neg_scores, dtype=torch.float, device=self.device)
loss = self.criterion(pos_scores, neg_scores, y)
return loss
def _project_entities(self, entity_embs, entity_proj_vecs, relation_projs):
# batch_size = entity_embs.shape[0]
# identity_matrices = torch.eye(batch_size,self.relation_embedding_dim,self.entity_embedding_dim)
transfer_matrices = torch.einsum('nm,nk->nmk',
[relation_projs, entity_proj_vecs]) # TODO: Check + identity_matrices
projected_entity_embs = torch.einsum('nmk,nk->nm', [transfer_matrices, entity_embs])
# projected_entity_embs = F.normalize(projected_entity_embs, 2, 1)
return projected_entity_embs
# def _initialize(self):
# lower_bound = -6 / np.sqrt(self.entity_embedding_dim)
# upper_bound = 6 / np.sqrt(self.entity_embedding_dim)
# nn.init.uniform_(self.entity_embeddings.weight.data, a=lower_bound, b=upper_bound)
# nn.init.uniform_(self.relation_embeddings.weight.data, a=lower_bound, b=upper_bound)
#
# norms = torch.norm(self.relation_embeddings.weight, p=2, dim=1).data
# self.relation_embeddings.weight.data = self.relation_embeddings.weight.data.div(
# norms.view(self.num_relations, 1).expand_as(self.relation_embeddings.weight))
def forward(self, batch_positives, batch_negatives):
pos_heads = batch_positives[:, 0:1]
pos_relations = batch_positives[:, 1:2]
pos_tails = batch_positives[:, 2:3]
neg_heads = batch_negatives[:, 0:1]
neg_relations = batch_negatives[:, 1:2]
neg_tails = batch_negatives[:, 2:3]
pos_h_embs = self.entity_embeddings(pos_heads).view(-1, self.entity_embedding_dim)
pos_r_embs = self.relation_embeddings(pos_relations).view(-1, self.relation_embedding_dim)
pos_t_embs = self.entity_embeddings(pos_tails).view(-1, self.entity_embedding_dim)
pos_h_proj_vec_embs = self.entity_projections(pos_heads).view(-1, self.entity_embedding_dim)
pos_r_projs_embs = self.relation_projections(pos_relations).view(-1, self.relation_embedding_dim)
pos_t_proj_vec_embs = self.entity_projections(pos_tails).view(-1, self.entity_embedding_dim)
neg_h_embs = self.entity_embeddings(neg_heads).view(-1, self.entity_embedding_dim)
neg_r_embs = self.relation_embeddings(neg_relations).view(-1, self.relation_embedding_dim)
neg_t_embs = self.entity_embeddings(neg_tails).view(-1, self.entity_embedding_dim)
neg_h_proj_vec_embs = self.entity_projections(neg_heads).view(-1, self.entity_embedding_dim)
neg_r_projs_embs = self.relation_projections(neg_relations).view(-1, self.relation_embedding_dim)
neg_t_proj_vec_embs = self.entity_projections(neg_tails).view(-1, self.entity_embedding_dim)
# Project entities
proj_pos_heads = self._project_entities(pos_h_embs, pos_h_proj_vec_embs, pos_r_projs_embs)
proj_pos_tails = self._project_entities(pos_t_embs, pos_t_proj_vec_embs, pos_r_projs_embs)
proj_neg_heads = self._project_entities(neg_h_embs, neg_h_proj_vec_embs, neg_r_projs_embs)
proj_neg_tails = self._project_entities(neg_t_embs, neg_t_proj_vec_embs, neg_r_projs_embs)
pos_scores = self._compute_scores(h_embs=proj_pos_heads, r_embs=pos_r_embs, t_embs=proj_pos_tails)
neg_scores = self._compute_scores(h_embs=proj_neg_heads, r_embs=neg_r_embs, t_embs=proj_neg_tails)
# pos_scores = self._compute_scores(h_embs=pos_h_embs, r_embs=pos_r_embs, t_embs=pos_t_embs)
# neg_scores = self._compute_scores(h_embs=neg_h_embs, r_embs=neg_r_embs, t_embs=neg_t_embs)
print(pos_scores)
print(neg_scores)
exit(0)
loss = self._compute_loss(pos_scores=pos_scores, neg_scores=neg_scores)
return loss
| [
"torch.norm",
"torch.nn.Embedding",
"torch.mul",
"torch.einsum",
"torch.nn.MarginRankingLoss",
"torch.cuda.is_available",
"torch.tensor",
"numpy.repeat"
] | [((895, 965), 'torch.nn.Embedding', 'nn.Embedding', (['self.num_entities', 'self.entity_embedding_dim'], {'max_norm': '(1)'}), '(self.num_entities, self.entity_embedding_dim, max_norm=1)\n', (907, 965), True, 'import torch.nn as nn\n'), ((1001, 1074), 'torch.nn.Embedding', 'nn.Embedding', (['self.num_relations', 'self.relation_embedding_dim'], {'max_norm': '(1)'}), '(self.num_relations, self.relation_embedding_dim, max_norm=1)\n', (1013, 1074), True, 'import torch.nn as nn\n'), ((1109, 1167), 'torch.nn.Embedding', 'nn.Embedding', (['self.num_entities', 'self.entity_embedding_dim'], {}), '(self.num_entities, self.entity_embedding_dim)\n', (1121, 1167), True, 'import torch.nn as nn\n'), ((1204, 1265), 'torch.nn.Embedding', 'nn.Embedding', (['self.num_relations', 'self.relation_embedding_dim'], {}), '(self.num_relations, self.relation_embedding_dim)\n', (1216, 1265), True, 'import torch.nn as nn\n'), ((1292, 1356), 'torch.nn.MarginRankingLoss', 'nn.MarginRankingLoss', ([], {'margin': 'self.margin_loss', 'size_average': '(True)'}), '(margin=self.margin_loss, size_average=True)\n', (1312, 1356), True, 'import torch.nn as nn\n'), ((1806, 1837), 'torch.mul', 'torch.mul', (['distances', 'distances'], {}), '(distances, distances)\n', (1815, 1837), False, 'import torch\n'), ((2258, 2302), 'numpy.repeat', 'np.repeat', (['[-1]'], {'repeats': 'pos_scores.shape[0]'}), '([-1], repeats=pos_scores.shape[0])\n', (2267, 2302), True, 'import numpy as np\n'), ((2315, 2369), 'torch.tensor', 'torch.tensor', (['y'], {'dtype': 'torch.float', 'device': 'self.device'}), '(y, dtype=torch.float, device=self.device)\n', (2327, 2369), False, 'import torch\n'), ((2446, 2509), 'torch.tensor', 'torch.tensor', (['pos_scores'], {'dtype': 'torch.float', 'device': 'self.device'}), '(pos_scores, dtype=torch.float, device=self.device)\n', (2458, 2509), False, 'import torch\n'), ((2531, 2594), 'torch.tensor', 'torch.tensor', (['neg_scores'], {'dtype': 'torch.float', 'device': 'self.device'}), 
'(neg_scores, dtype=torch.float, device=self.device)\n', (2543, 2594), False, 'import torch\n'), ((2933, 2995), 'torch.einsum', 'torch.einsum', (['"""nm,nk->nmk"""', '[relation_projs, entity_proj_vecs]'], {}), "('nm,nk->nmk', [relation_projs, entity_proj_vecs])\n", (2945, 2995), False, 'import torch\n'), ((3105, 3165), 'torch.einsum', 'torch.einsum', (['"""nmk,nk->nm"""', '[transfer_matrices, entity_embs]'], {}), "('nmk,nk->nm', [transfer_matrices, entity_embs])\n", (3117, 3165), False, 'import torch\n'), ((1717, 1768), 'torch.norm', 'torch.norm', (['sum_res'], {'dim': '(1)', 'p': 'self.scoring_fct_norm'}), '(sum_res, dim=1, p=self.scoring_fct_norm)\n', (1727, 1768), False, 'import torch\n'), ((703, 728), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (726, 728), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
mat_dim = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000]
cuda_10 = np.array([ 0.200874667, 0.355701333, 0.825088, 1.690432, 3.307168,
8.166197333, 17.14825567, 32.63356933, 111.283745, 484.6938273, 2846.308431])
cuda_10 /= 1000
mkl_10 = np.array([ 33.91566667, 33.97633333, 37.76466667, 45.224, 47.59066667,
39.62233333, 45.55233333, 113.8413333, 446.626, 1228.294667, 5292.633])
mkl_10 /= 1000
cuda_50 = np.array([ 0.200096, 0.367925333, 0.857098667, 1.661514667, 3.430613333,
8.038730667, 21.216544, 91.01067833, 970.1171467, 7377.11556])
cuda_50 /= 1000
mkl_50 = np.array([ 40.231, 37.823, 39.38833333, 36.63533333, 43.89133333, 60.934,
255.7156667, 407.8676667, 1616.073, 9350.764333])
mkl_50 /= 1000
cuda_90 = np.array([ 0.201301333, 0.364021333, 0.847733333, 1.685066667, 3.446464,
9.327872, 37.00229367, 204.1055347, 2753.997721, 21860.26823])
cuda_90 /= 1000
mkl_90 = np.array([ 34, 37.97666667, 37.71, 35.79066667, 66.924, 242.3616667,
280.5513333, 500.4723333, 3453.995333, 22564.02533])
mkl_90 /= 1000
plt.plot(mat_dim, cuda_10, color="red", linestyle="-", label="CUDA, k/n = 0.1")
plt.plot(mat_dim, mkl_10, color="red", linestyle="--", label="MKL, k/n = 0.1")
plt.plot(mat_dim[:-1], cuda_50, color="blue", linestyle="-", label="CUDA, k/n = 0.5")
plt.plot(mat_dim[:-1], mkl_50, color="blue", linestyle="--", label="MKL, k/n = 0.5")
plt.plot(mat_dim[:-1], cuda_90, color="green", linestyle="-", label="CUDA, k/n = 0.9")
plt.plot(mat_dim[:-1], mkl_90, color="green", linestyle="--", label="MKL, k/n = 0.9")
plt.ylabel("execution time (s)")
plt.xlabel("n")
plt.xscale("linear")
plt.yscale("linear")
plt.xlim([1E3, 1E4])
plt.legend(loc=2)
font = {'family' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.savefig("cuda_results.png")
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.rc",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((151, 304), 'numpy.array', 'np.array', (['[0.200874667, 0.355701333, 0.825088, 1.690432, 3.307168, 8.166197333, \n 17.14825567, 32.63356933, 111.283745, 484.6938273, 2846.308431]'], {}), '([0.200874667, 0.355701333, 0.825088, 1.690432, 3.307168, \n 8.166197333, 17.14825567, 32.63356933, 111.283745, 484.6938273, \n 2846.308431])\n', (159, 304), True, 'import numpy as np\n'), ((322, 468), 'numpy.array', 'np.array', (['[33.91566667, 33.97633333, 37.76466667, 45.224, 47.59066667, 39.62233333, \n 45.55233333, 113.8413333, 446.626, 1228.294667, 5292.633]'], {}), '([33.91566667, 33.97633333, 37.76466667, 45.224, 47.59066667, \n 39.62233333, 45.55233333, 113.8413333, 446.626, 1228.294667, 5292.633])\n', (330, 468), True, 'import numpy as np\n'), ((492, 631), 'numpy.array', 'np.array', (['[0.200096, 0.367925333, 0.857098667, 1.661514667, 3.430613333, 8.038730667,\n 21.216544, 91.01067833, 970.1171467, 7377.11556]'], {}), '([0.200096, 0.367925333, 0.857098667, 1.661514667, 3.430613333, \n 8.038730667, 21.216544, 91.01067833, 970.1171467, 7377.11556])\n', (500, 631), True, 'import numpy as np\n'), ((654, 781), 'numpy.array', 'np.array', (['[40.231, 37.823, 39.38833333, 36.63533333, 43.89133333, 60.934, 255.7156667,\n 407.8676667, 1616.073, 9350.764333]'], {}), '([40.231, 37.823, 39.38833333, 36.63533333, 43.89133333, 60.934, \n 255.7156667, 407.8676667, 1616.073, 9350.764333])\n', (662, 781), True, 'import numpy as np\n'), ((804, 943), 'numpy.array', 'np.array', (['[0.201301333, 0.364021333, 0.847733333, 1.685066667, 3.446464, 9.327872, \n 37.00229367, 204.1055347, 2753.997721, 21860.26823]'], {}), '([0.201301333, 0.364021333, 0.847733333, 1.685066667, 3.446464, \n 9.327872, 37.00229367, 204.1055347, 2753.997721, 21860.26823])\n', (812, 943), True, 'import numpy as np\n'), ((966, 1091), 'numpy.array', 'np.array', (['[34, 37.97666667, 37.71, 35.79066667, 66.924, 242.3616667, 280.5513333, \n 500.4723333, 3453.995333, 22564.02533]'], {}), '([34, 37.97666667, 37.71, 
35.79066667, 66.924, 242.3616667, \n 280.5513333, 500.4723333, 3453.995333, 22564.02533])\n', (974, 1091), True, 'import numpy as np\n'), ((1105, 1184), 'matplotlib.pyplot.plot', 'plt.plot', (['mat_dim', 'cuda_10'], {'color': '"""red"""', 'linestyle': '"""-"""', 'label': '"""CUDA, k/n = 0.1"""'}), "(mat_dim, cuda_10, color='red', linestyle='-', label='CUDA, k/n = 0.1')\n", (1113, 1184), True, 'import matplotlib.pyplot as plt\n'), ((1185, 1263), 'matplotlib.pyplot.plot', 'plt.plot', (['mat_dim', 'mkl_10'], {'color': '"""red"""', 'linestyle': '"""--"""', 'label': '"""MKL, k/n = 0.1"""'}), "(mat_dim, mkl_10, color='red', linestyle='--', label='MKL, k/n = 0.1')\n", (1193, 1263), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1354), 'matplotlib.pyplot.plot', 'plt.plot', (['mat_dim[:-1]', 'cuda_50'], {'color': '"""blue"""', 'linestyle': '"""-"""', 'label': '"""CUDA, k/n = 0.5"""'}), "(mat_dim[:-1], cuda_50, color='blue', linestyle='-', label=\n 'CUDA, k/n = 0.5')\n", (1272, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1350, 1439), 'matplotlib.pyplot.plot', 'plt.plot', (['mat_dim[:-1]', 'mkl_50'], {'color': '"""blue"""', 'linestyle': '"""--"""', 'label': '"""MKL, k/n = 0.5"""'}), "(mat_dim[:-1], mkl_50, color='blue', linestyle='--', label=\n 'MKL, k/n = 0.5')\n", (1358, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1435, 1526), 'matplotlib.pyplot.plot', 'plt.plot', (['mat_dim[:-1]', 'cuda_90'], {'color': '"""green"""', 'linestyle': '"""-"""', 'label': '"""CUDA, k/n = 0.9"""'}), "(mat_dim[:-1], cuda_90, color='green', linestyle='-', label=\n 'CUDA, k/n = 0.9')\n", (1443, 1526), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1612), 'matplotlib.pyplot.plot', 'plt.plot', (['mat_dim[:-1]', 'mkl_90'], {'color': '"""green"""', 'linestyle': '"""--"""', 'label': '"""MKL, k/n = 0.9"""'}), "(mat_dim[:-1], mkl_90, color='green', linestyle='--', label=\n 'MKL, k/n = 0.9')\n", (1530, 1612), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1641), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""execution time (s)"""'], {}), "('execution time (s)')\n", (1619, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1657), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n"""'], {}), "('n')\n", (1652, 1657), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1679), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""linear"""'], {}), "('linear')\n", (1669, 1679), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1700), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""linear"""'], {}), "('linear')\n", (1690, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1728), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[1000.0, 10000.0]'], {}), '([1000.0, 10000.0])\n', (1709, 1728), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1740), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (1733, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1825), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (1809, 1825), False, 'import matplotlib\n'), ((1889, 1920), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cuda_results.png"""'], {}), "('cuda_results.png')\n", (1900, 1920), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import imageio
from pathlib import Path
import multiprocessing as mp
from starfish import data, FieldOfView
from starfish.types import Axes, Features
from starfish.image import Filter
from starfish.core.imagestack.imagestack import ImageStack
from starfish.spots import DecodeSpots, FindSpots
from starfish import Codebook
def find_spots_all_samples(config, parallelize=0):
"""
"""
# TODO: Fill in docstring
workspace_directory = config["workspace_directory"]
input_directory = Path(workspace_directory, "stitched")
output_directory = Path(workspace_directory, "spots_only")
output_directory.mkdir(exist_ok=True)
samples = config["samples"]
# TODO: Figure out if it's possible to parallelize by sample here.
if parallelize > 0:
num_processes = mp.cpu_count()
print(num_processes)
processes = []
for sample in samples:
process = mp.Process(target=find_spots_single_sample, args=(sample, input_directory, output_directory, parallelize - 1))
process.start()
processes.append(process)
for process in processes:
process.join()
else:
for sample in samples:
find_spots_single_sample(sample, input_directory, output_directory)
def find_spots_single_sample(sample, input_directory, output_directory, parallelize=0):
"""
"""
# TODO: Fill in docstring
sample_name = sample["name"]
rounds = sample["rounds"]
processes = []
for round_index, imaging_round in enumerate(rounds, start=1):
if parallelize > 0:
process = mp.Process(target=find_spots_in_round, args=(sample_name, round_index, imaging_round, input_directory, output_directory))
process.start()
processes.append(process)
else:
find_spots_in_round(sample_name, round_index, imaging_round, input_directory, output_directory)
for process in processes:
process.join()
def find_spots_in_round(sample_name, round_index, imaging_round, input_directory, output_directory):
"""
"""
round_directory = ("round%d" % round_index)
sample_input_subdirectory = input_directory / sample_name / round_directory
sample_output_subdirectory = output_directory / sample_name / round_directory
sample_output_subdirectory.mkdir(parents=True, exist_ok=True)
channels = imaging_round["channels"]
filename = imaging_round["filename"]
reference_channel = imaging_round["reference_channel"]
for channel_index, channel in enumerate(channels):
input_filename = filename + ("_fused_tp_0_ch_%d.tif" % channel_index)
output_filename = channel + ".tif"
input_path = sample_input_subdirectory / input_filename
output_path = sample_output_subdirectory / output_filename
if channel_index == reference_channel:
image_stack = imageio.volread(input_path)
max_filtered_image_stack = image_stack.max(0)
imageio.imsave(output_path, max_filtered_image_stack)
else:
find_spots(input_path, output_path)
def find_spots(input_path, output_path, intensity_percentile=99.995, filter_width=2, small_peak_min=4, small_peak_max=100,
big_peak_min=25, big_peak_max=10000, small_peak_dist=2, big_peak_dist=0.75, block_dim_fraction=0.25,
spot_pad_pixels=2, keep_existing=False):
"""
Find and keep only spots from stitched images.
"""
image_stack = imageio.volread(input_path)
print(image_stack.shape)
thresholded_image = np.copy(image_stack)
_, height, width = image_stack.shape
threshold = np.percentile(thresholded_image, intensity_percentile)
thresholded_image[thresholded_image > threshold] = threshold + (np.log(thresholded_image[thresholded_image > threshold] - threshold)/np.log(1.1)).astype(thresholded_image.dtype)
#May need to fiddle with the sigma parameters in each step, depending on the image.
#High Pass Filter (Background Subtraction)
gaussian_high_pass = Filter.GaussianHighPass(sigma=(1, filter_width, filter_width), is_volume=True)
# enhance brightness of spots
laplace_filter = Filter.Laplace(sigma=(0.2, 0.5, 0.5), is_volume=True)
local_max_peakfinder_small = FindSpots.LocalMaxPeakFinder(
min_distance=small_peak_dist,
stringency=0,
min_obj_area=small_peak_min,
max_obj_area=small_peak_max,
min_num_spots_detected=2500,
is_volume=True,
verbose=True
)
local_max_peakfinder_big = FindSpots.LocalMaxPeakFinder(
min_distance=big_peak_dist,
stringency=0,
min_obj_area=big_peak_min,
max_obj_area=big_peak_max,
min_num_spots_detected=2500,
is_volume=True,
verbose=True
)
synthetic_codebook= Codebook.synthetic_one_hot_codebook(n_round=1, n_channel=1, n_codes=1)
decoder = DecodeSpots.PerRoundMaxChannel(codebook=synthetic_codebook)
block_dimension = int(max(thresholded_image.shape) * block_dim_fraction)
spot_coordinates= np.zeros((0, 2), dtype=np.int64)
# Finding spots by block_dimension x block_dimension size blocks
# We skip the blocks at the edges with the - 1 (TODO: pad to full block size)
for row in range(0, height - 1, block_dimension):
for column in range(0, width - 1, block_dimension):
# Cutout block and expand dimensions for channel and round
block = thresholded_image[np.newaxis, np.newaxis, :, row:row+block_dimension, column:column+block_dimension]
images = ImageStack.from_numpy(block)
high_pass_filtered = gaussian_high_pass.run(images, verbose=False, in_place=False)
laplace = laplace_filter.run(high_pass_filtered, in_place=False,verbose=False)
small_spots = local_max_peakfinder_small.run(laplace.reduce({Axes.ZPLANE}, func="max"))
decoded_intensities = decoder.run(spots=small_spots)
small_spot_coords = np.stack([decoded_intensities[Axes.Y.value], decoded_intensities[Axes.X.value]]).T
big_spots = local_max_peakfinder_big.run(laplace.reduce({Axes.ZPLANE}, func="max"))
decoded_intensities = decoder.run(spots=big_spots)
big_spot_coords = np.stack([decoded_intensities[Axes.Y.value], decoded_intensities[Axes.X.value]]).T
all_spot_coords = np.vstack([small_spot_coords, big_spot_coords])
all_spot_coords += (row, column)
spot_coordinates = np.vstack([spot_coordinates, all_spot_coords])
# Copying over only non-zero pixels
image_spots = np.zeros((height, width), dtype=np.uint16)
for spot_coordinate in spot_coordinates:
spot_column, spot_row = spot_coordinate
for row in range(max(0, spot_column-spot_pad_pixels), min(spot_column+spot_pad_pixels+1, height)):
for column in range(max(0, spot_row-spot_pad_pixels), min(spot_row+spot_pad_pixels+1, width)):
# Max projecting over z-stack
image_spots[row, column] = image_stack[:, row, column].max(0)
imageio.imsave(output_path, image_spots)
return image_spots
| [
"numpy.stack",
"starfish.Codebook.synthetic_one_hot_codebook",
"numpy.copy",
"numpy.log",
"starfish.spots.FindSpots.LocalMaxPeakFinder",
"imageio.imsave",
"numpy.zeros",
"starfish.image.Filter.GaussianHighPass",
"numpy.percentile",
"starfish.spots.DecodeSpots.PerRoundMaxChannel",
"pathlib.Path",... | [((520, 557), 'pathlib.Path', 'Path', (['workspace_directory', '"""stitched"""'], {}), "(workspace_directory, 'stitched')\n", (524, 557), False, 'from pathlib import Path\n'), ((581, 620), 'pathlib.Path', 'Path', (['workspace_directory', '"""spots_only"""'], {}), "(workspace_directory, 'spots_only')\n", (585, 620), False, 'from pathlib import Path\n'), ((3512, 3539), 'imageio.volread', 'imageio.volread', (['input_path'], {}), '(input_path)\n', (3527, 3539), False, 'import imageio\n'), ((3594, 3614), 'numpy.copy', 'np.copy', (['image_stack'], {}), '(image_stack)\n', (3601, 3614), True, 'import numpy as np\n'), ((3678, 3732), 'numpy.percentile', 'np.percentile', (['thresholded_image', 'intensity_percentile'], {}), '(thresholded_image, intensity_percentile)\n', (3691, 3732), True, 'import numpy as np\n'), ((4081, 4159), 'starfish.image.Filter.GaussianHighPass', 'Filter.GaussianHighPass', ([], {'sigma': '(1, filter_width, filter_width)', 'is_volume': '(True)'}), '(sigma=(1, filter_width, filter_width), is_volume=True)\n', (4104, 4159), False, 'from starfish.image import Filter\n'), ((4220, 4273), 'starfish.image.Filter.Laplace', 'Filter.Laplace', ([], {'sigma': '(0.2, 0.5, 0.5)', 'is_volume': '(True)'}), '(sigma=(0.2, 0.5, 0.5), is_volume=True)\n', (4234, 4273), False, 'from starfish.image import Filter\n'), ((4307, 4504), 'starfish.spots.FindSpots.LocalMaxPeakFinder', 'FindSpots.LocalMaxPeakFinder', ([], {'min_distance': 'small_peak_dist', 'stringency': '(0)', 'min_obj_area': 'small_peak_min', 'max_obj_area': 'small_peak_max', 'min_num_spots_detected': '(2500)', 'is_volume': '(True)', 'verbose': '(True)'}), '(min_distance=small_peak_dist, stringency=0,\n min_obj_area=small_peak_min, max_obj_area=small_peak_max,\n min_num_spots_detected=2500, is_volume=True, verbose=True)\n', (4335, 4504), False, 'from starfish.spots import DecodeSpots, FindSpots\n'), ((4591, 4782), 'starfish.spots.FindSpots.LocalMaxPeakFinder', 'FindSpots.LocalMaxPeakFinder', ([], 
{'min_distance': 'big_peak_dist', 'stringency': '(0)', 'min_obj_area': 'big_peak_min', 'max_obj_area': 'big_peak_max', 'min_num_spots_detected': '(2500)', 'is_volume': '(True)', 'verbose': '(True)'}), '(min_distance=big_peak_dist, stringency=0,\n min_obj_area=big_peak_min, max_obj_area=big_peak_max,\n min_num_spots_detected=2500, is_volume=True, verbose=True)\n', (4619, 4782), False, 'from starfish.spots import DecodeSpots, FindSpots\n'), ((4862, 4932), 'starfish.Codebook.synthetic_one_hot_codebook', 'Codebook.synthetic_one_hot_codebook', ([], {'n_round': '(1)', 'n_channel': '(1)', 'n_codes': '(1)'}), '(n_round=1, n_channel=1, n_codes=1)\n', (4897, 4932), False, 'from starfish import Codebook\n'), ((4947, 5006), 'starfish.spots.DecodeSpots.PerRoundMaxChannel', 'DecodeSpots.PerRoundMaxChannel', ([], {'codebook': 'synthetic_codebook'}), '(codebook=synthetic_codebook)\n', (4977, 5006), False, 'from starfish.spots import DecodeSpots, FindSpots\n'), ((5107, 5139), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {'dtype': 'np.int64'}), '((0, 2), dtype=np.int64)\n', (5115, 5139), True, 'import numpy as np\n'), ((6674, 6716), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint16'}), '((height, width), dtype=np.uint16)\n', (6682, 6716), True, 'import numpy as np\n'), ((7153, 7193), 'imageio.imsave', 'imageio.imsave', (['output_path', 'image_spots'], {}), '(output_path, image_spots)\n', (7167, 7193), False, 'import imageio\n'), ((816, 830), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (828, 830), True, 'import multiprocessing as mp\n'), ((936, 1050), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'find_spots_single_sample', 'args': '(sample, input_directory, output_directory, parallelize - 1)'}), '(target=find_spots_single_sample, args=(sample, input_directory,\n output_directory, parallelize - 1))\n', (946, 1050), True, 'import multiprocessing as mp\n'), ((1631, 1756), 'multiprocessing.Process', 'mp.Process', ([], {'target': 
'find_spots_in_round', 'args': '(sample_name, round_index, imaging_round, input_directory, output_directory)'}), '(target=find_spots_in_round, args=(sample_name, round_index,\n imaging_round, input_directory, output_directory))\n', (1641, 1756), True, 'import multiprocessing as mp\n'), ((2914, 2941), 'imageio.volread', 'imageio.volread', (['input_path'], {}), '(input_path)\n', (2929, 2941), False, 'import imageio\n'), ((3012, 3065), 'imageio.imsave', 'imageio.imsave', (['output_path', 'max_filtered_image_stack'], {}), '(output_path, max_filtered_image_stack)\n', (3026, 3065), False, 'import imageio\n'), ((5619, 5647), 'starfish.core.imagestack.imagestack.ImageStack.from_numpy', 'ImageStack.from_numpy', (['block'], {}), '(block)\n', (5640, 5647), False, 'from starfish.core.imagestack.imagestack import ImageStack\n'), ((6443, 6490), 'numpy.vstack', 'np.vstack', (['[small_spot_coords, big_spot_coords]'], {}), '([small_spot_coords, big_spot_coords])\n', (6452, 6490), True, 'import numpy as np\n'), ((6568, 6614), 'numpy.vstack', 'np.vstack', (['[spot_coordinates, all_spot_coords]'], {}), '([spot_coordinates, all_spot_coords])\n', (6577, 6614), True, 'import numpy as np\n'), ((6032, 6117), 'numpy.stack', 'np.stack', (['[decoded_intensities[Axes.Y.value], decoded_intensities[Axes.X.value]]'], {}), '([decoded_intensities[Axes.Y.value], decoded_intensities[Axes.X.value]]\n )\n', (6040, 6117), True, 'import numpy as np\n'), ((6317, 6402), 'numpy.stack', 'np.stack', (['[decoded_intensities[Axes.Y.value], decoded_intensities[Axes.X.value]]'], {}), '([decoded_intensities[Axes.Y.value], decoded_intensities[Axes.X.value]]\n )\n', (6325, 6402), True, 'import numpy as np\n'), ((3801, 3869), 'numpy.log', 'np.log', (['(thresholded_image[thresholded_image > threshold] - threshold)'], {}), '(thresholded_image[thresholded_image > threshold] - threshold)\n', (3807, 3869), True, 'import numpy as np\n'), ((3870, 3881), 'numpy.log', 'np.log', (['(1.1)'], {}), '(1.1)\n', (3876, 3881), True, 
'import numpy as np\n')] |
import numpy as np
from skorch import NeuralNet, NeuralNetRegressor
from skorch.callbacks import EpochScoring, ProgressBar
from skorch.helper import predefined_split
from skorch.utils import to_numpy
from sklearn.base import TransformerMixin
from braindecode import EEGClassifier, EEGRegressor
class EEGTransformer(EEGClassifier, TransformerMixin):
def __init__(self, *args, **kwargs):
super(EEGTransformer, self).__init__(*args, **kwargs)
def get_loss(self, y_pred, y_true, X, **kwargs):
if len(y_pred) == 2:
y_pred, _ = y_pred
return super().get_loss(y_pred, y_true=y_true, X=X, **kwargs)
def transform(self, X):
out = []
for outs in self.forward_iter(X, training=False):
outs = outs[1] if isinstance(outs, tuple) else outs
out.append(to_numpy(outs))
transforms = np.concatenate(out, 0)
return transforms | [
"skorch.utils.to_numpy",
"numpy.concatenate"
] | [((877, 899), 'numpy.concatenate', 'np.concatenate', (['out', '(0)'], {}), '(out, 0)\n', (891, 899), True, 'import numpy as np\n'), ((840, 854), 'skorch.utils.to_numpy', 'to_numpy', (['outs'], {}), '(outs)\n', (848, 854), False, 'from skorch.utils import to_numpy\n')] |
import logging
import numpy as np
import cv2
import easyocr
from skimage.segmentation import clear_border
import onnxruntime
import logging as log
class Inference_engine:
def __init__(self, input_image, detector_model, nlp_model, detector_conf=0.1, nlp_conf=0.4, iou_thresh=0.5):
self.input_img = input_image
self.input_img_width = self.input_img.shape[1]
self.input_img_height = self.input_img.shape[0]
# Define Prediction Cofficents
self.detector_conf = detector_conf
self.iou_thresh = iou_thresh
self.nlp_conf = nlp_conf
# flag for detection
self.success_detection = False
self.txt_data = None
# Load the model once in the memory
self.session = detector_model
self.en_reader = nlp_model[0]
self.ar_reader = nlp_model[1]
# call function to get licence_plate info
# self.get_licenceplate_info()
def get_licenceplate_info(self):
IN_IMAGE_H = self.session.get_inputs()[0].shape[2]
IN_IMAGE_W = self.session.get_inputs()[0].shape[3]
decoded_img = self.decode_img(self.input_img, shape=(IN_IMAGE_H, IN_IMAGE_W))
detections = self.detect(decoded_img)
boxes = self.post_processing(detections, conf_thresh=self.detector_conf,
nms_thresh=self.iou_thresh)
self.bounding_cords = self.decode_boxes(boxes)
if self.bounding_cords is None:
logging.info("No Detections from model")
elif not self.check_out_of_bounds():
img_alpr = self.input_img[self.bounding_cords[1] - 20:self.bounding_cords[3] + 5,
self.bounding_cords[0] - 20:self.bounding_cords[2] + 20]
self.txt_data = self.NLP_model(img_alpr, nlp_confidence=self.nlp_conf)
if len(self.txt_data) == 0:
img_alpr_mod = self.enhance_image(img_alpr)
mod_txt_data = self.NLP_model(img_alpr_mod, nlp_confidence=self.nlp_conf)
self.txt_data = mod_txt_data
return self.txt_data
def check_out_of_bounds(self):
out_of_bounds = False
if (self.bounding_cords[0] > self.input_img_width) and (self.bounding_cords[2] > self.input_img_width) and (
self.bounding_cords[1] > self.input_img_height) and (self.bounding_cords[3] > self.input_img_height):
out_of_bounds = True
return out_of_bounds
def enhance_image(self, crop_image):
gray = cv2.cvtColor(crop_image, cv2.COLOR_RGB2GRAY)
rectKern = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 10))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKern)
blackhat = clear_border(blackhat)
return blackhat
def NLP_model(self, cropped_img, nlp_confidence=0.0):
nlp_data = []
# run NLP model on cropped image
results_en = self.en_reader.readtext(cropped_img)
results_ar = self.ar_reader.readtext(cropped_img)
# get results
text_en = [r[-2].translate({ord(i): None for i in "':!?+|\/}{()*&#%-_= "}) for r in results_en]
text_ar = [r[-2].translate({ord(i): None for i in "':!?+|\/}{%&()-_= "}) for r in results_ar ]
diff_txt = set(text_ar) - set(text_en)
nlp_data = list(text_en + list(diff_txt))
return nlp_data
def detect(self, decoded_image):
input_name = self.session.get_inputs()[0].name
outputs = self.session.get_outputs()
output_names = list(map(lambda output: output.name, outputs))
detections = self.session.run(output_names, {input_name: decoded_image})
return detections
def nms_cpu(self, boxes, confs, nms_thresh=0.4, min_mode=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = confs.argsort()[::-1]
keep = []
while order.size > 0:
idx_self = order[0]
idx_other = order[1:]
keep.append(idx_self)
xx1 = np.maximum(x1[idx_self], x1[idx_other])
yy1 = np.maximum(y1[idx_self], y1[idx_other])
xx2 = np.minimum(x2[idx_self], x2[idx_other])
yy2 = np.minimum(y2[idx_self], y2[idx_other])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
if min_mode:
over = inter / np.minimum(areas[order[0]], areas[order[1:]])
else:
over = inter / (areas[order[0]] + areas[order[1:]] - inter)
inds = np.where(over <= nms_thresh)[0]
order = order[inds + 1]
return np.array(keep)
def post_processing(self, output, conf_thresh=0.3, nms_thresh=0.5):
# [batch, num, 1, 4]
box_array = output[0]
# [batch, num, num_classes]
confs = output[1]
if type(box_array).__name__ != 'ndarray':
box_array = box_array.cpu().detach().numpy()
confs = confs.cpu().detach().numpy()
num_classes = confs.shape[2]
# [batch, num, 4]
box_array = box_array[:, :, 0]
# [batch, num, num_classes] --> [batch, num]
max_conf = np.max(confs, axis=2)
max_id = np.argmax(confs, axis=2)
bboxes_batch = []
for i in range(box_array.shape[0]):
argwhere = max_conf[i] > conf_thresh
l_box_array = box_array[i, argwhere, :]
l_max_conf = max_conf[i, argwhere]
l_max_id = max_id[i, argwhere]
bboxes = []
# nms for each class
for j in range(num_classes):
cls_argwhere = l_max_id == j
ll_box_array = l_box_array[cls_argwhere, :]
ll_max_conf = l_max_conf[cls_argwhere]
ll_max_id = l_max_id[cls_argwhere]
keep = self.nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
if (keep.size > 0):
ll_box_array = ll_box_array[keep, :]
ll_max_conf = ll_max_conf[keep]
ll_max_id = ll_max_id[keep]
for k in range(ll_box_array.shape[0]):
bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3],
ll_max_conf[k], ll_max_conf[k], ll_max_id[k]])
bboxes_batch.append(bboxes)
return bboxes_batch
def decode_boxes(self, boxes):
cords = None
for i in range(len(boxes[0])):
box = boxes[0]
x1 = int(box[i][0] * self.input_img_width)
y1 = int(box[i][1] * self.input_img_height)
x2 = int(box[i][2] * self.input_img_width)
y2 = int(box[i][3] * self.input_img_height)
cords = (x1, y1, x2, y2)
return cords
@staticmethod
def decode_img(img, shape=(320, 320), channel=3):
output_img = None
try:
resized = cv2.resize(img, shape, interpolation=cv2.INTER_LINEAR)
trp_img = np.transpose(resized, (2, 0, 1)).astype(np.float32)
output_img = np.expand_dims(trp_img, axis=0)
output_img /= 255.0
except IOError as e:
log.error('{}! Unable to read image'.format(e))
return output_img
if __name__ == '__main__':
# Add object detector models
Model_path = 'yolov4_1_3_320_320_static.onnx'
model = onnxruntime.InferenceSession(Model_path)
# add NLP Models
en_model = easyocr.Reader(['en'])
ar_model = easyocr.Reader(['ar'])
nlp_models = [en_model, ar_model]
# image path
img_path = '/home/tandonsa/PycharmProjects/test_gpu/Licence_plate/dataset/vehicleplates/IMG-20210610-WA0044.jpg'
input_img = cv2.imread(img_path)
input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
# create Object instance
model_infer = Inference_engine(input_img, model, nlp_models)
print(model_infer.get_licenceplate_info())
| [
"numpy.minimum",
"numpy.maximum",
"numpy.argmax",
"cv2.cvtColor",
"cv2.getStructuringElement",
"cv2.morphologyEx",
"numpy.transpose",
"numpy.expand_dims",
"onnxruntime.InferenceSession",
"cv2.imread",
"skimage.segmentation.clear_border",
"numpy.max",
"numpy.array",
"logging.info",
"numpy... | [((7465, 7505), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['Model_path'], {}), '(Model_path)\n', (7493, 7505), False, 'import onnxruntime\n'), ((7542, 7564), 'easyocr.Reader', 'easyocr.Reader', (["['en']"], {}), "(['en'])\n", (7556, 7564), False, 'import easyocr\n'), ((7580, 7602), 'easyocr.Reader', 'easyocr.Reader', (["['ar']"], {}), "(['ar'])\n", (7594, 7602), False, 'import easyocr\n'), ((7791, 7811), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7801, 7811), False, 'import cv2\n'), ((7828, 7870), 'cv2.cvtColor', 'cv2.cvtColor', (['input_img', 'cv2.COLOR_BGR2RGB'], {}), '(input_img, cv2.COLOR_BGR2RGB)\n', (7840, 7870), False, 'import cv2\n'), ((2503, 2547), 'cv2.cvtColor', 'cv2.cvtColor', (['crop_image', 'cv2.COLOR_RGB2GRAY'], {}), '(crop_image, cv2.COLOR_RGB2GRAY)\n', (2515, 2547), False, 'import cv2\n'), ((2567, 2618), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(25, 10)'], {}), '(cv2.MORPH_RECT, (25, 10))\n', (2592, 2618), False, 'import cv2\n'), ((2638, 2690), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_BLACKHAT', 'rectKern'], {}), '(gray, cv2.MORPH_BLACKHAT, rectKern)\n', (2654, 2690), False, 'import cv2\n'), ((2710, 2732), 'skimage.segmentation.clear_border', 'clear_border', (['blackhat'], {}), '(blackhat)\n', (2722, 2732), False, 'from skimage.segmentation import clear_border\n'), ((4697, 4711), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (4705, 4711), True, 'import numpy as np\n'), ((5237, 5258), 'numpy.max', 'np.max', (['confs'], {'axis': '(2)'}), '(confs, axis=2)\n', (5243, 5258), True, 'import numpy as np\n'), ((5276, 5300), 'numpy.argmax', 'np.argmax', (['confs'], {'axis': '(2)'}), '(confs, axis=2)\n', (5285, 5300), True, 'import numpy as np\n'), ((1474, 1514), 'logging.info', 'logging.info', (['"""No Detections from model"""'], {}), "('No Detections from model')\n", (1486, 1514), False, 'import logging\n'), ((4073, 4112), 
'numpy.maximum', 'np.maximum', (['x1[idx_self]', 'x1[idx_other]'], {}), '(x1[idx_self], x1[idx_other])\n', (4083, 4112), True, 'import numpy as np\n'), ((4131, 4170), 'numpy.maximum', 'np.maximum', (['y1[idx_self]', 'y1[idx_other]'], {}), '(y1[idx_self], y1[idx_other])\n', (4141, 4170), True, 'import numpy as np\n'), ((4189, 4228), 'numpy.minimum', 'np.minimum', (['x2[idx_self]', 'x2[idx_other]'], {}), '(x2[idx_self], x2[idx_other])\n', (4199, 4228), True, 'import numpy as np\n'), ((4247, 4286), 'numpy.minimum', 'np.minimum', (['y2[idx_self]', 'y2[idx_other]'], {}), '(y2[idx_self], y2[idx_other])\n', (4257, 4286), True, 'import numpy as np\n'), ((4303, 4329), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1)'], {}), '(0.0, xx2 - xx1)\n', (4313, 4329), True, 'import numpy as np\n'), ((4346, 4372), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1)'], {}), '(0.0, yy2 - yy1)\n', (4356, 4372), True, 'import numpy as np\n'), ((7008, 7062), 'cv2.resize', 'cv2.resize', (['img', 'shape'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, shape, interpolation=cv2.INTER_LINEAR)\n', (7018, 7062), False, 'import cv2\n'), ((7162, 7193), 'numpy.expand_dims', 'np.expand_dims', (['trp_img'], {'axis': '(0)'}), '(trp_img, axis=0)\n', (7176, 7193), True, 'import numpy as np\n'), ((4614, 4642), 'numpy.where', 'np.where', (['(over <= nms_thresh)'], {}), '(over <= nms_thresh)\n', (4622, 4642), True, 'import numpy as np\n'), ((4455, 4500), 'numpy.minimum', 'np.minimum', (['areas[order[0]]', 'areas[order[1:]]'], {}), '(areas[order[0]], areas[order[1:]])\n', (4465, 4500), True, 'import numpy as np\n'), ((7085, 7117), 'numpy.transpose', 'np.transpose', (['resized', '(2, 0, 1)'], {}), '(resized, (2, 0, 1))\n', (7097, 7117), True, 'import numpy as np\n')] |
__author__ = 'edill'
from pprint import pprint
from atom.api import Atom, Str, observe, Typed
import numpy as np
class XRF(Atom):
    """Observable XRF data model: watches the file name and reloads data."""
    # Directory containing the data file (currently unused by load_data).
    folder_name = Str()
    # Path handed directly to np.loadtxt.
    file_name = Str()
    # Loaded array; Typed(object) so any np.loadtxt result is accepted.
    data = Typed(object)
    @observe('folder_name', 'file_name')
    def update(self, changed):
        """React to name changes; reload the data when file_name changes."""
        pprint(changed)
        # Skip the initial 'create' notification fired on first assignment.
        if changed['type'] == 'create':
            return
        print('{} was changed from {} to {}'.format(changed['name'],
                                                    changed['oldvalue'],
                                                    changed['value']))
        if changed['name'] == 'file_name':
            self.load_data()
    def load_data(self):
        """Load file_name with np.loadtxt into self.data (fires data_changed)."""
        self.data = np.loadtxt(self.file_name)
    @observe('data')
    def data_changed(self, data):
        """Log a preview of the newly loaded data."""
        print('The data was changed. First five lines of new data:\n{}'
              ''.format(self.data[:5]))
| [
"atom.api.Str",
"atom.api.observe",
"atom.api.Typed",
"numpy.loadtxt",
"pprint.pprint"
] | [((150, 155), 'atom.api.Str', 'Str', ([], {}), '()\n', (153, 155), False, 'from atom.api import Atom, Str, observe, Typed\n'), ((172, 177), 'atom.api.Str', 'Str', ([], {}), '()\n', (175, 177), False, 'from atom.api import Atom, Str, observe, Typed\n'), ((189, 202), 'atom.api.Typed', 'Typed', (['object'], {}), '(object)\n', (194, 202), False, 'from atom.api import Atom, Str, observe, Typed\n'), ((209, 244), 'atom.api.observe', 'observe', (['"""folder_name"""', '"""file_name"""'], {}), "('folder_name', 'file_name')\n", (216, 244), False, 'from atom.api import Atom, Str, observe, Typed\n'), ((723, 738), 'atom.api.observe', 'observe', (['"""data"""'], {}), "('data')\n", (730, 738), False, 'from atom.api import Atom, Str, observe, Typed\n'), ((284, 299), 'pprint.pprint', 'pprint', (['changed'], {}), '(changed)\n', (290, 299), False, 'from pprint import pprint\n'), ((690, 716), 'numpy.loadtxt', 'np.loadtxt', (['self.file_name'], {}), '(self.file_name)\n', (700, 716), True, 'import numpy as np\n')] |
import pyscipopt
from pyscipopt import Model
import ecole
import numpy
import matplotlib.pyplot as plt
import pathlib
from localbranching import addLBConstraint
from geco.mips.loading.miplib import Loader
from event import PrimalBoundChangeEventHandler
# Output mode/name for this experiment run.
modes = ['improve-supportbinvars', 'improve-binvars']
mode = modes[1]
directory = './result/miplib2017/' + mode + '/'
pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
# directory = './result/miplib2017/improve/'
# Pre-computed list of MIPLIB 2017 binary instance names.
data = numpy.load('./result/miplib2017/miplib2017_binary39.npz')
miplib2017_binary39 = data['miplib2017_binary39']
for p in range(0, len(miplib2017_binary39)):
    instance = Loader().load_instance(miplib2017_binary39[p] + '.mps.gz')
    MIP_model = instance
    print(MIP_model.getProbName())
    # Event handler records every primal bound improvement and its timestamp.
    primalbound_handler = PrimalBoundChangeEventHandler()
    MIP_model.includeEventhdlr(primalbound_handler, 'primal_bound_update_handler',
                               'store every new primal bound and its time stamp')
    # Disable presolving/restarts and solver output so runs stay comparable.
    MIP_model.setParam('presolving/maxrounds', 0)
    MIP_model.setParam('presolving/maxrestarts', 0)
    MIP_model.setParam("display/verblevel", 0)
    MIP_model.optimize()
    status = MIP_model.getStatus()
    if status == 'optimal':
        obj = MIP_model.getObjVal()
        time = MIP_model.getSolvingTime()
        data = [obj, time]
        # filename = f'{directory_opt}{instance_name}-optimal-obj-time.pkl'
        # with gzip.open(filename, 'wb') as f:
        #     pickle.dump(data, f)
    print("instance:", MIP_model.getProbName(),
          "status:", MIP_model.getStatus(),
          "best obj: ", MIP_model.getObjVal(),
          "solving time: ", MIP_model.getSolvingTime())
    print('primal bounds: ')
    print(primalbound_handler.primal_bounds)
    print('times: ')
    print(primalbound_handler.primal_times)
    # Free SCIP's internal problem memory before loading the next instance.
    MIP_model.freeProb()
    del MIP_model
| [
"event.PrimalBoundChangeEventHandler",
"geco.mips.loading.miplib.Loader",
"numpy.load",
"pathlib.Path"
] | [((486, 543), 'numpy.load', 'numpy.load', (['"""./result/miplib2017/miplib2017_binary39.npz"""'], {}), "('./result/miplib2017/miplib2017_binary39.npz')\n", (496, 543), False, 'import numpy\n'), ((801, 832), 'event.PrimalBoundChangeEventHandler', 'PrimalBoundChangeEventHandler', ([], {}), '()\n', (830, 832), False, 'from event import PrimalBoundChangeEventHandler\n'), ((374, 397), 'pathlib.Path', 'pathlib.Path', (['directory'], {}), '(directory)\n', (386, 397), False, 'import pathlib\n'), ((655, 663), 'geco.mips.loading.miplib.Loader', 'Loader', ([], {}), '()\n', (661, 663), False, 'from geco.mips.loading.miplib import Loader\n')] |
import os
import argparse
import numpy as np
import baseline as bl
# Command-line tool: embed whitespace-tokenized text with a saved model.
parser = argparse.ArgumentParser(description='Embed text and save to a .npy')
parser.add_argument('--model', help='An embedding model', required=True, type=str)
parser.add_argument('--text', help='raw value', type=str)
parser.add_argument('--backend', help='backend', default='tf')
parser.add_argument('--remote', help='(optional) remote endpoint', type=str) # localhost:8500
parser.add_argument('--name', help='(optional) service name', type=str)
parser.add_argument('--device', help='device')
parser.add_argument('--preproc', help='(optional) where to perform preprocessing', choices={'client', 'server'}, default='client')
args = parser.parse_args()
# --text may be a file path (one example per line) or a literal string.
if os.path.exists(args.text) and os.path.isfile(args.text):
    texts = []
    with open(args.text, 'r') as f:
        for line in f:
            text = line.strip().split()
            texts += [text]
    out = os.path.splitext(args.text)[0]
else:
    texts = [args.text.split()]
    out = 'cli_text'
m = bl.EmbeddingsService.load(
    args.model, backend=args.backend,
    remote=args.remote, name=args.name,
    preproc=args.preproc, device=args.device,
)
embedded = m.predict(texts)
# Persist embeddings next to the input file (or as cli_text.npy).
np.save(out, embedded)
| [
"numpy.save",
"argparse.ArgumentParser",
"os.path.exists",
"os.path.isfile",
"os.path.splitext",
"baseline.EmbeddingsService.load"
] | [((77, 145), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Embed text and save to a .npy"""'}), "(description='Embed text and save to a .npy')\n", (100, 145), False, 'import argparse\n'), ((1029, 1171), 'baseline.EmbeddingsService.load', 'bl.EmbeddingsService.load', (['args.model'], {'backend': 'args.backend', 'remote': 'args.remote', 'name': 'args.name', 'preproc': 'args.preproc', 'device': 'args.device'}), '(args.model, backend=args.backend, remote=args.\n remote, name=args.name, preproc=args.preproc, device=args.device)\n', (1054, 1171), True, 'import baseline as bl\n'), ((1212, 1234), 'numpy.save', 'np.save', (['out', 'embedded'], {}), '(out, embedded)\n', (1219, 1234), True, 'import numpy as np\n'), ((725, 750), 'os.path.exists', 'os.path.exists', (['args.text'], {}), '(args.text)\n', (739, 750), False, 'import os\n'), ((755, 780), 'os.path.isfile', 'os.path.isfile', (['args.text'], {}), '(args.text)\n', (769, 780), False, 'import os\n'), ((934, 961), 'os.path.splitext', 'os.path.splitext', (['args.text'], {}), '(args.text)\n', (950, 961), False, 'import os\n')] |
import os, gzip, csv, torch, cv2, torchvision, random
import torch.nn as nn
import numpy as np
import scipy.ndimage as ndi
import scipy.misc
import imageio
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from collections import defaultdict
# Global image/patch geometry used across the module.
IMG_HEIGHT, IMG_WIDTH = 400, 400
patch_size = 128
# Standard ImageNet channel statistics used to normalize ResNet inputs.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
cuda = True if torch.cuda.is_available() else False
print('cuda:', cuda)
# Frozen ImageNet-pretrained ResNet-34 used purely as a feature extractor.
resnet = torchvision.models.resnet34(pretrained=True)
for param in resnet.parameters():
    param.requires_grad = False
if cuda:
    resnet = resnet.cuda()
# Handles to intermediate layers, used to hook feature maps out of the net.
layer1 = resnet._modules.get('layer1')
layer2 = resnet._modules.get('layer2')
layer3 = resnet._modules.get('layer3')
layer4 = resnet._modules.get('layer4')
layerfc = resnet._modules.get('avgpool')
def save_images(images, size, image_path):
    """Merge a batch of images into a (rows, cols) grid and save it."""
    return imsave(images, size, image_path)
def imsave(images, size, path):
    """Tile *images* with merge() and write the result to *path*.

    Bug fix: ``scipy.misc.imsave`` was deprecated in SciPy 1.0 and removed
    in 1.2, so the original call fails on any modern SciPy.  imageio (already
    imported at module level) provides the drop-in replacement.
    """
    image = np.squeeze(merge(images, size))
    return imageio.imwrite(path, image)
def print_network(net):
    """Print a network's structure and its total trainable-parameter count."""
    total = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
def merge(images, size):
    """Tile a batch of images (n, h, w, c) into one (rows*h, cols*w[, c]) grid.

    size is (rows, cols); images are placed row-major.  Grayscale batches
    (c == 1) yield a 2-D grid, RGB/RGBA batches keep their channel axis.
    """
    h, w = images.shape[1], images.shape[2]
    channels = images.shape[3]
    if channels in (3, 4):
        canvas = np.zeros((h * size[0], w * size[1], channels))
        for idx, image in enumerate(images):
            col = idx % size[1]
            row = idx // size[1]
            canvas[row * h:row * h + h, col * w:col * w + w, :] = image
        return canvas
    elif channels == 1:
        canvas = np.zeros((h * size[0], w * size[1]))
        for idx, image in enumerate(images):
            col = idx % size[1]
            row = idx // size[1]
            canvas[row * h:row * h + h, col * w:col * w + w] = image[:, :, 0]
        return canvas
    else:
        raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
def generate_animation(path, num):
    """Assemble per-epoch PNGs ('<path>_epochNNN.png') into an animated GIF."""
    images = []
    for e in range(num):
        img_name = path + '_epoch%03d' % (e+1) + '.png'
        images.append(imageio.imread(img_name))
    imageio.mimsave(path + '_generate_animation.gif', images, fps=5)
def loss_plot(hist, path = 'Train_hist.png', model_name = '', cut=5):
    """Plot every series in *hist* (dict of name -> loss list) and save the figure.

    The first *cut* iterations are dropped from the plot.
    """
    names = [n for n in hist]
    x = range(len(hist[names[0]]))
    #y1 = hist[names[0]]
    #y2 = hist[names[1]]
    y = [hist[names[i]] for i in range(len(names))]
    for i in range(len(names)):
        plt.plot(x[cut:], y[i][cut:], label=names[i])
    #plt.plot(x, y1, label=names[0])
    #plt.plot(x, y2, label=names[1])
    plt.xlabel('Iter')
    plt.ylabel('Loss')
    plt.legend(loc=4)
    plt.grid(True)
    plt.tight_layout()
    # NOTE(review): joining onto *path* means the default 'Train_hist.png'
    # acts as a directory name here — confirm callers pass a directory.
    path = os.path.join(path, model_name + '.png')
    plt.savefig(path)
    plt.close()
def initialize_weights(net):
    """Initialize conv / transposed-conv / linear layers of *net* in place.

    Weights are drawn from N(0, 0.02) and biases are zeroed — the common
    DCGAN-style initialization.

    Bug fix: the original called ``m.bias.data.zero_()`` unconditionally,
    which raises AttributeError for any layer constructed with
    ``bias=False`` (``m.bias`` is None).  The bias is now zeroed only when
    it exists.
    """
    for m in net.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
            m.weight.data.normal_(0, 0.02)
            if m.bias is not None:
                m.bias.data.zero_()
def get_vector(images, layer, model, num_ftrs, size, Flatten=False):
    """
    Extract the activation of *layer* while running *images* through *model*.

    A forward hook copies the layer output into a pre-allocated buffer of
    shape (batch, num_ftrs, size[0], size[1]); the model's own output is
    discarded.  If Flatten is True the buffer is flattened per sample.
    """
    embedding = torch.zeros(images.shape[0], num_ftrs, size[0], size[1])
    def copy_data(m, i, o):
        embedding.copy_(o.data)
    h = layer.register_forward_hook(copy_data)
    hx = model(images)
    # Remove the hook so later forward passes are unaffected.
    h.remove()
    if Flatten:
        embedding = torch.flatten(embedding, 1)
    return embedding
def get_feature(img, layer, resnet, filter_size, feature_size, flatten=False):
    """
    Extract per-image ResNet features for a batch of 0-255 images.

    Assumes *img* is channel-first (n, c, h, w) — it is rolled to
    channel-last before resizing.  Each image is resized to 224x224,
    normalized with the module-level ImageNet mean/std, and hooked at
    *layer*; requires CUDA (tensors are moved with .cuda()).
    """
    features = []
    img = np.rollaxis(img, 1, 4)
    for i in range(len(img)):
        fimg = cv2.resize(img[i], (224,224), cv2.INTER_AREA)
        #fimg = cv2.merge([fimg, fimg, fimg]).astype(np.float32) / 255. #(224,224,3)
        #fimg = cv2.cvtColor(fimg, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        fimg = (fimg - mean) / std
        fimg = np.rollaxis(fimg, 2, 0)
        fimg = torch.from_numpy(fimg).float().cuda().unsqueeze(0)
        features.append( get_vector(fimg, layer, resnet, filter_size,
                       [feature_size,feature_size]).squeeze().cpu().numpy() )
    return np.array(features)
def plot_predict_joint(im, fake_grid, epoch, opt, root_dir, num=2):
    """
    Overlay the opt.anchor highest-scoring grid cells as blue 3x3 markers on
    the first *num* images and write them to root_dir as '<epoch>_<i>_joints.png'.
    """
    grid = fake_grid.clone().detach().cpu()
    img = im.copy().astype(np.float32)
    for i in range(num):
        # Top-k cells of the flattened (img_size x img_size) score grid.
        idx = torch.argsort(grid[i,...].flatten(), descending=True)[:opt.anchor]
        centers = []
        for index in idx:
            ii, jj = index // opt.img_size, index % opt.img_size
            centers.append([float(ii) / float(opt.img_size), float(jj) / float(opt.img_size)])
        # Scale normalized coordinates up to artwork resolution.
        centers = np.array(centers) * opt.art_size
        color = np.array([0.,0.,255.])
        for k in range(len(centers)):
            x, y = int(centers[k,0]), int(centers[k,1])
            img[i,x,y,:] = color
            # Paint a 3x3 neighbourhood, guarding the image border.
            if x+1<opt.art_size and x-1>0:
                img[i,x-1,y,:]=img[i,x+1,y,:] = color
            if y+1<opt.art_size and y-1>0:
                img[i,x-1,y-1,:]=img[i,x,y-1,:]=img[i,x+1,y-1,:]=color
                img[i,x-1,y+1,:]=img[i,x,y+1,:]=img[i,x+1,y+1,:]=color
    for i in range(num):
        image_name = str(epoch) + '_' + str(i) + '_' + 'joints' + '.png'
        cv2.imwrite(os.path.join(root_dir, image_name), img[i,...].astype(np.uint8))
    return img
def img_normalize(img_batch):
    """Min-max normalize every image in *img_batch* into the [0, 1] range.

    Bug fix: the original divided by ``np.max(img_batch[i])`` — the maximum
    of the *unshifted* image — instead of the value range ``max - min``, so
    images whose minimum was non-zero were never mapped onto the full
    [0, 1] interval.

    Parameters
    ----------
    img_batch : sequence of ndarray, modified in place.

    Returns
    -------
    The same batch, normalized per image (constant images become all zeros).
    """
    for i in range(len(img_batch)):
        lo = np.min(img_batch[i])
        span = np.max(img_batch[i]) - lo
        if span == 0:
            # Constant image: avoid division by zero, map to all zeros.
            span = 1
        img_batch[i] = (img_batch[i] - lo) / span
    return img_batch
def ada_thre(img):
    """Binarize a BGR image: grayscale -> 5x5 median blur -> Gaussian
    adaptive threshold (11-pixel neighbourhood, offset 2)."""
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img,5)
    img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    return img
def data_augmentation(img_batch, heatmap_batch, seed):
    """Apply a random spatial transform to each image and its heatmaps.

    Image i and every joint heatmap of image i use the identical seed
    (seed + i), so the image and its heatmaps stay spatially aligned.
    Both batches are modified in place and also returned.
    """
    for i in range(len(img_batch)):
        sample_seed = seed + i
        img_batch[i] = random_transform(img_batch[i], sample_seed)
        for j in range(len(heatmap_batch[i])):
            heatmap_batch[i, j, ...] = random_transform(heatmap_batch[i, j, ...], sample_seed)
    return img_batch, heatmap_batch
def data_generator(is_train, opt, seed):
    """Infinite generator of (image, heatmap, resnet-feature) CUDA batches.

    Loads and concatenates two pre-built COCO .npy archives (lists of dicts
    with 'img' and 'confidence_map' entries), shuffles them, and yields
    random batches of opt.batch_size from an 80/20 train/val split.
    Requires CUDA.  NOTE(review): random.shuffle is not reseeded, so the
    train/val split differs between runs — confirm that is intended.
    """
    coco1 = np.load('../../share/data/coco1.npy', allow_pickle=True)
    coco2 = np.load('../../share/data/coco2.npy', allow_pickle=True)
    # merge npy list of dict
    coco = np.concatenate((coco1, coco2))
    random.shuffle(coco) #shuffle
    print('merge coco1 and coco2 length:', len(coco))
    split = int(len(coco) * 0.8)
    if is_train:
        data = coco[:split]
    else:
        data = coco[split:]
    counts = 0
    while True:
        # Reseed per batch so augmentation is reproducible given `seed`.
        np.random.seed(seed + counts)
        idx = np.random.randint(0, len(data), size=opt.batch_size)
        img_batch_ = np.array([ cv2.cvtColor(data[i]['img'], cv2.COLOR_BGR2RGB) for i in idx]) / 255.
        img_batch_ = np.rollaxis(img_batch_, 3, 1)
        heatmap_batch_ = np.array([data[i]['confidence_map'] for i in idx])
        heatmap_batch_ = heatmap_batch_[:,:,None]
        # data augmentation
        img_batch, heatmap_batch = data_augmentation(
            img_batch_, heatmap_batch_, seed + counts)
        # ResNet features are extracted from the augmented 0-255 images.
        feature = (img_batch.squeeze() * 255.).astype(np.uint8)
        feature_batch = get_feature(feature, layer2, resnet, 128, 28)
        img_batch = img_batch * 2. - 1. # -1~1
        heatmap_batch = heatmap_batch * 2. - 1. # -1~1
        img_batch = torch.from_numpy(np.array(img_batch)).float().cuda()
        feature_batch = torch.from_numpy(np.array(feature_batch)).float().cuda()
        heatmap_batch = torch.from_numpy(np.array(heatmap_batch)).float().cuda().squeeze()
        counts += opt.batch_size
        yield img_batch, heatmap_batch, feature_batch
# Transform functions from Keras
def random_transform(x, seed=None, channel_first=True):
    """Randomly augment a single image tensor.

    Draws (in a fixed order, so the same seed reproduces the same
    transform): a 50% horizontal flip, a rotation in [-10, 10] degrees,
    row/column shifts up to 10% of the image size, and a zoom in
    [0.9, 1.1], then applies the composed matrix about the image center.

    # Arguments
        x: 3D tensor, single image.
        seed: random seed.
    # Returns
        A randomly transformed version of the input (same shape).
    """
    np.random.seed(seed)
    if channel_first:
        img_row_axis = 1
        img_col_axis = 2
        img_channel_axis = 0
        H, W = x.shape[1], x.shape[2]
    else:
        img_row_axis = 0
        img_col_axis = 1
        img_channel_axis = 2
        H, W = x.shape[0], x.shape[1]
    if float(np.random.uniform(0.0, 1.0, 1)) < 0.5:
        if_flip = True
    else:
        if_flip = False
    rotation_range = 10
    theta = np.deg2rad(np.random.uniform(-rotation_range, rotation_range))
    height_shift_range = width_shift_range = 0.1
    if height_shift_range:
        try:  # 1-D array-like or int
            tx = np.random.choice(height_shift_range)
            tx *= np.random.choice([-1, 1])
        except ValueError:  # floating point
            tx = np.random.uniform(-height_shift_range,
                                   height_shift_range)
        if np.max(height_shift_range) < 1:
            # Fractional range: interpret as a fraction of the image size.
            tx *= x.shape[img_row_axis]
    else:
        tx = 0
    if width_shift_range:
        try:  # 1-D array-like or int
            ty = np.random.choice(width_shift_range)
            ty *= np.random.choice([-1, 1])
        except ValueError:  # floating point
            ty = np.random.uniform(-width_shift_range,
                                  width_shift_range)
        if np.max(width_shift_range) < 1:
            ty *= x.shape[img_col_axis]
    else:
        ty = 0
    zoom_range = (0.9, 1.1)
    if zoom_range[0] == 1 and zoom_range[1] == 1:
        zx, zy = 1, 1
    else:
        zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    transform_matrix = None
    if if_flip:
        flip_matrix = np.array([[-1, 0, 0],
                                [0, 1, 0],
                                [0, 0, 1]])
        transform_matrix = flip_matrix if transform_matrix is None else np.dot(transform_matrix, flip_matrix)
    if theta != 0:
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                    [np.sin(theta), np.cos(theta), 0],
                                    [0, 0, 1]])
        # NOTE(review): this OVERWRITES any flip matrix instead of composing
        # with it (unlike the shift/zoom branches below) — likely a bug;
        # confirm whether the flip is meant to survive the rotation.
        transform_matrix = rotation_matrix
    if tx != 0 or ty != 0:
        shift_matrix = np.array([[1, 0, tx],
                                 [0, 1, ty],
                                 [0, 0, 1]])
        transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
    if zx != 1 or zy != 1:
        zoom_matrix = np.array([[zx, 0, 0],
                               [0, zy, 0],
                               [0, 0, 1]])
        transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
    # apply transforming
    if transform_matrix is not None:
        h, w = x.shape[img_row_axis], x.shape[img_col_axis]
        transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
        x = apply_transform(x, transform_matrix, img_channel_axis, fill_mode='nearest')
    return x
def transform_matrix_offset_center(matrix, x, y):
    """Re-center a 3x3 homogeneous transform so it pivots around the
    image center ((x/2 + 0.5, y/2 + 0.5)) instead of the origin."""
    center_x = float(x) / 2 + 0.5
    center_y = float(y) / 2 + 0.5
    to_center = np.array([[1, 0, center_x], [0, 1, center_y], [0, 0, 1]])
    from_center = np.array([[1, 0, -center_x], [0, 1, -center_y], [0, 0, 1]])
    return np.dot(np.dot(to_center, matrix), from_center)
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='constant', cval=0.):
    """Apply a 3x3 homogeneous transform to every channel of image *x*.

    Parameters
    ----------
    x : ndarray
        Image tensor; the channel dimension sits at *channel_index*.
    transform_matrix : ndarray, shape (3, 3)
        Homogeneous 2-D affine transform (maps output to input coordinates).
    channel_index : int
        Axis of *x* that holds the channels.
    fill_mode : str
        Boundary handling passed to scipy (e.g. 'constant', 'nearest').
    cval : float
        Fill value used when fill_mode == 'constant'.

    Returns
    -------
    ndarray with the same shape as *x*.
    """
    x = np.rollaxis(x, channel_index, 0)
    final_affine_matrix = transform_matrix[:2, :2]
    final_offset = transform_matrix[:2, 2]
    # Fix: scipy.ndimage.interpolation is a long-deprecated private
    # namespace removed from modern SciPy; call affine_transform from
    # scipy.ndimage directly.  order=0 keeps nearest-neighbour sampling.
    channel_images = [ndi.affine_transform(x_channel, final_affine_matrix,
                      final_offset, order=0, mode=fill_mode, cval=cval)
                      for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_index + 1)
    return x
def rotation(x, rg):
    """Rotate channel-last image *x* by *rg* degrees about its center,
    filling uncovered pixels with 255 (white)."""
    angle = np.deg2rad(rg)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    rotation_matrix = np.array([[cos_a, -sin_a, 0],
                                [sin_a, cos_a, 0],
                                [0, 0, 1]])
    h, w = x.shape[0], x.shape[1]
    centered = transform_matrix_offset_center(rotation_matrix, h, w)
    return apply_transform(x, centered, 2, fill_mode='constant', cval=255)
def merge_heatmap(heatmap, normalize=True):
    """Collapse per-joint heatmaps (b, j, h, w) into a 3-channel map (b, 3, h, w).

    Values are first remapped from (-1, 1) to (0, 1); joint channels are
    summed and the result is broadcast to three identical channels.  A
    3-D input (b, h, w) is treated as a single-joint batch.
    """
    merged = (heatmap.detach().clone() + 1.) / 2.
    ndim = len(merged.shape)
    if ndim > 3:
        merged = torch.sum(merged, 1, keepdim=True)
    elif ndim == 3:
        merged = merged.unsqueeze(1)
    else:
        print('Invalid heatmap size!')
    merged = merged.repeat(1, 3, 1, 1)
    if normalize:
        merged = (merged - torch.min(merged)) / torch.max(merged)
    return merged
def create_heatmap(im_map, im_cloud, kernel_size=(5,5),colormap=cv2.COLORMAP_TURBO,a1=0.5,a2=0.5,normalize=True):
    '''
    Blend colorized heatmaps over images and return the result as a tensor.

    im_map: a batch of im_map tensor (b,3,h,w) in (-1,1); (b,1,h,w) is
        broadcast to three channels.
    im_cloud: a batch of isolated heatmap tensor (b,j,h,w)
    kernel_size: Gaussian blur kernel (both entries must be odd)
    colormap: cv2 colormap applied to the blurred heatmap
    a1, a2: blend weights for image and heatmap respectively
    return a batch of heatmap overlapped with img in tensor
    '''
    im_map_ = im_map.detach().clone()
    if im_map_.shape[1] == 1:
        im_map_ = im_map_.repeat(1,3,1,1)
    # (-1,1) -> uint8 0-255 for OpenCV.
    im_map_ = (((im_map_ + 1. ) / 2.) * 255.).cpu().numpy().astype(np.uint8)
    im_cloud_ = merge_heatmap(im_cloud, normalize=normalize).cpu().numpy() #0~1
    im_cloud_ = (im_cloud_ * 255.).astype(np.uint8)
    # Channel-first -> channel-last for the cv2 calls below.
    im_map_ = np.rollaxis(im_map_, 1, 4)
    im_cloud_ = np.rollaxis(im_cloud_, 1, 4)
    new = np.zeros_like(im_map_)
    for i in range(len(im_map_)):
        # create blur image, kernel must be an odd number
        im_cloud_blur = cv2.GaussianBlur(im_cloud_[i],kernel_size,0)
        # If you need to invert the black/white data image
        # im_blur = np.invert(im_blur)
        # Convert back to BGR for cv2
        #im_cloud_blur = cv2.cvtColor(im_cloud_blur,cv2.COLOR_GRAY2BGR)
        # Apply colormap
        im_cloud_clr = cv2.applyColorMap(im_cloud_blur, colormap)
        # blend images a1/a2
        new[i] = (a1*im_map_[i] + a2*im_cloud_clr)
    # Back to channel-first float tensor (still 0-255 scale).
    new = np.rollaxis(new, 3, 1)
    new = torch.from_numpy(new).float()
    return new
| [
"cv2.GaussianBlur",
"numpy.load",
"numpy.random.seed",
"cv2.medianBlur",
"random.shuffle",
"cv2.adaptiveThreshold",
"numpy.sin",
"torchvision.models.resnet34",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"imageio.mimsave",
"torch.flatten",
"numpy.zeros_like",
"cv2.cvtColor",
"matpl... | [((327, 358), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (335, 358), True, 'import numpy as np\n'), ((365, 396), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (373, 396), True, 'import numpy as np\n'), ((481, 525), 'torchvision.models.resnet34', 'torchvision.models.resnet34', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (508, 525), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((413, 438), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (436, 438), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((2170, 2234), 'imageio.mimsave', 'imageio.mimsave', (["(path + '_generate_animation.gif')", 'images'], {'fps': '(5)'}), "(path + '_generate_animation.gif', images, fps=5)\n", (2185, 2234), False, 'import imageio\n'), ((2640, 2658), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iter"""'], {}), "('Iter')\n", (2650, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2681), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (2673, 2681), True, 'import matplotlib.pyplot as plt\n'), ((2687, 2704), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (2697, 2704), True, 'import matplotlib.pyplot as plt\n'), ((2709, 2723), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2717, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2746), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2744, 2746), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2798), 'os.path.join', 'os.path.join', (['path', "(model_name + '.png')"], {}), "(path, model_name + '.png')\n", (2771, 2798), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((2804, 2821), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2815, 2821), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2838), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2836, 2838), True, 'import matplotlib.pyplot as plt\n'), ((3424, 3480), 'torch.zeros', 'torch.zeros', (['images.shape[0]', 'num_ftrs', 'size[0]', 'size[1]'], {}), '(images.shape[0], num_ftrs, size[0], size[1])\n', (3435, 3480), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((3920, 3942), 'numpy.rollaxis', 'np.rollaxis', (['img', '(1)', '(4)'], {}), '(img, 1, 4)\n', (3931, 3942), True, 'import numpy as np\n'), ((4487, 4505), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (4495, 4505), True, 'import numpy as np\n'), ((5984, 6021), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (5996, 6021), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((6032, 6054), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(5)'], {}), '(img, 5)\n', (6046, 6054), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((6064, 6158), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['img', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (6085, 6158), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((6618, 6674), 'numpy.load', 'np.load', (['"""../../share/data/coco1.npy"""'], {'allow_pickle': '(True)'}), "('../../share/data/coco1.npy', allow_pickle=True)\n", (6625, 6674), True, 'import numpy as np\n'), ((6687, 6743), 'numpy.load', 'np.load', (['"""../../share/data/coco2.npy"""'], {'allow_pickle': '(True)'}), "('../../share/data/coco2.npy', allow_pickle=True)\n", (6694, 6743), True, 'import numpy as np\n'), ((6785, 6815), 'numpy.concatenate', 'np.concatenate', (['(coco1, coco2)'], {}), '((coco1, coco2))\n', (6799, 6815), True, 'import numpy as np\n'), ((6820, 6840), 'random.shuffle', 'random.shuffle', (['coco'], {}), '(coco)\n', (6834, 6840), False, 'import os, gzip, csv, torch, cv2, 
torchvision, random\n'), ((8457, 8477), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8471, 8477), True, 'import numpy as np\n'), ((11556, 11603), 'numpy.array', 'np.array', (['[[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]'], {}), '([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n', (11564, 11603), True, 'import numpy as np\n'), ((11623, 11672), 'numpy.array', 'np.array', (['[[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]'], {}), '([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n', (11631, 11672), True, 'import numpy as np\n'), ((11876, 11908), 'numpy.rollaxis', 'np.rollaxis', (['x', 'channel_index', '(0)'], {}), '(x, channel_index, 0)\n', (11887, 11908), True, 'import numpy as np\n'), ((12249, 12281), 'numpy.stack', 'np.stack', (['channel_images'], {'axis': '(0)'}), '(channel_images, axis=0)\n', (12257, 12281), True, 'import numpy as np\n'), ((12290, 12326), 'numpy.rollaxis', 'np.rollaxis', (['x', '(0)', '(channel_index + 1)'], {}), '(x, 0, channel_index + 1)\n', (12301, 12326), True, 'import numpy as np\n'), ((12375, 12389), 'numpy.deg2rad', 'np.deg2rad', (['rg'], {}), '(rg)\n', (12385, 12389), True, 'import numpy as np\n'), ((14021, 14047), 'numpy.rollaxis', 'np.rollaxis', (['im_map_', '(1)', '(4)'], {}), '(im_map_, 1, 4)\n', (14032, 14047), True, 'import numpy as np\n'), ((14064, 14092), 'numpy.rollaxis', 'np.rollaxis', (['im_cloud_', '(1)', '(4)'], {}), '(im_cloud_, 1, 4)\n', (14075, 14092), True, 'import numpy as np\n'), ((14103, 14125), 'numpy.zeros_like', 'np.zeros_like', (['im_map_'], {}), '(im_map_)\n', (14116, 14125), True, 'import numpy as np\n'), ((14680, 14702), 'numpy.rollaxis', 'np.rollaxis', (['new', '(3)', '(1)'], {}), '(new, 3, 1)\n', (14691, 14702), True, 'import numpy as np\n'), ((1366, 1405), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1], c)'], {}), '((h * size[0], w * size[1], c))\n', (1374, 1405), True, 'import numpy as np\n'), ((2515, 2560), 'matplotlib.pyplot.plot', 'plt.plot', (['x[cut:]', 'y[i][cut:]'], {'label': 'names[i]'}), 
'(x[cut:], y[i][cut:], label=names[i])\n', (2523, 2560), True, 'import matplotlib.pyplot as plt\n'), ((3662, 3689), 'torch.flatten', 'torch.flatten', (['embedding', '(1)'], {}), '(embedding, 1)\n', (3675, 3689), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((3988, 4034), 'cv2.resize', 'cv2.resize', (['img[i]', '(224, 224)', 'cv2.INTER_AREA'], {}), '(img[i], (224, 224), cv2.INTER_AREA)\n', (3998, 4034), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((4249, 4272), 'numpy.rollaxis', 'np.rollaxis', (['fimg', '(2)', '(0)'], {}), '(fimg, 2, 0)\n', (4260, 4272), True, 'import numpy as np\n'), ((5139, 5166), 'numpy.array', 'np.array', (['[0.0, 0.0, 255.0]'], {}), '([0.0, 0.0, 255.0])\n', (5147, 5166), True, 'import numpy as np\n'), ((7061, 7090), 'numpy.random.seed', 'np.random.seed', (['(seed + counts)'], {}), '(seed + counts)\n', (7075, 7090), True, 'import numpy as np\n'), ((7281, 7310), 'numpy.rollaxis', 'np.rollaxis', (['img_batch_', '(3)', '(1)'], {}), '(img_batch_, 3, 1)\n', (7292, 7310), True, 'import numpy as np\n'), ((7336, 7386), 'numpy.array', 'np.array', (["[data[i]['confidence_map'] for i in idx]"], {}), "([data[i]['confidence_map'] for i in idx])\n", (7344, 7386), True, 'import numpy as np\n'), ((8903, 8953), 'numpy.random.uniform', 'np.random.uniform', (['(-rotation_range)', 'rotation_range'], {}), '(-rotation_range, rotation_range)\n', (8920, 8953), True, 'import numpy as np\n'), ((9983, 10033), 'numpy.random.uniform', 'np.random.uniform', (['zoom_range[0]', 'zoom_range[1]', '(2)'], {}), '(zoom_range[0], zoom_range[1], 2)\n', (10000, 10033), True, 'import numpy as np\n'), ((10101, 10145), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (10109, 10145), True, 'import numpy as np\n'), ((10625, 10670), 'numpy.array', 'np.array', (['[[1, 0, tx], [0, 1, ty], [0, 0, 1]]'], {}), '([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n', (10633, 10670), True, 'import numpy 
as np\n'), ((10899, 10944), 'numpy.array', 'np.array', (['[[zx, 0, 0], [0, zy, 0], [0, 0, 1]]'], {}), '([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])\n', (10907, 10944), True, 'import numpy as np\n'), ((11703, 11732), 'numpy.dot', 'np.dot', (['offset_matrix', 'matrix'], {}), '(offset_matrix, matrix)\n', (11709, 11732), True, 'import numpy as np\n'), ((12025, 12145), 'scipy.ndimage.interpolation.affine_transform', 'ndi.interpolation.affine_transform', (['x_channel', 'final_affine_matrix', 'final_offset'], {'order': '(0)', 'mode': 'fill_mode', 'cval': 'cval'}), '(x_channel, final_affine_matrix,\n final_offset, order=0, mode=fill_mode, cval=cval)\n', (12059, 12145), True, 'import scipy.ndimage as ndi\n'), ((13066, 13102), 'torch.sum', 'torch.sum', (['heatmap_', '(1)'], {'keepdim': '(True)'}), '(heatmap_, 1, keepdim=True)\n', (13075, 13102), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((14243, 14289), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im_cloud_[i]', 'kernel_size', '(0)'], {}), '(im_cloud_[i], kernel_size, 0)\n', (14259, 14289), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((14546, 14588), 'cv2.applyColorMap', 'cv2.applyColorMap', (['im_cloud_blur', 'colormap'], {}), '(im_cloud_blur, colormap)\n', (14563, 14588), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((1635, 1671), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1])'], {}), '((h * size[0], w * size[1]))\n', (1643, 1671), True, 'import numpy as np\n'), ((2140, 2164), 'imageio.imread', 'imageio.imread', (['img_name'], {}), '(img_name)\n', (2154, 2164), False, 'import imageio\n'), ((5089, 5106), 'numpy.array', 'np.array', (['centers'], {}), '(centers)\n', (5097, 5106), True, 'import numpy as np\n'), ((5702, 5736), 'os.path.join', 'os.path.join', (['root_dir', 'image_name'], {}), '(root_dir, image_name)\n', (5714, 5736), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((5912, 5932), 'numpy.max', 'np.max', (['img_batch[i]'], 
{}), '(img_batch[i])\n', (5918, 5932), True, 'import numpy as np\n'), ((8759, 8789), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(1)'], {}), '(0.0, 1.0, 1)\n', (8776, 8789), True, 'import numpy as np\n'), ((9087, 9123), 'numpy.random.choice', 'np.random.choice', (['height_shift_range'], {}), '(height_shift_range)\n', (9103, 9123), True, 'import numpy as np\n'), ((9142, 9167), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9158, 9167), True, 'import numpy as np\n'), ((9335, 9361), 'numpy.max', 'np.max', (['height_shift_range'], {}), '(height_shift_range)\n', (9341, 9361), True, 'import numpy as np\n'), ((9514, 9549), 'numpy.random.choice', 'np.random.choice', (['width_shift_range'], {}), '(width_shift_range)\n', (9530, 9549), True, 'import numpy as np\n'), ((9568, 9593), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9584, 9593), True, 'import numpy as np\n'), ((9759, 9784), 'numpy.max', 'np.max', (['width_shift_range'], {}), '(width_shift_range)\n', (9765, 9784), True, 'import numpy as np\n'), ((10282, 10319), 'numpy.dot', 'np.dot', (['transform_matrix', 'flip_matrix'], {}), '(transform_matrix, flip_matrix)\n', (10288, 10319), True, 'import numpy as np\n'), ((10810, 10848), 'numpy.dot', 'np.dot', (['transform_matrix', 'shift_matrix'], {}), '(transform_matrix, shift_matrix)\n', (10816, 10848), True, 'import numpy as np\n'), ((11081, 11118), 'numpy.dot', 'np.dot', (['transform_matrix', 'zoom_matrix'], {}), '(transform_matrix, zoom_matrix)\n', (11087, 11118), True, 'import numpy as np\n'), ((13340, 13359), 'torch.max', 'torch.max', (['heatmap_'], {}), '(heatmap_)\n', (13349, 13359), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((14713, 14734), 'torch.from_numpy', 'torch.from_numpy', (['new'], {}), '(new)\n', (14729, 14734), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((5888, 5908), 'numpy.min', 'np.min', (['img_batch[i]'], {}), 
'(img_batch[i])\n', (5894, 5908), True, 'import numpy as np\n'), ((9230, 9288), 'numpy.random.uniform', 'np.random.uniform', (['(-height_shift_range)', 'height_shift_range'], {}), '(-height_shift_range, height_shift_range)\n', (9247, 9288), True, 'import numpy as np\n'), ((9656, 9712), 'numpy.random.uniform', 'np.random.uniform', (['(-width_shift_range)', 'width_shift_range'], {}), '(-width_shift_range, width_shift_range)\n', (9673, 9712), True, 'import numpy as np\n'), ((12423, 12436), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (12429, 12436), True, 'import numpy as np\n'), ((12491, 12504), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12497, 12504), True, 'import numpy as np\n'), ((12506, 12519), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (12512, 12519), True, 'import numpy as np\n'), ((13317, 13336), 'torch.min', 'torch.min', (['heatmap_'], {}), '(heatmap_)\n', (13326, 13336), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((7190, 7237), 'cv2.cvtColor', 'cv2.cvtColor', (["data[i]['img']", 'cv2.COLOR_BGR2RGB'], {}), "(data[i]['img'], cv2.COLOR_BGR2RGB)\n", (7202, 7237), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((10377, 10390), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10383, 10390), True, 'import numpy as np\n'), ((10449, 10462), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10455, 10462), True, 'import numpy as np\n'), ((10464, 10477), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10470, 10477), True, 'import numpy as np\n'), ((12439, 12452), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12445, 12452), True, 'import numpy as np\n'), ((10393, 10406), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10399, 10406), True, 'import numpy as np\n'), ((7851, 7870), 'numpy.array', 'np.array', (['img_batch'], {}), '(img_batch)\n', (7859, 7870), True, 'import numpy as np\n'), ((7928, 7951), 'numpy.array', 'np.array', (['feature_batch'], {}), 
'(feature_batch)\n', (7936, 7951), True, 'import numpy as np\n'), ((4288, 4310), 'torch.from_numpy', 'torch.from_numpy', (['fimg'], {}), '(fimg)\n', (4304, 4310), False, 'import os, gzip, csv, torch, cv2, torchvision, random\n'), ((8009, 8032), 'numpy.array', 'np.array', (['heatmap_batch'], {}), '(heatmap_batch)\n', (8017, 8032), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 13:05:23 2019
@author: Yuki-F
"""
import scipy.signal as signal
import warnings
import scipy as sp
import numpy as np
from typing import List, Tuple
import sys
def impz(system:tuple, n:int=None, fs:int=1)->Tuple:
"""
Impulse response of a digital filter.
Parameters
----------
system : a tuple of array_like describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* (num, den)
n : int, optional
The number of time points to compute.
fs : int optional
Sampling frequency to calcurate time points. default is 1.
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except
for singularities at zero).
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in describing exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
"""
# when FIR filter
if type(system[1]) == int and system[1] == 1:
# calcurate time points
if n == None:
# automatically determine the length of time points
T = np.arange(0, (len(system[0])+1)/fs, 1/fs)
else:
# determine the time points which length is n
T = np.arange(0, (n+1)/fs, 1/fs)
# make impulse signal
x = np.zeros(len(T))
x[0] = 1
# output the impulse response
yout = signal.lfilter(system[0], system[1], x)
else:
# when IIR filter
# convert to instance of dlti
dl = signal.dlti(system[0], system[1], dt=1/fs)
# output impulse response of discrete-time system.
if n == None:
i_d = signal.dimpulse(dl)
else:
i_d = signal.dimpulse(dl, n=n)
# split to time points and impulse response
T = i_d[0]
yout = i_d[1][0]
return T, yout
def freqz(system, worN: int = 512, fs=2 * np.pi, outform: str = 'complex') -> Tuple:
    """
    Frequency response of a digital filter.

    Parameters
    ----------
    system : a tuple of array_like describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * (num, den)
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies
        (default is N=512). If an array_like, compute the response at
        the frequencies given, in the same units as fs.
    fs : float, optional
        The sampling frequency of the digital system.
        Defaults to 2*pi radians/sample (so w is from 0 to pi).
    outform : {'complex', 'dB', 'abs'}, optional
        Output format of the response. Default is 'complex'.

    Returns
    -------
    w : ndarray
        The frequencies at which h was computed, in the same units as fs.
    h : ndarray
        The frequency response, complex, in dB, or as magnitude,
        depending on `outform`.

    Raises
    ------
    ValueError
        If `outform` is not one of 'complex', 'dB' or 'abs'.
    """
    # Compute the frequency response.
    w, h = signal.freqz(system[0], system[1], worN=worN, fs=fs)
    if outform == 'complex':
        # Raw complex response.
        return w, h
    if outform == 'dB':
        # Magnitude in decibels: 20*log10(|h|).
        return w, 20 * np.log10(np.abs(h))
    if outform == 'abs':
        # Magnitude (absolute value) of the complex response.
        return w, np.abs(h)
    # Bug fix: the original message was missing a space ("or'abs'").
    raise ValueError("Parameter outform must be 'complex', 'dB', or 'abs'.")
def grpdelay(system, worN: int = 512, fs=2 * np.pi) -> Tuple:
    """
    Group delay of a digital filter.

    Parameters
    ----------
    system : a tuple of array_like describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * (num, den)
    worN : {None, int, array_like}, optional
        Number of frequencies to evaluate (default is 512), or the
        explicit frequencies themselves, in the same units as `fs`.
    fs : float, optional
        The sampling frequency of the digital system.
        Defaults to 2*pi radians/sample (so w is from 0 to pi).

    Returns
    -------
    w : ndarray
        The frequencies at which the group delay was computed, in the
        same units as fs.
    gd : ndarray
        The group delay, in samples.
    """
    # Compute the group delay of the digital filter.
    freqs, delay = signal.group_delay(system, w=worN, fs=fs)
    # For an FIR filter (denominator == 1), round away the numerical
    # noise in the computed delay, matching the original behavior.
    fir = system[1] == 1
    if fir:
        delay = np.round(delay)
    return freqs, delay
def phasez(system, worN: int = 512, fs=2 * np.pi, deg: bool = False) -> Tuple:
    """
    Phase response of a digital filter.

    Parameters
    ----------
    system : a tuple of array_like describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * (num, den)
    worN : {None, int, array_like}, optional
        Number of frequencies to evaluate (default is 512), or the
        explicit frequencies themselves, in the same units as `fs`.
    fs : float, optional
        The sampling frequency of the digital system.
        Defaults to 2*pi radians/sample (so w is from 0 to pi).
    deg : bool, optional
        If True, the phase response is returned in degrees.
        Default is False (radians).

    Returns
    -------
    w : ndarray
        The frequencies at which the phase was computed, in the same
        units as fs.
    phase : ndarray
        The unwrapped phase response.
    """
    # Equivalent to the module-level freqz(..., outform='complex').
    w, h = signal.freqz(system[0], system[1], worN=worN, fs=fs)
    # Bug fix: `scipy.angle` / `scipy.unwrap` were deprecated and removed
    # from SciPy's top-level namespace; use the NumPy equivalents.
    phase = np.unwrap(np.angle(h))
    if deg:
        phase = np.rad2deg(phase)
    return w, phase
def zplane(system, show: bool = True, figsize: Tuple[int, int] = (8, 8)):
    """
    Zero-pole plot of a digital filter.

    Parameters
    ----------
    system : a tuple of array_like describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * (num, den)
    show : bool, optional
        If True, a zero-pole plot of the digital filter is shown
        by matplotlib.pyplot. Default is True.
    figsize : tuple, optional
        Figure size of the zero-pole plot when `show` is True.
        Default is (8, 8).

    Returns
    -------
    z : ndarray
        Zeros of the digital filter (rounded to 2 decimals).
    p : ndarray
        Poles of the digital filter (rounded to 2 decimals).
    k : float
        Gain of the digital filter.
    """
    # Bug fix: work on float copies so the caller's coefficient arrays are
    # never mutated (the previous in-place `b /= kn` modified the input,
    # and crashed outright on plain Python lists).
    b = np.atleast_1d(np.array(system[0], dtype=float))
    a = np.atleast_1d(np.array(system[1], dtype=float))
    # Normalize each polynomial when its maximum coefficient exceeds 1.
    kn = np.max(b)
    if kn > 1:
        b = b / kn
    else:
        kn = 1
    kd = np.max(a)
    if kd > 1:
        a = a / kd
    else:
        kd = 1
    # Zeros/poles are the roots of the numerator/denominator polynomials.
    p = np.round(np.roots(a), decimals=2)
    z = np.round(np.roots(b), decimals=2)
    k = kn / float(kd)
    if show:
        # Bug fix: import locally so the module does not depend on the
        # __main__ block having injected plt/patches into module globals.
        import matplotlib.pyplot as plt
        from matplotlib import patches
        plt.figure(figsize=figsize)
        ax = plt.subplot(111)
        # Dashed unit circle as the stability reference.
        uc = patches.Circle((0, 0), radius=1, fill=False,
                            color='black', ls='dashed')
        ax.add_patch(uc)
        plt.plot(z.real, z.imag, 'go', ms=10)
        plt.plot(p.real, p.imag, 'rx', ms=10)
        ax.spines['left'].set_position('center')
        ax.spines['bottom'].set_position('center')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        r = 1.5
        plt.axis('scaled')
        plt.axis([-r, r, -r, r])
        ticks = [-1, -.5, .5, 1]
        plt.xticks(ticks)
        plt.yticks(ticks)
    return z, p, k
def isstable(system) -> bool:
    """
    Determine whether a digital filter is stable.

    Parameters
    ----------
    system : a tuple of array_like describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * (num, den)

    Returns
    -------
    flag : bool
        True when every pole lies on or inside the unit circle,
        within a small numerical tolerance.
    """
    _, poles, _ = zplane(system, show=False)
    tolerance = sys.float_info.epsilon ** (2 / 3)
    pole_radius = np.max(np.abs(poles))
    return pole_radius - 1.0 <= tolerance
def isminphase(system, tol: float = sys.float_info.epsilon ** (2 / 3)) -> bool:
    """
    Determine whether a digital filter is minimum phase.

    Parameters
    ----------
    system : a tuple of array_like describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * (num, den)
    tol : float, optional
        Tolerance added to the unit-circle radius when testing the zeros.
        Defaults to ``sys.float_info.epsilon ** (2 / 3)``.

    Returns
    -------
    flag : bool
        True if all zeros lie on or inside the unit circle (within `tol`).
    """
    z, p, _ = zplane(system, show=False)
    # Bug fix: `tol` was accepted but ignored — the hard-coded default was
    # always used. The default value is unchanged, so default behavior is
    # identical for existing callers.
    # NOTE(review): only the zeros are tested here (as before); a strict
    # minimum-phase test would also require all poles inside the unit circle.
    return np.max(np.abs(z)) - 1.0 <= tol
if __name__ == '__main__':
    # Demo: design a 6th-order high-pass Butterworth filter and plot its
    # impulse, magnitude, group-delay, phase and pole-zero characteristics.
    import matplotlib.pyplot as plt
    from matplotlib import patches
    b, a = signal.butter(6, 0.7, btype='high')
    #lti = signal.lti(b, a)
    #b = signal.firwin(257, 21000/22050)
    #a = 1
    # Impulse response
    plt.figure()
    T, yout = impz((b, a))
    plt.plot(T, yout)
    plt.xlabel('Sample')
    plt.ylabel('Normalized Amplitude')
    # Magnitude response in dB (assuming a 44.1 kHz sample rate)
    plt.figure()
    w, h = freqz((b, a), fs = 44100, outform='dB')
    plt.plot(w, h)
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Magnitude [dB]')
    # Group delay
    plt.figure()
    w, gd = grpdelay((b, a), fs = 44100)
    plt.plot(w, gd)
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Group delay [samples]')
    # Unwrapped phase response
    plt.figure()
    w, phase = phasez((b, a), fs = 44100)
    plt.plot(w, phase)
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Phase [rad]')
    # Pole-zero plot. NOTE(review): the title says 'Lowpass' although the
    # filter designed above is a high-pass.
    z, p, k = zplane((b, a))
    plt.title('Lowpass digital filter')
    print(isstable((b, a)))
    print(isminphase((b, a)))
| [
"matplotlib.pyplot.title",
"numpy.roots",
"numpy.abs",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.round",
"scipy.angle",
"scipy.signal.lfilter",
"scipy.signal.dlti",
"matplotlib.pyplot.yticks",
"numpy.max",
"matplotlib.pyplot.xticks",
"scipy.signal.butter",
"scipy.signal.dimpulse",... | [((3869, 3921), 'scipy.signal.freqz', 'signal.freqz', (['system[0]', 'system[1]'], {'worN': 'worN', 'fs': 'fs'}), '(system[0], system[1], worN=worN, fs=fs)\n', (3881, 3921), True, 'import scipy.signal as signal\n'), ((5854, 5895), 'scipy.signal.group_delay', 'signal.group_delay', (['system'], {'w': 'worN', 'fs': 'fs'}), '(system, w=worN, fs=fs)\n', (5872, 5895), True, 'import scipy.signal as signal\n'), ((11264, 11299), 'scipy.signal.butter', 'signal.butter', (['(6)', '(0.7)'], {'btype': '"""high"""'}), "(6, 0.7, btype='high')\n", (11277, 11299), True, 'import scipy.signal as signal\n'), ((11394, 11406), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11404, 11406), True, 'import matplotlib.pyplot as plt\n'), ((11440, 11457), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'yout'], {}), '(T, yout)\n', (11448, 11457), True, 'import matplotlib.pyplot as plt\n'), ((11463, 11483), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample"""'], {}), "('Sample')\n", (11473, 11483), True, 'import matplotlib.pyplot as plt\n'), ((11489, 11523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized Amplitude"""'], {}), "('Normalized Amplitude')\n", (11499, 11523), True, 'import matplotlib.pyplot as plt\n'), ((11535, 11547), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11545, 11547), True, 'import matplotlib.pyplot as plt\n'), ((11605, 11619), 'matplotlib.pyplot.plot', 'plt.plot', (['w', 'h'], {}), '(w, h)\n', (11613, 11619), True, 'import matplotlib.pyplot as plt\n'), ((11625, 11653), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (11635, 11653), True, 'import matplotlib.pyplot as plt\n'), ((11659, 11687), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude [dB]"""'], {}), "('Magnitude [dB]')\n", (11669, 11687), True, 'import matplotlib.pyplot as plt\n'), ((11699, 11711), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11709, 11711), True, 'import 
matplotlib.pyplot as plt\n'), ((11759, 11774), 'matplotlib.pyplot.plot', 'plt.plot', (['w', 'gd'], {}), '(w, gd)\n', (11767, 11774), True, 'import matplotlib.pyplot as plt\n'), ((11780, 11808), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (11790, 11808), True, 'import matplotlib.pyplot as plt\n'), ((11814, 11849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Group delay [samples]"""'], {}), "('Group delay [samples]')\n", (11824, 11849), True, 'import matplotlib.pyplot as plt\n'), ((11861, 11873), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11871, 11873), True, 'import matplotlib.pyplot as plt\n'), ((11922, 11940), 'matplotlib.pyplot.plot', 'plt.plot', (['w', 'phase'], {}), '(w, phase)\n', (11930, 11940), True, 'import matplotlib.pyplot as plt\n'), ((11946, 11974), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (11956, 11974), True, 'import matplotlib.pyplot as plt\n'), ((11980, 12005), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase [rad]"""'], {}), "('Phase [rad]')\n", (11990, 12005), True, 'import matplotlib.pyplot as plt\n'), ((12047, 12082), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowpass digital filter"""'], {}), "('Lowpass digital filter')\n", (12056, 12082), True, 'import matplotlib.pyplot as plt\n'), ((1873, 1912), 'scipy.signal.lfilter', 'signal.lfilter', (['system[0]', 'system[1]', 'x'], {}), '(system[0], system[1], x)\n', (1887, 1912), True, 'import scipy.signal as signal\n'), ((2014, 2058), 'scipy.signal.dlti', 'signal.dlti', (['system[0]', 'system[1]'], {'dt': '(1 / fs)'}), '(system[0], system[1], dt=1 / fs)\n', (2025, 2058), True, 'import scipy.signal as signal\n'), ((6013, 6025), 'numpy.round', 'np.round', (['gd'], {}), '(gd)\n', (6021, 6025), True, 'import numpy as np\n'), ((7678, 7689), 'scipy.angle', 'sp.angle', (['h'], {}), '(h)\n', (7686, 7689), True, 'import scipy as sp\n'), ((7735, 7752), 'numpy.rad2deg', 
'np.rad2deg', (['phase'], {}), '(phase)\n', (7745, 7752), True, 'import numpy as np\n'), ((8881, 8890), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (8887, 8890), True, 'import numpy as np\n'), ((8910, 8919), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (8916, 8919), True, 'import numpy as np\n'), ((8985, 8994), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (8991, 8994), True, 'import numpy as np\n'), ((9014, 9023), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (9020, 9023), True, 'import numpy as np\n'), ((9133, 9144), 'numpy.roots', 'np.roots', (['a'], {}), '(a)\n', (9141, 9144), True, 'import numpy as np\n'), ((9176, 9187), 'numpy.roots', 'np.roots', (['b'], {}), '(b)\n', (9184, 9187), True, 'import numpy as np\n'), ((9262, 9289), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9272, 9289), True, 'import matplotlib.pyplot as plt\n'), ((9304, 9320), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (9315, 9320), True, 'import matplotlib.pyplot as plt\n'), ((9335, 9407), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)'], {'radius': '(1)', 'fill': '(False)', 'color': '"""black"""', 'ls': '"""dashed"""'}), "((0, 0), radius=1, fill=False, color='black', ls='dashed')\n", (9349, 9407), False, 'from matplotlib import patches\n'), ((9472, 9509), 'matplotlib.pyplot.plot', 'plt.plot', (['z.real', 'z.imag', '"""go"""'], {'ms': '(10)'}), "(z.real, z.imag, 'go', ms=10)\n", (9480, 9509), True, 'import matplotlib.pyplot as plt\n'), ((9519, 9556), 'matplotlib.pyplot.plot', 'plt.plot', (['p.real', 'p.imag', '"""rx"""'], {'ms': '(10)'}), "(p.real, p.imag, 'rx', ms=10)\n", (9527, 9556), True, 'import matplotlib.pyplot as plt\n'), ((9777, 9795), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (9785, 9795), True, 'import matplotlib.pyplot as plt\n'), ((9805, 9829), 'matplotlib.pyplot.axis', 'plt.axis', (['[-r, r, -r, r]'], {}), '([-r, r, -r, r])\n', (9813, 9829), True, 'import 
matplotlib.pyplot as plt\n'), ((9873, 9890), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {}), '(ticks)\n', (9883, 9890), True, 'import matplotlib.pyplot as plt\n'), ((9900, 9917), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks'], {}), '(ticks)\n', (9910, 9917), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1720), 'numpy.arange', 'np.arange', (['(0)', '((n + 1) / fs)', '(1 / fs)'], {}), '(0, (n + 1) / fs, 1 / fs)\n', (1695, 1720), True, 'import numpy as np\n'), ((2169, 2188), 'scipy.signal.dimpulse', 'signal.dimpulse', (['dl'], {}), '(dl)\n', (2184, 2188), True, 'import scipy.signal as signal\n'), ((2223, 2247), 'scipy.signal.dimpulse', 'signal.dimpulse', (['dl'], {'n': 'n'}), '(dl, n=n)\n', (2238, 2247), True, 'import scipy.signal as signal\n'), ((4252, 4261), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (4258, 4261), True, 'import numpy as np\n'), ((10448, 10457), 'numpy.abs', 'np.abs', (['p'], {}), '(p)\n', (10454, 10457), True, 'import numpy as np\n'), ((11058, 11067), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (11064, 11067), True, 'import numpy as np\n'), ((4128, 4137), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (4134, 4137), True, 'import numpy as np\n')] |
import os
import sys
import cv2
import numpy as np
# Clicked ground-truth positions, appended by on_EVENT_LBUTTONDOWN and
# consumed by set_truth.
x_ps = []
y_ps = []
# Stop annotating after this many seconds of video.
run_time = 120
# initialization
ONLINE = True  # show frames interactively while processing
CALIBRATE = False  # NOTE(review): defined but unused in the visible code
HD = 1280, 640  # NOTE(review): defined but unused in the visible code
# Common colors in OpenCV's BGR channel order.
BGR_COLOR = {'red': (0, 0, 255),
             'green': (127, 255, 0),
             'blue': (255, 127, 0),
             'yellow': (0, 127, 255),
             'black': (0, 0, 0),
             'white': (255, 255, 255)}
# cv2.waitKey delay in ms; 1 = play, 0 = pause (toggled with the space key).
WAIT_DELAY = 1
# Gray-level threshold used to separate the walls from the floor.
THRESHOLD_WALL_VS_FLOOR = 80
layout = np.zeros(0)  # last composed display frame (filled in set_truth)
RELATIVE_TRUTH_PATH = 'truth_rat/'  # output folder for the ground-truth CSV
def counterclockwiseSort(rectangle):
    """Order four corner points counter-clockwise as
    [left-top], [left-bottom], [right-bottom], [right-top]."""
    by_x = sorted(rectangle, key=lambda pt: pt[0])
    left_pair = sorted(by_x[0:2], key=lambda pt: pt[1])
    right_pair = sorted(by_x[2:4], key=lambda pt: pt[1], reverse=True)
    return left_pair + right_pair + by_x[4:]
def angleCos(p0, p1, p2):
    """Return the absolute cosine of the angle at vertex p1
    formed by the segments p1->p0 and p1->p2."""
    v1 = (p0 - p1).astype('float')
    v2 = (p2 - p1).astype('float')
    norm_product = np.sqrt(np.dot(v1, v1) * np.dot(v2, v2))
    return np.abs(np.dot(v1, v2) / norm_product)
# initialize for cropping
perspectiveMatrix = dict()  # per-video-name perspective transform (set by floorCrop)
rectangle = []  # floor contour candidates of the current video
name = ""  # basename (without extension) of the video being processed
croppingPolygon = np.array([[0, 0]])  # NOTE(review): defined but unused in the visible code
croppingPolygons = dict()  # per-video-name floor corner polygon (set by floorCrop)
def floorCrop(file_name):
    """Detect the arena floor in the first frame of `file_name`, show it for
    visual confirmation, and store the floor polygon and its perspective
    transform in the module-level `croppingPolygons` / `perspectiveMatrix`
    dicts (keyed by the video's basename).

    Returns the sorted floor corner vertices (float32) and the 3x3
    perspective matrix. Exits the process if Esc is pressed in the
    confirmation window.
    """
    global perspectiveMatrix, rectangle, name, croppingPolygons
    name = os.path.splitext(file_name)[0]
    cap = cv2.VideoCapture(file_name)
    h, w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    # Take first non-null frame and find corners within it
    ret, frame = cap.read()
    while not frame.any():
        ret, frame = cap.read()
    # Keep only the right-most h-by-h square of the frame.
    frame = frame[:, w - h: w]
    # Convert to the gray video
    frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Blurred frame
    kernelSize = (5, 5)
    frameBlur = cv2.GaussianBlur(frameGray, kernelSize, 0)
    # threshold frame (inverted binary: pixels below the wall/floor
    # threshold become white)
    retval, mask = cv2.threshold(frameBlur, THRESHOLD_WALL_VS_FLOOR, 255, cv2.THRESH_BINARY_INV)
    # find contours in the threshold frame
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    rectangle = []
    HALF_AREA = 0.5 * h * h
    for contour in contours:
        contourPerimeter = cv2.arcLength(contour, True)
        hull = cv2.convexHull(contour)
        # Approximate the hull with a coarse polygon (2% of the perimeter).
        contour = cv2.approxPolyDP(hull, 0.02 * contourPerimeter, True)
        # If the contour is convex rectangle and its area is above a half of total frame area,
        # then it's most likely the floor
        if len(contour) == 4 and cv2.contourArea(contour) > HALF_AREA:
            contour = contour.reshape(-1, 2)
            # Reject quadrilaterals whose corners deviate too far from 90°.
            max_cos = np.max([angleCos(contour[i], contour[(i + 1) % 4], contour[(i + 2) % 4]) for i in range(4)])
            if max_cos < 0.3:
                rectangle.append(contour)
    # Draw the floor contour to new frame
    frameGray = cv2.cvtColor(frameGray, cv2.COLOR_GRAY2BGR)
    imgSquare = np.zeros_like(frameGray)
    cv2.fillPoly(imgSquare, rectangle, BGR_COLOR['red'], cv2.LINE_AA)
    # cv2.add(frameGray, imgSquare / 2, frameGray)
    cv2.drawContours(frameGray, rectangle, -1, BGR_COLOR['red'], 2, cv2.LINE_AA)
    # if there is no suitable floor, then just use the new frame[h*h] as the floor
    if len(rectangle) > 0:
        rectVertices = rectangle[0]
    else:
        rectVertices = np.float32([[0, 0], [0, h], [h, h], [h, 0]])
    # Sort the cropping rectangle vertices according to the following order:
    # [left,top], [left,bottom], [right,bottom], [right,top]
    rectVertices = counterclockwiseSort(rectVertices)
    croppingPolygons[name] = rectVertices
    rectVertices = np.float32(rectVertices)
    tetragonVerticesUpd = np.float32([[0, 0], [0, h], [h, h], [h, 0]])
    # Map the detected floor quadrilateral onto an axis-aligned h-by-h square.
    perspectiveMatrix[name] = cv2.getPerspectiveTransform(np.float32(croppingPolygons[name]), tetragonVerticesUpd)
    frame = cv2.warpPerspective(frame, perspectiveMatrix[name], (h, h))
    # show both floor frame and gray new frame[h*h] with contour
    imgFloorCorners = np.hstack([frame, frameGray])
    cv2.imshow(f'Floor Corners for {name}', imgFloorCorners)
    # cv2.setMouseCallback(
    #     f'Floor Corners for {name}',
    #     drawFloorCrop,
    #     {'imgFloorCorners': imgFloorCorners, 'croppingPolygons': croppingPolygons},
    # )
    # Wait for any key; Esc (27) aborts the whole program.
    k = cv2.waitKey(0)
    if k == 27:
        sys.exit()
    cv2.destroyWindow(f'Floor Corners for {name}')
    return rectVertices, perspectiveMatrix[name]
def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
    """Mouse callback: on a left-button press, record the clicked (x, y)
    into the module-level position lists and echo it to stdout.
    Always returns the current (x_ps, y_ps) lists."""
    clicked = event == cv2.EVENT_LBUTTONDOWN
    if clicked:
        x_ps.append(x)
        y_ps.append(y)
        print('choosed:', x, y)
    return x_ps, y_ps
def set_truth(file_name):
    """Play the video `file_name` (background-subtracted and warped onto the
    floor square computed by floorCrop) and, every 2 seconds of video time,
    pause and let the user click the animal's position. Each click is
    appended to `RELATIVE_TRUTH_PATH + 'truth.csv'` as
    (time, x, y). Stops after `run_time` seconds or on Esc; Space toggles
    pause. Requires floorCrop() to have been run first for this video.
    """
    global perspectiveMatrix, croppingPolygons, rectangle, name, WAIT_DELAY, layout, run_time
    # croppingPolygons[name] = np.array([[0,0]])
    name = os.path.splitext(file_name)[0]
    cap = cv2.VideoCapture(file_name)
    h, w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    # Take first non-null frame and find corners within it
    ret, frame = cap.read()
    while not frame.any():
        ret, frame = cap.read()
    # First pass: build a static background as a running weighted average
    # over all frames of the video.
    background = frame.copy()
    i_frame = 1
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    while frame is not None:
        ret, frame = cap.read()
        if frame is None:
            break
        background = cv2.addWeighted(frame, 0.5 * (1 - i_frame / n_frames),
                                     background, 0.5 * (1 + i_frame / n_frames), 0)
        i_frame += 1
    # Second pass: re-open the video and step through it interactively.
    cap = cv2.VideoCapture(file_name)
    ret, frame = cap.read()
    frame = frame[:, w - h: w]
    while frame is not None:
        ret, frame = cap.read()
        if frame is None:  # not logical
            break
        frameColor = frame[:, w - h: w].copy()
        # Remove the static background so the moving animal stands out.
        frame = cv2.subtract(frame, background)
        r_time = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.
        frame = frame[:, w - h: w]
        # Outline the detected floor on the color preview.
        if len(croppingPolygons[name]) == 4:
            cv2.drawContours(frameColor, [np.reshape(croppingPolygons[name], (4, 2))], -1, BGR_COLOR['red'], 2,
                             cv2.LINE_AA)
        else:
            cv2.drawContours(frameColor, rectangle, -1, BGR_COLOR['red'], 2, cv2.LINE_AA)
        frame = cv2.warpPerspective(frame, perspectiveMatrix[name], (h, h))
        cv2.putText(frame, 'Time ' + str('%.0f sec' % (cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.)),
                    (200, 450), cv2.FONT_HERSHEY_DUPLEX, 1, BGR_COLOR['white'])
        if ONLINE:
            layout = np.hstack((frame, frameColor))
            cv2.imshow(f'Open Field Trace of {name}', layout)
            # Every 2 seconds of video time, pause for a ground-truth click.
            if r_time % 2 == 0:
                print("At time:", r_time, "the position is")
                cv2.namedWindow("Key frame")
                cv2.setMouseCallback("Key frame", on_EVENT_LBUTTONDOWN)
                cv2.imshow("Key frame", layout)
                cv2.waitKey(0)
                # Append the most recent click to the truth CSV.
                if x_ps[-1] is not None:
                    file = open(RELATIVE_TRUTH_PATH + 'truth.csv', 'a')
                    file.write(str(r_time) + ',%.1f' % x_ps[-1] + ',%.1f\n' % y_ps[-1])
                    file.close()
                    print("x position:", x_ps[-1], "y position", y_ps[-1])
                cv2.destroyWindow("Key frame")
        # Keyboard handling: Esc (27) quits, Space (32) toggles pause.
        k = cv2.waitKey(WAIT_DELAY) & 0xff
        if r_time >= run_time:
            break
        if k == 27:
            break
        if k == 32:
            if WAIT_DELAY == 1:
                WAIT_DELAY = 0  # pause
            else:
                WAIT_DELAY = 1  # play as fast as possible
    cv2.destroyAllWindows()
    cap.release()
# Create the output folder (if needed) and (re)write the CSV header for
# the ground-truth file.
if not os.path.exists(RELATIVE_TRUTH_PATH):
    os.makedirs(RELATIVE_TRUTH_PATH)
file = open(RELATIVE_TRUTH_PATH + 'truth.csv', 'w')
file.write('key frame(second), x position, y position\n')
file.close()
# crop the floor
# for file_name in glob.glob('*.mp4'):
#     floorCrop(file_name)
# for file_name in glob.glob('*.mp4'):
#     set_truth(file_name)
# print("collecting end.", x_ps, y_ps)
# Process a single hard-coded video: detect the floor, then collect clicks.
file_name = "rat.mp4"
floorCrop(file_name)
set_truth(file_name) | [
"cv2.GaussianBlur",
"cv2.approxPolyDP",
"cv2.arcLength",
"cv2.fillPoly",
"cv2.imshow",
"cv2.warpPerspective",
"numpy.zeros_like",
"cv2.subtract",
"cv2.contourArea",
"cv2.cvtColor",
"os.path.exists",
"cv2.namedWindow",
"cv2.setMouseCallback",
"numpy.reshape",
"cv2.drawContours",
"cv2.de... | [((421, 432), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (429, 432), True, 'import numpy as np\n'), ((993, 1011), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (1001, 1011), True, 'import numpy as np\n'), ((1182, 1209), 'cv2.VideoCapture', 'cv2.VideoCapture', (['file_name'], {}), '(file_name)\n', (1198, 1209), False, 'import cv2\n'), ((1529, 1568), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1541, 1568), False, 'import cv2\n'), ((1629, 1671), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frameGray', 'kernelSize', '(0)'], {}), '(frameGray, kernelSize, 0)\n', (1645, 1671), False, 'import cv2\n'), ((1713, 1790), 'cv2.threshold', 'cv2.threshold', (['frameBlur', 'THRESHOLD_WALL_VS_FLOOR', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(frameBlur, THRESHOLD_WALL_VS_FLOOR, 255, cv2.THRESH_BINARY_INV)\n', (1726, 1790), False, 'import cv2\n'), ((1860, 1922), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (1876, 1922), False, 'import cv2\n'), ((2667, 2710), 'cv2.cvtColor', 'cv2.cvtColor', (['frameGray', 'cv2.COLOR_GRAY2BGR'], {}), '(frameGray, cv2.COLOR_GRAY2BGR)\n', (2679, 2710), False, 'import cv2\n'), ((2727, 2751), 'numpy.zeros_like', 'np.zeros_like', (['frameGray'], {}), '(frameGray)\n', (2740, 2751), True, 'import numpy as np\n'), ((2756, 2821), 'cv2.fillPoly', 'cv2.fillPoly', (['imgSquare', 'rectangle', "BGR_COLOR['red']", 'cv2.LINE_AA'], {}), "(imgSquare, rectangle, BGR_COLOR['red'], cv2.LINE_AA)\n", (2768, 2821), False, 'import cv2\n'), ((2877, 2953), 'cv2.drawContours', 'cv2.drawContours', (['frameGray', 'rectangle', '(-1)', "BGR_COLOR['red']", '(2)', 'cv2.LINE_AA'], {}), "(frameGray, rectangle, -1, BGR_COLOR['red'], 2, cv2.LINE_AA)\n", (2893, 2953), False, 'import cv2\n'), ((3433, 3457), 'numpy.float32', 'np.float32', (['rectVertices'], {}), '(rectVertices)\n', (3443, 
3457), True, 'import numpy as np\n'), ((3484, 3528), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h], [h, h], [h, 0]]'], {}), '([[0, 0], [0, h], [h, h], [h, 0]])\n', (3494, 3528), True, 'import numpy as np\n'), ((3656, 3715), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'perspectiveMatrix[name]', '(h, h)'], {}), '(frame, perspectiveMatrix[name], (h, h))\n', (3675, 3715), False, 'import cv2\n'), ((3804, 3833), 'numpy.hstack', 'np.hstack', (['[frame, frameGray]'], {}), '([frame, frameGray])\n', (3813, 3833), True, 'import numpy as np\n'), ((3838, 3894), 'cv2.imshow', 'cv2.imshow', (['f"""Floor Corners for {name}"""', 'imgFloorCorners'], {}), "(f'Floor Corners for {name}', imgFloorCorners)\n", (3848, 3894), False, 'import cv2\n'), ((4093, 4107), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4104, 4107), False, 'import cv2\n'), ((4147, 4193), 'cv2.destroyWindow', 'cv2.destroyWindow', (['f"""Floor Corners for {name}"""'], {}), "(f'Floor Corners for {name}')\n", (4164, 4193), False, 'import cv2\n'), ((4660, 4687), 'cv2.VideoCapture', 'cv2.VideoCapture', (['file_name'], {}), '(file_name)\n', (4676, 4687), False, 'import cv2\n'), ((5323, 5350), 'cv2.VideoCapture', 'cv2.VideoCapture', (['file_name'], {}), '(file_name)\n', (5339, 5350), False, 'import cv2\n'), ((7402, 7425), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7423, 7425), False, 'import cv2\n'), ((7453, 7488), 'os.path.exists', 'os.path.exists', (['RELATIVE_TRUTH_PATH'], {}), '(RELATIVE_TRUTH_PATH)\n', (7467, 7488), False, 'import os\n'), ((7494, 7526), 'os.makedirs', 'os.makedirs', (['RELATIVE_TRUTH_PATH'], {}), '(RELATIVE_TRUTH_PATH)\n', (7505, 7526), False, 'import os\n'), ((1141, 1168), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1157, 1168), False, 'import os\n'), ((2027, 2055), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (2040, 2055), False, 'import cv2\n'), ((2071, 2094), 
'cv2.convexHull', 'cv2.convexHull', (['contour'], {}), '(contour)\n', (2085, 2094), False, 'import cv2\n'), ((2113, 2166), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', '(0.02 * contourPerimeter)', '(True)'], {}), '(hull, 0.02 * contourPerimeter, True)\n', (2129, 2166), False, 'import cv2\n'), ((3134, 3178), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h], [h, h], [h, 0]]'], {}), '([[0, 0], [0, h], [h, h], [h, 0]])\n', (3144, 3178), True, 'import numpy as np\n'), ((3587, 3621), 'numpy.float32', 'np.float32', (['croppingPolygons[name]'], {}), '(croppingPolygons[name])\n', (3597, 3621), True, 'import numpy as np\n'), ((4132, 4142), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4140, 4142), False, 'import sys\n'), ((4619, 4646), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (4635, 4646), False, 'import os\n'), ((5153, 5258), 'cv2.addWeighted', 'cv2.addWeighted', (['frame', '(0.5 * (1 - i_frame / n_frames))', 'background', '(0.5 * (1 + i_frame / n_frames))', '(0)'], {}), '(frame, 0.5 * (1 - i_frame / n_frames), background, 0.5 * (1 +\n i_frame / n_frames), 0)\n', (5168, 5258), False, 'import cv2\n'), ((5596, 5627), 'cv2.subtract', 'cv2.subtract', (['frame', 'background'], {}), '(frame, background)\n', (5608, 5627), False, 'import cv2\n'), ((6040, 6099), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'perspectiveMatrix[name]', '(h, h)'], {}), '(frame, perspectiveMatrix[name], (h, h))\n', (6059, 6099), False, 'import cv2\n'), ((836, 850), 'numpy.dot', 'np.dot', (['d1', 'd2'], {}), '(d1, d2)\n', (842, 850), True, 'import numpy as np\n'), ((5945, 6022), 'cv2.drawContours', 'cv2.drawContours', (['frameColor', 'rectangle', '(-1)', "BGR_COLOR['red']", '(2)', 'cv2.LINE_AA'], {}), "(frameColor, rectangle, -1, BGR_COLOR['red'], 2, cv2.LINE_AA)\n", (5961, 6022), False, 'import cv2\n'), ((6319, 6349), 'numpy.hstack', 'np.hstack', (['(frame, frameColor)'], {}), '((frame, frameColor))\n', (6328, 6349), True, 'import numpy as np\n'), 
((6362, 6411), 'cv2.imshow', 'cv2.imshow', (['f"""Open Field Trace of {name}"""', 'layout'], {}), "(f'Open Field Trace of {name}', layout)\n", (6372, 6411), False, 'import cv2\n'), ((2338, 2362), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (2353, 2362), False, 'import cv2\n'), ((6522, 6550), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Key frame"""'], {}), "('Key frame')\n", (6537, 6550), False, 'import cv2\n'), ((6567, 6622), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Key frame"""', 'on_EVENT_LBUTTONDOWN'], {}), "('Key frame', on_EVENT_LBUTTONDOWN)\n", (6587, 6622), False, 'import cv2\n'), ((6639, 6670), 'cv2.imshow', 'cv2.imshow', (['"""Key frame"""', 'layout'], {}), "('Key frame', layout)\n", (6649, 6670), False, 'import cv2\n'), ((6687, 6701), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6698, 6701), False, 'import cv2\n'), ((7027, 7057), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Key frame"""'], {}), "('Key frame')\n", (7044, 7057), False, 'import cv2\n'), ((7075, 7098), 'cv2.waitKey', 'cv2.waitKey', (['WAIT_DELAY'], {}), '(WAIT_DELAY)\n', (7086, 7098), False, 'import cv2\n'), ((861, 875), 'numpy.dot', 'np.dot', (['d1', 'd1'], {}), '(d1, d1)\n', (867, 875), True, 'import numpy as np\n'), ((878, 892), 'numpy.dot', 'np.dot', (['d2', 'd2'], {}), '(d2, d2)\n', (884, 892), True, 'import numpy as np\n'), ((5807, 5849), 'numpy.reshape', 'np.reshape', (['croppingPolygons[name]', '(4, 2)'], {}), '(croppingPolygons[name], (4, 2))\n', (5817, 5849), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Created on Wed Apr 19 15:29:27 2017
@author: bernier2
"""
import h5py
from matplotlib import pyplot as plt
import numpy as np
from hexrd.instrument import centers_of_edge_vec
"""
# UNCOMMENT IF YOU HAVE A SANE LATEX ENV AND WANT NICE FIG LABELS
#
# Options
params = {'text.usetex': True,
'font.size': 14,
'font.family': 'mathrm',
'text.latex.unicode': True,
'pgf.texsystem': 'pdflatex'
}
plt.rcParams.update(params)
"""
def montage(X, colormap=plt.cm.inferno, show_borders=True,
            title=None, xlabel=None, ylabel=None,
            threshold=None, filename=None, fig_ax=None,
            ome_centers=None, frame_indices=None):
    """Tile the frames of a 3-D stack into a square montage and plot it.

    Parameters
    ----------
    X : array, shape (m, n, count)
        Stack of `count` frames of shape (m, n); frames are placed
        row-major into an approximately square grid of tiles.
    colormap : matplotlib colormap, optional
        Copied internally; values below `threshold` render in blue.
    show_borders : bool, optional
        If True, draw dotted cyan grid lines between tiles.
    title, xlabel, ylabel : str, optional
        Figure annotations; axis labels default to LaTeX 2-theta / eta.
    threshold : float, optional
        Raw-intensity display threshold; mapped into the same log space
        as the data before use.
    filename : str, optional
        If given, the figure is saved to this path at 300 dpi.
    fig_ax : (figure, axes) tuple, optional
        Existing figure/axes to draw into; if omitted a new figure is
        created and `plt.show()` is called before returning.
    ome_centers : sequence of float, optional
        Per-frame omega centers printed in the upper-left of each tile.
    frame_indices : sequence, optional
        Per-frame labels printed in the upper-right of each tile.

    Returns
    -------
    M : array
        The assembled (log-scaled) montage image.
    """
    m, n, count = np.shape(X)
    # log-scale intensities, shifted so the minimum maps to log(1) == 0
    img_data = np.log(X - np.min(X) + 1)
    if threshold is None:
        threshold = 0.
    else:
        # transform the raw threshold into the same log space as img_data
        threshold = np.log(threshold - np.min(X) + 1)
    # grid is the smallest square that can hold all frames
    mm = int(np.ceil(np.sqrt(count)))
    nn = mm
    M = np.zeros((mm * m, nn * n))
    # colormap
    colormap = colormap.copy()
    colormap.set_under('b')  # sub-threshold pixels render blue
    if fig_ax is not None:
        fig, ax = fig_ax
    else:
        fig, ax = plt.subplots()
        fig.canvas.manager.set_window_title(title)
    image_id = 0
    for j in range(mm):
        sliceM = j * m
        ax.plot()
        for k in range(nn):
            if image_id >= count:
                # pad unused trailing tiles with NaN so they draw as blank
                img = np.nan*np.ones((m, n))
            else:
                img = img_data[:, :, image_id]
            sliceN = k * n
            M[sliceM:sliceM + m, sliceN:sliceN + n] = img
            if ome_centers is not None and image_id < len(ome_centers):
                # omega-center annotation in the upper-left of the tile
                center = ome_centers[image_id]
                kwargs = {
                    'x': sliceN,
                    'y': sliceM + 0.035 * m * mm,
                    's': f'{center:8.3f}°',
                    'fontdict': {'color': 'w'},
                }
                ax.text(**kwargs)
            if frame_indices is not None and image_id < len(frame_indices):
                # frame-index annotation in the upper-right of the tile
                frame_index = frame_indices[image_id]
                kwargs = {
                    'x': sliceN + n - 0.035 * n * nn,
                    'y': sliceM + 0.035 * m * mm,
                    's': f'{frame_index}',
                    'fontdict': {'color': 'w'},
                }
                ax.text(**kwargs)
            image_id += 1
    # M = np.sqrt(M + np.min(M))
    im = ax.imshow(M, cmap=colormap, vmin=threshold, interpolation='nearest')
    if show_borders:
        # vertical then horizontal dotted grid lines between the tiles
        xs = np.vstack(
            [np.vstack([[n*i, n*i] for i in range(nn+1)]),
             np.tile([0, nn*n], (mm+1, 1))]
        )
        ys = np.vstack(
            [np.tile([0, mm*m], (nn+1, 1)),
             np.vstack([[m*i, m*i] for i in range(mm+1)])]
        )
        for xp, yp in zip(xs, ys):
            ax.plot(xp, yp, 'c:')
    if xlabel is None:
        ax.set_xlabel(r'$2\theta$', fontsize=14)
    else:
        ax.set_xlabel(xlabel, fontsize=14)
    if ylabel is None:
        ax.set_ylabel(r'$\eta$', fontsize=14)
    else:
        ax.set_ylabel(ylabel, fontsize=14)
    ax.axis('auto')
    # hide the axes frame; the colorbar is added in a dedicated axes strip
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    cbar_ax = fig.add_axes([0.875, 0.155, 0.025, 0.725])
    cbar = fig.colorbar(im, cax=cbar_ax)
    cbar.set_label(r"$\ln(intensity)$", labelpad=5)
    ax.set_xticks([])
    ax.set_yticks([])
    if title is not None:
        ax.set_title(title, fontsize=18)
    if filename is not None:
        fig.savefig(filename, bbox_inches='tight', dpi=300)
    if fig_ax is None:
        plt.show()
    return M
def create_labels(det_key, tth_crd, eta_crd, peak_id, hkl):
    """Build the title/xlabel/ylabel strings for a spot montage.

    Angular coordinates are given in radians and reported in degrees;
    `hkl` is rendered as space-separated, center-padded integers.
    """
    tth_deg = np.degrees(tth_crd)
    eta_deg = np.degrees(eta_crd)
    hkl_str = ' '.join(f'{int(x):^3}' for x in hkl)
    return {
        'title': f"Spot {peak_id}, detector '{det_key}' ({hkl_str})",
        'xlabel': rf'$2\theta\in({tth_deg[0]:.3f}, {tth_deg[-1]:.3f})$',
        'ylabel': rf'$\eta\in({eta_deg[0]:.3f}, {eta_deg[-1]:.3f})$',
    }
def extract_hkls_from_spots_data(all_spots, grain_id=None, detector_key=None):
    """Collect the unique HKL ids present in in-memory spots data.

    Optionally restrict the scan to a single grain and/or detector.
    Returns a dict mapping hkl_id -> {'str': formatted hkl string,
    'peak_ids': list of peak ids seen for that hkl}.
    """
    data_map = SPOTS_DATA_MAP
    hkls = {}
    for g_id, spots in all_spots.items():
        if grain_id is not None and g_id != grain_id:
            continue
        for d_key, spot_output in spots[1].items():
            if detector_key is not None and d_key != detector_key:
                continue
            for data in spot_output:
                hkl_id = int(data[data_map['hkl_id']])
                peak_id = int(data[data_map['peak_id']])
                entry = hkls.get(hkl_id)
                if entry is None:
                    # first sighting of this hkl: format it once
                    hkl = data[data_map['hkl']]
                    entry = {
                        'str': ' '.join(f'{int(x):^3}' for x in hkl),
                        'peak_ids': [],
                    }
                    hkls[hkl_id] = entry
                entry['peak_ids'].append(peak_id)
    return hkls
def plot_gvec_from_spots_data(all_spots, gvec_id, threshold=0.):
    """Montage every spot matching `gvec_id` in in-memory spots data."""
    data_map = SPOTS_DATA_MAP
    for spots in all_spots.values():
        for det_key, spot_output in spots[1].items():
            for data in spot_output:
                if data[data_map['hkl_id']] != gvec_id:
                    continue
                labels = create_labels(
                    det_key=det_key,
                    tth_crd=centers_of_edge_vec(data[data_map['tth_edges']]),
                    eta_crd=centers_of_edge_vec(data[data_map['eta_edges']]),
                    peak_id=data[data_map['peak_id']],
                    hkl=data[data_map['hkl']],
                )
                # move the first axis to the end so frames stack along the
                # last axis (montage indexes frames as [:, :, i])
                intensities = np.transpose(
                    data[data_map['patch_data']],
                    (1, 2, 0)
                )
                # make montage
                montage(intensities, threshold=threshold, **labels)
def plot_gvec_from_hdf5(fname, gvec_id, threshold=0.):
    """Montage every spot matching `gvec_id` in an HDF5 spots archive.

    The archive layout is 'reflection_data/<detector>/<spot>', each spot
    carrying 'tth_crd', 'eta_crd' and 'intensities' datasets plus
    'hkl_id', 'peak_id' and 'hkl' attributes.
    """
    with h5py.File(fname, 'r') as f:
        for det_key, panel_data in f['reflection_data'].items():
            for spot_data in panel_data.values():
                attrs = spot_data.attrs
                if attrs['hkl_id'] != gvec_id:
                    continue
                labels = create_labels(
                    det_key=det_key,
                    tth_crd=spot_data['tth_crd'],
                    eta_crd=spot_data['eta_crd'],
                    peak_id=attrs['peak_id'],
                    hkl=attrs['hkl'],
                )
                # move the first axis to the end so frames stack along the
                # last axis (montage indexes frames as [:, :, i])
                intensities = np.transpose(
                    np.array(spot_data['intensities']),
                    (1, 2, 0)
                )
                # make montage
                montage(intensities, threshold=threshold, **labels)
# Keep track of which list index is each piece of data
# This is for when the full data list gets returned
# (field name -> position in each per-spot data list; consumed by
# `extract_hkls_from_spots_data` and `plot_gvec_from_spots_data` above)
SPOTS_DATA_MAP = {
    'detector_id': 0,
    'iRefl': 1,
    'peak_id': 2,
    'hkl_id': 3,
    'hkl': 4,
    'tth_edges': 5,
    'eta_edges': 6,
    'ome_eval': 7,
    'xyc_arr': 8,
    'ijs': 9,
    'frame_indices': 10,
    'patch_data': 11,
    'ang_centers_i_pt': 12,
    'xy_centers_i_pt': 13,
    'meas_angs': 14,
    'meas_xy': 15,
}
# =============================================================================
# %% CMD LINE HOOK
# =============================================================================
if __name__ == '__main__':
    # command-line entry point: montage all spots belonging to one
    # G-vector ID stored in an HDF5 spots archive
    import argparse
    parser = argparse.ArgumentParser(
        description="Montage of spot data for a specifed G-vector family")
    parser.add_argument('hdf5_archive',
                        help="hdf5 archive filename",
                        type=str)
    parser.add_argument('gvec_id',
                        help="unique G-vector ID from PlaneData",
                        type=int)
    parser.add_argument('-t', '--threshold',
                        help="intensity threshold",
                        type=float, default=0.)
    args = parser.parse_args()
    h5file = args.hdf5_archive
    hklid = args.gvec_id
    threshold = args.threshold
    plot_gvec_from_hdf5(h5file, hklid, threshold=threshold)
| [
"hexrd.instrument.centers_of_edge_vec",
"h5py.File",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.degrees",
"numpy.zeros",
"numpy.transpose",
"numpy.ones",
"numpy.shape",
"numpy.min",
"numpy.array",
"numpy.tile",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((738, 749), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (746, 749), True, 'import numpy as np\n'), ((962, 988), 'numpy.zeros', 'np.zeros', (['(mm * m, nn * n)'], {}), '((mm * m, nn * n))\n', (970, 988), True, 'import numpy as np\n'), ((3747, 3766), 'numpy.degrees', 'np.degrees', (['tth_crd'], {}), '(tth_crd)\n', (3757, 3766), True, 'import numpy as np\n'), ((3781, 3800), 'numpy.degrees', 'np.degrees', (['eta_crd'], {}), '(eta_crd)\n', (3791, 3800), True, 'import numpy as np\n'), ((7771, 7866), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Montage of spot data for a specifed G-vector family"""'}), "(description=\n 'Montage of spot data for a specifed G-vector family')\n", (7794, 7866), False, 'import argparse\n'), ((1145, 1159), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1157, 1159), True, 'from matplotlib import pyplot as plt\n'), ((3646, 3656), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3654, 3656), True, 'from matplotlib import pyplot as plt\n'), ((6223, 6244), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (6232, 6244), False, 'import h5py\n'), ((925, 939), 'numpy.sqrt', 'np.sqrt', (['count'], {}), '(count)\n', (932, 939), True, 'import numpy as np\n'), ((776, 785), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (782, 785), True, 'import numpy as np\n'), ((2584, 2617), 'numpy.tile', 'np.tile', (['[0, nn * n]', '(mm + 1, 1)'], {}), '([0, nn * n], (mm + 1, 1))\n', (2591, 2617), True, 'import numpy as np\n'), ((2662, 2695), 'numpy.tile', 'np.tile', (['[0, mm * m]', '(nn + 1, 1)'], {}), '([0, mm * m], (nn + 1, 1))\n', (2669, 2695), True, 'import numpy as np\n'), ((5929, 5982), 'numpy.transpose', 'np.transpose', (["data[data_map['patch_data']]", '(1, 2, 0)'], {}), "(data[data_map['patch_data']], (1, 2, 0))\n", (5941, 5982), True, 'import numpy as np\n'), ((889, 898), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (895, 898), True, 'import numpy as np\n'), ((1385, 
1400), 'numpy.ones', 'np.ones', (['(m, n)'], {}), '((m, n))\n', (1392, 1400), True, 'import numpy as np\n'), ((5628, 5658), 'hexrd.instrument.centers_of_edge_vec', 'centers_of_edge_vec', (['tth_edges'], {}), '(tth_edges)\n', (5647, 5658), False, 'from hexrd.instrument import centers_of_edge_vec\n'), ((5691, 5721), 'hexrd.instrument.centers_of_edge_vec', 'centers_of_edge_vec', (['eta_edges'], {}), '(eta_edges)\n', (5710, 5721), False, 'from hexrd.instrument import centers_of_edge_vec\n'), ((6890, 6924), 'numpy.array', 'np.array', (["spot_data['intensities']"], {}), "(spot_data['intensities'])\n", (6898, 6924), True, 'import numpy as np\n')] |
import numpy as np
import json
class Pfpr:
    """False Positive Rate over the UCID (original, unmodified) image set.

    For each correlation threshold T, the FPR is the fraction of
    pre-computed cross-correlation scores that exceed T, i.e. originals
    wrongly flagged as copies.
    """

    def __init__(self):
        pass

    def run(self, blocksize=64, t_list=(0.9888,)):
        """Compute the FPR for each threshold in `t_list`.

        Parameters
        ----------
        blocksize : int
            Block size selecting the pre-computed score file under
            modules/figure4/ucid/.
        t_list : sequence of float
            Correlation thresholds to evaluate.  (Default is a tuple to
            avoid the shared mutable-default-argument pitfall.)

        Returns
        -------
        list of float
            One FPR per threshold, in the same order as `t_list`.
        """
        path = 'modules/figure4/ucid/ucid_cc_dict_%d.json' % blocksize
        with open(path, 'r') as f:
            ucid_cc_dict = json.load(f)
        cc_scores = np.array(ucid_cc_dict['cc_list'])
        # FPR(T) = #{score > T} / #scores
        return [cc_scores[cc_scores > T].size / cc_scores.size
                for T in t_list]
class Ptpr(object):
    """True Positive Rate over the Copydays (modified copies) image set.

    For each correlation threshold T, the TPR is the fraction of
    pre-computed cross-correlation scores that exceed T, i.e. copies
    correctly detected.
    """

    def __init__(self):
        pass

    def run(self, blocksize=64, t_list=(0.9888,)):
        """Compute the TPR for each threshold in `t_list`.

        Parameters
        ----------
        blocksize : int
            Block size selecting the pre-computed score file under
            modules/figure4/copydays/.
        t_list : sequence of float
            Correlation thresholds to evaluate.  (Default is a tuple to
            avoid the shared mutable-default-argument pitfall.)

        Returns
        -------
        list of float
            One TPR per threshold, in the same order as `t_list`.

        Raises
        ------
        ValueError
            If the score file contains no sub-set score lists.
        """
        path = 'modules/figure4/copydays/copydays_cc_dict_%d.json' % blocksize
        with open(path, 'r') as f:
            copydays_cc_dict = json.load(f)
        # pool the scores from every copydays sub-set into one flat array
        # (O(n) concatenation instead of repeated np.append)
        chunks = [np.asarray(d['cc_list'], dtype=float).ravel()
                  for d in copydays_cc_dict.values()]
        if not chunks:
            raise ValueError("no cc_list data found in %s" % path)
        cc_scores = np.concatenate(chunks)
        # TPR(T) = #{score > T} / #scores
        return [cc_scores[cc_scores > T].size / cc_scores.size
                for T in t_list]
| [
"numpy.append",
"numpy.array"
] | [((339, 372), 'numpy.array', 'np.array', (["ucid_cc_dict['cc_list']"], {}), "(ucid_cc_dict['cc_list'])\n", (347, 372), True, 'import numpy as np\n'), ((1168, 1204), 'numpy.append', 'np.append', (['copydays_cc_list', 'cc_list'], {}), '(copydays_cc_list, cc_list)\n', (1177, 1204), True, 'import numpy as np\n')] |
import numpy as np
def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lambda_):
    """Collaborative-filtering cost and gradient.

    Parameters
    ----------
    params : array
        Flat vector holding X (num_movies x num_features) followed by
        Theta (num_users x num_features), both in Fortran (column-major)
        order.
    Y : array, shape (num_movies, num_users)
        Matrix of user ratings of movies.
    R : array, shape (num_movies, num_users)
        Indicator matrix; R[i, j] == 1 when movie i was rated by user j.
    lambda_ : float
        L2 regularization strength.

    Returns
    -------
    (J, grad)
        Scalar regularized cost and the flattened (Fortran-order)
        gradient with respect to X then Theta.
    """
    # unfold the factor matrices from the flat parameter vector
    split = num_movies * num_features
    X = np.reshape(params[:split], (num_movies, num_features), order='F')
    Theta = np.reshape(params[split:], (num_users, num_features), order='F')
    # prediction errors over all entries; R masks to observed ratings
    errors = X.dot(Theta.T) - Y
    # squared-error cost on observed entries plus L2 regularization
    J = np.sum(errors ** 2 * R) / 2
    J += lambda_ * (np.sum(Theta ** 2) + np.sum(X ** 2)) / 2
    # gradients of the regularized cost
    masked = errors * R
    X_grad = masked.dot(Theta) + lambda_ * X
    Theta_grad = masked.T.dot(X) + lambda_ * Theta
    grad = np.r_[X_grad.ravel(order='F'), Theta_grad.ravel(order='F')]
    return J, grad
| [
"numpy.dot",
"numpy.sum",
"numpy.transpose",
"numpy.reshape"
] | [((240, 329), 'numpy.reshape', 'np.reshape', (['params[:num_movies * num_features]', '(num_movies, num_features)'], {'order': '"""F"""'}), "(params[:num_movies * num_features], (num_movies, num_features),\n order='F')\n", (250, 329), True, 'import numpy as np\n'), ((355, 443), 'numpy.reshape', 'np.reshape', (['params[num_movies * num_features:]', '(num_users, num_features)'], {'order': '"""F"""'}), "(params[num_movies * num_features:], (num_users, num_features),\n order='F')\n", (365, 443), True, 'import numpy as np\n'), ((1885, 1905), 'numpy.dot', 'np.dot', (['(E * R)', 'Theta'], {}), '(E * R, Theta)\n', (1891, 1905), True, 'import numpy as np\n'), ((1749, 1768), 'numpy.transpose', 'np.transpose', (['Theta'], {}), '(Theta)\n', (1761, 1768), True, 'import numpy as np\n'), ((1793, 1811), 'numpy.sum', 'np.sum', (['(E ** 2 * R)'], {}), '(E ** 2 * R)\n', (1799, 1811), True, 'import numpy as np\n'), ((1957, 1976), 'numpy.transpose', 'np.transpose', (['(E * R)'], {}), '(E * R)\n', (1969, 1976), True, 'import numpy as np\n'), ((1834, 1852), 'numpy.sum', 'np.sum', (['(Theta ** 2)'], {}), '(Theta ** 2)\n', (1840, 1852), True, 'import numpy as np\n'), ((1853, 1867), 'numpy.sum', 'np.sum', (['(X ** 2)'], {}), '(X ** 2)\n', (1859, 1867), True, 'import numpy as np\n')] |
"""
Library for representing 3D volumetric CT scan data and manipulating them
by resampling, etc.
"""
from __future__ import print_function
import sys, os
import numpy as np
import scipy.ndimage.interpolation as interpolation
import transforms3d.affines as affine3d
import copy
def map_coords_to_scaled_float(coords, orig_size, new_size):
    """Map a coordinate in the original image to the re-scaled image.

    Given a coordinate tuple and the shapes of the original and scaled
    3-D images, returns a floating-point coordinate where the voxel at
    array index (0, 0) has its center at (0.5, 0.5).  Take the floor of
    the return value to get back to array indices.

    Raises TypeError when any argument is not a tuple/list/set or when
    the dimension counts disagree.
    """
    if not all(
        isinstance(arg, (tuple, list, set)) for arg in (coords, orig_size, new_size)
    ):
        raise TypeError(
            "`coords`, `orig_size` and `new_size` must be tuples corresponding to the image shape."
        )
    if not all(len(arg) == len(coords) for arg in (orig_size, new_size)):
        raise TypeError(
            "Number of dimensions in `coords` ({}), `orig_size` ({}), and `new_size` ({}) did not match.".format(
                len(coords), len(orig_size), len(new_size)
            )
        )
    scaled = []
    for dim in range(len(orig_size)):
        # scale factor and voxel-centered offset from the original center
        ratio = float(new_size[dim]) / orig_size[dim]
        offset = (coords[dim] + 0.5) - orig_size[dim] / 2.0
        scaled.append(new_size[dim] / 2.0 + ratio * offset)
    return tuple(scaled)
def map_coords_to_scaled(coords, orig_size, new_size):
    """Integer-index version of `map_coords_to_scaled_float`.

    Returns the indices of the voxel containing the center of the
    transformed coordinate location (truncation of the float mapping).
    """
    float_coords = map_coords_to_scaled_float(coords, orig_size, new_size)
    return tuple(int(value) for value in float_coords)
class StandardizedScan(object):
    """
    Represents a CT scan standardized to a common cubic voxel size (by default, 1mm x 1mm x 1mm).
    """
    class TransformError(RuntimeError):
        """
        Thrown when transform cannot be applied to image.
        Catch as type `StandardizedScan.TransformError`.
        """
        pass
    def __init__(
        self, dicomdir=None, img=None, hdr=None, mm_per_voxel=1.0, orientation="view"
    ):
        """
        Initialize a standardized scan where each voxel is a cube of size `mm_per_voxel`, given
        a path to a directory containing DICOM image files (`dicomdir`) or an image `img` and
        corresponding header `hdr` (which must contain an attribute `.Affine` containing the affine
        transformation matrix corresponding to the image scan).
        Initializing from DICOM is preferred; if `dicomdir` is provided, `img` and `hdr` are ignored.
        The scan may be represented in one of two possible orientations:
        'view'   - dimension ordering (Z, Y, X) (or slices, columns, rows) -- this is default
                   since it is easier to reason about in a numpy array sense; it puts the "head"
                   at index 0 and increases Z toward the "feet".  X increases toward the patient's
                   left, Y increases toward the patient's posterior.
        'dicom'  - dimension ordering (X, Y, Z) -- this is the ordering that corresponds to the
                   DICOM header standard; the Z axis increases from "feet" toward "head", with
                   index 0 at the "feet".  X increases toward the patient's left, Y increases
                   toward the patient's posterior.
        You may also load the scan from an hdf5 file previously saved by the `.save_hd5` method; to
        do so, place the file name in the `img` parameter.
        If `mm_per_voxel` is set to a negative value, the original voxel shapes will be preserved.
        If `mm_per_voxel` is a tuple, list, or ndarray of length 3, the voxel shapes will be adjusted
        to the specified sizes (and a negative in any dimension keeps the original size for that
        dimension).
        """
        self.__ready = False
        # scalar voxel size goes through float(); per-axis sizes (tuple/
        # list/ndarray) raise TypeError and are stored as a float array
        try:
            self.__mm_per_voxel = float(mm_per_voxel)
        except TypeError:
            self.__mm_per_voxel = np.array(mm_per_voxel, dtype="float")
        self.__orientation = "view" if orientation.lower() != "dicom" else "dicom"
        if dicomdir is not None:
            self.from_dicom_dir(dicomdir)
        elif img is not None:
            # a string `img` is interpreted as an HDF5 archive filename
            if isinstance(img, str):
                if os.path.splitext(img)[1].lower() in [".hd5", ".hdf5", ".hdf", ".h5"]:
                    self.load_hd5(img)
                else:
                    raise RuntimeError(
                        "`img` must be either an image (in numpy array form) or the filename of an HDF5-format file to load."
                    )
            else:
                self.from_img(img, hdr)
    def from_dicom_dir(self, dicomdir):
        """
        (Re)-initialize the StandardizedScan object given a path to a directory containing DICOM files
        corresponding to slices of the volume. The orientation will be set to the current orientation of
        the StandardizedScan object.
        """
        import load_dicom_dir as ldd
        img, hdr = ldd.load_dicom_dir_dicom_orientation(dicomdir)
        img = img.astype(np.int16)
        self.from_img(img, hdr, orientation="dicom")
    def from_img(self, img, hdr=None, orientation="dicom"):
        """
        (Re)-initialize the StandardizedScan object given an image (NumPy array) and header (similar
        to pydicom header format); the orientation of `img` must be specified as 'dicom' or 'view',
        with the default being 'dicom' (X,Y,Z).
        The final orientation will be set to match the current orientation of the StandardizedScan
        object (if it differs from `orientation`).
        """
        orientation = "view" if orientation.lower() != "dicom" else "dicom"
        # NOTE(review): the bare `except` below silently swallows every
        # error -- including a NameError, since `rescale_image` is not
        # defined or imported in this module (perhaps `ldd.rescale_image`
        # was intended); verify whether rescaling ever actually runs.
        try:
            if hdr.RescaleSlope != 1.0 and hdr.RescaleIntercept != 0:
                import load_dicom_dir as ldd
                img = rescale_image(img, hdr)
                img = img.astype(np.int16)
                hdr.RescaleIntercept = 0
                hdr.RescaleSlope = 1.0
        except:
            pass
        self.__img = img
        self.__hdr = hdr
        self.__orig_shape = self.__img.shape
        self.__resample()
        if orientation != self.__orientation:
            if orientation == "dicom":
                self.__reorient_dicom_to_view()
            else:
                self.__reorient_view_to_dicom()
        self.__ready = True
    @property
    def shape(self):
        """
        Get the shape of the current StandardizedScan image.
        """
        self.__assert_ready()
        return self.__img.shape
    @property
    def original_shape(self):
        """
        Get the shape of the original image this StandardizedScan was created from.
        """
        self.__assert_ready()
        return copy.deepcopy(self.__orig_shape)
    @property
    def img(self):
        """
        Get the current StandardizedScan image (as a NumPy array).
        """
        self.__assert_ready()
        return self.__img
    @img.setter
    def img(self, img):
        """
        Set the image component of the StandardizedScan to a new image volume, which must be
        a NumPy array of the same shape and orientation as the current image.
        """
        if not issubclass(type(img), np.ndarray):
            raise RuntimeError("Values assigned to `img` must be NumPy arrays.")
        # NOTE(review): "attepted" typo in the error message below (left
        # unchanged here since callers may match on the message text)
        if img.shape != self.__img.shape:
            raise RuntimeError(
                "Cannot assign an image of a different shape to a StandardizedScan object. Expected shape {}, attepted to assign shape {}.".format(
                    self.__img.shape, img.shape
                )
            )
        self.__img = img.copy()
    @property
    def orientation(self):
        """
        Get the current orientation of the StandardizedScan image.
        """
        return self.__orientation
    @orientation.setter
    def orientation(self, orientation):
        """
        Set orientation to one of ["view", "dicom"].
        """
        orientation = "view" if orientation.lower() != "dicom" else "dicom"
        if self.__orientation != orientation:
            # only re-orient pixel data if an image is actually loaded
            if self.__ready:
                if self.__orientation == "view":
                    self.__reorient_view_to_dicom()
                else:
                    self.__reorient_dicom_to_view()
            else:
                self.__orientation = orientation
    def save_hd5(self, filename, create_path=False):
        """
        Saves the StandardizedScan in HDF5 format so that it can be loaded again
        directly.  Info for mapping original coordinates to the standardized scan
        volume is preserved.
        """
        self.__assert_ready()
        import h5py
        directory = os.path.dirname(filename)
        basename = os.path.basename(filename)
        if basename == "":
            raise RuntimeError("A non-empty filename must be specified for `save_as`.")
        if not os.path.isdir(directory) and not create_path:
            raise RuntimeError(
                'Directory path "{}" does not exist; use `create_path` option to automatically create it.'.format(
                    directory
                )
            )
        if create_path and not os.path.isdir(directory):
            os.makedirs(directory)
        # normalize the extension to a recognized HDF5 suffix
        if not os.path.splitext(basename)[1].lower() in [
            ".hd5",
            ".hdf5",
            ".hdf",
            ".h5",
        ]:
            basename = "{}.hd5".format(basename)
        filename = os.path.join(directory, basename)
        fp = h5py.File(filename, "w")
        fp["img"] = self.__img
        fp["mm_per_voxel"] = self.__mm_per_voxel
        fp["original_shape"] = self.__orig_shape
        # orientation is stored as a flag: 0 == "view", 1 == "dicom"
        fp["dicom_orientation"] = 0 if self.__orientation == "view" else 1
        fp.close()
    def load_hd5(self, filename):
        """
        Loads directly from HD5-format produced by the `save_as` method.
        """
        import h5py
        fp = h5py.File(filename, "r")
        # NOTE(review): `Dataset.value` was removed in h5py 3.0, so this
        # requires h5py < 3 (the modern spelling is `fp["img"][()]`).
        self.__img = fp["img"].value
        self.__mm_per_voxel = fp["mm_per_voxel"].value
        self.__orig_shape = tuple(fp["original_shape"].value)
        file_orientation = "view" if fp["dicom_orientation"].value == 0 else "dicom"
        fp.close()
        self.__hdr = None
        if file_orientation != self.__orientation:
            if file_orientation == "view":
                self.__reorient_view_to_dicom()
            else:
                self.__reorient_dicom_to_view()
        self.__ready = True
    def map_original_coordinates_float(self, coords, orientation="view"):
        """
        Returns a set of voxel-centered (floating-point) coordinates corresponding
        to coordinates from the original image the StandardizedScan was created from.
        Specify the orientation of the original coordinates in "view" or "dicom" format
        (default is "view" (Z,Y,X)).
        """
        orientation = "view" if orientation.lower() != "dicom" else "dicom"
        if orientation != self.__orientation:
            # converting between "view" (Z,Y,X) and "dicom" (X,Y,Z):
            # flip the Z index, then reverse the axis order
            coords = list(coords)
            coords[self.__zdim(orientation)] = (
                self.shape[self.__zdim()] - coords[self.__zdim(orientation)] - 1
            )
            coords = tuple(reversed(coords))
        return map_coords_to_scaled_float(coords, self.__orig_shape, self.__img.shape)
    def map_original_coordinates(self, coords, orientation="view"):
        """
        Returns integer coordinates corresponding to coordinates from
        the original image the StandardizedScan was created from.
        Specify the orientation of the original coordinates in "view" or "dicom" format
        (default is "view" (Z,Y,X)).
        """
        return tuple(
            [int(i) for i in self.map_original_coordinates_float(coords, orientation)]
        )
    def __resample(self):
        """
        Resample image to standard shape; this occurs only at (re)-initialization time
        """
        # NOTE(review): `shape` and `affine` are assigned but never used
        # below; the bare `except` also hides the real attribute error.
        shape = self.__img.shape
        affine = None
        hdr = self.__hdr
        try:
            affine = hdr.Affine
        except:
            print("header: {}".format(hdr))
            raise self.TransformError(
                "Image header must contain affine transformation information in `.Affine` attribute."
            )
        # decompose the affine into translation, rotation, zoom, shear
        _, R, Z, S = affine3d.decompose44(hdr.Affine)
        # If any of the __mm_per_voxel items are negative, keep the original zoom at those locations:
        mm_per_voxel = np.atleast_1d(self.__mm_per_voxel).astype("float")
        if len(mm_per_voxel) not in [1, len(Z)]:
            raise RuntimeError(
                "`mm_per_voxel` must be a scalar value or tuple of length {}.".format(
                    len(Z)
                )
            )
        if any(mm_per_voxel < 0):
            if len(mm_per_voxel) == 1:
                mm_per_voxel = Z
            else:
                mm_per_voxel[mm_per_voxel < 0] = Z[mm_per_voxel < 0]
        # If there are shears, bail out (we don't support that yet):
        if S.sum() != 0:
            raise self.TransformError(
                "Image affine includes shear, which is not supported in this version."
            )
        # See if any rotations are necessary (we don't support that yet):
        if np.any(np.eye(R.shape[0]).astype(R.dtype) != R):
            raise self.TransformError(
                "Image affine includes rotation, which is not supported in this version"
            )
        # Now apply scaling
        Z = Z / np.array(mm_per_voxel)
        self.__img = interpolation.zoom(self.__img, Z)
    def __reorient_dicom_to_view(self):
        """
        Change DICOM (X,Y,Z) orientation to "view" (Z,Y,X) orientation with Z axis inverted (head at index 0).
        """
        self.__img = np.transpose(self.__img, (2, 1, 0))  # Move from (X,Y,Z) to (Z,Y,X)
        self.__img = self.__img[::-1]  # Arrange slices so "head" end is at index 0.
        self.__orig_shape = tuple(
            [self.__orig_shape[2], self.__orig_shape[1], self.__orig_shape[0]]
        )
        self.__orientation = "view"
    def __reorient_view_to_dicom(self):
        """
        Change "view" (Z,Y,X) orientation to "DICOM" (X,Y,Z) orientation with Z axis increasing from feet toward head (feet at index 0).
        """
        self.__img = self.__img[::-1]  # Arrange slices so "feet" end is at index 0.
        self.__img = np.transpose(self.__img, (2, 1, 0))  # Move from (Z,Y,X) to (X,Y,Z)
        self.__orig_shape = tuple(
            [self.__orig_shape[2], self.__orig_shape[1], self.__orig_shape[0]]
        )
        self.__orientation = "dicom"
    def __assert_ready(self):
        """
        Checks that the image has been properly initialized and raises an exception if it has not.
        """
        if not self.__ready:
            raise RuntimeError("StandardizedScan object has not been initialized.")
    def __zdim(self, orientation=None):
        """
        Get the index of the dimension that represents the 'Z' axis in a given orientation;
        if orientation is omitted, the current 'Z' dimension for this StandardizedScan is
        returned.
        """
        if orientation is None:
            orientation = self.__orientation
        orientation = "view" if orientation.lower() != "dicom" else "dicom"
        return 0 if orientation == "view" else 2
if __name__ == "__main__":
    # this module is a library; running it directly is an error
    print("This script is not designed to be executed directly.")
    sys.exit(1)
| [
"copy.deepcopy",
"h5py.File",
"transforms3d.affines.decompose44",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"numpy.transpose",
"scipy.ndimage.interpolation.zoom",
"numpy.array",
"os.path.splitext",
"load_dicom_dir.load_dicom_dir_dicom_orientation",
"numpy.eye",
... | [((15937, 15948), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15945, 15948), False, 'import sys, os\n'), ((5501, 5547), 'load_dicom_dir.load_dicom_dir_dicom_orientation', 'ldd.load_dicom_dir_dicom_orientation', (['dicomdir'], {}), '(dicomdir)\n', (5537, 5547), True, 'import load_dicom_dir as ldd\n'), ((7257, 7289), 'copy.deepcopy', 'copy.deepcopy', (['self.__orig_shape'], {}), '(self.__orig_shape)\n', (7270, 7289), False, 'import copy\n'), ((9217, 9242), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (9232, 9242), False, 'import sys, os\n'), ((9262, 9288), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9278, 9288), False, 'import sys, os\n'), ((9986, 10019), 'os.path.join', 'os.path.join', (['directory', 'basename'], {}), '(directory, basename)\n', (9998, 10019), False, 'import sys, os\n'), ((10034, 10058), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (10043, 10058), False, 'import h5py\n'), ((10448, 10472), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (10457, 10472), False, 'import h5py\n'), ((12794, 12826), 'transforms3d.affines.decompose44', 'affine3d.decompose44', (['hdr.Affine'], {}), '(hdr.Affine)\n', (12814, 12826), True, 'import transforms3d.affines as affine3d\n'), ((14021, 14054), 'scipy.ndimage.interpolation.zoom', 'interpolation.zoom', (['self.__img', 'Z'], {}), '(self.__img, Z)\n', (14039, 14054), True, 'import scipy.ndimage.interpolation as interpolation\n'), ((14252, 14287), 'numpy.transpose', 'np.transpose', (['self.__img', '(2, 1, 0)'], {}), '(self.__img, (2, 1, 0))\n', (14264, 14287), True, 'import numpy as np\n'), ((14873, 14908), 'numpy.transpose', 'np.transpose', (['self.__img', '(2, 1, 0)'], {}), '(self.__img, (2, 1, 0))\n', (14885, 14908), True, 'import numpy as np\n'), ((9744, 9766), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (9755, 9766), False, 'import sys, os\n'), ((13977, 13999), 
'numpy.array', 'np.array', (['mm_per_voxel'], {}), '(mm_per_voxel)\n', (13985, 13999), True, 'import numpy as np\n'), ((4474, 4511), 'numpy.array', 'np.array', (['mm_per_voxel'], {'dtype': '"""float"""'}), "(mm_per_voxel, dtype='float')\n", (4482, 4511), True, 'import numpy as np\n'), ((9420, 9444), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (9433, 9444), False, 'import sys, os\n'), ((9706, 9730), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (9719, 9730), False, 'import sys, os\n'), ((12952, 12986), 'numpy.atleast_1d', 'np.atleast_1d', (['self.__mm_per_voxel'], {}), '(self.__mm_per_voxel)\n', (12965, 12986), True, 'import numpy as np\n'), ((13749, 13767), 'numpy.eye', 'np.eye', (['R.shape[0]'], {}), '(R.shape[0])\n', (13755, 13767), True, 'import numpy as np\n'), ((9783, 9809), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (9799, 9809), False, 'import sys, os\n'), ((4756, 4777), 'os.path.splitext', 'os.path.splitext', (['img'], {}), '(img)\n', (4772, 4777), False, 'import sys, os\n')] |
"""Distance metrics related to common features of receptor-ligand
processes in molecular systems.
The ReceptorDistance class is an abstract class that provides some
common functionality for normalizing reference states, providing
correct indices of receptor and ligand atoms, and a common image
function.
Subclasses of ReceptorDistance need only implement the
'image_distance' function according to their needs.
The UnbindingDistance is a useful metric for enhancing ligand movement
away from the reference bound state conformation.
The RebindingDistance is a useful metric for enhancing the movement of
a ligand towards a reference state.
"""
import logging
import numpy as np
from wepy.util.util import box_vectors_to_lengths_angles
from geomm.grouping import group_pair
from geomm.superimpose import superimpose
from geomm.rmsd import calc_rmsd
from geomm.centering import center_around
from wepy.resampling.distances.distance import Distance
class ReceptorDistance(Distance):
    """Abstract base distance for receptor-ligand molecular systems.

    Walker states must expose 'positions' (Nx3) and 'box_vectors' (3x3)
    attributes.  States are preprocessed by regrouping the ligand with
    the binding site into one periodic image and centering the box on
    the binding site (two-component systems can drift into different
    periodic images, which makes raw RMSDs meaningless).  The `image`
    method then superimposes the binding-site atoms onto a reference
    structure so subclasses can compare ligand positions directly.
    """

    def __init__(self, ligand_idxs, binding_site_idxs, ref_state):
        """Construct a distance metric.

        Parameters
        ----------
        ligand_idxs : arraylike of int
            Indices (into state 'positions') of the ligand atoms.

        binding_site_idxs : arraylike of int
            Indices (into state 'positions') of the binding-site atoms
            used for alignment; may be any subset of the receptor.

        ref_state : object implementing WalkerState
            Reference state, with 'positions' (Nx3 dims) and
            'box_vectors' (3x3 array) attributes, that all walker
            images will be aligned to.
        """
        # atom selections within the full system
        self._lig_idxs = ligand_idxs
        self._bs_idxs = binding_site_idxs

        # atom counts for each selection
        self._n_lig_atoms = len(self._lig_idxs)
        self._n_bs_atoms = len(self._bs_idxs)

        # an "image" keeps only the ligand atoms followed by the
        # binding-site atoms, in that order
        self._image_idxs = np.concatenate((self._lig_idxs, self._bs_idxs))
        self._image_lig_idxs = np.arange(self._n_lig_atoms)
        self._image_bs_idxs = np.arange(
            self._n_lig_atoms, self._n_lig_atoms + self._n_bs_atoms
        )

        # reference image used as the alignment target for all states
        self.ref_image = self._unaligned_image(ref_state)

    def _unaligned_image(self, state):
        """Preprocess a state into an unaligned receptor image.

        Groups the binding site and ligand into the same periodic image,
        centers the box on the binding site, and returns only the
        positions of the ligand and binding-site atoms.

        Parameters
        ----------
        state : object implementing WalkerState
            State with 'positions' (Nx3 dims) and 'box_vectors' (3x3
            array) attributes.

        Returns
        -------

        """
        box_lengths, _box_angles = box_vectors_to_lengths_angles(
            state['box_vectors'])

        # put the ligand into the same periodic image as the binding site
        grouped_positions = group_pair(
            state['positions'], box_lengths, self._bs_idxs, self._lig_idxs)

        # center the complex around the binding site
        centered_positions = center_around(grouped_positions, self._bs_idxs)

        # keep only the ligand + binding-site atoms
        return centered_positions[self._image_idxs]

    def image(self, state):
        """Transform a state to an aligned receptor image.

        The unaligned image of the state is superimposed onto the
        reference image using the binding-site atoms only.

        Parameters
        ----------
        state : object implementing WalkerState
            State with 'positions' (Nx3 dims) and 'box_vectors' (3x3
            array) attributes.

        Returns
        -------
        receptor_image : array of float
            Positions of the binding site and ligand after
            preprocessing and superposition.
        """
        unaligned = self._unaligned_image(state)
        aligned, _, _ = superimpose(
            self.ref_image, unaligned, idxs=self._image_bs_idxs)
        return aligned
class UnbindingDistance(ReceptorDistance):
    """Distance metric comparing walker states by ligand RMSD.

    Images are produced by ReceptorDistance.image; the distance
    between two images is solely the RMSD over the ligand atoms, so
    walkers whose ligands have diverged in position get larger
    distances.
    """

    def image_distance(self, image_a, image_b):
        # RMSD restricted to the ligand atoms of the two images
        return calc_rmsd(image_a, image_b, idxs=self._image_lig_idxs)
class RebindingDistance(ReceptorDistance):
    """Distance metric comparing walker states by relative ligand RMSD.

    Images are produced by ReceptorDistance.image; the distance
    between two images is the relative difference between each
    ligand's RMSD to the reference state.
    """

    def image_distance(self, image_a, image_b):
        # ligand RMSD of each image against the reference image
        rmsd_a = calc_rmsd(self.ref_image, image_a, idxs=self._image_lig_idxs)
        rmsd_b = calc_rmsd(self.ref_image, image_b, idxs=self._image_lig_idxs)
        # compare the reciprocals so small (rebound) RMSDs dominate
        return abs(1./rmsd_a - 1./rmsd_b)
| [
"geomm.grouping.group_pair",
"geomm.superimpose.superimpose",
"geomm.rmsd.calc_rmsd",
"numpy.arange",
"wepy.util.util.box_vectors_to_lengths_angles",
"numpy.concatenate",
"geomm.centering.center_around"
] | [((2756, 2803), 'numpy.concatenate', 'np.concatenate', (['(self._lig_idxs, self._bs_idxs)'], {}), '((self._lig_idxs, self._bs_idxs))\n', (2770, 2803), True, 'import numpy as np\n'), ((2905, 2933), 'numpy.arange', 'np.arange', (['self._n_lig_atoms'], {}), '(self._n_lig_atoms)\n', (2914, 2933), True, 'import numpy as np\n'), ((2964, 3030), 'numpy.arange', 'np.arange', (['self._n_lig_atoms', '(self._n_lig_atoms + self._n_bs_atoms)'], {}), '(self._n_lig_atoms, self._n_lig_atoms + self._n_bs_atoms)\n', (2973, 3030), True, 'import numpy as np\n'), ((3820, 3871), 'wepy.util.util.box_vectors_to_lengths_angles', 'box_vectors_to_lengths_angles', (["state['box_vectors']"], {}), "(state['box_vectors'])\n", (3849, 3871), False, 'from wepy.util.util import box_vectors_to_lengths_angles\n'), ((4063, 4137), 'geomm.grouping.group_pair', 'group_pair', (["state['positions']", 'box_lengths', 'self._bs_idxs', 'self._lig_idxs'], {}), "(state['positions'], box_lengths, self._bs_idxs, self._lig_idxs)\n", (4073, 4137), False, 'from geomm.grouping import group_pair\n'), ((4255, 4302), 'geomm.centering.center_around', 'center_around', (['grouped_positions', 'self._bs_idxs'], {}), '(grouped_positions, self._bs_idxs)\n', (4268, 4302), False, 'from geomm.centering import center_around\n'), ((5291, 5357), 'geomm.superimpose.superimpose', 'superimpose', (['self.ref_image', 'state_image'], {'idxs': 'self._image_bs_idxs'}), '(self.ref_image, state_image, idxs=self._image_bs_idxs)\n', (5302, 5357), False, 'from geomm.superimpose import superimpose\n'), ((5940, 5994), 'geomm.rmsd.calc_rmsd', 'calc_rmsd', (['image_a', 'image_b'], {'idxs': 'self._image_lig_idxs'}), '(image_a, image_b, idxs=self._image_lig_idxs)\n', (5949, 5994), False, 'from geomm.rmsd import calc_rmsd\n'), ((6544, 6605), 'geomm.rmsd.calc_rmsd', 'calc_rmsd', (['self.ref_image', 'image_a'], {'idxs': 'self._image_lig_idxs'}), '(self.ref_image, image_a, idxs=self._image_lig_idxs)\n', (6553, 6605), False, 'from geomm.rmsd import 
calc_rmsd\n'), ((6629, 6690), 'geomm.rmsd.calc_rmsd', 'calc_rmsd', (['self.ref_image', 'image_b'], {'idxs': 'self._image_lig_idxs'}), '(self.ref_image, image_b, idxs=self._image_lig_idxs)\n', (6638, 6690), False, 'from geomm.rmsd import calc_rmsd\n')] |
import logging
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
logger = logging.getLogger(__name__)
class BraggPeak(object):
    """A single Bragg peak modelled as an interpolating spline.

    The constructor fits a spline through the supplied samples; the
    raw samples themselves are discarded after initialization and all
    later queries go through the spline combined with the peak's
    current position and weight.
    """

    def __init__(self, bp_domain, bp_vals):
        """Fit the interpolating spline and record the peak maximum.

        :param bp_domain: sample positions, used only for the interpolation
        :param bp_vals: sample values, used only for the interpolation
        :raises ValueError: if the two sequences differ in length
        """
        if len(bp_domain) != len(bp_vals):
            raise ValueError("Domain and values have different lengths!")
        # ext=3 -> extrapolation outside the domain returns the boundary value
        self.spline = interpolate.InterpolatedUnivariateSpline(bp_domain, bp_vals, ext=3)
        self.initial_position = bp_domain[np.array(bp_vals).argmax()]
        self.current_position = self.initial_position
        self._weight = 1.0
        logger.debug("Creating BraggPeak...\n\tPrimary max position: {0}"
                     "\n\tPeak range: {1}".format(self.initial_position, self.range()))

    def __repr__(self):
        return str("{0} with position: {1} and weight: {2}".format(
            self.__class__.__name__, self.position, self.weight))

    def __str__(self):
        return str(self.spline)

    def __getitem__(self, point):
        """Spline value for a single point on the X axis."""
        return self.spline(point)

    @property
    def position(self):
        return self.current_position

    @position.setter
    def position(self, new_position):
        self.current_position = new_position

    @property
    def weight(self):
        return self._weight

    @weight.setter
    def weight(self, new_weight):
        if new_weight < 0.0 or new_weight > 1.0:
            raise ValueError("Weight should be from 0.0 to 1.0")
        else:
            self._weight = new_weight

    def evaluate(self, domain):
        """Weighted peak values over *domain*, shifted to the current position."""
        return self._weight * self.spline(domain + self.initial_position - self.current_position)

    def _calculate_idx_for_given_height_value(self, domain, val=0.8):
        """Helper returning indices of *domain* closest to height *val*.

        The search domain is split at the peak maximum so that one
        candidate index is found on each flank of the peak (the left
        index is None when the maximum sits at the very first sample).

        :param domain - search domain
        :param val - percentage value where the width is calculated at
        """
        heights = self.evaluate(domain=domain)
        if val > heights.max():
            raise ValueError('Desired values cannot be greater than max in BraggPeak!')
        merge_idx = heights.argmax()
        rising = heights[:merge_idx]
        falling = heights[merge_idx:]
        try:
            idx_left = (np.abs(rising - val)).argmin()
        except ValueError:
            # maximum at index 0 -> no rising flank at all
            idx_left = None
        idx_right = (np.abs(falling - val)).argmin()
        return idx_left, merge_idx + idx_right

    def range(self, val=0.90, precision=0.001):
        """Position on the distal (falling) side where the peak drops to *val*."""
        pos = self.position
        distal_domain = np.arange(pos, pos + 2, precision)
        probe = copy(self)
        probe.weight = 1.0  # probe the unweighted shape
        # np.interp requires an increasing x-array, hence the reversals
        result = np.interp([val], probe.evaluate(distal_domain)[::-1], distal_domain[::-1])
        return result[0]

    def proximal_range(self, val=0.990, precision=0.001):
        """Position on the proximal (rising) side where the peak reaches *val*."""
        pos = self.position
        proximal_domain = np.arange(pos - 2, pos, precision)
        probe = copy(self)
        probe.weight = 1.0  # probe the unweighted shape
        result = np.interp([val], probe.evaluate(proximal_domain), proximal_domain)
        return result[0]

    def width_at(self, val=0.80, precision=0.001):
        """Peak width at height *val*: distal position minus proximal position."""
        distal = self.range(val=val, precision=precision)
        proximal = self.proximal_range(val=val, precision=precision)
        return distal - proximal
if __name__ == '__main__':
    # Manual cross-check of BraggPeak against the beprof implementation.
    from os.path import join
    from beprof import profile
    from pbc.helpers import load_data_from_dump

    x_peak, y_peak = load_data_from_dump(file_name=join('..', 'data', 'cydos1.dat'), delimiter=' ')
    # alternative dataset:
    # x_peak, y_peak = load_data_from_dump(file_name=join('..', 'data', '3500.dat'), delimiter=' ')
    y_peak /= y_peak.max()  # normalize to a unit maximum

    peak = BraggPeak(x_peak, y_peak)
    stacked = np.vstack((x_peak, y_peak)).T
    reference = profile.Profile(stacked)

    print("left 99% bef", reference.x_at_y(0.99, reverse=False))
    print("left 99% pbc", peak.proximal_range(val=0.99))
    print("right 90% bef", reference.x_at_y(0.90, reverse=True))
    print("right 90% pbc", peak.range(0.90))
    print("wid new", peak.width_at(val=0.80))

    # the two curves should cover each other
    # plt.plot(reference.x, reference.y, 'r')
    plt.plot(x_peak, y_peak, 'b')
    plt.show()
| [
"matplotlib.pyplot.show",
"scipy.interpolate.InterpolatedUnivariateSpline",
"matplotlib.pyplot.plot",
"numpy.abs",
"copy.copy",
"numpy.arange",
"numpy.array",
"beprof.profile.Profile",
"numpy.vstack",
"os.path.join",
"logging.getLogger"
] | [((129, 156), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'import logging\n'), ((4546, 4565), 'beprof.profile.Profile', 'profile.Profile', (['yy'], {}), '(yy)\n', (4561, 4565), False, 'from beprof import profile\n'), ((4903, 4932), 'matplotlib.pyplot.plot', 'plt.plot', (['x_peak', 'y_peak', '"""b"""'], {}), "(x_peak, y_peak, 'b')\n", (4911, 4932), True, 'import matplotlib.pyplot as plt\n'), ((4937, 4947), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4945, 4947), True, 'import matplotlib.pyplot as plt\n'), ((818, 885), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interpolate.InterpolatedUnivariateSpline', (['bp_domain', 'bp_vals'], {'ext': '(3)'}), '(bp_domain, bp_vals, ext=3)\n', (858, 885), False, 'from scipy import interpolate\n'), ((3391, 3425), 'numpy.arange', 'np.arange', (['pos', '(pos + 2)', 'precision'], {}), '(pos, pos + 2, precision)\n', (3400, 3425), True, 'import numpy as np\n'), ((3444, 3454), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (3448, 3454), False, 'from copy import copy\n'), ((3690, 3724), 'numpy.arange', 'np.arange', (['(pos - 2)', 'pos', 'precision'], {}), '(pos - 2, pos, precision)\n', (3699, 3724), True, 'import numpy as np\n'), ((3743, 3753), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (3747, 3753), False, 'from copy import copy\n'), ((4505, 4532), 'numpy.vstack', 'np.vstack', (['(x_peak, y_peak)'], {}), '((x_peak, y_peak))\n', (4514, 4532), True, 'import numpy as np\n'), ((4283, 4315), 'os.path.join', 'join', (['""".."""', '"""data"""', '"""cydos1.dat"""'], {}), "('..', 'data', 'cydos1.dat')\n", (4287, 4315), False, 'from os.path import join\n'), ((3142, 3161), 'numpy.abs', 'np.abs', (['(right - val)'], {}), '(right - val)\n', (3148, 3161), True, 'import numpy as np\n'), ((928, 945), 'numpy.array', 'np.array', (['bp_vals'], {}), '(bp_vals)\n', (936, 945), True, 'import numpy as np\n'), ((3037, 3055), 'numpy.abs', 'np.abs', (['(left - val)'], {}), '(left 
- val)\n', (3043, 3055), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import cv2
import csv
import sys
import numpy as np
import net.preprocessing.preprocess as p
import concurrent.futures
# Path to the 68-point face landmark model (Windows-style path, escaped backslash);
# the filename matches dlib's standard shape_predictor_68_face_landmarks model.
LANDMARKS_MODEL_PATH = 'landmarks\\shape_predictor_68_face_landmarks.dat'
def get_landmarks_metadata(landmark_file):
    """Read a comma-separated landmark file into an array of string rows.

    Each row keeps its original CSV cells as strings (typically an
    image id followed by landmark coordinates).
    """
    with open(landmark_file) as handle:
        rows = list(csv.reader(handle, delimiter=','))
    return np.array(rows)
def process_image(input_folder, output_folder, img_id, landmarks, mean_landmarks):
    """Align one face image and write the result under the same file name.

    Images that do not contain exactly one detected face are skipped
    silently.
    """
    img = cv2.imread(input_folder + "/" + img_id)
    detected = p.get_faces(img)
    # only single-face images are usable for alignment
    if len(detected) != 1:
        return
    aligned = p.preprocess(img, detected[0], LANDMARKS_MODEL_PATH, landmarks, mean_landmarks)
    cv2.imwrite(output_folder + "/" + img_id, aligned)
def get_mean_landmarks(landmarks):
    """Compute a normalized mean 5-point landmark template.

    *landmarks* is a (faces, 5, 2) array ordered as left eye, right
    eye, nose, left mouth corner, right mouth corner.  Each face is
    translated to a common midpoint and scaled by its vertical size
    before averaging; the mean is finally rescaled so its larger
    coordinate extent equals one.
    """
    # unpack the five landmark points into named (faces, 2) arrays
    left_eye, right_eye, nose, left_mouth, right_mouth = (
        landmarks[:, k, :] for k in range(5))

    # centroids of point triples around the face
    left = (left_eye + nose + left_mouth) / 3.0
    right = (right_eye + nose + right_mouth) / 3.0
    top = (left_eye + nose + right_eye) / 3.0
    bottom = (left_mouth + nose + right_mouth) / 3.0

    top_mid = (top + left + right) / 3.0
    bottom_mid = (bottom + left + right) / 3.0
    mid = (top_mid + bottom_mid) / 2.0

    # vertical face size: distance from eye midpoint to mouth midpoint
    v_size = np.linalg.norm((left_eye + right_eye) / 2.0 - (left_mouth + right_mouth) / 2.0, axis=1)

    mid.shape = -1, 1, 2
    v_size.shape = -1, 1, 1

    # translate/scale each face individually, then average across faces
    norm_lm = (landmarks - mid) / v_size
    mean_lm = np.mean(norm_lm, axis=0)

    # rescale so the larger of the x/y extents equals one
    mean_lm = mean_lm / max(np.max(mean_lm[:, 0]) - np.min(mean_lm[:, 0]),
                             np.max(mean_lm[:, 1]) - np.min(mean_lm[:, 1]))
    return mean_lm
def process_images(input_folder, output_folder, landmark_file, num_workers=5):
    """Align every image listed in *landmark_file* using a thread pool.

    The landmark file provides an image id per row followed by the
    flattened landmark coordinates; the mean template is computed from
    all rows and shared by every alignment task.
    """
    metadata = get_landmarks_metadata(landmark_file)
    image_ids = [entry[0] for entry in metadata]
    n_images = len(metadata)
    n_points = (len(metadata[0]) - 1) // 2
    # numeric columns -> (images, points, 2) integer coordinates
    coords = np.reshape(np.array(metadata[:, 1:], dtype=int),
                       (n_images, n_points, -1))
    template = get_mean_landmarks(coords)
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        for image_id, points in zip(image_ids, coords):
            executor.submit(process_image, input_folder, output_folder,
                            image_id, points, template)
if __name__ == "__main__":
    # usage: <input dir> <output dir> <landmarks csv>
    if len(sys.argv) != 4:
        print("input directory, output directory and landmarks file should be provided!")
        exit(1)
    input_dir, output_dir, landmarks_file = sys.argv[1], sys.argv[2], sys.argv[3]
    process_images(input_dir, output_dir, landmarks_file)
| [
"net.preprocessing.preprocess.get_faces",
"net.preprocessing.preprocess.preprocess",
"csv.reader",
"cv2.imwrite",
"cv2.imread",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.linalg.norm",
"numpy.min"
] | [((459, 478), 'numpy.array', 'np.array', (['landmarks'], {}), '(landmarks)\n', (467, 478), True, 'import numpy as np\n'), ((574, 613), 'cv2.imread', 'cv2.imread', (["(input_folder + '/' + img_id)"], {}), "(input_folder + '/' + img_id)\n", (584, 613), False, 'import cv2\n'), ((626, 642), 'net.preprocessing.preprocess.get_faces', 'p.get_faces', (['img'], {}), '(img)\n', (637, 642), True, 'import net.preprocessing.preprocess as p\n'), ((701, 777), 'net.preprocessing.preprocess.preprocess', 'p.preprocess', (['img', 'faces[0]', 'LANDMARKS_MODEL_PATH', 'landmarks', 'mean_landmarks'], {}), '(img, faces[0], LANDMARKS_MODEL_PATH, landmarks, mean_landmarks)\n', (713, 777), True, 'import net.preprocessing.preprocess as p\n'), ((783, 837), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + '/' + img_id)", 'img_aligned'], {}), "(output_folder + '/' + img_id, img_aligned)\n", (794, 837), False, 'import cv2\n'), ((1386, 1478), 'numpy.linalg.norm', 'np.linalg.norm', (['((left_eye + right_eye) / 2.0 - (left_mouth + right_mouth) / 2.0)'], {'axis': '(1)'}), '((left_eye + right_eye) / 2.0 - (left_mouth + right_mouth) / \n 2.0, axis=1)\n', (1400, 1478), True, 'import numpy as np\n'), ((1583, 1607), 'numpy.mean', 'np.mean', (['norm_lm'], {'axis': '(0)'}), '(norm_lm, axis=0)\n', (1590, 1607), True, 'import numpy as np\n'), ((346, 381), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (356, 381), False, 'import csv\n'), ((1998, 2044), 'numpy.array', 'np.array', (['landmarks_metadata[:, 1:]'], {'dtype': 'int'}), '(landmarks_metadata[:, 1:], dtype=int)\n', (2006, 2044), True, 'import numpy as np\n'), ((1636, 1657), 'numpy.max', 'np.max', (['mean_lm[:, 0]'], {}), '(mean_lm[:, 0])\n', (1642, 1657), True, 'import numpy as np\n'), ((1660, 1681), 'numpy.min', 'np.min', (['mean_lm[:, 0]'], {}), '(mean_lm[:, 0])\n', (1666, 1681), True, 'import numpy as np\n'), ((1711, 1732), 'numpy.max', 'np.max', (['mean_lm[:, 1]'], {}), '(mean_lm[:, 1])\n', 
(1717, 1732), True, 'import numpy as np\n'), ((1735, 1756), 'numpy.min', 'np.min', (['mean_lm[:, 1]'], {}), '(mean_lm[:, 1])\n', (1741, 1756), True, 'import numpy as np\n')] |
##################################################
# Train a RAW-to-RGB model using training images #
##################################################
import tensorflow as tf
from tensorflow.keras.utils import Progbar
import imageio
import numpy as np
import sys
from datetime import datetime
from load_dataset import load_train_patch_exp, load_val_data_exp
from model import resnet, adversarial
import utils
import vgg
from tqdm import tqdm
# Processing command arguments (returned in a fixed order by utils.process_command_args)
dataset_dir, model_dir, result_dir, vgg_dir, dslr_dir, phone_dir,\
arch, LEVEL, inst_norm, num_maps_base, restore_iter, patch_w, patch_h,\
batch_size, train_size, learning_rate, eval_step, num_train_iters, save_mid_imgs, \
leaky, norm_gen, fac_content, fac_mse, fac_ssim, fac_color, fac_texture \
    = utils.process_command_args(sys.argv)

# Defining the size of the input and target image patches
PATCH_WIDTH = patch_w//2
PATCH_HEIGHT = patch_h//2
# 4 * 3 input channels — presumably 4 RAW channels x 3 exposures (over/normal/under);
# verify against the data loader
PATCH_DEPTH = 4 * 3
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * PATCH_DEPTH

LEVEL = 0  # NOTE: overrides the LEVEL parsed from the command line above
DSLR_SCALE = float(1) / (2 ** (max(LEVEL,0) - 1))  # = 2.0 for LEVEL 0
TARGET_WIDTH = int(PATCH_WIDTH * DSLR_SCALE)
TARGET_HEIGHT = int(PATCH_HEIGHT * DSLR_SCALE)
TARGET_DEPTH = 3  # RGB target
TARGET_SIZE = TARGET_WIDTH * TARGET_HEIGHT * TARGET_DEPTH

np.random.seed(0)  # reproducible patch sampling and augmentation

# Defining the model architecture
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
    time_start = datetime.now()

    # Placeholders for training data
    phone_ = tf.compat.v1.placeholder(tf.float32, [batch_size, PATCH_HEIGHT, PATCH_WIDTH, PATCH_DEPTH])
    dslr_ = tf.compat.v1.placeholder(tf.float32, [batch_size, TARGET_HEIGHT, TARGET_WIDTH, TARGET_DEPTH])

    # determine model name
    # Get the processed enhanced image
    if arch == "resnet":
        name_model = "resnet"
        enhanced = resnet(phone_, leaky = leaky, instance_norm = norm_gen)

    # Losses
    ## MSE loss
    loss_mse = tf.reduce_mean(tf.math.squared_difference(enhanced, dslr_))
    loss_generator = loss_mse * fac_mse
    loss_list = [loss_mse]
    loss_text = ["loss_mse"]

    ## Adversarial loss (disabled)
    # enhanced_gray = tf.reshape(tf.image.rgb_to_grayscale(enhanced), [-1, TARGET_WIDTH * TARGET_HEIGHT])
    # dslr_gray = tf.reshape(tf.image.rgb_to_grayscale(dslr_),[-1, TARGET_WIDTH * TARGET_HEIGHT])
    # adv_ = tf.compat.v1.placeholder(tf.float32, [None, 1])
    # adversarial_ = tf.multiply(enhanced_gray, 1 - adv_) + tf.multiply(dslr_gray, adv_)
    # adversarial_image = tf.reshape(adversarial_, [-1, TARGET_HEIGHT, TARGET_WIDTH, 1])
    # discrim_predictions = adversarial(adversarial_image)
    # discrim_target = tf.concat([adv_, 1 - adv_], 1)
    # loss_discrim = -tf.reduce_sum(discrim_target * tf.compat.v1.log(tf.clip_by_value(discrim_predictions, 1e-10, 1.0)))
    # loss_texture = -loss_discrim
    # correct_predictions = tf.equal(tf.argmax(discrim_predictions, 1), tf.argmax(discrim_target, 1))
    # discim_accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
    # loss_list.append(loss_texture)
    # loss_text.append("loss_texture")

    ## Color loss (MSE between blurred images)
    if fac_color > 0:
        enhanced_blur = utils.blur(enhanced)
        dslr_blur = utils.blur(dslr_)
        loss_color = tf.reduce_mean(tf.math.squared_difference(dslr_blur, enhanced_blur))
        loss_generator += loss_color * fac_color
        loss_list.append(loss_color)
        loss_text.append("loss_color")

    ## PSNR loss (tracked for reporting; not added to the generator loss)
    loss_psnr = tf.reduce_mean(tf.image.psnr(enhanced, dslr_, 1.0))
    loss_list.append(loss_psnr)
    loss_text.append("loss_psnr")

    ## SSIM loss (1 - SSIM is minimized)
    if fac_ssim > 0:
        loss_ssim = tf.reduce_mean(tf.image.ssim(enhanced, dslr_, 1.0))
        loss_generator += (1 - loss_ssim) * fac_ssim
        loss_list.append(loss_ssim)
        loss_text.append("loss_ssim")

    ## MS-SSIM loss
    #loss_ms_ssim = tf.reduce_mean(tf.image.ssim_multiscale(enhanced, dslr_, 1.0))

    ## Content loss (VGG feature distance)
    if fac_content > 0:
        CONTENT_LAYER = 'relu5_4'
        enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(enhanced * 255))
        dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(dslr_ * 255))
        loss_content = tf.reduce_mean(tf.math.squared_difference(enhanced_vgg[CONTENT_LAYER], dslr_vgg[CONTENT_LAYER]))
        loss_generator += loss_content * fac_content
        loss_list.append(loss_content)
        loss_text.append("loss_content")

    ## Final loss function (total generator loss reported first)
    loss_list.insert(0, loss_generator)
    loss_text.insert(0, "loss_generator")

    # Optimize network parameters (only the generator's variables)
    generator_vars = [v for v in tf.compat.v1.global_variables() if v.name.startswith("generator")]
    train_step_gen = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss_generator, var_list=generator_vars)

    # discriminator_vars = [v for v in tf.compat.v1.global_variables() if v.name.startswith("discriminator")]
    # train_step_disc = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss_discrim, var_list=discriminator_vars)

    # Initialize and restore the variables
    print("Initializing variables...")
    sess.run(tf.compat.v1.global_variables_initializer())
    saver = tf.compat.v1.train.Saver(var_list=generator_vars, max_to_keep=100)

    # all_zeros = np.reshape(np.zeros((batch_size, 1)), [batch_size, 1])
    if restore_iter > 0: # restore the variables/weights
        name_model_restore = name_model
        name_model_restore_full = name_model_restore + "_iteration_" + str(restore_iter)
        print("Restoring Variables from:", name_model_restore_full)
        saver.restore(sess, model_dir + name_model_restore_full + ".ckpt")

    # Loading training and validation data
    print("Loading validation data...")
    val_data, val_answ = load_val_data_exp(dataset_dir, dslr_dir, phone_dir, 'mediatek_raw_over/', 'mediatek_raw_under/', PATCH_WIDTH, PATCH_HEIGHT, DSLR_SCALE)
    print("Validation data was loaded\n")

    print("Loading training data...")
    train_data, train_answ = load_train_patch_exp(dataset_dir, dslr_dir, phone_dir, 'mediatek_raw_over/', 'mediatek_raw_under/', train_size, PATCH_WIDTH, PATCH_HEIGHT, DSLR_SCALE)
    print("Training data was loaded\n")

    VAL_SIZE = val_data.shape[0]
    num_val_batches = int(val_data.shape[0] / batch_size)

    # fixed random crops reused for visual progress snapshots
    if save_mid_imgs:
        visual_crops_ids = np.random.randint(0, VAL_SIZE, batch_size)
        visual_val_crops = val_data[visual_crops_ids, :]
        visual_target_crops = val_answ[visual_crops_ids, :]

    print("Training network...")

    iter_start = restore_iter+1 if restore_iter > 0 else 0
    # truncate/create the log file for this run
    logs = open(model_dir + "logs_" + str(iter_start) + "-" + str(num_train_iters) + ".txt", "w+")
    logs.close()

    training_loss = 0.0
    train_acc_discrim = 0.0
    name_model_save = name_model

    for i in tqdm(range(iter_start, num_train_iters + 1), miniters=100):
        name_model_save_full = name_model_save + "_iteration_" + str(i)

        # Train generator on a random batch of patches
        idx_train = np.random.randint(0, train_size, batch_size)

        phone_images = train_data[idx_train]
        dslr_images = train_answ[idx_train]

        # Data augmentation: random flips and rotations (applied identically to input and target)
        for k in range(batch_size):
            random_rotate = np.random.randint(1, 100) % 4
            phone_images[k] = np.rot90(phone_images[k], random_rotate)
            dslr_images[k] = np.rot90(dslr_images[k], random_rotate)
            random_flip = np.random.randint(1, 100) % 2
            if random_flip == 1:
                phone_images[k] = np.flipud(phone_images[k])
                dslr_images[k] = np.flipud(dslr_images[k])

        # Training step; running loss is averaged over eval_step iterations
        [loss_temp, temp] = sess.run([loss_generator, train_step_gen], feed_dict={phone_: phone_images, dslr_: dslr_images})
        training_loss += loss_temp / eval_step

        # [loss_temp, temp] = sess.run([loss_generator, train_step_gen], feed_dict={phone_: phone_images, dslr_: dslr_images, adv_: all_zeros})
        # training_loss += loss_temp / eval_step

        # Train discrimintator (disabled)
        # idx_train = np.random.randint(0, train_size, batch_size)
        # # generate image swaps (dslr or enhanced) for discriminator
        # swaps = np.reshape(np.random.randint(0, 2, batch_size), [batch_size, 1])
        # phone_images = train_data[idx_train]
        # dslr_images = train_answ[idx_train]
        # [accuracy_temp, temp] = sess.run([discim_accuracy, train_step_disc],
        #                                 feed_dict={phone_: phone_images, dslr_: dslr_images, adv_: swaps})
        # train_acc_discrim += accuracy_temp / eval_step

        if i % eval_step == 0:
            # Evaluate model on the whole validation set, batch by batch
            val_losses_gen = np.zeros((1, len(loss_text)))
            # val_acc_disc = 0.0

            for j in range(num_val_batches):
                be = j * batch_size
                en = (j+1) * batch_size
                phone_images = val_data[be:en]
                dslr_images = val_answ[be:en]

                # [enhanced_crops, accuracy_disc, losses] = sess.run([enhanced, discim_accuracy, loss_list], \
                #                     feed_dict={phone_: phone_images, dslr_: dslr_images, adv_: swaps})
                [enhanced_crops, losses] = sess.run([enhanced, loss_list], \
                    feed_dict={phone_: phone_images, dslr_: dslr_images})

                val_losses_gen += np.asarray(losses) / num_val_batches
                # val_acc_disc += accuracy_disc / num_val_batches

            logs_gen = "step %d | training: %.4g, " % (i, training_loss)
            for idx, loss in enumerate(loss_text):
                logs_gen += "%s: %.4g; " % (loss, val_losses_gen[0][idx])
            # logs_gen += "\n | discriminator accuracy: %.4g\n " % val_acc_disc

            print(logs_gen)

            # Save the results to log file
            logs = open(model_dir + "logs_" + str(iter_start) + "-" + str(num_train_iters) + ".txt", "a")
            logs.write(logs_gen)
            logs.write('\n')
            logs.close()

            # Optional: save visual results for several validation image crops
            if save_mid_imgs:
                enhanced_crops = sess.run(enhanced, feed_dict={phone_: visual_val_crops, dslr_: dslr_images})

                idx = 0
                for crop in enhanced_crops:
                    if idx < 4:
                        before_after = np.hstack((crop,
                                        np.reshape(visual_target_crops[idx], [TARGET_HEIGHT, TARGET_WIDTH, TARGET_DEPTH])))
                        imageio.imwrite(result_dir + name_model_save_full + "_img_" + str(idx) + ".jpg",
                                        before_after)
                    idx += 1

            # Saving the model that corresponds to the current iteration
            saver.save(sess, model_dir + name_model_save_full + ".ckpt", write_meta_graph=False)

            training_loss = 0.0

        #if i % test_step == 0 and i > 0:
        # Loading new training data (periodically resample the training patches)
        if i % 1000 == 0 and i > 0:
            del train_data
            del train_answ
            train_data, train_answ = load_train_patch_exp(dataset_dir, dslr_dir, phone_dir, 'mediatek_raw_over/', 'mediatek_raw_under/', train_size, PATCH_WIDTH, PATCH_HEIGHT, DSLR_SCALE)
print('total train/eval time:', datetime.now() - time_start) | [
"numpy.random.seed",
"vgg.preprocess",
"load_dataset.load_val_data_exp",
"tensorflow.image.psnr",
"tensorflow.image.ssim",
"numpy.random.randint",
"load_dataset.load_train_patch_exp",
"numpy.rot90",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorf... | [((807, 843), 'utils.process_command_args', 'utils.process_command_args', (['sys.argv'], {}), '(sys.argv)\n', (833, 843), False, 'import utils\n'), ((1257, 1274), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1271, 1274), True, 'import numpy as np\n'), ((1340, 1362), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1360, 1362), True, 'import tensorflow as tf\n'), ((1389, 1403), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1401, 1403), False, 'from datetime import datetime\n'), ((1455, 1549), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[batch_size, PATCH_HEIGHT, PATCH_WIDTH, PATCH_DEPTH]'], {}), '(tf.float32, [batch_size, PATCH_HEIGHT, PATCH_WIDTH,\n PATCH_DEPTH])\n', (1479, 1549), True, 'import tensorflow as tf\n'), ((1558, 1655), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[batch_size, TARGET_HEIGHT, TARGET_WIDTH, TARGET_DEPTH]'], {}), '(tf.float32, [batch_size, TARGET_HEIGHT,\n TARGET_WIDTH, TARGET_DEPTH])\n', (1582, 1655), True, 'import tensorflow as tf\n'), ((5111, 5177), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {'var_list': 'generator_vars', 'max_to_keep': '(100)'}), '(var_list=generator_vars, max_to_keep=100)\n', (5135, 5177), True, 'import tensorflow as tf\n'), ((5692, 5831), 'load_dataset.load_val_data_exp', 'load_val_data_exp', (['dataset_dir', 'dslr_dir', 'phone_dir', '"""mediatek_raw_over/"""', '"""mediatek_raw_under/"""', 'PATCH_WIDTH', 'PATCH_HEIGHT', 'DSLR_SCALE'], {}), "(dataset_dir, dslr_dir, phone_dir, 'mediatek_raw_over/',\n 'mediatek_raw_under/', PATCH_WIDTH, PATCH_HEIGHT, DSLR_SCALE)\n", (5709, 5831), False, 'from load_dataset import load_train_patch_exp, load_val_data_exp\n'), ((5938, 6092), 'load_dataset.load_train_patch_exp', 'load_train_patch_exp', (['dataset_dir', 'dslr_dir', 'phone_dir', '"""mediatek_raw_over/"""', '"""mediatek_raw_under/"""', 'train_size', 
'PATCH_WIDTH', 'PATCH_HEIGHT', 'DSLR_SCALE'], {}), "(dataset_dir, dslr_dir, phone_dir, 'mediatek_raw_over/',\n 'mediatek_raw_under/', train_size, PATCH_WIDTH, PATCH_HEIGHT, DSLR_SCALE)\n", (5958, 6092), False, 'from load_dataset import load_train_patch_exp, load_val_data_exp\n'), ((1794, 1845), 'model.resnet', 'resnet', (['phone_'], {'leaky': 'leaky', 'instance_norm': 'norm_gen'}), '(phone_, leaky=leaky, instance_norm=norm_gen)\n', (1800, 1845), False, 'from model import resnet, adversarial\n'), ((1911, 1954), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['enhanced', 'dslr_'], {}), '(enhanced, dslr_)\n', (1937, 1954), True, 'import tensorflow as tf\n'), ((3119, 3139), 'utils.blur', 'utils.blur', (['enhanced'], {}), '(enhanced)\n', (3129, 3139), False, 'import utils\n'), ((3160, 3177), 'utils.blur', 'utils.blur', (['dslr_'], {}), '(dslr_)\n', (3170, 3177), False, 'import utils\n'), ((3442, 3477), 'tensorflow.image.psnr', 'tf.image.psnr', (['enhanced', 'dslr_', '(1.0)'], {}), '(enhanced, dslr_, 1.0)\n', (3455, 3477), True, 'import tensorflow as tf\n'), ((5053, 5096), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (5094, 5096), True, 'import tensorflow as tf\n'), ((6271, 6313), 'numpy.random.randint', 'np.random.randint', (['(0)', 'VAL_SIZE', 'batch_size'], {}), '(0, VAL_SIZE, batch_size)\n', (6288, 6313), True, 'import numpy as np\n'), ((6926, 6970), 'numpy.random.randint', 'np.random.randint', (['(0)', 'train_size', 'batch_size'], {}), '(0, train_size, batch_size)\n', (6943, 6970), True, 'import numpy as np\n'), ((1315, 1325), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1323, 1325), True, 'import tensorflow as tf\n'), ((3214, 3266), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['dslr_blur', 'enhanced_blur'], {}), '(dslr_blur, enhanced_blur)\n', (3240, 3266), True, 'import tensorflow as tf\n'), ((3619, 3654), 'tensorflow.image.ssim', 
'tf.image.ssim', (['enhanced', 'dslr_', '(1.0)'], {}), '(enhanced, dslr_, 1.0)\n', (3632, 3654), True, 'import tensorflow as tf\n'), ((4007, 4037), 'vgg.preprocess', 'vgg.preprocess', (['(enhanced * 255)'], {}), '(enhanced * 255)\n', (4021, 4037), False, 'import vgg\n'), ((4075, 4102), 'vgg.preprocess', 'vgg.preprocess', (['(dslr_ * 255)'], {}), '(dslr_ * 255)\n', (4089, 4102), False, 'import vgg\n'), ((4143, 4228), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['enhanced_vgg[CONTENT_LAYER]', 'dslr_vgg[CONTENT_LAYER]'], {}), '(enhanced_vgg[CONTENT_LAYER], dslr_vgg[CONTENT_LAYER]\n )\n', (4169, 4228), True, 'import tensorflow as tf\n'), ((4536, 4567), 'tensorflow.compat.v1.global_variables', 'tf.compat.v1.global_variables', ([], {}), '()\n', (4565, 4567), True, 'import tensorflow as tf\n'), ((4624, 4671), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4656, 4671), True, 'import tensorflow as tf\n'), ((7243, 7283), 'numpy.rot90', 'np.rot90', (['phone_images[k]', 'random_rotate'], {}), '(phone_images[k], random_rotate)\n', (7251, 7283), True, 'import numpy as np\n'), ((7313, 7352), 'numpy.rot90', 'np.rot90', (['dslr_images[k]', 'random_rotate'], {}), '(dslr_images[k], random_rotate)\n', (7321, 7352), True, 'import numpy as np\n'), ((11088, 11242), 'load_dataset.load_train_patch_exp', 'load_train_patch_exp', (['dataset_dir', 'dslr_dir', 'phone_dir', '"""mediatek_raw_over/"""', '"""mediatek_raw_under/"""', 'train_size', 'PATCH_WIDTH', 'PATCH_HEIGHT', 'DSLR_SCALE'], {}), "(dataset_dir, dslr_dir, phone_dir, 'mediatek_raw_over/',\n 'mediatek_raw_under/', train_size, PATCH_WIDTH, PATCH_HEIGHT, DSLR_SCALE)\n", (11108, 11242), False, 'from load_dataset import load_train_patch_exp, load_val_data_exp\n'), ((11276, 11290), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11288, 11290), False, 'from datetime import datetime\n'), ((7183, 7208), 'numpy.random.randint', 
'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (7200, 7208), True, 'import numpy as np\n'), ((7379, 7404), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (7396, 7404), True, 'import numpy as np\n'), ((7477, 7503), 'numpy.flipud', 'np.flipud', (['phone_images[k]'], {}), '(phone_images[k])\n', (7486, 7503), True, 'import numpy as np\n'), ((7537, 7562), 'numpy.flipud', 'np.flipud', (['dslr_images[k]'], {}), '(dslr_images[k])\n', (7546, 7562), True, 'import numpy as np\n'), ((9318, 9336), 'numpy.asarray', 'np.asarray', (['losses'], {}), '(losses)\n', (9328, 9336), True, 'import numpy as np\n'), ((10404, 10489), 'numpy.reshape', 'np.reshape', (['visual_target_crops[idx]', '[TARGET_HEIGHT, TARGET_WIDTH, TARGET_DEPTH]'], {}), '(visual_target_crops[idx], [TARGET_HEIGHT, TARGET_WIDTH,\n TARGET_DEPTH])\n', (10414, 10489), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import random
# The graph is stored as an adjacency matrix (not a forward-star list).
# Build an Erdos-Renyi random graph: each off-diagonal pair is linked
# independently with probability p.
def CreateER(N, p):
    """Return the symmetric, zero-diagonal adjacency matrix of an ER(N, p) graph."""
    samples = np.random.rand(N, N)
    links = np.where(samples > p, 0, 1)
    # Keep only the strict lower triangle and mirror it across the diagonal;
    # this yields exactly the matrix the row/column copy loop produced.
    lower = np.tril(links, -1)
    return lower + lower.T
# Build a Barabasi-Albert scale-free network by preferential attachment.
def CreateBA(N, m, m0):
    """Return the adjacency matrix of a BA network.

    Starts from a fully connected seed of m nodes; every later node attaches
    to m0 existing nodes picked with probability proportional to their degree.
    """
    adjacency = np.zeros((N, N))
    seed = np.ones((m, m))
    for node in range(m):
        seed[node, node] = 0  # no self-loops in the seed clique
    adjacency[:m, :m] = seed
    for new_node in range(m, N, 1):
        # attachment probability = column degree / total degree mass
        degree_share = adjacency.sum(axis=0)[:m] / adjacency.sum()
        chosen = np.random.choice(m, m0, p=degree_share, replace=False)
        for neighbor in chosen:
            adjacency[neighbor, new_node] = 1
            adjacency[new_node, neighbor] = 1
        m = m + 1
        print("BA mat is generating %d/%d" % (m, N))
    return adjacency
# Plot the node-degree distribution of a graph (the shape is roughly normal).
def Distribution(mat):
    """Histogram the node degrees of adjacency matrix `mat`, plot and return it."""
    rows = mat.shape[0]
    degrees = np.array([mat[node, :].sum() for node in range(rows)])
    hist = np.histogram(degrees, bins=1000, range=(0, 1000))
    plt.plot(hist[0])
    plt.xlabel('degree')
    plt.ylabel('p(degree)')
    plt.show()
    return hist
# One SIR update step on graph `mat`.  States: S=1 (susceptible),
# I=2 (infected), R=3 (recovered).
def SIRSpread(mat, beta, mu, vec):
    """Advance the epidemic one step and return the new state vector.

    beta is the per-contact infection probability, mu the recovery
    probability.  The input vector `vec` is left unmodified.
    """
    nxt = np.array(vec)
    for node in range(vec.size):
        if vec[node] == 1:
            # count the currently infected neighbours of this node
            contagious = sum(1 for other in range(vec.size)
                             if mat[node, other] == 1 and vec[other] == 2)
            infect_prob = 1 - (1 - beta) ** contagious
            if random.random() < infect_prob:
                nxt[node] = 2
        elif vec[node] == 2:
            if random.random() < mu:
                nxt[node] = 3
    return nxt
# Run t SIR steps on a fresh ER graph and record the S/I/R population sizes.
def MultiSpread(N, beta, mu, t):
    """Simulate t steps of SIR spreading on an ER(N, 0.01) network.

    A single randomly chosen node starts infected.  Returns three lists
    holding the S, I and R counts after each step.
    """
    network = CreateER(N, 0.01)
    states = np.ones(N, dtype=int)
    states[random.randint(0, N - 1)] = 2  # patient zero
    susceptible, infected, recovered = [], [], []
    for _ in range(t):
        states = SIRSpread(network, beta, mu, states)
        susceptible.append((states == 1).sum())
        infected.append((states == 2).sum())
        recovered.append((states == 3).sum())
    return susceptible, infected, recovered
# Plot the S/I/R curves produced by MultiSpread.
def DrawSIRResult(N, beta, mu, t):
    """Run the SIR simulation and draw all three population curves over time."""
    S, I, R = MultiSpread(N, beta, mu, t)
    steps = range(t)
    fig, ax = plt.subplots()
    plt.xlabel('t')
    plt.ylabel('N(t)')
    for series, shape, color, tag in ((S, "o", "g", "S"),
                                      (I, "s", "r", "I"),
                                      (R, ">", "b", "R")):
        plt.plot(steps, series, marker=shape, c=color, label=tag)
    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Demo run: 1000-node ER network, infection rate 0.15, recovery rate 0.3, 100 steps.
    DrawSIRResult(1000, 0.15, 0.3, 100)
    # BA = CreateBA(200, 3, 3)
    # Distribution(BA)
| [
"matplotlib.pyplot.show",
"random.randint",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.ones",
"random.random",
"numpy.histogram",
"numpy.where",
"numpy.array",
"numpy.random.choice",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel... | [((165, 185), 'numpy.random.rand', 'np.random.rand', (['N', 'N'], {}), '(N, N)\n', (179, 185), True, 'import numpy as np\n'), ((196, 219), 'numpy.where', 'np.where', (['(mat > p)', '(0)', '(1)'], {}), '(mat > p, 0, 1)\n', (204, 219), True, 'import numpy as np\n'), ((352, 368), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (360, 368), True, 'import numpy as np\n'), ((377, 392), 'numpy.ones', 'np.ones', (['(m, m)'], {}), '((m, m))\n', (384, 392), True, 'import numpy as np\n'), ((911, 958), 'numpy.histogram', 'np.histogram', (['Count'], {'bins': '(1000)', 'range': '(0, 1000)'}), '(Count, bins=1000, range=(0, 1000))\n', (923, 958), True, 'import numpy as np\n'), ((962, 979), 'matplotlib.pyplot.plot', 'plt.plot', (['hist[0]'], {}), '(hist[0])\n', (970, 979), True, 'import matplotlib.pyplot as plt\n'), ((984, 1004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""degree"""'], {}), "('degree')\n", (994, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(degree)"""'], {}), "('p(degree)')\n", (1019, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1037, 1047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1045, 1047), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1179), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (1174, 1179), True, 'import numpy as np\n'), ((1787, 1811), 'random.randint', 'random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (1801, 1811), False, 'import random\n'), ((2244, 2258), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2256, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2264, 2279), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (2274, 2279), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2302), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""N(t)"""'], {}), "('N(t)')\n", (2294, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2352), 
'matplotlib.pyplot.plot', 'plt.plot', (['X', 'S'], {'marker': '"""o"""', 'c': '"""g"""', 'label': '"""S"""'}), "(X, S, marker='o', c='g', label='S')\n", (2316, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2405), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'I'], {'marker': '"""s"""', 'c': '"""r"""', 'label': '"""I"""'}), "(X, I, marker='s', c='r', label='I')\n", (2369, 2405), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2458), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'R'], {'marker': '""">"""', 'c': '"""b"""', 'label': '"""R"""'}), "(X, R, marker='>', c='b', label='R')\n", (2422, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2472, 2484), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2482, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2498, 2500), True, 'import matplotlib.pyplot as plt\n'), ((563, 609), 'numpy.random.choice', 'np.random.choice', (['m', 'm0'], {'p': 'prob', 'replace': '(False)'}), '(m, m0, p=prob, replace=False)\n', (579, 609), True, 'import numpy as np\n'), ((1432, 1447), 'random.random', 'random.random', ([], {}), '()\n', (1445, 1447), False, 'import random\n'), ((1549, 1564), 'random.random', 'random.random', ([], {}), '()\n', (1562, 1564), False, 'import random\n'), ((1956, 1969), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (1964, 1969), True, 'import numpy as np\n'), ((2013, 2026), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (2021, 2026), True, 'import numpy as np\n'), ((2070, 2083), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (2078, 2083), True, 'import numpy as np\n')] |
import numpy as np
import itertools
def cindex_td(death_ages, survival_funcs, survival_ages, observed, weights=None):
    """Compute the (optionally weighted) time-dependent concordance index.

    A pair (i, j) is comparable when subject i dies before subject j and
    i's death is observed; it is concordant when the model assigns i a
    lower survival probability than j at i's death time (ties count 0.5).

    :param death_ages: death/censoring age per subject
    :param survival_funcs: per-subject survival curve values (array-like)
    :param survival_ages: per-subject ages at which the curve is evaluated
    :param observed: 1 if the subject's death was observed, 0 if censored
    :param weights: optional per-subject weights; None or empty means uniform
    :return: concordance in [0, 1], or NaN when no pair is comparable
    """
    num = len(death_ages)
    # NOTE: the original default was a mutable `weights=[]`; None is the safe
    # equivalent (an explicitly passed empty sequence still means "uniform").
    if weights is None or len(weights) == 0:
        weights = np.ones(num)
    comparable = 0.0
    concordant = 0.0
    for (i, j) in itertools.permutations(range(0, num), 2):
        if death_ages[i] < death_ages[j] and observed[i] == 1:
            w = 1.0 * weights[i] * weights[j]
            comparable += w
            # evaluate both survival curves at i's death time; clamp the
            # insertion point so a death beyond the curve uses the last value
            index_i = min(np.searchsorted(survival_ages[i], death_ages[i]),
                          len(survival_ages[i]) - 1)
            index_j = min(np.searchsorted(survival_ages[j], death_ages[i]),
                          len(survival_ages[j]) - 1)
            S_i = survival_funcs[i].flatten()[index_i]
            S_j = survival_funcs[j].flatten()[index_j]
            # (the death-age ordering is already guaranteed by the guard above,
            # so the original's repeated comparison was redundant)
            if S_i < S_j:
                concordant += w
            elif S_i == S_j:
                concordant += 0.5 * w
    if comparable > 0:
        return concordant / comparable
    else:
        return np.nan
| [
"numpy.searchsorted",
"numpy.ones"
] | [((245, 257), 'numpy.ones', 'np.ones', (['num'], {}), '(num)\n', (252, 257), True, 'import numpy as np\n'), ((455, 503), 'numpy.searchsorted', 'np.searchsorted', (['survival_ages[i]', 'death_ages[i]'], {}), '(survival_ages[i], death_ages[i])\n', (470, 503), True, 'import numpy as np\n'), ((526, 574), 'numpy.searchsorted', 'np.searchsorted', (['survival_ages[j]', 'death_ages[i]'], {}), '(survival_ages[j], death_ages[i])\n', (541, 574), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import json
import asyncio
import random
import os
from adapter import Adapter
from flask import Flask, request, jsonify
from nn_image_checker import NNModelChecker
from PIL import Image
from contract.config import config
from contract.download_images import download_images as contract_download_images
from contract.download_images import metadata as download_images_metadata
from contract.listen_images import listen_images as contract_listen_images
from contract.listen_images import metadata as listen_images_metadata
from contract.get_contract import get_contract
from contract.download_image_data import download_image_data
from contract.download_image_data import metadata as download_images_data_metadata
from contract.download_image_data import errors as download_images_data_errors
from image_manager import ImageManager
# Flask application plus the long-lived service singletons shared by all routes.
app = Flask(__name__)
image_checker = NNModelChecker()  # NN-based image similarity checker
image_manager = ImageManager(image_checker)  # keeps the checker's storage in sync
@app.before_request
def log_request_info():
    """Log headers and raw body of every incoming request at debug level."""
    app.logger.debug('Headers: %s', request.headers)
    app.logger.debug('Body: %s', request.get_data())
def load_image(data):
    """Decode raw image bytes into a PIL Image.

    :param data: encoded image bytes (e.g. a raw POST body)
    :return: PIL.Image built from the decoded pixel array
    """
    # NOTE(review): the '/load_image' route below re-uses the name `load_image`
    # and therefore shadows this helper at import time, breaking the calls in
    # register_image/image_score -- consider renaming one of the two.
    # np.fromstring is deprecated for binary input; frombuffer is the
    # supported zero-copy equivalent.
    nparr = np.frombuffer(data, np.uint8)
    # decode image (cv2 yields a BGR pixel array -- presumably downstream
    # similarity checking is colour-order agnostic; TODO confirm)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    img = Image.fromarray(img)
    return img
@app.route('/register_image', methods=['POST'])
def register_image():
    """Add the posted image to the similarity storage and acknowledge."""
    image_manager.register_new_images()
    picture = load_image(request.data)
    image_checker.add_image_to_storage(picture, 'None')
    # reply with a simple JSON acknowledgement
    return jsonify({'message': 'image received.'})
@app.route('/image_score', methods=['POST'])
def image_score():
    """Score the posted image against the stored images and return the result."""
    image_manager.register_new_images()
    picture = load_image(request.data)
    scores, descriptions = image_checker.find_most_simular_images(picture)
    payload = {'scores': str(scores), 'descriptions': str(descriptions)}
    return jsonify(payload)
@app.route('/info', methods=['GET'])
def info():
    """Expose adapter metadata and the download/watch counters as JSON."""
    payload = {
        'lauding_contract': config.CONTRACT_ADDRESS,
        'analysis_network': 'ethereum mainnet',
        'adapter_version': '0.0.1',
        'found_images_count_while_downloading': download_images_metadata['found_images'],
        'downloaded_images_count': download_images_metadata['downloaded_images'],
        'found_images_count_while_watching': listen_images_metadata['found_images'],
        'watched_images_count': listen_images_metadata['downloaded_images'],
        'last_downloaded_urls': download_images_data_metadata['last_urls'],
    }
    return jsonify(payload)
@app.route('/download_images', methods=['GET'])
def download_images():
    """Trigger a one-off download of the contract's images."""
    contract_download_images()
    response = {'is_succeed': True}
    return jsonify(response)
@app.route('/listen_images', methods=['GET'])
def listen_images():
    """Start watching the contract for newly published images."""
    contract_listen_images()
    response = {'is_succeed': True}
    return jsonify(response)
@app.route('/load_image', methods=['POST'])
def load_image():
    """Download the IPFS data of the image with the posted `id`.

    Expects a JSON body {"id": <token id>}; returns {'is_succeed': bool}.
    """
    # NOTE(review): this definition shadows the module-level `load_image(data)`
    # helper defined above, so register_image/image_score end up resolving to
    # this zero-argument route handler -- consider renaming one of the two.
    body = json.loads(request.data)
    if 'id' not in body:
        return jsonify({'is_succeed': False})
    image_id = body['id']
    # makedirs(exist_ok=True) avoids the check-then-create race of the old
    # exists()/mkdir() pairs
    os.makedirs(config.DATA_PATH, exist_ok=True)
    os.makedirs(config.SOURCE_PATH, exist_ok=True)
    contract = get_contract()
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        data_ipfs_url = contract.functions.uri(image_id).call()
        # a single coroutine needs no asyncio.gather wrapper
        loop.run_until_complete(download_image_data(data_ipfs_url, image_id))
    finally:
        # the loop was previously leaked: a new one per request, never closed
        loop.close()
    return jsonify({'is_succeed': True})
@app.route('/check', methods=['POST'])
def call_adapter():
    """Run the adapter on the posted JSON body and return its result."""
    payload = json.loads(request.data)
    adapter = Adapter(payload, image_checker, image_manager)
    return jsonify(adapter.result)
if __name__ == '__main__':
    # Development entry point; `threaded=True` lets requests be served concurrently.
    app.run(debug=True, host='0.0.0.0', port='9090', threaded=True)
| [
"os.mkdir",
"nn_image_checker.NNModelChecker",
"json.loads",
"image_manager.ImageManager",
"adapter.Adapter",
"asyncio.set_event_loop",
"flask.Flask",
"cv2.imdecode",
"os.path.exists",
"contract.listen_images.listen_images",
"flask.jsonify",
"flask.request.get_data",
"contract.get_contract.g... | [((873, 888), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (878, 888), False, 'from flask import Flask, request, jsonify\n'), ((905, 921), 'nn_image_checker.NNModelChecker', 'NNModelChecker', ([], {}), '()\n', (919, 921), False, 'from nn_image_checker import NNModelChecker\n'), ((938, 965), 'image_manager.ImageManager', 'ImageManager', (['image_checker'], {}), '(image_checker)\n', (950, 965), False, 'from image_manager import ImageManager\n'), ((1155, 1184), 'numpy.fromstring', 'np.fromstring', (['data', 'np.uint8'], {}), '(data, np.uint8)\n', (1168, 1184), True, 'import numpy as np\n'), ((1214, 1251), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_COLOR'], {}), '(nparr, cv2.IMREAD_COLOR)\n', (1226, 1251), False, 'import cv2\n'), ((1262, 1282), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1277, 1282), False, 'from PIL import Image\n'), ((1650, 1667), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (1657, 1667), False, 'from flask import Flask, request, jsonify\n'), ((2081, 2098), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (2088, 2098), False, 'from flask import Flask, request, jsonify\n'), ((2181, 2703), 'flask.jsonify', 'jsonify', (["{'lauding_contract': config.CONTRACT_ADDRESS, 'analysis_network':\n 'ethereum mainnet', 'adapter_version': '0.0.1',\n 'found_images_count_while_downloading': download_images_metadata[\n 'found_images'], 'downloaded_images_count': download_images_metadata[\n 'downloaded_images'], 'found_images_count_while_watching':\n listen_images_metadata['found_images'], 'watched_images_count':\n listen_images_metadata['downloaded_images'], 'last_downloaded_urls':\n download_images_data_metadata['last_urls']}"], {}), "({'lauding_contract': config.CONTRACT_ADDRESS, 'analysis_network':\n 'ethereum mainnet', 'adapter_version': '0.0.1',\n 'found_images_count_while_downloading': download_images_metadata[\n 'found_images'], 
'downloaded_images_count': download_images_metadata[\n 'downloaded_images'], 'found_images_count_while_watching':\n listen_images_metadata['found_images'], 'watched_images_count':\n listen_images_metadata['downloaded_images'], 'last_downloaded_urls':\n download_images_data_metadata['last_urls']})\n", (2188, 2703), False, 'from flask import Flask, request, jsonify\n'), ((2822, 2848), 'contract.download_images.download_images', 'contract_download_images', ([], {}), '()\n', (2846, 2848), True, 'from contract.download_images import download_images as contract_download_images\n'), ((2860, 2889), 'flask.jsonify', 'jsonify', (["{'is_succeed': True}"], {}), "({'is_succeed': True})\n", (2867, 2889), False, 'from flask import Flask, request, jsonify\n'), ((2963, 2987), 'contract.listen_images.listen_images', 'contract_listen_images', ([], {}), '()\n', (2985, 2987), True, 'from contract.listen_images import listen_images as contract_listen_images\n'), ((2999, 3028), 'flask.jsonify', 'jsonify', (["{'is_succeed': True}"], {}), "({'is_succeed': True})\n", (3006, 3028), False, 'from flask import Flask, request, jsonify\n'), ((3104, 3128), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3114, 3128), False, 'import json\n'), ((3409, 3423), 'contract.get_contract.get_contract', 'get_contract', ([], {}), '()\n', (3421, 3423), False, 'from contract.get_contract import get_contract\n'), ((3435, 3459), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (3457, 3459), False, 'import asyncio\n'), ((3464, 3492), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (3486, 3492), False, 'import asyncio\n'), ((3671, 3700), 'flask.jsonify', 'jsonify', (["{'is_succeed': True}"], {}), "({'is_succeed': True})\n", (3678, 3700), False, 'from flask import Flask, request, jsonify\n'), ((3773, 3797), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3783, 3797), False, 'import json\n'), ((3812, 3855), 
'adapter.Adapter', 'Adapter', (['body', 'image_checker', 'image_manager'], {}), '(body, image_checker, image_manager)\n', (3819, 3855), False, 'from adapter import Adapter\n'), ((3867, 3890), 'flask.jsonify', 'jsonify', (['adapter.result'], {}), '(adapter.result)\n', (3874, 3890), False, 'from flask import Flask, request, jsonify\n'), ((1098, 1116), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (1114, 1116), False, 'from flask import Flask, request, jsonify\n'), ((3170, 3200), 'flask.jsonify', 'jsonify', (["{'is_succeed': False}"], {}), "({'is_succeed': False})\n", (3177, 3200), False, 'from flask import Flask, request, jsonify\n'), ((3240, 3272), 'os.path.exists', 'os.path.exists', (['config.DATA_PATH'], {}), '(config.DATA_PATH)\n', (3254, 3272), False, 'import os\n'), ((3282, 3308), 'os.mkdir', 'os.mkdir', (['config.DATA_PATH'], {}), '(config.DATA_PATH)\n', (3290, 3308), False, 'import os\n'), ((3320, 3354), 'os.path.exists', 'os.path.exists', (['config.SOURCE_PATH'], {}), '(config.SOURCE_PATH)\n', (3334, 3354), False, 'import os\n'), ((3364, 3392), 'os.mkdir', 'os.mkdir', (['config.SOURCE_PATH'], {}), '(config.SOURCE_PATH)\n', (3372, 3392), False, 'import os\n'), ((3579, 3623), 'contract.download_image_data.download_image_data', 'download_image_data', (['data_ipfs_url', 'image_id'], {}), '(data_ipfs_url, image_id)\n', (3598, 3623), False, 'from contract.download_image_data import download_image_data\n')] |
"""
input: rescaled_ct scan with shape [512, 512, 512]
output: removed airway, blood vessel and set the parenchyma value to 0,
"""
import Tool_Functions.Functions as Functions
import visualization.visualize_3d.visualize_stl as visualize
import os
import numpy as np
import post_processing.remove_airway_blood_vessel as extend_functions
import prediction.predict_rescaled as predictor
import warnings
np.set_printoptions(threshold=np.inf)
def remove_airway_and_blood_vessel_based_on_upper_frontal(rescaled_ct, lung_mask=None, airway=None, blood_vessel=None,
                          extend_ratio=1.1, max_diameter=50, show_stl=False, parenchyma_value=False):
    """
    :param rescaled_ct: the [512, 512, 512] spatial and signal normalized data
    :param lung_mask: we will predict if it is None
    :param airway: the airway mask, we will predict if it is None
    :param blood_vessel: the blood_vessel mask, we will predict if it is None
    :param extend_ratio: the diameter of this region will be extend by this ratio and this ratio + 0.1. The tissue in
    the middle of these two regions is ASSUMED AS PARENCHYMA!
    :param max_diameter: if the diameter of the region is greater than the max_diameter, we extend the region to new
    diameter: (extend_ratio - 1) * max_diameter + old_diameter
    :param show_stl: whether to visualize the 3D model after segmentation
    :param parenchyma_value: False, not return it, True, return it
    :return: enhanced_array in shape [512, 512, 512], which is enhanced rescaled ct images;
    or enhanced_array, parenchyma_value
    """
    if lung_mask is None:
        lung_mask = predictor.predict_lung_masks_rescaled_array(rescaled_ct)
    lung_mask_box = Functions.get_bounding_box(lung_mask)
    print("lung_mask_box:", lung_mask_box)
    lung_length_z = lung_mask_box[2][1] - lung_mask_box[2][0]
    superior_start = int(lung_mask_box[2][1] - lung_length_z * 0.32988803223955687)  # this is the upper
    superior_end = lung_mask_box[2][1]
    air_way_merge_z_bounding_box = Functions.get_bounding_box(lung_mask[:, :, superior_start])
    upper_start = air_way_merge_z_bounding_box[0][0]  # this is the frontal
    upper_end = air_way_merge_z_bounding_box[0][1] - int((air_way_merge_z_bounding_box[0][1] -
                                                          air_way_merge_z_bounding_box[0][0]) / 2)
    print("lung length on z:", lung_length_z, "superior range:", superior_start, superior_end,
          "upper range:", upper_start, upper_end)
    upper_superior_mask = np.zeros(np.shape(lung_mask), 'float32')
    upper_superior_mask[upper_start: upper_end, :, superior_start: superior_end] = 1.0
    upper_superior_mask = upper_superior_mask * lung_mask  # this is the mask for upper frontal lung
    if airway is None:
        refined_airway_mask = predictor.get_prediction_airway(rescaled_ct, lung_mask=lung_mask)
    else:
        refined_airway_mask = airway
    if blood_vessel is None:
        refined_blood_vessel_mask = predictor.get_prediction_blood_vessel(rescaled_ct, lung_mask=lung_mask)
    else:
        refined_blood_vessel_mask = blood_vessel
    if show_stl:
        print("lung")
        visualize.visualize_numpy_as_stl(lung_mask)
        print("airway")
        visualize.visualize_numpy_as_stl(refined_airway_mask)
        print("blood vessels")
        visualize.visualize_numpy_as_stl(refined_blood_vessel_mask)
    rescaled_ct = rescaled_ct * lung_mask
    visible_non_infection = np.array((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > 0.5, 'float32')
    rescaled_ct_original = np.array(rescaled_ct)
    assert extend_ratio > 1
    print("extending air way")
    # the parenchyma sample lives between the two dilated tube masks
    visible_extended_outer = extend_functions.extend_tubes(visible_non_infection, None, extend_ratio + 0.1, int(max_diameter * 1.1))
    visible_extended = extend_functions.extend_tubes(visible_non_infection, None, extend_ratio, max_diameter)
    visible_extended = visible_extended_outer - visible_extended
    context_mask = visible_extended - visible_non_infection
    context_mask = np.clip(context_mask, 0, 1)
    context_mask = context_mask * upper_superior_mask
    # BUGFIX: removed leftover debug statements here --
    # `print(Functions.stat_on_mask(rescaled_ct_original, context_mask))` followed
    # by `exit()` terminated the whole process and made everything below
    # unreachable (the sibling "general_sampling" variant has no such lines).
    num_context_points = np.sum(context_mask)
    print("there are:", num_context_points, "parenchyma sample points")
    if num_context_points < 10000:
        warnings.warn('Too less (<10000) sampled parenchyma points. Maybe use "general sampling"', SyntaxWarning)
    # shift sampled voxels by +10 so they sort above the zero background
    context = context_mask * rescaled_ct_original + context_mask * 10
    context = np.reshape(context, (-1,))
    context = np.sort(context)
    total_points = len(context)
    percentile = 50
    threshold = context[total_points - int(num_context_points * (100 - percentile) / 100)] - 10
    if threshold > -0.1:
        warnings.warn('Too high threshold. Maybe use "general sampling"?', SyntaxWarning)
    print("the context is:", threshold, 'at percentile', percentile)
    rescaled_ct[np.where(visible_non_infection >= 0.5)] = threshold  # removed the airway and blood vessel
    rescaled_ct = rescaled_ct - threshold * lung_mask  # threshold is the value of lung parenchyma
    # set the parenchyma value to zero
    enhanced_array = np.zeros([512, 512, 512], 'float32')
    enhanced_array[:, :, :] = rescaled_ct
    enhanced_array = np.clip(enhanced_array, -0.05, 0.25)
    if parenchyma_value:
        return enhanced_array, threshold
    return enhanced_array
def remove_airway_and_blood_vessel_general_sampling(rescaled_ct, lung_mask=None, airway=None, blood_vessel=None,
                        extend_ratio=1.1, max_diameter=50, show_stl=False, parenchyma_value=False, window=False):
    """
    :param window: if True, return the scan optimal window
    :param rescaled_ct: the [512, 512, 512] spatial and signal normalized data
    :param lung_mask: we will predict if it is None
    :param airway: the airway mask, we will predict if it is None
    :param blood_vessel: the blood_vessel mask, we will predict if it is None
    :param extend_ratio: the diameter of this region will be extend by this ratio. And the extended region is ASSUMED AS
    PARENCHYMA!
    :param max_diameter: if the diameter of the region is greater than the max_diameter, we extend the region to new
    diameter: (extend_ratio - 1) * max_diameter + old_diameter
    :param show_stl: whether to visualize the 3D model after segmentation
    :param parenchyma_value: False, not return it, True, return it
    :return: enhanced_array in shape [512, 512, 512], which is enhanced rescaled ct images;
    or enhanced_array, parenchyma_value
    """
    # predict any mask the caller did not supply
    if lung_mask is None:
        lung_mask = predictor.predict_lung_masks_rescaled_array(rescaled_ct)
    lung_mask_box = Functions.get_bounding_box(lung_mask)
    print("lung_mask_box:", lung_mask_box)
    if airway is None:
        refined_airway_mask = predictor.get_prediction_airway(rescaled_ct, lung_mask=lung_mask)
    else:
        refined_airway_mask = airway
    if blood_vessel is None:
        refined_blood_vessel_mask = predictor.get_prediction_blood_vessel(rescaled_ct, lung_mask=lung_mask)
    else:
        refined_blood_vessel_mask = blood_vessel
    if show_stl:
        print("lung")
        visualize.visualize_numpy_as_stl(lung_mask)
        print("airway")
        visualize.visualize_numpy_as_stl(refined_airway_mask)
        print("blood vessels")
        visualize.visualize_numpy_as_stl(refined_blood_vessel_mask)
    rescaled_ct = rescaled_ct * lung_mask
    # union of airway + blood vessel, restricted to the lung
    visible_non_infection = np.array((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > 0.5, 'float32')
    rescaled_ct_original = np.array(rescaled_ct)
    assert extend_ratio > 1
    print("extending air way and blood vessels")
    # parenchyma is sampled from the shell between the two dilated tube masks
    visible_extended_outer = extend_functions.extend_tubes(visible_non_infection, None, extend_ratio + 0.1,
                                                      int(max_diameter * 1.1))
    visible_extended = extend_functions.extend_tubes(visible_non_infection, None, extend_ratio, max_diameter)
    visible_extended = visible_extended_outer - visible_extended
    context_mask = visible_extended - visible_non_infection
    context_mask = np.clip(context_mask, 0, 1)
    num_context_points = np.sum(context_mask)
    print("there are:", num_context_points, "parenchyma sample points")
    # shift sampled voxels by +10 so they sort above the zero background
    context = context_mask * rescaled_ct_original + context_mask * 10
    context = np.reshape(context, (-1,))
    context = np.sort(context)
    total_points = len(context)
    percentile = 50
    # median of the sampled parenchyma values (undo the +10 shift)
    threshold = context[total_points - int(num_context_points * (100 - percentile) / 100)] - 10
    # spread of the central 10th-90th percentile band of the samples
    std = np.std(context[total_points - int(num_context_points * 0.9): total_points - int(num_context_points * 0.1)])
    print("the context is:", threshold, 'at percentile', percentile)
    rescaled_ct[np.where(visible_non_infection >= 0.5)] = threshold  # removed the airway and blood vessel
    rescaled_ct = rescaled_ct - threshold * lung_mask  # threshold is the value of lung parenchyma
    # set the parenchyma value to zero
    enhanced_array = np.zeros([512, 512, 512], 'float32')
    enhanced_array[:, :, :] = rescaled_ct
    enhanced_array = np.clip(enhanced_array, -0.05, 0.25)
    if parenchyma_value:
        return enhanced_array, threshold
    if window:
        # rescale back to approximate HU: level and width of the optimal window
        return enhanced_array, -600 + threshold * 1600, std * 1.5 * 1600
    return enhanced_array
def prepare_arrays_raw_for_normal_and_hospitalize(rescaled_ct, lung_mask=None, airway=None, blood_vessel=None,
            lesion=None, extend_ratio=1.1, max_diameter=50, normal=True, mask_name=None, save_dict=None, upperlobe=True):
    """
    :param rescaled_ct: the [512, 512, 512] spatial and signal normalized data
    :param lung_mask: the [512, 512, 512] mask array, we will predict if it is None
    :param airway: the airway mask, we will predict if it is None
    :param blood_vessel: the blood_vessel mask, we will predict if it is None
    :param lesion: the mask of the lesion, we will predict if it is None AND "normal" is False
    :param extend_ratio: the diameter of this region will be extend by this ratio. And the extended region is ASSUMED AS
    PARENCHYMA!
    :param max_diameter: if the diameter of the region is greater than the max_diameter, we extend the region to new
    diameter: (extend_ratio - 1) * max_diameter + old_diameter
    :param normal: if True, the scan is assumed healthy and the lesion channel is all zeros;
    if False and no lesion mask is given, the lesion is predicted
    :param mask_name: if given, every intermediate mask is saved under save_dict with this file name
    :param save_dict: directory for the saved masks; required when mask_name is given
    :param upperlobe: if True, sample parenchyma from the upper frontal lung, otherwise sample generally
    :return: the arrays_raw in shape [512, 512, 512, 2]:
    arrays_raw[:, :, :, 0] is the enhanced_array rescaled ct data, which is the input images
    arrays_raw[:, :, :, 1] is the mask for lesion, which is the ground truth (can be all zeros)
    """
    if lung_mask is None:
        lung_mask = predictor.predict_lung_masks_rescaled_array(rescaled_ct)
    if airway is None:
        airway = predictor.get_prediction_airway(rescaled_ct, lung_mask=lung_mask)
    if blood_vessel is None:
        blood_vessel = predictor.get_prediction_blood_vessel(rescaled_ct, lung_mask=lung_mask)
    if lesion is not None:
        assert normal is not True  # a given lesion mask contradicts a "normal" scan
    if lesion is None and normal is not True:
        lesion = predictor.predict_covid_19_infection_rescaled_array(rescaled_ct, lung_mask=lung_mask)
    if normal is True:
        lesion = np.zeros([512, 512, 512], 'float32')
    if mask_name is not None:
        if save_dict is None:
            # previously this fell through to a TypeError inside os.path.join
            raise ValueError("save_dict must be given when mask_name is provided")
        Functions.save_np_array(os.path.join(save_dict, 'lung_masks') + '/', mask_name, lung_mask, True)
        Functions.save_np_array(os.path.join(save_dict, 'airway_stage_two') + '/', mask_name, airway, True)
        Functions.save_np_array(os.path.join(save_dict, 'blood_vessel_stage_two') + '/', mask_name, blood_vessel, True)
        Functions.save_np_array(os.path.join(save_dict, 'lesion') + '/', mask_name, lesion, True)
    if upperlobe:
        enhanced_rescaled_ct = remove_airway_and_blood_vessel_based_on_upper_frontal(
            rescaled_ct, lung_mask, airway, blood_vessel, extend_ratio, max_diameter, False)
    else:
        enhanced_rescaled_ct = remove_airway_and_blood_vessel_general_sampling(
            rescaled_ct, lung_mask, airway, blood_vessel, extend_ratio, max_diameter, False)
    arrays_raw = np.zeros([512, 512, 512, 2], 'float32')
    arrays_raw[:, :, :, 0] = enhanced_rescaled_ct
    arrays_raw[:, :, :, 1] = lesion
    return arrays_raw
if __name__ == "__main__":
exit()
| [
"numpy.sum",
"numpy.clip",
"prediction.predict_rescaled.predict_covid_19_infection_rescaled_array",
"numpy.shape",
"prediction.predict_rescaled.get_prediction_airway",
"os.path.join",
"numpy.set_printoptions",
"prediction.predict_rescaled.predict_lung_masks_rescaled_array",
"numpy.reshape",
"predi... | [((400, 437), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (419, 437), True, 'import numpy as np\n'), ((1741, 1778), 'Tool_Functions.Functions.get_bounding_box', 'Functions.get_bounding_box', (['lung_mask'], {}), '(lung_mask)\n', (1767, 1778), True, 'import Tool_Functions.Functions as Functions\n'), ((2063, 2122), 'Tool_Functions.Functions.get_bounding_box', 'Functions.get_bounding_box', (['lung_mask[:, :, superior_start]'], {}), '(lung_mask[:, :, superior_start])\n', (2089, 2122), True, 'import Tool_Functions.Functions as Functions\n'), ((3502, 3595), 'numpy.array', 'np.array', (['((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > 0.5)', '"""float32"""'], {}), "((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > \n 0.5, 'float32')\n", (3510, 3595), True, 'import numpy as np\n'), ((3618, 3639), 'numpy.array', 'np.array', (['rescaled_ct'], {}), '(rescaled_ct)\n', (3626, 3639), True, 'import numpy as np\n'), ((3856, 3946), 'post_processing.remove_airway_blood_vessel.extend_tubes', 'extend_functions.extend_tubes', (['visible_non_infection', 'None', 'extend_ratio', 'max_diameter'], {}), '(visible_non_infection, None, extend_ratio,\n max_diameter)\n', (3885, 3946), True, 'import post_processing.remove_airway_blood_vessel as extend_functions\n'), ((4089, 4116), 'numpy.clip', 'np.clip', (['context_mask', '(0)', '(1)'], {}), '(context_mask, 0, 1)\n', (4096, 4116), True, 'import numpy as np\n'), ((4280, 4300), 'numpy.sum', 'np.sum', (['context_mask'], {}), '(context_mask)\n', (4286, 4300), True, 'import numpy as np\n'), ((4606, 4632), 'numpy.reshape', 'np.reshape', (['context', '(-1,)'], {}), '(context, (-1,))\n', (4616, 4632), True, 'import numpy as np\n'), ((4647, 4663), 'numpy.sort', 'np.sort', (['context'], {}), '(context)\n', (4654, 4663), True, 'import numpy as np\n'), ((5267, 5303), 'numpy.zeros', 'np.zeros', (['[512, 512, 512]', '"""float32"""'], {}), "([512, 512, 512], 
'float32')\n", (5275, 5303), True, 'import numpy as np\n'), ((5367, 5403), 'numpy.clip', 'np.clip', (['enhanced_array', '(-0.05)', '(0.25)'], {}), '(enhanced_array, -0.05, 0.25)\n', (5374, 5403), True, 'import numpy as np\n'), ((6818, 6855), 'Tool_Functions.Functions.get_bounding_box', 'Functions.get_bounding_box', (['lung_mask'], {}), '(lung_mask)\n', (6844, 6855), True, 'import Tool_Functions.Functions as Functions\n'), ((7610, 7703), 'numpy.array', 'np.array', (['((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > 0.5)', '"""float32"""'], {}), "((refined_blood_vessel_mask + refined_airway_mask) * lung_mask > \n 0.5, 'float32')\n", (7618, 7703), True, 'import numpy as np\n'), ((7726, 7747), 'numpy.array', 'np.array', (['rescaled_ct'], {}), '(rescaled_ct)\n', (7734, 7747), True, 'import numpy as np\n'), ((8041, 8131), 'post_processing.remove_airway_blood_vessel.extend_tubes', 'extend_functions.extend_tubes', (['visible_non_infection', 'None', 'extend_ratio', 'max_diameter'], {}), '(visible_non_infection, None, extend_ratio,\n max_diameter)\n', (8070, 8131), True, 'import post_processing.remove_airway_blood_vessel as extend_functions\n'), ((8274, 8301), 'numpy.clip', 'np.clip', (['context_mask', '(0)', '(1)'], {}), '(context_mask, 0, 1)\n', (8281, 8301), True, 'import numpy as np\n'), ((8328, 8348), 'numpy.sum', 'np.sum', (['context_mask'], {}), '(context_mask)\n', (8334, 8348), True, 'import numpy as np\n'), ((8506, 8532), 'numpy.reshape', 'np.reshape', (['context', '(-1,)'], {}), '(context, (-1,))\n', (8516, 8532), True, 'import numpy as np\n'), ((8547, 8563), 'numpy.sort', 'np.sort', (['context'], {}), '(context)\n', (8554, 8563), True, 'import numpy as np\n'), ((9170, 9206), 'numpy.zeros', 'np.zeros', (['[512, 512, 512]', '"""float32"""'], {}), "([512, 512, 512], 'float32')\n", (9178, 9206), True, 'import numpy as np\n'), ((9270, 9306), 'numpy.clip', 'np.clip', (['enhanced_array', '(-0.05)', '(0.25)'], {}), '(enhanced_array, -0.05, 0.25)\n', (9277, 
9306), True, 'import numpy as np\n'), ((12442, 12481), 'numpy.zeros', 'np.zeros', (['[512, 512, 512, 2]', '"""float32"""'], {}), "([512, 512, 512, 2], 'float32')\n", (12450, 12481), True, 'import numpy as np\n'), ((1664, 1720), 'prediction.predict_rescaled.predict_lung_masks_rescaled_array', 'predictor.predict_lung_masks_rescaled_array', (['rescaled_ct'], {}), '(rescaled_ct)\n', (1707, 1720), True, 'import prediction.predict_rescaled as predictor\n'), ((2571, 2590), 'numpy.shape', 'np.shape', (['lung_mask'], {}), '(lung_mask)\n', (2579, 2590), True, 'import numpy as np\n'), ((2845, 2910), 'prediction.predict_rescaled.get_prediction_airway', 'predictor.get_prediction_airway', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=lung_mask)\n', (2876, 2910), True, 'import prediction.predict_rescaled as predictor\n'), ((3023, 3094), 'prediction.predict_rescaled.get_prediction_blood_vessel', 'predictor.get_prediction_blood_vessel', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=lung_mask)\n', (3060, 3094), True, 'import prediction.predict_rescaled as predictor\n'), ((3202, 3245), 'visualization.visualize_3d.visualize_stl.visualize_numpy_as_stl', 'visualize.visualize_numpy_as_stl', (['lung_mask'], {}), '(lung_mask)\n', (3234, 3245), True, 'import visualization.visualize_3d.visualize_stl as visualize\n'), ((3278, 3331), 'visualization.visualize_3d.visualize_stl.visualize_numpy_as_stl', 'visualize.visualize_numpy_as_stl', (['refined_airway_mask'], {}), '(refined_airway_mask)\n', (3310, 3331), True, 'import visualization.visualize_3d.visualize_stl as visualize\n'), ((3371, 3430), 'visualization.visualize_3d.visualize_stl.visualize_numpy_as_stl', 'visualize.visualize_numpy_as_stl', (['refined_blood_vessel_mask'], {}), '(refined_blood_vessel_mask)\n', (3403, 3430), True, 'import visualization.visualize_3d.visualize_stl as visualize\n'), ((4183, 4241), 'Tool_Functions.Functions.stat_on_mask', 'Functions.stat_on_mask', 
(['rescaled_ct_original', 'context_mask'], {}), '(rescaled_ct_original, context_mask)\n', (4205, 4241), True, 'import Tool_Functions.Functions as Functions\n'), ((4416, 4531), 'warnings.warn', 'warnings.warn', (['"""Too less (<10000) sampled parenchyma points. Maybe use "general sampling\\""""', 'SyntaxWarning'], {}), '(\n \'Too less (<10000) sampled parenchyma points. Maybe use "general sampling"\'\n , SyntaxWarning)\n', (4429, 4531), False, 'import warnings\n'), ((4847, 4932), 'warnings.warn', 'warnings.warn', (['"""Too high threshold. Maybe use "general sampling"?"""', 'SyntaxWarning'], {}), '(\'Too high threshold. Maybe use "general sampling"?\',\n SyntaxWarning)\n', (4860, 4932), False, 'import warnings\n'), ((5016, 5054), 'numpy.where', 'np.where', (['(visible_non_infection >= 0.5)'], {}), '(visible_non_infection >= 0.5)\n', (5024, 5054), True, 'import numpy as np\n'), ((6741, 6797), 'prediction.predict_rescaled.predict_lung_masks_rescaled_array', 'predictor.predict_lung_masks_rescaled_array', (['rescaled_ct'], {}), '(rescaled_ct)\n', (6784, 6797), True, 'import prediction.predict_rescaled as predictor\n'), ((6953, 7018), 'prediction.predict_rescaled.get_prediction_airway', 'predictor.get_prediction_airway', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=lung_mask)\n', (6984, 7018), True, 'import prediction.predict_rescaled as predictor\n'), ((7131, 7202), 'prediction.predict_rescaled.get_prediction_blood_vessel', 'predictor.get_prediction_blood_vessel', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=lung_mask)\n', (7168, 7202), True, 'import prediction.predict_rescaled as predictor\n'), ((7310, 7353), 'visualization.visualize_3d.visualize_stl.visualize_numpy_as_stl', 'visualize.visualize_numpy_as_stl', (['lung_mask'], {}), '(lung_mask)\n', (7342, 7353), True, 'import visualization.visualize_3d.visualize_stl as visualize\n'), ((7386, 7439), 'visualization.visualize_3d.visualize_stl.visualize_numpy_as_stl', 
'visualize.visualize_numpy_as_stl', (['refined_airway_mask'], {}), '(refined_airway_mask)\n', (7418, 7439), True, 'import visualization.visualize_3d.visualize_stl as visualize\n'), ((7479, 7538), 'visualization.visualize_3d.visualize_stl.visualize_numpy_as_stl', 'visualize.visualize_numpy_as_stl', (['refined_blood_vessel_mask'], {}), '(refined_blood_vessel_mask)\n', (7511, 7538), True, 'import visualization.visualize_3d.visualize_stl as visualize\n'), ((8919, 8957), 'numpy.where', 'np.where', (['(visible_non_infection >= 0.5)'], {}), '(visible_non_infection >= 0.5)\n', (8927, 8957), True, 'import numpy as np\n'), ((10871, 10927), 'prediction.predict_rescaled.predict_lung_masks_rescaled_array', 'predictor.predict_lung_masks_rescaled_array', (['rescaled_ct'], {}), '(rescaled_ct)\n', (10914, 10927), True, 'import prediction.predict_rescaled as predictor\n'), ((10969, 11034), 'prediction.predict_rescaled.get_prediction_airway', 'predictor.get_prediction_airway', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=lung_mask)\n', (11000, 11034), True, 'import prediction.predict_rescaled as predictor\n'), ((11088, 11159), 'prediction.predict_rescaled.get_prediction_blood_vessel', 'predictor.get_prediction_blood_vessel', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=lung_mask)\n', (11125, 11159), True, 'import prediction.predict_rescaled as predictor\n'), ((11284, 11374), 'prediction.predict_rescaled.predict_covid_19_infection_rescaled_array', 'predictor.predict_covid_19_infection_rescaled_array', (['rescaled_ct'], {'lung_mask': 'lung_mask'}), '(rescaled_ct, lung_mask=\n lung_mask)\n', (11335, 11374), True, 'import prediction.predict_rescaled as predictor\n'), ((11411, 11447), 'numpy.zeros', 'np.zeros', (['[512, 512, 512]', '"""float32"""'], {}), "([512, 512, 512], 'float32')\n", (11419, 11447), True, 'import numpy as np\n'), ((11511, 11548), 'os.path.join', 'os.path.join', (['save_dict', '"""lung_masks"""'], {}), "(save_dict, 
'lung_masks')\n", (11523, 11548), False, 'import os\n'), ((11616, 11659), 'os.path.join', 'os.path.join', (['save_dict', '"""airway_stage_two"""'], {}), "(save_dict, 'airway_stage_two')\n", (11628, 11659), False, 'import os\n'), ((11724, 11773), 'os.path.join', 'os.path.join', (['save_dict', '"""blood_vessel_stage_two"""'], {}), "(save_dict, 'blood_vessel_stage_two')\n", (11736, 11773), False, 'import os\n'), ((11844, 11877), 'os.path.join', 'os.path.join', (['save_dict', '"""lesion"""'], {}), "(save_dict, 'lesion')\n", (11856, 11877), False, 'import os\n')] |
import warnings

import numpy
from numpy.linalg import eig, eigvals
from scipy.linalg import block_diag

from diatom.Hamiltonian import vector_dot
'''
This module contains code that is incorrect beyond the diagonal elements of the
Hamiltonian in N, MN and is left purely for legacy purposes. In almost all
circumstances the code in the Hamiltonian module is better.
'''
def tensor_nuclear(C3,I1,I2,N):
    ''' The tensor nuclear spin-spin interaction (legacy).

    This version uses cartesian angular momentum matrices and is incorrect:
    the correct operator has off-diagonal terms in N, whereas this one only
    reproduces the diagonal ones. It is close, and only suitable where
    performance requirements outweigh accuracy requirements.

    Args:
        C3 (float): Tensor spin-spin coupling coefficient
        I1,I2,N (lists of numpy.ndarray): Cartesian angular momentum operator
            components [Jx, Jy, Jz], all expressed in the same product basis.

    Returns:
        H (numpy.ndarray): Tensor spin-spin term
    '''
    # Suppress ComplexWarning for the whole computation: the operators may be
    # complex-typed but the matrix elements used here are real.
    # (Previously only the filterwarnings call itself sat inside the context
    # manager, so the filter was discarded on exit and had no effect.)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",category=numpy.ComplexWarning)
        # recover the maximum quantum numbers from the z-projection operators
        Nmax = int(numpy.round(numpy.real(numpy.amax(N[2])),1))
        I1max = numpy.real(numpy.round(numpy.amax(I1[2]),1))
        I2max = numpy.real(numpy.round(numpy.amax(I2[2]),1))
        # The tensor interaction depends on the rotational level N, not its
        # projection, so build a block-diagonal matrix holding the value of N
        # for each (2N+1)(2I1+1)(2I2+1) block. The term is block-diagonal in
        # N, so <N,MN|...|N',MN'> never mixes different N here.
        Narray = numpy.zeros((1,1))
        for n in range(0,Nmax+1):
            shape = int((2*n+1)*(2*I1max+1)*(2*I2max+1))
            Narray = block_diag(Narray,numpy.zeros((shape,shape))+n)
        # the (1,1) zero block only seeded the block_diag chain - drop it
        Narray = Narray[1:,1:]
        # assemble the operator exactly as before
        prefactor = C3/((2*Narray+3)*(2*Narray-1))
        term1 = 3*numpy.dot(vector_dot(I1,N),vector_dot(I2,N))
        term2 = 3*numpy.dot(vector_dot(I2,N),vector_dot(I1,N))
        term3 = -2*vector_dot(I1,I2)*Narray*(Narray+1)
        return prefactor*(term1+term2+term3)
def Quadrupole(Q,I1,I2,N):
    ''' Legacy quadrupole moment calculation.

    This form of the quadrupole interaction is only accurate on the diagonal.
    It comes from doi:10.1103/PhysRev.91.1403, which quotes the quadrupole
    interaction for KBr.

    Args:
        Q (tuple of floats): nuclear quadrupole coupling constants (Q1, Q2)
        I1,I2,N (lists of numpy.ndarray): Cartesian angular momentum operator
            components [Jx, Jy, Jz], all expressed in the same product basis.

    Returns:
        Quad (numpy.ndarray): Quadrupole term

    Note:
        The prefactors divide by 2*I*(2*I-1) and (2*N-1), so a nucleus with
        I = 1/2 (or the N = 0 block) produces a division by zero; quadrupole
        coupling is only physical for I >= 1.
    '''
    Q1,Q2 = Q
    # Suppress ComplexWarning for the whole computation. (Previously the
    # filter was set inside the context manager while the computation ran
    # outside it, so the filter was reverted before it could do anything.)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",category=numpy.ComplexWarning)
        # recover the maximum quantum numbers from the z-projection operators
        Nmax = int(numpy.round(numpy.real(numpy.amax(N[2])),1))
        I1max = numpy.round(numpy.real(numpy.amax(I1[2])),1)
        I2max = numpy.round(numpy.real(numpy.amax(I2[2])),1)
        # Block-diagonal matrix holding the value of N for each
        # (2I1+1)(2I2+1)(2n+1) block. Fixed: this previously called
        # scipy.linalg.block_diag without ever importing scipy (NameError);
        # block_diag is already imported at module level.
        Narray = numpy.zeros((1,1))
        for n in range(Nmax+1):
            shape = int((2*I1max+1)*(2*I2max+1)*(2*n+1))
            Narray = block_diag(Narray,numpy.zeros((shape,shape))+n)
        Narray = Narray[1:,1:]
        # first nucleus
        prefactor1 = -Q1/(2*I1max*(2*I1max-1)*(2*Narray-1)*(2*Narray+3))
        term1_1 = 3*numpy.dot(vector_dot(I1,N),vector_dot(I1,N))
        term2_1 = 1.5*vector_dot(I1,N)
        term3_1 = -1*numpy.dot(vector_dot(I1,I1),vector_dot(N,N))
        Quad1 = prefactor1*(term1_1+term2_1+term3_1)
        # second nucleus
        prefactor2 = -Q2/(2*I2max*(2*I2max-1)*(2*Narray-1)*(2*Narray+3))
        term1_2 = 3*numpy.dot(vector_dot(I2,N),vector_dot(I2,N))
        term2_2 = 1.5*vector_dot(I2,N)
        term3_2 = -1*numpy.dot(vector_dot(I2,I2),vector_dot(N,N))
        Quad2 = prefactor2*(term1_2+term2_2+term3_2)
        return Quad1+Quad2
#These are the functions that the user will use to generate any interesting maps
#obviously these can be added to by writing custom scripts but these should
# cover most needs
def Vary_magnetic(Hams,fields0,Bz,return_states = False):
    ''' Vary the applied magnetic field.

    Diagonalises H = H0 + E*HDC + I*HAC + b*Hz for each magnetic field b in
    Bz. The Zeeman term scales linearly with the field, so the Hamiltonian is
    simply re-summed (not rebuilt) at each step.

    Args:
        Hams: tuple/list (H0, Hz, HDC, HAC) of Hamiltonian terms; all the
            same shape.
        fields0: initial field conditions (E, B, I), allowing Stark and AC
            shifts. B is unused here because the magnetic field is swept.
        Bz: magnetic field values to iterate over.
        return_states: switch to return eigenstates as well as eigenenergies.

    Returns:
        EigenValues: (dim, len(Bz)) array of eigenenergies, sorted from
            smallest to largest along axis 0, or None if Hz is not an array.
        States: (dim, dim, len(Bz)) array of eigenstates ordered as the
            energies (only when return_states is True).
    '''
    H0,Hz,HDC,HAC = Hams
    E,B,I = fields0
    # warn the user if they've done something silly, so they don't waste time
    if type(Hz) != numpy.ndarray:
        warnings.warn("Hamiltonian is zero: nothing will change!")
        return None
    EigenValues = numpy.zeros((H0.shape[0],len(Bz)))
    if return_states:
        States = numpy.zeros((H0.shape[0],H0.shape[0],len(Bz)))
    for i,b in enumerate(Bz):
        H = H0+E*HDC+I*HAC+b*Hz
        # The physical eigenvalues are real: take the real part explicitly
        # instead of suppressing ComplexWarning on an implicit complex->float
        # cast (numpy 2.0 removed numpy.ComplexWarning, which broke the old
        # suppression approach).
        if return_states:
            vals, vecs = eig(H)
            order = numpy.argsort(numpy.real(vals))
            EigenValues[:,i] = numpy.real(vals[order])
            States[:,:,i] = numpy.real(vecs[:,order])
        else:
            EigenValues[:,i] = numpy.real(numpy.sort(eigvals(H)))
    if return_states:
        return EigenValues,States
    else:
        return EigenValues
def Vary_ElectricDC(Hams,fields0,Ez,return_states = False):
    ''' Vary the applied DC electric field.

    Diagonalises H = H0 + e*HDC + I*HAC + B*Hz for each electric field e in
    Ez. The DC Stark term scales linearly with the field, so the Hamiltonian
    is simply re-summed (not rebuilt) at each step.

    Args:
        Hams: tuple/list (H0, Hz, HDC, HAC) of Hamiltonian terms; all the
            same shape.
        fields0: initial field conditions (E, B, I), allowing Zeeman and AC
            shifts. E is unused here because the electric field is swept.
        Ez: electric field values to iterate over.
        return_states: switch to return eigenstates as well as eigenenergies.

    Returns:
        EigenValues: (dim, len(Ez)) array of eigenenergies, sorted from
            smallest to largest along axis 0, or None if HDC is not an array.
        States: (dim, dim, len(Ez)) array of eigenstates ordered as the
            energies (only when return_states is True).
    '''
    E,B,I = fields0
    H0,Hz,HDC,HAC = Hams
    # warn the user if they've done something silly, so they don't waste time
    if type(HDC) != numpy.ndarray:
        warnings.warn("Hamiltonian is zero: nothing will change!")
        return None
    EigenValues = numpy.zeros((H0.shape[0],len(Ez)))
    if return_states:
        States = numpy.zeros((H0.shape[0],H0.shape[0],len(Ez)))
    for i,e in enumerate(Ez):
        H = H0+e*HDC+I*HAC+B*Hz
        # real part taken explicitly rather than suppressing ComplexWarning
        # on an implicit complex->float cast (numpy 2.0 removed
        # numpy.ComplexWarning, which broke the old suppression approach)
        if return_states:
            vals, vecs = eig(H)
            order = numpy.argsort(numpy.real(vals))
            EigenValues[:,i] = numpy.real(vals[order])
            States[:,:,i] = numpy.real(vecs[:,order])
        else:
            EigenValues[:,i] = numpy.real(numpy.sort(eigvals(H)))
    if return_states:
        return EigenValues,States
    else:
        return EigenValues
def Vary_Intensity(Hams,fields0,I_app,return_states = False):
    ''' Vary the intensity of the off-resonant laser field.

    Diagonalises H = H0 + E*HDC + Int*HAC + B*Hz for each intensity Int in
    I_app. The AC Stark term scales linearly with intensity, so the
    Hamiltonian is simply re-summed (not rebuilt) at each step.

    Args:
        Hams: tuple/list (H0, Hz, HDC, HAC) of Hamiltonian terms; all the
            same shape.
        fields0: initial field conditions (E, B, I), allowing Zeeman and DC
            Stark shifts. I is unused here because the intensity is swept.
        I_app: intensity values to iterate over.
        return_states: switch to return eigenstates as well as eigenenergies.

    Returns:
        EigenValues: (dim, len(I_app)) array of eigenenergies, sorted from
            smallest to largest along axis 0, or None if HAC is not an array.
        States: (dim, dim, len(I_app)) array of eigenstates ordered as the
            energies (only when return_states is True).
    '''
    H0,Hz,HDC,HAC = Hams
    E,B,I = fields0
    # warn the user if they've done something silly, so they don't waste time
    if type(HAC) != numpy.ndarray:
        warnings.warn("Hamiltonian is zero: nothing will change")
        return None
    EigenValues = numpy.zeros((H0.shape[0],len(I_app)))
    if return_states:
        States = numpy.zeros((H0.shape[0],H0.shape[0],len(I_app)))
    # BUG FIX: the sweep loop previously sat in the `else` branch of the
    # `if return_states` allocation above, so calling with return_states=True
    # returned all-zero arrays without ever diagonalising. The loop now runs
    # unconditionally, matching Vary_magnetic and Vary_ElectricDC.
    for i,Int in enumerate(I_app):
        H = H0+E*HDC+Int*HAC+B*Hz
        # real part taken explicitly rather than suppressing ComplexWarning
        # on an implicit complex->float cast (numpy 2.0 removed
        # numpy.ComplexWarning, which broke the old suppression approach)
        if return_states:
            vals, vecs = eig(H)
            order = numpy.argsort(numpy.real(vals))
            EigenValues[:,i] = numpy.real(vals[order])
            States[:,:,i] = numpy.real(vecs[:,order])
        else:
            EigenValues[:,i] = numpy.real(numpy.sort(eigvals(H)))
    if return_states:
        return EigenValues,States
    else:
        return EigenValues
def Vary_Beta(Hams,fields0,Angles,Molecule_pars,return_states = False):
    ''' vary polarisation of laser field
    find Eigenvalues (and optionally Eigenstates) of the total Hamiltonian.
    This function works differently to the applied field ones. Because beta
    changes the matrix elements in the Hamiltonian we cannot simply
    multiply it through. Therefore we have to recalculate the matrix
    elements on each iteration. This makes the function slower.
    Args:
        Hams: list or tuple of hamiltonians. Should all be the same size
        fields0: initial field conditions, allows for zeeman + Stark effects
        Angles: Polarisation angles to iterate over
        Molecule_pars: Nmax,I1,I2,a2, arguments to feed to regenerate the anisotropic Stark shift matrix.
        return_states: Switch to return EigenStates as well as Eigenenergies
    Returns:
        energy: array of Eigenenergies, sorted from smallest to largest along the 0 axis
        states: array of Eigenstates, sorted as in energy.
    '''
    # NOTE(review): AC_aniso, eps0 and c are not defined in this module's
    # imports — presumably they come from diatom.Hamiltonian (like
    # vector_dot); eig/eigvals likewise look like numpy.linalg names.
    # Confirm the imports before relying on this function.
    Nmax,I1,I2,a2 = Molecule_pars
    H0,Hz,HDC,HAC = Hams
    E,B,I = fields0
    #warn the user if they've done something silly, so they don't waste time
    if I == 0:
        # with zero intensity the AC term vanishes, so sweeping beta is a
        # no-op; nothing is computed and None is returned implicitly
        warnings.warn("Intensity is zero: nothing will change")
    else:
        EigenValues = numpy.zeros((H0.shape[0],len(Angles)))
        if return_states:
            States = numpy.zeros((H0.shape[0],H0.shape[0],len(Angles)))
        for i,beta in enumerate(Angles):
            # beta changes the matrix elements themselves, so the anisotropic
            # AC Stark matrix must be regenerated on every iteration
            HAC = AC_aniso(Nmax,a2,beta,I1,I2)/(2*eps0*c)
            with warnings.catch_warnings():
                # ignore the ComplexWarning raised when the (physically real)
                # eigenvalues/vectors are stored into the float output arrays
                warnings.filterwarnings("ignore",category=numpy.ComplexWarning)
                H = H0+E*HDC+I*HAC+B*Hz
                if return_states:
                    Eigen = eig(H)
                    # sort eigenvalues ascending and order the states to match
                    order = numpy.argsort(Eigen[0])
                    EigenValues[:,i]=Eigen[0][order]
                    States[:,:,i] = Eigen[1][:,order]
                else:
                    Eigen = eigvals(H)
                    EigenValues[:,i]=numpy.sort(Eigen)
        if return_states:
            return EigenValues,States
        else:
            return EigenValues
| [
"scipy.linalg.block_diag",
"numpy.zeros",
"diatom.Hamiltonian.vector_dot",
"numpy.amax",
"numpy.finfo",
"numpy.argsort",
"numpy.sort",
"numpy.array"
] | [((1862, 1881), 'numpy.zeros', 'numpy.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1873, 1881), False, 'import numpy\n'), ((3704, 3719), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (3715, 3719), False, 'import numpy\n'), ((3731, 3750), 'numpy.zeros', 'numpy.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3742, 3750), False, 'import numpy\n'), ((4402, 4427), 'numpy.zeros', 'numpy.zeros', (['Narray.shape'], {}), '(Narray.shape)\n', (4413, 4427), False, 'import numpy\n'), ((4445, 4470), 'numpy.zeros', 'numpy.zeros', (['Narray.shape'], {}), '(Narray.shape)\n', (4456, 4470), False, 'import numpy\n'), ((2158, 2182), 'scipy.linalg.block_diag', 'block_diag', (['Narray', 'nsub'], {}), '(Narray, nsub)\n', (2168, 2182), False, 'from scipy.linalg import block_diag\n'), ((4360, 4378), 'numpy.finfo', 'numpy.finfo', (['float'], {}), '(float)\n', (4371, 4378), False, 'import numpy\n'), ((4772, 4789), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'N'], {}), '(I1, N)\n', (4782, 4789), False, 'from diatom.Hamiltonian import vector_dot\n'), ((5078, 5095), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I2', 'N'], {}), '(I2, N)\n', (5088, 5095), False, 'from diatom.Hamiltonian import vector_dot\n'), ((2112, 2139), 'numpy.zeros', 'numpy.zeros', (['(shape, shape)'], {}), '((shape, shape))\n', (2123, 2139), False, 'import numpy\n'), ((2383, 2400), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'N'], {}), '(I1, N)\n', (2393, 2400), False, 'from diatom.Hamiltonian import vector_dot\n'), ((2400, 2417), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I2', 'N'], {}), '(I2, N)\n', (2410, 2417), False, 'from diatom.Hamiltonian import vector_dot\n'), ((2442, 2459), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I2', 'N'], {}), '(I2, N)\n', (2452, 2459), False, 'from diatom.Hamiltonian import vector_dot\n'), ((2459, 2476), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'N'], {}), '(I1, N)\n', (2469, 2476), False, 'from diatom.Hamiltonian import vector_dot\n'), 
((4039, 4066), 'numpy.zeros', 'numpy.zeros', (['(shape, shape)'], {}), '((shape, shape))\n', (4050, 4066), False, 'import numpy\n'), ((4718, 4735), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'N'], {}), '(I1, N)\n', (4728, 4735), False, 'from diatom.Hamiltonian import vector_dot\n'), ((4735, 4752), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'N'], {}), '(I1, N)\n', (4745, 4752), False, 'from diatom.Hamiltonian import vector_dot\n'), ((4816, 4834), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'I1'], {}), '(I1, I1)\n', (4826, 4834), False, 'from diatom.Hamiltonian import vector_dot\n'), ((4834, 4850), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['N', 'N'], {}), '(N, N)\n', (4844, 4850), False, 'from diatom.Hamiltonian import vector_dot\n'), ((5024, 5041), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I2', 'N'], {}), '(I2, N)\n', (5034, 5041), False, 'from diatom.Hamiltonian import vector_dot\n'), ((5041, 5058), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I2', 'N'], {}), '(I2, N)\n', (5051, 5058), False, 'from diatom.Hamiltonian import vector_dot\n'), ((5122, 5140), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I2', 'I2'], {}), '(I2, I2)\n', (5132, 5140), False, 'from diatom.Hamiltonian import vector_dot\n'), ((5140, 5156), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['N', 'N'], {}), '(N, N)\n', (5150, 5156), False, 'from diatom.Hamiltonian import vector_dot\n'), ((1399, 1416), 'numpy.amax', 'numpy.amax', (['I1[2]'], {}), '(I1[2])\n', (1409, 1416), False, 'import numpy\n'), ((1460, 1477), 'numpy.amax', 'numpy.amax', (['I2[2]'], {}), '(I2[2])\n', (1470, 1477), False, 'import numpy\n'), ((2492, 2510), 'diatom.Hamiltonian.vector_dot', 'vector_dot', (['I1', 'I2'], {}), '(I1, I2)\n', (2502, 2510), False, 'from diatom.Hamiltonian import vector_dot\n'), ((3607, 3624), 'numpy.amax', 'numpy.amax', (['I1[2]'], {}), '(I1[2])\n', (3617, 3624), False, 'import numpy\n'), ((3668, 3685), 'numpy.amax', 'numpy.amax', (['I2[2]'], 
{}), '(I2[2])\n', (3678, 3685), False, 'import numpy\n'), ((1338, 1354), 'numpy.amax', 'numpy.amax', (['N[2]'], {}), '(N[2])\n', (1348, 1354), False, 'import numpy\n'), ((3546, 3562), 'numpy.amax', 'numpy.amax', (['N[2]'], {}), '(N[2])\n', (3556, 3562), False, 'import numpy\n'), ((6981, 7004), 'numpy.argsort', 'numpy.argsort', (['Eigen[0]'], {}), '(Eigen[0])\n', (6994, 7004), False, 'import numpy\n'), ((7210, 7227), 'numpy.sort', 'numpy.sort', (['Eigen'], {}), '(Eigen)\n', (7220, 7227), False, 'import numpy\n'), ((8914, 8937), 'numpy.argsort', 'numpy.argsort', (['Eigen[0]'], {}), '(Eigen[0])\n', (8927, 8937), False, 'import numpy\n'), ((9143, 9160), 'numpy.sort', 'numpy.sort', (['Eigen'], {}), '(Eigen)\n', (9153, 9160), False, 'import numpy\n'), ((13169, 13192), 'numpy.argsort', 'numpy.argsort', (['Eigen[0]'], {}), '(Eigen[0])\n', (13182, 13192), False, 'import numpy\n'), ((13398, 13415), 'numpy.sort', 'numpy.sort', (['Eigen'], {}), '(Eigen)\n', (13408, 13415), False, 'import numpy\n'), ((10975, 10998), 'numpy.argsort', 'numpy.argsort', (['Eigen[0]'], {}), '(Eigen[0])\n', (10988, 10998), False, 'import numpy\n'), ((11224, 11241), 'numpy.sort', 'numpy.sort', (['Eigen'], {}), '(Eigen)\n', (11234, 11241), False, 'import numpy\n')] |
# This script uses Ewald summation to evaluate substitution structures.
#
# For each target Na fraction it builds a partially occupied structure,
# enumerates low-electrostatic-energy orderings with
# OrderDisorderedStructureTransformation, groups symmetry-equivalent results
# with StructureMatcher, and dumps one representative per group to JSON.
import json

import numpy as np
from pymatgen.core import Structure, Lattice
from pymatgen.io.vasp.inputs import Incar
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.transformations.advanced_transformations import OrderDisorderedStructureTransformation

# Phase diagram will cover Na fractions 20/32 16/32 12/32 10/32 8/32 6/32 4/32 2/32
Na_frac_list = [20/32, 16/32, 12/32, 10/32, 8/32, 6/32, 4/32, 2/32]
Na_frac_groups = []
for Na_frac in Na_frac_list:
    # import the original structure; zero the oxidation states so the species
    # labels ("Na0+", ...) match the partial-occupancy dictionaries below
    structure = Structure.from_file('POSCAR')
    structure.add_oxidation_state_by_element({"Na": 0, "Mn": 0, "O": 0, "Ni": 0, "Ti": 0})
    lattice = structure.lattice
    species = structure.species
    frac_coords = structure.frac_coords
    # replace every Na site with a partially occupied Na site
    sub_Nae_specie = {"Na0+": Na_frac}
    for n, element in enumerate(species):
        if element.symbol == "Na":
            species[n] = sub_Nae_specie
    # read magmom from INCAR so the magnetic ordering is carried through
    incar = Incar.from_file("INCAR")
    magmom_list = incar.as_dict()["MAGMOM"]
    sub_structure = Structure(lattice, species, frac_coords, site_properties={"magmom": magmom_list})
    # make a 1x1x2 supercell
    scaling_matrix = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 2]
    ])
    sub_structure_supercell = sub_structure * scaling_matrix
    # BUG FIX: end="" was previously passed to str.format (where extra keyword
    # arguments are silently ignored) instead of print, so the intended
    # single-line "processing ... find N structures" output never happened.
    print("processing Na{}_32...".format(int(Na_frac * 32)), end="")
    odst = OrderDisorderedStructureTransformation()
    ordered_sub_structure = odst.apply_transformation(sub_structure_supercell, return_ranked_list=30)
    # reduce the symmetry-equivalent structures to one group each
    matcher = StructureMatcher()
    groups = matcher.group_structures([d["structure"] for d in ordered_sub_structure])
    print(" find {} structures".format(len(groups)))
    Na_frac_groups.append(groups)

# write one representative structure per group to a JSON file
Na_frac_groups_dict = []
for groups, Na_frac in zip(Na_frac_groups, Na_frac_list):
    Na_frac_groups_dict.append({
        "Na_frac": Na_frac,
        "groups": [struct[0].as_dict() for struct in groups],
    })
# NOTE(review): the output filename keeps the original "gourps" typo so any
# downstream consumers of the file keep working — rename with care.
json_file = "Na_frac_gourps.json"
with open(json_file, mode="w") as f:
    json_data = json.dumps(Na_frac_groups_dict, sort_keys=True, indent=4, separators=(',', ': '))
    f.write(json_data)
| [
"pymatgen.transformations.advanced_transformations.OrderDisorderedStructureTransformation",
"pymatgen.io.vasp.inputs.Incar.from_file",
"pymatgen.core.Structure",
"pymatgen.analysis.structure_matcher.StructureMatcher",
"pymatgen.core.Structure.from_file",
"json.dumps",
"numpy.array"
] | [((585, 622), 'pymatgen.core.Structure.from_file', 'Structure.from_file', (['poscar_file_path'], {}), '(poscar_file_path)\n', (604, 622), False, 'from pymatgen.core import Structure, Lattice\n'), ((1415, 1439), 'pymatgen.io.vasp.inputs.Incar.from_file', 'Incar.from_file', (['"""INCAR"""'], {}), "('INCAR')\n", (1430, 1439), False, 'from pymatgen.io.vasp.inputs import Incar\n'), ((1633, 1718), 'pymatgen.core.Structure', 'Structure', (['lattice', 'species', 'frac_coords'], {'site_properties': "{'magmom': magmom_list}"}), "(lattice, species, frac_coords, site_properties={'magmom':\n magmom_list})\n", (1642, 1718), False, 'from pymatgen.core import Structure, Lattice\n'), ((1762, 1805), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 2]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 2]])\n', (1770, 1805), True, 'import numpy as np\n'), ((1984, 2024), 'pymatgen.transformations.advanced_transformations.OrderDisorderedStructureTransformation', 'OrderDisorderedStructureTransformation', ([], {}), '()\n', (2022, 2024), False, 'from pymatgen.transformations.advanced_transformations import OrderDisorderedStructureTransformation\n'), ((2263, 2281), 'pymatgen.analysis.structure_matcher.StructureMatcher', 'StructureMatcher', ([], {}), '()\n', (2279, 2281), False, 'from pymatgen.analysis.structure_matcher import StructureMatcher\n'), ((2918, 3003), 'json.dumps', 'json.dumps', (['Na_frac_groups_dict'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(Na_frac_groups_dict, sort_keys=True, indent=4, separators=(',',\n ': '))\n", (2928, 3003), False, 'import json\n')] |
from argparse import ArgumentParser
from typing import List, Optional, Tuple
import numpy as np
from numpy.random import RandomState
from rlai.actions import Action
from rlai.agents import Agent
from rlai.environments import Environment
from rlai.meta import rl_text
from rlai.runners.monitor import Monitor
from rlai.states import State
from rlai.utils import parse_arguments
ARM_QSTAR_BUFFER_SIZE = 1000


@rl_text(chapter=2, page=25)
class Arm:
    """
    Bandit arm.
    """

    def __init__(
            self,
            i: int,
            mean: float,
            variance: float,
            random_state: RandomState
    ):
        """
        Initialize the arm.

        :param i: Arm index.
        :param mean: Mean reward value.
        :param variance: Variance of reward value.
        :param random_state: Random state.
        """

        self.i: int = i
        self.mean: float = mean
        self.variance: float = variance
        self.random_state: RandomState = random_state

        # Rewards are drawn in batches of ARM_QSTAR_BUFFER_SIZE and then
        # served one at a time; the buffer starts empty so the first pull
        # triggers a fill.
        self.q_star_buffer: np.ndarray = np.array([])
        self.q_star_buffer_idx: int = 0

    def pull(
            self
    ) -> float:
        """
        Pull the arm.

        :return: Reward value.
        """

        # (Re)fill the reward buffer whenever it has been exhausted or was
        # never filled. NOTE(review): numpy's `scale` argument is a standard
        # deviation, but the variance attribute is passed here — confirm
        # whether that is intentional.
        if self.q_star_buffer_idx >= len(self.q_star_buffer):
            self.q_star_buffer = self.random_state.normal(
                loc=self.mean,
                scale=self.variance,
                size=ARM_QSTAR_BUFFER_SIZE
            )
            self.q_star_buffer_idx = 0

        # serve the next buffered reward
        reward = self.q_star_buffer[self.q_star_buffer_idx]
        self.q_star_buffer_idx += 1

        return reward

    def __str__(
            self
    ) -> str:
        return f'Mean: {self.mean}, Variance: {self.variance}'
@rl_text(chapter=2, page=28)
class KArmedBandit(Environment):
"""
K-armed bandit.
"""
@classmethod
def get_argument_parser(
cls
) -> ArgumentParser:
"""
Get argument parser.
:return: Argument parser.
"""
parser = ArgumentParser(
prog=f'{cls.__module__}.{cls.__name__}',
parents=[super().get_argument_parser()],
allow_abbrev=False,
add_help=False
)
parser.add_argument(
'--k',
type=int,
default=10,
help='Number of bandit arms.'
)
parser.add_argument(
'--reset-probability',
type=float,
default=0.0,
help="Probability of resetting the bandit's arms at each time step. This effectively creates a nonstationary environment."
)
parser.add_argument(
'--q-star-mean',
type=float,
default=0.0,
help='Mean of q-star (true reward mean) distribution.'
)
parser.add_argument(
'--q-star-variance',
type=float,
default=1.0,
help='Variance of q-star (true reward mean) distribution.'
)
parser.add_argument(
'--reward-variance',
type=float,
default=1.0,
help='Variance of rewards.'
)
return parser
@classmethod
def init_from_arguments(
cls,
args: List[str],
random_state: RandomState
) -> Tuple[Environment, List[str]]:
"""
Initialize an environment from arguments.
:param args: Arguments.
:param random_state: Random state.
:return: 2-tuple of an environment and a list of unparsed arguments.
"""
parsed_args, unparsed_args = parse_arguments(cls, args)
bandit = cls(
random_state=random_state,
**vars(parsed_args)
)
return bandit, unparsed_args
def reset_for_new_run(
self,
agent: Agent
) -> State:
"""
Reset the the bandit, initializing arms to new expected values.
:param agent: Agent.
:return: New State.
"""
super().reset_for_new_run(agent)
# get new arm reward means and initialize new arms
q_star_means = self.random_state.normal(loc=self.q_star_mean, scale=self.q_star_variance, size=self.k)
self.arms = [
Arm(
i=i,
mean=mean,
variance=self.reward_variance,
random_state=self.random_state
)
for i, mean in enumerate(q_star_means)
]
self.best_arm = max(self.arms, key=lambda arm: arm.mean)
return State(i=0, AA=[Action(i) for i in range(self.k)])
def pull(
self,
arm: int
) -> float:
"""
Pull an arm.
:param arm: Arm index.
:return: Reward value.
"""
return self.arms[arm].pull()
def run_step(
self,
t: int,
agent: Agent,
monitor: Monitor
) -> bool:
"""
Run a step of the environment with an agent.
:param t: Step.
:param agent: Agent.
:param monitor: Monitor.
:return: True if a terminal state was entered and the run should terminate, and False otherwise.
"""
if self.random_state.random_sample() < self.reset_probability:
self.reset_for_new_run(agent)
action = agent.act(t=t)
monitor.report(t=t, agent_action=action, optimal_action=Action(self.best_arm.i))
reward = self.pull(action.i)
monitor.report(t=t, action_reward=reward)
agent.reward(reward)
return False
    def __init__(
            self,
            random_state: RandomState,
            T: Optional[int],
            k: int,
            q_star_mean: float,
            q_star_variance: float,
            reward_variance: float,
            reset_probability: float
    ):
        """
        Initialize the bandit.

        :param random_state: Random state.
        :param T: Maximum number of steps to run, or None for no limit.
        :param k: Number of arms.
        :param q_star_mean: Mean of q_star.
        :param q_star_variance: Variance of q_star.
        :param reset_probability: Per-step probability of resetting (nonstationarity).
        :param reward_variance: Reward variance.
        """

        super().__init__(
            name=f'{k}-armed bandit',
            random_state=random_state,
            T=T
        )

        self.k = k
        self.q_star_mean = q_star_mean
        self.q_star_variance = q_star_variance
        self.reward_variance = reward_variance
        self.reset_probability = reset_probability

        # arms and best_arm are populated by reset_for_new_run
        self.arms: List[Arm] = []
        self.best_arm: Optional[Arm] = None
| [
"rlai.utils.parse_arguments",
"numpy.array",
"rlai.actions.Action",
"rlai.meta.rl_text"
] | [((412, 439), 'rlai.meta.rl_text', 'rl_text', ([], {'chapter': '(2)', 'page': '(25)'}), '(chapter=2, page=25)\n', (419, 439), False, 'from rlai.meta import rl_text\n'), ((1803, 1830), 'rlai.meta.rl_text', 'rl_text', ([], {'chapter': '(2)', 'page': '(28)'}), '(chapter=2, page=28)\n', (1810, 1830), False, 'from rlai.meta import rl_text\n'), ((1633, 1645), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1641, 1645), True, 'import numpy as np\n'), ((3690, 3716), 'rlai.utils.parse_arguments', 'parse_arguments', (['cls', 'args'], {}), '(cls, args)\n', (3705, 3716), False, 'from rlai.utils import parse_arguments\n'), ((5525, 5548), 'rlai.actions.Action', 'Action', (['self.best_arm.i'], {}), '(self.best_arm.i)\n', (5531, 5548), False, 'from rlai.actions import Action\n'), ((4667, 4676), 'rlai.actions.Action', 'Action', (['i'], {}), '(i)\n', (4673, 4676), False, 'from rlai.actions import Action\n')] |
import numpy as np
from numpy.linalg import multi_dot
# discrete xkp1=fk(xk,uk,w,L)
def fk(x, u, w, L):
    """
    Continuous-time state derivative of the pendulum model (used by an
    explicit Euler integrator to form the discrete map x_{k+1} = f_k(x_k, u_k, w, L)).

    :param x: state vector; x[0], x[1] are the oscillation angles,
        x[2], x[3] their rates (x[4], x[5] are bias states with zero dynamics).
    :param u: crane-tip acceleration inputs (u[0], u[1]).
    :param w: g/L term passed by the caller.
    :param L: pendulum length.
    :return: 6-element derivative vector.
    """
    phi, theta = x[0], x[1]
    dphi, dtheta = x[2], x[3]

    sin_phi, cos_phi = np.sin(phi), np.cos(phi)
    sin_th, cos_th = np.sin(theta), np.cos(theta)

    # angular accelerations from the pendulum equations of motion
    ddphi = (2*dphi*dtheta*sin_th - w*sin_phi + (u[1]*cos_phi)/L)/cos_th
    ddtheta = (- cos_th*sin_th*np.square(dphi)
               - (u[0]*cos_th + u[1]*sin_phi*sin_th)/L
               - w*cos_phi*sin_th)

    # bias states (last two entries) are constant
    return np.array([dphi, dtheta, ddphi, ddtheta, 0, 0])
def Fk(x,u,w,L,dt):
    '''Transition matrix: discrete-time Jacobian of the Euler-discretized
    pendulum dynamics, evaluated at state x and input u.

    :param x: state vector (angles x[0], x[1]; rates x[2], x[3]; two bias states).
    :param u: crane-tip acceleration inputs (u[0], u[1]).
    :param w: g/L term passed by the caller.
    :param L: pendulum length.
    :param dt: integration time step.
    :return: 6x6 state-transition matrix.

    Rows 0-1: angle updates (identity + dt on the rate columns).
    Rows 2-3: partial derivatives of the angular accelerations w.r.t. the state.
    Rows 4-5: bias states are modeled as constants (identity).
    '''
    Fk_out = np.array([[ 1, 0, dt, 0,0,0],
                       [ 0, 1, 0, dt,0,0],
                       [ -(dt*(w*np.cos(x[0]) + (u[1]*np.sin(x[0]))/L))/np.cos(x[1]), 2*dt*x[2]*x[3] + (dt*np.sin(x[1])*(2*x[2]*x[3]*np.sin(x[1]) - w*np.sin(x[0]) + (u[1]*np.cos(x[0]))/L))/np.square(np.cos(x[1])) , (2*dt*x[3]*np.sin(x[1]))/np.cos(x[1]) + 1, (2*dt*x[2]*np.sin(x[1]))/np.cos(x[1]),0,0],
                       [ dt*(w*np.sin(x[0])*np.sin(x[1]) - (u[1]*np.cos(x[0])*np.sin(x[1]))/L), -dt*(np.square(x[2])*np.square(np.cos(x[1])) - np.square(x[2])*np.square(np.sin(x[1])) - (u[0]*np.sin(x[1]) - u[1]*np.cos(x[1])*np.sin(x[0]))/L + w*np.cos(x[0])*np.cos(x[1])), -2*dt*x[2]*np.cos(x[1])*np.sin(x[1]), 1,0,0],
                       [0,0,0,0,1,0],
                       [0,0,0,0,0,1]])
    return Fk_out
def ekf(Lvec, uk, hat_Pkm1, hat_thetakm1, theta, r, dt):
    '''Extended Kalman filter step for the pendulum oscillation angles.

    :param Lvec: measured wire/pendulum vector; used to compute the angle measurement.
    :param uk: crane-tip acceleration input.
    :param hat_Pkm1: previous state covariance estimate.
    :param hat_thetakm1: previous state estimate (angles, rates and angle biases).
    :param theta: unused here -- NOTE(review): kept for interface compatibility?
    :param r: pendulum length.
    :param dt: time step.
    :return: (hat_thetak, hat_Pk, zkp1) -- updated state estimate, updated
        covariance, and the angle measurement used in the update.
    '''
    D = 10 # number of times to do repeated Euler's method
    g = 9.81 # gravity
    L = r # length of pendulum
    u = uk # acceleration of the crane tip
    x = hat_thetakm1 # estimated pendulum oscillation angles and rates, and bias of pendulum oscillation angles
    # Covariance matrix for measurement noise
    R = np.array([[0.00377597, -0.00210312],
                  [-0.00210312, 0.00125147]])
    # Covariance matrix for process noise
    Q = np.diag([0.00003, 0.00003, 0.0005, 0.0005, 0.0001, 0.0001])
    # Q = np.diag([0.00003, 0.00003, 0.0005, 0.0005, 0.0, 0.0])
    # Observation matrix (each angle measurement = angle + its bias state)
    H = np.array([[1, 0, 0, 0, 1, 0],
                  [0, 1, 0, 0, 0, 1]])
    Fi = Fk(x, u, g/r, L, dt)
    # Measurement of payload oscillation angles
    zkp1 = np.array([np.arctan2(-Lvec[1], Lvec[2]),
                     np.arctan2(Lvec[0], np.sqrt(np.square(Lvec[1])+np.square(Lvec[2])))])
    # Repeated Euler's method (prediction step)
    # NOTE(review): the loop runs D-1 sub-steps of size dt/D, i.e. it
    # integrates over (D-1)/D * dt rather than the full dt -- confirm intended.
    for i in range(D-1):
        x = fk(x, u, g/r, L)*dt/D+x
    # predicted covariance, Kalman gain, and measurement update
    barP_kp1 = multi_dot([Fi, hat_Pkm1, Fi.T])+Q
    K_kp1 = multi_dot(
        [barP_kp1, H.T, np.linalg.inv(R+multi_dot([H, barP_kp1, H.T]))])
    hat_thetak = x+np.dot(K_kp1, zkp1-np.dot(H, x))
    hat_Pk = np.dot((np.diag([1, 1, 1, 1, 1, 1])-np.dot(K_kp1, H)), barP_kp1)
    return hat_thetak, hat_Pk, zkp1
| [
"numpy.arctan2",
"numpy.square",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot",
"numpy.diag",
"numpy.linalg.multi_dot"
] | [((2381, 2445), 'numpy.array', 'np.array', (['[[0.00377597, -0.00210312], [-0.00210312, 0.00125147]]'], {}), '([[0.00377597, -0.00210312], [-0.00210312, 0.00125147]])\n', (2389, 2445), True, 'import numpy as np\n'), ((2514, 2569), 'numpy.diag', 'np.diag', (['[3e-05, 3e-05, 0.0005, 0.0005, 0.0001, 0.0001]'], {}), '([3e-05, 3e-05, 0.0005, 0.0005, 0.0001, 0.0001])\n', (2521, 2569), True, 'import numpy as np\n'), ((2671, 2721), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1]])\n', (2679, 2721), True, 'import numpy as np\n'), ((3068, 3099), 'numpy.linalg.multi_dot', 'multi_dot', (['[Fi, hat_Pkm1, Fi.T]'], {}), '([Fi, hat_Pkm1, Fi.T])\n', (3077, 3099), False, 'from numpy.linalg import multi_dot\n'), ((2840, 2869), 'numpy.arctan2', 'np.arctan2', (['(-Lvec[1])', 'Lvec[2]'], {}), '(-Lvec[1], Lvec[2])\n', (2850, 2869), True, 'import numpy as np\n'), ((3271, 3298), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (3278, 3298), True, 'import numpy as np\n'), ((3299, 3315), 'numpy.dot', 'np.dot', (['K_kp1', 'H'], {}), '(K_kp1, H)\n', (3305, 3315), True, 'import numpy as np\n'), ((245, 257), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (251, 257), True, 'import numpy as np\n'), ((3236, 3248), 'numpy.dot', 'np.dot', (['H', 'x'], {}), '(H, x)\n', (3242, 3248), True, 'import numpy as np\n'), ((398, 410), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (404, 410), True, 'import numpy as np\n'), ((1236, 1248), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1242, 1248), True, 'import numpy as np\n'), ((1493, 1505), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1499, 1505), True, 'import numpy as np\n'), ((1818, 1830), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1824, 1830), True, 'import numpy as np\n'), ((3165, 3194), 'numpy.linalg.multi_dot', 'multi_dot', (['[H, barP_kp1, H.T]'], {}), '([H, barP_kp1, H.T])\n', (3174, 3194), False, 'from 
numpy.linalg import multi_dot\n'), ((308, 323), 'numpy.square', 'np.square', (['x[2]'], {}), '(x[2])\n', (317, 323), True, 'import numpy as np\n'), ((385, 397), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (391, 397), True, 'import numpy as np\n'), ((1450, 1462), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1456, 1462), True, 'import numpy as np\n'), ((1479, 1491), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1485, 1491), True, 'import numpy as np\n'), ((1805, 1817), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1811, 1817), True, 'import numpy as np\n'), ((2920, 2938), 'numpy.square', 'np.square', (['Lvec[1]'], {}), '(Lvec[1])\n', (2929, 2938), True, 'import numpy as np\n'), ((2939, 2957), 'numpy.square', 'np.square', (['Lvec[2]'], {}), '(Lvec[2])\n', (2948, 2957), True, 'import numpy as np\n'), ((190, 202), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (196, 202), True, 'import numpy as np\n'), ((207, 219), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (213, 219), True, 'import numpy as np\n'), ((228, 240), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (234, 240), True, 'import numpy as np\n'), ((295, 307), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (301, 307), True, 'import numpy as np\n'), ((1376, 1388), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1382, 1388), True, 'import numpy as np\n'), ((1436, 1448), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1442, 1448), True, 'import numpy as np\n'), ((1556, 1568), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1562, 1568), True, 'import numpy as np\n'), ((1769, 1781), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1775, 1781), True, 'import numpy as np\n'), ((282, 294), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (288, 294), True, 'import numpy as np\n'), ((332, 344), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (338, 344), True, 'import numpy as np\n'), ((365, 377), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (371, 377), 
True, 'import numpy as np\n'), ((1284, 1296), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1290, 1296), True, 'import numpy as np\n'), ((1543, 1555), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (1549, 1555), True, 'import numpy as np\n'), ((1590, 1602), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1596, 1602), True, 'import numpy as np\n'), ((1756, 1768), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (1762, 1768), True, 'import numpy as np\n'), ((352, 364), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (358, 364), True, 'import numpy as np\n'), ((1197, 1209), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (1203, 1209), True, 'import numpy as np\n'), ((1577, 1589), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (1583, 1589), True, 'import numpy as np\n'), ((1613, 1628), 'numpy.square', 'np.square', (['x[2]'], {}), '(x[2])\n', (1622, 1628), True, 'import numpy as np\n'), ((1655, 1670), 'numpy.square', 'np.square', (['x[2]'], {}), '(x[2])\n', (1664, 1670), True, 'import numpy as np\n'), ((1218, 1230), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (1224, 1230), True, 'import numpy as np\n'), ((1310, 1322), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1316, 1322), True, 'import numpy as np\n'), ((1327, 1339), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (1333, 1339), True, 'import numpy as np\n'), ((1348, 1360), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (1354, 1360), True, 'import numpy as np\n'), ((1639, 1651), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1645, 1651), True, 'import numpy as np\n'), ((1681, 1693), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1687, 1693), True, 'import numpy as np\n'), ((1703, 1715), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (1709, 1715), True, 'import numpy as np\n'), ((1736, 1748), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (1742, 1748), True, 'import numpy as np\n'), ((1723, 1735), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (1729, 
1735), True, 'import numpy as np\n')] |
# A Series has an index and can be used like a dict; a DataFrame treats each
# column as a Series -- think of building the data column by column.
import pandas as pd
pd.__version__
import numpy as np
from pandas import Series, DataFrame
# Series from a NumPy array with an explicit index
a = np.random.randn(5)
print(a)
s = Series(a,index=['a','b','c','d','e'])
print(s)
# Series from a dict: keys become the index
d = {'a':1,'b':2}
s = Series(d)
print(s)
# DataFrame from a dict of Series; indices are aligned (missing entries -> NaN)
d = {'one':Series([1.0,2.0,3.0],index=['a','b','c']),'two':Series([1.0,2.0,3.0,4.0],index=['a','b','c','d'])}
df = DataFrame(d)
print(df)
# df.concat: merge / concatenate
print(df['one'])
# df.drop_duplicate, sort_index,
df.plot() | [
"pandas.DataFrame",
"pandas.Series",
"numpy.random.randn"
] | [((159, 177), 'numpy.random.randn', 'np.random.randn', (['(5)'], {}), '(5)\n', (174, 177), True, 'import numpy as np\n'), ((192, 234), 'pandas.Series', 'Series', (['a'], {'index': "['a', 'b', 'c', 'd', 'e']"}), "(a, index=['a', 'b', 'c', 'd', 'e'])\n", (198, 234), False, 'from pandas import Series, DataFrame\n'), ((263, 272), 'pandas.Series', 'Series', (['d'], {}), '(d)\n', (269, 272), False, 'from pandas import Series, DataFrame\n'), ((399, 411), 'pandas.DataFrame', 'DataFrame', (['d'], {}), '(d)\n', (408, 411), False, 'from pandas import Series, DataFrame\n'), ((295, 341), 'pandas.Series', 'Series', (['[1.0, 2.0, 3.0]'], {'index': "['a', 'b', 'c']"}), "([1.0, 2.0, 3.0], index=['a', 'b', 'c'])\n", (301, 341), False, 'from pandas import Series, DataFrame\n'), ((343, 399), 'pandas.Series', 'Series', (['[1.0, 2.0, 3.0, 4.0]'], {'index': "['a', 'b', 'c', 'd']"}), "([1.0, 2.0, 3.0, 4.0], index=['a', 'b', 'c', 'd'])\n", (349, 399), False, 'from pandas import Series, DataFrame\n')] |
import numpy as np
import pickle
import pandas as pd
from sklearn.linear_model import LogisticRegression

# Load scored candidate pairs: known-true allusions and two sets of
# known-false ones.  BUGFIX: the pickle files were opened but never closed;
# context managers now close them deterministically.
with open("../output/nietzsche/orderedTuples.pickle", "rb") as fh:
    trueAllusions = [list(tup) for tup in pickle.load(fh)]

with open("../output/n1-lim/orderedTuples.pickle", "rb") as fh:
    falseAllusions = [list(tup)[3:] for tup in pickle.load(fh)]
with open("../output/n3-lim/orderedTuples.pickle", "rb") as fh:
    falseAllusions += [list(tup)[3:] for tup in pickle.load(fh)]

# Feature matrices: drop column 7 (the LCS string), coerce to float,
# and map NaN to 0.
tr = np.nan_to_num(np.delete(np.array(trueAllusions), 7, axis=1).astype(float))
fa = np.nan_to_num(np.delete(np.array(falseAllusions), 7, axis=1).astype(float))

# Basic per-metric statistics of both classes, written to CSV:
# min/max/mean/median of the true set, then of the false set.
stats_funcs = (np.amin, np.amax, np.mean, np.median)
metrics = [list(f(arr, 0)) for arr in (tr, fa) for f in stats_funcs]

colNames=['syntactic similarity', 'semantic similarity all words', 'semantic similarity without stopwords',
          'semantic similarity nouns', 'semantic similarity verbs', 'average similairty',
          'lcs length', 'syntactic similarity without tokens', 'common proper nouns', 'jaccard nouns',
          'jaccard verbs', 'jaccard adjectives']
df = pd.DataFrame(metrics)
df.columns = colNames
df.to_csv('../csv/metrics.csv')

# Training data: feature columns plus a label column (1 = true allusion,
# 0 = false allusion), shuffled before the fit.
tr_x = np.ones((tr.shape[0], tr.shape[1] + 1))
tr_x[:, :-1] = tr
fa_x = np.zeros((fa.shape[0], fa.shape[1] + 1))
fa_x[:, :-1] = fa
X = np.concatenate((tr_x, fa_x), axis=0)
np.random.shuffle(X)
y = X[:, -1]
X = X[:, :-1]

# Fit a logistic-regression classifier separating true from false allusions.
model = LogisticRegression()
model.fit(X, y)
model.fit(X,y)
| [
"pandas.DataFrame",
"numpy.random.shuffle",
"numpy.nan_to_num",
"numpy.amin",
"numpy.median",
"numpy.zeros",
"numpy.ones",
"numpy.amax",
"sklearn.linear_model.LogisticRegression",
"pickle.load",
"numpy.array",
"numpy.mean",
"numpy.delete",
"numpy.concatenate"
] | [((210, 233), 'pickle.load', 'pickle.load', (['pickle_off'], {}), '(pickle_off)\n', (221, 233), False, 'import pickle\n'), ((422, 445), 'pickle.load', 'pickle.load', (['pickle_off'], {}), '(pickle_off)\n', (433, 445), False, 'import pickle\n'), ((529, 552), 'pickle.load', 'pickle.load', (['pickle_off'], {}), '(pickle_off)\n', (540, 552), False, 'import pickle\n'), ((746, 769), 'numpy.array', 'np.array', (['trueAllusions'], {}), '(trueAllusions)\n', (754, 769), True, 'import numpy as np\n'), ((773, 797), 'numpy.array', 'np.array', (['falseAllusions'], {}), '(falseAllusions)\n', (781, 797), True, 'import numpy as np\n'), ((801, 825), 'numpy.delete', 'np.delete', (['tr', '(7)'], {'axis': '(1)'}), '(tr, 7, axis=1)\n', (810, 825), True, 'import numpy as np\n'), ((847, 871), 'numpy.delete', 'np.delete', (['fa', '(7)'], {'axis': '(1)'}), '(fa, 7, axis=1)\n', (856, 871), True, 'import numpy as np\n'), ((913, 930), 'numpy.nan_to_num', 'np.nan_to_num', (['tr'], {}), '(tr)\n', (926, 930), True, 'import numpy as np\n'), ((954, 971), 'numpy.nan_to_num', 'np.nan_to_num', (['fa'], {}), '(fa)\n', (967, 971), True, 'import numpy as np\n'), ((1684, 1705), 'pandas.DataFrame', 'pd.DataFrame', (['metrics'], {}), '(metrics)\n', (1696, 1705), True, 'import pandas as pd\n'), ((1789, 1828), 'numpy.ones', 'np.ones', (['(tr.shape[0], tr.shape[1] + 1)'], {}), '((tr.shape[0], tr.shape[1] + 1))\n', (1796, 1828), True, 'import numpy as np\n'), ((1846, 1886), 'numpy.zeros', 'np.zeros', (['(fa.shape[0], fa.shape[1] + 1)'], {}), '((fa.shape[0], fa.shape[1] + 1))\n', (1854, 1886), True, 'import numpy as np\n'), ((1901, 1937), 'numpy.concatenate', 'np.concatenate', (['(tr_x, fa_x)'], {'axis': '(0)'}), '((tr_x, fa_x), axis=0)\n', (1915, 1937), True, 'import numpy as np\n'), ((1936, 1956), 'numpy.random.shuffle', 'np.random.shuffle', (['X'], {}), '(X)\n', (1953, 1956), True, 'import numpy as np\n'), ((1985, 2005), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', 
(2003, 2005), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1055, 1069), 'numpy.amin', 'np.amin', (['tr', '(0)'], {}), '(tr, 0)\n', (1062, 1069), True, 'import numpy as np\n'), ((1091, 1105), 'numpy.amax', 'np.amax', (['tr', '(0)'], {}), '(tr, 0)\n', (1098, 1105), True, 'import numpy as np\n'), ((1127, 1141), 'numpy.mean', 'np.mean', (['tr', '(0)'], {}), '(tr, 0)\n', (1134, 1141), True, 'import numpy as np\n'), ((1163, 1179), 'numpy.median', 'np.median', (['tr', '(0)'], {}), '(tr, 0)\n', (1172, 1179), True, 'import numpy as np\n'), ((1201, 1215), 'numpy.amin', 'np.amin', (['fa', '(0)'], {}), '(fa, 0)\n', (1208, 1215), True, 'import numpy as np\n'), ((1237, 1251), 'numpy.amax', 'np.amax', (['fa', '(0)'], {}), '(fa, 0)\n', (1244, 1251), True, 'import numpy as np\n'), ((1273, 1287), 'numpy.mean', 'np.mean', (['fa', '(0)'], {}), '(fa, 0)\n', (1280, 1287), True, 'import numpy as np\n'), ((1309, 1325), 'numpy.median', 'np.median', (['fa', '(0)'], {}), '(fa, 0)\n', (1318, 1325), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 1 11:22:35 2015
@author: jmmauricio
"""
import numpy as np
import xlrd
import pandas as pd
def losses(i_rms, m, fp, T_a, params):
    """
    Evaluate the IGBT/diode loss polynomial and a four-node thermal network
    (junction -> case -> heatsink -> ambient) for a VSC switch.

    :param i_rms: RMS phase current (A).
    :param m: modulation index (pu).
    :param fp: power factor.
    :param T_a: ambient temperature.
    :param params: dict with the loss coefficients a_i..e_i (IGBT) and
        a_d..e_d (diode), and the thermal resistances R_th_igbt, R_th_diode,
        R_th_igbt_case, R_th_diode_case and R_th_sink.
    :return: 2-tuple of dicts (powers, temperatures); the *_deg entries are
        the temperatures converted from Kelvin to Celsius.
    """
    mfp = m * fp

    # loss polynomial: p = a + (b - c*m*fp)*i + (d - e*m*fp)*i^2
    p_igbt = (params['a_i']
              + (params['b_i'] - params['c_i'] * mfp) * i_rms
              + (params['d_i'] - params['e_i'] * mfp) * i_rms * i_rms)
    p_diode = (params['a_d']
               + (params['b_d'] - params['c_d'] * mfp) * i_rms
               + (params['d_d'] - params['e_d'] * mfp) * i_rms * i_rms)
    p_switch = p_igbt + p_diode

    # thermal ladder: ambient -> sink -> case -> junction
    T_sink = T_a + p_switch * params['R_th_sink']
    T_case_igbt = T_sink + p_igbt * params['R_th_igbt_case']
    T_case_diode = T_sink + p_diode * params['R_th_diode_case']
    T_igbt = T_case_igbt + p_igbt * params['R_th_igbt']
    T_diode = T_case_diode + p_diode * params['R_th_diode']

    powers = {'p_igbt': p_igbt, 'p_diode': p_diode}
    temperatures = {
        'T_igbt': T_igbt,
        'T_diode': T_diode,
        'T_case_igbt': T_case_igbt,
        'T_case_diode': T_case_diode,
        'T_sink': T_sink,
        'T_igbt_deg': T_igbt - 273.15,
        'T_diode_deg': T_diode - 273.15,
        'T_sink_deg': T_sink - 273.15,
    }
    return powers, temperatures
def vscthmodel(i_rms, m, fp, T_a, params):
    """
    Evaluate the VSC switch loss polynomial and a three-node thermal network
    (junction -> heatsink -> ambient).

    :param i_rms: RMS phase current (A).
    :param m: modulation index (pu).
    :param fp: power factor.
    :param T_a: ambient temperature.
    :param params: dict with the loss coefficients a_i..e_i (IGBT) and
        a_d..e_d (diode), and the thermal resistances R_th_igbt_sink,
        R_th_diode_sink and R_th_sink_a.
    :return: 2-tuple of dicts (powers, temperatures).
    """
    mfp = m * fp

    # loss polynomial: p = a + (b - c*m*fp)*i + (d - e*m*fp)*i^2
    p_igbt = (params['a_i']
              + (params['b_i'] - params['c_i'] * mfp) * i_rms
              + (params['d_i'] - params['e_i'] * mfp) * i_rms * i_rms)
    p_diode = (params['a_d']
               + (params['b_d'] - params['c_d'] * mfp) * i_rms
               + (params['d_d'] - params['e_d'] * mfp) * i_rms * i_rms)
    p_switch = p_igbt + p_diode

    # thermal ladder: ambient -> sink -> junction
    T_sink = T_a + p_switch * params['R_th_sink_a']
    T_igbt = T_sink + p_igbt * params['R_th_igbt_sink']
    T_diode = T_sink + p_diode * params['R_th_diode_sink']

    powers = {'p_igbt': p_igbt, 'p_diode': p_diode}
    # NOTE(review): unlike losses(), the *_deg entries are NOT converted from
    # Kelvin here -- they duplicate the raw temperatures; confirm intended units.
    temperatures = {
        'T_igbt': T_igbt,
        'T_diode': T_diode,
        'T_sink': T_sink,
        'T_igbt_deg': T_igbt,
        'T_diode_deg': T_diode,
        'T_sink_deg': T_sink,
    }
    return powers, temperatures
def man2param_1(man_electric, man_thermal):
    '''
    Fit the five IGBT and five diode loss-model coefficients from exactly five
    manufacturer operating points, and derive lumped thermal resistances from
    the first thermal operating point.

    Input
    -----
    man_electric: list of 5 rows
        [
        [i_1,m_1,cosphi_1,p_igbt_1,p_diode_1],
        [i_2,m_1,cosphi_1,p_igbt_2,p_diode_2],
        [i_3,m_1,cosphi_1,p_igbt_3,p_diode_3],
        [i_4,m_2,cosphi_2,p_igbt_4,p_diode_4],
        [i_5,m_2,cosphi_2,p_igbt_5,p_diode_5]
        ]
    man_thermal: list of rows [p_igbt, p_diode, T_igbt, T_diode, T_sink, T_a]
        (temperatures in Kelvin; only the first row is used)

    Returns
    -------
    dict with loss coefficients a_i..e_i / a_d..e_d and thermal resistances
    R_th_igbt_sink, R_th_diode_sink, R_th_sink_a.
    '''
    k2deg = 273.15
    # unpack the five operating points
    i_1 = man_electric[0][0]
    i_2 = man_electric[1][0]
    i_3 = man_electric[2][0]
    i_4 = man_electric[3][0]
    i_5 = man_electric[4][0]
    p_igbt_1 = man_electric[0][3]
    p_igbt_2 = man_electric[1][3]
    p_igbt_3 = man_electric[2][3]
    p_igbt_4 = man_electric[3][3]
    p_igbt_5 = man_electric[4][3]
    p_diode_1 = man_electric[0][4]
    p_diode_2 = man_electric[1][4]
    p_diode_3 = man_electric[2][4]
    p_diode_4 = man_electric[3][4]
    p_diode_5 = man_electric[4][4]
    m_1 = man_electric[0][1]
    m_2 = man_electric[1][1]
    m_3 = man_electric[2][1]
    m_4 = man_electric[3][1]
    m_5 = man_electric[4][1]
    cosphi_1 = man_electric[0][2]
    cosphi_2 = man_electric[1][2]
    cosphi_3 = man_electric[2][2]
    cosphi_4 = man_electric[3][2]
    cosphi_5 = man_electric[4][2]
    # alpha = m*cosphi multiplies the current-dependent loss terms
    alpha_1 = m_1*cosphi_1
    alpha_2 = m_2*cosphi_2
    alpha_3 = m_3*cosphi_3
    alpha_4 = m_4*cosphi_4
    alpha_5 = m_5*cosphi_5
    # one row per operating point of the model
    # p = a + (b - c*m*cosphi)*i + (d - e*m*cosphi)*i^2, linear in (a,b,c,d,e)
    A = np.array(
        [
        [1, i_1, -i_1*alpha_1, i_1**2, -i_1**2*alpha_1],
        [1, i_2, -i_2*alpha_2, i_2**2, -i_2**2*alpha_2],
        [1, i_3, -i_3*alpha_3, i_3**2, -i_3**2*alpha_3],
        [1, i_4, -i_4*alpha_4, i_4**2, -i_4**2*alpha_4],
        [1, i_5, -i_5*alpha_5, i_5**2, -i_5**2*alpha_5]
        ]
              )
    print(A)
    b_igbt = np.array(
        [
        [p_igbt_1],
        [p_igbt_2],
        [p_igbt_3],
        [p_igbt_4],
        [p_igbt_5]
        ]
              )
    # solve the exactly-determined 5x5 system for the IGBT coefficients
    # (each x[i] is a 1-element array since b_igbt is a column vector)
    x = np.linalg.solve(A, b_igbt)
    a_i = x[0]
    b_i = x[1]
    c_i = x[2]
    d_i = x[3]
    e_i = x[4]
    b_diode = np.array(
        [
        [p_diode_1],
        [p_diode_2],
        [p_diode_3],
        [p_diode_4],
        [p_diode_5]
        ]
              )
    # same system, diode loss coefficients
    x = np.linalg.solve(A, b_diode)
    a_d = x[0]
    b_d = x[1]
    c_d = x[2]
    d_d = x[3]
    e_d = x[4]
    # summary of the fitted points (kept for the latex table below)
    points=[
    [1, i_1,m_1,cosphi_1,alpha_1,p_igbt_1,p_diode_1],
    [2, i_2,m_2,cosphi_2,alpha_2,p_igbt_2,p_diode_2],
    [3, i_3,m_3,cosphi_3,alpha_3,p_igbt_3,p_diode_3],
    [4, i_4,m_4,cosphi_4,alpha_4,p_igbt_4,p_diode_4],
    [5, i_5,m_5,cosphi_5,alpha_5,p_igbt_5,p_diode_5]
    ]
    # man_thermal += [[p_igbt,p_diode,T_igbt+k2deg,T_diode+k2deg,T_sink+k2deg,T_a+k2deg]]
    # thermal resistances from the first thermal point:
    # R = deltaT / dissipated power
    idx = 0
    p_igbt = man_thermal[idx][0]
    p_diode = man_thermal[idx][1]
    T_igbt = man_thermal[idx][2]
    T_diode = man_thermal[idx][3]
    T_sink = man_thermal[idx][4]
    T_a = man_thermal[idx][5]
    p_switch = p_igbt + p_diode
    R_th_igbt_sink = (T_igbt-T_sink)/p_igbt
    R_th_sink_a = (T_sink-(T_a))/p_switch
    idx = 0
    p_diode = man_thermal[idx][1]
    T_diode = man_thermal[idx][3]
    T_sink = man_thermal[idx][4]
    R_th_diode_sink = (T_diode-T_sink)/p_diode
    # print(tabulate(points,tablefmt='latex'))
    params = dict(
              a_i = a_i,
              b_i = b_i,
              c_i = c_i,
              d_i = d_i,
              e_i = e_i,
              a_d = a_d,
              b_d = b_d,
              c_d = c_d,
              d_d = d_d,
              e_d = e_d,
              R_th_igbt_sink = R_th_igbt_sink,
              R_th_diode_sink = R_th_diode_sink,
              R_th_sink_a = R_th_sink_a,
              )
    return params
def man2param(man_data, validation=False, method='lsq1'):
    '''
    Fit the loss-model coefficients and thermal resistances from an arbitrary
    number of manufacturer operating points, by least squares.

    Input
    -----
    man_data: list of dictionaries, each with the following information:
    {'i': 29.0,'m':1.0,'cosphi':1.0,'p_i': 27.0,'p_d':6.99,'T_i': 54.0,'T_d': 51.0, 'T_c': 49.0, 'T_s':46.0, 'T_a':40.0 }
    'i' : RMS current (A)
    'm' : Modulator peak value (pu)
    'cosphi' : Power factor (-)
    'p_i' : IGBT power loss (W)
    'p_d' : Diode power loss (W)
    'T_i' : IGBT junture temperature (Celcius degrees)
    'T_d' : Diode junture temperature (Celcius degrees)
    'T_c' : Case temperature (Celcius degrees)
    'T_s' : Heatsink temperature (Celcius degrees)
    'T_a' : Ambient temperature (Celcius degrees)
    'Tau_h' : Sink temperature time constant (s)

    :param validation: if truthy, re-evaluate each point with vscthmodel and
        print the relative errors.
    :param method: 'lsq1' fits IGBT and diode separately; 'lsq2' solves one
        stacked least-squares problem for both.
    :return: dict with loss coefficients and thermal resistances.
    '''
    k2deg = 273.15

    N = len(man_data)

    # one row of the linear model p = a + (b - c*alpha)*i + (d - e*alpha)*i^2
    # per operating point; b_i / b_d are the measured IGBT / diode losses
    A = np.zeros((N,5))
    b_i = np.zeros((N,1))
    b_d = np.zeros((N,1))
    A_th = np.zeros((N,5))  # (unused)

    # running sums for averaging thermal resistances over the points
    # that carry temperature data
    R_th_igbt_sink_k = 0.0
    R_th_sink_a_k = 0.0
    R_th_diode_sink_k = 0.0
    k = 0
    k_th = 0.0
    for item in man_data:
        i_k = item['i']
        m_k = item['m']
        cosphi_k = item['cosphi']
        alpha_k = m_k*cosphi_k
        p_i_k = item['p_i']
        p_d_k = item['p_d']

        A[k,:] = np.array([1, i_k, -i_k*alpha_k, i_k**2, -i_k**2*alpha_k])
        b_i[k,:] = np.array([p_i_k])
        b_d[k,:] = np.array([p_d_k])
        k += 1

        # points with temperature data also contribute thermal resistances
        # (R = deltaT / dissipated power, averaged at the end)
        if 'T_i' in item:
            T_i = item['T_i']
            T_d = item['T_d']
            T_c = item['T_c']
            T_s = item['T_s']
            T_a = item['T_a']

            p_switch_k = p_i_k + p_d_k
            R_th_igbt_sink_k += (T_i-T_s)/p_i_k
            R_th_sink_a_k += (T_s-(T_a))/p_switch_k
            R_th_diode_sink_k += (T_d-T_s)/p_d_k
            k_th += 1.0

    R_th_igbt_sink = R_th_igbt_sink_k/k_th
    R_th_sink_a = R_th_sink_a_k/k_th
    R_th_diode_sink = R_th_diode_sink_k/k_th

#    x_i = np.linalg.solve(A, b_i)
#    x_d = np.linalg.solve(A, b_d)
    # NOTE(review): with 'lsq1' the solutions are (5,1) column vectors, so the
    # coefficient entries in params are 1-element arrays; with 'lsq2' they are
    # scalars.  Callers index results with [0] accordingly -- confirm.
    if method == 'lsq1':
        x_i = np.linalg.lstsq(A, b_i, rcond=None)[0]
        x_d = np.linalg.lstsq(A, b_d, rcond=None)[0]

    if method == 'lsq2':
        # stack the IGBT and diode targets and solve both fits at once
        b = np.hstack((b_i,b_d))
        x = np.linalg.lstsq(A, b, rcond=None)[0]
        print(b)
        print(x)
        x_i = x[0:5,0]
        x_d = x[0:5,1]

    params = dict(
              a_i = x_i[0],
              b_i = x_i[1],
              c_i = x_i[2],
              d_i = x_i[3],
              e_i = x_i[4],
              a_d = x_d[0],
              b_d = x_d[1],
              c_d = x_d[2],
              d_d = x_d[3],
              e_d = x_d[4],
              R_th_igbt_sink = R_th_igbt_sink,
              R_th_diode_sink = R_th_diode_sink,
              R_th_sink_a = R_th_sink_a,
              )

    if validation:
        # re-evaluate each point with the fitted model and print the
        # manufacturer values, the model values and the relative errors
        for item in man_data:
            i_rms = item['i']
            m = item['m']
            cosphi = item['cosphi']
            T_a = item['T_a']
            powers, temperatures = vscthmodel(i_rms, m, cosphi, T_a, params)
            p_i_k, p_d_k, T_i_k, T_d_k, T_s_k = float(item['p_i']),float(item['p_d']),float(item['T_i']),float(item['T_d']),float(item['T_s'])
            p_i, p_d, T_i, T_d, T_s = float(powers['p_igbt']),float(powers['p_diode']),float(temperatures['T_igbt']),float(temperatures['T_diode']),float(temperatures['T_sink'])
            print(f'{p_i_k:0.2f}, {p_d_k:0.2f}, {T_i_k:0.2f}, {T_d_k:0.2f}, {T_s_k:0.2f}')
            print(f'{p_i:0.2f}, {p_d:0.2f}, {T_i:0.2f}, {T_d:0.2f}, {T_s:0.2f}')
            eps_pi = (p_i_k - p_i)/p_i_k*100.0
            eps_pd = (p_d_k - p_d)/p_d_k*100.0
            eps_ti = (T_i_k - T_i)/T_i_k*100.0
            eps_td = (T_d_k - T_d)/T_d_k*100.0
            eps_ts = (T_s_k - T_s)/T_s_k*100.0
            print(f'{eps_pi:<0.2f}%, {eps_pd:0.2f}%, {eps_ti:0.2f}%, {eps_td:0.2f}%, {eps_ts:0.2f}%')
            print(f'-------------------------------------------------------------------------------')

    return params
# powers = dict(p_igbt=p_igbt,
# p_diode=p_diode)
# temperatures = dict(T_igbt=T_igbt,
# T_diode=T_diode,
# T_sink=T_sink,
# T_igbt_deg=T_igbt ,
# T_diode_deg=T_diode ,
# T_sink_deg=T_sink)
def semisel_xls(file, shs_for_param, shs_for_validate, T_a_deg):
    """
    Read SemiSel simulation results from an Excel workbook, fit the
    loss/thermal model from the sheets in ``shs_for_param``, then validate it
    against the sheets in ``shs_for_validate`` (printing latex table rows).

    :param file: path to the SemiSel .xls file.
    :param shs_for_param: sheet indices used for fitting; must be exactly 5,
        as required by man2param_1.
    :param shs_for_validate: sheet indices used for validation.
    :param T_a_deg: ambient temperature (same scale as the sheet temperatures).
    :return: parameter dict (see man2param_1).
    """
    k2deg = 273.15
    wb = xlrd.open_workbook(file)

    def _read_sheet(sh):
        # each SemiSel sheet stores one operating point in fixed cells,
        # formatted as "value unit" strings
        V_dc = float(sh.cell_value(0, 1).split(' ')[0])
        V_ac = float(sh.cell_value(1, 1).split(' ')[0])
        i_rms = float(sh.cell_value(2, 1).split(' ')[0])
        fp = float(sh.cell_value(5, 1))
        p_igbt = float(sh.cell_value(15, 1).split(' ')[0])
        p_diode = float(sh.cell_value(18, 1).split(' ')[0])
        T_igbt = float(sh.cell_value(23, 1).split(' ')[0])
        T_diode = float(sh.cell_value(24, 1).split(' ')[0])
        T_sink = float(sh.cell_value(21, 1).split(' ')[0])
        m = V_ac*np.sqrt(2)/V_dc  # modulation index from AC/DC voltages
        return i_rms, m, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink

    man_electric = []
    man_thermal = []
    for sh_num in shs_for_param:
        i_rms, m, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink = _read_sheet(wb.sheet_by_index(sh_num))
        man_electric += [[i_rms,m,fp,p_igbt,p_diode]]
        man_thermal += [[p_igbt,p_diode,T_igbt+k2deg,T_diode+k2deg,T_sink+k2deg,T_a_deg+k2deg]]
        print('{} & {} & {} & {} & {} & {} & {}'.format(i_rms, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink))

    # BUGFIX: man2param() takes a list of dicts (item['i'], ...), so calling
    # it with the (man_electric, man_thermal) lists raised TypeError; that
    # two-list interface is provided by man2param_1().
    params = man2param_1(man_electric, man_thermal)

    print('\midrule')
    for sh_num in shs_for_validate:
        i_rms, m, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink = _read_sheet(wb.sheet_by_index(sh_num))
        pows, temps = vscthmodel(i_rms, m, fp, T_a_deg, params)
        print('{:2.1f} & {:2.2f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} \\\\ '.format(i_rms,
              fp,
              p_igbt, pows['p_igbt'][0],
              p_diode, pows['p_diode'][0],
              T_igbt, temps['T_igbt_deg'][0],
              T_diode, temps['T_diode_deg'][0],
              T_sink, temps['T_sink_deg'][0],
              ))

    return params
def imposim_xls(file):
    """
    Read IPOSIM simulation results from an Excel workbook (two sheets) and
    fit the loss/thermal model parameters.

    Five operating points are read: rows 9, 13 and 16 of sheet 0 and rows 13
    and 16 of sheet 1 -- exactly the five points man2param_1 requires.

    :param file: path to the IPOSIM .xls file.
    :return: parameter dict (see man2param_1).
    """
    k2deg = 273.15
    wb = xlrd.open_workbook(file)

    def _read_point(sh, idx):
        # one operating point: electrical data in row idx, junction/sink
        # temperatures at fixed row offsets below it
        i_rms = float(sh.cell_value(idx, 0))
        m = float(sh.cell_value(26, 3))
        fp = float(sh.cell_value(27, 3))
        half_sw = 0.5*float(sh.cell_value(idx, 13))  # switching losses split evenly IGBT/diode
        p_igbt = float(sh.cell_value(idx, 6)) + half_sw
        p_diode = float(sh.cell_value(idx, 11)) + half_sw
        T_igbt = float(sh.cell_value(idx+34, 10))
        T_diode = float(sh.cell_value(idx+52, 10))
        T_sink = float(sh.cell_value(idx+52, 8)) + float(sh.cell_value(idx+53, 9))
        return i_rms, m, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink

    # ambient temperature, stored on sheet 0 relative to the first point (row 9)
    T_a_deg = float(wb.sheet_by_index(0).cell_value(9+53, 9))

    man_electric = []
    man_thermal = []
    for sh_num, idxs in ((0, (9, 13, 16)), (1, (13, 16))):
        sh = wb.sheet_by_index(sh_num)
        for idx in idxs:
            i_rms, m, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink = _read_point(sh, idx)
            man_electric += [[i_rms,m,fp,p_igbt,p_diode]]
            man_thermal += [[p_igbt,p_diode,T_igbt+k2deg,T_diode+k2deg,T_sink+k2deg,T_a_deg+k2deg]]
            print('{:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} & {:2.1f} \\\\'.format(i_rms, fp, p_igbt, p_diode, T_igbt, T_diode, T_sink))

    print(np.array(man_electric))
    # BUGFIX: man2param() takes a list of dicts (item['i'], ...), so calling
    # it with the (man_electric, man_thermal) lists raised TypeError; the
    # two-list interface is man2param_1().  Also return the fitted parameters
    # instead of dropping them.
    params = man2param_1(man_electric, man_thermal)
    return params
def imposim2xls(file):
    """Build loss/thermal model parameters from an 'imposim' Excel workbook.

    Reads the first two sheets (data headers on row 8) and assembles the
    electric and thermal calibration tables expected by ``man2param``.

    NOTE(review): ``sheetname=`` is the pre-1.0 pandas spelling of
    ``sheet_name=`` -- confirm the pinned pandas version before upgrading.
    """
    k2deg = 273.15
    df_1 = pd.read_excel(file, sheetname=0, skiprows=8)
    df_2 = pd.read_excel(file, sheetname=1, skiprows=8)
    print(df_1)

    def _electric(df, row):
        # One operating point: [i_rms, m, fp, p_igbt, p_diode].
        return [df.i_rms[row], df.m[row], df.fp[row], df.p_igbt[row], df.p_diode[row]]

    idx1, idx2, idx3 = 3, 10, 16
    man_electric = [
        _electric(df_1, idx1),
        _electric(df_1, idx2),
        _electric(df_1, idx3),
        _electric(df_2, idx2),
        _electric(df_2, idx3),
    ]
    # Single thermal calibration point; temperatures converted to kelvin.
    man_thermal = [[
        df_1.p_igbt[idx2], df_1.p_diode[idx2],
        df_1.T_igbt[idx2] + k2deg, df_1.T_diode[idx2] + k2deg,
        df_1.T_sink[idx2] + k2deg, df_1.T_a[idx2] + k2deg,
    ]]
    return man2param(man_electric, man_thermal)
if __name__ == '__main__':
    # Calibrate the converter loss/thermal model from a manufacturer
    # workbook, then evaluate one operating point and print the results.
    k2deg = 273.15
    file = '/home/jmmauricio/Documents/public/jmmauricio6/RESEARCH/ARTICLES/doing/vsc_model/code/semikron_100kva/semikron_SKiiP38GB12E4V1.xls'
    # shs_for_param = [0,2,4,5,6]
    # shs_for_validate = [7,8,9,10]
    # T_a_deg = 40.0+k2deg
    # params = semisel_xls(file,shs_for_param, shs_for_validate, T_a_deg)
    #
    # print(params)
    # The semikron workbook above is superseded by the imposim one below.
    file = '/home/jmmauricio/Documents/public/jmmauricio6/RESEARCH/ARTICLES/doing/vsc_model/code/imposim/imposim_F800R17.xls'
    params = imposim2xls(file)
    print(params)

    # Unpack the fitted thermal resistances and the loss-polynomial
    # coefficients (suffix _i = IGBT, _d = diode).
    T_a = 40 + k2deg
    R_th_diode_sink, R_th_igbt_sink, R_th_sink_a = (
        params['R_th_diode_sink'], params['R_th_igbt_sink'], params['R_th_sink_a'])
    a_d, b_d, c_d, d_d, e_d = (params[k] for k in ('a_d', 'b_d', 'c_d', 'd_d', 'e_d'))
    a_i, b_i, c_i, d_i, e_i = (params[k] for k in ('a_i', 'b_i', 'c_i', 'd_i', 'e_i'))

    # Operating point: RMS current, modulation index and power factor.
    i_rms = 520
    m = 400*np.sqrt(2)/800
    fp = 1.0

    def _device_loss(a, b, c, d, e):
        # Quadratic-in-current loss model, same arithmetic as the fit.
        return 1.000*(a + (b - c*m*fp)*i_rms + (d - e*m*fp)*i_rms*i_rms)

    p_igbt = _device_loss(a_i, b_i, c_i, d_i, e_i)
    p_diode = _device_loss(a_d, b_d, c_d, d_d, e_d)
    p_switch = p_igbt + p_diode + i_rms**2*0.000
    print('p_igbt',p_igbt)
    print('p_diode',p_diode)
    print('p_switch', p_switch)

    # Steady-state temperatures from the thermal-resistance ladder:
    # ambient -> heat sink -> each device junction.
    T_sink_0 = T_a + p_switch*R_th_sink_a
    T_igbt_0 = T_sink_0 + p_igbt*R_th_igbt_sink
    T_diode_0 = T_sink_0 + p_diode*R_th_diode_sink
    print('T_sink_0', T_sink_0-k2deg)
    print('T_igbt_0', T_igbt_0-k2deg)
    print('T_diode_0', T_diode_0-k2deg)
#
## C_th_diode= 10
## C_th_diode_case= 2
## C_th_igbt= 18
## C_th_igbt_case= 5
## C_th_sink= 6000.0
## R_th_diode= 0.01979045401629802
## R_th_diode_case= 0.018
## R_th_igbt= 0.009765625
## R_th_igbt_case= 0.009
## R_th_sink= 0.007
## a_d = 143.48507451
## a_i = 421.02132341
## b_d = 0.589627
## b_i = 0.55708434
## c_d = 0.18337165
## c_i =-0.12254324
## d_d = 0.00026235
## d_i = 0.00089385
## e_d = 0.00021407
## e_i =-0.00041411
## T_a = 35.0+273
## params ={'C_th_diode': C_th_diode,
## 'C_th_diode_case': C_th_diode_case,
## 'C_th_igbt': C_th_igbt,
## 'C_th_igbt_case': C_th_igbt_case,
## 'C_th_sink': C_th_sink,
## 'R_th_diode': R_th_diode,
## 'R_th_diode_case': R_th_diode_case,
## 'R_th_igbt': R_th_igbt,
## 'R_th_igbt_case':R_th_igbt_case,
## 'R_th_sink': R_th_sink,
## 'a_d': a_d,
## 'a_i': a_i,
## 'b_d': b_d,
## 'b_i': b_i,
## 'c_d': c_d,
## 'c_i': c_i,
## 'd_d': d_d,
## 'd_i': d_i,
## 'e_d': e_d,
## 'e_i': e_i,
## 'm':0.85,
## 'fp':0.8,
## 'i_rms':1400,
## 'T_a':35+273.3,
## }
##
##
## i_rms = 1500
## fp = 0.85
## m = 0.8
## pows, temps = losses(i_rms, m, fp, T_a, params)
## print(pows)
## print(temps) | [
"numpy.linalg.lstsq",
"xlrd.open_workbook",
"numpy.zeros",
"numpy.hstack",
"pandas.read_excel",
"numpy.array",
"numpy.linalg.solve",
"numpy.sqrt"
] | [((4294, 4601), 'numpy.array', 'np.array', (['[[1, i_1, -i_1 * alpha_1, i_1 ** 2, -i_1 ** 2 * alpha_1], [1, i_2, -i_2 *\n alpha_2, i_2 ** 2, -i_2 ** 2 * alpha_2], [1, i_3, -i_3 * alpha_3, i_3 **\n 2, -i_3 ** 2 * alpha_3], [1, i_4, -i_4 * alpha_4, i_4 ** 2, -i_4 ** 2 *\n alpha_4], [1, i_5, -i_5 * alpha_5, i_5 ** 2, -i_5 ** 2 * alpha_5]]'], {}), '([[1, i_1, -i_1 * alpha_1, i_1 ** 2, -i_1 ** 2 * alpha_1], [1, i_2,\n -i_2 * alpha_2, i_2 ** 2, -i_2 ** 2 * alpha_2], [1, i_3, -i_3 * alpha_3,\n i_3 ** 2, -i_3 ** 2 * alpha_3], [1, i_4, -i_4 * alpha_4, i_4 ** 2, -i_4 **\n 2 * alpha_4], [1, i_5, -i_5 * alpha_5, i_5 ** 2, -i_5 ** 2 * alpha_5]])\n', (4302, 4601), True, 'import numpy as np\n'), ((4637, 4707), 'numpy.array', 'np.array', (['[[p_igbt_1], [p_igbt_2], [p_igbt_3], [p_igbt_4], [p_igbt_5]]'], {}), '([[p_igbt_1], [p_igbt_2], [p_igbt_3], [p_igbt_4], [p_igbt_5]])\n', (4645, 4707), True, 'import numpy as np\n'), ((4768, 4794), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b_igbt'], {}), '(A, b_igbt)\n', (4783, 4794), True, 'import numpy as np\n'), ((4900, 4975), 'numpy.array', 'np.array', (['[[p_diode_1], [p_diode_2], [p_diode_3], [p_diode_4], [p_diode_5]]'], {}), '([[p_diode_1], [p_diode_2], [p_diode_3], [p_diode_4], [p_diode_5]])\n', (4908, 4975), True, 'import numpy as np\n'), ((5031, 5058), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b_diode'], {}), '(A, b_diode)\n', (5046, 5058), True, 'import numpy as np\n'), ((7338, 7354), 'numpy.zeros', 'np.zeros', (['(N, 5)'], {}), '((N, 5))\n', (7346, 7354), True, 'import numpy as np\n'), ((7364, 7380), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (7372, 7380), True, 'import numpy as np\n'), ((7390, 7406), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (7398, 7406), True, 'import numpy as np\n'), ((7418, 7434), 'numpy.zeros', 'np.zeros', (['(N, 5)'], {}), '((N, 5))\n', (7426, 7434), True, 'import numpy as np\n'), ((10827, 10851), 'xlrd.open_workbook', 'xlrd.open_workbook', (['file'], {}), 
'(file)\n', (10845, 10851), False, 'import xlrd\n'), ((13936, 13960), 'xlrd.open_workbook', 'xlrd.open_workbook', (['file'], {}), '(file)\n', (13954, 13960), False, 'import xlrd\n'), ((18261, 18305), 'pandas.read_excel', 'pd.read_excel', (['file'], {'sheetname': '(0)', 'skiprows': '(8)'}), '(file, sheetname=0, skiprows=8)\n', (18274, 18305), True, 'import pandas as pd\n'), ((18317, 18361), 'pandas.read_excel', 'pd.read_excel', (['file'], {'sheetname': '(1)', 'skiprows': '(8)'}), '(file, sheetname=1, skiprows=8)\n', (18330, 18361), True, 'import pandas as pd\n'), ((7764, 7829), 'numpy.array', 'np.array', (['[1, i_k, -i_k * alpha_k, i_k ** 2, -i_k ** 2 * alpha_k]'], {}), '([1, i_k, -i_k * alpha_k, i_k ** 2, -i_k ** 2 * alpha_k])\n', (7772, 7829), True, 'import numpy as np\n'), ((7844, 7861), 'numpy.array', 'np.array', (['[p_i_k]'], {}), '([p_i_k])\n', (7852, 7861), True, 'import numpy as np\n'), ((7881, 7898), 'numpy.array', 'np.array', (['[p_d_k]'], {}), '([p_d_k])\n', (7889, 7898), True, 'import numpy as np\n'), ((8683, 8704), 'numpy.hstack', 'np.hstack', (['(b_i, b_d)'], {}), '((b_i, b_d))\n', (8692, 8704), True, 'import numpy as np\n'), ((18129, 18151), 'numpy.array', 'np.array', (['man_electric'], {}), '(man_electric)\n', (18137, 18151), True, 'import numpy as np\n'), ((8553, 8588), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b_i'], {'rcond': 'None'}), '(A, b_i, rcond=None)\n', (8568, 8588), True, 'import numpy as np\n'), ((8606, 8641), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b_d'], {'rcond': 'None'}), '(A, b_d, rcond=None)\n', (8621, 8641), True, 'import numpy as np\n'), ((8716, 8749), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b'], {'rcond': 'None'}), '(A, b, rcond=None)\n', (8731, 8749), True, 'import numpy as np\n'), ((20295, 20305), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20302, 20305), True, 'import numpy as np\n'), ((11650, 11660), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11657, 11660), True, 'import numpy as np\n'), 
((12772, 12782), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (12779, 12782), True, 'import numpy as np\n')] |
import numpy as np
from ground.base import get_context
# The `ground` context supplies the point/segment classes used throughout
# this module (and consumed by bentley_ottmann below).
context = get_context()
Point, Segment = context.point_cls, context.segment_cls
from bentley_ottmann.planar import segments_intersect
from tqdm import tqdm
# NOTE(review): plt and mc are unused in this excerpt -- presumably used
# further down the file for plotting the sampled segments.
import matplotlib.pylab as plt
from matplotlib import collections as mc
class LineSegmentSampling2D:
"""
A class to generate line segments in a rectangular domain of
size lx, ly
"""
def __init__(self, min_length, max_length, lx, ly):
self.lx = lx
self.ly = ly
self.min_length = min_length
self.max_length = max_length
def generateLine(self):
x1 = np.random.uniform(0, self.lx)
y1 = np.random.uniform(0, self.ly)
a = np.random.rand() * 2 * np.pi
# Sqrt since the cumulative distribution function (CDF) changes linearly
r = np.sqrt(np.random.uniform(self.min_length, self.max_length))
x2 = r * np.cos(a) + x1
if x2 > self.lx:
x2 = self.lx
elif x2 < 0:
x2 = 0
y2 = r * np.sin(a) + y1
if y2 > self.ly:
y2 = self.ly
elif y2 < 0:
y2 = 0
line_seg = Segment(Point(x1, y1), Point(x2, y2))
return line_seg
def generate_N_lines(self, N):
lines = []
for i in range(N):
lines.append(self.generateLine())
return lines
    def generate_N_non_intersecting_lines(self, N):
        """Rejection-sample N mutually non-intersecting segments.

        Each candidate is tested against the whole accepted set via
        ``segments_intersect`` and discarded if it introduces an
        intersection, so this can loop for a long time when the domain
        is crowded relative to N.
        """
        lines = []
        # Progress bar advances only on accepted (non-intersecting) segments.
        pbar = tqdm(total = N)
        while len(lines) < N:
            lines.append(self.generateLine())
            # Re-checks the entire current set each iteration; the newest
            # segment is dropped if any intersection is reported.
            if (segments_intersect(lines)):
                lines = lines[:-1]
            else:
                pbar.update(1)
return lines | [
"numpy.random.uniform",
"tqdm.tqdm",
"ground.base.get_context",
"bentley_ottmann.planar.segments_intersect",
"numpy.sin",
"numpy.cos",
"numpy.random.rand"
] | [((65, 78), 'ground.base.get_context', 'get_context', ([], {}), '()\n', (76, 78), False, 'from ground.base import get_context\n'), ((629, 658), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.lx'], {}), '(0, self.lx)\n', (646, 658), True, 'import numpy as np\n'), ((672, 701), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.ly'], {}), '(0, self.ly)\n', (689, 701), True, 'import numpy as np\n'), ((1475, 1488), 'tqdm.tqdm', 'tqdm', ([], {'total': 'N'}), '(total=N)\n', (1479, 1488), False, 'from tqdm import tqdm\n'), ((847, 898), 'numpy.random.uniform', 'np.random.uniform', (['self.min_length', 'self.max_length'], {}), '(self.min_length, self.max_length)\n', (864, 898), True, 'import numpy as np\n'), ((1583, 1608), 'bentley_ottmann.planar.segments_intersect', 'segments_intersect', (['lines'], {}), '(lines)\n', (1601, 1608), False, 'from bentley_ottmann.planar import segments_intersect\n'), ((715, 731), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (729, 731), True, 'import numpy as np\n'), ((918, 927), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (924, 927), True, 'import numpy as np\n'), ((1042, 1051), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1048, 1051), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.